Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/Makefile4
-rw-r--r--drivers/acpi/acpica/acevents.h51
-rw-r--r--drivers/acpi/acpica/acglobal.h25
-rw-r--r--drivers/acpi/acpica/acinterp.h9
-rw-r--r--drivers/acpi/acpica/aclocal.h19
-rw-r--r--drivers/acpi/acpica/actables.h4
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c10
-rw-r--r--drivers/acpi/acpica/dsobject.c14
-rw-r--r--drivers/acpi/acpica/dsopcode.c13
-rw-r--r--drivers/acpi/acpica/dswexec.c6
-rw-r--r--drivers/acpi/acpica/dswstate.c10
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c167
-rw-r--r--drivers/acpi/acpica/evgpeblk.c766
-rw-r--r--drivers/acpi/acpica/evgpeinit.c653
-rw-r--r--drivers/acpi/acpica/evgpeutil.c337
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evxface.c24
-rw-r--r--drivers/acpi/acpica/evxfevnt.c191
-rw-r--r--drivers/acpi/acpica/exconfig.c21
-rw-r--r--drivers/acpi/acpica/exconvrt.c4
-rw-r--r--drivers/acpi/acpica/excreate.c4
-rw-r--r--drivers/acpi/acpica/exdebug.c261
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c16
-rw-r--r--drivers/acpi/acpica/exmisc.c8
-rw-r--r--drivers/acpi/acpica/exmutex.c46
-rw-r--r--drivers/acpi/acpica/exnames.c4
-rw-r--r--drivers/acpi/acpica/exoparg1.c18
-rw-r--r--drivers/acpi/acpica/exoparg2.c37
-rw-r--r--drivers/acpi/acpica/exoparg3.c4
-rw-r--r--drivers/acpi/acpica/exoparg6.c4
-rw-r--r--drivers/acpi/acpica/exprep.c4
-rw-r--r--drivers/acpi/acpica/exregion.c17
-rw-r--r--drivers/acpi/acpica/exresnte.c4
-rw-r--r--drivers/acpi/acpica/exresolv.c11
-rw-r--r--drivers/acpi/acpica/exresop.c8
-rw-r--r--drivers/acpi/acpica/exstore.c218
-rw-r--r--drivers/acpi/acpica/exsystem.c10
-rw-r--r--drivers/acpi/acpica/hwregs.c6
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c4
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c4
-rw-r--r--drivers/acpi/acpica/psargs.c4
-rw-r--r--drivers/acpi/acpica/psloop.c3
-rw-r--r--drivers/acpi/acpica/psxface.c5
-rw-r--r--drivers/acpi/acpica/rscreate.c14
-rw-r--r--drivers/acpi/acpica/rslist.c6
-rw-r--r--drivers/acpi/acpica/rsmisc.c4
-rw-r--r--drivers/acpi/acpica/tbfadt.c16
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c69
-rw-r--r--drivers/acpi/acpica/tbutils.c101
-rw-r--r--drivers/acpi/acpica/tbxface.c80
-rw-r--r--drivers/acpi/acpica/tbxfroot.c6
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c14
-rw-r--r--drivers/acpi/acpica/utdelete.c6
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c1
-rw-r--r--drivers/acpi/acpica/utmisc.c6
-rw-r--r--drivers/acpi/acpica/utmutex.c4
-rw-r--r--drivers/acpi/acpica/utobject.c8
-rw-r--r--drivers/acpi/bus.c44
-rw-r--r--drivers/acpi/osl.c4
-rw-r--r--drivers/acpi/pci_irq.c8
-rw-r--r--drivers/acpi/power.c1
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/system.c5
-rw-r--r--drivers/ata/Kconfig20
-rw-r--r--drivers/ata/Makefile3
-rw-r--r--drivers/ata/ahci.c2544
-rw-r--r--drivers/ata/ahci.h343
-rw-r--r--drivers/ata/ahci_platform.c192
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/libahci.c2216
-rw-r--r--drivers/ata/libata-core.c216
-rw-r--r--drivers/ata/libata-eh.c6
-rw-r--r--drivers/ata/libata-pmp.c32
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/ata/libata-sff.c1387
-rw-r--r--drivers/ata/libata.h29
-rw-r--r--drivers/ata/pata_acpi.c8
-rw-r--r--drivers/ata/pata_ali.c2
-rw-r--r--drivers/ata/pata_at91.c1
-rw-r--r--drivers/ata/pata_atiixp.c2
-rw-r--r--drivers/ata/pata_bf54x.c87
-rw-r--r--drivers/ata/pata_cmd640.c13
-rw-r--r--drivers/ata/pata_cs5520.c2
-rw-r--r--drivers/ata/pata_cs5530.c4
-rw-r--r--drivers/ata/pata_hpt366.c2
-rw-r--r--drivers/ata/pata_hpt37x.c4
-rw-r--r--drivers/ata/pata_hpt3x2n.c2
-rw-r--r--drivers/ata/pata_icside.c5
-rw-r--r--drivers/ata/pata_it821x.c6
-rw-r--r--drivers/ata/pata_macio.c5
-rw-r--r--drivers/ata/pata_mpc52xx.c78
-rw-r--r--drivers/ata/pata_ns87415.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c12
-rw-r--r--drivers/ata/pata_oldpiix.c2
-rw-r--r--drivers/ata/pata_pcmcia.c49
-rw-r--r--drivers/ata/pata_pdc2027x.c4
-rw-r--r--drivers/ata/pata_pdc202xx_old.c2
-rw-r--r--drivers/ata/pata_platform.c1
-rw-r--r--drivers/ata/pata_radisys.c2
-rw-r--r--drivers/ata/pata_sc1200.c4
-rw-r--r--drivers/ata/pata_scc.c80
-rw-r--r--drivers/ata/pata_sch.c12
-rw-r--r--drivers/ata/pata_serverworks.c6
-rw-r--r--drivers/ata/pata_sil680.c30
-rw-r--r--drivers/ata/pata_via.c6
-rw-r--r--drivers/ata/pdc_adma.c74
-rw-r--r--drivers/ata/sata_inic162x.c25
-rw-r--r--drivers/ata/sata_mv.c47
-rw-r--r--drivers/ata/sata_nv.c263
-rw-r--r--drivers/ata/sata_promise.c32
-rw-r--r--drivers/ata/sata_qstor.c90
-rw-r--r--drivers/ata/sata_sil.c9
-rw-r--r--drivers/ata/sata_sil24.c9
-rw-r--r--drivers/ata/sata_svw.c2
-rw-r--r--drivers/ata/sata_sx4.c10
-rw-r--r--drivers/ata/sata_uli.c4
-rw-r--r--drivers/ata/sata_vsc.c10
-rw-r--r--drivers/base/iommu.c43
-rw-r--r--drivers/base/platform.c30
-rw-r--r--drivers/base/power/runtime.c10
-rw-r--r--drivers/base/power/sysfs.c65
-rw-r--r--drivers/block/amiflop.c47
-rw-r--r--drivers/block/cciss.c3
-rw-r--r--drivers/block/hd.c4
-rw-r--r--drivers/bluetooth/bluecard_cs.c11
-rw-r--r--drivers/bluetooth/bt3c_cs.c11
-rw-r--r--drivers/bluetooth/btuart_cs.c11
-rw-r--r--drivers/bluetooth/dtl1_cs.c11
-rw-r--r--drivers/char/agp/generic.c2
-rw-r--r--drivers/char/bsr.c2
-rw-r--r--drivers/char/i8k.c21
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c9
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c5
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c19
-rw-r--r--drivers/char/pcmcia/ipwireless/main.h1
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.c19
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.h3
-rw-r--r--drivers/char/pcmcia/synclink_cs.c22
-rw-r--r--drivers/char/serial167.c2
-rw-r--r--drivers/char/sysrq.c2
-rw-r--r--drivers/char/tpm/Kconfig6
-rw-r--r--drivers/char/tpm/tpm.c47
-rw-r--r--drivers/char/tpm/tpm_tis.c40
-rw-r--r--drivers/clocksource/cs5535-clockevt.c8
-rw-r--r--drivers/clocksource/sh_cmt.c45
-rw-r--r--drivers/clocksource/sh_mtu2.c37
-rw-r--r--drivers/clocksource/sh_tmu.c41
-rw-r--r--drivers/cpufreq/cpufreq.c40
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c48
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c115
-rw-r--r--drivers/cpuidle/governors/ladder.c2
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/dma/shdma.c25
-rw-r--r--drivers/dma/shdma.h4
-rw-r--r--drivers/firewire/ohci.c2
-rw-r--r--drivers/gpio/wm8994-gpio.c12
-rw-r--r--drivers/gpu/drm/drm_bufs.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios.h4
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c168
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-highlander.c5
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c14
-rw-r--r--drivers/i2c/busses/i2c-mpc.c6
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c6
-rw-r--r--drivers/i2c/busses/i2c-omap.c265
-rw-r--r--drivers/i2c/busses/i2c-pxa.c26
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c19
-rw-r--r--drivers/i2c/i2c-core.c166
-rw-r--r--drivers/ide/ide-cs.c20
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/core/cma.c74
-rw-r--r--drivers/infiniband/core/mad.c4
-rw-r--r--drivers/infiniband/core/ucm.c3
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/core/user_mad.c12
-rw-r--r--drivers/infiniband/core/uverbs_main.c11
-rw-r--r--drivers/infiniband/hw/amso1100/c2.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_alloc.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_mq.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c133
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig18
-rw-r--r--drivers/infiniband/hw/cxgb4/Makefile5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c2374
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c882
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c520
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c193
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h745
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c811
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c518
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c1577
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c417
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h550
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h829
-rw-r--r--drivers/infiniband/hw/cxgb4/user.h66
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6120.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba7220.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c8
-rw-r--r--drivers/infiniband/hw/mlx4/main.c1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c50
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c12
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c16
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c10
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c20
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c9
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h4
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c115
-rw-r--r--drivers/input/gameport/gameport.c4
-rw-r--r--drivers/input/joystick/analog.c4
-rw-r--r--drivers/input/misc/pcspkr.c6
-rw-r--r--drivers/input/touchscreen/Kconfig4
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c40
-rw-r--r--drivers/isdn/divert/divert_procfs.c18
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c76
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hisax/avma1_cs.c63
-rw-r--r--drivers/isdn/hisax/elsa_cs.c40
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c2
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c60
-rw-r--r--drivers/isdn/hisax/teles_cs.c50
-rw-r--r--drivers/macintosh/windfarm_pm81.c2
-rw-r--r--drivers/media/IR/Kconfig59
-rw-r--r--drivers/media/IR/Makefile14
-rw-r--r--drivers/media/IR/imon.c2396
-rw-r--r--drivers/media/IR/ir-core-priv.h126
-rw-r--r--drivers/media/IR/ir-functions.c1
-rw-r--r--drivers/media/IR/ir-jvc-decoder.c320
-rw-r--r--drivers/media/IR/ir-keymaps.c3494
-rw-r--r--drivers/media/IR/ir-keytable.c687
-rw-r--r--drivers/media/IR/ir-nec-decoder.c328
-rw-r--r--drivers/media/IR/ir-raw-event.c251
-rw-r--r--drivers/media/IR/ir-rc5-decoder.c324
-rw-r--r--drivers/media/IR/ir-rc6-decoder.c419
-rw-r--r--drivers/media/IR/ir-sony-decoder.c312
-rw-r--r--drivers/media/IR/ir-sysfs.c202
-rw-r--r--drivers/media/IR/keymaps/Kconfig15
-rw-r--r--drivers/media/IR/keymaps/Makefile67
-rw-r--r--drivers/media/IR/keymaps/rc-adstech-dvb-t-pci.c89
-rw-r--r--drivers/media/IR/keymaps/rc-apac-viewcomp.c80
-rw-r--r--drivers/media/IR/keymaps/rc-asus-pc39.c91
-rw-r--r--drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c69
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-a16d.c75
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-cardbus.c97
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-dvbt.c78
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia-m135a-rm-jx.c90
-rw-r--r--drivers/media/IR/keymaps/rc-avermedia.c86
-rw-r--r--drivers/media/IR/keymaps/rc-avertv-303.c85
-rw-r--r--drivers/media/IR/keymaps/rc-behold-columbus.c108
-rw-r--r--drivers/media/IR/keymaps/rc-behold.c141
-rw-r--r--drivers/media/IR/keymaps/rc-budget-ci-old.c92
-rw-r--r--drivers/media/IR/keymaps/rc-cinergy-1400.c84
-rw-r--r--drivers/media/IR/keymaps/rc-cinergy.c78
-rw-r--r--drivers/media/IR/keymaps/rc-dm1105-nec.c76
-rw-r--r--drivers/media/IR/keymaps/rc-dntv-live-dvb-t.c78
-rw-r--r--drivers/media/IR/keymaps/rc-dntv-live-dvbt-pro.c97
-rw-r--r--drivers/media/IR/keymaps/rc-em-terratec.c69
-rw-r--r--drivers/media/IR/keymaps/rc-empty.c44
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv-fm53.c81
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv.c112
-rw-r--r--drivers/media/IR/keymaps/rc-encore-enltv2.c90
-rw-r--r--drivers/media/IR/keymaps/rc-evga-indtube.c61
-rw-r--r--drivers/media/IR/keymaps/rc-eztv.c96
-rw-r--r--drivers/media/IR/keymaps/rc-flydvb.c77
-rw-r--r--drivers/media/IR/keymaps/rc-flyvideo.c70
-rw-r--r--drivers/media/IR/keymaps/rc-fusionhdtv-mce.c98
-rw-r--r--drivers/media/IR/keymaps/rc-gadmei-rm008z.c81
-rw-r--r--drivers/media/IR/keymaps/rc-genius-tvgo-a11mce.c84
-rw-r--r--drivers/media/IR/keymaps/rc-gotview7135.c79
-rw-r--r--drivers/media/IR/keymaps/rc-hauppauge-new.c100
-rw-r--r--drivers/media/IR/keymaps/rc-imon-mce.c142
-rw-r--r--drivers/media/IR/keymaps/rc-imon-pad.c156
-rw-r--r--drivers/media/IR/keymaps/rc-iodata-bctv7e.c88
-rw-r--r--drivers/media/IR/keymaps/rc-kaiomy.c87
-rw-r--r--drivers/media/IR/keymaps/rc-kworld-315u.c83
-rw-r--r--drivers/media/IR/keymaps/rc-kworld-plus-tv-analog.c99
-rw-r--r--drivers/media/IR/keymaps/rc-manli.c135
-rw-r--r--drivers/media/IR/keymaps/rc-msi-tvanywhere-plus.c123
-rw-r--r--drivers/media/IR/keymaps/rc-msi-tvanywhere.c69
-rw-r--r--drivers/media/IR/keymaps/rc-nebula.c96
-rw-r--r--drivers/media/IR/keymaps/rc-nec-terratec-cinergy-xs.c105
-rw-r--r--drivers/media/IR/keymaps/rc-norwood.c85
-rw-r--r--drivers/media/IR/keymaps/rc-npgtech.c80
-rw-r--r--drivers/media/IR/keymaps/rc-pctv-sedna.c80
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-color.c94
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-grey.c89
-rw-r--r--drivers/media/IR/keymaps/rc-pinnacle-pctv-hd.c73
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview-mk12.c83
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview-new.c83
-rw-r--r--drivers/media/IR/keymaps/rc-pixelview.c82
-rw-r--r--drivers/media/IR/keymaps/rc-powercolor-real-angel.c81
-rw-r--r--drivers/media/IR/keymaps/rc-proteus-2309.c69
-rw-r--r--drivers/media/IR/keymaps/rc-purpletv.c81
-rw-r--r--drivers/media/IR/keymaps/rc-pv951.c78
-rw-r--r--drivers/media/IR/keymaps/rc-rc5-hauppauge-new.c103
-rw-r--r--drivers/media/IR/keymaps/rc-rc5-tv.c81
-rw-r--r--drivers/media/IR/keymaps/rc-real-audio-220-32-keys.c78
-rw-r--r--drivers/media/IR/keymaps/rc-tbs-nec.c73
-rw-r--r--drivers/media/IR/keymaps/rc-terratec-cinergy-xs.c92
-rw-r--r--drivers/media/IR/keymaps/rc-tevii-nec.c88
-rw-r--r--drivers/media/IR/keymaps/rc-tt-1500.c82
-rw-r--r--drivers/media/IR/keymaps/rc-videomate-s350.c85
-rw-r--r--drivers/media/IR/keymaps/rc-videomate-tv-pvr.c87
-rw-r--r--drivers/media/IR/keymaps/rc-winfast-usbii-deluxe.c82
-rw-r--r--drivers/media/IR/keymaps/rc-winfast.c102
-rw-r--r--drivers/media/IR/rc-map.c84
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c25
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/dvb/bt8xx/dst.c2
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c25
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c11
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c25
-rw-r--r--drivers/media/dvb/dvb-usb/a800.c6
-rw-r--r--drivers/media/dvb/dvb-usb/af9005-remote.c16
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.c8
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.h4
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c43
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h18
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c6
-rw-r--r--drivers/media/dvb/dvb-usb/az6027.c114
-rw-r--r--drivers/media/dvb/dvb-usb/cinergyT2-core.c6
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c51
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c100
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c4
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mb.c8
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mc.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb.h2
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c6
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c18
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h9
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-urb.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h7
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c44
-rw-r--r--drivers/media/dvb/dvb-usb/friio-fe.c2
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c4
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.c18
-rw-r--r--drivers/media/dvb/dvb-usb/nova-t-usb2.c18
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c16
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x.c12
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c12
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c2
-rw-r--r--drivers/media/dvb/frontends/atbm8830_priv.h2
-rw-r--r--drivers/media/dvb/frontends/au8522_decoder.c7
-rw-r--r--drivers/media/dvb/frontends/au8522_dig.c26
-rw-r--r--drivers/media/dvb/frontends/au8522_priv.h5
-rw-r--r--drivers/media/dvb/frontends/dib3000mc.c35
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c36
-rw-r--r--drivers/media/dvb/frontends/dib8000.h1
-rw-r--r--drivers/media/dvb/frontends/ds3000.c2
-rw-r--r--drivers/media/dvb/frontends/stv0900_core.c16
-rw-r--r--drivers/media/dvb/frontends/stv090x.c94
-rw-r--r--drivers/media/dvb/frontends/stv090x.h1
-rw-r--r--drivers/media/dvb/frontends/stv6110x.c34
-rw-r--r--drivers/media/dvb/frontends/stv6110x.h1
-rw-r--r--drivers/media/dvb/mantis/mantis_input.c4
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.c10
-rw-r--r--drivers/media/dvb/ngene/Kconfig2
-rw-r--r--drivers/media/dvb/ngene/Makefile4
-rw-r--r--drivers/media/dvb/ngene/ngene-cards.c328
-rw-r--r--drivers/media/dvb/ngene/ngene-core.c518
-rw-r--r--drivers/media/dvb/ngene/ngene-dvb.c172
-rw-r--r--drivers/media/dvb/ngene/ngene-i2c.c179
-rw-r--r--drivers/media/dvb/ngene/ngene.h30
-rw-r--r--drivers/media/dvb/pt1/pt1.c271
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007s.c32
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007s.h8
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007t.c31
-rw-r--r--drivers/media/dvb/pt1/va1j5jf8007t.h8
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c52
-rw-r--r--drivers/media/dvb/ttpci/budget.c47
-rw-r--r--drivers/media/radio/radio-mr800.c19
-rw-r--r--drivers/media/video/Kconfig52
-rw-r--r--drivers/media/video/Makefile14
-rw-r--r--drivers/media/video/ak881x.c368
-rw-r--r--drivers/media/video/arv.c524
-rw-r--r--drivers/media/video/au0828/au0828-video.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c27
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c30
-rw-r--r--drivers/media/video/bw-qcam.c452
-rw-r--r--drivers/media/video/c-qcam.c483
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/video/cx18/cx18-av-core.c54
-rw-r--r--drivers/media/video/cx18/cx18-av-core.h24
-rw-r--r--drivers/media/video/cx18/cx18-av-vbi.c42
-rw-r--r--drivers/media/video/cx18/cx18-cards.c4
-rw-r--r--drivers/media/video/cx18/cx18-cards.h4
-rw-r--r--drivers/media/video/cx18/cx18-controls.c2
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/video/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c22
-rw-r--r--drivers/media/video/cx18/cx18-streams.c5
-rw-r--r--drivers/media/video/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-audio.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c13
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c52
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c9
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/video/cx2341x.c14
-rw-r--r--drivers/media/video/cx23885/cimax2.c12
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c16
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c8
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c4
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c17
-rw-r--r--drivers/media/video/cx25840/cx25840-core.h5
-rw-r--r--drivers/media/video/cx25840/cx25840-vbi.c40
-rw-r--r--drivers/media/video/cx88/cx88-core.c3
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c2
-rw-r--r--drivers/media/video/cx88/cx88-input.c149
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c11
-rw-r--r--drivers/media/video/cx88/cx88-video.c20
-rw-r--r--drivers/media/video/cx88/cx88.h8
-rw-r--r--drivers/media/video/davinci/dm644x_ccdc.c131
-rw-r--r--drivers/media/video/davinci/dm644x_ccdc_regs.h10
-rw-r--r--drivers/media/video/davinci/isif_regs.h2
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c83
-rw-r--r--drivers/media/video/davinci/vpif_capture.c8
-rw-r--r--drivers/media/video/davinci/vpif_display.c6
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c56
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c25
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c11
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c29
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c66
-rw-r--r--drivers/media/video/em28xx/em28xx.h7
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c6
-rw-r--r--drivers/media/video/font.h407
-rw-r--r--drivers/media/video/gspca/Kconfig6
-rw-r--r--drivers/media/video/gspca/cpia1.c36
-rw-r--r--drivers/media/video/gspca/gspca.c124
-rw-r--r--drivers/media/video/gspca/gspca.h6
-rw-r--r--drivers/media/video/gspca/ov534.c563
-rw-r--r--drivers/media/video/gspca/pac207.c4
-rw-r--r--drivers/media/video/gspca/sn9c2028.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c359
-rw-r--r--drivers/media/video/gspca/sonixj.c585
-rw-r--r--drivers/media/video/gspca/spca561.c58
-rw-r--r--drivers/media/video/gspca/t613.c172
-rw-r--r--drivers/media/video/gspca/vc032x.c747
-rw-r--r--drivers/media/video/gspca/zc3xx.c95
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c33
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c18
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h5
-rw-r--r--drivers/media/video/ir-kbd-i2c.c28
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c5
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h10
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c55
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c6
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c119
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c23
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c17
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.c18
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/mem2mem_testdev.c1052
-rw-r--r--drivers/media/video/meye.c78
-rw-r--r--drivers/media/video/meye.h10
-rw-r--r--drivers/media/video/mt9t031.c66
-rw-r--r--drivers/media/video/mt9v022.c2
-rw-r--r--drivers/media/video/mx1_camera.c4
-rw-r--r--drivers/media/video/mx3_camera.c4
-rw-r--r--drivers/media/video/omap/Kconfig11
-rw-r--r--drivers/media/video/omap/Makefile7
-rw-r--r--drivers/media/video/omap/omap_vout.c2643
-rw-r--r--drivers/media/video/omap/omap_voutdef.h147
-rw-r--r--drivers/media/video/omap/omap_voutlib.c293
-rw-r--r--drivers/media/video/omap/omap_voutlib.h34
-rw-r--r--drivers/media/video/omap24xxcam.c4
-rw-r--r--drivers/media/video/ov511.c11
-rw-r--r--drivers/media/video/ov7670.c286
-rw-r--r--drivers/media/video/ov9640.c6
-rw-r--r--drivers/media/video/pms.c18
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c28
-rw-r--r--drivers/media/video/pwc/Kconfig2
-rw-r--r--drivers/media/video/pxa_camera.c4
-rw-r--r--drivers/media/video/rj54n1cb0c.c18
-rw-r--r--drivers/media/video/s2255drv.c1161
-rw-r--r--drivers/media/video/saa7115.c74
-rw-r--r--drivers/media/video/saa7127.c31
-rw-r--r--drivers/media/video/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c111
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c22
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c9
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c283
-rw-r--r--drivers/media/video/saa7134/saa7134-reg.h24
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c25
-rw-r--r--drivers/media/video/saa7134/saa7134.h7
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c613
-rw-r--r--drivers/media/video/sh_vou.c1476
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c6
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h18
-rw-r--r--drivers/media/video/sn9c102/sn9c102_hv7131d.c2
-rw-r--r--drivers/media/video/soc_camera.c21
-rw-r--r--drivers/media/video/tlg2300/pd-dvb.c6
-rw-r--r--drivers/media/video/tlg2300/pd-main.c7
-rw-r--r--drivers/media/video/tlg2300/pd-radio.c21
-rw-r--r--drivers/media/video/tlg2300/pd-video.c18
-rw-r--r--drivers/media/video/tvp514x.c13
-rw-r--r--drivers/media/video/tvp5150.c67
-rw-r--r--drivers/media/video/tvp7002.c53
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c82
-rw-r--r--drivers/media/video/usbvision/usbvision.h6
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c50
-rw-r--r--drivers/media/video/uvc/uvc_driver.c25
-rw-r--r--drivers/media/video/uvc/uvc_queue.c8
-rw-r--r--drivers/media/video/uvc/uvcvideo.h4
-rw-r--r--drivers/media/video/v4l2-common.c101
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c3
-rw-r--r--drivers/media/video/v4l2-dev.c6
-rw-r--r--drivers/media/video/v4l2-device.c11
-rw-r--r--drivers/media/video/v4l2-event.c292
-rw-r--r--drivers/media/video/v4l2-fh.c79
-rw-r--r--drivers/media/video/v4l2-ioctl.c103
-rw-r--r--drivers/media/video/v4l2-mem2mem.c633
-rw-r--r--drivers/media/video/videobuf-core.c244
-rw-r--r--drivers/media/video/videobuf-dma-contig.c115
-rw-r--r--drivers/media/video/videobuf-dma-sg.c383
-rw-r--r--drivers/media/video/videobuf-dvb.c2
-rw-r--r--drivers/media/video/videobuf-vmalloc.c183
-rw-r--r--drivers/media/video/vivi.c806
-rw-r--r--drivers/media/video/w9966.c1149
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c6
-rw-r--r--drivers/media/video/zc0301/zc0301_sensor.h2
-rw-r--r--drivers/media/video/zoran/zoran.h24
-rw-r--r--drivers/media/video/zoran/zoran_driver.c16
-rw-r--r--drivers/media/video/zr364xx.c4
-rw-r--r--drivers/message/i2o/i2o_config.c16
-rw-r--r--drivers/mfd/Kconfig8
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/davinci_voicecodec.c190
-rw-r--r--drivers/mfd/twl-core.c4
-rw-r--r--drivers/mfd/wm831x-irq.c30
-rw-r--r--drivers/mfd/wm8994-core.c43
-rw-r--r--drivers/mfd/wm8994-irq.c310
-rw-r--r--drivers/misc/eeprom/eeprom.c36
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c39
-rw-r--r--drivers/misc/eeprom/max6875.c52
-rw-r--r--drivers/misc/vmware_balloon.c4
-rw-r--r--drivers/mmc/host/mmci.c19
-rw-r--r--drivers/mmc/host/mmci.h6
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mtd/maps/pcmciamtd.c3
-rw-r--r--drivers/net/a2065.c1
-rw-r--r--drivers/net/ariadne.c1
-rw-r--r--drivers/net/bnx2x_hsi.h2
-rw-r--r--drivers/net/e1000e/netdev.c22
-rw-r--r--drivers/net/hydra.c1
-rw-r--r--drivers/net/igbvf/netdev.c6
-rw-r--r--drivers/net/pcmcia/3c574_cs.c15
-rw-r--r--drivers/net/pcmcia/3c589_cs.c16
-rw-r--r--drivers/net/pcmcia/axnet_cs.c21
-rw-r--r--drivers/net/pcmcia/com20020_cs.c29
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c18
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c16
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c14
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c16
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c17
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c36
-rw-r--r--drivers/net/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/smsc911x.c4
-rw-r--r--drivers/net/wireless/airo_cs.c72
-rw-r--r--drivers/net/wireless/atmel_cs.c70
-rw-r--r--drivers/net/wireless/b43/pcmcia.c5
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c38
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c11
-rw-r--r--drivers/net/wireless/libertas/if_cs.c21
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c27
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c27
-rw-r--r--drivers/net/wireless/ray_cs.c15
-rw-r--r--drivers/net/wireless/ray_cs.h1
-rw-r--r--drivers/net/wireless/wl3501.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c23
-rw-r--r--drivers/net/zorro8390.c1
-rw-r--r--drivers/oprofile/cpu_buffer.c75
-rw-r--r--drivers/oprofile/oprof.c12
-rw-r--r--drivers/oprofile/oprof.h3
-rw-r--r--drivers/oprofile/timer_int.c78
-rw-r--r--drivers/parport/parport_cs.c13
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c2
-rw-r--r--drivers/pci/intel-iommu.c22
-rw-r--r--drivers/pci/pci.c2
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pcmcia/Kconfig25
-rw-r--r--drivers/pcmcia/Makefile10
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/cardbus.c1
-rw-r--r--drivers/pcmcia/cistpl.c121
-rw-r--r--drivers/pcmcia/cs.c1
-rw-r--r--drivers/pcmcia/cs_internal.h22
-rw-r--r--drivers/pcmcia/ds.c34
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/pcmcia_cis.c356
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c23
-rw-r--r--drivers/pcmcia/pcmcia_resource.c634
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c229
-rw-r--r--drivers/pcmcia/rsrc_iodyn.c172
-rw-r--r--drivers/pcmcia/rsrc_mgr.c112
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c164
-rw-r--r--drivers/pcmcia/yenta_socket.c7
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-davinci.c673
-rw-r--r--drivers/rtc/rtc-rx8581.c6
-rw-r--r--drivers/rtc/rtc-stk17ta8.c4
-rw-r--r--drivers/s390/block/dasd.c22
-rw-r--r--drivers/s390/block/dasd_3990_erp.c20
-rw-r--r--drivers/s390/block/dasd_alias.c125
-rw-r--r--drivers/s390/block/dasd_devmap.c174
-rw-r--r--drivers/s390/block/dasd_eckd.c116
-rw-r--r--drivers/s390/block/dasd_eckd.h2
-rw-r--r--drivers/s390/block/dasd_int.h49
-rw-r--r--drivers/s390/char/Kconfig3
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/keyboard.c21
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c2
-rw-r--r--drivers/s390/char/vmcp.c38
-rw-r--r--drivers/s390/char/zcore.c4
-rw-r--r--drivers/s390/cio/chsc_sch.c1
-rw-r--r--drivers/s390/cio/cio.c3
-rw-r--r--drivers/s390/cio/css.c8
-rw-r--r--drivers/s390/cio/qdio.h15
-rw-r--r--drivers/s390/cio/qdio_main.c67
-rw-r--r--drivers/s390/cio/qdio_setup.c8
-rw-r--r--drivers/s390/cio/qdio_thinint.c4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c17
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c1
-rw-r--r--drivers/sbus/char/flash.c6
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_cee.h2
-rw-r--r--drivers/scsi/bfa/include/defs/bfa_defs_status.h4
-rw-r--r--drivers/scsi/ipr.c6
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c9
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c9
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c23
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.h1
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c13
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c9
-rw-r--r--drivers/scsi/zorro7xx.c1
-rw-r--r--drivers/serial/Kconfig23
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/atmel_serial.c207
-rw-r--r--drivers/serial/serial_cs.c36
-rw-r--r--drivers/serial/sh-sci.c189
-rw-r--r--drivers/sh/Kconfig24
-rw-r--r--drivers/sh/Makefile2
-rw-r--r--drivers/sh/clk-cpg.c298
-rw-r--r--drivers/sh/clk.c545
-rw-r--r--drivers/sh/intc.c333
-rw-r--r--drivers/spi/pxa2xx_spi.c25
-rw-r--r--drivers/spi/spi_mpc8xxx.c2
-rw-r--r--drivers/ssb/main.c2
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c45
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c35
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c42
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c42
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c42
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c19
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c45
-rw-r--r--drivers/staging/cx25821/cx25821-alsa.c103
-rw-r--r--drivers/staging/cx25821/cx25821-audio-upstream.c146
-rw-r--r--drivers/staging/cx25821/cx25821-audups11.c84
-rw-r--r--drivers/staging/cx25821/cx25821-cards.c4
-rw-r--r--drivers/staging/cx25821/cx25821-core.c160
-rw-r--r--drivers/staging/cx25821/cx25821-gpio.c25
-rw-r--r--drivers/staging/cx25821/cx25821-i2c.c14
-rw-r--r--drivers/staging/cx25821/cx25821-medusa-video.c260
-rw-r--r--drivers/staging/cx25821/cx25821-video-upstream.c145
-rw-r--r--drivers/staging/cx25821/cx25821-video.c145
-rw-r--r--drivers/staging/cx25821/cx25821-video.h78
-rw-r--r--drivers/staging/cx25821/cx25821-video0.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video1.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video2.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video3.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video4.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video5.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video6.c88
-rw-r--r--drivers/staging/cx25821/cx25821-video7.c88
-rw-r--r--drivers/staging/cx25821/cx25821-videoioctl.c84
-rw-r--r--drivers/staging/cx25821/cx25821-vidups10.c84
-rw-r--r--drivers/staging/cx25821/cx25821-vidups9.c84
-rw-r--r--drivers/staging/netwave/netwave_cs.c9
-rw-r--r--drivers/staging/tm6000/Kconfig32
-rw-r--r--drivers/staging/tm6000/Makefile17
-rw-r--r--drivers/staging/tm6000/README22
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c414
-rw-r--r--drivers/staging/tm6000/tm6000-cards.c973
-rw-r--r--drivers/staging/tm6000/tm6000-core.c602
-rw-r--r--drivers/staging/tm6000/tm6000-dvb.c346
-rw-r--r--drivers/staging/tm6000/tm6000-i2c.c370
-rw-r--r--drivers/staging/tm6000/tm6000-regs.h541
-rw-r--r--drivers/staging/tm6000/tm6000-stds.c873
-rw-r--r--drivers/staging/tm6000/tm6000-usb-isoc.h53
-rw-r--r--drivers/staging/tm6000/tm6000-video.c1557
-rw-r--r--drivers/staging/tm6000/tm6000.h301
-rw-r--r--drivers/staging/vt6655/rxtx.c14
-rw-r--r--drivers/staging/vt6656/rxtx.c6
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c15
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c9
-rw-r--r--drivers/telephony/ixj_pcmcia.c3
-rw-r--r--drivers/usb/gadget/at91_udc.c7
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/sl811_cs.c28
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c2
-rw-r--r--drivers/video/amifb.c49
-rw-r--r--drivers/video/cirrusfb.c1
-rw-r--r--drivers/video/cobalt_lcdfb.c2
-rw-r--r--drivers/video/fm2fb.c1
-rw-r--r--drivers/virtio/virtio_pci.c2
-rw-r--r--drivers/vlynq/Kconfig2
-rw-r--r--drivers/xen/manage.c14
-rw-r--r--drivers/zorro/proc.c6
-rw-r--r--drivers/zorro/zorro-driver.c24
-rw-r--r--drivers/zorro/zorro-sysfs.c11
-rw-r--r--drivers/zorro/zorro.c243
748 files changed, 55866 insertions, 20369 deletions
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 7423052ece5a..d93cc06f4bf8 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,12 +14,12 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o
+ evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o
acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
- exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o
+ exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 3e6ba99e4053..64d1e5c2d4ae 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -73,8 +73,10 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
u32 notify_value);
/*
- * evgpe - GPE handling and dispatch
+ * evgpe - Low-level GPE support
*/
+u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
+
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
@@ -85,19 +87,13 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);
+struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
+ struct acpi_gpe_block_info
+ *gpe_block);
+
/*
- * evgpeblk
+ * evgpeblk - Upper-level GPE block support
*/
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
-
-acpi_status
-acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block,
- void *context);
-
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_generic_address *gpe_block_address,
@@ -116,12 +112,37 @@ u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
u32 gpe_number);
-u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
+/*
+ * evgpeinit - GPE initialization and update
+ */
+acpi_status acpi_ev_gpe_initialize(void);
+
+void acpi_ev_update_gpes(acpi_owner_id table_owner_id);
acpi_status
-acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_ev_match_gpe_method(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
-acpi_status acpi_ev_gpe_initialize(void);
+acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+/*
+ * evgpeutil - GPE utilities
+ */
+acpi_status
+acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
+
+struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
+
+acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *context);
/*
* evregion - Address Space handling
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index f8dd8f250ac4..9070f1fe8f17 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -112,6 +112,19 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+/*
+ * Optionally enable output from the AML Debug Object.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+
+/*
+ * Optionally copy the entire DSDT to local memory (instead of simply
+ * mapping it.) There are some BIOSs that corrupt or replace the original
+ * DSDT, creating the need for this option. Default is FALSE, do not copy
+ * the DSDT.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
+
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
@@ -145,11 +158,10 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
****************************************************************************/
/*
- * acpi_gbl_root_table_list is the master list of ACPI tables found in the
- * RSDT/XSDT.
- *
+ * acpi_gbl_root_table_list is the master list of ACPI tables that were
+ * found in the RSDT/XSDT.
*/
-ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list;
+ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
/* These addresses are calculated from the FADT Event Block addresses */
@@ -160,6 +172,11 @@ ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status;
ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
+/* DSDT information. Used to check for DSDT corruption */
+
+ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
+ACPI_EXTERN struct acpi_table_header acpi_gbl_original_dsdt_header;
+
/*
* Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
* determined by the revision of the DSDT: If the DSDT revision is less than
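The two ACPI_INIT_GLOBAL options introduced above (acpi_gbl_enable_aml_debug_object and acpi_gbl_copy_dsdt_locally) are intended to be set by the host OS before ACPICA installs and loads the tables. The following is a minimal sketch, not part of this patch: the include path and the init call order are assumptions about the embedding environment, and the function name is illustrative. It only shows that both flags are plain u8 globals that must be set before table load for either option to take effect.

#include <acpi/acpi.h>		/* assumed host include path for the ACPICA headers */

/*
 * Sketch only: set the new init-time options, then bring up the table
 * manager and load the tables. Passing NULL to acpi_initialize_tables()
 * lets ACPICA allocate the root table list itself.
 */
static acpi_status example_early_acpi_setup(void)
{
	acpi_status status;

	acpi_gbl_enable_aml_debug_object = TRUE;	/* emit Store(..., Debug) output */
	acpi_gbl_copy_dsdt_locally = TRUE;		/* defend against a BIOS that corrupts the DSDT */

	status = acpi_initialize_tables(NULL, 16, TRUE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	return (acpi_load_tables());
}
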
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 6df3f8428168..049e203bd621 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -121,6 +121,13 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
struct acpi_walk_state *walk_state);
/*
+ * exdebug - AML debug object
+ */
+void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+ u32 level, u32 index);
+
+/*
* exfield - ACPI AML (p-code) execution - field manipulation
*/
acpi_status
@@ -274,7 +281,7 @@ acpi_status
acpi_ex_system_do_notify_op(union acpi_operand_object *value,
union acpi_operand_object *obj_desc);
-acpi_status acpi_ex_system_do_suspend(u64 time);
+acpi_status acpi_ex_system_do_sleep(u64 time);
acpi_status acpi_ex_system_do_stall(u32 time);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 24b8faa5c395..147a7e6bd38f 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -213,12 +213,12 @@ struct acpi_namespace_node {
#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
-/* One internal RSDT for table management */
+/* Internal ACPI table management - master table list */
-struct acpi_internal_rsdt {
- struct acpi_table_desc *tables;
- u32 count;
- u32 size;
+struct acpi_table_list {
+ struct acpi_table_desc *tables; /* Table descriptor array */
+ u32 current_table_count; /* Tables currently in the array */
+ u32 max_table_count; /* Max tables array will hold */
u8 flags;
};
@@ -427,8 +427,8 @@ struct acpi_gpe_event_info {
struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
u8 flags; /* Misc info about this GPE */
u8 gpe_number; /* This GPE */
- u8 runtime_count;
- u8 wakeup_count;
+ u8 runtime_count; /* References to a run GPE */
+ u8 wakeup_count; /* References to a wake GPE */
};
/* Information about a GPE register pair, one per each status/enable pair in an array */
@@ -454,6 +454,7 @@ struct acpi_gpe_block_info {
struct acpi_gpe_event_info *event_info; /* One for each GPE */
struct acpi_generic_address block_address; /* Base address of the block */
u32 register_count; /* Number of register pairs in block */
+ u16 gpe_count; /* Number of individual GPEs in block */
u8 block_base_number; /* Base GPE number for this block */
};
@@ -469,6 +470,10 @@ struct acpi_gpe_xrupt_info {
struct acpi_gpe_walk_info {
struct acpi_namespace_node *gpe_device;
struct acpi_gpe_block_info *gpe_block;
+ u16 count;
+ acpi_owner_id owner_id;
+ u8 enable_this_gpe;
+ u8 execute_by_owner_id;
};
struct acpi_gpe_device_info {
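The new gpe_count field caches the number of individual GPEs a block covers; elsewhere in this patch it replaces the repeated register_count * 8 arithmetic (see acpi_ev_low_get_gpe_info in the evgpe.c hunk below). A short sketch of the intended relationship, assuming ACPICA's usual ACPI_GPE_REGISTER_WIDTH of 8 GPEs per status/enable register pair; the local variable names are illustrative only:

	/* Each register pair covers 8 GPEs, so the block total follows directly */

	gpe_block->gpe_count = (u16)(gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH);

	/* A raw gpe_number is valid for this block only if it falls in
	 * [block_base_number, block_base_number + gpe_count) */

	gpe_index = gpe_number - gpe_block->block_base_number;
	if ((gpe_number >= gpe_block->block_base_number) &&
	    (gpe_index < gpe_block->gpe_count)) {
		gpe_event_info = &gpe_block->event_info[gpe_index];
	}
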
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 8ff3b741df28..62a576e34361 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -107,6 +107,10 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length);
acpi_status
acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length);
+void acpi_tb_check_dsdt_header(void);
+
+struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index);
+
void
acpi_tb_install_table(acpi_physical_address address,
char *signature, u32 table_index);
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index bb13817e0c31..347bee1726f1 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -323,7 +323,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
default:
ACPI_ERROR((AE_INFO,
- "Invalid opcode in field list: %X",
+ "Invalid opcode in field list: 0x%X",
arg->common.aml_opcode));
return_ACPI_STATUS(AE_AML_BAD_OPCODE);
}
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 721039233aa7..2a9a561c2f01 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -225,7 +225,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
(walk_state->thread->current_sync_level >
obj_desc->method.mutex->mutex.sync_level)) {
ACPI_ERROR((AE_INFO,
- "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
+ "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(method_node),
walk_state->thread->current_sync_level));
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index cc343b959540..f3d52f59250b 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -262,7 +262,7 @@ acpi_ds_method_data_get_node(u8 type,
if (index > ACPI_METHOD_MAX_LOCAL) {
ACPI_ERROR((AE_INFO,
- "Local index %d is invalid (max %d)",
+ "Local index %u is invalid (max %u)",
index, ACPI_METHOD_MAX_LOCAL));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
@@ -276,7 +276,7 @@ acpi_ds_method_data_get_node(u8 type,
if (index > ACPI_METHOD_MAX_ARG) {
ACPI_ERROR((AE_INFO,
- "Arg index %d is invalid (max %d)",
+ "Arg index %u is invalid (max %u)",
index, ACPI_METHOD_MAX_ARG));
return_ACPI_STATUS(AE_AML_INVALID_INDEX);
}
@@ -287,7 +287,7 @@ acpi_ds_method_data_get_node(u8 type,
break;
default:
- ACPI_ERROR((AE_INFO, "Type %d is invalid", type));
+ ACPI_ERROR((AE_INFO, "Type %u is invalid", type));
return_ACPI_STATUS(AE_TYPE);
}
@@ -424,7 +424,7 @@ acpi_ds_method_data_get_value(u8 type,
case ACPI_REFCLASS_ARG:
ACPI_ERROR((AE_INFO,
- "Uninitialized Arg[%d] at node %p",
+ "Uninitialized Arg[%u] at node %p",
index, node));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
@@ -440,7 +440,7 @@ acpi_ds_method_data_get_value(u8 type,
default:
ACPI_ERROR((AE_INFO,
- "Not a Arg/Local opcode: %X",
+ "Not a Arg/Local opcode: 0x%X",
type));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
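The dsmthdat.c hunks above follow the same output convention applied throughout this patch: indexes and counts are printed as unsigned decimal (%u), and hexadecimal values carry an explicit 0x prefix so they cannot be misread as decimal. An illustrative before/after pair, drawn from the pattern of these changes rather than any single new line:

	/* Before: a reader cannot tell whether the printed values are decimal or hex */
	ACPI_ERROR((AE_INFO, "Arg index %d is invalid (max %d)", index, ACPI_METHOD_MAX_ARG));

	/* After: counts as unsigned decimal, opcodes as explicitly-prefixed hex */
	ACPI_ERROR((AE_INFO, "Arg index %u is invalid (max %u)", index, ACPI_METHOD_MAX_ARG));
	ACPI_ERROR((AE_INFO, "Not a Arg/Local opcode: 0x%X", type));
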
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 891e08bf560b..3607adcaf085 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -288,7 +288,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
if (byte_list) {
if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
ACPI_ERROR((AE_INFO,
- "Expecting bytelist, got AML opcode %X in op %p",
+ "Expecting bytelist, found AML opcode 0x%X in op %p",
byte_list->common.aml_opcode, byte_list));
acpi_ut_remove_reference(obj_desc);
@@ -511,7 +511,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
}
ACPI_INFO((AE_INFO,
- "Actual Package length (0x%X) is larger than NumElements field (0x%X), truncated\n",
+ "Actual Package length (%u) is larger than NumElements field (%u), truncated\n",
i, element_count));
} else if (i < element_count) {
/*
@@ -519,7 +519,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
* Note: this is not an error, the package is padded out with NULLs.
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Package List length (0x%X) smaller than NumElements count (0x%X), padded with null elements\n",
+ "Package List length (%u) smaller than NumElements count (%u), padded with null elements\n",
i, element_count));
}
@@ -701,7 +701,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unknown constant opcode %X",
+ "Unknown constant opcode 0x%X",
opcode));
status = AE_AML_OPERAND_TYPE;
break;
@@ -717,7 +717,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Integer type %X",
+ ACPI_ERROR((AE_INFO, "Unknown Integer type 0x%X",
op_info->type));
status = AE_AML_OPERAND_TYPE;
break;
@@ -806,7 +806,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unimplemented reference type for AML opcode: %4.4X",
+ "Unimplemented reference type for AML opcode: 0x%4.4X",
opcode));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
@@ -816,7 +816,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default:
- ACPI_ERROR((AE_INFO, "Unimplemented data type: %X",
+ ACPI_ERROR((AE_INFO, "Unimplemented data type: 0x%X",
obj_desc->common.type));
status = AE_AML_OPERAND_TYPE;
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index bf980cadb1e8..53a7e416f33e 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -292,7 +292,7 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->buffer.node;
if (!node) {
ACPI_ERROR((AE_INFO,
- "No pointer back to NS node in buffer obj %p",
+ "No pointer back to namespace node in buffer object %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -336,7 +336,7 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
node = obj_desc->package.node;
if (!node) {
ACPI_ERROR((AE_INFO,
- "No pointer back to NS node in package %p",
+ "No pointer back to namespace node in package %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -580,7 +580,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
default:
ACPI_ERROR((AE_INFO,
- "Unknown field creation opcode %02x", aml_opcode));
+ "Unknown field creation opcode 0x%02X",
+ aml_opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
@@ -589,7 +590,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)",
+ "Field [%4.4s] at %u exceeds Buffer [%4.4s] size %u (bits)",
acpi_ut_get_node_name(result_desc),
bit_offset + bit_count,
acpi_ut_get_node_name(buffer_desc->buffer.node),
@@ -693,7 +694,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
status = acpi_ex_resolve_operands(op->common.aml_opcode,
ACPI_WALK_OPERANDS, walk_state);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
+ ACPI_ERROR((AE_INFO, "(%s) bad operand(s), status 0x%X",
acpi_ps_get_opcode_name(op->common.aml_opcode),
status));
@@ -1461,7 +1462,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
default:
- ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p",
+ ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
op->common.aml_opcode, op));
status = AE_AML_BAD_OPCODE;
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 6b76c486d784..d555b374e314 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -140,7 +140,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (local_obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
+ "Bad predicate (not an integer) ObjDesc=%p State=%p Type=0x%X",
obj_desc, walk_state, obj_desc->common.type));
status = AE_AML_OPERAND_TYPE;
@@ -354,7 +354,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
op_class = walk_state->op_info->class;
if (op_class == AML_CLASS_UNKNOWN) {
- ACPI_ERROR((AE_INFO, "Unknown opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown opcode 0x%X",
op->common.aml_opcode));
return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
}
@@ -678,7 +678,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p",
+ "Unimplemented opcode, class=0x%X type=0x%X Opcode=-0x%X Op=%p",
op_class, op_type, op->common.aml_opcode,
op));
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 050df8164165..83155dd8671e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -179,7 +179,7 @@ acpi_ds_result_push(union acpi_operand_object * object,
if (!object) {
ACPI_ERROR((AE_INFO,
- "Null Object! Obj=%p State=%p Num=%X",
+ "Null Object! Obj=%p State=%p Num=%u",
object, walk_state, walk_state->result_count));
return (AE_BAD_PARAMETER);
}
@@ -223,7 +223,7 @@ static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
ACPI_RESULTS_OBJ_NUM_MAX) {
- ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X",
+ ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%u",
walk_state, walk_state->result_size));
return (AE_STACK_OVERFLOW);
}
@@ -314,7 +314,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
ACPI_ERROR((AE_INFO,
- "Object stack overflow! Obj=%p State=%p #Ops=%X",
+ "Object stack overflow! Obj=%p State=%p #Ops=%u",
object, walk_state, walk_state->num_operands));
return (AE_STACK_OVERFLOW);
}
@@ -365,7 +365,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
if (walk_state->num_operands == 0) {
ACPI_ERROR((AE_INFO,
- "Object stack underflow! Count=%X State=%p #Ops=%X",
+ "Object stack underflow! Count=%X State=%p #Ops=%u",
pop_count, walk_state,
walk_state->num_operands));
return (AE_STACK_UNDERFLOW);
@@ -377,7 +377,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
walk_state->operands[walk_state->num_operands] = NULL;
}
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%u\n",
pop_count, walk_state, walk_state->num_operands));
return (AE_OK);
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c1e6f472d435..f5795915a2e9 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -302,7 +302,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
ACPI_DISABLE_EVENT);
ACPI_ERROR((AE_INFO,
- "No installed handler for fixed event [%08X]",
+ "No installed handler for fixed event [0x%08X]",
event));
return (ACPI_INTERRUPT_NOT_HANDLED);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 78c55508aff5..a221ad404167 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -60,7 +60,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
*
* RETURN: Status
*
- * DESCRIPTION: Updates GPE register enable masks based on the GPE type
+ * DESCRIPTION: Updates GPE register enable masks based upon whether there are
+ * references (either wake or run) to this GPE
*
******************************************************************************/
@@ -81,14 +82,20 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
(1 <<
(gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
+ /* Clear the wake/run bits up front */
+
ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
- if (gpe_event_info->runtime_count)
+ /* Set the mask bits only if there are references to this GPE */
+
+ if (gpe_event_info->runtime_count) {
ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+ }
- if (gpe_event_info->wakeup_count)
+ if (gpe_event_info->wakeup_count) {
ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+ }
return_ACPI_STATUS(AE_OK);
}
@@ -101,7 +108,10 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
*
* RETURN: Status
*
- * DESCRIPTION: Enable a GPE based on the GPE type
+ * DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless
+ * of type or number of references.
+ *
+ * Note: The GPE lock should be already acquired when this function is called.
*
******************************************************************************/
@@ -109,20 +119,36 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
+
ACPI_FUNCTION_TRACE(ev_enable_gpe);
- /* Make sure HW enable masks are updated */
+
+ /*
+ * We will only allow a GPE to be enabled if it has either an
+ * associated method (_Lxx/_Exx) or a handler. Otherwise, the
+ * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
+ * first time it fires.
+ */
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+ return_ACPI_STATUS(AE_NO_HANDLER);
+ }
+
+ /* Ensure the HW enable masks are current */
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
+
+ /* Clear the GPE (of stale events) */
- /* Clear the GPE (of stale events), then enable it */
status = acpi_hw_clear_gpe(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
/* Enable the requested GPE */
+
status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
return_ACPI_STATUS(status);
}
@@ -135,7 +161,10 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
*
* RETURN: Status
*
- * DESCRIPTION: Disable a GPE based on the GPE type
+ * DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE,
+ * regardless of the type or number of references.
+ *
+ * Note: The GPE lock should be already acquired when this function is called.
*
******************************************************************************/
@@ -145,24 +174,71 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
ACPI_FUNCTION_TRACE(ev_disable_gpe);
- /* Make sure HW enable masks are updated */
+
+ /*
+ * Note: Always disable the GPE, even if we think that that it is already
+ * disabled. It is possible that the AML or some other code has enabled
+ * the GPE behind our back.
+ */
+
+ /* Ensure the HW enable masks are current */
status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
+ }
/*
- * Even if we don't know the GPE type, make sure that we always
- * disable it. low_disable_gpe will just clear the enable bit for this
- * GPE and write it. It will not write out the current GPE enable mask,
- * since this may inadvertently enable GPEs too early, if a rogue GPE has
- * come in during ACPICA initialization - possibly as a result of AML or
- * other code that has enabled the GPE.
+ * Always H/W disable this GPE, even if we don't know the GPE type.
+ * Simply clear the enable bit for this particular GPE, but do not
+ * write out the current GPE enable mask since this may inadvertently
+ * enable GPEs too early. An example is a rogue GPE that has arrived
+ * during ACPICA initialization - possibly because AML or other code
+ * has enabled the GPE.
*/
status = acpi_hw_low_disable_gpe(gpe_event_info);
return_ACPI_STATUS(status);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_low_get_gpe_info
+ *
+ * PARAMETERS: gpe_number - Raw GPE number
+ * gpe_block - A GPE info block
+ *
+ * RETURN: A GPE event_info struct. NULL if not a valid GPE (The gpe_number
+ * is not within the specified GPE block)
+ *
+ * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
+ * the low-level implementation of ev_get_gpe_event_info.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
+ struct acpi_gpe_block_info
+ *gpe_block)
+{
+ u32 gpe_index;
+
+ /*
+ * Validate that the gpe_number is within the specified gpe_block.
+ * (Two steps)
+ */
+ if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
+ return (NULL);
+ }
+
+ gpe_index = gpe_number - gpe_block->block_base_number;
+ if (gpe_index >= gpe_block->gpe_count) {
+ return (NULL);
+ }
+
+ return (&gpe_block->event_info[gpe_index]);
+}
+
+
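+
Worked example of the two-step range check above, assuming a hypothetical block with block_base_number 0x10 and gpe_count 32: GPE 0x1C maps to event_info index 0x0C, while GPE 0x0C (below the block) and GPE 0x40 (above it) are rejected. The helper below is an illustrative stand-alone model, not ACPICA code.

#include <stdint.h>

/* Returns the event_info index for gpe_number, or -1 if the GPE does not
 * belong to a block with the given base number and GPE count. */
static int low_get_gpe_index(uint32_t gpe_number,
			     uint32_t block_base, uint32_t gpe_count)
{
	uint32_t gpe_index;

	if (gpe_number < block_base)		/* below the block */
		return -1;

	gpe_index = gpe_number - block_base;
	if (gpe_index >= gpe_count)		/* above the block */
		return -1;

	return (int)gpe_index;			/* e.g. 0x1C - 0x10 = 0x0C */
}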
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_event_info
@@ -184,29 +260,23 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number)
{
union acpi_operand_object *obj_desc;
- struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_event_info *gpe_info;
u32 i;
ACPI_FUNCTION_ENTRY();
- /* A NULL gpe_block means use the FADT-defined GPE block(s) */
+ /* A NULL gpe_device means use the FADT-defined GPE block(s) */
if (!gpe_device) {
/* Examine GPE Block 0 and 1 (These blocks are permanent) */
for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
- gpe_block = acpi_gbl_gpe_fadt_blocks[i];
- if (gpe_block) {
- if ((gpe_number >= gpe_block->block_base_number)
- && (gpe_number <
- gpe_block->block_base_number +
- (gpe_block->register_count * 8))) {
- return (&gpe_block->
- event_info[gpe_number -
- gpe_block->
- block_base_number]);
- }
+ gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
+ acpi_gbl_gpe_fadt_blocks
+ [i]);
+ if (gpe_info) {
+ return (gpe_info);
}
}
@@ -223,16 +293,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
return (NULL);
}
- gpe_block = obj_desc->device.gpe_block;
-
- if ((gpe_number >= gpe_block->block_base_number) &&
- (gpe_number <
- gpe_block->block_base_number + (gpe_block->register_count * 8))) {
- return (&gpe_block->
- event_info[gpe_number - gpe_block->block_base_number]);
- }
-
- return (NULL);
+ return (acpi_ev_low_get_gpe_info
+ (gpe_number, obj_desc->device.gpe_block));
}
/*******************************************************************************
@@ -389,7 +451,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
return_VOID;
}
- /* Set the GPE flags for return to enabled state */
+ /* Update the GPE register masks for return to enabled state */
(void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
@@ -499,7 +561,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE[%2X]",
+ "Unable to clear GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -532,7 +594,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE[%2X]",
+ "Unable to clear GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -548,7 +610,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to disable GPE[%2X]",
+ "Unable to disable GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
@@ -562,27 +624,30 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to queue handler for GPE[%2X] - event disabled",
+ "Unable to queue handler for GPE[0x%2X] - event disabled",
gpe_number));
}
break;
default:
- /* No handler or method to run! */
-
+ /*
+ * No handler or method to run!
+ * 03/2010: This case should no longer be possible. We will not allow
+ * a GPE to be enabled if it has no handler or method.
+ */
ACPI_ERROR((AE_INFO,
- "No handler or method for GPE[%2X], disabling event",
+ "No handler or method for GPE[0x%2X], disabling event",
gpe_number));
/*
- * Disable the GPE. The GPE will remain disabled until the ACPICA
- * Core Subsystem is restarted, or a handler is installed.
+ * Disable the GPE. The GPE will remain disabled until a handler
+ * is installed or ACPICA is restarted.
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Unable to disable GPE[%2X]",
+ "Unable to disable GPE[0x%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
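
The dispatch changes above come down to a three-way decision on the ACPI_GPE_DISPATCH_MASK bits of gpe_event_info->flags: run an installed handler, queue the _Lxx/_Exx method, or (now unreachable for enabled GPEs) disable the orphan GPE. The sketch below is illustrative only; it assumes the ACPICA headers for the ACPI_GPE_DISPATCH_* constants and omits the clear/queue details.

/* Illustrative classification of the dispatch cases handled above */
enum gpe_action { GPE_RUN_HANDLER, GPE_QUEUE_METHOD, GPE_DISABLE_NO_OWNER };

static enum gpe_action classify_gpe_dispatch(unsigned int flags)
{
	switch (flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:
		return GPE_RUN_HANDLER;		/* installed handler is invoked directly */

	case ACPI_GPE_DISPATCH_METHOD:
		return GPE_QUEUE_METHOD;	/* _Lxx/_Exx queued for asynchronous execution */

	default:
		/* 03/2010: should not occur, such GPEs are never enabled */
		return GPE_DISABLE_NO_OWNER;
	}
}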
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index fef721917eaf..7c28f2d9fd35 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -51,20 +51,6 @@ ACPI_MODULE_NAME("evgpeblk")
/* Local prototypes */
static acpi_status
-acpi_ev_save_method_info(acpi_handle obj_handle,
- u32 level, void *obj_desc, void **return_value);
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
- u32 level, void *info, void **return_value);
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
- interrupt_number);
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
-
-static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
u32 interrupt_number);
@@ -73,527 +59,6 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
/*******************************************************************************
*
- * FUNCTION: acpi_ev_valid_gpe_event
- *
- * PARAMETERS: gpe_event_info - Info for this GPE
- *
- * RETURN: TRUE if the gpe_event is valid
- *
- * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
- * Should be called only when the GPE lists are semaphore locked
- * and not subject to change.
- *
- ******************************************************************************/
-
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
-{
- struct acpi_gpe_xrupt_info *gpe_xrupt_block;
- struct acpi_gpe_block_info *gpe_block;
-
- ACPI_FUNCTION_ENTRY();
-
- /* No need for spin lock since we are not changing any list elements */
-
- /* Walk the GPE interrupt levels */
-
- gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
- while (gpe_xrupt_block) {
- gpe_block = gpe_xrupt_block->gpe_block_list_head;
-
- /* Walk the GPE blocks on this interrupt level */
-
- while (gpe_block) {
- if ((&gpe_block->event_info[0] <= gpe_event_info) &&
- (&gpe_block->event_info[((acpi_size)
- gpe_block->
- register_count) * 8] >
- gpe_event_info)) {
- return (TRUE);
- }
-
- gpe_block = gpe_block->next;
- }
-
- gpe_xrupt_block = gpe_xrupt_block->next;
- }
-
- return (FALSE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_walk_gpe_list
- *
- * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
- * Context - Value passed to callback
- *
- * RETURN: Status
- *
- * DESCRIPTION: Walk the GPE lists.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
-{
- struct acpi_gpe_block_info *gpe_block;
- struct acpi_gpe_xrupt_info *gpe_xrupt_info;
- acpi_status status = AE_OK;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
- /* Walk the interrupt level descriptor list */
-
- gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
- while (gpe_xrupt_info) {
-
- /* Walk all Gpe Blocks attached to this interrupt level */
-
- gpe_block = gpe_xrupt_info->gpe_block_list_head;
- while (gpe_block) {
-
- /* One callback per GPE block */
-
- status =
- gpe_walk_callback(gpe_xrupt_info, gpe_block,
- context);
- if (ACPI_FAILURE(status)) {
- if (status == AE_CTRL_END) { /* Callback abort */
- status = AE_OK;
- }
- goto unlock_and_exit;
- }
-
- gpe_block = gpe_block->next;
- }
-
- gpe_xrupt_info = gpe_xrupt_info->next;
- }
-
- unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_delete_gpe_handlers
- *
- * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
- * gpe_block - Gpe Block info
- *
- * RETURN: Status
- *
- * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
- * Used only prior to termination.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block,
- void *context)
-{
- struct acpi_gpe_event_info *gpe_event_info;
- u32 i;
- u32 j;
-
- ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
-
- /* Examine each GPE Register within the block */
-
- for (i = 0; i < gpe_block->register_count; i++) {
-
- /* Now look at the individual GPEs in this byte register */
-
- for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
- gpe_event_info = &gpe_block->event_info[((acpi_size) i *
- ACPI_GPE_REGISTER_WIDTH)
- + j];
-
- if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_HANDLER) {
- ACPI_FREE(gpe_event_info->dispatch.handler);
- gpe_event_info->dispatch.handler = NULL;
- gpe_event_info->flags &=
- ~ACPI_GPE_DISPATCH_MASK;
- }
- }
- }
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_save_method_info
- *
- * PARAMETERS: Callback from walk_namespace
- *
- * RETURN: Status
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- * control method under the _GPE portion of the namespace.
- * Extract the name and GPE type from the object, saving this
- * information for quick lookup during GPE dispatch
- *
- * The name of each GPE control method is of the form:
- * "_Lxx" or "_Exx"
- * Where:
- * L - means that the GPE is level triggered
- * E - means that the GPE is edge triggered
- * xx - is the GPE number [in HEX]
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_save_method_info(acpi_handle obj_handle,
- u32 level, void *obj_desc, void **return_value)
-{
- struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
- struct acpi_gpe_event_info *gpe_event_info;
- u32 gpe_number;
- char name[ACPI_NAME_SIZE + 1];
- u8 type;
-
- ACPI_FUNCTION_TRACE(ev_save_method_info);
-
- /*
- * _Lxx and _Exx GPE method support
- *
- * 1) Extract the name from the object and convert to a string
- */
- ACPI_MOVE_32_TO_32(name,
- &((struct acpi_namespace_node *)obj_handle)->name.
- integer);
- name[ACPI_NAME_SIZE] = 0;
-
- /*
- * 2) Edge/Level determination is based on the 2nd character
- * of the method name
- *
- * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
- * if a _PRW object is found that points to this GPE.
- */
- switch (name[1]) {
- case 'L':
- type = ACPI_GPE_LEVEL_TRIGGERED;
- break;
-
- case 'E':
- type = ACPI_GPE_EDGE_TRIGGERED;
- break;
-
- default:
- /* Unknown method type, just ignore it! */
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Ignoring unknown GPE method type: %s "
- "(name not of form _Lxx or _Exx)", name));
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Convert the last two characters of the name to the GPE Number */
-
- gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
- if (gpe_number == ACPI_UINT32_MAX) {
-
- /* Conversion failed; invalid method, just ignore it */
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Could not extract GPE number from name: %s "
- "(name is not of form _Lxx or _Exx)", name));
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Ensure that we have a valid GPE number for this GPE block */
-
- if ((gpe_number < gpe_block->block_base_number) ||
- (gpe_number >= (gpe_block->block_base_number +
- (gpe_block->register_count * 8)))) {
- /*
- * Not valid for this GPE block, just ignore it. However, it may be
- * valid for a different GPE block, since GPE0 and GPE1 methods both
- * appear under \_GPE.
- */
- return_ACPI_STATUS(AE_OK);
- }
-
- /*
- * Now we can add this information to the gpe_event_info block for use
- * during dispatch of this GPE.
- */
- gpe_event_info =
- &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
-
- gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD);
-
- gpe_event_info->dispatch.method_node =
- (struct acpi_namespace_node *)obj_handle;
-
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "Registered GPE method %s as GPE number 0x%.2X\n",
- name, gpe_number));
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_match_prw_and_gpe
- *
- * PARAMETERS: Callback from walk_namespace
- *
- * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
- * not aborted on a single _PRW failure.
- *
- * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
- * Device. Run the _PRW method. If present, extract the GPE
- * number and mark the GPE as a WAKE GPE.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
- u32 level, void *info, void **return_value)
-{
- struct acpi_gpe_walk_info *gpe_info = (void *)info;
- struct acpi_namespace_node *gpe_device;
- struct acpi_gpe_block_info *gpe_block;
- struct acpi_namespace_node *target_gpe_device;
- struct acpi_gpe_event_info *gpe_event_info;
- union acpi_operand_object *pkg_desc;
- union acpi_operand_object *obj_desc;
- u32 gpe_number;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
-
- /* Check for a _PRW method under this device */
-
- status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
- ACPI_BTYPE_PACKAGE, &pkg_desc);
- if (ACPI_FAILURE(status)) {
-
- /* Ignore all errors from _PRW, we don't want to abort the subsystem */
-
- return_ACPI_STATUS(AE_OK);
- }
-
- /* The returned _PRW package must have at least two elements */
-
- if (pkg_desc->package.count < 2) {
- goto cleanup;
- }
-
- /* Extract pointers from the input context */
-
- gpe_device = gpe_info->gpe_device;
- gpe_block = gpe_info->gpe_block;
-
- /*
- * The _PRW object must return a package, we are only interested in the
- * first element
- */
- obj_desc = pkg_desc->package.elements[0];
-
- if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
-
- /* Use FADT-defined GPE device (from definition of _PRW) */
-
- target_gpe_device = acpi_gbl_fadt_gpe_device;
-
- /* Integer is the GPE number in the FADT described GPE blocks */
-
- gpe_number = (u32) obj_desc->integer.value;
- } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
-
- /* Package contains a GPE reference and GPE number within a GPE block */
-
- if ((obj_desc->package.count < 2) ||
- ((obj_desc->package.elements[0])->common.type !=
- ACPI_TYPE_LOCAL_REFERENCE) ||
- ((obj_desc->package.elements[1])->common.type !=
- ACPI_TYPE_INTEGER)) {
- goto cleanup;
- }
-
- /* Get GPE block reference and decode */
-
- target_gpe_device =
- obj_desc->package.elements[0]->reference.node;
- gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
- } else {
- /* Unknown type, just ignore it */
-
- goto cleanup;
- }
-
- /*
- * Is this GPE within this block?
- *
- * TRUE if and only if these conditions are true:
- * 1) The GPE devices match.
- * 2) The GPE index(number) is within the range of the Gpe Block
- * associated with the GPE device.
- */
- if ((gpe_device == target_gpe_device) &&
- (gpe_number >= gpe_block->block_base_number) &&
- (gpe_number < gpe_block->block_base_number +
- (gpe_block->register_count * 8))) {
- gpe_event_info = &gpe_block->event_info[gpe_number -
- gpe_block->
- block_base_number];
-
- gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
- }
-
- cleanup:
- acpi_ut_remove_reference(pkg_desc);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_get_gpe_xrupt_block
- *
- * PARAMETERS: interrupt_number - Interrupt for a GPE block
- *
- * RETURN: A GPE interrupt block
- *
- * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
- * block per unique interrupt level used for GPEs. Should be
- * called only when the GPE lists are semaphore locked and not
- * subject to change.
- *
- ******************************************************************************/
-
-static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
- interrupt_number)
-{
- struct acpi_gpe_xrupt_info *next_gpe_xrupt;
- struct acpi_gpe_xrupt_info *gpe_xrupt;
- acpi_status status;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
-
- /* No need for lock since we are not changing any list elements here */
-
- next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
- while (next_gpe_xrupt) {
- if (next_gpe_xrupt->interrupt_number == interrupt_number) {
- return_PTR(next_gpe_xrupt);
- }
-
- next_gpe_xrupt = next_gpe_xrupt->next;
- }
-
- /* Not found, must allocate a new xrupt descriptor */
-
- gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
- if (!gpe_xrupt) {
- return_PTR(NULL);
- }
-
- gpe_xrupt->interrupt_number = interrupt_number;
-
- /* Install new interrupt descriptor with spin lock */
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- if (acpi_gbl_gpe_xrupt_list_head) {
- next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
- while (next_gpe_xrupt->next) {
- next_gpe_xrupt = next_gpe_xrupt->next;
- }
-
- next_gpe_xrupt->next = gpe_xrupt;
- gpe_xrupt->previous = next_gpe_xrupt;
- } else {
- acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
- }
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
- /* Install new interrupt handler if not SCI_INT */
-
- if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
- status = acpi_os_install_interrupt_handler(interrupt_number,
- acpi_ev_gpe_xrupt_handler,
- gpe_xrupt);
- if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not install GPE interrupt handler at level 0x%X",
- interrupt_number));
- return_PTR(NULL);
- }
- }
-
- return_PTR(gpe_xrupt);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_delete_gpe_xrupt
- *
- * PARAMETERS: gpe_xrupt - A GPE interrupt info block
- *
- * RETURN: Status
- *
- * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
- * interrupt handler if not the SCI interrupt.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
-{
- acpi_status status;
- acpi_cpu_flags flags;
-
- ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
-
- /* We never want to remove the SCI interrupt handler */
-
- if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
- gpe_xrupt->gpe_block_list_head = NULL;
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Disable this interrupt */
-
- status =
- acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
- acpi_ev_gpe_xrupt_handler);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Unlink the interrupt block with lock */
-
- flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- if (gpe_xrupt->previous) {
- gpe_xrupt->previous->next = gpe_xrupt->next;
- } else {
- /* No previous, update list head */
-
- acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
- }
-
- if (gpe_xrupt->next) {
- gpe_xrupt->next->previous = gpe_xrupt->previous;
- }
- acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-
- /* Free the block */
-
- ACPI_FREE(gpe_xrupt);
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ev_install_gpe_block
*
* PARAMETERS: gpe_block - New GPE block
@@ -705,8 +170,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
}
- acpi_current_gpe_count -=
- gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;
+ acpi_current_gpe_count -= gpe_block->gpe_count;
/* Free the gpe_block */
@@ -760,9 +224,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* Allocate the GPE event_info block. There are eight distinct GPEs
* per register. Initialization to zeros is sufficient.
*/
- gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
- register_count *
- ACPI_GPE_REGISTER_WIDTH) *
+ gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
sizeof(struct
acpi_gpe_event_info));
if (!gpe_event_info) {
@@ -880,6 +342,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
{
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_walk_info walk_info;
ACPI_FUNCTION_TRACE(ev_create_gpe_block);
@@ -897,6 +360,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
/* Initialize the new GPE block */
gpe_block->node = gpe_device;
+ gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
gpe_block->register_count = register_count;
gpe_block->block_base_number = gpe_block_base_number;
@@ -921,12 +385,17 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
return_ACPI_STATUS(status);
}
- /* Find all GPE methods (_Lxx, _Exx) for this block */
+ /* Find all GPE methods (_Lxx or _Exx) for this block */
+
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_device;
+ walk_info.enable_this_gpe = FALSE;
+ walk_info.execute_by_owner_id = FALSE;
status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
- acpi_ev_save_method_info, NULL,
- gpe_block, NULL);
+ acpi_ev_match_gpe_method, NULL,
+ &walk_info, NULL);
/* Return the new block */
@@ -938,14 +407,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
"GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
(u32) gpe_block->block_base_number,
(u32) (gpe_block->block_base_number +
- ((gpe_block->register_count *
- ACPI_GPE_REGISTER_WIDTH) - 1)),
+ (gpe_block->gpe_count - 1)),
gpe_device->name.ascii, gpe_block->register_count,
interrupt_number));
/* Update global count of currently available GPEs */
- acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH;
+ acpi_current_gpe_count += gpe_block->gpe_count;
return_ACPI_STATUS(AE_OK);
}
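
The new gpe_count field introduced above is simply register_count * ACPI_GPE_REGISTER_WIDTH (eight GPEs per status/enable register pair), so a block created with two registers at base 0x00 covers GPEs 0x00 through 0x0F, matching the "GPE %02X to %02X" debug output. Hypothetical values for illustration, not ACPICA code:

/* Illustrative arithmetic only */
unsigned int register_count = 2;			/* from the GPE block definition */
unsigned int gpe_count = register_count * 8;		/* ACPI_GPE_REGISTER_WIDTH = 8 -> 16 GPEs */
unsigned int block_base_number = 0x00;
unsigned int last_gpe = block_base_number + gpe_count - 1;	/* 0x0F */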
@@ -969,10 +437,13 @@ acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_block_info *gpe_block)
{
+ acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
- struct acpi_gpe_walk_info gpe_info;
+ struct acpi_gpe_walk_info walk_info;
u32 wake_gpe_count;
u32 gpe_enabled_count;
+ u32 gpe_index;
+ u32 gpe_number;
u32 i;
u32 j;
@@ -995,210 +466,75 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
* definition a wake GPE and will not be enabled while the machine
* is running.
*/
- gpe_info.gpe_block = gpe_block;
- gpe_info.gpe_device = gpe_device;
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_device;
+ walk_info.execute_by_owner_id = FALSE;
- acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ status =
+ acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
acpi_ev_match_prw_and_gpe, NULL,
- &gpe_info, NULL);
+ &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While executing _PRW methods"));
+ }
}
/*
- * Enable all GPEs that have a corresponding method and aren't
+ * Enable all GPEs that have a corresponding method and are not
* capable of generating wakeups. Any other GPEs within this block
- * must be enabled via the acpi_enable_gpe() interface.
+ * must be enabled via the acpi_enable_gpe interface.
*/
wake_gpe_count = 0;
gpe_enabled_count = 0;
- if (gpe_device == acpi_gbl_fadt_gpe_device)
+
+ if (gpe_device == acpi_gbl_fadt_gpe_device) {
gpe_device = NULL;
+ }
for (i = 0; i < gpe_block->register_count; i++) {
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
- acpi_status status;
- acpi_size gpe_index;
- int gpe_number;
/* Get the info block for this particular GPE */
- gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j;
+
+ gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
wake_gpe_count++;
- if (acpi_gbl_leave_wake_gpes_disabled)
+ if (acpi_gbl_leave_wake_gpes_disabled) {
continue;
+ }
}
- if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
+ /* Ignore GPEs that have no corresponding _Lxx/_Exx method */
+
+ if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
continue;
+ }
+
+ /* Enable this GPE */
gpe_number = gpe_index + gpe_block->block_base_number;
status = acpi_enable_gpe(gpe_device, gpe_number,
- ACPI_GPE_TYPE_RUNTIME);
- if (ACPI_FAILURE(status))
- ACPI_ERROR((AE_INFO,
- "Failed to enable GPE %02X\n",
- gpe_number));
- else
- gpe_enabled_count++;
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
- wake_gpe_count, gpe_enabled_count));
-
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_gpe_initialize
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Initialize the GPE data structures
- *
- ******************************************************************************/
-
-acpi_status acpi_ev_gpe_initialize(void)
-{
- u32 register_count0 = 0;
- u32 register_count1 = 0;
- u32 gpe_number_max = 0;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_gpe_initialize);
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Initialize the GPE Block(s) defined in the FADT
- *
- * Why the GPE register block lengths are divided by 2: From the ACPI
- * Spec, section "General-Purpose Event Registers", we have:
- *
- * "Each register block contains two registers of equal length
- * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
- * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
- * The length of the GPE1_STS and GPE1_EN registers is equal to
- * half the GPE1_LEN. If a generic register block is not supported
- * then its respective block pointer and block length values in the
- * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
- * to be the same size."
- */
-
- /*
- * Determine the maximum GPE number for this machine.
- *
- * Note: both GPE0 and GPE1 are optional, and either can exist without
- * the other.
- *
- * If EITHER the register length OR the block address are zero, then that
- * particular block is not supported.
- */
- if (acpi_gbl_FADT.gpe0_block_length &&
- acpi_gbl_FADT.xgpe0_block.address) {
-
- /* GPE block 0 exists (has both length and address > 0) */
-
- register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
-
- gpe_number_max =
- (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
-
- /* Install GPE Block 0 */
-
- status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
- &acpi_gbl_FADT.xgpe0_block,
- register_count0, 0,
- acpi_gbl_FADT.sci_interrupt,
- &acpi_gbl_gpe_fadt_blocks[0]);
-
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Could not create GPE Block 0"));
- }
- }
-
- if (acpi_gbl_FADT.gpe1_block_length &&
- acpi_gbl_FADT.xgpe1_block.address) {
-
- /* GPE block 1 exists (has both length and address > 0) */
-
- register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
-
- /* Check for GPE0/GPE1 overlap (if both banks exist) */
-
- if ((register_count0) &&
- (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
- ACPI_ERROR((AE_INFO,
- "GPE0 block (GPE 0 to %d) overlaps the GPE1 block "
- "(GPE %d to %d) - Ignoring GPE1",
- gpe_number_max, acpi_gbl_FADT.gpe1_base,
- acpi_gbl_FADT.gpe1_base +
- ((register_count1 *
- ACPI_GPE_REGISTER_WIDTH) - 1)));
-
- /* Ignore GPE1 block by setting the register count to zero */
-
- register_count1 = 0;
- } else {
- /* Install GPE Block 1 */
-
- status =
- acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
- &acpi_gbl_FADT.xgpe1_block,
- register_count1,
- acpi_gbl_FADT.gpe1_base,
- acpi_gbl_FADT.
- sci_interrupt,
- &acpi_gbl_gpe_fadt_blocks
- [1]);
-
+ ACPI_GPE_TYPE_RUNTIME);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not create GPE Block 1"));
+ "Could not enable GPE 0x%02X",
+ gpe_number));
+ continue;
}
- /*
- * GPE0 and GPE1 do not have to be contiguous in the GPE number
- * space. However, GPE0 always starts at GPE number zero.
- */
- gpe_number_max = acpi_gbl_FADT.gpe1_base +
- ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+ gpe_enabled_count++;
}
}
- /* Exit if there are no GPE registers */
-
- if ((register_count0 + register_count1) == 0) {
-
- /* GPEs are not required by ACPI, this is OK */
-
+ if (gpe_enabled_count || wake_gpe_count) {
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
- "There are no GPE blocks defined in the FADT\n"));
- status = AE_OK;
- goto cleanup;
- }
-
- /* Check for Max GPE number out-of-range */
-
- if (gpe_number_max > ACPI_GPE_MAX) {
- ACPI_ERROR((AE_INFO,
- "Maximum GPE number from FADT is too large: 0x%X",
- gpe_number_max));
- status = AE_BAD_VALUE;
- goto cleanup;
+ "Enabled %u Runtime GPEs, added %u Wake GPEs in this block\n",
+ gpe_enabled_count, wake_gpe_count));
}
- cleanup:
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
new file mode 100644
index 000000000000..3f6c2d26410d
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -0,0 +1,653 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeinit - System GPE initialization and update
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeinit")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_initialize
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
+ *
+ ******************************************************************************/
+acpi_status acpi_ev_gpe_initialize(void)
+{
+ u32 register_count0 = 0;
+ u32 register_count1 = 0;
+ u32 gpe_number_max = 0;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_gpe_initialize);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Initialize the GPE Block(s) defined in the FADT
+ *
+ * Why the GPE register block lengths are divided by 2: From the ACPI
+ * Spec, section "General-Purpose Event Registers", we have:
+ *
+ * "Each register block contains two registers of equal length
+ * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
+ * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
+ * The length of the GPE1_STS and GPE1_EN registers is equal to
+ * half the GPE1_LEN. If a generic register block is not supported
+ * then its respective block pointer and block length values in the
+ * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
+ * to be the same size."
+ */
+
+ /*
+ * Determine the maximum GPE number for this machine.
+ *
+ * Note: both GPE0 and GPE1 are optional, and either can exist without
+ * the other.
+ *
+ * If EITHER the register length OR the block address are zero, then that
+ * particular block is not supported.
+ */
+ if (acpi_gbl_FADT.gpe0_block_length &&
+ acpi_gbl_FADT.xgpe0_block.address) {
+
+ /* GPE block 0 exists (has both length and address > 0) */
+
+ register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
+
+ gpe_number_max =
+ (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
+
+ /* Install GPE Block 0 */
+
+ status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe0_block,
+ register_count0, 0,
+ acpi_gbl_FADT.sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks[0]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 0"));
+ }
+ }
+
+ if (acpi_gbl_FADT.gpe1_block_length &&
+ acpi_gbl_FADT.xgpe1_block.address) {
+
+ /* GPE block 1 exists (has both length and address > 0) */
+
+ register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);
+
+ /* Check for GPE0/GPE1 overlap (if both banks exist) */
+
+ if ((register_count0) &&
+ (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
+ ACPI_ERROR((AE_INFO,
+ "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
+ "(GPE %u to %u) - Ignoring GPE1",
+ gpe_number_max, acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.gpe1_base +
+ ((register_count1 *
+ ACPI_GPE_REGISTER_WIDTH) - 1)));
+
+ /* Ignore GPE1 block by setting the register count to zero */
+
+ register_count1 = 0;
+ } else {
+ /* Install GPE Block 1 */
+
+ status =
+ acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe1_block,
+ register_count1,
+ acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.
+ sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks
+ [1]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 1"));
+ }
+
+ /*
+ * GPE0 and GPE1 do not have to be contiguous in the GPE number
+ * space. However, GPE0 always starts at GPE number zero.
+ */
+ gpe_number_max = acpi_gbl_FADT.gpe1_base +
+ ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+ }
+ }
+
+ /* Exit if there are no GPE registers */
+
+ if ((register_count0 + register_count1) == 0) {
+
+ /* GPEs are not required by ACPI, this is OK */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "There are no GPE blocks defined in the FADT\n"));
+ status = AE_OK;
+ goto cleanup;
+ }
+
+ /* Check for Max GPE number out-of-range */
+
+ if (gpe_number_max > ACPI_GPE_MAX) {
+ ACPI_ERROR((AE_INFO,
+ "Maximum GPE number from FADT is too large: 0x%X",
+ gpe_number_max));
+ status = AE_BAD_VALUE;
+ goto cleanup;
+ }
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
+
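+
Worked example of the register-count computation above, for a hypothetical FADT with GPE0_LEN = 8 and no GPE1 block: the 8-byte register block splits into a 4-byte GPE0_STS and a 4-byte GPE0_EN, giving four register pairs and GPE numbers 0 through 31. Illustrative arithmetic only:

#include <stdint.h>

uint32_t gpe0_block_length = 8;				/* bytes: GPE0_STS + GPE0_EN */
uint32_t register_count0 = gpe0_block_length / 2;	/* 4 status/enable register pairs */
uint32_t gpe_number_max = (register_count0 * 8) - 1;	/* ACPI_GPE_REGISTER_WIDTH = 8 -> 31 */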
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_update_gpes
+ *
+ * PARAMETERS: table_owner_id - ID of the newly-loaded ACPI table
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
+ * result of a Load() or load_table() operation. If new GPE
+ * methods have been installed, register the new methods and
+ * enable any runtime GPEs that are associated with them. Also,
+ * run any newly loaded _PRW methods in order to discover any
+ * new CAN_WAKE GPEs.
+ *
+ ******************************************************************************/
+
+void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_walk_info walk_info;
+ acpi_status status = AE_OK;
+ u32 new_wake_gpe_count = 0;
+
+ /* We will examine only _PRW/_Lxx/_Exx methods owned by this table */
+
+ walk_info.owner_id = table_owner_id;
+ walk_info.execute_by_owner_id = TRUE;
+ walk_info.count = 0;
+
+ if (acpi_gbl_leave_wake_gpes_disabled) {
+ /*
+ * 1) Run any newly-loaded _PRW methods to find any GPEs that
+ * can now be marked as CAN_WAKE GPEs. Note: We must run the
+ * _PRW methods before we process the _Lxx/_Exx methods because
+ * we will enable all runtime GPEs associated with the new
+ * _Lxx/_Exx methods at the time we process those methods.
+ *
+ * Unlock interpreter so that we can run the _PRW methods.
+ */
+ walk_info.gpe_block = NULL;
+ walk_info.gpe_device = NULL;
+
+ acpi_ex_exit_interpreter();
+
+ status =
+ acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_prw_and_gpe, NULL,
+ &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While executing _PRW methods"));
+ }
+
+ acpi_ex_enter_interpreter();
+ new_wake_gpe_count = walk_info.count;
+ }
+
+ /*
+ * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
+ *
+ * Any GPEs that correspond to new _Lxx/_Exx methods and are not
+ * marked as CAN_WAKE are immediately enabled.
+ *
+ * Examine the namespace underneath each gpe_device within the
+ * gpe_block lists.
+ */
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ walk_info.count = 0;
+ walk_info.enable_this_gpe = TRUE;
+
+ /* Walk the interrupt level descriptor list */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+
+ /* Walk all Gpe Blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+ walk_info.gpe_block = gpe_block;
+ walk_info.gpe_device = gpe_block->node;
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
+ walk_info.gpe_device,
+ ACPI_UINT32_MAX,
+ ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_gpe_method,
+ NULL, &walk_info, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While decoding _Lxx/_Exx methods"));
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+
+ if (walk_info.count || new_wake_gpe_count) {
+ ACPI_INFO((AE_INFO,
+ "Enabled %u new runtime GPEs, added %u new wakeup GPEs",
+ walk_info.count, new_wake_gpe_count));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_gpe_method
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * control method under the _GPE portion of the namespace.
+ * Extract the name and GPE type from the object, saving this
+ * information for quick lookup during GPE dispatch. Allows a
+ * per-owner_id evaluation if execute_by_owner_id is TRUE in the
+ * walk_info parameter block.
+ *
+ * The name of each GPE control method is of the form:
+ * "_Lxx" or "_Exx", where:
+ * L - means that the GPE is level triggered
+ * E - means that the GPE is edge triggered
+ * xx - is the GPE number [in HEX]
+ *
+ * If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
+ * with that owner.
+ * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
+ * method is immediately enabled (Used for Load/load_table operators)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_match_gpe_method(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_namespace_node *method_node =
+ ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
+ struct acpi_gpe_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_namespace_node *gpe_device;
+ acpi_status status;
+ u32 gpe_number;
+ char name[ACPI_NAME_SIZE + 1];
+ u8 type;
+
+ ACPI_FUNCTION_TRACE(ev_match_gpe_method);
+
+ /* Check if requested owner_id matches this owner_id */
+
+ if ((walk_info->execute_by_owner_id) &&
+ (method_node->owner_id != walk_info->owner_id)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Match and decode the _Lxx and _Exx GPE method names
+ *
+ * 1) Extract the method name and null terminate it
+ */
+ ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
+ name[ACPI_NAME_SIZE] = 0;
+
+ /* 2) Name must begin with an underscore */
+
+ if (name[0] != '_') {
+ return_ACPI_STATUS(AE_OK); /* Ignore this method */
+ }
+
+ /*
+ * 3) Edge/Level determination is based on the 2nd character
+ * of the method name
+ *
+ * NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
+ * found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
+ */
+ switch (name[1]) {
+ case 'L':
+ type = ACPI_GPE_LEVEL_TRIGGERED;
+ break;
+
+ case 'E':
+ type = ACPI_GPE_EDGE_TRIGGERED;
+ break;
+
+ default:
+ /* Unknown method type, just ignore it */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Ignoring unknown GPE method type: %s "
+ "(name not of form _Lxx or _Exx)", name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* 4) The last two characters of the name are the hex GPE Number */
+
+ gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
+ if (gpe_number == ACPI_UINT32_MAX) {
+
+ /* Conversion failed; invalid method, just ignore it */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Could not extract GPE number from name: %s "
+ "(name is not of form _Lxx or _Exx)", name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Ensure that we have a valid GPE number for this GPE block */
+
+ gpe_event_info =
+ acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
+ if (!gpe_event_info) {
+ /*
+ * This gpe_number is not valid for this GPE block, just ignore it.
+ * However, it may be valid for a different GPE block, since GPE0
+ * and GPE1 methods both appear under \_GPE.
+ */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+
+ /* If there is already a handler, ignore this GPE method */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_METHOD) {
+ /*
+ * If there is already a method, ignore this method. But check
+ * for a type mismatch (if both the _Lxx AND _Exx exist)
+ */
+ if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
+ ACPI_ERROR((AE_INFO,
+ "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
+ gpe_number, gpe_number, gpe_number));
+ }
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Add the GPE information from above to the gpe_event_info block for
+ * use during dispatch of this GPE.
+ */
+ gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
+ gpe_event_info->dispatch.method_node = method_node;
+
+ /*
+ * Enable this GPE if requested. This only happens during the
+ * execution of a Load or load_table operator. We have found a new
+ * GPE method and want to immediately enable the GPE if it is a
+ * runtime GPE.
+ */
+ if (walk_info->enable_this_gpe) {
+
+ /* Ignore GPEs that can wake the system */
+
+ if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
+ !acpi_gbl_leave_wake_gpes_disabled) {
+ walk_info->count++;
+ gpe_device = walk_info->gpe_device;
+
+ if (gpe_device == acpi_gbl_fadt_gpe_device) {
+ gpe_device = NULL;
+ }
+
+ status = acpi_enable_gpe(gpe_device, gpe_number,
+ ACPI_GPE_TYPE_RUNTIME);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not enable GPE 0x%02X",
+ gpe_number));
+ }
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Registered GPE method %s as GPE number 0x%.2X\n",
+ name, gpe_number));
+ return_ACPI_STATUS(AE_OK);
+}
+
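+
Worked example of the name decoding above: a method named "_L1C" is level-triggered and maps to GPE number 0x1C, while "_E02" is edge-triggered GPE 0x02. The stand-alone helper below mirrors that decode with strtol in place of ACPI_STRTOUL; the function name is illustrative, not part of ACPICA.

#include <stdlib.h>
#include <string.h>

/* Returns the GPE number, or -1 if the name is not of the form _Lxx/_Exx.
 * *level is set to 1 for level-triggered (_Lxx), 0 for edge-triggered (_Exx). */
static long decode_gpe_method_name(const char *name, int *level)
{
	char *end;
	long gpe_number;

	if (strlen(name) != 4 || name[0] != '_')
		return -1;			/* not a 4-character _Lxx/_Exx name */

	if (name[1] == 'L')
		*level = 1;
	else if (name[1] == 'E')
		*level = 0;
	else
		return -1;			/* unknown method type, ignored */

	gpe_number = strtol(&name[2], &end, 16);	/* last two characters are hex */
	if (*end != '\0')
		return -1;			/* conversion failed, ignored */

	return gpe_number;			/* e.g. "_L1C" -> 0x1C */
}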
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_prw_and_gpe
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
+ * not aborted on a single _PRW failure.
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * Device. Run the _PRW method. If present, extract the GPE
+ * number and mark the GPE as a CAN_WAKE GPE. Allows a
+ * per-owner_id execution if execute_by_owner_id is TRUE in the
+ * walk_info parameter block.
+ *
+ * If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
+ * owner.
+ * If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
+ * we only execute _PRWs that refer to the input gpe_device.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ struct acpi_gpe_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
+ struct acpi_namespace_node *gpe_device;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_namespace_node *target_gpe_device;
+ struct acpi_namespace_node *prw_node;
+ struct acpi_gpe_event_info *gpe_event_info;
+ union acpi_operand_object *pkg_desc;
+ union acpi_operand_object *obj_desc;
+ u32 gpe_number;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
+
+ /* Check for a _PRW method under this device */
+
+ status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
+ ACPI_NS_NO_UPSEARCH, &prw_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Check if requested owner_id matches this owner_id */
+
+ if ((walk_info->execute_by_owner_id) &&
+ (prw_node->owner_id != walk_info->owner_id)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Execute the _PRW */
+
+ status = acpi_ut_evaluate_object(prw_node, NULL,
+ ACPI_BTYPE_PACKAGE, &pkg_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* The returned _PRW package must have at least two elements */
+
+ if (pkg_desc->package.count < 2) {
+ goto cleanup;
+ }
+
+ /* Extract pointers from the input context */
+
+ gpe_device = walk_info->gpe_device;
+ gpe_block = walk_info->gpe_block;
+
+ /*
+ * The _PRW object must return a package, we are only interested
+ * in the first element
+ */
+ obj_desc = pkg_desc->package.elements[0];
+
+ if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
+
+ /* Use FADT-defined GPE device (from definition of _PRW) */
+
+ target_gpe_device = NULL;
+ if (gpe_device) {
+ target_gpe_device = acpi_gbl_fadt_gpe_device;
+ }
+
+ /* Integer is the GPE number in the FADT described GPE blocks */
+
+ gpe_number = (u32)obj_desc->integer.value;
+ } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
+
+ /* Package contains a GPE reference and GPE number within a GPE block */
+
+ if ((obj_desc->package.count < 2) ||
+ ((obj_desc->package.elements[0])->common.type !=
+ ACPI_TYPE_LOCAL_REFERENCE) ||
+ ((obj_desc->package.elements[1])->common.type !=
+ ACPI_TYPE_INTEGER)) {
+ goto cleanup;
+ }
+
+ /* Get GPE block reference and decode */
+
+ target_gpe_device =
+ obj_desc->package.elements[0]->reference.node;
+ gpe_number = (u32)obj_desc->package.elements[1]->integer.value;
+ } else {
+ /* Unknown type, just ignore it */
+
+ goto cleanup;
+ }
+
+ /* Get the gpe_event_info for this GPE */
+
+ if (gpe_device) {
+ /*
+ * Is this GPE within this block?
+ *
+ * TRUE if and only if these conditions are true:
+ * 1) The GPE devices match.
+ * 2) The GPE index(number) is within the range of the Gpe Block
+ * associated with the GPE device.
+ */
+ if (gpe_device != target_gpe_device) {
+ goto cleanup;
+ }
+
+ gpe_event_info =
+ acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
+ } else {
+ /* gpe_device is NULL, just match the target_device and gpe_number */
+
+ gpe_event_info =
+ acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
+ }
+
+ if (gpe_event_info) {
+ if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+
+ /* This GPE can wake the system */
+
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ walk_info->count++;
+ }
+ }
+
+ cleanup:
+ acpi_ut_remove_reference(pkg_desc);
+ return_ACPI_STATUS(AE_OK);
+}
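
For reference, the two _PRW shapes accepted above are: element 0 is an Integer (a GPE number within the FADT GPE blocks), or element 0 is itself a package of at least {Reference to a GPE block device, Integer GPE number}. The predicate below mirrors only the inner-package validation; it assumes the ACPICA object definitions (union acpi_operand_object, ACPI_TYPE_*) are available and is an illustrative sketch, not actual ACPICA code.

/* TRUE if a nested _PRW GPE reference has the expected {Reference, Integer} shape */
static u8 prw_inner_package_is_valid(union acpi_operand_object *pkg)
{
	return ((pkg->package.count >= 2) &&
		(pkg->package.elements[0]->common.type ==
		 ACPI_TYPE_LOCAL_REFERENCE) &&
		(pkg->package.elements[1]->common.type == ACPI_TYPE_INTEGER));
}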
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
new file mode 100644
index 000000000000..19a0e513ea48
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -0,0 +1,337 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeutil - GPE utilities
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeutil")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_walk_gpe_list
+ *
+ * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
+ * Context - Value passed to callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk the GPE lists.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
+{
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ acpi_status status = AE_OK;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Walk the interrupt level descriptor list */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+
+ /* Walk all Gpe Blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+
+ /* One callback per GPE block */
+
+ status =
+ gpe_walk_callback(gpe_xrupt_info, gpe_block,
+ context);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_END) { /* Callback abort */
+ status = AE_OK;
+ }
+ goto unlock_and_exit;
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_valid_gpe_event
+ *
+ * PARAMETERS: gpe_event_info - Info for this GPE
+ *
+ * RETURN: TRUE if the gpe_event is valid
+ *
+ * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
+ * Should be called only when the GPE lists are semaphore locked
+ * and not subject to change.
+ *
+ ******************************************************************************/
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_block;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* No need for spin lock since we are not changing any list elements */
+
+ /* Walk the GPE interrupt levels */
+
+ gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_block) {
+ gpe_block = gpe_xrupt_block->gpe_block_list_head;
+
+ /* Walk the GPE blocks on this interrupt level */
+
+ while (gpe_block) {
+ if ((&gpe_block->event_info[0] <= gpe_event_info) &&
+ (&gpe_block->event_info[gpe_block->gpe_count] >
+ gpe_event_info)) {
+ return (TRUE);
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_block = gpe_xrupt_block->next;
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_get_gpe_xrupt_block
+ *
+ * PARAMETERS: interrupt_number - Interrupt for a GPE block
+ *
+ * RETURN: A GPE interrupt block
+ *
+ * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
+ * block per unique interrupt level used for GPEs. Should be
+ * called only when the GPE lists are semaphore locked and not
+ * subject to change.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
+{
+ struct acpi_gpe_xrupt_info *next_gpe_xrupt;
+ struct acpi_gpe_xrupt_info *gpe_xrupt;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
+
+ /* No need for lock since we are not changing any list elements here */
+
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt) {
+ if (next_gpe_xrupt->interrupt_number == interrupt_number) {
+ return_PTR(next_gpe_xrupt);
+ }
+
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ /* Not found, must allocate a new xrupt descriptor */
+
+ gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
+ if (!gpe_xrupt) {
+ return_PTR(NULL);
+ }
+
+ gpe_xrupt->interrupt_number = interrupt_number;
+
+ /* Install new interrupt descriptor with spin lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (acpi_gbl_gpe_xrupt_list_head) {
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt->next) {
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ next_gpe_xrupt->next = gpe_xrupt;
+ gpe_xrupt->previous = next_gpe_xrupt;
+ } else {
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Install new interrupt handler if not SCI_INT */
+
+ if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
+ status = acpi_os_install_interrupt_handler(interrupt_number,
+ acpi_ev_gpe_xrupt_handler,
+ gpe_xrupt);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not install GPE interrupt handler at level 0x%X",
+ interrupt_number));
+ return_PTR(NULL);
+ }
+ }
+
+ return_PTR(gpe_xrupt);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_xrupt
+ *
+ * PARAMETERS: gpe_xrupt - A GPE interrupt info block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
+ * interrupt handler if not the SCI interrupt.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
+
+ /* We never want to remove the SCI interrupt handler */
+
+ if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
+ gpe_xrupt->gpe_block_list_head = NULL;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Disable this interrupt */
+
+ status =
+ acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
+ acpi_ev_gpe_xrupt_handler);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Unlink the interrupt block with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (gpe_xrupt->previous) {
+ gpe_xrupt->previous->next = gpe_xrupt->next;
+ } else {
+ /* No previous, update list head */
+
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
+ }
+
+ if (gpe_xrupt->next) {
+ gpe_xrupt->next->previous = gpe_xrupt->previous;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Free the block */
+
+ ACPI_FREE(gpe_xrupt);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_handlers
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
+ * Used only prior to termination.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Now look at the individual GPEs in this byte register */
+
+ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+ gpe_event_info = &gpe_block->event_info[((acpi_size) i *
+ ACPI_GPE_REGISTER_WIDTH)
+ + j];
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+ ACPI_FREE(gpe_event_info->dispatch.handler);
+ gpe_event_info->dispatch.handler = NULL;
+ gpe_event_info->flags &=
+ ~ACPI_GPE_DISPATCH_MASK;
+ }
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
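The generic walker above is driven with one callback invocation per GPE block. A minimal sketch of a caller, assuming the internal acevents.h declarations for acpi_ev_walk_gpe_list and acpi_ev_delete_gpe_handlers (the termination path in evmisc.c makes essentially this call):

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"

/* Sketch: free every installed GPE handler before subsystem termination */
static acpi_status example_free_all_gpe_handlers(void)
{
        /*
         * The walker holds acpi_gbl_gpe_lock across the whole traversal,
         * so the callback must not sleep or re-acquire the GPE lock.
         */
        return acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
}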
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 9a3cb7045a32..df0aea9a8cfd 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -590,7 +590,7 @@ void acpi_ev_terminate(void)
status = acpi_disable_event(i, 0);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
- "Could not disable fixed event %d",
+ "Could not disable fixed event %u",
(u32) i));
}
}
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index b40757955f9b..cc825023012a 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -142,7 +142,7 @@ acpi_install_fixed_event_handler(u32 event,
if (ACPI_SUCCESS(status))
status = acpi_enable_event(event, 0);
if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
+ ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
event));
/* Remove the handler */
@@ -203,7 +203,7 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO,
- "Could not write to fixed event enable register %X",
+ "Could not write to fixed event enable register 0x%X",
event));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
@@ -682,14 +682,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
/* Parameter validation */
- if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
- status = AE_BAD_PARAMETER;
- goto exit;
+ if ((!address) || (type & ~ACPI_GPE_XRUPT_TYPE_MASK)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
- goto exit;
+ return_ACPI_STATUS(status);
}
/* Ensure that we have a valid GPE number */
@@ -720,6 +719,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
handler->context = context;
handler->method_node = gpe_event_info->dispatch.method_node;
+ /* Disable the GPE before installing the handler */
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
/* Install the handler */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -733,12 +739,8 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- exit:
- if (ACPI_FAILURE(status))
- ACPI_EXCEPTION((AE_INFO, status,
- "Installing notify handler failed"));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 5ff32c78ea2d..7c7bbb4d402c 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -203,21 +203,26 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
*
* FUNCTION: acpi_set_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * action - Enable or disable
- * Called from ISR or not
+ * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
*
* RETURN: Status
*
- * DESCRIPTION: Enable or disable an ACPI event (general purpose)
+ * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
+ * the reference count mechanism used in the acpi_enable_gpe and
+ * acpi_disable_gpe interfaces -- and should be used with care.
+ *
+ * Note: Typically used to disable a runtime GPE for a short period of time,
+ * then re-enable it, without disturbing the existing reference counts. This
+ * is useful, for example, in the Embedded Controller (EC) driver.
*
******************************************************************************/
acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
- acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_status status;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_set_gpe);
@@ -243,7 +248,6 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid action\n"));
status = AE_BAD_PARAMETER;
break;
}
@@ -259,25 +263,31 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe)
*
* FUNCTION: acpi_enable_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * type - Purpose the GPE will be used for
+ * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
+ * or both
*
* RETURN: Status
*
- * DESCRIPTION: Take a reference to a GPE and enable it if necessary
+ * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
+ * hardware-enabled (for runtime GPEs), or the GPE register mask
+ * is updated (for wake GPEs).
*
******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
{
acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_enable_gpe);
- if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
+ /* Parameter validation */
+
+ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -289,26 +299,43 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
goto unlock_and_exit;
}
- if (type & ACPI_GPE_TYPE_RUNTIME) {
- if (++gpe_event_info->runtime_count == 1) {
+ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
+ if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
+ status = AE_LIMIT; /* Too many references */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->runtime_count++;
+ if (gpe_event_info->runtime_count == 1) {
status = acpi_ev_enable_gpe(gpe_event_info);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
gpe_event_info->runtime_count--;
+ goto unlock_and_exit;
+ }
}
}
- if (type & ACPI_GPE_TYPE_WAKE) {
+ if (gpe_type & ACPI_GPE_TYPE_WAKE) {
+ /* The GPE must have the ability to wake the system */
+
if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
- status = AE_BAD_PARAMETER;
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ if (gpe_event_info->wakeup_count == ACPI_UINT8_MAX) {
+ status = AE_LIMIT; /* Too many references */
goto unlock_and_exit;
}
/*
- * Wake-up GPEs are only enabled right prior to putting the
- * system into a sleep state.
+ * Update the enable mask on the first wakeup reference. Wake GPEs
+ * are only hardware-enabled just before sleeping.
*/
- if (++gpe_event_info->wakeup_count == 1)
- acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ gpe_event_info->wakeup_count++;
+ if (gpe_event_info->wakeup_count == 1) {
+ (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ }
}
unlock_and_exit:
@@ -321,27 +348,34 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
*
* FUNCTION: acpi_disable_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * type - Purpose the GPE won't be used for any more
+ * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
+ * or both
*
* RETURN: Status
*
- * DESCRIPTION: Release a reference to a GPE and disable it if necessary
+ * DESCRIPTION: Remove a reference to a GPE. Only when the last reference is
+ * removed is the GPE disabled (for runtime GPEs) or the GPE
+ * enable mask bit cleared (for wake GPEs).
*
******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
{
acpi_status status = AE_OK;
- acpi_cpu_flags flags;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_disable_gpe);
- if (type & ~ACPI_GPE_TYPE_WAKE_RUN)
+ /* Parameter validation */
+
+ if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
@@ -350,18 +384,39 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
goto unlock_and_exit;
}
- if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) {
- if (--gpe_event_info->runtime_count == 0)
+ /* Hardware-disable a runtime GPE on removal of the last reference */
+
+ if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
+ if (!gpe_event_info->runtime_count) {
+ status = AE_LIMIT; /* There are no references to remove */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->runtime_count--;
+ if (!gpe_event_info->runtime_count) {
status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ gpe_event_info->runtime_count++;
+ goto unlock_and_exit;
+ }
+ }
}
- if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) {
- /*
- * Wake-up GPEs are not enabled after leaving system sleep
- * states, so we don't need to disable them here.
- */
- if (--gpe_event_info->wakeup_count == 0)
- acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ /*
+ * Update masks for a wake GPE on removal of the last reference.
+ * No need to hardware-disable wake GPEs here; they are not currently
+ * enabled.
+ */
+ if (gpe_type & ACPI_GPE_TYPE_WAKE) {
+ if (!gpe_event_info->wakeup_count) {
+ status = AE_LIMIT; /* There are no references to remove */
+ goto unlock_and_exit;
+ }
+
+ gpe_event_info->wakeup_count--;
+ if (!gpe_event_info->wakeup_count) {
+ (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
+ }
}
unlock_and_exit:
@@ -465,30 +520,23 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
*
* FUNCTION: acpi_clear_gpe
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * Flags - Called from an ISR or not
*
* RETURN: Status
*
* DESCRIPTION: Clear an ACPI event (general purpose)
*
******************************************************************************/
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_clear_gpe);
- /* Use semaphore lock if not executing at interrupt level */
-
- if (flags & ACPI_NOT_ISR) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@@ -501,9 +549,7 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
status = acpi_hw_clear_gpe(gpe_event_info);
unlock_and_exit:
- if (flags & ACPI_NOT_ISR) {
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -569,9 +615,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
*
* FUNCTION: acpi_get_gpe_status
*
- * PARAMETERS: gpe_device - Parent GPE Device
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * Flags - Called from an ISR or not
* event_status - Where the current status of the event will
* be returned
*
@@ -582,21 +627,15 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
******************************************************************************/
acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
- u32 gpe_number, u32 flags, acpi_event_status * event_status)
+ u32 gpe_number, acpi_event_status *event_status)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
+ acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
- /* Use semaphore lock if not executing at interrupt level */
-
- if (flags & ACPI_NOT_ISR) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
@@ -614,9 +653,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
*event_status |= ACPI_EVENT_FLAG_HANDLE;
unlock_and_exit:
- if (flags & ACPI_NOT_ISR) {
- (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
- }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
@@ -673,20 +710,15 @@ acpi_install_gpe_block(acpi_handle gpe_device,
goto unlock_and_exit;
}
- /* Run the _PRW methods and enable the GPEs */
-
- status = acpi_ev_initialize_gpe_block(node, gpe_block);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
-
- /* Get the device_object attached to the node */
+ /* Install block in the device_object attached to the node */
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
- /* No object, create a new one */
-
+ /*
+ * No object, create a new one (Device nodes do not always have
+ * an attached object)
+ */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
if (!obj_desc) {
status = AE_NO_MEMORY;
@@ -705,10 +737,14 @@ acpi_install_gpe_block(acpi_handle gpe_device,
}
}
- /* Install the GPE block in the device_object */
+ /* Now install the GPE block in the device_object */
obj_desc->device.gpe_block = gpe_block;
+ /* Run the _PRW methods and enable the runtime GPEs in the new block */
+
+ status = acpi_ev_initialize_gpe_block(node, gpe_block);
+
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
@@ -839,8 +875,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Increment Index by the number of GPEs in this block */
- info->next_block_base_index +=
- (gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH);
+ info->next_block_base_index += gpe_block->gpe_count;
if (info->index < info->next_block_base_index) {
/*
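The reference-counted interfaces in this file are easiest to see from a driver's perspective. A minimal sketch, assuming only the public calls shown above (acpi_enable_gpe, acpi_set_gpe, acpi_clear_gpe, acpi_disable_gpe); the gpe_number argument is hypothetical:

#include <acpi/acpi.h>

/* Sketch: typical runtime-GPE handling, similar to the EC driver pattern */
static acpi_status example_runtime_gpe(u32 gpe_number)
{
        acpi_status status;

        /* First reference hardware-enables the GPE */
        status = acpi_enable_gpe(NULL, gpe_number, ACPI_GPE_TYPE_RUNTIME);
        if (ACPI_FAILURE(status))
                return status;

        /* Temporarily mask it without touching the reference count */
        (void)acpi_set_gpe(NULL, gpe_number, ACPI_GPE_DISABLE);
        (void)acpi_clear_gpe(NULL, gpe_number);
        (void)acpi_set_gpe(NULL, gpe_number, ACPI_GPE_ENABLE);

        /* Dropping the last reference hardware-disables the GPE again */
        return acpi_disable_gpe(NULL, gpe_number, ACPI_GPE_TYPE_RUNTIME);
}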
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 7e8b3bedc376..008621c5ad85 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -82,8 +82,9 @@ acpi_ex_add_table(u32 table_index,
struct acpi_namespace_node *parent_node,
union acpi_operand_object **ddb_handle)
{
- acpi_status status;
union acpi_operand_object *obj_desc;
+ acpi_status status;
+ acpi_owner_id owner_id;
ACPI_FUNCTION_TRACE(ex_add_table);
@@ -119,7 +120,14 @@ acpi_ex_add_table(u32 table_index,
acpi_ns_exec_module_code_list();
acpi_ex_enter_interpreter();
- return_ACPI_STATUS(status);
+ /* Update GPEs for any new _PRW or _Lxx/_Exx methods. Ignore errors */
+
+ status = acpi_tb_get_owner_id(table_index, &owner_id);
+ if (ACPI_SUCCESS(status)) {
+ acpi_ev_update_gpes(owner_id);
+ }
+
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@@ -248,10 +256,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_SUCCESS(status)) {
- ACPI_INFO((AE_INFO,
- "Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]",
- table->signature, table->oem_id,
- table->oem_table_id));
+ ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ acpi_tb_print_table_header(0, table);
}
/* Invoke table handler if present */
@@ -525,6 +531,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(status);
}
+ ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ acpi_tb_print_table_header(0, table_desc.pointer);
+
/* Remove the reference by added by acpi_ex_store above */
acpi_ut_remove_reference(ddb_handle);
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bda7aed0404b..b73bc50c5b76 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -650,7 +650,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
- "Bad destination type during conversion: %X",
+ "Bad destination type during conversion: 0x%X",
destination_type));
status = AE_AML_INTERNAL;
break;
@@ -665,7 +665,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
+ "Unknown Target type ID 0x%X AmlOpcode 0x%X DestType %s",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->opcode,
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 0aa57d938698..3c61b48c73f5 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -306,12 +306,12 @@ acpi_ex_create_region(u8 * aml_start,
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) {
- ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
+ ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
- ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (%X)\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
acpi_ut_get_region_name(region_space), region_space));
/* Create the region descriptor */
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
new file mode 100644
index 000000000000..be8c98b480d7
--- /dev/null
+++ b/drivers/acpi/acpica/exdebug.c
@@ -0,0 +1,261 @@
+/******************************************************************************
+ *
+ * Module Name: exdebug - Support for stores to the AML Debug Object
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exdebug")
+
+#ifndef ACPI_NO_ERROR_MESSAGES
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_debug_object
+ *
+ * PARAMETERS: source_desc - Object to be output to "Debug Object"
+ * Level - Indentation level (used for packages)
+ * Index - Current package element, zero if not pkg
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Handles stores to the AML Debug Object. For example:
+ * Store(INT1, Debug)
+ *
+ * This function is not compiled if ACPI_NO_ERROR_MESSAGES is set.
+ *
+ * This function is only enabled if acpi_gbl_enable_aml_debug_object is set, or
+ * if ACPI_LV_DEBUG_OBJECT is set in the acpi_dbg_level. Thus, in the normal
+ * operational case, stores to the debug object are ignored but can be easily
+ * enabled if necessary.
+ *
+ ******************************************************************************/
+void
+acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
+ u32 level, u32 index)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
+
+ /* Output must be enabled via the debug_object global or the dbg_level */
+
+ if (!acpi_gbl_enable_aml_debug_object &&
+ !(acpi_dbg_level & ACPI_LV_DEBUG_OBJECT)) {
+ return_VOID;
+ }
+
+ /*
+ * Print line header as long as we are not in the middle of an
+ * object display
+ */
+ if (!((level > 0) && index == 0)) {
+ acpi_os_printf("[ACPI Debug] %*s", level, " ");
+ }
+
+ /* Display the index for package output only */
+
+ if (index > 0) {
+ acpi_os_printf("(%.2u) ", index - 1);
+ }
+
+ if (!source_desc) {
+ acpi_os_printf("[Null Object]\n");
+ return_VOID;
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
+ acpi_os_printf("%s ",
+ acpi_ut_get_object_type_name(source_desc));
+
+ if (!acpi_ut_valid_internal_object(source_desc)) {
+ acpi_os_printf("%p, Invalid Internal Object!\n",
+ source_desc);
+ return_VOID;
+ }
+ } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf("%s: %p\n",
+ acpi_ut_get_type_name(((struct
+ acpi_namespace_node *)
+ source_desc)->type),
+ source_desc);
+ return_VOID;
+ } else {
+ return_VOID;
+ }
+
+ /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
+
+ switch (source_desc->common.type) {
+ case ACPI_TYPE_INTEGER:
+
+ /* Output correct integer width */
+
+ if (acpi_gbl_integer_byte_width == 4) {
+ acpi_os_printf("0x%8.8X\n",
+ (u32)source_desc->integer.value);
+ } else {
+ acpi_os_printf("0x%8.8X%8.8X\n",
+ ACPI_FORMAT_UINT64(source_desc->integer.
+ value));
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length);
+ acpi_ut_dump_buffer2(source_desc->buffer.pointer,
+ (source_desc->buffer.length < 256) ?
+ source_desc->buffer.length : 256,
+ DB_BYTE_DISPLAY);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ acpi_os_printf("[0x%.2X] \"%s\"\n",
+ source_desc->string.length,
+ source_desc->string.pointer);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+
+ acpi_os_printf("[Contains 0x%.2X Elements]\n",
+ source_desc->package.count);
+
+ /* Output the entire contents of the package */
+
+ for (i = 0; i < source_desc->package.count; i++) {
+ acpi_ex_do_debug_object(source_desc->package.
+ elements[i], level + 4, i + 1);
+ }
+ break;
+
+ case ACPI_TYPE_LOCAL_REFERENCE:
+
+ acpi_os_printf("[%s] ",
+ acpi_ut_get_reference_name(source_desc));
+
+ /* Decode the reference */
+
+ switch (source_desc->reference.class) {
+ case ACPI_REFCLASS_INDEX:
+
+ acpi_os_printf("0x%X\n", source_desc->reference.value);
+ break;
+
+ case ACPI_REFCLASS_TABLE:
+
+ /* Case for ddb_handle */
+
+ acpi_os_printf("Table Index 0x%X\n",
+ source_desc->reference.value);
+ return;
+
+ default:
+ break;
+ }
+
+ acpi_os_printf(" ");
+
+ /* Check for valid node first, then valid object */
+
+ if (source_desc->reference.node) {
+ if (ACPI_GET_DESCRIPTOR_TYPE
+ (source_desc->reference.node) !=
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf
+ (" %p - Not a valid namespace node\n",
+ source_desc->reference.node);
+ } else {
+ acpi_os_printf("Node %p [%4.4s] ",
+ source_desc->reference.node,
+ (source_desc->reference.node)->
+ name.ascii);
+
+ switch ((source_desc->reference.node)->type) {
+
+ /* These types have no attached object */
+
+ case ACPI_TYPE_DEVICE:
+ acpi_os_printf("Device\n");
+ break;
+
+ case ACPI_TYPE_THERMAL:
+ acpi_os_printf("Thermal Zone\n");
+ break;
+
+ default:
+ acpi_ex_do_debug_object((source_desc->
+ reference.
+ node)->object,
+ level + 4, 0);
+ break;
+ }
+ }
+ } else if (source_desc->reference.object) {
+ if (ACPI_GET_DESCRIPTOR_TYPE
+ (source_desc->reference.object) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_ex_do_debug_object(((struct
+ acpi_namespace_node *)
+ source_desc->reference.
+ object)->object,
+ level + 4, 0);
+ } else {
+ acpi_ex_do_debug_object(source_desc->reference.
+ object, level + 4, 0);
+ }
+ }
+ break;
+
+ default:
+
+ acpi_os_printf("%p\n", source_desc);
+ break;
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
+ return_VOID;
+}
+#endif
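Stores to the Debug object remain silent unless one of the two gates named in the header comment is open. A minimal sketch of enabling the output from C before evaluating AML (the method pathname is hypothetical):

#include <acpi/acpi.h>

/* Sketch: make Store(..., Debug) in AML print via acpi_os_printf */
static acpi_status example_enable_debug_object(void)
{
        /* Either gate is sufficient; both are checked in acpi_ex_do_debug_object */
        acpi_gbl_enable_aml_debug_object = TRUE;
        /* or: acpi_dbg_level |= ACPI_LV_DEBUG_OBJECT; */

        return acpi_evaluate_object(NULL, "\\_SB.EXMP", NULL, NULL);
}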
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 6c79fecbee42..f17d2ff0031b 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -281,7 +281,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
if (source_desc->buffer.length < length) {
ACPI_ERROR((AE_INFO,
- "SMBus or IPMI write requires Buffer of length %X, found length %X",
+ "SMBus or IPMI write requires Buffer of length %u, found length %u",
length, source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f68a216168be..a6dc26f0b3be 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -94,7 +94,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
/* We must have a valid region */
if (rgn_desc->common.type != ACPI_TYPE_REGION) {
- ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)",
+ ACPI_ERROR((AE_INFO, "Needed Region, found type 0x%X (%s)",
rgn_desc->common.type,
acpi_ut_get_object_type_name(rgn_desc)));
@@ -175,7 +175,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* byte, and a field with Dword access specified.
*/
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)",
+ "Field [%4.4s] access width (%u bytes) too large for region [%4.4s] (length %u)",
acpi_ut_get_node_name(obj_desc->
common_field.node),
obj_desc->common_field.access_byte_width,
@@ -189,7 +189,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* exceeds region length, indicate an error
*/
ACPI_ERROR((AE_INFO,
- "Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)",
+ "Field [%4.4s] Base+Offset+Width %u+%u+%u is beyond end of region [%4.4s] (length %u)",
acpi_ut_get_node_name(obj_desc->common_field.node),
obj_desc->common_field.base_byte_offset,
field_datum_byte_offset,
@@ -281,13 +281,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
ACPI_ERROR((AE_INFO,
- "Region %s(%X) not implemented",
+ "Region %s(0x%X) not implemented",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
} else if (status == AE_NOT_EXIST) {
ACPI_ERROR((AE_INFO,
- "Region %s(%X) has no handler",
+ "Region %s(0x%X) has no handler",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
@@ -525,7 +525,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X",
+ ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %u",
obj_desc->common.type));
status = AE_AML_INTERNAL;
break;
@@ -630,7 +630,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default:
ACPI_ERROR((AE_INFO,
- "Unknown UpdateRule value: %X",
+ "Unknown UpdateRule value: 0x%X",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
@@ -689,7 +689,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
if (buffer_length <
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
ACPI_ERROR((AE_INFO,
- "Field size %X (bits) is too large for buffer (%X)",
+ "Field size %u (bits) is too large for buffer (%u)",
obj_desc->common_field.bit_length, buffer_length));
return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index c5bb1eeed2df..95db4be0877b 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -99,7 +99,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -115,7 +115,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
default:
- ACPI_ERROR((AE_INFO, "Invalid descriptor type %X",
+ ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X",
ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
return_ACPI_STATUS(AE_TYPE);
}
@@ -276,7 +276,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
operand0->common.type));
status = AE_AML_INTERNAL;
}
@@ -378,7 +378,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
/* Invalid object type, should not happen here */
- ACPI_ERROR((AE_INFO, "Invalid object type: %X",
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
operand0->common.type));
status = AE_AML_INTERNAL;
goto cleanup;
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 7116bc86494d..f73be97043c0 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -85,10 +85,10 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
(obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
/*
- * Migrate the previous sync level associated with this mutex to the
- * previous mutex on the list so that it may be preserved. This handles
- * the case where several mutexes have been acquired at the same level,
- * but are not released in opposite order.
+ * Migrate the previous sync level associated with this mutex to
+ * the previous mutex on the list so that it may be preserved.
+ * This handles the case where several mutexes have been acquired
+ * at the same level, but are not released in opposite order.
*/
(obj_desc->mutex.prev)->mutex.original_sync_level =
obj_desc->mutex.original_sync_level;
@@ -101,8 +101,8 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
*
* FUNCTION: acpi_ex_link_mutex
*
- * PARAMETERS: obj_desc - The mutex to be linked
- * Thread - Current executing thread object
+ * PARAMETERS: obj_desc - The mutex to be linked
+ * Thread - Current executing thread object
*
* RETURN: None
*
@@ -138,9 +138,9 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_acquire_mutex_object
*
- * PARAMETERS: time_desc - Timeout in milliseconds
+ * PARAMETERS: Timeout - Timeout in milliseconds
* obj_desc - Mutex object
- * Thread - Current thread state
+ * thread_id - Current thread state
*
* RETURN: Status
*
@@ -234,7 +234,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Must have a valid thread ID */
+ /* Must have a valid thread state struct */
if (!walk_state->thread) {
ACPI_ERROR((AE_INFO,
@@ -249,7 +249,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
*/
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)",
+ "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)",
acpi_ut_get_node_name(obj_desc->mutex.node),
walk_state->thread->current_sync_level));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
@@ -359,6 +359,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
{
acpi_status status = AE_OK;
u8 previous_sync_level;
+ struct acpi_thread_state *owner_thread;
ACPI_FUNCTION_TRACE(ex_release_mutex);
@@ -366,9 +367,11 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ owner_thread = obj_desc->mutex.owner_thread;
+
/* The mutex must have been previously acquired in order to release it */
- if (!obj_desc->mutex.owner_thread) {
+ if (!owner_thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
@@ -387,16 +390,13 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* The Mutex is owned, but this thread must be the owner.
* Special case for Global Lock, any thread can release
*/
- if ((obj_desc->mutex.owner_thread->thread_id !=
- walk_state->thread->thread_id)
- && (obj_desc != acpi_gbl_global_lock_mutex)) {
+ if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
+ (obj_desc != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
"Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
ACPI_CAST_PTR(void, walk_state->thread->thread_id),
acpi_ut_get_node_name(obj_desc->mutex.node),
- ACPI_CAST_PTR(void,
- obj_desc->mutex.owner_thread->
- thread_id)));
+ ACPI_CAST_PTR(void, owner_thread->thread_id)));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
@@ -407,10 +407,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* different level can only mean that the mutex ordering rule is being
* violated. This behavior is clarified in ACPI 4.0 specification.
*/
- if (obj_desc->mutex.sync_level !=
- walk_state->thread->current_sync_level) {
+ if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
+ "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u",
acpi_ut_get_node_name(obj_desc->mutex.node),
obj_desc->mutex.sync_level,
walk_state->thread->current_sync_level));
@@ -423,7 +422,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* acquired, but are not released in reverse order.
*/
previous_sync_level =
- walk_state->thread->acquired_mutex_list->mutex.original_sync_level;
+ owner_thread->acquired_mutex_list->mutex.original_sync_level;
status = acpi_ex_release_mutex_object(obj_desc);
if (ACPI_FAILURE(status)) {
@@ -434,8 +433,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* Restore the previous sync_level */
- walk_state->thread->current_sync_level = previous_sync_level;
+ owner_thread->current_sync_level = previous_sync_level;
}
+
return_ACPI_STATUS(status);
}
@@ -443,7 +443,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_release_all_mutexes
*
- * PARAMETERS: Thread - Current executing thread object
+ * PARAMETERS: Thread - Current executing thread object
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 679f308c5a89..d11e539ef763 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -102,7 +102,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
name_string = ACPI_ALLOCATE(size_needed);
if (!name_string) {
ACPI_ERROR((AE_INFO,
- "Could not allocate size %d", size_needed));
+ "Could not allocate size %u", size_needed));
return_PTR(NULL);
}
@@ -216,7 +216,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
*/
status = AE_AML_BAD_NAME;
ACPI_ERROR((AE_INFO,
- "Bad character %02x in name, at %p",
+ "Bad character 0x%02x in name, at %p",
*aml_address, aml_address));
}
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 99adbab5acbf..84e4d185aa25 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -110,7 +110,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -173,7 +173,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
case AML_SLEEP_OP: /* Sleep (msec_time) */
- status = acpi_ex_system_do_suspend(operand[0]->integer.value);
+ status = acpi_ex_system_do_sleep(operand[0]->integer.value);
break;
case AML_STALL_OP: /* Stall (usec_time) */
@@ -189,7 +189,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -229,7 +229,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -399,7 +399,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
if (digit > 0) {
ACPI_ERROR((AE_INFO,
- "Integer too large to convert to BCD: %8.8X%8.8X",
+ "Integer too large to convert to BCD: 0x%8.8X%8.8X",
ACPI_FORMAT_UINT64(operand[0]->
integer.value)));
status = AE_AML_NUMERIC_OVERFLOW;
@@ -540,7 +540,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
default: /* Unknown opcode */
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -979,7 +979,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown Index TargetType %X in reference object %p",
+ "Unknown Index TargetType 0x%X in reference object %p",
operand[0]->reference.
target_type, operand[0]));
status = AE_AML_OPERAND_TYPE;
@@ -1007,7 +1007,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown class in reference(%p) - %2.2X",
+ "Unknown class in reference(%p) - 0x%2.2X",
operand[0],
operand[0]->reference.class));
@@ -1019,7 +1019,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 22841bbbe63c..10e104cf0fb9 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -119,33 +119,6 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
status = AE_AML_OPERAND_TYPE;
break;
}
-#ifdef ACPI_GPE_NOTIFY_CHECK
- /*
- * GPE method wake/notify check. Here, we want to ensure that we
- * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
- * GPE method during system runtime. If we do, the GPE is marked
- * as "wake-only" and disabled.
- *
- * 1) Is the Notify() value == device_wake?
- * 2) Is this a GPE deferred method? (An _Lxx or _Exx method)
- * 3) Did the original GPE happen at system runtime?
- * (versus during wake)
- *
- * If all three cases are true, this is a wake-only GPE that should
- * be disabled at runtime.
- */
- if (value == 2) { /* device_wake */
- status =
- acpi_ev_check_for_wake_only_gpe(walk_state->
- gpe_event_info);
- if (ACPI_FAILURE(status)) {
-
- /* AE_WAKE_ONLY_GPE only error, means ignore this notify */
-
- return_ACPI_STATUS(AE_OK)
- }
- }
-#endif
/*
* Dispatch the notify to the appropriate handler
@@ -159,7 +132,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
}
@@ -224,7 +197,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -441,7 +414,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Index (%X%8.8X) is beyond end of object",
+ "Index (0x%8.8X%8.8X) is beyond end of object",
ACPI_FORMAT_UINT64(index)));
goto cleanup;
}
@@ -464,7 +437,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
@@ -572,7 +545,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 8bb1012ef44e..7a08d23befcd 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -119,7 +119,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
@@ -244,7 +244,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index f256b6a25f2e..4b50730cf9a0 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -245,7 +245,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
index = operand[5]->integer.value;
if (index >= operand[0]->package.count) {
ACPI_ERROR((AE_INFO,
- "Index (%X%8.8X) beyond package end (%X)",
+ "Index (0x%8.8X%8.8X) beyond package end (0x%X)",
ACPI_FORMAT_UINT64(index),
operand[0]->package.count));
status = AE_AML_PACKAGE_LIMIT;
@@ -314,7 +314,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
default:
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X",
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 2fbfe51fb141..25059dace0ad 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -275,7 +275,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
default:
/* Invalid field access type */
- ACPI_ERROR((AE_INFO, "Unknown field access type %X", access));
+ ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
return_UINT32(0);
}
@@ -430,7 +430,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
type = acpi_ns_get_type(info->region_node);
if (type != ACPI_TYPE_REGION) {
ACPI_ERROR((AE_INFO,
- "Needed Region, found type %X (%s)",
+ "Needed Region, found type 0x%X (%s)",
type, acpi_ut_get_type_name(type)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 486b2e5661b6..531000fc77d2 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -105,7 +105,7 @@ acpi_ex_system_memory_space_handler(u32 function,
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
+ ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u",
bit_width));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
@@ -173,7 +173,7 @@ acpi_ex_system_memory_space_handler(u32 function,
mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length);
if (!mem_info->mapped_logical_address) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X%8.8X, size %X",
+ "Could not map memory at 0x%8.8X%8.8X, size %u",
ACPI_FORMAT_NATIVE_UINT(address),
(u32) map_length));
mem_info->mapped_length = 0;
@@ -491,8 +491,10 @@ acpi_ex_data_table_space_handler(u32 function,
{
ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
- /* Perform the memory read or write */
-
+ /*
+ * Perform the memory read or write. The bit_width was already
+ * validated.
+ */
switch (function) {
case ACPI_READ:
@@ -502,9 +504,14 @@ acpi_ex_data_table_space_handler(u32 function,
break;
case ACPI_WRITE:
+
+ ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address),
+ ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
+ break;
+
default:
- return_ACPI_STATUS(AE_SUPPORT);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index fdc1b27999ef..1fa4289a687e 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -252,7 +252,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* No named references are allowed here */
ACPI_ERROR((AE_INFO,
- "Unsupported Reference type %X",
+ "Unsupported Reference type 0x%X",
source_desc->reference.class));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -264,7 +264,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* Default case is for unknown types */
ACPI_ERROR((AE_INFO,
- "Node %p - Unknown object type %X",
+ "Node %p - Unknown object type 0x%X",
node, entry_type));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index fdd6a7079b97..7ca35ea8acea 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -231,7 +231,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
/* Invalid reference object */
ACPI_ERROR((AE_INFO,
- "Unknown TargetType %X in Index/Reference object %p",
+ "Unknown TargetType 0x%X in Index/Reference object %p",
stack_desc->reference.target_type,
stack_desc));
status = AE_AML_INTERNAL;
@@ -273,8 +273,8 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference type %X in %p", ref_type,
- stack_desc));
+ "Unknown Reference type 0x%X in %p",
+ ref_type, stack_desc));
status = AE_AML_INTERNAL;
break;
}
@@ -403,7 +403,8 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
ACPI_DESC_TYPE_NAMED) {
- ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
+ ACPI_ERROR((AE_INFO,
+ "Not a namespace node %p [%s]",
node,
acpi_ut_get_descriptor_name(node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
@@ -507,7 +508,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference Class %2.2X",
+ "Unknown Reference Class 0x%2.2X",
obj_desc->reference.class));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index c5ecd615f145..8c97cfd6a0fd 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -153,7 +153,7 @@ acpi_ex_resolve_operands(u16 opcode,
arg_types = op_info->runtime_args;
if (arg_types == ARGI_INVALID_OPCODE) {
- ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", opcode));
+ ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", opcode));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -218,7 +218,7 @@ acpi_ex_resolve_operands(u16 opcode,
if (!acpi_ut_valid_object_type(object_type)) {
ACPI_ERROR((AE_INFO,
- "Bad operand object type [%X]",
+ "Bad operand object type [0x%X]",
object_type));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -253,7 +253,7 @@ acpi_ex_resolve_operands(u16 opcode,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Reference Class %2.2X in %p",
+ "Unknown Reference Class 0x%2.2X in %p",
obj_desc->reference.class,
obj_desc));
@@ -665,7 +665,7 @@ acpi_ex_resolve_operands(u16 opcode,
/* Unknown type */
ACPI_ERROR((AE_INFO,
- "Internal - Unknown ARGI (required operand) type %X",
+ "Internal - Unknown ARGI (required operand) type 0x%X",
this_arg_type));
return_ACPI_STATUS(AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 702b9ecfd44b..1624436ba4c5 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exstore - AML Interpreter object store support
@@ -53,10 +52,6 @@
ACPI_MODULE_NAME("exstore")
/* Local prototypes */
-static void
-acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
- u32 level, u32 index);
-
static acpi_status
acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
union acpi_operand_object *dest_desc,
@@ -64,215 +59,6 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
/*******************************************************************************
*
- * FUNCTION: acpi_ex_do_debug_object
- *
- * PARAMETERS: source_desc - Value to be stored
- * Level - Indentation level (used for packages)
- * Index - Current package element, zero if not pkg
- *
- * RETURN: None
- *
- * DESCRIPTION: Handles stores to the Debug Object.
- *
- ******************************************************************************/
-
-static void
-acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
- u32 level, u32 index)
-{
- u32 i;
-
- ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
-
- /* Print line header as long as we are not in the middle of an object display */
-
- if (!((level > 0) && index == 0)) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
- level, " "));
- }
-
- /* Display index for package output only */
-
- if (index > 0) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "(%.2u) ", index - 1));
- }
-
- if (!source_desc) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
- return_VOID;
- }
-
- if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
- acpi_ut_get_object_type_name
- (source_desc)));
-
- if (!acpi_ut_valid_internal_object(source_desc)) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "%p, Invalid Internal Object!\n",
- source_desc));
- return_VOID;
- }
- } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
- ACPI_DESC_TYPE_NAMED) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n",
- acpi_ut_get_type_name(((struct
- acpi_namespace_node
- *)source_desc)->
- type),
- source_desc));
- return_VOID;
- } else {
- return_VOID;
- }
-
- /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
-
- switch (source_desc->common.type) {
- case ACPI_TYPE_INTEGER:
-
- /* Output correct integer width */
-
- if (acpi_gbl_integer_byte_width == 4) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n",
- (u32) source_desc->integer.
- value));
- } else {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "0x%8.8X%8.8X\n",
- ACPI_FORMAT_UINT64(source_desc->
- integer.
- value)));
- }
- break;
-
- case ACPI_TYPE_BUFFER:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n",
- (u32) source_desc->buffer.length));
- ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
- (source_desc->buffer.length <
- 256) ? source_desc->buffer.length : 256);
- break;
-
- case ACPI_TYPE_STRING:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n",
- source_desc->string.length,
- source_desc->string.pointer));
- break;
-
- case ACPI_TYPE_PACKAGE:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "[Contains 0x%.2X Elements]\n",
- source_desc->package.count));
-
- /* Output the entire contents of the package */
-
- for (i = 0; i < source_desc->package.count; i++) {
- acpi_ex_do_debug_object(source_desc->package.
- elements[i], level + 4, i + 1);
- }
- break;
-
- case ACPI_TYPE_LOCAL_REFERENCE:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
- acpi_ut_get_reference_name(source_desc)));
-
- /* Decode the reference */
-
- switch (source_desc->reference.class) {
- case ACPI_REFCLASS_INDEX:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
- source_desc->reference.value));
- break;
-
- case ACPI_REFCLASS_TABLE:
-
- /* Case for ddb_handle */
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "Table Index 0x%X\n",
- source_desc->reference.value));
- return;
-
- default:
- break;
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " "));
-
- /* Check for valid node first, then valid object */
-
- if (source_desc->reference.node) {
- if (ACPI_GET_DESCRIPTOR_TYPE
- (source_desc->reference.node) !=
- ACPI_DESC_TYPE_NAMED) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- " %p - Not a valid namespace node\n",
- source_desc->reference.
- node));
- } else {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
- "Node %p [%4.4s] ",
- source_desc->reference.
- node,
- (source_desc->reference.
- node)->name.ascii));
-
- switch ((source_desc->reference.node)->type) {
-
- /* These types have no attached object */
-
- case ACPI_TYPE_DEVICE:
- acpi_os_printf("Device\n");
- break;
-
- case ACPI_TYPE_THERMAL:
- acpi_os_printf("Thermal Zone\n");
- break;
-
- default:
- acpi_ex_do_debug_object((source_desc->
- reference.
- node)->object,
- level + 4, 0);
- break;
- }
- }
- } else if (source_desc->reference.object) {
- if (ACPI_GET_DESCRIPTOR_TYPE
- (source_desc->reference.object) ==
- ACPI_DESC_TYPE_NAMED) {
- acpi_ex_do_debug_object(((struct
- acpi_namespace_node *)
- source_desc->reference.
- object)->object,
- level + 4, 0);
- } else {
- acpi_ex_do_debug_object(source_desc->reference.
- object, level + 4, 0);
- }
- }
- break;
-
- default:
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
- source_desc));
- break;
- }
-
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
- return_VOID;
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ex_store
*
* PARAMETERS: *source_desc - Value to be stored
@@ -402,12 +188,12 @@ acpi_ex_store(union acpi_operand_object *source_desc,
source_desc,
acpi_ut_get_object_type_name(source_desc)));
- acpi_ex_do_debug_object(source_desc, 0, 0);
+ ACPI_DEBUG_OBJECT(source_desc, 0, 0);
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X",
+ ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
ref_desc->reference.class));
ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
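The acpi_ex_do_debug_object body removed above (its call sites now go through the ACPI_DEBUG_OBJECT wrapper) is at heart a type-dispatching dump: print a one-line summary per object and recurse into package elements with a larger indent. A minimal standalone sketch of that pattern, using made-up types rather than the real ACPICA operand and namespace structures:

#include <stdio.h>

/* Hypothetical, simplified object model for illustration only */
enum obj_type { OBJ_INTEGER, OBJ_STRING, OBJ_PACKAGE };

struct obj {
        enum obj_type type;
        unsigned long long integer;
        const char *string;
        struct obj **elements;          /* package contents */
        unsigned int count;
};

/* Mirrors the shape of the removed dump routine: switch on the object
 * type, print a one-line summary, and recurse into packages with a
 * larger indentation level. */
static void dump_debug_object(const struct obj *o, unsigned int level)
{
        unsigned int i;

        if (!o) {
                printf("%*s[Null Object]\n", level, "");
                return;
        }

        switch (o->type) {
        case OBJ_INTEGER:
                printf("%*sInteger 0x%8.8llX\n", level, "", o->integer);
                break;
        case OBJ_STRING:
                printf("%*sString \"%s\"\n", level, "", o->string);
                break;
        case OBJ_PACKAGE:
                printf("%*sPackage [%u elements]\n", level, "", o->count);
                for (i = 0; i < o->count; i++)
                        dump_debug_object(o->elements[i], level + 4);
                break;
        }
}

The real routine additionally distinguishes operand objects from namespace nodes and decodes reference classes, as the removed lines show.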
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index e11b6cb42a57..6d32e09327f1 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -170,7 +170,7 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
* (ACPI specifies 100 usec as max, but this gives some slack in
* order to support existing BIOSs)
*/
- ACPI_ERROR((AE_INFO, "Time parameter is too large (%d)",
+ ACPI_ERROR((AE_INFO, "Time parameter is too large (%u)",
how_long));
status = AE_AML_OPERAND_VALUE;
} else {
@@ -182,18 +182,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
/*******************************************************************************
*
- * FUNCTION: acpi_ex_system_do_suspend
+ * FUNCTION: acpi_ex_system_do_sleep
*
- * PARAMETERS: how_long - The amount of time to suspend,
+ * PARAMETERS: how_long - The amount of time to sleep,
* in milliseconds
*
* RETURN: None
*
- * DESCRIPTION: Suspend running thread for specified amount of time.
+ * DESCRIPTION: Sleep the running thread for specified amount of time.
*
******************************************************************************/
-acpi_status acpi_ex_system_do_suspend(u64 how_long)
+acpi_status acpi_ex_system_do_sleep(u64 how_long)
{
ACPI_FUNCTION_ENTRY();
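The do_suspend to do_sleep rename, next to the Stall limit check above, marks the split the AML interpreter has to honor: Stall is a short busy-wait bounded to roughly the 100 microsecond spec limit plus some slack, while Sleep yields the processor for millisecond-scale waits. A standalone sketch of that split, with usleep/nanosleep standing in for the OS services layer and MAX_STALL_US as an assumed cap:

#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define MAX_STALL_US 255        /* assumed cap; the spec limit is 100 us plus slack */

/* Busy-wait style delay for very short intervals (AML Stall). */
static int do_stall(unsigned int microseconds)
{
        if (microseconds > MAX_STALL_US) {
                fprintf(stderr, "Time parameter is too large (%u)\n", microseconds);
                return -EINVAL;
        }
        usleep(microseconds);   /* stand-in for a true busy-wait */
        return 0;
}

/* Scheduler-friendly delay for longer intervals (AML Sleep). */
static int do_sleep(unsigned long long milliseconds)
{
        struct timespec ts = {
                .tv_sec = milliseconds / 1000,
                .tv_nsec = (milliseconds % 1000) * 1000000L,
        };
        return nanosleep(&ts, NULL);
}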
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index ec7fc227b33f..5d1273b660ae 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -299,7 +299,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
ACPI_FUNCTION_ENTRY();
if (register_id > ACPI_BITREG_MAX) {
- ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
+ ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: 0x%X",
register_id));
return (NULL);
}
@@ -413,7 +413,7 @@ acpi_hw_register_read(u32 register_id, u32 * return_value)
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
+ ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
status = AE_BAD_PARAMETER;
break;
}
@@ -549,7 +549,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
break;
default:
- ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id));
+ ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
status = AE_BAD_PARAMETER;
break;
}
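The format-string churn throughout this patch follows a single convention: anything printed in hexadecimal carries an explicit 0x prefix, and plain counts or lengths move from %X or %d to %u. A trivial illustration, with printf standing in for the ACPI_ERROR/ACPI_WARNING macros:

#include <stdio.h>

int main(void)
{
        unsigned int register_id = 0x1A;
        unsigned int length = 42;

        /* Hex values are always prefixed so they cannot be misread as decimal */
        printf("Unknown Register ID: 0x%X\n", register_id);

        /* Quantities that are really decimal counts use %u instead of %X or %d */
        printf("Invalid length for PM1A: %u\n", length);
        return 0;
}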
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 5e6d4dbb8024..36eb803dd9d0 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -245,7 +245,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
- ACPI_ERROR((AE_INFO, "Sleep values out of range: A=%X B=%X",
+ ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index e26c17d4b716..c10d587c1641 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -150,7 +150,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
if (last_address > ACPI_UINT16_MAX) {
ACPI_ERROR((AE_INFO,
- "Illegal I/O port address/length above 64K: 0x%p/%X",
+ "Illegal I/O port address/length above 64K: %p/0x%X",
ACPI_CAST_PTR(void, address), byte_width));
return_ACPI_STATUS(AE_LIMIT);
}
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index aa2b80132d0a..3a2814676ac3 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -222,7 +222,7 @@ acpi_status acpi_ns_root_initialize(void)
default:
ACPI_ERROR((AE_INFO,
- "Unsupported initial type value %X",
+ "Unsupported initial type value 0x%X",
init_val->type));
acpi_ut_remove_reference(obj_desc);
obj_desc = NULL;
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 0689d36638d9..2110cc2360f0 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -205,8 +205,8 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
/* Check the node type and name */
if (type > ACPI_TYPE_LOCAL_MAX) {
- ACPI_WARNING((AE_INFO, "Invalid ACPI Object Type %08X",
- type));
+ ACPI_WARNING((AE_INFO,
+ "Invalid ACPI Object Type 0x%08X", type));
}
if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 959372451635..7dea0031605c 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -107,7 +107,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
if (index != 0) {
ACPI_ERROR((AE_INFO,
- "Could not construct external pathname; index=%X, size=%X, Path=%s",
+ "Could not construct external pathname; index=%u, size=%u, Path=%s",
(u32) index, (u32) size, &name_buffer[size]));
return (AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 08f8b3f5ccaa..a8e42b5e9463 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -311,7 +311,7 @@ acpi_ns_search_and_enter(u32 target_name,
if (!node || !target_name || !return_node) {
ACPI_ERROR((AE_INFO,
- "Null parameter: Node %p Name %X ReturnNode %p",
+ "Null parameter: Node %p Name 0x%X ReturnNode %p",
node, target_name, return_node));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 24d05a87a2a3..bab559712da1 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -276,7 +276,7 @@ u32 acpi_ns_local(acpi_object_type type)
/* Type code out of range */
- ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
return_UINT32(ACPI_NS_NORMAL);
}
@@ -764,7 +764,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
/* type code out of range */
- ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
+ ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
return_UINT32(ACPI_NS_NORMAL);
}
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 00493e108a01..7df1a4c95274 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -460,7 +460,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType 0x%X", arg_type));
return_VOID;
}
@@ -742,7 +742,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType: 0x%X", arg_type));
status = AE_AML_OPERAND_TYPE;
break;
}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 59aabaeab1d3..2f2e7760938c 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -136,7 +136,7 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
/* The opcode is unrecognized. Just skip unknown opcodes */
ACPI_ERROR((AE_INFO,
- "Found unknown opcode %X at AML address %p offset %X, ignoring",
+ "Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring",
walk_state->opcode, walk_state->parser_state.aml,
walk_state->aml_offset));
@@ -1021,7 +1021,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (status == AE_AML_NO_RETURN_VALUE) {
ACPI_EXCEPTION((AE_INFO, status,
"Invoked method did not return a value"));
-
}
ACPI_EXCEPTION((AE_INFO, status,
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 6064dd4e94c2..c42f067cff9d 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -46,6 +46,7 @@
#include "acparser.h"
#include "acdispat.h"
#include "acinterp.h"
+#include "actables.h"
#include "amlcode.h"
#define _COMPONENT ACPI_PARSER
@@ -220,6 +221,10 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
ACPI_FUNCTION_TRACE(ps_execute_method);
+ /* Quick validation of DSDT header */
+
+ acpi_tb_check_dsdt_header();
+
/* Validate the Info and method Node */
if (!info || !info->resolved_node) {
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index f2ee3b548609..c80a2eea3a01 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -212,7 +212,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if ((*top_object_list)->common.type != ACPI_TYPE_PACKAGE) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X]) Need sub-package, found %s",
+ "(PRT[%u]) Need sub-package, found %s",
index,
acpi_ut_get_object_type_name
(*top_object_list)));
@@ -223,7 +223,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if ((*top_object_list)->package.count != 4) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X]) Need package of length 4, found length %d",
+ "(PRT[%u]) Need package of length 4, found length %u",
index, (*top_object_list)->package.count));
return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT);
}
@@ -240,7 +240,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[0];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Address) Need Integer, found %s",
+ "(PRT[%u].Address) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -253,7 +253,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[1];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Pin) Need Integer, found %s",
+ "(PRT[%u].Pin) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -289,7 +289,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
if (obj_desc->reference.class !=
ACPI_REFCLASS_NAME) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Source) Need name, found Reference Class %X",
+ "(PRT[%u].Source) Need name, found Reference Class 0x%X",
index,
obj_desc->reference.class));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -340,7 +340,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
default:
ACPI_ERROR((AE_INFO,
- "(PRT[%X].Source) Need Ref/String/Integer, found %s",
+ "(PRT[%u].Source) Need Ref/String/Integer, found %s",
index,
acpi_ut_get_object_type_name
(obj_desc)));
@@ -358,7 +358,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
obj_desc = sub_object_list[3];
if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].SourceIndex) Need Integer, found %s",
+ "(PRT[%u].SourceIndex) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
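The checks above spell out the _PRT contract: every element of the returned package must itself be a four-element package of Address (Integer), Pin (Integer), Source (Reference, String, or Integer) and SourceIndex (Integer). A standalone sketch of that per-entry validation, using made-up types in place of the ACPICA operand objects:

#include <stdio.h>

enum prt_type { PRT_INTEGER, PRT_STRING, PRT_REFERENCE, PRT_PACKAGE };

struct prt_obj {
        enum prt_type type;
        unsigned int count;             /* valid for PRT_PACKAGE */
        struct prt_obj **elements;      /* valid for PRT_PACKAGE */
};

/* Returns 0 if the sub-package has the shape required for one _PRT entry. */
static int validate_prt_entry(const struct prt_obj *entry, unsigned int index)
{
        if (entry->type != PRT_PACKAGE || entry->count != 4) {
                fprintf(stderr, "(PRT[%u]) Need package of length 4\n", index);
                return -1;
        }
        if (entry->elements[0]->type != PRT_INTEGER ||  /* Address */
            entry->elements[1]->type != PRT_INTEGER ||  /* Pin */
            entry->elements[3]->type != PRT_INTEGER) {  /* SourceIndex */
                fprintf(stderr, "(PRT[%u]) Address/Pin/SourceIndex must be Integers\n", index);
                return -1;
        }
        switch (entry->elements[2]->type) {             /* Source */
        case PRT_REFERENCE:
        case PRT_STRING:
        case PRT_INTEGER:
                return 0;
        default:
                fprintf(stderr, "(PRT[%u].Source) Need Ref/String/Integer\n", index);
                return -1;
        }
}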
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index fd057c72d252..7335f22aac20 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -94,7 +94,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
[resource_index]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not convert AML resource (Type %X)",
+ "Could not convert AML resource (Type 0x%X)",
*aml));
return_ACPI_STATUS(status);
}
@@ -147,7 +147,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
ACPI_ERROR((AE_INFO,
- "Invalid descriptor type (%X) in resource list",
+ "Invalid descriptor type (0x%X) in resource list",
resource->type));
return_ACPI_STATUS(AE_BAD_DATA);
}
@@ -161,7 +161,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
[resource->type]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "Could not convert resource (type %X) to AML",
+ "Could not convert resource (type 0x%X) to AML",
resource->type));
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 07de352fa443..f8cd9e87d987 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -88,7 +88,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
/* Each internal resource struct is expected to be 32-bit aligned */
ACPI_WARNING((AE_INFO,
- "Misaligned resource pointer (get): %p Type %2.2X Len %X",
+ "Misaligned resource pointer (get): %p Type 0x%2.2X Length %u",
resource, resource->type, resource->length));
}
@@ -541,7 +541,7 @@ if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
* "IRQ Format"), so 0x00 and 0x09 are illegal.
*/
ACPI_ERROR((AE_INFO,
- "Invalid interrupt polarity/trigger in resource list, %X",
+ "Invalid interrupt polarity/trigger in resource list, 0x%X",
aml->irq.flags));
return_ACPI_STATUS(AE_BAD_DATA);
}
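The rejected values come from the mask in the surrounding code: bits 0 and 3 of the AML IRQ descriptor flags carry trigger mode and polarity, and only two of the four combinations are legal, so (flags & 0x09) equal to 0x00 or 0x09 is refused. A small self-contained check that mirrors the rule:

#include <stdbool.h>
#include <stdio.h>

/*
 * Bits 0 and 3 of the AML IRQ descriptor flags encode trigger mode and
 * polarity. Only two of the four combinations are legal, which is why
 * the driver rejects (flags & 0x09) == 0x00 and == 0x09.
 */
static bool irq_flags_valid(unsigned char flags)
{
        unsigned char mode_polarity = flags & 0x09;

        return mode_polarity != 0x00 && mode_polarity != 0x09;
}

int main(void)
{
        unsigned char f;

        for (f = 0; f < 0x10; f++)
                printf("flags 0x%02X -> %s\n", f,
                       irq_flags_valid(f) ? "accepted" : "rejected");
        return 0;
}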
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index f43fbe0fc3fc..1728cb9bf600 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -283,7 +283,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
"FADT (revision %u) is longer than ACPI 2.0 version, "
- "truncating length 0x%X to 0x%X",
+ "truncating length %u to %u",
table->revision, length,
(u32)sizeof(struct acpi_table_fadt)));
}
@@ -422,7 +422,7 @@ static void acpi_tb_convert_fadt(void)
if (address64->address && address32 &&
(address64->address != (u64) address32)) {
ACPI_ERROR((AE_INFO,
- "32/64X address mismatch in %s: %8.8X/%8.8X%8.8X, using 32",
+ "32/64X address mismatch in %s: 0x%8.8X/0x%8.8X%8.8X, using 32",
fadt_info_table[i].name, address32,
ACPI_FORMAT_UINT64(address64->address)));
}
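The FADT conversion compares each legacy 32-bit address field against its extended 64-bit "X" counterpart and warns when both are populated but disagree. A minimal mimic of the detection; which value is then used is decided outside this hunk, so the sketch only reports the conflict:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Detect the condition the hunk above warns about: both the legacy
 * 32-bit FADT field and its extended 64-bit "X" counterpart are
 * populated, but they disagree. */
static bool fadt_address_mismatch(uint32_t addr32, uint64_t addr64,
                                  const char *name)
{
        if (addr64 && addr32 && addr64 != (uint64_t)addr32) {
                fprintf(stderr,
                        "32/64X address mismatch in %s: 0x%X/0x%llX, using 32\n",
                        name, (unsigned)addr32, (unsigned long long)addr64);
                return true;
        }
        return false;
}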
@@ -481,7 +481,7 @@ static void acpi_tb_validate_fadt(void)
(acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
ACPI_WARNING((AE_INFO,
"32/64X FACS address mismatch in FADT - "
- "%8.8X/%8.8X%8.8X, using 32",
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
acpi_gbl_FADT.facs,
ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
@@ -492,7 +492,7 @@ static void acpi_tb_validate_fadt(void)
(acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
ACPI_WARNING((AE_INFO,
"32/64X DSDT address mismatch in FADT - "
- "%8.8X/%8.8X%8.8X, using 32",
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
acpi_gbl_FADT.dsdt,
ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
@@ -521,7 +521,7 @@ static void acpi_tb_validate_fadt(void)
if (address64->address &&
(address64->bit_width != ACPI_MUL_8(length))) {
ACPI_WARNING((AE_INFO,
- "32/64X length mismatch in %s: %d/%d",
+ "32/64X length mismatch in %s: %u/%u",
name, ACPI_MUL_8(length),
address64->bit_width));
}
@@ -534,7 +534,7 @@ static void acpi_tb_validate_fadt(void)
if (!address64->address || !length) {
ACPI_ERROR((AE_INFO,
"Required field %s has zero address and/or length:"
- " %8.8X%8.8X/%X",
+ " 0x%8.8X%8.8X/0x%X",
name,
ACPI_FORMAT_UINT64(address64->
address),
@@ -550,7 +550,7 @@ static void acpi_tb_validate_fadt(void)
(!address64->address && length)) {
ACPI_WARNING((AE_INFO,
"Optional field %s has zero address or length: "
- "%8.8X%8.8X/%X",
+ "0x%8.8X%8.8X/0x%X",
name,
ACPI_FORMAT_UINT64(address64->
address),
@@ -600,7 +600,7 @@ static void acpi_tb_setup_fadt_registers(void)
(fadt_info_table[i].default_length !=
target64->bit_width)) {
ACPI_WARNING((AE_INFO,
- "Invalid length for %s: %d, using default %d",
+ "Invalid length for %s: %u, using default %u",
fadt_info_table[i].name,
target64->bit_width,
fadt_info_table[i].
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index e252180ce61c..989d5c867864 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -83,7 +83,7 @@ acpi_tb_find_table(char *signature,
/* Search for the table */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
header.signature, ACPI_NAME_SIZE)) {
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 7ec02b0f69e0..83d7af8d0905 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -137,7 +137,7 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
/* Check if table is already registered */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (!acpi_gbl_root_table_list.tables[i].pointer) {
status =
acpi_tb_verify_table(&acpi_gbl_root_table_list.
@@ -273,7 +273,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Increase the Table Array size */
tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
- size +
+ max_table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT) *
sizeof(struct acpi_table_desc));
if (!tables) {
@@ -286,8 +286,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
if (acpi_gbl_root_table_list.tables) {
ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
- (acpi_size) acpi_gbl_root_table_list.size *
- sizeof(struct acpi_table_desc));
+ (acpi_size) acpi_gbl_root_table_list.
+ max_table_count * sizeof(struct acpi_table_desc));
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -295,8 +295,9 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
- acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
+ acpi_gbl_root_table_list.max_table_count +=
+ ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.flags |= (u8)ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
}
@@ -321,38 +322,36 @@ acpi_tb_store_table(acpi_physical_address address,
struct acpi_table_header *table,
u32 length, u8 flags, u32 *table_index)
{
- acpi_status status = AE_OK;
+ acpi_status status;
+ struct acpi_table_desc *new_table;
/* Ensure that there is room for the table in the Root Table List */
- if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
+ if (acpi_gbl_root_table_list.current_table_count >=
+ acpi_gbl_root_table_list.max_table_count) {
status = acpi_tb_resize_root_table_list();
if (ACPI_FAILURE(status)) {
return (status);
}
}
+ new_table =
+ &acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
+ current_table_count];
+
/* Initialize added table */
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- address = address;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- pointer = table;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
- length;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- owner_id = 0;
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
- flags;
-
- ACPI_MOVE_32_TO_32(&
- (acpi_gbl_root_table_list.
- tables[acpi_gbl_root_table_list.count].signature),
- table->signature);
-
- *table_index = acpi_gbl_root_table_list.count;
- acpi_gbl_root_table_list.count++;
- return (status);
+ new_table->address = address;
+ new_table->pointer = table;
+ new_table->length = length;
+ new_table->owner_id = 0;
+ new_table->flags = flags;
+
+ ACPI_MOVE_32_TO_32(&new_table->signature, table->signature);
+
+ *table_index = acpi_gbl_root_table_list.current_table_count;
+ acpi_gbl_root_table_list.current_table_count++;
+ return (AE_OK);
}
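The rewritten acpi_tb_store_table is the usual append-to-a-growable-array shape: when current_table_count reaches max_table_count, resize by a fixed increment first, then fill the next slot through a local pointer instead of repeating the long array expression. A standalone mimic with illustrative names; realloc stands in for the allocate-copy-free dance the real resize performs:

#include <stdlib.h>

#define TABLE_LIST_INCREMENT 4  /* stand-in for ACPI_ROOT_TABLE_SIZE_INCREMENT */

struct table_desc {
        unsigned long address;
        void *pointer;
        unsigned int length;
        unsigned char flags;
};

struct table_list {
        struct table_desc *tables;
        unsigned int current_table_count;       /* slots in use */
        unsigned int max_table_count;           /* slots allocated */
};

static int table_list_resize(struct table_list *list)
{
        unsigned int new_max = list->max_table_count + TABLE_LIST_INCREMENT;
        struct table_desc *tables;

        tables = realloc(list->tables, new_max * sizeof(*tables));
        if (!tables)
                return -1;

        list->tables = tables;
        list->max_table_count = new_max;
        return 0;
}

/* Append one descriptor, growing the array by a fixed increment if full. */
static int table_list_store(struct table_list *list, unsigned long address,
                            void *pointer, unsigned int length,
                            unsigned char flags, unsigned int *table_index)
{
        struct table_desc *new_table;

        if (list->current_table_count >= list->max_table_count &&
            table_list_resize(list))
                return -1;

        new_table = &list->tables[list->current_table_count];
        new_table->address = address;
        new_table->pointer = pointer;
        new_table->length = length;
        new_table->flags = flags;

        *table_index = list->current_table_count++;
        return 0;
}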
/*******************************************************************************
@@ -408,7 +407,7 @@ void acpi_tb_terminate(void)
/* Delete the individual tables */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
}
@@ -422,7 +421,7 @@ void acpi_tb_terminate(void)
acpi_gbl_root_table_list.tables = NULL;
acpi_gbl_root_table_list.flags = 0;
- acpi_gbl_root_table_list.count = 0;
+ acpi_gbl_root_table_list.current_table_count = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
@@ -452,7 +451,7 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index)
return_ACPI_STATUS(status);
}
- if (table_index >= acpi_gbl_root_table_list.count) {
+ if (table_index >= acpi_gbl_root_table_list.current_table_count) {
/* The table index does not exist */
@@ -505,7 +504,7 @@ acpi_status acpi_tb_allocate_owner_id(u32 table_index)
ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
status = acpi_ut_allocate_owner_id
(&(acpi_gbl_root_table_list.tables[table_index].owner_id));
}
@@ -533,7 +532,7 @@ acpi_status acpi_tb_release_owner_id(u32 table_index)
ACPI_FUNCTION_TRACE(tb_release_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
acpi_ut_release_owner_id(&
(acpi_gbl_root_table_list.
tables[table_index].owner_id));
@@ -564,7 +563,7 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
ACPI_FUNCTION_TRACE(tb_get_owner_id);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
*owner_id =
acpi_gbl_root_table_list.tables[table_index].owner_id;
status = AE_OK;
@@ -589,7 +588,7 @@ u8 acpi_tb_is_table_loaded(u32 table_index)
u8 is_loaded = FALSE;
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
is_loaded = (u8)
(acpi_gbl_root_table_list.tables[table_index].flags &
ACPI_TABLE_IS_LOADED);
@@ -616,7 +615,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded)
{
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (table_index < acpi_gbl_root_table_list.count) {
+ if (table_index < acpi_gbl_root_table_list.current_table_count) {
if (is_loaded) {
acpi_gbl_root_table_list.tables[table_index].flags |=
ACPI_TABLE_IS_LOADED;
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 02723a9fb10c..34f9c2bc5e1f 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -158,7 +158,7 @@ acpi_status acpi_tb_initialize_facs(void)
u8 acpi_tb_tables_loaded(void)
{
- if (acpi_gbl_root_table_list.count >= 3) {
+ if (acpi_gbl_root_table_list.current_table_count >= 3) {
return (TRUE);
}
@@ -309,7 +309,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
if (checksum) {
ACPI_WARNING((AE_INFO,
- "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
+ "Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X",
table->signature, table->checksum,
(u8) (table->checksum - checksum)));
@@ -349,6 +349,84 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length)
/*******************************************************************************
*
+ * FUNCTION: acpi_tb_check_dsdt_header
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Quick compare to check validity of the DSDT. This will detect
+ * if the DSDT has been replaced from outside the OS and/or if
+ * the DSDT header has been corrupted.
+ *
+ ******************************************************************************/
+
+void acpi_tb_check_dsdt_header(void)
+{
+
+ /* Compare original length and checksum to current values */
+
+ if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
+ acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
+ ACPI_ERROR((AE_INFO,
+ "The DSDT has been corrupted or replaced - old, new headers below"));
+ acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
+ acpi_tb_print_table_header(0, acpi_gbl_DSDT);
+
+ ACPI_ERROR((AE_INFO,
+ "Please send DMI info to linux-acpi@vger.kernel.org\n"
+ "If system does not work as expected, please boot with acpi=copy_dsdt"));
+
+ /* Disable further error messages */
+
+ acpi_gbl_original_dsdt_header.length = acpi_gbl_DSDT->length;
+ acpi_gbl_original_dsdt_header.checksum =
+ acpi_gbl_DSDT->checksum;
+ }
+}
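acpi_tb_check_dsdt_header compares a snapshot of the DSDT header, taken when the table was loaded, against the live header; a changed length or checksum means something rewrote or swapped the table behind the OS. A self-contained mimic of the comparison, using a reduced header structure:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reduced stand-in for struct acpi_table_header */
struct table_header {
        char signature[4];
        uint32_t length;
        uint8_t checksum;
};

static struct table_header original_dsdt_header;       /* snapshot at load time */

void snapshot_dsdt_header(const struct table_header *dsdt)
{
        memcpy(&original_dsdt_header, dsdt, sizeof(original_dsdt_header));
}

/* Cheap per-use validity check: only length and checksum are compared,
 * which is enough to notice a replaced or corrupted table. */
void check_dsdt_header(const struct table_header *dsdt)
{
        if (original_dsdt_header.length != dsdt->length ||
            original_dsdt_header.checksum != dsdt->checksum) {
                fprintf(stderr,
                        "DSDT corrupted or replaced (length %u -> %u)\n",
                        (unsigned)original_dsdt_header.length,
                        (unsigned)dsdt->length);

                /* Re-snapshot so the complaint is printed only once */
                snapshot_dsdt_header(dsdt);
        }
}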
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_copy_dsdt
+ *
+ * PARAMETERS: table_desc - Installed table to copy
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Implements a subsystem option to copy the DSDT to local memory.
+ * Some very bad BIOSs are known to either corrupt the DSDT or
+ * install a new, bad DSDT. This copy works around the problem.
+ *
+ ******************************************************************************/
+
+struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
+{
+ struct acpi_table_header *new_table;
+ struct acpi_table_desc *table_desc;
+
+ table_desc = &acpi_gbl_root_table_list.tables[table_index];
+
+ new_table = ACPI_ALLOCATE(table_desc->length);
+ if (!new_table) {
+ ACPI_ERROR((AE_INFO, "Could not copy DSDT of length 0x%X",
+ table_desc->length));
+ return (NULL);
+ }
+
+ ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length);
+ acpi_tb_delete_table(table_desc);
+ table_desc->pointer = new_table;
+ table_desc->flags = ACPI_TABLE_ORIGIN_ALLOCATED;
+
+ ACPI_INFO((AE_INFO,
+ "Forced DSDT copy: length 0x%05X copied locally, original unmapped",
+ new_table->length));
+
+ return (new_table);
+}
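acpi_tb_copy_dsdt is the workaround for firmware that modifies the DSDT after the OS has mapped it: allocate a private buffer, copy the mapped table into it, drop the original mapping and point the descriptor at the copy. A minimal standalone mimic, with malloc in place of the ACPI allocator and unmap step, and an assumed flag value:

#include <stdlib.h>
#include <string.h>

struct mapped_table {
        void *pointer;          /* currently mapped or allocated table */
        unsigned int length;
        unsigned char flags;
};

#define TABLE_ORIGIN_ALLOCATED 0x01     /* hypothetical flag value */

/* Replace a firmware-mapped table with a private, heap-allocated copy. */
static void *copy_table_locally(struct mapped_table *desc)
{
        void *new_table = malloc(desc->length);

        if (!new_table)
                return NULL;

        memcpy(new_table, desc->pointer, desc->length);

        /* The real code releases the old mapping here (acpi_tb_delete_table) */
        desc->pointer = new_table;
        desc->flags = TABLE_ORIGIN_ALLOCATED;
        return new_table;
}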
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_tb_install_table
*
* PARAMETERS: Address - Physical address of DSDT or FACS
@@ -496,7 +574,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
/* Will truncate 64-bit address to 32 bits, issue warning */
ACPI_WARNING((AE_INFO,
- "64-bit Physical Address in XSDT is too large (%8.8X%8.8X),"
+ "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
" truncating",
ACPI_FORMAT_UINT64(address64)));
}
@@ -629,14 +707,14 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
*/
table_entry =
ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
- acpi_gbl_root_table_list.count = 2;
+ acpi_gbl_root_table_list.current_table_count = 2;
/*
* Initialize the root table array from the RSDT/XSDT
*/
for (i = 0; i < table_count; i++) {
- if (acpi_gbl_root_table_list.count >=
- acpi_gbl_root_table_list.size) {
+ if (acpi_gbl_root_table_list.current_table_count >=
+ acpi_gbl_root_table_list.max_table_count) {
/* There is no more room in the root table array, attempt resize */
@@ -646,19 +724,20 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
"Truncating %u table entries!",
(unsigned) (table_count -
(acpi_gbl_root_table_list.
- count - 2))));
+ current_table_count -
+ 2))));
break;
}
}
/* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
- acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
- address =
+ acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
+ current_table_count].address =
acpi_tb_get_root_table_entry(table_entry, table_entry_size);
table_entry += table_entry_size;
- acpi_gbl_root_table_list.count++;
+ acpi_gbl_root_table_list.current_table_count++;
}
/*
@@ -671,7 +750,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
* Complete the initialization of the root table array by examining
* the header of each table
*/
- for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 2; i < acpi_gbl_root_table_list.current_table_count; i++) {
acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
address, NULL, i);
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 5217a6159a31..4a8b9e6ea57a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -72,7 +72,7 @@ static int no_auto_ssdt;
acpi_status acpi_allocate_root_table(u32 initial_table_count)
{
- acpi_gbl_root_table_list.size = initial_table_count;
+ acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
return (acpi_tb_resize_root_table_list());
@@ -130,7 +130,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
sizeof(struct acpi_table_desc));
acpi_gbl_root_table_list.tables = initial_table_array;
- acpi_gbl_root_table_list.size = initial_table_count;
+ acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
if (allow_resize) {
acpi_gbl_root_table_list.flags |=
@@ -172,6 +172,7 @@ acpi_status acpi_reallocate_root_table(void)
{
struct acpi_table_desc *tables;
acpi_size new_size;
+ acpi_size current_size;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -183,10 +184,17 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
- new_size = ((acpi_size) acpi_gbl_root_table_list.count +
- ACPI_ROOT_TABLE_SIZE_INCREMENT) *
+ /*
+ * Get the current size of the root table and add the default
+ * increment to create the new table size.
+ */
+ current_size = (acpi_size)
+ acpi_gbl_root_table_list.current_table_count *
sizeof(struct acpi_table_desc);
+ new_size = current_size +
+ (ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc));
+
/* Create new array and copy the old array */
tables = ACPI_ALLOCATE_ZEROED(new_size);
@@ -194,10 +202,17 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_NO_MEMORY);
}
- ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
+ ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size);
- acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
+ /*
+ * Update the root table descriptor. The new size will be the current
+ * number of tables plus the increment, independent of the reserved
+ * size of the original table list.
+ */
acpi_gbl_root_table_list.tables = tables;
+ acpi_gbl_root_table_list.max_table_count =
+ acpi_gbl_root_table_list.current_table_count +
+ ACPI_ROOT_TABLE_SIZE_INCREMENT;
acpi_gbl_root_table_list.flags =
ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
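The reallocation now tracks two sizes: current_size, the bytes actually in use and therefore safe to copy, and new_size, the in-use slots plus one increment. Copying new_size bytes, as the old code did, could read past the source array whenever it held fewer entries than the new allocation. A short sketch of the corrected arithmetic, with an assumed increment constant:

#include <stdlib.h>
#include <string.h>

struct table_slot {
        unsigned long address;
        unsigned int length;
};

#define ROOT_TABLE_INCREMENT 4  /* stand-in for ACPI_ROOT_TABLE_SIZE_INCREMENT */

/*
 * Grow a root-table-like array: copy only the bytes that are actually in
 * use (current_size), then size the new allocation as "in use plus one
 * increment", independent of how large the old reservation was.
 */
static struct table_slot *grow_table_array(struct table_slot *old,
                                           unsigned int current_count,
                                           unsigned int *max_count_out)
{
        size_t current_size = (size_t)current_count * sizeof(*old);
        size_t new_size = current_size +
                          ROOT_TABLE_INCREMENT * sizeof(*old);
        struct table_slot *tables = calloc(1, new_size);

        if (!tables)
                return NULL;

        if (current_size)
                memcpy(tables, old, current_size);
        *max_count_out = current_count + ROOT_TABLE_INCREMENT;
        return tables;
}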
@@ -278,7 +293,8 @@ acpi_get_table_header(char *signature,
/* Walk the root table list */
- for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
+ i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
@@ -341,7 +357,7 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
ACPI_FUNCTION_TRACE(acpi_unload_table_id);
/* Find table in the global table list */
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
continue;
}
@@ -391,7 +407,8 @@ acpi_get_table_with_size(char *signature,
/* Walk the root table list */
- for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
+ for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
+ i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
@@ -459,7 +476,7 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
/* Validate index */
- if (table_index >= acpi_gbl_root_table_list.count) {
+ if (table_index >= acpi_gbl_root_table_list.current_table_count) {
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -500,16 +517,17 @@ static acpi_status acpi_tb_load_namespace(void)
{
acpi_status status;
u32 i;
+ struct acpi_table_header *new_dsdt;
ACPI_FUNCTION_TRACE(tb_load_namespace);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/*
- * Load the namespace. The DSDT is required, but any SSDT and PSDT tables
- * are optional.
+ * Load the namespace. The DSDT is required, but any SSDT and
+ * PSDT tables are optional. Verify the DSDT.
*/
- if (!acpi_gbl_root_table_list.count ||
+ if (!acpi_gbl_root_table_list.current_table_count ||
!ACPI_COMPARE_NAME(&
(acpi_gbl_root_table_list.
tables[ACPI_TABLE_INDEX_DSDT].signature),
@@ -522,17 +540,35 @@ static acpi_status acpi_tb_load_namespace(void)
goto unlock_and_exit;
}
- /* A valid DSDT is required */
-
- status =
- acpi_tb_verify_table(&acpi_gbl_root_table_list.
- tables[ACPI_TABLE_INDEX_DSDT]);
- if (ACPI_FAILURE(status)) {
+ /*
+ * Save the DSDT pointer for simple access. This is the mapped memory
+ * address. We must take care here because the address of the .Tables
+ * array can change dynamically as tables are loaded at run-time. Note:
+ * .Pointer field is not validated until after call to acpi_tb_verify_table.
+ */
+ acpi_gbl_DSDT =
+ acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
- status = AE_NO_ACPI_TABLES;
- goto unlock_and_exit;
+ /*
+ * Optionally copy the entire DSDT to local memory (instead of simply
+ * mapping it.) There are some BIOSs that corrupt or replace the original
+ * DSDT, creating the need for this option. Default is FALSE, do not copy
+ * the DSDT.
+ */
+ if (acpi_gbl_copy_dsdt_locally) {
+ new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
+ if (new_dsdt) {
+ acpi_gbl_DSDT = new_dsdt;
+ }
}
+ /*
+ * Save the original DSDT header for detection of table corruption
+ * and/or replacement of the DSDT from outside the OS.
+ */
+ ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
+ sizeof(struct acpi_table_header));
+
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
/* Load and parse tables */
@@ -545,7 +581,7 @@ static acpi_status acpi_tb_load_namespace(void)
/* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if ((!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
ACPI_SIG_SSDT)
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index dda6e8c497d3..fd2c07d1d3ac 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -134,7 +134,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
ACPI_EBDA_PTR_LENGTH);
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -159,7 +159,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
ACPI_EBDA_WINDOW_SIZE);
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
physical_address, ACPI_EBDA_WINDOW_SIZE));
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -191,7 +191,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
if (!table_ptr) {
ACPI_ERROR((AE_INFO,
- "Could not map memory at %8.8X for length %X",
+ "Could not map memory at 0x%8.8X for length %u",
ACPI_HI_RSDP_WINDOW_BASE,
ACPI_HI_RSDP_WINDOW_SIZE));
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 3d706b8fd449..8f0896281567 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -340,7 +340,7 @@ void *acpi_ut_allocate(acpi_size size,
/* Report allocation error */
ACPI_WARNING((module, line,
- "Could not allocate size %X", (u32) size));
+ "Could not allocate size %u", (u32) size));
return_PTR(NULL);
}
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 97ec3621e71d..6fef83f04bcd 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -677,16 +677,24 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
u16 reference_count;
union acpi_operand_object *next_object;
acpi_status status;
+ acpi_size copy_size;
/* Save fields from destination that we don't want to overwrite */
reference_count = dest_desc->common.reference_count;
next_object = dest_desc->common.next_object;
- /* Copy the entire source object over the destination object */
+ /*
+ * Copy the entire source object over the destination object.
+ * Note: Source can be either an operand object or namespace node.
+ */
+ copy_size = sizeof(union acpi_operand_object);
+ if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) {
+ copy_size = sizeof(struct acpi_namespace_node);
+ }
- ACPI_MEMCPY((char *)dest_desc, (char *)source_desc,
- sizeof(union acpi_operand_object));
+ ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc),
+ ACPI_CAST_PTR(char, source_desc), copy_size);
/* Restore the saved fields */
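The copy size is now chosen from the descriptor type because the source of a simple-object copy can be either a full operand object or the smaller namespace node; copying the full union size out of a node would read past it. A standalone sketch of the idea with toy structures (the real sizes and descriptor tags differ):

#include <string.h>

/* Toy stand-ins: in ACPICA the node is smaller than the operand union */
struct toy_node {
        unsigned char descriptor_type;
        unsigned int name;
};

union toy_operand {
        struct {
                unsigned char descriptor_type;
                unsigned char type;
        } common;
        unsigned char raw[64];
};

#define DESC_TYPE_NAMED   0x01  /* hypothetical tag values */
#define DESC_TYPE_OPERAND 0x02

/* Copy only as many bytes as the source actually owns. */
static void copy_simple_object(void *dest, const void *src)
{
        unsigned char desc_type = *(const unsigned char *)src;
        size_t copy_size = sizeof(union toy_operand);

        if (desc_type == DESC_TYPE_NAMED)
                copy_size = sizeof(struct toy_node);

        memcpy(dest, src, copy_size);
}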
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 16b51c69606a..ed794cd033ea 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -434,7 +434,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
default:
- ACPI_ERROR((AE_INFO, "Unknown action (%X)", action));
+ ACPI_ERROR((AE_INFO, "Unknown action (0x%X)", action));
break;
}
@@ -444,8 +444,8 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*/
if (count > ACPI_MAX_REFERENCE_COUNT) {
ACPI_WARNING((AE_INFO,
- "Large Reference Count (%X) in object %p", count,
- object));
+ "Large Reference Count (0x%X) in object %p",
+ count, object));
}
}
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 7f5e734ce7f7..6dfdeb653490 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -307,7 +307,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
prefix_node, path, AE_TYPE);
ACPI_ERROR((AE_INFO,
- "Type returned from %s was incorrect: %s, expected Btypes: %X",
+ "Type returned from %s was incorrect: %s, expected Btypes: 0x%X",
path,
acpi_ut_get_object_type_name(info->return_object),
expected_return_btypes));
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index eda3e656c4af..66116750a0f9 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -785,6 +785,7 @@ acpi_status acpi_ut_init_globals(void)
/* Miscellaneous variables */
+ acpi_gbl_DSDT = NULL;
acpi_gbl_cm_single_step = FALSE;
acpi_gbl_db_terminate_threads = FALSE;
acpi_gbl_shutdown = FALSE;
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 32982e2ac384..e8d0724ee403 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -205,7 +205,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
/* Guard against multiple allocations of ID to the same location */
if (*owner_id) {
- ACPI_ERROR((AE_INFO, "Owner ID [%2.2X] already exists",
+ ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
*owner_id));
return_ACPI_STATUS(AE_ALREADY_EXISTS);
}
@@ -315,7 +315,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
	/* Zero is not a valid owner_id */
if (owner_id == 0) {
- ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id));
+ ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
return_VOID;
}
@@ -341,7 +341,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
acpi_gbl_owner_id_mask[index] ^= bit;
} else {
ACPI_ERROR((AE_INFO,
- "Release of non-allocated OwnerId: %2.2X",
+ "Release of non-allocated OwnerId: 0x%2.2X",
owner_id + 1));
}
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 55d014ed6d55..058b3df48271 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -258,7 +258,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %p could not acquire Mutex [%X]",
+ "Thread %p could not acquire Mutex [0x%X]",
ACPI_CAST_PTR(void, this_thread_id), mutex_id));
}
@@ -297,7 +297,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
*/
if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_ERROR((AE_INFO,
- "Mutex [%X] is not acquired, cannot release",
+ "Mutex [0x%X] is not acquired, cannot release",
mutex_id));
return (AE_NOT_ACQUIRED);
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 3356f0cb0745..fd1fa2749ea5 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -251,7 +251,7 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
if (!buffer) {
- ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+ ACPI_ERROR((AE_INFO, "Could not allocate size %u",
(u32) buffer_size));
acpi_ut_remove_reference(buffer_desc);
return_PTR(NULL);
@@ -303,7 +303,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
*/
string = ACPI_ALLOCATE_ZEROED(string_size + 1);
if (!string) {
- ACPI_ERROR((AE_INFO, "Could not allocate size %X",
+ ACPI_ERROR((AE_INFO, "Could not allocate size %u",
(u32) string_size));
acpi_ut_remove_reference(string_desc);
return_PTR(NULL);
@@ -533,7 +533,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
*/
ACPI_ERROR((AE_INFO,
"Cannot convert to external object - "
- "unsupported Reference Class [%s] %X in object %p",
+ "unsupported Reference Class [%s] 0x%X in object %p",
acpi_ut_get_reference_name(internal_object),
internal_object->reference.class,
internal_object));
@@ -545,7 +545,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
default:
ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
- "unsupported type [%s] %X in object %p",
+ "unsupported type [%s] 0x%X in object %p",
acpi_ut_get_object_type_name(internal_object),
internal_object->common.type, internal_object));
status = AE_TYPE;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 743576bf1bd7..9042a8579668 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -69,6 +69,44 @@ static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
};
+#ifdef CONFIG_X86
+static int set_copy_dsdt(const struct dmi_system_id *id)
+{
+ printk(KERN_NOTICE "%s detected - "
+ "force copy of DSDT to local memory\n", id->ident);
+ acpi_gbl_copy_dsdt_locally = 1;
+ return 0;
+}
+
+static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+ /*
+	 * Insyde BIOS on some TOSHIBA machines corrupts the DSDT.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=14679
+ */
+ {
+ .callback = set_copy_dsdt,
+ .ident = "TOSHIBA Satellite A505",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
+ },
+ },
+ {
+ .callback = set_copy_dsdt,
+ .ident = "TOSHIBA Satellite L505D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
+ },
+ },
+ {}
+};
+#else
+static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+ {}
+};
+#endif
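The dsdt_dmi_table above is the usual Linux DMI-quirk pattern: a table of vendor/product matches plus a callback that flips a global, here acpi_gbl_copy_dsdt_locally, when the running machine matches. A standalone mimic of the match-and-callback mechanism with made-up structures (the kernel's own struct dmi_system_id and dmi_check_system support more match types than shown here):

#include <stdio.h>
#include <string.h>

struct quirk_id {
        int (*callback)(const struct quirk_id *id);
        const char *ident;
        const char *vendor;     /* match fields; real DMI matching is richer */
        const char *product;
};

static int copy_dsdt_locally;   /* stand-in for acpi_gbl_copy_dsdt_locally */

static int set_copy_dsdt(const struct quirk_id *id)
{
        printf("%s detected - force copy of DSDT to local memory\n", id->ident);
        copy_dsdt_locally = 1;
        return 0;
}

static const struct quirk_id dsdt_quirks[] = {
        { set_copy_dsdt, "TOSHIBA Satellite A505", "TOSHIBA", "Satellite A505" },
        { set_copy_dsdt, "TOSHIBA Satellite L505D", "TOSHIBA", "Satellite L505D" },
        { NULL, NULL, NULL, NULL }      /* terminator, like the empty {} entry */
};

/* Walk the quirk table and run the callback for every matching entry. */
static void check_quirks(const char *vendor, const char *product)
{
        const struct quirk_id *q;

        for (q = dsdt_quirks; q->callback; q++)
                if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                        q->callback(q);
}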
+
/* --------------------------------------------------------------------------
Device Management
-------------------------------------------------------------------------- */
@@ -813,6 +851,12 @@ void __init acpi_early_init(void)
acpi_gbl_permanent_mmap = 1;
+ /*
+	 * If the machine matches an entry in the DMI check table,
+	 * the DSDT will be copied to local memory.
+ */
+ dmi_check_system(dsdt_dmi_table);
+
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7594f65800cf..4bc1c4178f50 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1406,7 +1406,7 @@ acpi_os_invalidate_address(
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SytemMemory
+ /* Only interference checks against SystemIO and SystemMemory
are needed */
res.start = address;
res.end = address + length - 1;
@@ -1458,7 +1458,7 @@ acpi_os_validate_address (
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SytemMemory
+ /* Only interference checks against SystemIO and SystemMemory
are needed */
res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
if (!res)
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b0a71ecee682..e4804fb05e23 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,11 +401,13 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
* driver reported one, then use it. Exit in any case.
*/
if (gsi < 0) {
+ u32 dev_gsi;
dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
/* Interrupt Line values above 0xF are forbidden */
- if (dev->irq > 0 && (dev->irq <= 0xF)) {
- printk(" - using IRQ %d\n", dev->irq);
- acpi_register_gsi(&dev->dev, dev->irq,
+ if (dev->irq > 0 && (dev->irq <= 0xF) &&
+ (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
+ printk(" - using ISA IRQ %d\n", dev->irq);
+ acpi_register_gsi(&dev->dev, dev_gsi,
ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
return 0;
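The legacy fallback is now guarded: a BIOS-reported Interrupt Line between 1 and 15 is used only if it translates to a GSI, where the old code registered the raw IRQ number directly. A standalone mimic of the guarded fallback with a toy ISA-IRQ-to-GSI table (the real code asks acpi_isa_irq_to_gsi, which is driven by MADT interrupt source overrides):

#include <stdio.h>

#define NR_ISA_IRQS 16

/* Toy ISA IRQ -> GSI routing table; on real hardware this comes from the
 * interrupt source overrides in the MADT. */
static int isa_irq_to_gsi_map[NR_ISA_IRQS] = {
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

static int isa_irq_to_gsi(unsigned int isa_irq, unsigned int *gsi)
{
        if (isa_irq >= NR_ISA_IRQS)
                return -1;
        *gsi = isa_irq_to_gsi_map[isa_irq];
        return 0;
}

/* Fall back to the BIOS-reported line only when it maps to a valid GSI. */
static int enable_legacy_irq(unsigned int dev_irq)
{
        unsigned int gsi;

        if (dev_irq > 0 && dev_irq <= 0xF &&
            isa_irq_to_gsi(dev_irq, &gsi) == 0) {
                printf(" - using ISA IRQ %u (GSI %u)\n", dev_irq, gsi);
                return 0;       /* would register the GSI here */
        }
        return -1;
}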
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index ddc76787b842..f74d3b31e5c9 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -172,7 +172,6 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
- /* */
for (i = 0; i < list->count; i++) {
/*
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5939e7f7d8e9..c3817e1f32c7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -698,7 +698,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
"max_cstate: C%d\n"
"maximum allowed latency: %d usec\n",
pr->power.state ? pr->power.state - pr->power.states : 0,
- max_cstate, pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
+ max_cstate, pm_qos_request(PM_QOS_CPU_DMA_LATENCY));
seq_puts(seq, "states:\n");
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0338f513a010..7f2e051ed4f1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -765,7 +765,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
}
status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number,
- ACPI_NOT_ISR, &event_status);
+ &event_status);
if (status == AE_OK)
device->wakeup.flags.run_wake =
!!(event_status & ACPI_EVENT_FLAG_HANDLE);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 4aaf24976138..e35525b39f6b 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -303,8 +303,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
"Invalid GPE 0x%x\n", index));
goto end;
}
- result = acpi_get_gpe_status(*handle, index,
- ACPI_NOT_ISR, status);
+ result = acpi_get_gpe_status(*handle, index, status);
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
result = acpi_get_event_status(index - num_gpes, status);
@@ -395,7 +394,7 @@ static ssize_t counter_set(struct kobject *kobj,
result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
else if (!strcmp(buf, "clear\n") &&
(status & ACPI_EVENT_FLAG_SET))
- result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR);
+ result = acpi_clear_gpe(handle, index);
else
all_counters[index].count = strtoul(buf, NULL, 0);
} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 01c52c415bdc..e68541f662b9 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -65,6 +65,14 @@ config SATA_AHCI
If unsure, say N.
+config SATA_AHCI_PLATFORM
+ tristate "Platform AHCI SATA support"
+ help
+ This option enables support for Platform AHCI Serial ATA
+ controllers.
+
+ If unsure, say N.
+
config SATA_SIL24
tristate "Silicon Image 3124/3132 SATA support"
depends on PCI
@@ -73,6 +81,12 @@ config SATA_SIL24
If unsure, say N.
+config SATA_INIC162X
+ tristate "Initio 162x SATA support"
+ depends on PCI
+ help
+ This option enables support for Initio 162x Serial ATA.
+
config SATA_FSL
tristate "Freescale 3.0Gbps SATA support"
depends on FSL_SOC
@@ -213,12 +227,6 @@ config SATA_VITESSE
If unsure, say N.
-config SATA_INIC162X
- tristate "Initio 162x SATA support"
- depends on PCI
- help
- This option enables support for Initio 162x Serial ATA.
-
config PATA_ACPI
tristate "ACPI firmware driver for PATA"
depends on ATA_ACPI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index fc936d4471d6..d0a93c4ad3ec 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,7 +1,8 @@
obj-$(CONFIG_ATA) += libata.o
-obj-$(CONFIG_SATA_AHCI) += ahci.o
+obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
obj-$(CONFIG_SATA_SVW) += sata_svw.o
obj-$(CONFIG_ATA_PIIX) += ata_piix.o
obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5326af28a410..8ca16f54e1ed 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,403 +46,48 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
+#include "ahci.h"
#define DRV_NAME "ahci"
#define DRV_VERSION "3.0"
-/* Enclosure Management Control */
-#define EM_CTRL_MSG_TYPE 0x000f0000
-
-/* Enclosure Management LED Message Type */
-#define EM_MSG_LED_HBA_PORT 0x0000000f
-#define EM_MSG_LED_PMP_SLOT 0x0000ff00
-#define EM_MSG_LED_VALUE 0xffff0000
-#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
-#define EM_MSG_LED_VALUE_OFF 0xfff80000
-#define EM_MSG_LED_VALUE_ON 0x00010000
-
-static int ahci_skip_host_reset;
-static int ahci_ignore_sss;
-
-module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
-MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
-
-module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
-MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
-
-static int ahci_enable_alpm(struct ata_port *ap,
- enum link_pm policy);
-static void ahci_disable_alpm(struct ata_port *ap);
-static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
-static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
- size_t size);
-static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
- ssize_t size);
-
enum {
AHCI_PCI_BAR = 5,
- AHCI_MAX_PORTS = 32,
- AHCI_MAX_SG = 168, /* hardware max is 64K */
- AHCI_DMA_BOUNDARY = 0xffffffff,
- AHCI_MAX_CMDS = 32,
- AHCI_CMD_SZ = 32,
- AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
- AHCI_RX_FIS_SZ = 256,
- AHCI_CMD_TBL_CDB = 0x40,
- AHCI_CMD_TBL_HDR_SZ = 0x80,
- AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
- AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
- AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
- AHCI_RX_FIS_SZ,
- AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
- AHCI_CMD_TBL_AR_SZ +
- (AHCI_RX_FIS_SZ * 16),
- AHCI_IRQ_ON_SG = (1 << 31),
- AHCI_CMD_ATAPI = (1 << 5),
- AHCI_CMD_WRITE = (1 << 6),
- AHCI_CMD_PREFETCH = (1 << 7),
- AHCI_CMD_RESET = (1 << 8),
- AHCI_CMD_CLR_BUSY = (1 << 10),
-
- RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
- RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
- RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
-
- board_ahci = 0,
- board_ahci_vt8251 = 1,
- board_ahci_ign_iferr = 2,
- board_ahci_sb600 = 3,
- board_ahci_mv = 4,
- board_ahci_sb700 = 5, /* for SB700 and SB800 */
- board_ahci_mcp65 = 6,
- board_ahci_nopmp = 7,
- board_ahci_yesncq = 8,
- board_ahci_nosntf = 9,
-
- /* global controller registers */
- HOST_CAP = 0x00, /* host capabilities */
- HOST_CTL = 0x04, /* global host control */
- HOST_IRQ_STAT = 0x08, /* interrupt status */
- HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
- HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
- HOST_EM_LOC = 0x1c, /* Enclosure Management location */
- HOST_EM_CTL = 0x20, /* Enclosure Management Control */
- HOST_CAP2 = 0x24, /* host capabilities, extended */
-
- /* HOST_CTL bits */
- HOST_RESET = (1 << 0), /* reset controller; self-clear */
- HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
- HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
-
- /* HOST_CAP bits */
- HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
- HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
- HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
- HOST_CAP_PART = (1 << 13), /* Partial state capable */
- HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
- HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
- HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
- HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
- HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
- HOST_CAP_CLO = (1 << 24), /* Command List Override support */
- HOST_CAP_LED = (1 << 25), /* Supports activity LED */
- HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
- HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
- HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
- HOST_CAP_SNTF = (1 << 29), /* SNotification register */
- HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
- HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
-
- /* HOST_CAP2 bits */
- HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
- HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
- HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
-
- /* registers for each SATA port */
- PORT_LST_ADDR = 0x00, /* command list DMA addr */
- PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
- PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
- PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
- PORT_IRQ_STAT = 0x10, /* interrupt status */
- PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
- PORT_CMD = 0x18, /* port command */
- PORT_TFDATA = 0x20, /* taskfile data */
- PORT_SIG = 0x24, /* device TF signature */
- PORT_CMD_ISSUE = 0x38, /* command issue */
- PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
- PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
- PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
- PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
- PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
- PORT_FBS = 0x40, /* FIS-based Switching */
-
- /* PORT_IRQ_{STAT,MASK} bits */
- PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
- PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
- PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
- PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
- PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
- PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
- PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
- PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
-
- PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
- PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
- PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
- PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
- PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
- PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
- PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
- PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
- PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
-
- PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
- PORT_IRQ_IF_ERR |
- PORT_IRQ_CONNECT |
- PORT_IRQ_PHYRDY |
- PORT_IRQ_UNK_FIS |
- PORT_IRQ_BAD_PMP,
- PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
- PORT_IRQ_TF_ERR |
- PORT_IRQ_HBUS_DATA_ERR,
- DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
- PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
- PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
-
- /* PORT_CMD bits */
- PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
- PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
- PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
- PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
- PORT_CMD_PMP = (1 << 17), /* PMP attached */
- PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
- PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
- PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
- PORT_CMD_CLO = (1 << 3), /* Command list override */
- PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
- PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
- PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
-
- PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
- PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
- PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
- PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
-
- PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
- PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
- PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
- PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
- PORT_FBS_SDE = (1 << 2), /* FBS single device error */
- PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
- PORT_FBS_EN = (1 << 0), /* Enable FBS */
-
- /* hpriv->flags bits */
- AHCI_HFLAG_NO_NCQ = (1 << 0),
- AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
- AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
- AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
- AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
- AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
- AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
- AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
- AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
- AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
- AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
- AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
- link offline */
- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
-
- /* ap->flags bits */
-
- AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
- ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
- ATA_FLAG_IPM,
-
- ICH_MAP = 0x90, /* ICH MAP register */
-
- /* em constants */
- EM_MAX_SLOTS = 8,
- EM_MAX_RETRY = 5,
-
- /* em_ctl bits */
- EM_CTL_RST = (1 << 9), /* Reset */
- EM_CTL_TM = (1 << 8), /* Transmit Message */
- EM_CTL_ALHD = (1 << 26), /* Activity LED */
-};
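For orientation (not part of the patch): the PORT_* offsets above are relative to a single port's register window, and the PORT_CMD_* bits are applied with a plain read-modify-write, as the removed ahci_start_engine() further down also shows. A minimal sketch, assuming port_mmio already points at the port window:

	/* Sketch only: start the command-list DMA engine on one port. */
	static void example_start_port_dma(void __iomem *port_mmio)
	{
		u32 tmp = readl(port_mmio + PORT_CMD);	/* current port command */

		tmp |= PORT_CMD_START;			/* enable port DMA engine */
		writel(tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD);		/* flush posted write */
	}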
-
-struct ahci_cmd_hdr {
- __le32 opts;
- __le32 status;
- __le32 tbl_addr;
- __le32 tbl_addr_hi;
- __le32 reserved[4];
-};
-
-struct ahci_sg {
- __le32 addr;
- __le32 addr_hi;
- __le32 reserved;
- __le32 flags_size;
-};
-
-struct ahci_em_priv {
- enum sw_activity blink_policy;
- struct timer_list timer;
- unsigned long saved_activity;
- unsigned long activity;
- unsigned long led_state;
-};
-
-struct ahci_host_priv {
- unsigned int flags; /* AHCI_HFLAG_* */
- u32 cap; /* cap to use */
- u32 cap2; /* cap2 to use */
- u32 port_map; /* port map to use */
- u32 saved_cap; /* saved initial cap */
- u32 saved_cap2; /* saved initial cap2 */
- u32 saved_port_map; /* saved initial port_map */
- u32 em_loc; /* enclosure management location */
};
-struct ahci_port_priv {
- struct ata_link *active_link;
- struct ahci_cmd_hdr *cmd_slot;
- dma_addr_t cmd_slot_dma;
- void *cmd_tbl;
- dma_addr_t cmd_tbl_dma;
- void *rx_fis;
- dma_addr_t rx_fis_dma;
- /* for NCQ spurious interrupt analysis */
- unsigned int ncq_saw_d2h:1;
- unsigned int ncq_saw_dmas:1;
- unsigned int ncq_saw_sdb:1;
- u32 intr_mask; /* interrupts to enable */
- bool fbs_supported; /* set iff FBS is supported */
- bool fbs_enabled; /* set iff FBS is enabled */
- int fbs_last_dev; /* save FBS.DEV of last FIS */
- /* enclosure management info per PM slot */
- struct ahci_em_priv em_priv[EM_MAX_SLOTS];
+enum board_ids {
+ /* board IDs by feature in alphabetical order */
+ board_ahci,
+ board_ahci_ign_iferr,
+ board_ahci_nosntf,
+
+ /* board IDs for specific chipsets in alphabetical order */
+ board_ahci_mcp65,
+ board_ahci_mcp77,
+ board_ahci_mcp89,
+ board_ahci_mv,
+ board_ahci_sb600,
+ board_ahci_sb700, /* for SB700 and SB800 */
+ board_ahci_vt8251,
+
+ /* aliases */
+ board_ahci_mcp_linux = board_ahci_mcp65,
+ board_ahci_mcp67 = board_ahci_mcp65,
+ board_ahci_mcp73 = board_ahci_mcp65,
+ board_ahci_mcp79 = board_ahci_mcp77,
};
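The alias entries let the PCI ID table below stay chipset-named while several chipsets share one quirk set. A minimal sketch of how a board ID carried in ahci_pci_tbl[] driver_data would map back to its port_info template at probe time (example_lookup is illustrative, assumed from the usual libata probe flow, not part of the patch):

	/* Sketch only: an aliased ID such as board_ahci_mcp67 indexes the
	 * same ahci_port_info[] slot as board_ahci_mcp65, so both chipsets
	 * get identical flags and port_ops.
	 */
	static const struct ata_port_info *
	example_lookup(const struct pci_device_id *ent)
	{
		return &ahci_port_info[ent->driver_data];
	}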
-static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
-static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
-static int ahci_port_start(struct ata_port *ap);
-static void ahci_port_stop(struct ata_port *ap);
-static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
-static void ahci_freeze(struct ata_port *ap);
-static void ahci_thaw(struct ata_port *ap);
-static void ahci_enable_fbs(struct ata_port *ap);
-static void ahci_disable_fbs(struct ata_port *ap);
-static void ahci_pmp_attach(struct ata_port *ap);
-static void ahci_pmp_detach(struct ata_port *ap);
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
-static int ahci_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
-static void ahci_postreset(struct ata_link *link, unsigned int *class);
-static void ahci_error_handler(struct ata_port *ap);
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
-static int ahci_port_resume(struct ata_port *ap);
-static void ahci_dev_config(struct ata_device *dev);
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
- u32 opts);
#ifdef CONFIG_PM
-static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
-static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
-static ssize_t ahci_activity_store(struct ata_device *dev,
- enum sw_activity val);
-static void ahci_init_sw_activity(struct ata_link *link);
-
-static ssize_t ahci_show_host_caps(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_host_cap2(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_host_version(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t ahci_show_port_cmd(struct device *dev,
- struct device_attribute *attr, char *buf);
-
-static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
-static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
-static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
-static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
-
-static struct device_attribute *ahci_shost_attrs[] = {
- &dev_attr_link_power_management_policy,
- &dev_attr_em_message_type,
- &dev_attr_em_message,
- &dev_attr_ahci_host_caps,
- &dev_attr_ahci_host_cap2,
- &dev_attr_ahci_host_version,
- &dev_attr_ahci_port_cmd,
- NULL
-};
-
-static struct device_attribute *ahci_sdev_attrs[] = {
- &dev_attr_sw_activity,
- &dev_attr_unload_heads,
- NULL
-};
-
-static struct scsi_host_template ahci_sht = {
- ATA_NCQ_SHT(DRV_NAME),
- .can_queue = AHCI_MAX_CMDS - 1,
- .sg_tablesize = AHCI_MAX_SG,
- .dma_boundary = AHCI_DMA_BOUNDARY,
- .shost_attrs = ahci_shost_attrs,
- .sdev_attrs = ahci_sdev_attrs,
-};
-
-static struct ata_port_operations ahci_ops = {
- .inherits = &sata_pmp_port_ops,
-
- .qc_defer = ahci_pmp_qc_defer,
- .qc_prep = ahci_qc_prep,
- .qc_issue = ahci_qc_issue,
- .qc_fill_rtf = ahci_qc_fill_rtf,
-
- .freeze = ahci_freeze,
- .thaw = ahci_thaw,
- .softreset = ahci_softreset,
- .hardreset = ahci_hardreset,
- .postreset = ahci_postreset,
- .pmp_softreset = ahci_softreset,
- .error_handler = ahci_error_handler,
- .post_internal_cmd = ahci_post_internal_cmd,
- .dev_config = ahci_dev_config,
-
- .scr_read = ahci_scr_read,
- .scr_write = ahci_scr_write,
- .pmp_attach = ahci_pmp_attach,
- .pmp_detach = ahci_pmp_detach,
-
- .enable_pm = ahci_enable_alpm,
- .disable_pm = ahci_disable_alpm,
- .em_show = ahci_led_show,
- .em_store = ahci_led_store,
- .sw_activity_show = ahci_activity_show,
- .sw_activity_store = ahci_activity_store,
-#ifdef CONFIG_PM
- .port_suspend = ahci_port_suspend,
- .port_resume = ahci_port_resume,
-#endif
- .port_start = ahci_port_start,
- .port_stop = ahci_port_stop,
-};
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
@@ -463,6 +108,7 @@ static struct ata_port_operations ahci_sb600_ops = {
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
static const struct ata_port_info ahci_port_info[] = {
+ /* by features */
[board_ahci] =
{
.flags = AHCI_FLAG_COMMON,
@@ -470,81 +116,83 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_vt8251] =
+ [board_ahci_ign_iferr] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_vt8251_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_ign_iferr] =
+ [board_ahci_nosntf] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_sb600] =
+ /* by chipsets */
+ [board_ahci_mcp65] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
- AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
- AHCI_HFLAG_32BIT_ONLY),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
+ AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_mv] =
+ [board_ahci_mcp77] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
- AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
+ .flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_sb700] = /* for SB700 and SB800 */
+ [board_ahci_mcp89] =
{
- AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_ops,
},
- [board_ahci_mcp65] =
+ [board_ahci_mv] =
{
- AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
- .flags = AHCI_FLAG_COMMON,
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
+ AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nopmp] =
+ [board_ahci_sb600] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
+ AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
+ AHCI_HFLAG_32BIT_ONLY),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_sb600_ops,
},
- [board_ahci_yesncq] =
+ [board_ahci_sb700] = /* for SB700 and SB800 */
{
- AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_sb600_ops,
},
- [board_ahci_nosntf] =
+ [board_ahci_vt8251] =
{
- AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
+ .port_ops = &ahci_vt8251_ops,
},
};
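AHCI_HFLAGS() above packs the AHCI_HFLAG_* bits into each entry's generic .private_data pointer; probe code is expected to recover them with the inverse cast before merging them into hpriv->flags. A minimal sketch (example_hflags is illustrative only):

	/* Sketch only: read the host flags back out of a port_info entry. */
	static unsigned long example_hflags(const struct ata_port_info *pi)
	{
		return (unsigned long)pi->private_data;	/* AHCI_HFLAG_* mask */
	}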
@@ -629,82 +277,82 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
- { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
- { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */
- { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
- { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
- { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
- { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
- { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 }, /* MCP67 */
+ { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux }, /* Linux ID */
+ { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 }, /* MCP73 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 }, /* MCP77 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 }, /* MCP79 */
+ { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 }, /* MCP89 */
+ { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 }, /* MCP89 */
/* SiS */
{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
@@ -737,12 +385,6 @@ static struct pci_driver ahci_pci_driver = {
#endif
};
-static int ahci_em_messages = 1;
-module_param(ahci_em_messages, int, 0444);
-/* add other LED protocol types when they become supported */
-MODULE_PARM_DESC(ahci_em_messages,
-	"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");
-
#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
static int marvell_enable;
#else
@@ -752,166 +394,15 @@ module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
-static inline int ahci_nr_ports(u32 cap)
-{
- return (cap & 0x1f) + 1;
-}
-
-static inline void __iomem *__ahci_port_base(struct ata_host *host,
- unsigned int port_no)
-{
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
-
- return mmio + 0x100 + (port_no * 0x80);
-}
-
-static inline void __iomem *ahci_port_base(struct ata_port *ap)
-{
- return __ahci_port_base(ap->host, ap->port_no);
-}
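The two removed helpers encode the AHCI register layout: port windows start at offset 0x100 from the ABAR and are 0x80 bytes each, so for example port 2's SStatus register sits at 0x100 + 2 * 0x80 + 0x28 = 0x228. A minimal sketch, assuming abar is the mapped AHCI BAR:

	/* Sketch only: compute one port register address by hand. */
	static void __iomem *example_port2_sstatus(void __iomem *abar)
	{
		return abar + 0x100 + 2 * 0x80 + PORT_SCR_STAT;	/* abar + 0x228 */
	}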
-
-static void ahci_enable_ahci(void __iomem *mmio)
-{
- int i;
- u32 tmp;
-
- /* turn on AHCI_EN */
- tmp = readl(mmio + HOST_CTL);
- if (tmp & HOST_AHCI_EN)
- return;
-
- /* Some controllers need AHCI_EN to be written multiple times.
- * Try a few times before giving up.
- */
- for (i = 0; i < 5; i++) {
- tmp |= HOST_AHCI_EN;
- writel(tmp, mmio + HOST_CTL);
- tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
- if (tmp & HOST_AHCI_EN)
- return;
- msleep(10);
- }
-
- WARN_ON(1);
-}
-
-static ssize_t ahci_show_host_caps(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- return sprintf(buf, "%x\n", hpriv->cap);
-}
-
-static ssize_t ahci_show_host_cap2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- return sprintf(buf, "%x\n", hpriv->cap2);
-}
-
-static ssize_t ahci_show_host_version(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
-
- return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
-}
-
-static ssize_t ahci_show_port_cmd(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ata_port *ap = ata_shost_to_port(shost);
- void __iomem *port_mmio = ahci_port_base(ap);
-
- return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
-}
-
-/**
- * ahci_save_initial_config - Save and fixup initial config values
- * @pdev: target PCI device
- * @hpriv: host private area to store config values
- *
- * Some registers containing configuration info might be setup by
- * BIOS and might be cleared on reset. This function saves the
- * initial values of those registers into @hpriv such that they
- * can be restored after controller reset.
- *
- * If inconsistent, config values are fixed up by this function.
- *
- * LOCKING:
- * None.
- */
-static void ahci_save_initial_config(struct pci_dev *pdev,
- struct ahci_host_priv *hpriv)
+static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
{
- void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
- u32 cap, cap2, vers, port_map;
- int i;
- int mv;
-
- /* make sure AHCI mode is enabled before accessing CAP */
- ahci_enable_ahci(mmio);
+ unsigned int force_port_map = 0;
+ unsigned int mask_port_map = 0;
- /* Values prefixed with saved_ are written back to host after
- * reset. Values without are used for driver operation.
- */
- hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
- hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
-
- /* CAP2 register is only defined for AHCI 1.2 and later */
- vers = readl(mmio + HOST_VERSION);
- if ((vers >> 16) > 1 ||
- ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
- hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
- else
- hpriv->saved_cap2 = cap2 = 0;
-
- /* some chips have errata preventing 64bit use */
- if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do 64bit DMA, forcing 32bit\n");
- cap &= ~HOST_CAP_64;
- }
-
- if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do NCQ, turning off CAP_NCQ\n");
- cap &= ~HOST_CAP_NCQ;
- }
-
- if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can do NCQ, turning on CAP_NCQ\n");
- cap |= HOST_CAP_NCQ;
- }
-
- if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do PMP, turning off CAP_PMP\n");
- cap &= ~HOST_CAP_PMP;
- }
-
- if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "controller can't do SNTF, turning off CAP_SNTF\n");
- cap &= ~HOST_CAP_SNTF;
- }
-
- if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
- port_map != 1) {
- dev_printk(KERN_INFO, &pdev->dev,
- "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
- port_map, 1);
- port_map = 1;
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
+ dev_info(&pdev->dev, "JMB361 has only one port\n");
+ force_port_map = 1;
}
/*
@@ -921,469 +412,25 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
*/
if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
if (pdev->device == 0x6121)
- mv = 0x3;
+ mask_port_map = 0x3;
else
- mv = 0xf;
- dev_printk(KERN_ERR, &pdev->dev,
- "MV_AHCI HACK: port_map %x -> %x\n",
- port_map,
- port_map & mv);
- dev_printk(KERN_ERR, &pdev->dev,
+ mask_port_map = 0xf;
+ dev_info(&pdev->dev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
-
- port_map &= mv;
}
- /* cross check port_map and cap.n_ports */
- if (port_map) {
- int map_ports = 0;
-
- for (i = 0; i < AHCI_MAX_PORTS; i++)
- if (port_map & (1 << i))
- map_ports++;
-
- /* If PI has more ports than n_ports, whine, clear
- * port_map and let it be generated from n_ports.
- */
- if (map_ports > ahci_nr_ports(cap)) {
- dev_printk(KERN_WARNING, &pdev->dev,
- "implemented port map (0x%x) contains more "
- "ports than nr_ports (%u), using nr_ports\n",
- port_map, ahci_nr_ports(cap));
- port_map = 0;
- }
- }
-
- /* fabricate port_map from cap.nr_ports */
- if (!port_map) {
- port_map = (1 << ahci_nr_ports(cap)) - 1;
- dev_printk(KERN_WARNING, &pdev->dev,
- "forcing PORTS_IMPL to 0x%x\n", port_map);
-
- /* write the fixed up value to the PI register */
- hpriv->saved_port_map = port_map;
- }
-
- /* record values to use during operation */
- hpriv->cap = cap;
- hpriv->cap2 = cap2;
- hpriv->port_map = port_map;
-}
-
-/**
- * ahci_restore_initial_config - Restore initial config
- * @host: target ATA host
- *
- * Restore initial config stored by ahci_save_initial_config().
- *
- * LOCKING:
- * None.
- */
-static void ahci_restore_initial_config(struct ata_host *host)
-{
- struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
-
- writel(hpriv->saved_cap, mmio + HOST_CAP);
- if (hpriv->saved_cap2)
- writel(hpriv->saved_cap2, mmio + HOST_CAP2);
- writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
- (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
-}
-
-static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
-{
- static const int offset[] = {
- [SCR_STATUS] = PORT_SCR_STAT,
- [SCR_CONTROL] = PORT_SCR_CTL,
- [SCR_ERROR] = PORT_SCR_ERR,
- [SCR_ACTIVE] = PORT_SCR_ACT,
- [SCR_NOTIFICATION] = PORT_SCR_NTF,
- };
- struct ahci_host_priv *hpriv = ap->host->private_data;
-
- if (sc_reg < ARRAY_SIZE(offset) &&
- (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
- return offset[sc_reg];
- return 0;
-}
-
-static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- int offset = ahci_scr_offset(link->ap, sc_reg);
-
- if (offset) {
- *val = readl(port_mmio + offset);
- return 0;
- }
- return -EINVAL;
-}
-
-static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- int offset = ahci_scr_offset(link->ap, sc_reg);
-
- if (offset) {
- writel(val, port_mmio + offset);
- return 0;
- }
- return -EINVAL;
-}
-
-static void ahci_start_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- /* start DMA */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_START;
- writel(tmp, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* flush */
-}
-
-static int ahci_stop_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- tmp = readl(port_mmio + PORT_CMD);
-
- /* check if the HBA is idle */
- if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
- return 0;
-
- /* setting HBA to idle */
- tmp &= ~PORT_CMD_START;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* wait for engine to stop. This could be as long as 500 msec */
- tmp = ata_wait_register(port_mmio + PORT_CMD,
- PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
- if (tmp & PORT_CMD_LIST_ON)
- return -EIO;
-
- return 0;
-}
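The ata_wait_register() call above follows libata's polling convention: it keeps re-reading the register while the masked bits still equal the given value, sleeping between reads, and returns the last value read so the caller can tell success from timeout. A minimal usage sketch mirroring the engine-stop wait (example_wait_list_off is illustrative only):

	/* Sketch only: wait up to 500 ms for PORT_CMD_LIST_ON to clear. */
	static int example_wait_list_off(void __iomem *port_mmio)
	{
		u32 tmp = ata_wait_register(port_mmio + PORT_CMD,
					    PORT_CMD_LIST_ON, PORT_CMD_LIST_ON,
					    1, 500);

		return (tmp & PORT_CMD_LIST_ON) ? -EIO : 0;
	}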
-
-static void ahci_start_fis_rx(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- u32 tmp;
-
- /* set FIS registers */
- if (hpriv->cap & HOST_CAP_64)
- writel((pp->cmd_slot_dma >> 16) >> 16,
- port_mmio + PORT_LST_ADDR_HI);
- writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
-
- if (hpriv->cap & HOST_CAP_64)
- writel((pp->rx_fis_dma >> 16) >> 16,
- port_mmio + PORT_FIS_ADDR_HI);
- writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
-
- /* enable FIS reception */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_FIS_RX;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* flush */
- readl(port_mmio + PORT_CMD);
+ ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
+ mask_port_map);
}
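The detailed CAP/PORTS_IMPL fixups removed above now live in the shared ahci_save_initial_config() (libahci.c, outside this hunk); the two new arguments presumably carry the PCI-specific quirks. A hedged sketch of the assumed effect, reconstructed from the checks removed above (the real helper may differ):

	/* Sketch only: assumed handling of the two new arguments. */
	static u32 example_fixup_port_map(u32 port_map,
					  unsigned int force_port_map,
					  unsigned int mask_port_map)
	{
		if (force_port_map)
			port_map = force_port_map;	/* e.g. JMB361 -> 0x1 */
		if (mask_port_map)
			port_map &= mask_port_map;	/* e.g. MV 6121 -> 0x3 */
		return port_map;
	}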
-static int ahci_stop_fis_rx(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
-
- /* disable FIS reception */
- tmp = readl(port_mmio + PORT_CMD);
- tmp &= ~PORT_CMD_FIS_RX;
- writel(tmp, port_mmio + PORT_CMD);
-
- /* wait for completion, spec says 500ms, give it 1000 */
- tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
- PORT_CMD_FIS_ON, 10, 1000);
- if (tmp & PORT_CMD_FIS_ON)
- return -EBUSY;
-
- return 0;
-}
-
-static void ahci_power_up(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
-
- cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
-
- /* spin up device */
- if (hpriv->cap & HOST_CAP_SSS) {
- cmd |= PORT_CMD_SPIN_UP;
- writel(cmd, port_mmio + PORT_CMD);
- }
-
- /* wake up link */
- writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
-}
-
-static void ahci_disable_alpm(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
- struct ahci_port_priv *pp = ap->private_data;
-
- /* IPM bits should be disabled by libata-core */
- /* get the existing command bits */
- cmd = readl(port_mmio + PORT_CMD);
-
- /* disable ALPM and ASP */
- cmd &= ~PORT_CMD_ASP;
- cmd &= ~PORT_CMD_ALPE;
-
- /* force the interface back to active */
- cmd |= PORT_CMD_ICC_ACTIVE;
-
- /* write out new cmd value */
- writel(cmd, port_mmio + PORT_CMD);
- cmd = readl(port_mmio + PORT_CMD);
-
- /* wait 10ms to be sure we've come out of any low power state */
- msleep(10);
-
- /* clear out any PhyRdy stuff from interrupt status */
- writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
-
- /* go ahead and clean out PhyRdy Change from Serror too */
- ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
-
- /*
- * Clear flag to indicate that we should ignore all PhyRdy
- * state changes
- */
- hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
-
- /*
- * Enable interrupts on Phy Ready.
- */
- pp->intr_mask |= PORT_IRQ_PHYRDY;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-
- /*
- * don't change the link pm policy - we can be called
-	 * just to turn off link pm temporarily
- */
-}
-
-static int ahci_enable_alpm(struct ata_port *ap,
- enum link_pm policy)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd;
- struct ahci_port_priv *pp = ap->private_data;
- u32 asp;
-
- /* Make sure the host is capable of link power management */
- if (!(hpriv->cap & HOST_CAP_ALPM))
- return -EINVAL;
-
- switch (policy) {
- case MAX_PERFORMANCE:
- case NOT_AVAILABLE:
- /*
- * if we came here with NOT_AVAILABLE,
- * it just means this is the first time we
- * have tried to enable - default to max performance,
- * and let the user go to lower power modes on request.
- */
- ahci_disable_alpm(ap);
- return 0;
- case MIN_POWER:
- /* configure HBA to enter SLUMBER */
- asp = PORT_CMD_ASP;
- break;
- case MEDIUM_POWER:
- /* configure HBA to enter PARTIAL */
- asp = 0;
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * Disable interrupts on Phy Ready. This keeps us from
- * getting woken up due to spurious phy ready interrupts
- * TBD - Hot plug should be done via polling now, is
- * that even supported?
- */
- pp->intr_mask &= ~PORT_IRQ_PHYRDY;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-
- /*
- * Set a flag to indicate that we should ignore all PhyRdy
- * state changes since these can happen now whenever we
- * change link state
- */
- hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
-
- /* get the existing command bits */
- cmd = readl(port_mmio + PORT_CMD);
-
- /*
- * Set ASP based on Policy
- */
- cmd |= asp;
-
- /*
- * Setting this bit will instruct the HBA to aggressively
- * enter a lower power link state when it's appropriate and
- * based on the value set above for ASP
- */
- cmd |= PORT_CMD_ALPE;
-
- /* write out new cmd value */
- writel(cmd, port_mmio + PORT_CMD);
- cmd = readl(port_mmio + PORT_CMD);
-
- /* IPM bits should be set by libata-core */
- return 0;
-}
-
-#ifdef CONFIG_PM
-static void ahci_power_down(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd, scontrol;
-
- if (!(hpriv->cap & HOST_CAP_SSS))
- return;
-
- /* put device into listen mode, first set PxSCTL.DET to 0 */
- scontrol = readl(port_mmio + PORT_SCR_CTL);
- scontrol &= ~0xf;
- writel(scontrol, port_mmio + PORT_SCR_CTL);
-
- /* then set PxCMD.SUD to 0 */
- cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
- cmd &= ~PORT_CMD_SPIN_UP;
- writel(cmd, port_mmio + PORT_CMD);
-}
-#endif
-
-static void ahci_start_port(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_link *link;
- struct ahci_em_priv *emp;
- ssize_t rc;
- int i;
-
- /* enable FIS reception */
- ahci_start_fis_rx(ap);
-
- /* enable DMA */
- ahci_start_engine(ap);
-
- /* turn on LEDs */
- if (ap->flags & ATA_FLAG_EM) {
- ata_for_each_link(link, ap, EDGE) {
- emp = &pp->em_priv[link->pmp];
-
-			/* EM Transmit bit may be busy during init */
- for (i = 0; i < EM_MAX_RETRY; i++) {
- rc = ahci_transmit_led_message(ap,
- emp->led_state,
- 4);
- if (rc == -EBUSY)
- msleep(1);
- else
- break;
- }
- }
- }
-
- if (ap->flags & ATA_FLAG_SW_ACTIVITY)
- ata_for_each_link(link, ap, EDGE)
- ahci_init_sw_activity(link);
-
-}
-
-static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
-{
- int rc;
-
- /* disable DMA */
- rc = ahci_stop_engine(ap);
- if (rc) {
- *emsg = "failed to stop engine";
- return rc;
- }
-
- /* disable FIS reception */
- rc = ahci_stop_fis_rx(ap);
- if (rc) {
- *emsg = "failed stop FIS RX";
- return rc;
- }
-
- return 0;
-}
-
-static int ahci_reset_controller(struct ata_host *host)
+static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
- struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 tmp;
-	/* we must be in AHCI mode before using anything
- * AHCI-specific, such as HOST_RESET.
- */
- ahci_enable_ahci(mmio);
-
- /* global controller reset */
- if (!ahci_skip_host_reset) {
- tmp = readl(mmio + HOST_CTL);
- if ((tmp & HOST_RESET) == 0) {
- writel(tmp | HOST_RESET, mmio + HOST_CTL);
- readl(mmio + HOST_CTL); /* flush */
- }
-
- /*
- * to perform host reset, OS should set HOST_RESET
- * and poll until this bit is read to be "0".
- * reset must complete within 1 second, or
- * the hardware should be considered fried.
- */
- tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
- HOST_RESET, 10, 1000);
-
- if (tmp & HOST_RESET) {
- dev_printk(KERN_ERR, host->dev,
- "controller reset failed (0x%x)\n", tmp);
- return -EIO;
- }
-
- /* turn on AHCI mode */
- ahci_enable_ahci(mmio);
-
- /* Some registers might be cleared on reset. Restore
- * initial values.
- */
- ahci_restore_initial_config(host);
- } else
- dev_printk(KERN_INFO, host->dev,
- "skipping global host reset\n");
+ ahci_reset_controller(host);
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ struct ahci_host_priv *hpriv = host->private_data;
u16 tmp16;
/* configure PCS */
@@ -1397,267 +444,10 @@ static int ahci_reset_controller(struct ata_host *host)
return 0;
}
-static void ahci_sw_activity(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
- return;
-
- emp->activity++;
- if (!timer_pending(&emp->timer))
- mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
-}
-
-static void ahci_sw_activity_blink(unsigned long arg)
-{
- struct ata_link *link = (struct ata_link *)arg;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
- unsigned long led_message = emp->led_state;
- u32 activity_led_state;
- unsigned long flags;
-
- led_message &= EM_MSG_LED_VALUE;
- led_message |= ap->port_no | (link->pmp << 8);
-
- /* check to see if we've had activity. If so,
- * toggle state of LED and reset timer. If not,
- * turn LED to desired idle state.
- */
- spin_lock_irqsave(ap->lock, flags);
- if (emp->saved_activity != emp->activity) {
- emp->saved_activity = emp->activity;
- /* get the current LED state */
- activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
-
- if (activity_led_state)
- activity_led_state = 0;
- else
- activity_led_state = 1;
-
- /* clear old state */
- led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
-
- /* toggle state */
- led_message |= (activity_led_state << 16);
- mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
- } else {
- /* switch to idle */
- led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
- if (emp->blink_policy == BLINK_OFF)
- led_message |= (1 << 16);
- }
- spin_unlock_irqrestore(ap->lock, flags);
- ahci_transmit_led_message(ap, led_message, 4);
-}
-
-static void ahci_init_sw_activity(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- /* init activity stats, setup timer */
- emp->saved_activity = emp->activity = 0;
- setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
-
- /* check our blink policy and set flag for link if it's enabled */
- if (emp->blink_policy)
- link->flags |= ATA_LFLAG_SW_ACTIVITY;
-}
-
-static int ahci_reset_em(struct ata_host *host)
-{
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 em_ctl;
-
- em_ctl = readl(mmio + HOST_EM_CTL);
- if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
- return -EINVAL;
-
- writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
- return 0;
-}
-
-static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
- ssize_t size)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
- u32 em_ctl;
- u32 message[] = {0, 0};
- unsigned long flags;
- int pmp;
- struct ahci_em_priv *emp;
-
- /* get the slot number from the message */
- pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
- emp = &pp->em_priv[pmp];
- else
- return -EINVAL;
-
- spin_lock_irqsave(ap->lock, flags);
-
- /*
- * if we are still busy transmitting a previous message,
-	 * do not allow a new one
- */
- em_ctl = readl(mmio + HOST_EM_CTL);
- if (em_ctl & EM_CTL_TM) {
- spin_unlock_irqrestore(ap->lock, flags);
- return -EBUSY;
- }
-
- /*
- * create message header - this is all zero except for
- * the message size, which is 4 bytes.
- */
- message[0] |= (4 << 8);
-
- /* ignore 0:4 of byte zero, fill in port info yourself */
- message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
-
- /* write message to EM_LOC */
- writel(message[0], mmio + hpriv->em_loc);
- writel(message[1], mmio + hpriv->em_loc+4);
-
- /* save off new led state for port/slot */
- emp->led_state = state;
-
- /*
- * tell hardware to transmit the message
- */
- writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
-
- spin_unlock_irqrestore(ap->lock, flags);
- return size;
-}
-
-static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
-{
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_link *link;
- struct ahci_em_priv *emp;
- int rc = 0;
-
- ata_for_each_link(link, ap, EDGE) {
- emp = &pp->em_priv[link->pmp];
- rc += sprintf(buf, "%lx\n", emp->led_state);
- }
- return rc;
-}
-
-static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
- size_t size)
-{
- int state;
- int pmp;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp;
-
- state = simple_strtoul(buf, NULL, 0);
-
- /* get the slot number from the message */
- pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
- emp = &pp->em_priv[pmp];
- else
- return -EINVAL;
-
- /* mask off the activity bits if we are in sw_activity
- * mode, user should turn off sw_activity before setting
- * activity led through em_message
- */
- if (emp->blink_policy)
- state &= ~EM_MSG_LED_VALUE_ACTIVITY;
-
- return ahci_transmit_led_message(ap, state, size);
-}
-
-static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
-{
- struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
- u32 port_led_state = emp->led_state;
-
- /* save the desired Activity LED behavior */
- if (val == OFF) {
- /* clear LFLAG */
- link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
-
- /* set the LED to OFF */
- port_led_state &= EM_MSG_LED_VALUE_OFF;
- port_led_state |= (ap->port_no | (link->pmp << 8));
- ahci_transmit_led_message(ap, port_led_state, 4);
- } else {
- link->flags |= ATA_LFLAG_SW_ACTIVITY;
- if (val == BLINK_OFF) {
- /* set LED to ON for idle */
- port_led_state &= EM_MSG_LED_VALUE_OFF;
- port_led_state |= (ap->port_no | (link->pmp << 8));
- port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
- ahci_transmit_led_message(ap, port_led_state, 4);
- }
- }
- emp->blink_policy = val;
- return 0;
-}
-
-static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
-{
- struct ata_link *link = dev->link;
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
-
- /* display the saved value of activity behavior for this
- * disk.
- */
- return sprintf(buf, "%d\n", emp->blink_policy);
-}
-
-static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
- int port_no, void __iomem *mmio,
- void __iomem *port_mmio)
-{
- const char *emsg = NULL;
- int rc;
- u32 tmp;
-
- /* make sure port is not active */
- rc = ahci_deinit_port(ap, &emsg);
- if (rc)
- dev_printk(KERN_WARNING, &pdev->dev,
- "%s (%d)\n", emsg, rc);
-
- /* clear SError */
- tmp = readl(port_mmio + PORT_SCR_ERR);
- VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
- writel(tmp, port_mmio + PORT_SCR_ERR);
-
- /* clear port IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
- if (tmp)
- writel(tmp, port_mmio + PORT_IRQ_STAT);
-
- writel(1 << port_no, mmio + HOST_IRQ_STAT);
-}
-
-static void ahci_init_controller(struct ata_host *host)
+static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- int i;
void __iomem *port_mmio;
u32 tmp;
int mv;
@@ -1678,220 +468,7 @@ static void ahci_init_controller(struct ata_host *host)
writel(tmp, port_mmio + PORT_IRQ_STAT);
}
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
-
- port_mmio = ahci_port_base(ap);
- if (ata_port_is_dummy(ap))
- continue;
-
- ahci_port_init(pdev, ap, i, mmio, port_mmio);
- }
-
- tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
- writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
- tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
-}
-
-static void ahci_dev_config(struct ata_device *dev)
-{
- struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
-
- if (hpriv->flags & AHCI_HFLAG_SECT255) {
- dev->max_sectors = 255;
- ata_dev_printk(dev, KERN_INFO,
- "SB600 AHCI: limiting to 255 sectors per cmd\n");
- }
-}
-
-static unsigned int ahci_dev_classify(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ata_taskfile tf;
- u32 tmp;
-
- tmp = readl(port_mmio + PORT_SIG);
- tf.lbah = (tmp >> 24) & 0xff;
- tf.lbam = (tmp >> 16) & 0xff;
- tf.lbal = (tmp >> 8) & 0xff;
- tf.nsect = (tmp) & 0xff;
-
- return ata_dev_classify(&tf);
-}
-
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
- u32 opts)
-{
- dma_addr_t cmd_tbl_dma;
-
- cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
-
- pp->cmd_slot[tag].opts = cpu_to_le32(opts);
- pp->cmd_slot[tag].status = 0;
- pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
- pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
-}
-
-static int ahci_kick_engine(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_host_priv *hpriv = ap->host->private_data;
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
- u32 tmp;
- int busy, rc;
-
- /* stop engine */
- rc = ahci_stop_engine(ap);
- if (rc)
- goto out_restart;
-
- /* need to do CLO?
- * always do CLO if PMP is attached (AHCI-1.3 9.2)
- */
- busy = status & (ATA_BUSY | ATA_DRQ);
- if (!busy && !sata_pmp_attached(ap)) {
- rc = 0;
- goto out_restart;
- }
-
- if (!(hpriv->cap & HOST_CAP_CLO)) {
- rc = -EOPNOTSUPP;
- goto out_restart;
- }
-
- /* perform CLO */
- tmp = readl(port_mmio + PORT_CMD);
- tmp |= PORT_CMD_CLO;
- writel(tmp, port_mmio + PORT_CMD);
-
- rc = 0;
- tmp = ata_wait_register(port_mmio + PORT_CMD,
- PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
- if (tmp & PORT_CMD_CLO)
- rc = -EIO;
-
- /* restart engine */
- out_restart:
- ahci_start_engine(ap);
- return rc;
-}
-
-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
- struct ata_taskfile *tf, int is_cmd, u16 flags,
- unsigned long timeout_msec)
-{
- const u32 cmd_fis_len = 5; /* five dwords */
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u8 *fis = pp->cmd_tbl;
- u32 tmp;
-
- /* prep the command */
- ata_tf_to_fis(tf, pmp, is_cmd, fis);
- ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
-
- /* issue & wait */
- writel(1, port_mmio + PORT_CMD_ISSUE);
-
- if (timeout_msec) {
- tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
- 1, timeout_msec);
- if (tmp & 0x1) {
- ahci_kick_engine(ap);
- return -EBUSY;
- }
- } else
- readl(port_mmio + PORT_CMD_ISSUE); /* flush */
-
- return 0;
-}
-
-static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
- int pmp, unsigned long deadline,
- int (*check_ready)(struct ata_link *link))
-{
- struct ata_port *ap = link->ap;
- struct ahci_host_priv *hpriv = ap->host->private_data;
- const char *reason = NULL;
- unsigned long now, msecs;
- struct ata_taskfile tf;
- int rc;
-
- DPRINTK("ENTER\n");
-
- /* prepare for SRST (AHCI-1.1 10.4.1) */
- rc = ahci_kick_engine(ap);
- if (rc && rc != -EOPNOTSUPP)
- ata_link_printk(link, KERN_WARNING,
- "failed to reset engine (errno=%d)\n", rc);
-
- ata_tf_init(link->device, &tf);
-
-	/* issue the first H2D Register FIS */
- msecs = 0;
- now = jiffies;
- if (time_after(now, deadline))
- msecs = jiffies_to_msecs(deadline - now);
-
- tf.ctl |= ATA_SRST;
- if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
- AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
- rc = -EIO;
- reason = "1st FIS failed";
- goto fail;
- }
-
- /* spec says at least 5us, but be generous and sleep for 1ms */
- msleep(1);
-
-	/* issue the second H2D Register FIS */
- tf.ctl &= ~ATA_SRST;
- ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
-
- /* wait for link to become ready */
- rc = ata_wait_after_reset(link, deadline, check_ready);
- if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
- /*
- * Workaround for cases where link online status can't
- * be trusted. Treat device readiness timeout as link
- * offline.
- */
- ata_link_printk(link, KERN_INFO,
- "device not ready, treating as offline\n");
- *class = ATA_DEV_NONE;
- } else if (rc) {
- /* link occupied, -ENODEV too is an error */
- reason = "device not ready";
- goto fail;
- } else
- *class = ahci_dev_classify(ap);
-
- DPRINTK("EXIT, class=%u\n", *class);
- return 0;
-
- fail:
- ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
- return rc;
-}
-
-static int ahci_check_ready(struct ata_link *link)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
-
- return ata_check_ready(status);
-}
-
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- int pmp = sata_srst_pmp(link);
-
- DPRINTK("ENTER\n");
-
- return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+ ahci_init_controller(host);
}
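Per-port setup and the final interrupt enable are likewise delegated to the shared ahci_init_controller(); judging from the code removed above, its tail is expected to end by turning on global HBA interrupts, roughly:

	/* Sketch only: global interrupt enable, as in the removed code above. */
	static void example_enable_hba_irq(void __iomem *mmio)
	{
		u32 tmp = readl(mmio + HOST_CTL);

		writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
		readl(mmio + HOST_CTL);	/* flush */
	}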
static int ahci_sb600_check_ready(struct ata_link *link)
@@ -1943,38 +520,6 @@ static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
return rc;
}
-static int ahci_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
- struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
- struct ata_taskfile tf;
- bool online;
- int rc;
-
- DPRINTK("ENTER\n");
-
- ahci_stop_engine(ap);
-
- /* clear D2H reception area to properly wait for D2H FIS */
- ata_tf_init(link->device, &tf);
- tf.command = 0x80;
- ata_tf_to_fis(&tf, 0, 0, d2h_fis);
-
- rc = sata_link_hardreset(link, timing, deadline, &online,
- ahci_check_ready);
-
- ahci_start_engine(ap);
-
- if (online)
- *class = ahci_dev_classify(ap);
-
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
- return rc;
-}
-
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
@@ -2043,605 +588,12 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
return rc;
}
-static void ahci_postreset(struct ata_link *link, unsigned int *class)
-{
- struct ata_port *ap = link->ap;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 new_tmp, tmp;
-
- ata_std_postreset(link, class);
-
- /* Make sure port's ATAPI bit is set appropriately */
- new_tmp = tmp = readl(port_mmio + PORT_CMD);
- if (*class == ATA_DEV_ATAPI)
- new_tmp |= PORT_CMD_ATAPI;
- else
- new_tmp &= ~PORT_CMD_ATAPI;
- if (new_tmp != tmp) {
- writel(new_tmp, port_mmio + PORT_CMD);
- readl(port_mmio + PORT_CMD); /* flush */
- }
-}
-
-static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
-{
- struct scatterlist *sg;
- struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
- unsigned int si;
-
- VPRINTK("ENTER\n");
-
- /*
- * Next, the S/G list.
- */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- dma_addr_t addr = sg_dma_address(sg);
- u32 sg_len = sg_dma_len(sg);
-
- ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
- ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
- ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
- }
-
- return si;
-}
-
-static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ahci_port_priv *pp = ap->private_data;
-
- if (!sata_pmp_attached(ap) || pp->fbs_enabled)
- return ata_std_qc_defer(qc);
- else
- return sata_pmp_qc_defer_cmd_switch(qc);
-}
-
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ahci_port_priv *pp = ap->private_data;
- int is_atapi = ata_is_atapi(qc->tf.protocol);
- void *cmd_tbl;
- u32 opts;
- const u32 cmd_fis_len = 5; /* five dwords */
- unsigned int n_elem;
-
- /*
- * Fill in command table information. First, the header,
- * a SATA Register - Host to Device command FIS.
- */
- cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
-
- ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
- if (is_atapi) {
- memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
- memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
- }
-
- n_elem = 0;
- if (qc->flags & ATA_QCFLAG_DMAMAP)
- n_elem = ahci_fill_sg(qc, cmd_tbl);
-
- /*
- * Fill in command slot information.
- */
- opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
- if (qc->tf.flags & ATA_TFLAG_WRITE)
- opts |= AHCI_CMD_WRITE;
- if (is_atapi)
- opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
-
- ahci_fill_cmd_slot(pp, qc->tag, opts);
-}
-
-static void ahci_fbs_dec_intr(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs = readl(port_mmio + PORT_FBS);
- int retries = 3;
-
- DPRINTK("ENTER\n");
- BUG_ON(!pp->fbs_enabled);
-
- /* time to wait for DEC is not specified by AHCI spec,
- * add a retry loop for safety.
- */
- writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- while ((fbs & PORT_FBS_DEC) && retries--) {
- udelay(1);
- fbs = readl(port_mmio + PORT_FBS);
- }
-
- if (fbs & PORT_FBS_DEC)
- dev_printk(KERN_ERR, ap->host->dev,
- "failed to clear device error\n");
-}
-
-static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct ahci_port_priv *pp = ap->private_data;
- struct ata_eh_info *host_ehi = &ap->link.eh_info;
- struct ata_link *link = NULL;
- struct ata_queued_cmd *active_qc;
- struct ata_eh_info *active_ehi;
- bool fbs_need_dec = false;
- u32 serror;
-
- /* determine active link with error */
- if (pp->fbs_enabled) {
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs = readl(port_mmio + PORT_FBS);
- int pmp = fbs >> PORT_FBS_DWE_OFFSET;
-
- if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
- ata_link_online(&ap->pmp_link[pmp])) {
- link = &ap->pmp_link[pmp];
- fbs_need_dec = true;
- }
-
- } else
- ata_for_each_link(link, ap, EDGE)
- if (ata_link_active(link))
- break;
-
- if (!link)
- link = &ap->link;
-
- active_qc = ata_qc_from_tag(ap, link->active_tag);
- active_ehi = &link->eh_info;
-
- /* record irq stat */
- ata_ehi_clear_desc(host_ehi);
- ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
-
- /* AHCI needs SError cleared; otherwise, it might lock up */
- ahci_scr_read(&ap->link, SCR_ERROR, &serror);
- ahci_scr_write(&ap->link, SCR_ERROR, serror);
- host_ehi->serror |= serror;
-
- /* some controllers set IRQ_IF_ERR on device errors, ignore it */
- if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
- irq_stat &= ~PORT_IRQ_IF_ERR;
-
- if (irq_stat & PORT_IRQ_TF_ERR) {
- /* If qc is active, charge it; otherwise, the active
- * link. There's no active qc on NCQ errors. It will
- * be determined by EH by reading log page 10h.
- */
- if (active_qc)
- active_qc->err_mask |= AC_ERR_DEV;
- else
- active_ehi->err_mask |= AC_ERR_DEV;
-
- if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
- host_ehi->serror &= ~SERR_INTERNAL;
- }
-
- if (irq_stat & PORT_IRQ_UNK_FIS) {
- u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
-
- active_ehi->err_mask |= AC_ERR_HSM;
- active_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(active_ehi,
- "unknown FIS %08x %08x %08x %08x" ,
- unk[0], unk[1], unk[2], unk[3]);
- }
-
- if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
- active_ehi->err_mask |= AC_ERR_HSM;
- active_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(active_ehi, "incorrect PMP");
- }
-
- if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
- host_ehi->err_mask |= AC_ERR_HOST_BUS;
- host_ehi->action |= ATA_EH_RESET;
- ata_ehi_push_desc(host_ehi, "host bus error");
- }
-
- if (irq_stat & PORT_IRQ_IF_ERR) {
- if (fbs_need_dec)
- active_ehi->err_mask |= AC_ERR_DEV;
- else {
- host_ehi->err_mask |= AC_ERR_ATA_BUS;
- host_ehi->action |= ATA_EH_RESET;
- }
-
- ata_ehi_push_desc(host_ehi, "interface fatal error");
- }
-
- if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
- ata_ehi_hotplugged(host_ehi);
- ata_ehi_push_desc(host_ehi, "%s",
- irq_stat & PORT_IRQ_CONNECT ?
- "connection status changed" : "PHY RDY changed");
- }
-
- /* okay, let's hand over to EH */
-
- if (irq_stat & PORT_IRQ_FREEZE)
- ata_port_freeze(ap);
- else if (fbs_need_dec) {
- ata_link_abort(link);
- ahci_fbs_dec_intr(ap);
- } else
- ata_port_abort(ap);
-}
-
-static void ahci_port_intr(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ata_eh_info *ehi = &ap->link.eh_info;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_host_priv *hpriv = ap->host->private_data;
- int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
- u32 status, qc_active = 0;
- int rc;
-
- status = readl(port_mmio + PORT_IRQ_STAT);
- writel(status, port_mmio + PORT_IRQ_STAT);
-
- /* ignore BAD_PMP while resetting */
- if (unlikely(resetting))
- status &= ~PORT_IRQ_BAD_PMP;
-
- /* If we are getting PhyRdy, this is
- * just a power state change, we should
- * clear out this, plus the PhyRdy/Comm
- * Wake bits from Serror
- */
- if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
- (status & PORT_IRQ_PHYRDY)) {
- status &= ~PORT_IRQ_PHYRDY;
- ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
- }
-
- if (unlikely(status & PORT_IRQ_ERROR)) {
- ahci_error_intr(ap, status);
- return;
- }
-
- if (status & PORT_IRQ_SDB_FIS) {
- /* If SNotification is available, leave notification
- * handling to sata_async_notification(). If not,
- * emulate it by snooping SDB FIS RX area.
- *
- * Snooping FIS RX area is probably cheaper than
- * poking SNotification but some constrollers which
- * implement SNotification, ICH9 for example, don't
- * store AN SDB FIS into receive area.
- */
- if (hpriv->cap & HOST_CAP_SNTF)
- sata_async_notification(ap);
- else {
- /* If the 'N' bit in word 0 of the FIS is set,
- * we just received asynchronous notification.
- * Tell libata about it.
- *
- * Lack of SNotification should not appear in
- * ahci 1.2, so the workaround is unnecessary
- * when FBS is enabled.
- */
- if (pp->fbs_enabled)
- WARN_ON_ONCE(1);
- else {
- const __le32 *f = pp->rx_fis + RX_FIS_SDB;
- u32 f0 = le32_to_cpu(f[0]);
- if (f0 & (1 << 15))
- sata_async_notification(ap);
- }
- }
- }
-
- /* pp->active_link is not reliable once FBS is enabled, both
- * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
- * NCQ and non-NCQ commands may be in flight at the same time.
- */
- if (pp->fbs_enabled) {
- if (ap->qc_active) {
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
- }
- } else {
- /* pp->active_link is valid iff any command is in flight */
- if (ap->qc_active && pp->active_link->sactive)
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- else
- qc_active = readl(port_mmio + PORT_CMD_ISSUE);
- }
-
- rc = ata_qc_complete_multiple(ap, qc_active);
-
- /* while resetting, invalid completions are expected */
- if (unlikely(rc < 0 && !resetting)) {
- ehi->err_mask |= AC_ERR_HSM;
- ehi->action |= ATA_EH_RESET;
- ata_port_freeze(ap);
- }
-}
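
Completion handling needs a bitmask of tags still owned by the hardware; with FBS enabled both SActive and CI have to be OR'ed, because NCQ and non-NCQ commands can be outstanding at once. A small sketch of that selection, with the register reads replaced by plain parameters (the values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the qc_active selection in ahci_port_intr(); registers are
 * passed in as plain values instead of being read from port MMIO.
 */
static uint32_t pending_tags(int fbs_enabled, int ncq_phase,
			     uint32_t sactive, uint32_t ci)
{
	if (fbs_enabled)
		return sactive | ci;		/* NCQ and non-NCQ may be mixed */
	return ncq_phase ? sactive : ci;
}

int main(void)
{
	printf("%08x\n", pending_tags(0, 1, 0x0000000c, 0)); /* tags 2 and 3 pending */
	return 0;
}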
-
-static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct ahci_host_priv *hpriv;
- unsigned int i, handled = 0;
- void __iomem *mmio;
- u32 irq_stat, irq_masked;
-
- VPRINTK("ENTER\n");
-
- hpriv = host->private_data;
- mmio = host->iomap[AHCI_PCI_BAR];
-
- /* sigh. 0xffffffff is a valid return from h/w */
- irq_stat = readl(mmio + HOST_IRQ_STAT);
- if (!irq_stat)
- return IRQ_NONE;
-
- irq_masked = irq_stat & hpriv->port_map;
-
- spin_lock(&host->lock);
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
-
- if (!(irq_masked & (1 << i)))
- continue;
-
- ap = host->ports[i];
- if (ap) {
- ahci_port_intr(ap);
- VPRINTK("port %u\n", i);
- } else {
- VPRINTK("port %u (no irq)\n", i);
- if (ata_ratelimit())
- dev_printk(KERN_WARNING, host->dev,
- "interrupt on disabled port %u\n", i);
- }
-
- handled = 1;
- }
-
- /* HOST_IRQ_STAT behaves as level triggered latch meaning that
- * it should be cleared after all the port events are cleared;
- * otherwise, it will raise a spurious interrupt after each
- * valid one. Please read section 10.6.2 of ahci 1.1 for more
- * information.
- *
- * Also, use the unmasked value to clear interrupt as spurious
- * pending event on a dummy port might cause screaming IRQ.
- */
- writel(irq_stat, mmio + HOST_IRQ_STAT);
-
- spin_unlock(&host->lock);
-
- VPRINTK("EXIT\n");
-
- return IRQ_RETVAL(handled);
-}
-
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
-
- /* Keep track of the currently active link. It will be used
- * in completion path to determine whether NCQ phase is in
- * progress.
- */
- pp->active_link = qc->dev->link;
-
- if (qc->tf.protocol == ATA_PROT_NCQ)
- writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
-
- if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
- u32 fbs = readl(port_mmio + PORT_FBS);
- fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
- fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
- writel(fbs, port_mmio + PORT_FBS);
- pp->fbs_last_dev = qc->dev->link->pmp;
- }
-
- writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
-
- ahci_sw_activity(qc->dev->link);
-
- return 0;
-}
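
Before issuing to a port-multiplier device with FBS active, the PORT_FBS.DEV field has to name the target device. The sketch below mirrors the read-modify-write done in ahci_qc_issue() above, operating on a plain variable instead of the MMIO register (the starting register value is made up):

#include <stdint.h>
#include <stdio.h>

#define FBS_DEV_OFFSET	8
#define FBS_DEV_MASK	(0xfu << FBS_DEV_OFFSET)
#define FBS_DEC		(1u << 1)

/* Mirror of the PORT_FBS update in ahci_qc_issue(), without MMIO. */
static uint32_t fbs_select_dev(uint32_t fbs, unsigned pmp)
{
	fbs &= ~(FBS_DEV_MASK | FBS_DEC);	/* clear old device, clear DEC */
	fbs |= (uint32_t)pmp << FBS_DEV_OFFSET;	/* select new target device   */
	return fbs;
}

int main(void)
{
	uint32_t fbs = 0x00000301;		/* FBS enabled, old DEV = 3 */

	printf("PORT_FBS -> %08x\n", fbs_select_dev(fbs, 5));
	return 0;
}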
-
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
-{
- struct ahci_port_priv *pp = qc->ap->private_data;
- u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
-
- if (pp->fbs_enabled)
- d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
-
- ata_tf_from_fis(d2h_fis, &qc->result_tf);
- return true;
-}
-
-static void ahci_freeze(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
-
- /* turn IRQ off */
- writel(0, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_thaw(struct ata_port *ap)
-{
- void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 tmp;
- struct ahci_port_priv *pp = ap->private_data;
-
- /* clear IRQ */
- tmp = readl(port_mmio + PORT_IRQ_STAT);
- writel(tmp, port_mmio + PORT_IRQ_STAT);
- writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
-
- /* turn IRQ back on */
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_error_handler(struct ata_port *ap)
-{
- if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
- /* restart engine */
- ahci_stop_engine(ap);
- ahci_start_engine(ap);
- }
-
- sata_pmp_error_handler(ap);
-}
-
-static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
-
- /* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
- ahci_kick_engine(ap);
-}
-
-static void ahci_enable_fbs(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs;
- int rc;
-
- if (!pp->fbs_supported)
- return;
-
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN) {
- pp->fbs_enabled = true;
- pp->fbs_last_dev = -1; /* initialization */
- return;
- }
-
- rc = ahci_stop_engine(ap);
- if (rc)
- return;
-
- writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN) {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
- pp->fbs_enabled = true;
- pp->fbs_last_dev = -1; /* initialization */
- } else
- dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
-
- ahci_start_engine(ap);
-}
-
-static void ahci_disable_fbs(struct ata_port *ap)
-{
- struct ahci_port_priv *pp = ap->private_data;
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 fbs;
- int rc;
-
- if (!pp->fbs_supported)
- return;
-
- fbs = readl(port_mmio + PORT_FBS);
- if ((fbs & PORT_FBS_EN) == 0) {
- pp->fbs_enabled = false;
- return;
- }
-
- rc = ahci_stop_engine(ap);
- if (rc)
- return;
-
- writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
- fbs = readl(port_mmio + PORT_FBS);
- if (fbs & PORT_FBS_EN)
- dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
- else {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
- pp->fbs_enabled = false;
- }
-
- ahci_start_engine(ap);
-}
-
-static void ahci_pmp_attach(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
- u32 cmd;
-
- cmd = readl(port_mmio + PORT_CMD);
- cmd |= PORT_CMD_PMP;
- writel(cmd, port_mmio + PORT_CMD);
-
- ahci_enable_fbs(ap);
-
- pp->intr_mask |= PORT_IRQ_BAD_PMP;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static void ahci_pmp_detach(struct ata_port *ap)
-{
- void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
- u32 cmd;
-
- ahci_disable_fbs(ap);
-
- cmd = readl(port_mmio + PORT_CMD);
- cmd &= ~PORT_CMD_PMP;
- writel(cmd, port_mmio + PORT_CMD);
-
- pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
-}
-
-static int ahci_port_resume(struct ata_port *ap)
-{
- ahci_power_up(ap);
- ahci_start_port(ap);
-
- if (sata_pmp_attached(ap))
- ahci_pmp_attach(ap);
- else
- ahci_pmp_detach(ap);
-
- return 0;
-}
-
#ifdef CONFIG_PM
-static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
-{
- const char *emsg = NULL;
- int rc;
-
- rc = ahci_deinit_port(ap, &emsg);
- if (rc == 0)
- ahci_power_down(ap);
- else {
- ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
- ahci_start_port(ap);
- }
-
- return rc;
-}
-
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
+ void __iomem *mmio = hpriv->mmio;
u32 ctl;
if (mesg.event & PM_EVENT_SUSPEND &&
@@ -2675,11 +627,11 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
- ahci_init_controller(host);
+ ahci_pci_init_controller(host);
}
ata_host_resume(host);
@@ -2688,92 +640,6 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
}
#endif
-static int ahci_port_start(struct ata_port *ap)
-{
- struct ahci_host_priv *hpriv = ap->host->private_data;
- struct device *dev = ap->host->dev;
- struct ahci_port_priv *pp;
- void *mem;
- dma_addr_t mem_dma;
- size_t dma_sz, rx_fis_sz;
-
- pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
- if (!pp)
- return -ENOMEM;
-
- /* check FBS capability */
- if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
- void __iomem *port_mmio = ahci_port_base(ap);
- u32 cmd = readl(port_mmio + PORT_CMD);
- if (cmd & PORT_CMD_FBSCP)
- pp->fbs_supported = true;
- else
- dev_printk(KERN_WARNING, dev,
- "The port is not capable of FBS\n");
- }
-
- if (pp->fbs_supported) {
- dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
- rx_fis_sz = AHCI_RX_FIS_SZ * 16;
- } else {
- dma_sz = AHCI_PORT_PRIV_DMA_SZ;
- rx_fis_sz = AHCI_RX_FIS_SZ;
- }
-
- mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
- memset(mem, 0, dma_sz);
-
- /*
- * First item in chunk of DMA memory: 32-slot command table,
- * 32 bytes each in size
- */
- pp->cmd_slot = mem;
- pp->cmd_slot_dma = mem_dma;
-
- mem += AHCI_CMD_SLOT_SZ;
- mem_dma += AHCI_CMD_SLOT_SZ;
-
- /*
- * Second item: Received-FIS area
- */
- pp->rx_fis = mem;
- pp->rx_fis_dma = mem_dma;
-
- mem += rx_fis_sz;
- mem_dma += rx_fis_sz;
-
- /*
- * Third item: data area for storing a single command
- * and its scatter-gather table
- */
- pp->cmd_tbl = mem;
- pp->cmd_tbl_dma = mem_dma;
-
- /*
- * Save off initial list of interrupts to be enabled.
- * This could be changed later
- */
- pp->intr_mask = DEF_PORT_IRQ;
-
- ap->private_data = pp;
-
- /* engage engines, captain */
- return ahci_port_resume(ap);
-}
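
The per-port DMA buffer set up here (the routine moves into libahci.c) is laid out as 32 command headers, the received-FIS area, then an array of 32 command tables indexed by tag. A user-space sketch of the offset arithmetic, with sizes copied from the AHCI_* constants in ahci.h below:

#include <stdio.h>

/* Sizes copied from the AHCI_* constants declared in ahci.h below. */
#define CMD_SZ		32
#define MAX_CMDS	32
#define CMD_SLOT_SZ	(MAX_CMDS * CMD_SZ)		/* 32 command headers     */
#define RX_FIS_SZ	256
#define CMD_TBL_SZ	(0x80 + 168 * 16)		/* FIS/ACMD + 168 PRDs    */

int main(void)
{
	int fbs = 0;					/* set to 1 for an FBS-capable port */
	unsigned rx_fis = fbs ? RX_FIS_SZ * 16 : RX_FIS_SZ;
	unsigned cmd_tbl = CMD_SLOT_SZ + rx_fis;	/* start of command tables */
	unsigned tag = 7;

	printf("rx_fis offset        : 0x%x\n", CMD_SLOT_SZ);
	printf("cmd table for tag %-2u : 0x%x\n", tag, cmd_tbl + tag * CMD_TBL_SZ);
	printf("total DMA area       : 0x%x bytes\n",
	       cmd_tbl + MAX_CMDS * CMD_TBL_SZ);
	return 0;
}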
-
-static void ahci_port_stop(struct ata_port *ap)
-{
- const char *emsg = NULL;
- int rc;
-
- /* de-initialize port */
- rc = ahci_deinit_port(ap, &emsg);
- if (rc)
- ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
-}
-
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
@@ -2806,31 +672,12 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
return 0;
}
-static void ahci_print_info(struct ata_host *host)
+static void ahci_pci_print_info(struct ata_host *host)
{
- struct ahci_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
- void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
- u32 vers, cap, cap2, impl, speed;
- const char *speed_s;
u16 cc;
const char *scc_s;
- vers = readl(mmio + HOST_VERSION);
- cap = hpriv->cap;
- cap2 = hpriv->cap2;
- impl = hpriv->port_map;
-
- speed = (cap >> 20) & 0xf;
- if (speed == 1)
- speed_s = "1.5";
- else if (speed == 2)
- speed_s = "3";
- else if (speed == 3)
- speed_s = "6";
- else
- speed_s = "?";
-
pci_read_config_word(pdev, 0x0a, &cc);
if (cc == PCI_CLASS_STORAGE_IDE)
scc_s = "IDE";
@@ -2841,50 +688,7 @@ static void ahci_print_info(struct ata_host *host)
else
scc_s = "unknown";
- dev_printk(KERN_INFO, &pdev->dev,
- "AHCI %02x%02x.%02x%02x "
- "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
- ,
-
- (vers >> 24) & 0xff,
- (vers >> 16) & 0xff,
- (vers >> 8) & 0xff,
- vers & 0xff,
-
- ((cap >> 8) & 0x1f) + 1,
- (cap & 0x1f) + 1,
- speed_s,
- impl,
- scc_s);
-
- dev_printk(KERN_INFO, &pdev->dev,
- "flags: "
- "%s%s%s%s%s%s%s"
- "%s%s%s%s%s%s%s"
- "%s%s%s%s%s%s\n"
- ,
-
- cap & HOST_CAP_64 ? "64bit " : "",
- cap & HOST_CAP_NCQ ? "ncq " : "",
- cap & HOST_CAP_SNTF ? "sntf " : "",
- cap & HOST_CAP_MPS ? "ilck " : "",
- cap & HOST_CAP_SSS ? "stag " : "",
- cap & HOST_CAP_ALPM ? "pm " : "",
- cap & HOST_CAP_LED ? "led " : "",
- cap & HOST_CAP_CLO ? "clo " : "",
- cap & HOST_CAP_ONLY ? "only " : "",
- cap & HOST_CAP_PMP ? "pmp " : "",
- cap & HOST_CAP_FBS ? "fbs " : "",
- cap & HOST_CAP_PIO_MULTI ? "pio " : "",
- cap & HOST_CAP_SSC ? "slum " : "",
- cap & HOST_CAP_PART ? "part " : "",
- cap & HOST_CAP_CCC ? "ccc " : "",
- cap & HOST_CAP_EMS ? "ems " : "",
- cap & HOST_CAP_SXS ? "sxs " : "",
- cap2 & HOST_CAP2_APST ? "apst " : "",
- cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
- cap2 & HOST_CAP2_BOH ? "boh " : ""
- );
+ ahci_print_info(host, scc_s);
}
/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
@@ -3308,41 +1112,28 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
pci_intx(pdev, 1);
+ hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+
/* save initial config */
- ahci_save_initial_config(pdev, hpriv);
+ ahci_pci_save_initial_config(pdev, hpriv);
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
- /* Auto-activate optimization is supposed to be supported on
- all AHCI controllers indicating NCQ support, but it seems
- to be broken at least on some NVIDIA MCP79 chipsets.
- Until we get info on which NVIDIA chipsets don't have this
- issue, if any, disable AA on all NVIDIA AHCIs. */
- if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+ /*
+ * Auto-activate optimization is supposed to be
+ * supported on all AHCI controllers indicating NCQ
+ * capability, but it seems to be broken on some
+ * chipsets including NVIDIAs.
+ */
+ if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
pi.flags |= ATA_FLAG_FPDMA_AA;
}
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
- if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
- u8 messages;
- void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
- u32 em_loc = readl(mmio + HOST_EM_LOC);
- u32 em_ctl = readl(mmio + HOST_EM_CTL);
-
- messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
-
- /* we only support LED message type right now */
- if ((messages & 0x01) && (ahci_em_messages == 1)) {
- /* store em_loc */
- hpriv->em_loc = ((em_loc >> 16) * 4);
- pi.flags |= ATA_FLAG_EM;
- if (!(em_ctl & EM_CTL_ALHD))
- pi.flags |= ATA_FLAG_SW_ACTIVITY;
- }
- }
+ ahci_set_em_messages(hpriv, &pi);
if (ahci_broken_system_poweroff(pdev)) {
pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
@@ -3372,7 +1163,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
- host->iomap = pcim_iomap_table(pdev);
host->private_data = hpriv;
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
@@ -3395,7 +1185,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set enclosure management message type */
if (ap->flags & ATA_FLAG_EM)
- ap->em_message_type = ahci_em_messages;
+ ap->em_message_type = hpriv->em_msg_type;
/* disabled/not-implemented port */
@@ -3414,12 +1204,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
- ahci_init_controller(host);
- ahci_print_info(host);
+ ahci_pci_init_controller(host);
+ ahci_pci_print_info(host);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
new file mode 100644
index 000000000000..7113c5724471
--- /dev/null
+++ b/drivers/ata/ahci.h
@@ -0,0 +1,343 @@
+/*
+ * ahci.h - Common AHCI SATA definitions and declarations
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#ifndef _AHCI_H
+#define _AHCI_H
+
+#include <linux/libata.h>
+
+/* Enclosure Management Control */
+#define EM_CTRL_MSG_TYPE 0x000f0000
+
+/* Enclosure Management LED Message Type */
+#define EM_MSG_LED_HBA_PORT 0x0000000f
+#define EM_MSG_LED_PMP_SLOT 0x0000ff00
+#define EM_MSG_LED_VALUE 0xffff0000
+#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
+#define EM_MSG_LED_VALUE_OFF 0xfff80000
+#define EM_MSG_LED_VALUE_ON 0x00010000
+
+enum {
+ AHCI_MAX_PORTS = 32,
+ AHCI_MAX_SG = 168, /* hardware max is 64K */
+ AHCI_DMA_BOUNDARY = 0xffffffff,
+ AHCI_MAX_CMDS = 32,
+ AHCI_CMD_SZ = 32,
+ AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
+ AHCI_RX_FIS_SZ = 256,
+ AHCI_CMD_TBL_CDB = 0x40,
+ AHCI_CMD_TBL_HDR_SZ = 0x80,
+ AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
+ AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
+ AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
+ AHCI_RX_FIS_SZ,
+ AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
+ AHCI_CMD_TBL_AR_SZ +
+ (AHCI_RX_FIS_SZ * 16),
+ AHCI_IRQ_ON_SG = (1 << 31),
+ AHCI_CMD_ATAPI = (1 << 5),
+ AHCI_CMD_WRITE = (1 << 6),
+ AHCI_CMD_PREFETCH = (1 << 7),
+ AHCI_CMD_RESET = (1 << 8),
+ AHCI_CMD_CLR_BUSY = (1 << 10),
+
+ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
+ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
+ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
+
+ /* global controller registers */
+ HOST_CAP = 0x00, /* host capabilities */
+ HOST_CTL = 0x04, /* global host control */
+ HOST_IRQ_STAT = 0x08, /* interrupt status */
+ HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
+ HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
+ HOST_EM_LOC = 0x1c, /* Enclosure Management location */
+ HOST_EM_CTL = 0x20, /* Enclosure Management Control */
+ HOST_CAP2 = 0x24, /* host capabilities, extended */
+
+ /* HOST_CTL bits */
+ HOST_RESET = (1 << 0), /* reset controller; self-clear */
+ HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
+ HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
+
+ /* HOST_CAP bits */
+ HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
+ HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
+ HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
+ HOST_CAP_PART = (1 << 13), /* Partial state capable */
+ HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
+ HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
+ HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
+ HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
+ HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
+ HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+ HOST_CAP_LED = (1 << 25), /* Supports activity LED */
+ HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
+ HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
+ HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
+ HOST_CAP_SNTF = (1 << 29), /* SNotification register */
+ HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
+ HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
+
+ /* HOST_CAP2 bits */
+ HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
+ HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
+ HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
+
+ /* registers for each SATA port */
+ PORT_LST_ADDR = 0x00, /* command list DMA addr */
+ PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
+ PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
+ PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
+ PORT_IRQ_STAT = 0x10, /* interrupt status */
+ PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
+ PORT_CMD = 0x18, /* port command */
+ PORT_TFDATA = 0x20, /* taskfile data */
+ PORT_SIG = 0x24, /* device TF signature */
+ PORT_CMD_ISSUE = 0x38, /* command issue */
+ PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
+ PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
+ PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
+ PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
+ PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
+ PORT_FBS = 0x40, /* FIS-based Switching */
+
+ /* PORT_IRQ_{STAT,MASK} bits */
+ PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
+ PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
+ PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
+ PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
+ PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
+ PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
+ PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
+ PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
+
+ PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
+ PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
+ PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
+ PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
+ PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
+ PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
+ PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
+ PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
+ PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
+
+ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
+ PORT_IRQ_IF_ERR |
+ PORT_IRQ_CONNECT |
+ PORT_IRQ_PHYRDY |
+ PORT_IRQ_UNK_FIS |
+ PORT_IRQ_BAD_PMP,
+ PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
+ PORT_IRQ_TF_ERR |
+ PORT_IRQ_HBUS_DATA_ERR,
+ DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
+ PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
+ PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
+
+ /* PORT_CMD bits */
+ PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
+ PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
+ PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
+ PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
+ PORT_CMD_PMP = (1 << 17), /* PMP attached */
+ PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
+ PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
+ PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
+ PORT_CMD_CLO = (1 << 3), /* Command list override */
+ PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
+ PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
+ PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
+
+ PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
+ PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
+ PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
+ PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
+
+ PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
+ PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
+ PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
+ PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
+ PORT_FBS_SDE = (1 << 2), /* FBS single device error */
+ PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
+ PORT_FBS_EN = (1 << 0), /* Enable FBS */
+
+ /* hpriv->flags bits */
+ AHCI_HFLAG_NO_NCQ = (1 << 0),
+ AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
+ AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
+ AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
+ AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
+ AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
+ AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
+ AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
+ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
+ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
+ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
+ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
+ link offline */
+ AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
+ AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
+
+ /* ap->flags bits */
+
+ AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
+ ATA_FLAG_IPM,
+
+ ICH_MAP = 0x90, /* ICH MAP register */
+
+ /* em constants */
+ EM_MAX_SLOTS = 8,
+ EM_MAX_RETRY = 5,
+
+ /* em_ctl bits */
+ EM_CTL_RST = (1 << 9), /* Reset */
+ EM_CTL_TM = (1 << 8), /* Transmit Message */
+	EM_CTL_MR		= (1 << 0), /* Message Received */
+ EM_CTL_ALHD = (1 << 26), /* Activity LED */
+ EM_CTL_XMT = (1 << 25), /* Transmit Only */
+ EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
+
+ /* em message type */
+ EM_MSG_TYPE_LED = (1 << 0), /* LED */
+ EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */
+ EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */
+ EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */
+};
+
+struct ahci_cmd_hdr {
+ __le32 opts;
+ __le32 status;
+ __le32 tbl_addr;
+ __le32 tbl_addr_hi;
+ __le32 reserved[4];
+};
+
+struct ahci_sg {
+ __le32 addr;
+ __le32 addr_hi;
+ __le32 reserved;
+ __le32 flags_size;
+};
+
+struct ahci_em_priv {
+ enum sw_activity blink_policy;
+ struct timer_list timer;
+ unsigned long saved_activity;
+ unsigned long activity;
+ unsigned long led_state;
+};
+
+struct ahci_port_priv {
+ struct ata_link *active_link;
+ struct ahci_cmd_hdr *cmd_slot;
+ dma_addr_t cmd_slot_dma;
+ void *cmd_tbl;
+ dma_addr_t cmd_tbl_dma;
+ void *rx_fis;
+ dma_addr_t rx_fis_dma;
+ /* for NCQ spurious interrupt analysis */
+ unsigned int ncq_saw_d2h:1;
+ unsigned int ncq_saw_dmas:1;
+ unsigned int ncq_saw_sdb:1;
+ u32 intr_mask; /* interrupts to enable */
+ bool fbs_supported; /* set iff FBS is supported */
+ bool fbs_enabled; /* set iff FBS is enabled */
+ int fbs_last_dev; /* save FBS.DEV of last FIS */
+ /* enclosure management info per PM slot */
+ struct ahci_em_priv em_priv[EM_MAX_SLOTS];
+};
+
+struct ahci_host_priv {
+	void __iomem *mmio;		/* bus-independent mem map */
+ unsigned int flags; /* AHCI_HFLAG_* */
+ u32 cap; /* cap to use */
+ u32 cap2; /* cap2 to use */
+ u32 port_map; /* port map to use */
+ u32 saved_cap; /* saved initial cap */
+ u32 saved_cap2; /* saved initial cap2 */
+ u32 saved_port_map; /* saved initial port_map */
+ u32 em_loc; /* enclosure management location */
+ u32 em_buf_sz; /* EM buffer size in byte */
+ u32 em_msg_type; /* EM message type */
+};
+
+extern int ahci_ignore_sss;
+
+extern struct scsi_host_template ahci_sht;
+extern struct ata_port_operations ahci_ops;
+
+void ahci_save_initial_config(struct device *dev,
+ struct ahci_host_priv *hpriv,
+ unsigned int force_port_map,
+ unsigned int mask_port_map);
+void ahci_init_controller(struct ata_host *host);
+int ahci_reset_controller(struct ata_host *host);
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ int pmp, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link));
+
+int ahci_stop_engine(struct ata_port *ap);
+void ahci_start_engine(struct ata_port *ap);
+int ahci_check_ready(struct ata_link *link);
+int ahci_kick_engine(struct ata_port *ap);
+void ahci_set_em_messages(struct ahci_host_priv *hpriv,
+ struct ata_port_info *pi);
+int ahci_reset_em(struct ata_host *host);
+irqreturn_t ahci_interrupt(int irq, void *dev_instance);
+void ahci_print_info(struct ata_host *host, const char *scc_s);
+
+static inline void __iomem *__ahci_port_base(struct ata_host *host,
+ unsigned int port_no)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ return mmio + 0x100 + (port_no * 0x80);
+}
+
+static inline void __iomem *ahci_port_base(struct ata_port *ap)
+{
+ return __ahci_port_base(ap->host, ap->port_no);
+}
+
+static inline int ahci_nr_ports(u32 cap)
+{
+ return (cap & 0x1f) + 1;
+}
+
+#endif /* _AHCI_H */
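
The inline helpers above encode the AHCI register layout: the global block occupies the first 0x100 bytes of ABAR and each port bank is 0x80 bytes, with per-port registers such as PxCMD at the offsets listed in the enum. A trivial sketch of that arithmetic (the CAP value used is hypothetical):

#include <stdio.h>

/* Same arithmetic as __ahci_port_base() and ahci_nr_ports() above. */
static unsigned port_base(unsigned port_no)
{
	return 0x100 + port_no * 0x80;
}

int main(void)
{
	unsigned cap = 0x40141f05;	/* hypothetical CAP value: NP = 5 -> 6 ports */

	printf("ports implemented by CAP.NP: %u\n", (cap & 0x1f) + 1);
	printf("port 2 PxCMD offset        : 0x%x\n", port_base(2) + 0x18);
	return 0;
}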
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
new file mode 100644
index 000000000000..5e11b160f247
--- /dev/null
+++ b/drivers/ata/ahci_platform.c
@@ -0,0 +1,192 @@
+/*
+ * AHCI SATA platform driver
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ * Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010 MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include "ahci.h"
+
+static int __init ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_platform_data *pdata = dev->platform_data;
+ struct ata_port_info pi = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ };
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct ahci_host_priv *hpriv;
+ struct ata_host *host;
+ struct resource *mem;
+ int irq;
+ int n_ports;
+ int i;
+ int rc;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(dev, "no mmio space\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "no irq\n");
+ return -EINVAL;
+ }
+
+ if (pdata && pdata->init) {
+ rc = pdata->init(dev);
+ if (rc)
+ return rc;
+ }
+
+ if (pdata && pdata->ata_port_info)
+ pi = *pdata->ata_port_info;
+
+ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ hpriv->flags |= (unsigned long)pi.private_data;
+
+ hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
+ if (!hpriv->mmio) {
+ dev_err(dev, "can't map %pR\n", mem);
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ ahci_save_initial_config(dev, hpriv,
+ pdata ? pdata->force_port_map : 0,
+ pdata ? pdata->mask_port_map : 0);
+
+ /* prepare host */
+ if (hpriv->cap & HOST_CAP_NCQ)
+ pi.flags |= ATA_FLAG_NCQ;
+
+ if (hpriv->cap & HOST_CAP_PMP)
+ pi.flags |= ATA_FLAG_PMP;
+
+ ahci_set_em_messages(hpriv, &pi);
+
+	/* CAP.NP sometimes indicates the index of the last enabled
+ * port, at other times, that of the last possible port, so
+ * determining the maximum port number requires looking at
+ * both CAP.NP and port_map.
+ */
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+ if (!host) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ host->private_data = hpriv;
+
+ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+ host->flags |= ATA_HOST_PARALLEL_SCAN;
+ else
+ printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+
+ if (pi.flags & ATA_FLAG_EM)
+ ahci_reset_em(host);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ata_port_desc(ap, "mmio %pR", mem);
+ ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+ /* set initial link pm policy */
+ ap->pm_policy = NOT_AVAILABLE;
+
+ /* set enclosure management message type */
+ if (ap->flags & ATA_FLAG_EM)
+ ap->em_message_type = hpriv->em_msg_type;
+
+ /* disabled/not-implemented port */
+ if (!(hpriv->port_map & (1 << i)))
+ ap->ops = &ata_dummy_port_ops;
+ }
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ goto err0;
+
+ ahci_init_controller(host);
+ ahci_print_info(host, "platform");
+
+ rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
+ &ahci_sht);
+ if (rc)
+ goto err0;
+
+ return 0;
+err0:
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+ return rc;
+}
+
+static int __devexit ahci_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ahci_platform_data *pdata = dev->platform_data;
+ struct ata_host *host = dev_get_drvdata(dev);
+
+ ata_host_detach(host);
+
+ if (pdata && pdata->exit)
+ pdata->exit(dev);
+
+ return 0;
+}
+
+static struct platform_driver ahci_driver = {
+ .probe = ahci_probe,
+ .remove = __devexit_p(ahci_remove),
+ .driver = {
+ .name = "ahci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ahci_init(void)
+{
+ return platform_driver_probe(&ahci_driver, ahci_probe);
+}
+module_init(ahci_init);
+
+static void __exit ahci_exit(void)
+{
+ platform_driver_unregister(&ahci_driver);
+}
+module_exit(ahci_exit);
+
+MODULE_DESCRIPTION("AHCI SATA platform driver");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ahci");
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 83bc49fac9bb..ec52fc618763 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -43,7 +43,7 @@
* driver the list of errata that are relevant is below, going back to
* PIIX4. Older device documentation is now a bit tricky to find.
*
- * The chipsets all follow very much the same design. The orginal Triton
+ * The chipsets all follow very much the same design. The original Triton
* series chipsets do _not_ support independant device timings, but this
* is fixed in Triton II. With the odd mobile exception the chips then
* change little except in gaining more modes until SATA arrives. This
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
new file mode 100644
index 000000000000..1984a6e89e84
--- /dev/null
+++ b/drivers/ata/libahci.c
@@ -0,0 +1,2216 @@
+/*
+ * libahci.c - Common AHCI SATA low-level routines
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ * Please ALWAYS copy linux-ide@vger.kernel.org
+ * on emails.
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "ahci.h"
+
+static int ahci_skip_host_reset;
+int ahci_ignore_sss;
+EXPORT_SYMBOL_GPL(ahci_ignore_sss);
+
+module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
+MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
+
+module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
+MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy);
+static void ahci_disable_alpm(struct ata_port *ap);
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size);
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size);
+
+
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static int ahci_port_start(struct ata_port *ap);
+static void ahci_port_stop(struct ata_port *ap);
+static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
+static void ahci_freeze(struct ata_port *ap);
+static void ahci_thaw(struct ata_port *ap);
+static void ahci_enable_fbs(struct ata_port *ap);
+static void ahci_disable_fbs(struct ata_port *ap);
+static void ahci_pmp_attach(struct ata_port *ap);
+static void ahci_pmp_detach(struct ata_port *ap);
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static void ahci_postreset(struct ata_link *link, unsigned int *class);
+static void ahci_error_handler(struct ata_port *ap);
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
+static int ahci_port_resume(struct ata_port *ap);
+static void ahci_dev_config(struct ata_device *dev);
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts);
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
+#endif
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
+static ssize_t ahci_activity_store(struct ata_device *dev,
+ enum sw_activity val);
+static void ahci_init_sw_activity(struct ata_link *link);
+
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_cap2(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_read_em_buffer(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t ahci_store_em_buffer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size);
+
+static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
+static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
+static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
+static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
+static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
+ ahci_read_em_buffer, ahci_store_em_buffer);
+
+static struct device_attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_policy,
+ &dev_attr_em_message_type,
+ &dev_attr_em_message,
+ &dev_attr_ahci_host_caps,
+ &dev_attr_ahci_host_cap2,
+ &dev_attr_ahci_host_version,
+ &dev_attr_ahci_port_cmd,
+ &dev_attr_em_buffer,
+ NULL
+};
+
+static struct device_attribute *ahci_sdev_attrs[] = {
+ &dev_attr_sw_activity,
+ &dev_attr_unload_heads,
+ NULL
+};
+
+struct scsi_host_template ahci_sht = {
+ ATA_NCQ_SHT("ahci"),
+ .can_queue = AHCI_MAX_CMDS - 1,
+ .sg_tablesize = AHCI_MAX_SG,
+ .dma_boundary = AHCI_DMA_BOUNDARY,
+ .shost_attrs = ahci_shost_attrs,
+ .sdev_attrs = ahci_sdev_attrs,
+};
+EXPORT_SYMBOL_GPL(ahci_sht);
+
+struct ata_port_operations ahci_ops = {
+ .inherits = &sata_pmp_port_ops,
+
+ .qc_defer = ahci_pmp_qc_defer,
+ .qc_prep = ahci_qc_prep,
+ .qc_issue = ahci_qc_issue,
+ .qc_fill_rtf = ahci_qc_fill_rtf,
+
+ .freeze = ahci_freeze,
+ .thaw = ahci_thaw,
+ .softreset = ahci_softreset,
+ .hardreset = ahci_hardreset,
+ .postreset = ahci_postreset,
+ .pmp_softreset = ahci_softreset,
+ .error_handler = ahci_error_handler,
+ .post_internal_cmd = ahci_post_internal_cmd,
+ .dev_config = ahci_dev_config,
+
+ .scr_read = ahci_scr_read,
+ .scr_write = ahci_scr_write,
+ .pmp_attach = ahci_pmp_attach,
+ .pmp_detach = ahci_pmp_detach,
+
+ .enable_pm = ahci_enable_alpm,
+ .disable_pm = ahci_disable_alpm,
+ .em_show = ahci_led_show,
+ .em_store = ahci_led_store,
+ .sw_activity_show = ahci_activity_show,
+ .sw_activity_store = ahci_activity_store,
+#ifdef CONFIG_PM
+ .port_suspend = ahci_port_suspend,
+ .port_resume = ahci_port_resume,
+#endif
+ .port_start = ahci_port_start,
+ .port_stop = ahci_port_stop,
+};
+EXPORT_SYMBOL_GPL(ahci_ops);
+
+int ahci_em_messages = 1;
+EXPORT_SYMBOL_GPL(ahci_em_messages);
+module_param(ahci_em_messages, int, 0444);
+/* add other LED protocol types when they become supported */
+MODULE_PARM_DESC(ahci_em_messages,
+ "AHCI Enclosure Management Message control (0 = off, 1 = on)");
+
+static void ahci_enable_ahci(void __iomem *mmio)
+{
+ int i;
+ u32 tmp;
+
+ /* turn on AHCI_EN */
+ tmp = readl(mmio + HOST_CTL);
+ if (tmp & HOST_AHCI_EN)
+ return;
+
+ /* Some controllers need AHCI_EN to be written multiple times.
+ * Try a few times before giving up.
+ */
+ for (i = 0; i < 5; i++) {
+ tmp |= HOST_AHCI_EN;
+ writel(tmp, mmio + HOST_CTL);
+ tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
+ if (tmp & HOST_AHCI_EN)
+ return;
+ msleep(10);
+ }
+
+ WARN_ON(1);
+}
+
+static ssize_t ahci_show_host_caps(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return sprintf(buf, "%x\n", hpriv->cap);
+}
+
+static ssize_t ahci_show_host_cap2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return sprintf(buf, "%x\n", hpriv->cap2);
+}
+
+static ssize_t ahci_show_host_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
+}
+
+static ssize_t ahci_show_port_cmd(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+}
+
+static ssize_t ahci_read_em_buffer(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ void __iomem *em_mmio = mmio + hpriv->em_loc;
+ u32 em_ctl, msg;
+ unsigned long flags;
+ size_t count;
+ int i;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
+ !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EINVAL;
+ }
+
+ if (!(em_ctl & EM_CTL_MR)) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EAGAIN;
+ }
+
+ if (!(em_ctl & EM_CTL_SMB))
+ em_mmio += hpriv->em_buf_sz;
+
+ count = hpriv->em_buf_sz;
+
+ /* the count should not be larger than PAGE_SIZE */
+ if (count > PAGE_SIZE) {
+ if (printk_ratelimit())
+ ata_port_printk(ap, KERN_WARNING,
+ "EM read buffer size too large: "
+ "buffer size %u, page size %lu\n",
+ hpriv->em_buf_sz, PAGE_SIZE);
+ count = PAGE_SIZE;
+ }
+
+ for (i = 0; i < count; i += 4) {
+ msg = readl(em_mmio + i);
+ buf[i] = msg & 0xff;
+ buf[i + 1] = (msg >> 8) & 0xff;
+ buf[i + 2] = (msg >> 16) & 0xff;
+ buf[i + 3] = (msg >> 24) & 0xff;
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return i;
+}
+
+static ssize_t ahci_store_em_buffer(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ void __iomem *em_mmio = mmio + hpriv->em_loc;
+ u32 em_ctl, msg;
+ unsigned long flags;
+ int i;
+
+ /* check size validity */
+ if (!(ap->flags & ATA_FLAG_EM) ||
+ !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
+ size % 4 || size > hpriv->em_buf_sz)
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (em_ctl & EM_CTL_TM) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < size; i += 4) {
+ msg = buf[i] | buf[i + 1] << 8 |
+ buf[i + 2] << 16 | buf[i + 3] << 24;
+ writel(msg, em_mmio + i);
+ }
+
+ writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return size;
+}
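+
The enclosure-management buffer is written one dword at a time, four message bytes packed little-endian per write. A minimal sketch of just that packing, independent of the MMIO access (the message bytes are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Same byte packing as ahci_store_em_buffer(): four consecutive message
 * bytes become one little-endian dword written to the EM transmit buffer.
 */
static uint32_t pack_em_dword(const unsigned char *buf)
{
	return buf[0] | buf[1] << 8 | buf[2] << 16 | (uint32_t)buf[3] << 24;
}

int main(void)
{
	const unsigned char msg[4] = { 0x10, 0x00, 0x40, 0x00 };	/* arbitrary bytes */

	printf("EM dword: %08x\n", pack_em_dword(msg));
	return 0;
}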
+
+/**
+ * ahci_save_initial_config - Save and fixup initial config values
+ * @dev: target AHCI device
+ * @hpriv: host private area to store config values
+ * @force_port_map: force port map to a specified value
+ * @mask_port_map: mask out particular bits from port map
+ *
+ * Some registers containing configuration info might be set up by
+ * BIOS and might be cleared on reset. This function saves the
+ * initial values of those registers into @hpriv such that they
+ * can be restored after controller reset.
+ *
+ * If inconsistent, config values are fixed up by this function.
+ *
+ * LOCKING:
+ * None.
+ */
+void ahci_save_initial_config(struct device *dev,
+ struct ahci_host_priv *hpriv,
+ unsigned int force_port_map,
+ unsigned int mask_port_map)
+{
+ void __iomem *mmio = hpriv->mmio;
+ u32 cap, cap2, vers, port_map;
+ int i;
+
+ /* make sure AHCI mode is enabled before accessing CAP */
+ ahci_enable_ahci(mmio);
+
+ /* Values prefixed with saved_ are written back to host after
+ * reset. Values without are used for driver operation.
+ */
+ hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
+ hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
+
+ /* CAP2 register is only defined for AHCI 1.2 and later */
+ vers = readl(mmio + HOST_VERSION);
+ if ((vers >> 16) > 1 ||
+ ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
+ hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
+ else
+ hpriv->saved_cap2 = cap2 = 0;
+
+ /* some chips have errata preventing 64bit use */
+ if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do 64bit DMA, forcing 32bit\n");
+ cap &= ~HOST_CAP_64;
+ }
+
+ if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do NCQ, turning off CAP_NCQ\n");
+ cap &= ~HOST_CAP_NCQ;
+ }
+
+ if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can do NCQ, turning on CAP_NCQ\n");
+ cap |= HOST_CAP_NCQ;
+ }
+
+ if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do PMP, turning off CAP_PMP\n");
+ cap &= ~HOST_CAP_PMP;
+ }
+
+ if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
+ dev_printk(KERN_INFO, dev,
+ "controller can't do SNTF, turning off CAP_SNTF\n");
+ cap &= ~HOST_CAP_SNTF;
+ }
+
+ if (force_port_map && port_map != force_port_map) {
+ dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
+ port_map, force_port_map);
+ port_map = force_port_map;
+ }
+
+ if (mask_port_map) {
+ dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
+ port_map,
+ port_map & mask_port_map);
+ port_map &= mask_port_map;
+ }
+
+ /* cross check port_map and cap.n_ports */
+ if (port_map) {
+ int map_ports = 0;
+
+ for (i = 0; i < AHCI_MAX_PORTS; i++)
+ if (port_map & (1 << i))
+ map_ports++;
+
+ /* If PI has more ports than n_ports, whine, clear
+ * port_map and let it be generated from n_ports.
+ */
+ if (map_ports > ahci_nr_ports(cap)) {
+ dev_printk(KERN_WARNING, dev,
+ "implemented port map (0x%x) contains more "
+ "ports than nr_ports (%u), using nr_ports\n",
+ port_map, ahci_nr_ports(cap));
+ port_map = 0;
+ }
+ }
+
+ /* fabricate port_map from cap.nr_ports */
+ if (!port_map) {
+ port_map = (1 << ahci_nr_ports(cap)) - 1;
+ dev_printk(KERN_WARNING, dev,
+ "forcing PORTS_IMPL to 0x%x\n", port_map);
+
+ /* write the fixed up value to the PI register */
+ hpriv->saved_port_map = port_map;
+ }
+
+ /* record values to use during operation */
+ hpriv->cap = cap;
+ hpriv->cap2 = cap2;
+ hpriv->port_map = port_map;
+}
+EXPORT_SYMBOL_GPL(ahci_save_initial_config);
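
When PORTS_IMPL is inconsistent with CAP.NP, the function above falls back to a map covering the first nr_ports ports. A user-space sketch of that cross-check, reusing only the logic shown above:

#include <stdio.h>

static int count_bits(unsigned v)
{
	int n = 0;

	for (; v; v >>= 1)
		n += v & 1;
	return n;
}

/* Same fallback rule as ahci_save_initial_config(): if PORTS_IMPL names
 * more ports than CAP.NP allows (or is zero), fabricate it from nr_ports.
 */
static unsigned fixup_port_map(unsigned port_map, unsigned nr_ports)
{
	if (count_bits(port_map) > (int)nr_ports)
		port_map = 0;
	if (!port_map)
		port_map = (1u << nr_ports) - 1;
	return port_map;
}

int main(void)
{
	printf("0x%x\n", fixup_port_map(0x3f, 4));	/* too many bits -> 0xf */
	printf("0x%x\n", fixup_port_map(0x05, 4));	/* consistent     -> 0x5 */
	return 0;
}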
+
+/**
+ * ahci_restore_initial_config - Restore initial config
+ * @host: target ATA host
+ *
+ * Restore initial config stored by ahci_save_initial_config().
+ *
+ * LOCKING:
+ * None.
+ */
+static void ahci_restore_initial_config(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+
+ writel(hpriv->saved_cap, mmio + HOST_CAP);
+ if (hpriv->saved_cap2)
+ writel(hpriv->saved_cap2, mmio + HOST_CAP2);
+ writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
+ (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
+}
+
+static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
+{
+ static const int offset[] = {
+ [SCR_STATUS] = PORT_SCR_STAT,
+ [SCR_CONTROL] = PORT_SCR_CTL,
+ [SCR_ERROR] = PORT_SCR_ERR,
+ [SCR_ACTIVE] = PORT_SCR_ACT,
+ [SCR_NOTIFICATION] = PORT_SCR_NTF,
+ };
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ if (sc_reg < ARRAY_SIZE(offset) &&
+ (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
+ return offset[sc_reg];
+ return 0;
+}
+
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
+
+ if (offset) {
+ *val = readl(port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
+
+ if (offset) {
+ writel(val, port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ahci_is_device_present(void __iomem *port_mmio)
+{
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xff;
+
+ /* Make sure PxTFD.STS.BSY and PxTFD.STS.DRQ are 0 */
+ if (status & (ATA_BUSY | ATA_DRQ))
+ return 0;
+
+ /* Make sure PxSSTS.DET is 3h */
+ status = readl(port_mmio + PORT_SCR_STAT) & 0xf;
+ if (status != 3)
+ return 0;
+ return 1;
+}
+
+void ahci_start_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ if (!ahci_is_device_present(port_mmio))
+ return;
+
+ /* start DMA */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+}
+EXPORT_SYMBOL_GPL(ahci_start_engine);
+
+int ahci_stop_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ tmp = readl(port_mmio + PORT_CMD);
+
+ /* check if the HBA is idle */
+ if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
+ return 0;
+
+ /* setting HBA to idle */
+ tmp &= ~PORT_CMD_START;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for engine to stop. This could be as long as 500 msec */
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
+ if (tmp & PORT_CMD_LIST_ON)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_stop_engine);
+
+static void ahci_start_fis_rx(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 tmp;
+
+ /* set FIS registers */
+ if (hpriv->cap & HOST_CAP_64)
+ writel((pp->cmd_slot_dma >> 16) >> 16,
+ port_mmio + PORT_LST_ADDR_HI);
+ writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
+
+ if (hpriv->cap & HOST_CAP_64)
+ writel((pp->rx_fis_dma >> 16) >> 16,
+ port_mmio + PORT_FIS_ADDR_HI);
+ writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
+
+ /* enable FIS reception */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* flush */
+ readl(port_mmio + PORT_CMD);
+}
+
+static int ahci_stop_fis_rx(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+
+ /* disable FIS reception */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp &= ~PORT_CMD_FIS_RX;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ /* wait for completion, spec says 500ms, give it 1000 */
+ tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
+ PORT_CMD_FIS_ON, 10, 1000);
+ if (tmp & PORT_CMD_FIS_ON)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void ahci_power_up(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+
+ /* spin up device */
+ if (hpriv->cap & HOST_CAP_SSS) {
+ cmd |= PORT_CMD_SPIN_UP;
+ writel(cmd, port_mmio + PORT_CMD);
+ }
+
+ /* wake up link */
+ writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
+}
+
+static void ahci_disable_alpm(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* IPM bits should be disabled by libata-core */
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* disable ALPM and ASP */
+ cmd &= ~PORT_CMD_ASP;
+ cmd &= ~PORT_CMD_ALPE;
+
+ /* force the interface back to active */
+ cmd |= PORT_CMD_ICC_ACTIVE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* wait 10ms to be sure we've come out of any low power state */
+ msleep(10);
+
+ /* clear out any PhyRdy stuff from interrupt status */
+ writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
+
+ /* go ahead and clean out PhyRdy Change from Serror too */
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+
+ /*
+ * Clear flag to indicate that we should ignore all PhyRdy
+ * state changes
+ */
+ hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
+
+ /*
+ * Enable interrupts on Phy Ready.
+ */
+ pp->intr_mask |= PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * don't change the link pm policy - we can be called
+ * just to turn off link pm temporarily
+ */
+}
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 asp;
+
+ /* Make sure the host is capable of link power management */
+ if (!(hpriv->cap & HOST_CAP_ALPM))
+ return -EINVAL;
+
+ switch (policy) {
+ case MAX_PERFORMANCE:
+ case NOT_AVAILABLE:
+ /*
+ * if we came here with NOT_AVAILABLE,
+ * it just means this is the first time we
+ * have tried to enable - default to max performance,
+ * and let the user go to lower power modes on request.
+ */
+ ahci_disable_alpm(ap);
+ return 0;
+ case MIN_POWER:
+ /* configure HBA to enter SLUMBER */
+ asp = PORT_CMD_ASP;
+ break;
+ case MEDIUM_POWER:
+ /* configure HBA to enter PARTIAL */
+ asp = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Disable interrupts on Phy Ready. This keeps us from
+ * getting woken up due to spurious phy ready interrupts
+ * TBD - Hot plug should be done via polling now, is
+ * that even supported?
+ */
+ pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * Set a flag to indicate that we should ignore all PhyRdy
+ * state changes since these can happen now whenever we
+ * change link state
+ */
+ hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
+
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /*
+ * Set ASP based on Policy
+ */
+ cmd |= asp;
+
+ /*
+ * Setting this bit will instruct the HBA to aggressively
+ * enter a lower power link state when it's appropriate and
+ * based on the value set above for ASP
+ */
+ cmd |= PORT_CMD_ALPE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* IPM bits should be set by libata-core */
+ return 0;
+}
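
Editor's note: the switch above boils down to this mapping: MIN_POWER sets the aggressive-slumber bit so the HBA may enter SLUMBER, MEDIUM_POWER leaves it clear so only PARTIAL is used, and ALPE is set in either case. A small standalone sketch of that mapping; the enum and bit positions below are stand-ins, not the driver's constants:

/* Toy mapping of link-pm policy to the PORT_CMD additions made above;
 * DEMO_* names and bit positions are placeholders. */
#include <stdio.h>

enum demo_policy { DEMO_MIN_POWER, DEMO_MEDIUM_POWER };

int main(void)
{
	enum demo_policy policy = DEMO_MIN_POWER;
	unsigned demo_asp_bit  = 1u << 27;	/* stand-in for PORT_CMD_ASP */
	unsigned demo_alpe_bit = 1u << 26;	/* stand-in for PORT_CMD_ALPE */
	unsigned cmd = 0;

	cmd |= (policy == DEMO_MIN_POWER) ? demo_asp_bit : 0;
	cmd |= demo_alpe_bit;

	printf("PORT_CMD additions = 0x%08x\n", cmd);
	return 0;
}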
+
+#ifdef CONFIG_PM
+static void ahci_power_down(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd, scontrol;
+
+ if (!(hpriv->cap & HOST_CAP_SSS))
+ return;
+
+ /* put device into listen mode, first set PxSCTL.DET to 0 */
+ scontrol = readl(port_mmio + PORT_SCR_CTL);
+ scontrol &= ~0xf;
+ writel(scontrol, port_mmio + PORT_SCR_CTL);
+
+ /* then set PxCMD.SUD to 0 */
+ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
+ cmd &= ~PORT_CMD_SPIN_UP;
+ writel(cmd, port_mmio + PORT_CMD);
+}
+#endif
+
+static void ahci_start_port(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_link *link;
+ struct ahci_em_priv *emp;
+ ssize_t rc;
+ int i;
+
+ /* enable FIS reception */
+ ahci_start_fis_rx(ap);
+
+ /* enable DMA */
+ ahci_start_engine(ap);
+
+ /* turn on LEDs */
+ if (ap->flags & ATA_FLAG_EM) {
+ ata_for_each_link(link, ap, EDGE) {
+ emp = &pp->em_priv[link->pmp];
+
+ /* EM Transmit bit may be busy during init */
+ for (i = 0; i < EM_MAX_RETRY; i++) {
+ rc = ahci_transmit_led_message(ap,
+ emp->led_state,
+ 4);
+ if (rc == -EBUSY)
+ msleep(1);
+ else
+ break;
+ }
+ }
+ }
+
+ if (ap->flags & ATA_FLAG_SW_ACTIVITY)
+ ata_for_each_link(link, ap, EDGE)
+ ahci_init_sw_activity(link);
+
+}
+
+static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
+{
+ int rc;
+
+ /* disable DMA */
+ rc = ahci_stop_engine(ap);
+ if (rc) {
+ *emsg = "failed to stop engine";
+ return rc;
+ }
+
+ /* disable FIS reception */
+ rc = ahci_stop_fis_rx(ap);
+ if (rc) {
+ *emsg = "failed stop FIS RX";
+ return rc;
+ }
+
+ return 0;
+}
+
+int ahci_reset_controller(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 tmp;
+
+ /* we must be in AHCI mode, before using anything
+ * AHCI-specific, such as HOST_RESET.
+ */
+ ahci_enable_ahci(mmio);
+
+ /* global controller reset */
+ if (!ahci_skip_host_reset) {
+ tmp = readl(mmio + HOST_CTL);
+ if ((tmp & HOST_RESET) == 0) {
+ writel(tmp | HOST_RESET, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ }
+
+ /*
+ * to perform host reset, OS should set HOST_RESET
+ * and poll until this bit is read to be "0".
+ * reset must complete within 1 second, or
+ * the hardware should be considered fried.
+ */
+ tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
+ HOST_RESET, 10, 1000);
+
+ if (tmp & HOST_RESET) {
+ dev_printk(KERN_ERR, host->dev,
+ "controller reset failed (0x%x)\n", tmp);
+ return -EIO;
+ }
+
+ /* turn on AHCI mode */
+ ahci_enable_ahci(mmio);
+
+ /* Some registers might be cleared on reset. Restore
+ * initial values.
+ */
+ ahci_restore_initial_config(host);
+ } else
+ dev_printk(KERN_INFO, host->dev,
+ "skipping global host reset\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_reset_controller);
+
+static void ahci_sw_activity(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
+ return;
+
+ emp->activity++;
+ if (!timer_pending(&emp->timer))
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
+}
+
+static void ahci_sw_activity_blink(unsigned long arg)
+{
+ struct ata_link *link = (struct ata_link *)arg;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+ unsigned long led_message = emp->led_state;
+ u32 activity_led_state;
+ unsigned long flags;
+
+ led_message &= EM_MSG_LED_VALUE;
+ led_message |= ap->port_no | (link->pmp << 8);
+
+ /* check to see if we've had activity. If so,
+ * toggle state of LED and reset timer. If not,
+ * turn LED to desired idle state.
+ */
+ spin_lock_irqsave(ap->lock, flags);
+ if (emp->saved_activity != emp->activity) {
+ emp->saved_activity = emp->activity;
+ /* get the current LED state */
+ activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
+
+ if (activity_led_state)
+ activity_led_state = 0;
+ else
+ activity_led_state = 1;
+
+ /* clear old state */
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+ /* toggle state */
+ led_message |= (activity_led_state << 16);
+ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
+ } else {
+ /* switch to idle */
+ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
+ if (emp->blink_policy == BLINK_OFF)
+ led_message |= (1 << 16);
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+ ahci_transmit_led_message(ap, led_message, 4);
+}
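
Editor's note: the timer callback above decides between blinking and idling by comparing the running activity counter against the value saved on the previous tick. A toy sketch of that decision, with plain integers standing in for the timer and the LED message:

/* Sketch of the blink decision in ahci_sw_activity_blink(): if activity
 * moved since the last tick, toggle the LED and re-arm a short timer,
 * otherwise fall back to the idle policy. */
#include <stdio.h>

int main(void)
{
	unsigned long activity = 5, saved_activity = 3;
	int led_on = 0;

	if (saved_activity != activity) {
		saved_activity = activity;
		led_on = !led_on;		/* toggle */
		printf("toggle LED -> %d, re-arm timer (100 ms)\n", led_on);
	} else {
		printf("no new activity, switch LED to idle policy\n");
	}
	return 0;
}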
+
+static void ahci_init_sw_activity(struct ata_link *link)
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ /* init activity stats, setup timer */
+ emp->saved_activity = emp->activity = 0;
+ setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
+
+ /* check our blink policy and set flag for link if it's enabled */
+ if (emp->blink_policy)
+ link->flags |= ATA_LFLAG_SW_ACTIVITY;
+}
+
+int ahci_reset_em(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_ctl;
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
+ return -EINVAL;
+
+ writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_reset_em);
+
+static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
+ ssize_t size)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_ctl;
+ u32 message[] = {0, 0};
+ unsigned long flags;
+ int pmp;
+ struct ahci_em_priv *emp;
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < EM_MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /*
+ * if we are still busy transmitting a previous message,
+ * do not allow a new one to be sent
+ */
+ em_ctl = readl(mmio + HOST_EM_CTL);
+ if (em_ctl & EM_CTL_TM) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return -EBUSY;
+ }
+
+ if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
+ /*
+ * create message header - this is all zero except for
+ * the message size, which is 4 bytes.
+ */
+ message[0] |= (4 << 8);
+
+ /* ignore 0:4 of byte zero, fill in port info yourself */
+ message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
+
+ /* write message to EM_LOC */
+ writel(message[0], mmio + hpriv->em_loc);
+ writel(message[1], mmio + hpriv->em_loc+4);
+
+ /*
+ * tell hardware to transmit the message
+ */
+ writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
+ }
+
+ /* save off new led state for port/slot */
+ emp->led_state = state;
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ return size;
+}
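
Editor's note: ahci_transmit_led_message() builds a two-dword message: dword 0 is a header whose bits 8-15 hold the payload size (4 bytes), and dword 1 is the LED state with the HBA port number in the low bits (and, in the sw-activity path, the PMP slot at bits 8-15). A standalone sketch of that packing; the state value is made up:

/* Packing the two-dword EM LED message the way the code above does;
 * field positions are taken from that code, the state bits are fake. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned port_no = 2, pmp = 1;
	uint32_t state = 0x00070000;		/* arbitrary LED value bits */
	uint32_t message[2] = { 0, 0 };

	message[0] |= 4 << 8;			/* message size in bytes */
	message[1] = state | port_no | (pmp << 8);

	printf("header 0x%08x  body 0x%08x\n", message[0], message[1]);
	return 0;
}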
+
+static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_link *link;
+ struct ahci_em_priv *emp;
+ int rc = 0;
+
+ ata_for_each_link(link, ap, EDGE) {
+ emp = &pp->em_priv[link->pmp];
+ rc += sprintf(buf, "%lx\n", emp->led_state);
+ }
+ return rc;
+}
+
+static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size)
+{
+ int state;
+ int pmp;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp;
+
+ state = simple_strtoul(buf, NULL, 0);
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+ if (pmp < EM_MAX_SLOTS)
+ emp = &pp->em_priv[pmp];
+ else
+ return -EINVAL;
+
+ /* mask off the activity bits if we are in sw_activity
+ * mode, user should turn off sw_activity before setting
+ * activity led through em_message
+ */
+ if (emp->blink_policy)
+ state &= ~EM_MSG_LED_VALUE_ACTIVITY;
+
+ return ahci_transmit_led_message(ap, state, size);
+}
+
+static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+ u32 port_led_state = emp->led_state;
+
+ /* save the desired Activity LED behavior */
+ if (val == OFF) {
+ /* clear LFLAG */
+ link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
+
+ /* set the LED to OFF */
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
+ port_led_state |= (ap->port_no | (link->pmp << 8));
+ ahci_transmit_led_message(ap, port_led_state, 4);
+ } else {
+ link->flags |= ATA_LFLAG_SW_ACTIVITY;
+ if (val == BLINK_OFF) {
+ /* set LED to ON for idle */
+ port_led_state &= EM_MSG_LED_VALUE_OFF;
+ port_led_state |= (ap->port_no | (link->pmp << 8));
+ port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
+ ahci_transmit_led_message(ap, port_led_state, 4);
+ }
+ }
+ emp->blink_policy = val;
+ return 0;
+}
+
+static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
+ /* display the saved value of activity behavior for this
+ * disk.
+ */
+ return sprintf(buf, "%d\n", emp->blink_policy);
+}
+
+static void ahci_port_init(struct device *dev, struct ata_port *ap,
+ int port_no, void __iomem *mmio,
+ void __iomem *port_mmio)
+{
+ const char *emsg = NULL;
+ int rc;
+ u32 tmp;
+
+ /* make sure port is not active */
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc)
+ dev_warn(dev, "%s (%d)\n", emsg, rc);
+
+ /* clear SError */
+ tmp = readl(port_mmio + PORT_SCR_ERR);
+ VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
+ writel(tmp, port_mmio + PORT_SCR_ERR);
+
+ /* clear port IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+ if (tmp)
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+
+ writel(1 << port_no, mmio + HOST_IRQ_STAT);
+}
+
+void ahci_init_controller(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ int i;
+ void __iomem *port_mmio;
+ u32 tmp;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ port_mmio = ahci_port_base(ap);
+ if (ata_port_is_dummy(ap))
+ continue;
+
+ ahci_port_init(host->dev, ap, i, mmio, port_mmio);
+ }
+
+ tmp = readl(mmio + HOST_CTL);
+ VPRINTK("HOST_CTL 0x%x\n", tmp);
+ writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
+ tmp = readl(mmio + HOST_CTL);
+ VPRINTK("HOST_CTL 0x%x\n", tmp);
+}
+EXPORT_SYMBOL_GPL(ahci_init_controller);
+
+static void ahci_dev_config(struct ata_device *dev)
+{
+ struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+
+ if (hpriv->flags & AHCI_HFLAG_SECT255) {
+ dev->max_sectors = 255;
+ ata_dev_printk(dev, KERN_INFO,
+ "SB600 AHCI: limiting to 255 sectors per cmd\n");
+ }
+}
+
+static unsigned int ahci_dev_classify(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_taskfile tf;
+ u32 tmp;
+
+ tmp = readl(port_mmio + PORT_SIG);
+ tf.lbah = (tmp >> 24) & 0xff;
+ tf.lbam = (tmp >> 16) & 0xff;
+ tf.lbal = (tmp >> 8) & 0xff;
+ tf.nsect = (tmp) & 0xff;
+
+ return ata_dev_classify(&tf);
+}
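
Editor's note: ahci_dev_classify() unpacks the 32-bit PxSIG value into the task-file signature bytes and hands them to ata_dev_classify(). A standalone decode of an example value (0xeb140101, the commonly documented ATAPI signature); the value is illustrative only:

/* Decoding a PxSIG dword into the signature bytes, as done above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sig = 0xeb140101;	/* typical ATAPI signature */
	uint8_t lbah  = (sig >> 24) & 0xff;
	uint8_t lbam  = (sig >> 16) & 0xff;
	uint8_t lbal  = (sig >> 8) & 0xff;
	uint8_t nsect = sig & 0xff;

	printf("lbah=%02x lbam=%02x lbal=%02x nsect=%02x\n",
	       lbah, lbam, lbal, nsect);
	return 0;
}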
+
+static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+ u32 opts)
+{
+ dma_addr_t cmd_tbl_dma;
+
+ cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
+
+ pp->cmd_slot[tag].opts = cpu_to_le32(opts);
+ pp->cmd_slot[tag].status = 0;
+ pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
+ pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
+}
+
+int ahci_kick_engine(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+ u32 tmp;
+ int busy, rc;
+
+ /* stop engine */
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ goto out_restart;
+
+ /* need to do CLO?
+ * always do CLO if PMP is attached (AHCI-1.3 9.2)
+ */
+ busy = status & (ATA_BUSY | ATA_DRQ);
+ if (!busy && !sata_pmp_attached(ap)) {
+ rc = 0;
+ goto out_restart;
+ }
+
+ if (!(hpriv->cap & HOST_CAP_CLO)) {
+ rc = -EOPNOTSUPP;
+ goto out_restart;
+ }
+
+ /* perform CLO */
+ tmp = readl(port_mmio + PORT_CMD);
+ tmp |= PORT_CMD_CLO;
+ writel(tmp, port_mmio + PORT_CMD);
+
+ rc = 0;
+ tmp = ata_wait_register(port_mmio + PORT_CMD,
+ PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
+ if (tmp & PORT_CMD_CLO)
+ rc = -EIO;
+
+ /* restart engine */
+ out_restart:
+ ahci_start_engine(ap);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_kick_engine);
+
+static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+ struct ata_taskfile *tf, int is_cmd, u16 flags,
+ unsigned long timeout_msec)
+{
+ const u32 cmd_fis_len = 5; /* five dwords */
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u8 *fis = pp->cmd_tbl;
+ u32 tmp;
+
+ /* prep the command */
+ ata_tf_to_fis(tf, pmp, is_cmd, fis);
+ ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+
+ /* issue & wait */
+ writel(1, port_mmio + PORT_CMD_ISSUE);
+
+ if (timeout_msec) {
+ tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
+ 1, timeout_msec);
+ if (tmp & 0x1) {
+ ahci_kick_engine(ap);
+ return -EBUSY;
+ }
+ } else
+ readl(port_mmio + PORT_CMD_ISSUE); /* flush */
+
+ return 0;
+}
+
+int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ int pmp, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link))
+{
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ const char *reason = NULL;
+ unsigned long now, msecs;
+ struct ata_taskfile tf;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ /* prepare for SRST (AHCI-1.1 10.4.1) */
+ rc = ahci_kick_engine(ap);
+ if (rc && rc != -EOPNOTSUPP)
+ ata_link_printk(link, KERN_WARNING,
+ "failed to reset engine (errno=%d)\n", rc);
+
+ ata_tf_init(link->device, &tf);
+
+ /* issue the first H2D Register FIS */
+ msecs = 0;
+ now = jiffies;
+ if (time_after(deadline, now))
+ msecs = jiffies_to_msecs(deadline - now);

+
+ tf.ctl |= ATA_SRST;
+ if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
+ AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
+ rc = -EIO;
+ reason = "1st FIS failed";
+ goto fail;
+ }
+
+ /* spec says at least 5us, but be generous and sleep for 1ms */
+ msleep(1);
+
+ /* issue the second H2D Register FIS */
+ tf.ctl &= ~ATA_SRST;
+ ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
+
+ /* wait for link to become ready */
+ rc = ata_wait_after_reset(link, deadline, check_ready);
+ if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
+ /*
+ * Workaround for cases where link online status can't
+ * be trusted. Treat device readiness timeout as link
+ * offline.
+ */
+ ata_link_printk(link, KERN_INFO,
+ "device not ready, treating as offline\n");
+ *class = ATA_DEV_NONE;
+ } else if (rc) {
+ /* link occupied, -ENODEV too is an error */
+ reason = "device not ready";
+ goto fail;
+ } else
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+
+ fail:
+ ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
+ return rc;
+}
+
+int ahci_check_ready(struct ata_link *link)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+
+ return ata_check_ready(status);
+}
+EXPORT_SYMBOL_GPL(ahci_check_ready);
+
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ int pmp = sata_srst_pmp(link);
+
+ DPRINTK("ENTER\n");
+
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+}
+EXPORT_SYMBOL_GPL(ahci_do_softreset);
+
+static int ahci_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ struct ata_taskfile tf;
+ bool online;
+ int rc;
+
+ DPRINTK("ENTER\n");
+
+ ahci_stop_engine(ap);
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = 0x80;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ahci_check_ready);
+
+ ahci_start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+ return rc;
+}
+
+static void ahci_postreset(struct ata_link *link, unsigned int *class)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 new_tmp, tmp;
+
+ ata_std_postreset(link, class);
+
+ /* Make sure port's ATAPI bit is set appropriately */
+ new_tmp = tmp = readl(port_mmio + PORT_CMD);
+ if (*class == ATA_DEV_ATAPI)
+ new_tmp |= PORT_CMD_ATAPI;
+ else
+ new_tmp &= ~PORT_CMD_ATAPI;
+ if (new_tmp != tmp) {
+ writel(new_tmp, port_mmio + PORT_CMD);
+ readl(port_mmio + PORT_CMD); /* flush */
+ }
+}
+
+static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+{
+ struct scatterlist *sg;
+ struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+ unsigned int si;
+
+ VPRINTK("ENTER\n");
+
+ /*
+ * Next, the S/G list.
+ */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_addr_t addr = sg_dma_address(sg);
+ u32 sg_len = sg_dma_len(sg);
+
+ ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
+ ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+ ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
+ }
+
+ return si;
+}
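
Editor's note: each PRDT entry written above carries the segment address split into two dwords plus the byte count minus one, since the count field is zero-based. A standalone sketch with a synthetic segment list; the struct below is a stand-in, not the driver's struct ahci_sg:

/* Building a tiny scatter/gather table the way ahci_fill_sg() does. */
#include <stdio.h>
#include <stdint.h>

struct demo_sg_entry {
	uint32_t addr;
	uint32_t addr_hi;
	uint32_t reserved;
	uint32_t flags_size;
};

int main(void)
{
	struct { uint64_t addr; uint32_t len; } seg[] = {
		{ 0x1000, 4096 }, { 0x180000000ULL, 8192 },
	};
	struct demo_sg_entry prdt[2];

	for (unsigned i = 0; i < 2; i++) {
		prdt[i].addr = (uint32_t)(seg[i].addr & 0xffffffff);
		prdt[i].addr_hi = (uint32_t)((seg[i].addr >> 16) >> 16);
		prdt[i].reserved = 0;
		prdt[i].flags_size = seg[i].len - 1;	/* zero-based count */
	}

	for (unsigned i = 0; i < 2; i++)
		printf("entry %u: %08x:%08x len-1=%u\n", i,
		       (unsigned)prdt[i].addr_hi, (unsigned)prdt[i].addr,
		       (unsigned)prdt[i].flags_size);
	return 0;
}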
+
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ if (!sata_pmp_attached(ap) || pp->fbs_enabled)
+ return ata_std_qc_defer(qc);
+ else
+ return sata_pmp_qc_defer_cmd_switch(qc);
+}
+
+static void ahci_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ int is_atapi = ata_is_atapi(qc->tf.protocol);
+ void *cmd_tbl;
+ u32 opts;
+ const u32 cmd_fis_len = 5; /* five dwords */
+ unsigned int n_elem;
+
+ /*
+ * Fill in command table information. First, the header,
+ * a SATA Register - Host to Device command FIS.
+ */
+ cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
+ if (is_atapi) {
+ memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+ memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
+ }
+
+ n_elem = 0;
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ n_elem = ahci_fill_sg(qc, cmd_tbl);
+
+ /*
+ * Fill in command slot information.
+ */
+ opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ opts |= AHCI_CMD_WRITE;
+ if (is_atapi)
+ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+ ahci_fill_cmd_slot(pp, qc->tag, opts);
+}
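
Editor's note: the options dword assembled above packs the FIS length in dwords into the low bits, the PMP port number at bit 12, and the PRDT entry count from bit 16, with the write/ATAPI flag bits OR'ed in afterwards. A standalone sketch of that packing with made-up field values:

/* Packing a command-slot options dword as ahci_qc_prep() does. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cmd_fis_len = 5;	/* five dwords */
	uint32_t n_elem = 3;		/* PRDT entries */
	uint32_t pmp = 2;
	uint32_t opts;

	opts = cmd_fis_len | (n_elem << 16) | (pmp << 12);
	/* AHCI_CMD_WRITE / AHCI_CMD_ATAPI / AHCI_CMD_PREFETCH would be
	 * OR'ed in here for the relevant command types */

	printf("opts = 0x%08x\n", opts);
	return 0;
}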
+
+static void ahci_fbs_dec_intr(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int retries = 3;
+
+ DPRINTK("ENTER\n");
+ BUG_ON(!pp->fbs_enabled);
+
+ /* time to wait for DEC is not specified by AHCI spec,
+ * add a retry loop for safety.
+ */
+ writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ while ((fbs & PORT_FBS_DEC) && retries--) {
+ udelay(1);
+ fbs = readl(port_mmio + PORT_FBS);
+ }
+
+ if (fbs & PORT_FBS_DEC)
+ dev_printk(KERN_ERR, ap->host->dev,
+ "failed to clear device error\n");
+}
+
+static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ata_eh_info *host_ehi = &ap->link.eh_info;
+ struct ata_link *link = NULL;
+ struct ata_queued_cmd *active_qc;
+ struct ata_eh_info *active_ehi;
+ bool fbs_need_dec = false;
+ u32 serror;
+
+ /* determine active link with error */
+ if (pp->fbs_enabled) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int pmp = fbs >> PORT_FBS_DWE_OFFSET;
+
+ if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
+ ata_link_online(&ap->pmp_link[pmp])) {
+ link = &ap->pmp_link[pmp];
+ fbs_need_dec = true;
+ }
+
+ } else
+ ata_for_each_link(link, ap, EDGE)
+ if (ata_link_active(link))
+ break;
+
+ if (!link)
+ link = &ap->link;
+
+ active_qc = ata_qc_from_tag(ap, link->active_tag);
+ active_ehi = &link->eh_info;
+
+ /* record irq stat */
+ ata_ehi_clear_desc(host_ehi);
+ ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
+
+ /* AHCI needs SError cleared; otherwise, it might lock up */
+ ahci_scr_read(&ap->link, SCR_ERROR, &serror);
+ ahci_scr_write(&ap->link, SCR_ERROR, serror);
+ host_ehi->serror |= serror;
+
+ /* some controllers set IRQ_IF_ERR on device errors, ignore it */
+ if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
+ irq_stat &= ~PORT_IRQ_IF_ERR;
+
+ if (irq_stat & PORT_IRQ_TF_ERR) {
+ /* If qc is active, charge it; otherwise, the active
+ * link. There's no active qc on NCQ errors. It will
+ * be determined by EH by reading log page 10h.
+ */
+ if (active_qc)
+ active_qc->err_mask |= AC_ERR_DEV;
+ else
+ active_ehi->err_mask |= AC_ERR_DEV;
+
+ if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
+ host_ehi->serror &= ~SERR_INTERNAL;
+ }
+
+ if (irq_stat & PORT_IRQ_UNK_FIS) {
+ u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
+
+ active_ehi->err_mask |= AC_ERR_HSM;
+ active_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(active_ehi,
+ "unknown FIS %08x %08x %08x %08x" ,
+ unk[0], unk[1], unk[2], unk[3]);
+ }
+
+ if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
+ active_ehi->err_mask |= AC_ERR_HSM;
+ active_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(active_ehi, "incorrect PMP");
+ }
+
+ if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
+ host_ehi->err_mask |= AC_ERR_HOST_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ ata_ehi_push_desc(host_ehi, "host bus error");
+ }
+
+ if (irq_stat & PORT_IRQ_IF_ERR) {
+ if (fbs_need_dec)
+ active_ehi->err_mask |= AC_ERR_DEV;
+ else {
+ host_ehi->err_mask |= AC_ERR_ATA_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ }
+
+ ata_ehi_push_desc(host_ehi, "interface fatal error");
+ }
+
+ if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
+ ata_ehi_hotplugged(host_ehi);
+ ata_ehi_push_desc(host_ehi, "%s",
+ irq_stat & PORT_IRQ_CONNECT ?
+ "connection status changed" : "PHY RDY changed");
+ }
+
+ /* okay, let's hand over to EH */
+
+ if (irq_stat & PORT_IRQ_FREEZE)
+ ata_port_freeze(ap);
+ else if (fbs_need_dec) {
+ ata_link_abort(link);
+ ahci_fbs_dec_intr(ap);
+ } else
+ ata_port_abort(ap);
+}
+
+static void ahci_port_intr(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
+ u32 status, qc_active = 0;
+ int rc;
+
+ status = readl(port_mmio + PORT_IRQ_STAT);
+ writel(status, port_mmio + PORT_IRQ_STAT);
+
+ /* ignore BAD_PMP while resetting */
+ if (unlikely(resetting))
+ status &= ~PORT_IRQ_BAD_PMP;
+
+ /* If we are getting PhyRdy, this is
+ * just a power state change; clear it
+ * out, along with the PhyRdy/Comm Wake
+ * bits in SError
+ */
+ if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
+ (status & PORT_IRQ_PHYRDY)) {
+ status &= ~PORT_IRQ_PHYRDY;
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+ }
+
+ if (unlikely(status & PORT_IRQ_ERROR)) {
+ ahci_error_intr(ap, status);
+ return;
+ }
+
+ if (status & PORT_IRQ_SDB_FIS) {
+ /* If SNotification is available, leave notification
+ * handling to sata_async_notification(). If not,
+ * emulate it by snooping SDB FIS RX area.
+ *
+ * Snooping FIS RX area is probably cheaper than
+ * poking SNotification but some controllers which
+ * implement SNotification, ICH9 for example, don't
+ * store AN SDB FIS into receive area.
+ */
+ if (hpriv->cap & HOST_CAP_SNTF)
+ sata_async_notification(ap);
+ else {
+ /* If the 'N' bit in word 0 of the FIS is set,
+ * we just received asynchronous notification.
+ * Tell libata about it.
+ *
+ * Lack of SNotification should not appear in
+ * ahci 1.2, so the workaround is unnecessary
+ * when FBS is enabled.
+ */
+ if (pp->fbs_enabled)
+ WARN_ON_ONCE(1);
+ else {
+ const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+ u32 f0 = le32_to_cpu(f[0]);
+ if (f0 & (1 << 15))
+ sata_async_notification(ap);
+ }
+ }
+ }
+
+ /* pp->active_link is not reliable once FBS is enabled, both
+ * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
+ * NCQ and non-NCQ commands may be in flight at the same time.
+ */
+ if (pp->fbs_enabled) {
+ if (ap->qc_active) {
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
+ }
+ } else {
+ /* pp->active_link is valid iff any command is in flight */
+ if (ap->qc_active && pp->active_link->sactive)
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ else
+ qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+ }
+
+
+ rc = ata_qc_complete_multiple(ap, qc_active);
+
+ /* while resetting, invalid completions are expected */
+ if (unlikely(rc < 0 && !resetting)) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+ }
+}
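
Editor's note: the completion-mask logic above is the key FBS subtlety: with FBS enabled, NCQ and non-NCQ commands can be outstanding at the same time, so both PxSACT and PxCI have to be folded into qc_active. A toy illustration with made-up register values:

/* Choosing the completion mask as ahci_port_intr() does. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t scr_act = 0x00000005;	/* NCQ tags 0 and 2 still active */
	uint32_t ci      = 0x00000008;	/* non-NCQ command in slot 3 */
	int fbs_enabled  = 1;
	uint32_t qc_active;

	if (fbs_enabled)
		qc_active = scr_act | ci;
	else
		qc_active = scr_act;	/* or ci, depending on active link */

	printf("qc_active = 0x%08x\n", qc_active);
	return 0;
}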
+
+irqreturn_t ahci_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ struct ahci_host_priv *hpriv;
+ unsigned int i, handled = 0;
+ void __iomem *mmio;
+ u32 irq_stat, irq_masked;
+
+ VPRINTK("ENTER\n");
+
+ hpriv = host->private_data;
+ mmio = hpriv->mmio;
+
+ /* sigh. 0xffffffff is a valid return from h/w */
+ irq_stat = readl(mmio + HOST_IRQ_STAT);
+ if (!irq_stat)
+ return IRQ_NONE;
+
+ irq_masked = irq_stat & hpriv->port_map;
+
+ spin_lock(&host->lock);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap;
+
+ if (!(irq_masked & (1 << i)))
+ continue;
+
+ ap = host->ports[i];
+ if (ap) {
+ ahci_port_intr(ap);
+ VPRINTK("port %u\n", i);
+ } else {
+ VPRINTK("port %u (no irq)\n", i);
+ if (ata_ratelimit())
+ dev_printk(KERN_WARNING, host->dev,
+ "interrupt on disabled port %u\n", i);
+ }
+
+ handled = 1;
+ }
+
+ /* HOST_IRQ_STAT behaves as level triggered latch meaning that
+ * it should be cleared after all the port events are cleared;
+ * otherwise, it will raise a spurious interrupt after each
+ * valid one. Please read section 10.6.2 of ahci 1.1 for more
+ * information.
+ *
+ * Also, use the unmasked value to clear interrupt as spurious
+ * pending event on a dummy port might cause screaming IRQ.
+ */
+ writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+ spin_unlock(&host->lock);
+
+ VPRINTK("EXIT\n");
+
+ return IRQ_RETVAL(handled);
+}
+EXPORT_SYMBOL_GPL(ahci_interrupt);
+
+static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* Keep track of the currently active link. It will be used
+ * in completion path to determine whether NCQ phase is in
+ * progress.
+ */
+ pp->active_link = qc->dev->link;
+
+ if (qc->tf.protocol == ATA_PROT_NCQ)
+ writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+
+ if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+ fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
+ writel(fbs, port_mmio + PORT_FBS);
+ pp->fbs_last_dev = qc->dev->link->pmp;
+ }
+
+ writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
+
+ ahci_sw_activity(qc->dev->link);
+
+ return 0;
+}
+
+static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ struct ahci_port_priv *pp = qc->ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+
+ if (pp->fbs_enabled)
+ d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
+
+ ata_tf_from_fis(d2h_fis, &qc->result_tf);
+ return true;
+}
+
+static void ahci_freeze(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+
+ /* turn IRQ off */
+ writel(0, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_thaw(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* clear IRQ */
+ tmp = readl(port_mmio + PORT_IRQ_STAT);
+ writel(tmp, port_mmio + PORT_IRQ_STAT);
+ writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
+
+ /* turn IRQ back on */
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_error_handler(struct ata_port *ap)
+{
+ if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+ /* restart engine */
+ ahci_stop_engine(ap);
+ ahci_start_engine(ap);
+ }
+
+ sata_pmp_error_handler(ap);
+}
+
+static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ /* make DMA engine forget about the failed command */
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ ahci_kick_engine(ap);
+}
+
+static void ahci_enable_fbs(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+ int rc;
+
+ if (!pp->fbs_supported)
+ return;
+
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN) {
+ pp->fbs_enabled = true;
+ pp->fbs_last_dev = -1; /* initialization */
+ return;
+ }
+
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ return;
+
+ writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN) {
+ dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
+ pp->fbs_enabled = true;
+ pp->fbs_last_dev = -1; /* initialization */
+ } else
+ dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
+
+ ahci_start_engine(ap);
+}
+
+static void ahci_disable_fbs(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+ int rc;
+
+ if (!pp->fbs_supported)
+ return;
+
+ fbs = readl(port_mmio + PORT_FBS);
+ if ((fbs & PORT_FBS_EN) == 0) {
+ pp->fbs_enabled = false;
+ return;
+ }
+
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ return;
+
+ writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN)
+ dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
+ else {
+ dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
+ pp->fbs_enabled = false;
+ }
+
+ ahci_start_engine(ap);
+}
+
+static void ahci_pmp_attach(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 cmd;
+
+ cmd = readl(port_mmio + PORT_CMD);
+ cmd |= PORT_CMD_PMP;
+ writel(cmd, port_mmio + PORT_CMD);
+
+ ahci_enable_fbs(ap);
+
+ pp->intr_mask |= PORT_IRQ_BAD_PMP;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static void ahci_pmp_detach(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 cmd;
+
+ ahci_disable_fbs(ap);
+
+ cmd = readl(port_mmio + PORT_CMD);
+ cmd &= ~PORT_CMD_PMP;
+ writel(cmd, port_mmio + PORT_CMD);
+
+ pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+}
+
+static int ahci_port_resume(struct ata_port *ap)
+{
+ ahci_power_up(ap);
+ ahci_start_port(ap);
+
+ if (sata_pmp_attached(ap))
+ ahci_pmp_attach(ap);
+ else
+ ahci_pmp_detach(ap);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+ const char *emsg = NULL;
+ int rc;
+
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc == 0)
+ ahci_power_down(ap);
+ else {
+ ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
+ ahci_start_port(ap);
+ }
+
+ return rc;
+}
+#endif
+
+static int ahci_port_start(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct device *dev = ap->host->dev;
+ struct ahci_port_priv *pp;
+ void *mem;
+ dma_addr_t mem_dma;
+ size_t dma_sz, rx_fis_sz;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ /* check FBS capability */
+ if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd = readl(port_mmio + PORT_CMD);
+ if (cmd & PORT_CMD_FBSCP)
+ pp->fbs_supported = true;
+ else
+ dev_printk(KERN_WARNING, dev,
+ "The port is not capable of FBS\n");
+ }
+
+ if (pp->fbs_supported) {
+ dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
+ rx_fis_sz = AHCI_RX_FIS_SZ * 16;
+ } else {
+ dma_sz = AHCI_PORT_PRIV_DMA_SZ;
+ rx_fis_sz = AHCI_RX_FIS_SZ;
+ }
+
+ mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ memset(mem, 0, dma_sz);
+
+ /*
+ * First item in chunk of DMA memory: 32-slot command table,
+ * 32 bytes each in size
+ */
+ pp->cmd_slot = mem;
+ pp->cmd_slot_dma = mem_dma;
+
+ mem += AHCI_CMD_SLOT_SZ;
+ mem_dma += AHCI_CMD_SLOT_SZ;
+
+ /*
+ * Second item: Received-FIS area
+ */
+ pp->rx_fis = mem;
+ pp->rx_fis_dma = mem_dma;
+
+ mem += rx_fis_sz;
+ mem_dma += rx_fis_sz;
+
+ /*
+ * Third item: data area for storing a single command
+ * and its scatter-gather table
+ */
+ pp->cmd_tbl = mem;
+ pp->cmd_tbl_dma = mem_dma;
+
+ /*
+ * Save off initial list of interrupts to be enabled.
+ * This could be changed later
+ */
+ pp->intr_mask = DEF_PORT_IRQ;
+
+ ap->private_data = pp;
+
+ /* engage engines, captain */
+ return ahci_port_resume(ap);
+}
+
+static void ahci_port_stop(struct ata_port *ap)
+{
+ const char *emsg = NULL;
+ int rc;
+
+ /* de-initialize port */
+ rc = ahci_deinit_port(ap, &emsg);
+ if (rc)
+ ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
+}
+
+void ahci_print_info(struct ata_host *host, const char *scc_s)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 vers, cap, cap2, impl, speed;
+ const char *speed_s;
+
+ vers = readl(mmio + HOST_VERSION);
+ cap = hpriv->cap;
+ cap2 = hpriv->cap2;
+ impl = hpriv->port_map;
+
+ speed = (cap >> 20) & 0xf;
+ if (speed == 1)
+ speed_s = "1.5";
+ else if (speed == 2)
+ speed_s = "3";
+ else if (speed == 3)
+ speed_s = "6";
+ else
+ speed_s = "?";
+
+ dev_info(host->dev,
+ "AHCI %02x%02x.%02x%02x "
+ "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
+ ,
+
+ (vers >> 24) & 0xff,
+ (vers >> 16) & 0xff,
+ (vers >> 8) & 0xff,
+ vers & 0xff,
+
+ ((cap >> 8) & 0x1f) + 1,
+ (cap & 0x1f) + 1,
+ speed_s,
+ impl,
+ scc_s);
+
+ dev_info(host->dev,
+ "flags: "
+ "%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s\n"
+ ,
+
+ cap & HOST_CAP_64 ? "64bit " : "",
+ cap & HOST_CAP_NCQ ? "ncq " : "",
+ cap & HOST_CAP_SNTF ? "sntf " : "",
+ cap & HOST_CAP_MPS ? "ilck " : "",
+ cap & HOST_CAP_SSS ? "stag " : "",
+ cap & HOST_CAP_ALPM ? "pm " : "",
+ cap & HOST_CAP_LED ? "led " : "",
+ cap & HOST_CAP_CLO ? "clo " : "",
+ cap & HOST_CAP_ONLY ? "only " : "",
+ cap & HOST_CAP_PMP ? "pmp " : "",
+ cap & HOST_CAP_FBS ? "fbs " : "",
+ cap & HOST_CAP_PIO_MULTI ? "pio " : "",
+ cap & HOST_CAP_SSC ? "slum " : "",
+ cap & HOST_CAP_PART ? "part " : "",
+ cap & HOST_CAP_CCC ? "ccc " : "",
+ cap & HOST_CAP_EMS ? "ems " : "",
+ cap & HOST_CAP_SXS ? "sxs " : "",
+ cap2 & HOST_CAP2_APST ? "apst " : "",
+ cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
+ cap2 & HOST_CAP2_BOH ? "boh " : ""
+ );
+}
+EXPORT_SYMBOL_GPL(ahci_print_info);
+
+void ahci_set_em_messages(struct ahci_host_priv *hpriv,
+ struct ata_port_info *pi)
+{
+ u8 messages;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_loc = readl(mmio + HOST_EM_LOC);
+ u32 em_ctl = readl(mmio + HOST_EM_CTL);
+
+ if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
+ return;
+
+ messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
+
+ if (messages) {
+ /* store em_loc */
+ hpriv->em_loc = ((em_loc >> 16) * 4);
+ hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
+ hpriv->em_msg_type = messages;
+ pi->flags |= ATA_FLAG_EM;
+ if (!(em_ctl & EM_CTL_ALHD))
+ pi->flags |= ATA_FLAG_SW_ACTIVITY;
+ }
+}
+EXPORT_SYMBOL_GPL(ahci_set_em_messages);
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 49cffb6094a3..c47373f01f89 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -65,6 +65,7 @@
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
+#include <linux/ratelimit.h>
#include "libata.h"
@@ -96,7 +97,6 @@ static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
unsigned int ata_print_id = 1;
-static struct workqueue_struct *ata_wq;
struct workqueue_struct *ata_aux_wq;
@@ -1685,52 +1685,6 @@ unsigned long ata_id_xfermask(const u16 *id)
return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
-/**
- * ata_pio_queue_task - Queue port_task
- * @ap: The ata_port to queue port_task for
- * @data: data for @fn to use
- * @delay: delay time in msecs for workqueue function
- *
- * Schedule @fn(@data) for execution after @delay jiffies using
- * port_task. There is one port_task per port and it's the
- * user(low level driver)'s responsibility to make sure that only
- * one task is active at any given time.
- *
- * libata core layer takes care of synchronization between
- * port_task and EH. ata_pio_queue_task() may be ignored for EH
- * synchronization.
- *
- * LOCKING:
- * Inherited from caller.
- */
-void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
-{
- ap->port_task_data = data;
-
- /* may fail if ata_port_flush_task() in progress */
- queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
-}
-
-/**
- * ata_port_flush_task - Flush port_task
- * @ap: The ata_port to flush port_task for
- *
- * After this function completes, port_task is guranteed not to
- * be running or scheduled.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- */
-void ata_port_flush_task(struct ata_port *ap)
-{
- DPRINTK("ENTER\n");
-
- cancel_rearming_delayed_work(&ap->port_task);
-
- if (ata_msg_ctl(ap))
- ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
-}
-
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
struct completion *waiting = qc->private_data;
@@ -1852,7 +1806,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
- ata_port_flush_task(ap);
+ ata_sff_flush_pio_task(ap);
if (!rc) {
spin_lock_irqsave(ap->lock, flags);
@@ -1906,22 +1860,6 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
ap->qc_active = preempted_qc_active;
ap->nr_active_links = preempted_nr_active_links;
- /* XXX - Some LLDDs (sata_mv) disable port on command failure.
- * Until those drivers are fixed, we detect the condition
- * here, fail the command with AC_ERR_SYSTEM and reenable the
- * port.
- *
- * Note that this doesn't change any behavior as internal
- * command failure results in disabling the device in the
- * higher layer for LLDDs without new reset/EH callbacks.
- *
- * Kill the following code as soon as those drivers are fixed.
- */
- if (ap->flags & ATA_FLAG_DISABLED) {
- err_mask |= AC_ERR_SYSTEM;
- ata_port_probe(ap);
- }
-
spin_unlock_irqrestore(ap->lock, flags);
if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
@@ -2767,8 +2705,6 @@ int ata_bus_probe(struct ata_port *ap)
int rc;
struct ata_device *dev;
- ata_port_probe(ap);
-
ata_for_each_dev(dev, &ap->link, ALL)
tries[dev->devno] = ATA_PROBE_MAX_TRIES;
@@ -2796,8 +2732,7 @@ int ata_bus_probe(struct ata_port *ap)
ap->ops->phy_reset(ap);
ata_for_each_dev(dev, &ap->link, ALL) {
- if (!(ap->flags & ATA_FLAG_DISABLED) &&
- dev->class != ATA_DEV_UNKNOWN)
+ if (dev->class != ATA_DEV_UNKNOWN)
classes[dev->devno] = dev->class;
else
classes[dev->devno] = ATA_DEV_NONE;
@@ -2805,8 +2740,6 @@ int ata_bus_probe(struct ata_port *ap)
dev->class = ATA_DEV_UNKNOWN;
}
- ata_port_probe(ap);
-
/* read IDENTIFY page and configure devices. We have to do the identify
specific sequence bass-ackwards so that PDIAG- is released by
the slave device */
@@ -2856,8 +2789,6 @@ int ata_bus_probe(struct ata_port *ap)
ata_for_each_dev(dev, &ap->link, ENABLED)
return 0;
- /* no device present, disable port */
- ata_port_disable(ap);
return -ENODEV;
fail:
@@ -2889,22 +2820,6 @@ int ata_bus_probe(struct ata_port *ap)
}
/**
- * ata_port_probe - Mark port as enabled
- * @ap: Port for which we indicate enablement
- *
- * Modify @ap data structure such that the system
- * thinks that the entire port is enabled.
- *
- * LOCKING: host lock, or some other form of
- * serialization.
- */
-
-void ata_port_probe(struct ata_port *ap)
-{
- ap->flags &= ~ATA_FLAG_DISABLED;
-}
-
-/**
* sata_print_link_status - Print SATA link status
* @link: SATA link to printk link status about
*
@@ -2951,26 +2866,6 @@ struct ata_device *ata_dev_pair(struct ata_device *adev)
}
/**
- * ata_port_disable - Disable port.
- * @ap: Port to be disabled.
- *
- * Modify @ap data structure such that the system
- * thinks that the entire port is disabled, and should
- * never attempt to probe or communicate with devices
- * on this port.
- *
- * LOCKING: host lock, or some other form of
- * serialization.
- */
-
-void ata_port_disable(struct ata_port *ap)
-{
- ap->link.device[0].class = ATA_DEV_NONE;
- ap->link.device[1].class = ATA_DEV_NONE;
- ap->flags |= ATA_FLAG_DISABLED;
-}
-
-/**
* sata_down_spd_limit - adjust SATA spd limit downward
* @link: Link to adjust SATA spd limit for
* @spd_limit: Additional limit
@@ -3631,9 +3526,15 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
{
unsigned long start = jiffies;
- unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
+ unsigned long nodev_deadline;
int warned = 0;
+ /* choose which 0xff timeout to use, read comment in libata.h */
+ if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
+ nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
+ else
+ nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
+
/* Slave readiness can't be tested separately from master. On
* M/S emulation configuration, this function should be called
* only on the master and it will handle both master and slave.
@@ -3651,12 +3552,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
if (ready > 0)
return 0;
- /* -ENODEV could be transient. Ignore -ENODEV if link
+ /*
+ * -ENODEV could be transient. Ignore -ENODEV if link
* is online. Also, some SATA devices take a long
- * time to clear 0xff after reset. For example,
- * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
- * GoVault needs even more than that. Wait for
- * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
+ * time to clear 0xff after reset. Wait for
+ * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
+ * offline.
*
* Note that some PATA controllers (pata_ali) explode
* if status register is read more than once when
@@ -5558,30 +5459,6 @@ void ata_host_resume(struct ata_host *host)
#endif
/**
- * ata_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized. Allocates space for PRD table.
- *
- * May be used as the port_start() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_port_start(struct ata_port *ap)
-{
- struct device *dev = ap->dev;
-
- ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
- GFP_KERNEL);
- if (!ap->prd)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
* ata_dev_init - Initialize an ata_device structure
* @dev: Device structure to initialize
*
@@ -5709,12 +5586,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->pflags |= ATA_PFLAG_INITIALIZING;
ap->lock = &host->lock;
- ap->flags = ATA_FLAG_DISABLED;
ap->print_id = -1;
- ap->ctl = ATA_DEVCTL_OBS;
ap->host = host;
ap->dev = host->dev;
- ap->last_ctl = 0xFF;
#if defined(ATA_VERBOSE_DEBUG)
/* turn on all debugging levels */
@@ -5725,11 +5599,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
-#ifdef CONFIG_ATA_SFF
- INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
-#else
- INIT_DELAYED_WORK(&ap->port_task, NULL);
-#endif
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
@@ -5747,6 +5616,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->stats.unhandled_irq = 1;
ap->stats.idle_irq = 1;
#endif
+ ata_sff_port_init(ap);
+
return ap;
}
@@ -6138,8 +6009,6 @@ static void async_port_probe(void *data, async_cookie_t cookie)
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
- ata_port_probe(ap);
-
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
@@ -6663,62 +6532,43 @@ static void __init ata_parse_force_param(void)
static int __init ata_init(void)
{
- ata_parse_force_param();
+ int rc = -ENOMEM;
- /*
- * FIXME: In UP case, there is only one workqueue thread and if you
- * have more than one PIO device, latency is bloody awful, with
- * occasional multi-second "hiccups" as one PIO device waits for
- * another. It's an ugly wart that users DO occasionally complain
- * about; luckily most users have at most one PIO polled device.
- */
- ata_wq = create_workqueue("ata");
- if (!ata_wq)
- goto free_force_tbl;
+ ata_parse_force_param();
ata_aux_wq = create_singlethread_workqueue("ata_aux");
if (!ata_aux_wq)
- goto free_wq;
+ goto fail;
+
+ rc = ata_sff_init();
+ if (rc)
+ goto fail;
printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
return 0;
-free_wq:
- destroy_workqueue(ata_wq);
-free_force_tbl:
+fail:
kfree(ata_force_tbl);
- return -ENOMEM;
+ if (ata_aux_wq)
+ destroy_workqueue(ata_aux_wq);
+ return rc;
}
static void __exit ata_exit(void)
{
+ ata_sff_exit();
kfree(ata_force_tbl);
- destroy_workqueue(ata_wq);
destroy_workqueue(ata_aux_wq);
}
subsys_initcall(ata_init);
module_exit(ata_exit);
-static unsigned long ratelimit_time;
-static DEFINE_SPINLOCK(ata_ratelimit_lock);
+static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
int ata_ratelimit(void)
{
- int rc;
- unsigned long flags;
-
- spin_lock_irqsave(&ata_ratelimit_lock, flags);
-
- if (time_after(jiffies, ratelimit_time)) {
- rc = 1;
- ratelimit_time = jiffies + (HZ/5);
- } else
- rc = 0;
-
- spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
-
- return rc;
+ return __ratelimit(&ratelimit);
}
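
Editor's note: the rewritten ata_ratelimit() delegates to the generic __ratelimit() helper with a 200 ms (HZ / 5) interval and a burst of 1, replacing the open-coded jiffies comparison deleted in the hunk above. A rough user-space equivalent of that one-event-per-interval check, using clock_gettime() in place of jiffies:

/* One-message-per-200ms check, analogous to the old jiffies-based code. */
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static bool demo_ratelimit(void)
{
	static struct timespec next;
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (now.tv_sec > next.tv_sec ||
	    (now.tv_sec == next.tv_sec && now.tv_nsec >= next.tv_nsec)) {
		next = now;
		next.tv_nsec += 200 * 1000000L;
		if (next.tv_nsec >= 1000000000L) {
			next.tv_sec++;
			next.tv_nsec -= 1000000000L;
		}
		return true;
	}
	return false;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("attempt %d: %s\n", i,
		       demo_ratelimit() ? "allowed" : "suppressed");
	return 0;
}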
/**
@@ -6826,11 +6676,9 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
-EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
@@ -6842,7 +6690,6 @@ EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
-EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
@@ -6864,7 +6711,6 @@ EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
-EXPORT_SYMBOL_GPL(ata_pio_queue_task);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 228740f356c9..f77a67303f8b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -550,8 +550,8 @@ void ata_scsi_error(struct Scsi_Host *host)
DPRINTK("ENTER\n");
- /* synchronize with port task */
- ata_port_flush_task(ap);
+ /* make sure sff pio task is not running */
+ ata_sff_flush_pio_task(ap);
/* synchronize with host lock and sort out timeouts */
@@ -3684,7 +3684,7 @@ void ata_std_error_handler(struct ata_port *ap)
ata_reset_fn_t hardreset = ops->hardreset;
/* ignore built-in hardreset if SCR access is not available */
- if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
+ if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
hardreset = NULL;
ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 00305f41ed86..224faabd7b7e 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -231,10 +231,14 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr)
return "<unknown>";
}
+#define PMP_GSCR_SII_POL 129
+
static int sata_pmp_configure(struct ata_device *dev, int print_info)
{
struct ata_port *ap = dev->link->ap;
u32 *gscr = dev->gscr;
+ u16 vendor = sata_pmp_gscr_vendor(gscr);
+ u16 devid = sata_pmp_gscr_devid(gscr);
unsigned int err_mask = 0;
const char *reason;
int nr_ports, rc;
@@ -260,12 +264,34 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
goto fail;
}
+ /* Disable sending Early R_OK.
+ * With "cached read" HDD testing and multiple ports busy on a SATA
+ * host controller, 3726 PMP will very rarely drop a deferred
+ * R_OK that was intended for the host. Symptom will be all
+ * 5 drives under test will timeout, get reset, and recover.
+ */
+ if (vendor == 0x1095 && devid == 0x3726) {
+ u32 reg;
+
+ err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "failed to read Sil3726 Private Register";
+ goto fail;
+ }
+ reg &= ~0x1;
+ err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+ if (err_mask) {
+ rc = -EIO;
+ reason = "failed to write Sil3726 Private Register";
+ goto fail;
+ }
+ }
+
if (print_info) {
ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
"0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
- sata_pmp_spec_rev_str(gscr),
- sata_pmp_gscr_vendor(gscr),
- sata_pmp_gscr_devid(gscr),
+ sata_pmp_spec_rev_str(gscr), vendor, devid,
sata_pmp_gscr_rev(gscr),
nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
gscr[SATA_PMP_GSCR_FEAT]);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0088cdeb0b1e..cfa9dd3d7253 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3345,9 +3345,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
struct ata_link *link;
struct ata_device *dev;
- if (ap->flags & ATA_FLAG_DISABLED)
- return;
-
repeat:
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, link, ENABLED) {
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index e3877b6843c9..19ddf924944f 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -40,10 +40,12 @@
#include "libata.h"
+static struct workqueue_struct *ata_sff_wq;
+
const struct ata_port_operations ata_sff_port_ops = {
.inherits = &ata_base_port_ops,
- .qc_prep = ata_sff_qc_prep,
+ .qc_prep = ata_noop_qc_prep,
.qc_issue = ata_sff_qc_issue,
.qc_fill_rtf = ata_sff_qc_fill_rtf,
@@ -53,9 +55,7 @@ const struct ata_port_operations ata_sff_port_ops = {
.softreset = ata_sff_softreset,
.hardreset = sata_sff_hardreset,
.postreset = ata_sff_postreset,
- .drain_fifo = ata_sff_drain_fifo,
.error_handler = ata_sff_error_handler,
- .post_internal_cmd = ata_sff_post_internal_cmd,
.sff_dev_select = ata_sff_dev_select,
.sff_check_status = ata_sff_check_status,
@@ -63,178 +63,13 @@ const struct ata_port_operations ata_sff_port_ops = {
.sff_tf_read = ata_sff_tf_read,
.sff_exec_command = ata_sff_exec_command,
.sff_data_xfer = ata_sff_data_xfer,
- .sff_irq_on = ata_sff_irq_on,
.sff_irq_clear = ata_sff_irq_clear,
+ .sff_drain_fifo = ata_sff_drain_fifo,
.lost_interrupt = ata_sff_lost_interrupt,
-
- .port_start = ata_sff_port_start,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
-const struct ata_port_operations ata_bmdma_port_ops = {
- .inherits = &ata_sff_port_ops,
-
- .mode_filter = ata_bmdma_mode_filter,
-
- .bmdma_setup = ata_bmdma_setup,
- .bmdma_start = ata_bmdma_start,
- .bmdma_stop = ata_bmdma_stop,
- .bmdma_status = ata_bmdma_status,
-};
-EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
-
-const struct ata_port_operations ata_bmdma32_port_ops = {
- .inherits = &ata_bmdma_port_ops,
-
- .sff_data_xfer = ata_sff_data_xfer32,
- .port_start = ata_sff_port_start32,
-};
-EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
-
-/**
- * ata_fill_sg - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int si, pi;
-
- pi = 0;
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- u32 addr, offset;
- u32 sg_len, len;
-
- /* determine if physical DMA addr spans 64K boundary.
- * Note h/w doesn't support 64-bit, so we unconditionally
- * truncate dma_addr_t to u32.
- */
- addr = (u32) sg_dma_address(sg);
- sg_len = sg_dma_len(sg);
-
- while (sg_len) {
- offset = addr & 0xffff;
- len = sg_len;
- if ((offset + sg_len) > 0x10000)
- len = 0x10000 - offset;
-
- ap->prd[pi].addr = cpu_to_le32(addr);
- ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
- pi++;
- sg_len -= len;
- addr += len;
- }
- }
-
- ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_fill_sg_dumb - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command. Perform the fill
- * so that we avoid writing any length 64K records for
- * controllers that don't follow the spec.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int si, pi;
-
- pi = 0;
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- u32 addr, offset;
- u32 sg_len, len, blen;
-
- /* determine if physical DMA addr spans 64K boundary.
- * Note h/w doesn't support 64-bit, so we unconditionally
- * truncate dma_addr_t to u32.
- */
- addr = (u32) sg_dma_address(sg);
- sg_len = sg_dma_len(sg);
-
- while (sg_len) {
- offset = addr & 0xffff;
- len = sg_len;
- if ((offset + sg_len) > 0x10000)
- len = 0x10000 - offset;
-
- blen = len & 0xffff;
- ap->prd[pi].addr = cpu_to_le32(addr);
- if (blen == 0) {
- /* Some PATA chipsets like the CS5530 can't
- cope with 0x0000 meaning 64K as the spec
- says */
- ap->prd[pi].flags_len = cpu_to_le32(0x8000);
- blen = 0x8000;
- ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
- }
- ap->prd[pi].flags_len = cpu_to_le32(blen);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
-
- pi++;
- sg_len -= len;
- addr += len;
- }
- }
-
- ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
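
Both PRD fill routines removed above enforce the same rule: no single PRD entry may cross a 64 KiB boundary, and the "dumb" variant additionally never encodes a length of 0 (which the spec defines as 64 KiB but chips such as the CS5530 mishandle), writing two 32 KiB entries instead. A stand-alone arithmetic sketch of those two rules, outside any libata types (the ATA_PRD_EOT flag on the last entry is omitted here):

#include <stdint.h>
#include <stdio.h>

/* Emit PRD-style (addr, len) pairs for one DMA segment, never crossing
 * a 64 KiB boundary and never emitting a zero (= 64 KiB) length field. */
static void fill_prd(uint32_t addr, uint32_t sg_len)
{
	while (sg_len) {
		uint32_t offset = addr & 0xffff;
		uint32_t len = sg_len;

		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;		/* stop at the boundary */

		if ((len & 0xffff) == 0) {
			/* exactly 64 KiB: split into two 32 KiB entries */
			printf("PRD: addr=0x%08x len=0x8000\n", addr);
			printf("PRD: addr=0x%08x len=0x8000\n", addr + 0x8000);
		} else {
			printf("PRD: addr=0x%08x len=0x%x\n", addr, len);
		}

		sg_len -= len;
		addr += len;
	}
}

int main(void)
{
	/* 40 KiB starting 8 KiB below a boundary: split into 8 KiB + 32 KiB */
	fill_prd(0x0001e000, 0xa000);
	/* 64 KiB, boundary aligned: emitted as two 32 KiB entries */
	fill_prd(0x00020000, 0x10000);
	return 0;
}
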
-
-/**
- * ata_sff_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_qc_prep(struct ata_queued_cmd *qc)
-{
- if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
-
- ata_fill_sg(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
-
-/**
- * ata_sff_dumb_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
-{
- if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
-
- ata_fill_sg_dumb(qc);
-}
-EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
-
/**
* ata_sff_check_status - Read device status reg & clear interrupt
* @ap: port where the device is
@@ -446,6 +281,27 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
/**
+ * ata_sff_set_devctl - Write device control reg
+ * @ap: port where the device is
+ * @ctl: value to write
+ *
+ * Writes ATA taskfile device control register.
+ *
+ * Note: may NOT be used as the sff_set_devctl() entry in
+ * ata_port_operations.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ if (ap->ops->sff_set_devctl)
+ ap->ops->sff_set_devctl(ap, ctl);
+ else
+ iowrite8(ctl, ap->ioaddr.ctl_addr);
+}
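
The new helper falls back to a plain iowrite8() on ioaddr.ctl_addr, but lets a driver whose device control register is reached some other way supply its own accessor via the sff_set_devctl hook. A minimal, hypothetical sketch of such a hook; the register layout and offset are invented for illustration:

#include <linux/libata.h>

static void foo_set_devctl(struct ata_port *ap, u8 ctl)
{
	/* hypothetical controller: devctl lives in a BAR-mapped window
	 * rather than behind ioaddr.ctl_addr */
	void __iomem *base = ap->host->iomap[0];	/* assumed BAR 0 mapping */

	iowrite8(ctl, base + 0x1c);			/* invented register offset */
}

static struct ata_port_operations foo_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.sff_set_devctl	= foo_set_devctl,
};
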
+
+/**
* ata_sff_dev_select - Select device 0/1 on ATA bus
* @ap: ATA channel to manipulate
* @device: ATA device (numbered from zero) to select
@@ -491,7 +347,7 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select);
* LOCKING:
* caller.
*/
-void ata_dev_select(struct ata_port *ap, unsigned int device,
+static void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
if (ata_msg_probe(ap))
@@ -517,24 +373,29 @@ void ata_dev_select(struct ata_port *ap, unsigned int device,
* Enable interrupts on a legacy IDE device using MMIO or PIO,
* wait for idle, clear any pending interrupts.
*
+ * Note: may NOT be used as the sff_irq_on() entry in
+ * ata_port_operations.
+ *
* LOCKING:
* Inherited from caller.
*/
-u8 ata_sff_irq_on(struct ata_port *ap)
+void ata_sff_irq_on(struct ata_port *ap)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- u8 tmp;
+
+ if (ap->ops->sff_irq_on) {
+ ap->ops->sff_irq_on(ap);
+ return;
+ }
ap->ctl &= ~ATA_NIEN;
ap->last_ctl = ap->ctl;
- if (ioaddr->ctl_addr)
- iowrite8(ap->ctl, ioaddr->ctl_addr);
- tmp = ata_wait_idle(ap);
+ if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
+ ata_sff_set_devctl(ap, ap->ctl);
+ ata_wait_idle(ap);
ap->ops->sff_irq_clear(ap);
-
- return tmp;
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);
@@ -579,7 +440,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
if (ioaddr->ctl_addr)
iowrite8(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
- ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -615,8 +475,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->device, ioaddr->device_addr);
VPRINTK("device 0x%X\n", tf->device);
}
-
- ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
@@ -894,7 +752,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
do_write);
}
- if (!do_write)
+ if (!do_write && !PageSlab(page))
flush_dcache_page(page);
qc->curbytes += qc->sect_size;
@@ -1165,7 +1023,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
qc = ata_qc_from_tag(ap, qc->tag);
if (qc) {
if (likely(!(qc->err_mask & AC_ERR_HSM))) {
- ap->ops->sff_irq_on(ap);
+ ata_sff_irq_on(ap);
ata_qc_complete(qc);
} else
ata_port_freeze(ap);
@@ -1181,7 +1039,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
} else {
if (in_wq) {
spin_lock_irqsave(ap->lock, flags);
- ap->ops->sff_irq_on(ap);
+ ata_sff_irq_on(ap);
ata_qc_complete(qc);
spin_unlock_irqrestore(ap->lock, flags);
} else
@@ -1293,7 +1151,7 @@ fsm_start:
if (in_wq)
spin_unlock_irqrestore(ap->lock, flags);
- /* if polling, ata_pio_task() handles the rest.
+ /* if polling, ata_sff_pio_task() handles the rest.
* otherwise, interrupt handler takes over from here.
*/
break;
@@ -1458,14 +1316,38 @@ fsm_start:
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
-void ata_pio_task(struct work_struct *work)
+void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+{
+ /* may fail if ata_sff_flush_pio_task() in progress */
+ queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
+ msecs_to_jiffies(delay));
+}
+EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
+
+void ata_sff_flush_pio_task(struct ata_port *ap)
+{
+ DPRINTK("ENTER\n");
+
+ cancel_rearming_delayed_work(&ap->sff_pio_task);
+ ap->hsm_task_state = HSM_ST_IDLE;
+
+ if (ata_msg_ctl(ap))
+ ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
+}
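
Together these two helpers replace the old generic port_task plumbing: issuing a polled command queues the per-port work item on ata_sff_wq, and error handling flushes it before the port is touched. A condensed, schematic sketch of those core paths, written as if inside libata-sff.c; the sketch_* names are invented, and ata_sff_flush_pio_task() remains an internal helper rather than an exported driver API:

/* schematic issue path for a polled PIO/ATAPI command */
static void sketch_issue_polled(struct ata_port *ap)
{
	/* initial HSM state depends on the protocol; HSM_ST_FIRST is
	 * used e.g. for PIO data-out and ATAPI CDB transfer */
	ap->hsm_task_state = HSM_ST_FIRST;
	ata_sff_queue_pio_task(ap, 0);	/* ata_sff_pio_task() runs off ata_sff_wq */
}

/* schematic EH prologue: quiesce any in-flight PIO work first */
static void sketch_eh_prologue(struct ata_port *ap)
{
	ata_sff_flush_pio_task(ap);	/* cancel + sync, HSM back to HSM_ST_IDLE */
	/* ... regular SFF error handling continues with the port quiesced ... */
}
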
+
+static void ata_sff_pio_task(struct work_struct *work)
{
struct ata_port *ap =
- container_of(work, struct ata_port, port_task.work);
- struct ata_queued_cmd *qc = ap->port_task_data;
+ container_of(work, struct ata_port, sff_pio_task.work);
+ struct ata_queued_cmd *qc;
u8 status;
int poll_next;
+ /* qc can be NULL if timeout occurred */
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (!qc)
+ return;
+
fsm_start:
WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1481,7 +1363,7 @@ fsm_start:
msleep(2);
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
- ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
+ ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
return;
}
}
@@ -1497,15 +1379,11 @@ fsm_start:
}
/**
- * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
+ * ata_sff_qc_issue - issue taskfile to a SFF controller
* @qc: command to issue to device
*
- * Using various libata functions and hooks, this function
- * starts an ATA command. ATA commands are grouped into
- * classes called "protocols", and issuing each type of protocol
- * is slightly different.
- *
- * May be used as the qc_issue() entry in ata_port_operations.
+ * This function issues a PIO or NODATA command to a SFF
+ * controller.
*
* LOCKING:
* spin_lock_irqsave(host lock)
@@ -1520,23 +1398,8 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
/* Use polling pio if the LLD doesn't handle
* interrupt driven pio and atapi CDB interrupt.
*/
- if (ap->flags & ATA_FLAG_PIO_POLLING) {
- switch (qc->tf.protocol) {
- case ATA_PROT_PIO:
- case ATA_PROT_NODATA:
- case ATAPI_PROT_PIO:
- case ATAPI_PROT_NODATA:
- qc->tf.flags |= ATA_TFLAG_POLLING;
- break;
- case ATAPI_PROT_DMA:
- if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
- /* see ata_dma_blacklisted() */
-				BUG();