Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/acpi/Kconfig | 30
-rw-r--r-- drivers/acpi/Makefile | 5
-rw-r--r-- drivers/acpi/acpi_configfs.c | 267
-rw-r--r-- drivers/acpi/acpi_dbg.c | 4
-rw-r--r-- drivers/acpi/acpi_lpat.c | 4
-rw-r--r-- drivers/acpi/acpi_lpss.c | 5
-rw-r--r-- drivers/acpi/acpi_video.c | 3
-rw-r--r-- drivers/acpi/acpica/exconfig.c | 2
-rw-r--r-- drivers/acpi/acpica/nsparse.c | 9
-rw-r--r-- drivers/acpi/apei/Makefile | 2
-rw-r--r-- drivers/acpi/apei/apei-internal.h | 2
-rw-r--r-- drivers/acpi/apei/bert.c | 150
-rw-r--r-- drivers/acpi/apei/einj.c | 57
-rw-r--r-- drivers/acpi/apei/erst.c | 7
-rw-r--r-- drivers/acpi/bus.c | 99
-rw-r--r-- drivers/acpi/button.c | 149
-rw-r--r-- drivers/acpi/cppc_acpi.c | 24
-rw-r--r-- drivers/acpi/dock.c | 7
-rw-r--r-- drivers/acpi/dptf/Kconfig | 15
-rw-r--r-- drivers/acpi/dptf/Makefile | 4
-rw-r--r-- drivers/acpi/dptf/dptf_power.c | 128
-rw-r--r-- drivers/acpi/dptf/int340x_thermal.c (renamed from drivers/acpi/int340x_thermal.c) | 0
-rw-r--r-- drivers/acpi/ec.c | 121
-rw-r--r-- drivers/acpi/internal.h | 3
-rw-r--r-- drivers/acpi/nfit.c | 11
-rw-r--r-- drivers/acpi/numa.c | 226
-rw-r--r-- drivers/acpi/pci_link.c | 63
-rw-r--r-- drivers/acpi/pci_slot.c | 43
-rw-r--r-- drivers/acpi/pmic/intel_pmic.c | 84
-rw-r--r-- drivers/acpi/pmic/intel_pmic.h | 4
-rw-r--r-- drivers/acpi/pmic/intel_pmic_bxtwc.c | 420
-rw-r--r-- drivers/acpi/pmic/intel_pmic_crc.c | 5
-rw-r--r-- drivers/acpi/pmic/intel_pmic_xpower.c | 7
-rw-r--r-- drivers/acpi/processor_core.c | 26
-rw-r--r-- drivers/acpi/processor_driver.c | 2
-rw-r--r-- drivers/acpi/processor_idle.c | 542
-rw-r--r-- drivers/acpi/scan.c | 81
-rw-r--r-- drivers/acpi/sleep.c | 29
-rw-r--r-- drivers/acpi/sysfs.c | 6
-rw-r--r-- drivers/acpi/tables.c | 23
-rw-r--r-- drivers/acpi/thermal.c | 3
-rw-r--r-- drivers/acpi/utils.c | 6
-rw-r--r-- drivers/acpi/video_detect.c | 8
-rw-r--r-- drivers/ata/Kconfig | 8
-rw-r--r-- drivers/ata/Makefile | 2
-rw-r--r-- drivers/ata/ahci.c | 2
-rw-r--r-- drivers/ata/ahci_brcm.c (renamed from drivers/ata/ahci_brcmstb.c) | 46
-rw-r--r-- drivers/ata/libahci.c | 10
-rw-r--r-- drivers/ata/libata-core.c | 20
-rw-r--r-- drivers/ata/libata-eh.c | 8
-rw-r--r-- drivers/ata/libata-scsi.c | 84
-rw-r--r-- drivers/ata/libata-transport.c | 9
-rw-r--r-- drivers/ata/pata_arasan_cf.c | 2
-rw-r--r-- drivers/ata/pata_atiixp.c | 4
-rw-r--r-- drivers/ata/pata_hpt366.c | 2
-rw-r--r-- drivers/ata/pata_marvell.c | 2
-rw-r--r-- drivers/ata/sata_dwc_460ex.c | 14
-rw-r--r-- drivers/base/power/clock_ops.c | 45
-rw-r--r-- drivers/base/power/domain.c | 301
-rw-r--r-- drivers/base/power/runtime.c | 13
-rw-r--r-- drivers/base/topology.c | 13
-rw-r--r-- drivers/bcma/bcma_private.h | 2
-rw-r--r-- drivers/block/brd.c | 10
-rw-r--r-- drivers/block/cciss.c | 3
-rw-r--r-- drivers/block/drbd/drbd_actlog.c | 61
-rw-r--r-- drivers/block/drbd/drbd_bitmap.c | 92
-rw-r--r-- drivers/block/drbd/drbd_debugfs.c | 13
-rw-r--r-- drivers/block/drbd/drbd_int.h | 57
-rw-r--r-- drivers/block/drbd/drbd_interval.h | 14
-rw-r--r-- drivers/block/drbd/drbd_main.c | 135
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 282
-rw-r--r-- drivers/block/drbd/drbd_proc.c | 30
-rw-r--r-- drivers/block/drbd/drbd_protocol.h | 79
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 569
-rw-r--r-- drivers/block/drbd/drbd_req.c | 120
-rw-r--r-- drivers/block/drbd/drbd_req.h | 5
-rw-r--r-- drivers/block/drbd/drbd_state.c | 61
-rw-r--r-- drivers/block/drbd/drbd_state.h | 2
-rw-r--r-- drivers/block/drbd/drbd_strings.c | 8
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 118
-rw-r--r-- drivers/block/floppy.c | 6
-rw-r--r-- drivers/block/loop.c | 15
-rw-r--r-- drivers/block/mg_disk.c | 9
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 7
-rw-r--r-- drivers/block/nbd.c | 4
-rw-r--r-- drivers/block/null_blk.c | 2
-rw-r--r-- drivers/block/osdblk.c | 2
-rw-r--r-- drivers/block/pktcdvd.c | 4
-rw-r--r-- drivers/block/ps3disk.c | 7
-rw-r--r-- drivers/block/ps3vram.c | 3
-rw-r--r-- drivers/block/rbd.c | 4
-rw-r--r-- drivers/block/rsxx/dev.c | 4
-rw-r--r-- drivers/block/rsxx/dma.c | 2
-rw-r--r-- drivers/block/skd_main.c | 10
-rw-r--r-- drivers/block/sunvdc.c | 3
-rw-r--r-- drivers/block/umem.c | 8
-rw-r--r-- drivers/block/virtio_blk.c | 26
-rw-r--r-- drivers/block/xen-blkback/blkback.c | 27
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 2
-rw-r--r-- drivers/block/xen-blkfront.c | 165
-rw-r--r-- drivers/block/zram/zram_drv.c | 2
-rw-r--r-- drivers/cdrom/cdrom.c | 28
-rw-r--r-- drivers/char/dsp56k.c | 2
-rw-r--r-- drivers/char/hw_random/Kconfig | 16
-rw-r--r-- drivers/char/hw_random/Makefile | 1
-rw-r--r-- drivers/char/hw_random/bcm2835-rng.c | 47
-rw-r--r-- drivers/char/hw_random/exynos-rng.c | 4
-rw-r--r-- drivers/char/hw_random/meson-rng.c | 131
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 16
-rw-r--r-- drivers/char/hw_random/stm32-rng.c | 10
-rw-r--r-- drivers/char/mem.c | 6
-rw-r--r-- drivers/clk/at91/clk-programmable.c | 2
-rw-r--r-- drivers/clk/sunxi/clk-sun4i-display.c | 5
-rw-r--r-- drivers/clk/sunxi/clk-sun4i-tcon-ch1.c | 4
-rw-r--r-- drivers/clocksource/Kconfig | 116
-rw-r--r-- drivers/clocksource/Makefile | 23
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 56
-rw-r--r-- drivers/clocksource/arm_global_timer.c | 26
-rw-r--r-- drivers/clocksource/armv7m_systick.c | 17
-rw-r--r-- drivers/clocksource/asm9260_timer.c | 22
-rw-r--r-- drivers/clocksource/bcm2835_timer.c | 38
-rw-r--r-- drivers/clocksource/bcm_kona_timer.c | 12
-rw-r--r-- drivers/clocksource/cadence_ttc_timer.c | 74
-rw-r--r-- drivers/clocksource/clksrc-dbx500-prcmu.c | 4
-rw-r--r-- drivers/clocksource/clksrc-probe.c | 14
-rw-r--r-- drivers/clocksource/clksrc_st_lpc.c | 20
-rw-r--r-- drivers/clocksource/clps711x-timer.c | 10
-rw-r--r-- drivers/clocksource/dw_apb_timer_of.c | 4
-rw-r--r-- drivers/clocksource/exynos_mct.c | 32
-rw-r--r-- drivers/clocksource/fsl_ftm_timer.c | 20
-rw-r--r-- drivers/clocksource/h8300_timer16.c | 12
-rw-r--r-- drivers/clocksource/h8300_timer8.c | 11
-rw-r--r-- drivers/clocksource/h8300_tpu.c | 10
-rw-r--r-- drivers/clocksource/meson6_timer.c | 19
-rw-r--r-- drivers/clocksource/mips-gic-timer.c | 24
-rw-r--r-- drivers/clocksource/moxart_timer.c | 39
-rw-r--r-- drivers/clocksource/mps2-timer.c | 8
-rw-r--r-- drivers/clocksource/mtk_timer.c | 8
-rw-r--r-- drivers/clocksource/mxs_timer.c | 26
-rw-r--r-- drivers/clocksource/nomadik-mtu.c | 43
-rw-r--r-- drivers/clocksource/pxa_timer.c | 44
-rw-r--r-- drivers/clocksource/qcom-timer.c | 23
-rw-r--r-- drivers/clocksource/rockchip_timer.c | 53
-rw-r--r-- drivers/clocksource/samsung_pwm_timer.c | 70
-rw-r--r-- drivers/clocksource/sun4i_timer.c | 43
-rw-r--r-- drivers/clocksource/tango_xtal.c | 10
-rw-r--r-- drivers/clocksource/tegra20_timer.c | 24
-rw-r--r-- drivers/clocksource/time-armada-370-xp.c | 98
-rw-r--r-- drivers/clocksource/time-efm32.c | 17
-rw-r--r-- drivers/clocksource/time-lpc32xx.c | 10
-rw-r--r-- drivers/clocksource/time-orion.c | 50
-rw-r--r-- drivers/clocksource/time-pistachio.c | 18
-rw-r--r-- drivers/clocksource/timer-atlas7.c | 30
-rw-r--r-- drivers/clocksource/timer-atmel-pit.c | 41
-rw-r--r-- drivers/clocksource/timer-atmel-st.c | 42
-rw-r--r-- drivers/clocksource/timer-digicolor.c | 16
-rw-r--r-- drivers/clocksource/timer-imx-gpt.c | 51
-rw-r--r-- drivers/clocksource/timer-integrator-ap.c | 57
-rw-r--r-- drivers/clocksource/timer-keystone.c | 13
-rw-r--r-- drivers/clocksource/timer-nps.c | 14
-rw-r--r-- drivers/clocksource/timer-oxnas-rps.c | 297
-rw-r--r-- drivers/clocksource/timer-prima2.c | 42
-rw-r--r-- drivers/clocksource/timer-sp804.c | 86
-rw-r--r-- drivers/clocksource/timer-stm32.c | 8
-rw-r--r-- drivers/clocksource/timer-sun5i.c | 33
-rw-r--r-- drivers/clocksource/timer-ti-32k.c | 8
-rw-r--r-- drivers/clocksource/timer-u300.c | 36
-rw-r--r-- drivers/clocksource/versatile.c | 6
-rw-r--r-- drivers/clocksource/vf_pit_timer.c | 25
-rw-r--r-- drivers/clocksource/vt8500_timer.c | 24
-rw-r--r-- drivers/clocksource/zevio-timer.c | 4
-rw-r--r-- drivers/cpufreq/Kconfig | 13
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 17
-rw-r--r-- drivers/cpufreq/amd_freq_sensitivity.c | 10
-rw-r--r-- drivers/cpufreq/cpufreq.c | 223
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 88
-rw-r--r-- drivers/cpufreq/cpufreq_governor.c | 73
-rw-r--r-- drivers/cpufreq/cpufreq_governor.h | 24
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 38
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.h | 1
-rw-r--r-- drivers/cpufreq/cpufreq_performance.c | 19
-rw-r--r-- drivers/cpufreq/cpufreq_powersave.c | 19
-rw-r--r-- drivers/cpufreq/cpufreq_stats.c | 157
-rw-r--r-- drivers/cpufreq/cpufreq_userspace.c | 104
-rw-r--r-- drivers/cpufreq/davinci-cpufreq.c | 22
-rw-r--r-- drivers/cpufreq/freq_table.c | 106
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 111
-rw-r--r-- drivers/cpufreq/mvebu-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/pcc-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/powernv-cpufreq.c | 186
-rw-r--r-- drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 3
-rw-r--r-- drivers/cpufreq/s3c24xx-cpufreq.c | 33
-rw-r--r-- drivers/cpufreq/s5pv210-cpufreq.c | 7
-rw-r--r-- drivers/cpuidle/cpuidle-arm.c | 26
-rw-r--r-- drivers/cpuidle/cpuidle.c | 12
-rw-r--r-- drivers/crypto/Kconfig | 13
-rw-r--r-- drivers/crypto/bfin_crc.c | 5
-rw-r--r-- drivers/crypto/caam/Kconfig | 18
-rw-r--r-- drivers/crypto/caam/Makefile | 4
-rw-r--r-- drivers/crypto/caam/caamhash.c | 5
-rw-r--r-- drivers/crypto/caam/caampkc.c | 607
-rw-r--r-- drivers/crypto/caam/caampkc.h | 70
-rw-r--r-- drivers/crypto/caam/compat.h | 3
-rw-r--r-- drivers/crypto/caam/ctrl.c | 125
-rw-r--r-- drivers/crypto/caam/desc.h | 11
-rw-r--r-- drivers/crypto/caam/desc_constr.h | 51
-rw-r--r-- drivers/crypto/caam/jr.c | 22
-rw-r--r-- drivers/crypto/caam/pdb.h | 188
-rw-r--r-- drivers/crypto/caam/pkc_desc.c | 36
-rw-r--r-- drivers/crypto/caam/regs.h | 151
-rw-r--r-- drivers/crypto/caam/sg_sw_sec4.h | 17
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes-xts.c | 43
-rw-r--r-- drivers/crypto/ccp/ccp-crypto.h | 3
-rw-r--r-- drivers/crypto/marvell/cesa.c | 142
-rw-r--r-- drivers/crypto/marvell/cesa.h | 120
-rw-r--r-- drivers/crypto/marvell/cipher.c | 157
-rw-r--r-- drivers/crypto/marvell/hash.c | 150
-rw-r--r-- drivers/crypto/marvell/tdma.c | 130
-rw-r--r-- drivers/crypto/mxs-dcp.c | 47
-rw-r--r-- drivers/crypto/nx/nx.c | 2
-rw-r--r-- drivers/crypto/omap-aes.c | 36
-rw-r--r-- drivers/crypto/omap-des.c | 14
-rw-r--r-- drivers/crypto/omap-sham.c | 47
-rw-r--r-- drivers/crypto/picoxcell_crypto.c | 60
-rw-r--r-- drivers/crypto/qat/Kconfig | 3
-rw-r--r-- drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1
-rw-r--r-- drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c | 1
-rw-r--r-- drivers/crypto/qat/qat_common/Makefile | 10
-rw-r--r-- drivers/crypto/qat/qat_common/adf_accel_devices.h | 1
-rw-r--r-- drivers/crypto/qat/qat_common/adf_aer.c | 49
-rw-r--r-- drivers/crypto/qat/qat_common/adf_common_drv.h | 2
-rw-r--r-- drivers/crypto/qat/qat_common/adf_sriov.c | 2
-rw-r--r-- drivers/crypto/qat/qat_common/adf_vf_isr.c | 2
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 8
-rw-r--r-- drivers/crypto/qat/qat_common/qat_asym_algs.c | 872
-rw-r--r-- drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 | 11
-rw-r--r-- drivers/crypto/qat/qat_common/qat_rsapubkey.asn1 | 4
-rw-r--r-- drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 1
-rw-r--r-- drivers/crypto/qce/ablkcipher.c | 27
-rw-r--r-- drivers/crypto/qce/cipher.h | 2
-rw-r--r-- drivers/crypto/s5p-sss.c | 80
-rw-r--r-- drivers/crypto/sahara.c | 112
-rw-r--r-- drivers/crypto/talitos.c | 672
-rw-r--r-- drivers/crypto/ux500/cryp/Makefile | 6
-rw-r--r-- drivers/crypto/ux500/hash/Makefile | 2
-rw-r--r-- drivers/crypto/vmx/.gitignore | 2
-rw-r--r-- drivers/crypto/vmx/Makefile | 2
-rw-r--r-- drivers/crypto/vmx/aes_xts.c | 190
-rw-r--r-- drivers/crypto/vmx/aesp8-ppc.h | 4
-rw-r--r-- drivers/crypto/vmx/aesp8-ppc.pl | 1863
-rw-r--r-- drivers/crypto/vmx/vmx.c | 2
-rw-r--r-- drivers/devfreq/Kconfig | 2
-rw-r--r-- drivers/devfreq/devfreq-event.c | 12
-rw-r--r-- drivers/devfreq/devfreq.c | 15
-rw-r--r-- drivers/devfreq/event/Kconfig | 4
-rw-r--r-- drivers/devfreq/event/exynos-ppmu.c | 3
-rw-r--r-- drivers/devfreq/exynos-bus.c | 11
-rw-r--r-- drivers/dma/hsu/hsu.c | 90
-rw-r--r-- drivers/dma/hsu/pci.c | 11
-rw-r--r-- drivers/edac/sb_edac.c | 20
-rw-r--r-- drivers/extcon/Makefile | 3
-rw-r--r-- drivers/extcon/devres.c | 216
-rw-r--r-- drivers/extcon/extcon-adc-jack.c | 34
-rw-r--r-- drivers/extcon/extcon-usb-gpio.c | 32
-rw-r--r-- drivers/extcon/extcon.c | 344
-rw-r--r-- drivers/firmware/efi/efi-pstore.c | 13
-rw-r--r-- drivers/firmware/efi/efi.c | 96
-rw-r--r-- drivers/firmware/efi/efibc.c | 4
-rw-r--r-- drivers/firmware/efi/runtime-wrappers.c | 53
-rw-r--r-- drivers/firmware/psci.c | 66
-rw-r--r-- drivers/gpio/Kconfig | 9
-rw-r--r-- drivers/gpio/gpio-sch.c | 21
-rw-r--r-- drivers/gpio/gpiolib-legacy.c | 8
-rw-r--r-- drivers/gpio/gpiolib.c | 52
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_i2c.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/atombios_i2c.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 32
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_shrinker.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 30
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 59
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 64
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 153
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | 3
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_crtc.c | 8
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_drv.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 7
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 47
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 8
-rw-r--r-- drivers/hwmon/Kconfig | 42
-rw-r--r-- drivers/hwmon/Makefile | 3
-rw-r--r-- drivers/hwmon/ad7314.c | 48
-rw-r--r-- drivers/hwmon/ads7871.c | 65
-rw-r--r-- drivers/hwmon/adt7411.c | 5
-rw-r--r-- drivers/hwmon/dell-smm-hwmon.c | 41
-rw-r--r-- drivers/hwmon/emc6w201.c | 2
-rw-r--r-- drivers/hwmon/ftsteutates.c | 819
-rw-r--r-- drivers/hwmon/ina3221.c | 445
-rw-r--r-- drivers/hwmon/jc42.c | 16
-rw-r--r-- drivers/hwmon/jz4740-hwmon.c | 65
-rw-r--r-- drivers/hwmon/lm75.c | 239
-rw-r--r-- drivers/hwmon/lm90.c | 447
-rw-r--r-- drivers/hwmon/sht3x.c | 775
-rw-r--r-- drivers/hwmon/tmp102.c | 247
-rw-r--r-- drivers/hwmon/tmp401.c | 35
-rw-r--r-- drivers/hwtracing/intel_th/core.c | 89
-rw-r--r-- drivers/hwtracing/intel_th/gth.c | 18
-rw-r--r-- drivers/hwtracing/intel_th/intel_th.h | 6
-rw-r--r-- drivers/hwtracing/intel_th/pci.c | 5
-rw-r--r-- drivers/hwtracing/stm/core.c | 52
-rw-r--r-- drivers/i2c/busses/i2c-qup.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-tegra.c | 2
-rw-r--r-- drivers/i2c/i2c-boardinfo.c | 4
-rw-r--r-- drivers/i2c/i2c-core.c | 175
-rw-r--r-- drivers/i2c/muxes/i2c-mux-reg.c | 2
-rw-r--r-- drivers/ide/ide-cd.c | 3
-rw-r--r-- drivers/ide/ide-cd_ioctl.c | 3
-rw-r--r-- drivers/ide/ide-disk.c | 2
-rw-r--r-- drivers/ide/ide-floppy.c | 2
-rw-r--r-- drivers/ide/ide-gd.c | 3
-rw-r--r-- drivers/idle/intel_idle.c | 178
-rw-r--r-- drivers/iio/Kconfig | 8
-rw-r--r-- drivers/iio/Makefile | 1
-rw-r--r-- drivers/iio/accel/Kconfig | 24
-rw-r--r-- drivers/iio/accel/Makefile | 3
-rw-r--r-- drivers/iio/accel/bma180.c | 2
-rw-r--r-- drivers/iio/accel/bma220_spi.c | 338
-rw-r--r-- drivers/iio/accel/bmc150-accel-core.c | 4
-rw-r--r-- drivers/iio/accel/kxcjk-1013.c | 2
-rw-r--r-- drivers/iio/accel/mma7455_core.c | 3
-rw-r--r-- drivers/iio/accel/mma7660.c | 277
-rw-r--r-- drivers/iio/accel/mma8452.c | 223
-rw-r--r-- drivers/iio/accel/mma9551.c | 2
-rw-r--r-- drivers/iio/accel/mma9553.c | 2
-rw-r--r-- drivers/iio/accel/st_accel.h | 1
-rw-r--r-- drivers/iio/accel/st_accel_core.c | 76
-rw-r--r-- drivers/iio/accel/st_accel_i2c.c | 5
-rw-r--r-- drivers/iio/accel/st_accel_spi.c | 1
-rw-r--r-- drivers/iio/adc/Kconfig | 12
-rw-r--r-- drivers/iio/adc/Makefile | 1
-rw-r--r-- drivers/iio/adc/ad7266.c | 8
-rw-r--r-- drivers/iio/adc/ad7291.c | 3
-rw-r--r-- drivers/iio/adc/ad7298.c | 3
-rw-r--r-- drivers/iio/adc/ad7476.c | 14
-rw-r--r-- drivers/iio/adc/ad7791.c | 37
-rw-r--r-- drivers/iio/adc/ad7793.c | 33
-rw-r--r-- drivers/iio/adc/ad7887.c | 14
-rw-r--r-- drivers/iio/adc/ad7923.c | 14
-rw-r--r-- drivers/iio/adc/ad799x.c | 29
-rw-r--r-- drivers/iio/adc/bcm_iproc_adc.c | 644
-rw-r--r-- drivers/iio/adc/cc10001_adc.c | 2
-rw-r--r-- drivers/iio/adc/hi8435.c | 3
-rw-r--r-- drivers/iio/adc/ina2xx-adc.c | 7
-rw-r--r-- drivers/iio/adc/max1027.c | 1
-rw-r--r-- drivers/iio/adc/max1363.c | 67
-rw-r--r-- drivers/iio/adc/mcp320x.c | 1
-rw-r--r-- drivers/iio/adc/mcp3422.c | 1
-rw-r--r-- drivers/iio/adc/mxs-lradc.c | 34
-rw-r--r-- drivers/iio/adc/nau7802.c | 20
-rw-r--r-- drivers/iio/adc/ti-adc081c.c | 30
-rw-r--r-- drivers/iio/adc/ti-adc0832.c | 1
-rw-r--r-- drivers/iio/adc/ti-adc128s052.c | 1
-rw-r--r-- drivers/iio/adc/ti-ads1015.c | 132
-rw-r--r-- drivers/iio/adc/ti-ads8688.c | 1
-rw-r--r-- drivers/iio/adc/ti_am335x_adc.c | 22
-rw-r--r-- drivers/iio/adc/vf610_adc.c | 3
-rw-r--r-- drivers/iio/adc/xilinx-xadc-events.c | 4
-rw-r--r-- drivers/iio/buffer/industrialio-buffer-dma.c | 4
-rw-r--r-- drivers/iio/chemical/Kconfig | 8
-rw-r--r-- drivers/iio/chemical/atlas-ph-sensor.c | 269
-rw-r--r-- drivers/iio/common/st_sensors/st_sensors_buffer.c | 49
-rw-r--r-- drivers/iio/common/st_sensors/st_sensors_core.c | 57
-rw-r--r-- drivers/iio/common/st_sensors/st_sensors_i2c.c | 4
-rw-r--r-- drivers/iio/common/st_sensors/st_sensors_trigger.c | 156
-rw-r--r-- drivers/iio/dac/Kconfig | 9
-rw-r--r-- drivers/iio/dac/ad5421.c | 6
-rw-r--r-- drivers/iio/dac/ad5504.c | 2
-rw-r--r-- drivers/iio/dac/ad5755.c | 188
-rw-r--r-- drivers/iio/dac/stx104.c | 125
-rw-r--r-- drivers/iio/dummy/Kconfig | 1
-rw-r--r-- drivers/iio/dummy/iio_simple_dummy.c | 102
-rw-r--r-- drivers/iio/dummy/iio_simple_dummy_buffer.c | 3
-rw-r--r-- drivers/iio/dummy/iio_simple_dummy_events.c | 2
-rw-r--r-- drivers/iio/gyro/bmg160_core.c | 138
-rw-r--r-- drivers/iio/gyro/st_gyro_core.c | 12
-rw-r--r-- drivers/iio/health/afe4403.c | 299
-rw-r--r-- drivers/iio/health/afe4404.c | 308
-rw-r--r-- drivers/iio/health/afe440x.h | 48
-rw-r--r-- drivers/iio/humidity/am2315.c | 1
-rw-r--r-- drivers/iio/humidity/htu21.c | 1
-rw-r--r-- drivers/iio/iio_core.h | 3
-rw-r--r-- drivers/iio/imu/bmi160/bmi160_core.c | 30
-rw-r--r-- drivers/iio/imu/inv_mpu6050/Kconfig | 8
-rw-r--r-- drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 6
-rw-r--r-- drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c | 1
-rw-r--r-- drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | 2
-rw-r--r-- drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 2
-rw-r--r-- drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c | 1
-rw-r--r-- drivers/iio/industrialio-core.c | 180
-rw-r--r-- drivers/iio/industrialio-event.c | 19
-rw-r--r-- drivers/iio/industrialio-sw-device.c | 182
-rw-r--r-- drivers/iio/industrialio-trigger.c | 37
-rw-r--r-- drivers/iio/light/acpi-als.c | 2
-rw-r--r-- drivers/iio/light/adjd_s311.c | 2
-rw-r--r-- drivers/iio/light/apds9300.c | 2
-rw-r--r-- drivers/iio/light/apds9960.c | 4
-rw-r--r-- drivers/iio/light/cm36651.c | 2
-rw-r--r-- drivers/iio/light/gp2ap020a00f.c | 26
-rw-r--r-- drivers/iio/light/isl29125.c | 23
-rw-r--r-- drivers/iio/light/jsa1212.c | 3
-rw-r--r-- drivers/iio/light/lm3533-als.c | 2
-rw-r--r-- drivers/iio/light/ltr501.c | 7
-rw-r--r-- drivers/iio/light/max44000.c | 3
-rw-r--r-- drivers/iio/light/opt3001.c | 4
-rw-r--r-- drivers/iio/light/stk3310.c | 2
-rw-r--r-- drivers/iio/light/tcs3414.c | 14
-rw-r--r-- drivers/iio/light/tcs3472.c | 15
-rw-r--r-- drivers/iio/light/tsl2563.c | 2
-rw-r--r-- drivers/iio/light/us5182d.c | 2
-rw-r--r-- drivers/iio/magnetometer/Kconfig | 2
-rw-r--r-- drivers/iio/magnetometer/ak8975.c | 154
-rw-r--r-- drivers/iio/magnetometer/bmc150_magn_i2c.c | 3
-rw-r--r-- drivers/iio/magnetometer/bmc150_magn_spi.c | 3
-rw-r--r-- drivers/iio/magnetometer/hmc5843_core.c | 2
-rw-r--r-- drivers/iio/magnetometer/mag3110.c | 2
-rw-r--r-- drivers/iio/magnetometer/st_magn_core.c | 12
-rw-r--r-- drivers/iio/potentiometer/Kconfig | 23
-rw-r--r-- drivers/iio/potentiometer/Makefile | 1
-rw-r--r-- drivers/iio/potentiometer/max5487.c | 161
-rw-r--r-- drivers/iio/potentiometer/mcp4531.c | 159
-rw-r--r-- drivers/iio/potentiometer/tpl0102.c | 4
-rw-r--r-- drivers/iio/pressure/Kconfig | 31
-rw-r--r-- drivers/iio/pressure/Makefile | 3
-rw-r--r-- drivers/iio/pressure/bmp280-core.c (renamed from drivers/iio/pressure/bmp280.c) | 673
-rw-r--r-- drivers/iio/pressure/bmp280-i2c.c | 91
-rw-r--r-- drivers/iio/pressure/bmp280-regmap.c | 84
-rw-r--r-- drivers/iio/pressure/bmp280-spi.c | 125
-rw-r--r-- drivers/iio/pressure/bmp280.h | 112
-rw-r--r-- drivers/iio/pressure/hp206c.c | 1
-rw-r--r-- drivers/iio/pressure/mpl3115.c | 2
-rw-r--r-- drivers/iio/pressure/ms5611_core.c | 3
-rw-r--r-- drivers/iio/pressure/ms5637.c | 13
-rw-r--r-- drivers/iio/pressure/st_pressure.h | 1
-rw-r--r-- drivers/iio/pressure/st_pressure_core.c | 252
-rw-r--r-- drivers/iio/pressure/st_pressure_i2c.c | 4
-rw-r--r-- drivers/iio/pressure/st_pressure_spi.c | 1
-rw-r--r-- drivers/iio/proximity/as3935.c | 12
-rw-r--r-- drivers/iio/proximity/pulsedlight-lidar-lite-v2.c | 16
-rw-r--r-- drivers/iio/proximity/sx9500.c | 4
-rw-r--r-- drivers/iio/temperature/tsys02d.c | 1
-rw-r--r-- drivers/iio/trigger/Kconfig | 12
-rw-r--r-- drivers/iio/trigger/Makefile | 1
-rw-r--r-- drivers/iio/trigger/iio-trig-loop.c | 143
-rw-r--r-- drivers/infiniband/core/sysfs.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.c | 16
-rw-r--r-- drivers/infiniband/hw/hfi1/ud.c | 23
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_main.c | 3
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 1
-rw-r--r-- drivers/input/joystick/xpad.c | 3
-rw-r--r-- drivers/input/rmi4/rmi_bus.c | 4
-rw-r--r-- drivers/input/rmi4/rmi_f12.c | 9
-rw-r--r-- drivers/input/touchscreen/ts4800-ts.c | 13
-rw-r--r-- drivers/input/touchscreen/tsc2004.c | 7
-rw-r--r-- drivers/input/touchscreen/tsc2005.c | 7
-rw-r--r-- drivers/input/touchscreen/tsc200x-core.c | 15
-rw-r--r-- drivers/input/touchscreen/tsc200x-core.h | 2
-rw-r--r-- drivers/input/touchscreen/wacom_w8001.c | 3
-rw-r--r-- drivers/iommu/amd_iommu_init.c | 14
-rw-r--r-- drivers/iommu/intel-iommu.c | 4
-rw-r--r-- drivers/irqchip/Kconfig | 6
-rw-r--r-- drivers/irqchip/Makefile | 2
-rw-r--r-- drivers/irqchip/exynos-combiner.c | 14
-rw-r--r-- drivers/irqchip/irq-armada-370-xp.c | 2
-rw-r--r-- drivers/irqchip/irq-aspeed-vic.c | 230
-rw-r--r-- drivers/irqchip/irq-bcm2835.c | 3
-rw-r--r-- drivers/irqchip/irq-bcm2836.c | 6
-rw-r--r-- drivers/irqchip/irq-bcm7120-l2.c | 10
-rw-r--r-- drivers/irqchip/irq-brcmstb-l2.c | 4
-rw-r--r-- drivers/irqchip/irq-gic-common.c | 4
-rw-r--r-- drivers/irqchip/irq-gic-pm.c | 184
-rw-r--r-- drivers/irqchip/irq-gic-v2m.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 404
-rw-r--r-- drivers/irqchip/irq-gic.c | 134
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 7
-rw-r--r-- drivers/irqchip/irq-omap-intc.c | 2
-rw-r--r-- drivers/irqchip/irq-s3c24xx.c | 36
-rw-r--r-- drivers/irqchip/irq-sirfsoc.c | 11
-rw-r--r-- drivers/irqchip/irq-tegra.c | 4
-rw-r--r-- drivers/irqchip/irq-vic.c | 5
-rw-r--r-- drivers/lightnvm/Kconfig | 10
-rw-r--r-- drivers/lightnvm/core.c | 242
-rw-r--r-- drivers/lightnvm/gennvm.c | 385
-rw-r--r-- drivers/lightnvm/gennvm.h | 10
-rw-r--r-- drivers/lightnvm/rrpc.c | 155
-rw-r--r-- drivers/lightnvm/rrpc.h | 13
-rw-r--r-- drivers/lightnvm/sysblk.c | 8
-rw-r--r-- drivers/md/Makefile | 3
-rw-r--r-- drivers/md/bcache/btree.c | 4
-rw-r--r-- drivers/md/bcache/closure.c | 2
-rw-r--r-- drivers/md/bcache/closure.h | 3
-rw-r--r-- drivers/md/bcache/debug.c | 6
-rw-r--r-- drivers/md/bcache/io.c | 3
-rw-r--r-- drivers/md/bcache/journal.c | 9
-rw-r--r-- drivers/md/bcache/movinggc.c | 2
-rw-r--r-- drivers/md/bcache/request.c | 28
-rw-r--r-- drivers/md/bcache/super.c | 36
-rw-r--r-- drivers/md/bcache/writeback.c | 4
-rw-r--r-- drivers/md/bitmap.c | 6
-rw-r--r-- drivers/md/dm-bufio.c | 9
-rw-r--r-- drivers/md/dm-builtin.c | 2
-rw-r--r-- drivers/md/dm-cache-target.c | 18
-rw-r--r-- drivers/md/dm-core.h | 149
-rw-r--r-- drivers/md/dm-crypt.c | 15
-rw-r--r-- drivers/md/dm-era-target.c | 4
-rw-r--r-- drivers/md/dm-flakey.c | 2
-rw-r--r-- drivers/md/dm-io.c | 59
-rw-r--r-- drivers/md/dm-ioctl.c | 31
-rw-r--r-- drivers/md/dm-kcopyd.c | 13
-rw-r--r-- drivers/md/dm-linear.c | 21
-rw-r--r-- drivers/md/dm-log-writes.c | 13
-rw-r--r-- drivers/md/dm-log.c | 5
-rw-r--r-- drivers/md/dm-mpath.c | 354
-rw-r--r-- drivers/md/dm-raid.c | 3010
-rw-r--r-- drivers/md/dm-raid1.c | 30
-rw-r--r-- drivers/md/dm-region-hash.c | 6
-rw-r--r-- drivers/md/dm-rq.c | 970
-rw-r--r-- drivers/md/dm-rq.h | 64
-rw-r--r-- drivers/md/dm-snap-persistent.c | 24
-rw-r--r-- drivers/md/dm-snap.c | 27
-rw-r--r-- drivers/md/dm-stats.c | 11
-rw-r--r-- drivers/md/dm-stripe.c | 32
-rw-r--r-- drivers/md/dm-sysfs.c | 3
-rw-r--r-- drivers/md/dm-table.c | 114
-rw-r--r-- drivers/md/dm-target.c | 11
-rw-r--r-- drivers/md/dm-thin-metadata.c | 30
-rw-r--r-- drivers/md/dm-thin-metadata.h | 3
-rw-r--r-- drivers/md/dm-thin.c | 124
-rw-r--r-- drivers/md/dm-verity-fec.c | 4
-rw-r--r-- drivers/md/dm-zero.c | 15
-rw-r--r-- drivers/md/dm.c | 1272
-rw-r--r-- drivers/md/dm.h | 36
-rw-r--r-- drivers/md/linear.c | 4
-rw-r--r-- drivers/md/md.c | 18
-rw-r--r-- drivers/md/md.h | 5
-rw-r--r-- drivers/md/multipath.c | 2
-rw-r--r-- drivers/md/persistent-data/dm-btree.c | 9
-rw-r--r-- drivers/md/raid0.c | 4
-rw-r--r-- drivers/md/raid1.c | 39
-rw-r--r-- drivers/md/raid10.c | 51
-rw-r--r-- drivers/md/raid5-cache.c | 33
-rw-r--r-- drivers/md/raid5.c | 50
-rw-r--r-- drivers/media/usb/airspy/airspy.c | 1
-rw-r--r-- drivers/media/v4l2-core/v4l2-ioctl.c | 2
-rw-r--r-- drivers/memstick/core/ms_block.c | 6
-rw-r--r-- drivers/memstick/core/mspro_block.c | 6
-rw-r--r-- drivers/misc/Makefile | 14
-rw-r--r-- drivers/misc/lkdtm.c | 1023
-rw-r--r-- drivers/misc/lkdtm.h | 60
-rw-r--r-- drivers/misc/lkdtm_bugs.c | 148
-rw-r--r-- drivers/misc/lkdtm_core.c | 544
-rw-r--r-- drivers/misc/lkdtm_heap.c | 142
-rw-r--r-- drivers/misc/lkdtm_perms.c | 199
-rw-r--r-- drivers/misc/lkdtm_rodata.c | 10
-rw-r--r-- drivers/misc/lkdtm_usercopy.c | 313
-rw-r--r-- drivers/misc/mei/hbm.c | 137
-rw-r--r-- drivers/misc/mei/mei_dev.h | 10
-rw-r--r-- drivers/mmc/card/block.c | 42
-rw-r--r-- drivers/mmc/card/queue.c | 8
-rw-r--r-- drivers/mmc/card/queue.h | 6
-rw-r--r-- drivers/mmc/host/jz4740_mmc.c | 2
-rw-r--r-- drivers/mmc/host/pxamci.c | 16
-rw-r--r-- drivers/mmc/host/sdhci-acpi.c | 3
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 8
-rw-r--r-- drivers/mtd/nand/omap2.c | 7
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 11
-rw-r--r-- drivers/net/bonding/bond_alb.c | 7
-rw-r--r-- drivers/net/bonding/bond_main.c | 1
-rw-r--r-- drivers/net/bonding/bond_netlink.c | 6
-rw-r--r-- drivers/net/ethernet/agere/et131x.c | 2
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 9
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12
-rw-r--r-- drivers/net/ethernet/ethoc.c | 16
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 1
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 227
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 21
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 48
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 30
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 30
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/mbx.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 54
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 110
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 129
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 110
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 52
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 8
-rw-r--r-- drivers/net/ethernet/microchip/enc28j60.c | 7
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r-- drivers/net/ethernet/tile/tilepro.c | 4
-rw-r--r-- drivers/net/geneve.c | 9
-rw-r--r-- drivers/net/macsec.c | 1
-rw-r--r-- drivers/net/phy/dp83867.c | 13
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 5
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 7
-rw-r--r-- drivers/net/usb/r8152.c | 120
-rw-r--r-- drivers/net/usb/usbnet.c | 148
-rw-r--r-- drivers/nvdimm/blk.c | 3
-rw-r--r-- drivers/nvdimm/btt.c | 3
-rw-r--r-- drivers/nvdimm/bus.c | 2
-rw-r--r-- drivers/nvdimm/pmem.c | 4
-rw-r--r-- drivers/nvme/Kconfig | 1
-rw-r--r-- drivers/nvme/Makefile | 1
-rw-r--r-- drivers/nvme/host/Kconfig | 19
-rw-r--r-- drivers/nvme/host/Makefile | 6
-rw-r--r-- drivers/nvme/host/core.c | 396
-rw-r--r-- drivers/nvme/host/fabrics.c | 952
-rw-r--r-- drivers/nvme/host/fabrics.h | 132
-rw-r--r-- drivers/nvme/host/lightnvm.c | 4
-rw-r--r-- drivers/nvme/host/nvme.h | 52
-rw-r--r-- drivers/nvme/host/pci.c | 68
-rw-r--r-- drivers/nvme/host/rdma.c | 2018
-rw-r--r-- drivers/nvme/target/Kconfig | 36
-rw-r--r-- drivers/nvme/target/Makefile | 9
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 465
-rw-r--r-- drivers/nvme/target/configfs.c | 917
-rw-r--r-- drivers/nvme/target/core.c | 964
-rw-r--r-- drivers/nvme/target/discovery.c | 221
-rw-r--r-- drivers/nvme/target/fabrics-cmd.c | 240
-rw-r--r-- drivers/nvme/target/io-cmd.c | 215
-rw-r--r-- drivers/nvme/target/loop.c | 754
-rw-r--r-- drivers/nvme/target/nvmet.h | 331
-rw-r--r-- drivers/nvme/target/rdma.c | 1448
-rw-r--r-- drivers/nvmem/Kconfig | 4
-rw-r--r-- drivers/nvmem/imx-ocotp.c | 17
-rw-r--r-- drivers/nvmem/mtk-efuse.c | 47
-rw-r--r-- drivers/nvmem/mxs-ocotp.c | 83
-rw-r--r-- drivers/of/of_numa.c | 4
-rw-r--r-- drivers/pci/Makefile | 3
-rw-r--r-- drivers/pci/pci-mid.c | 77
-rw-r--r-- drivers/pci/pci.c | 4
-rw-r--r-- drivers/phy/Kconfig | 11
-rw-r--r-- drivers/phy/Makefile | 1
-rw-r--r-- drivers/phy/phy-brcm-sata.c | 81
-rw-r--r-- drivers/phy/phy-core.c | 15
-rw-r--r-- drivers/phy/phy-da8xx-usb.c | 245
-rw-r--r-- drivers/phy/phy-qcom-ufs-qmp-14nm.c | 1
-rw-r--r-- drivers/phy/phy-qcom-ufs-qmp-20nm.c | 1
-rw-r--r-- drivers/phy/phy-rcar-gen3-usb2.c | 26
-rw-r--r-- drivers/phy/phy-rockchip-usb.c | 23
-rw-r--r-- drivers/phy/phy-sun4i-usb.c | 34
-rw-r--r-- drivers/phy/phy-xgene.c | 4
-rw-r--r-- drivers/platform/chrome/cros_ec_dev.c | 8
-rw-r--r-- drivers/platform/x86/Kconfig | 15
-rw-r--r-- drivers/platform/x86/Makefile | 1
-rw-r--r-- drivers/platform/x86/asus-nb-wmi.c | 50
-rw-r--r-- drivers/platform/x86/asus-wireless.c | 91
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 8
-rw-r--r-- drivers/platform/x86/asus-wmi.h | 1
-rw-r--r-- drivers/platform/x86/dell-wmi.c | 293
-rw-r--r-- drivers/platform/x86/fujitsu-laptop.c | 81
-rw-r--r-- drivers/platform/x86/hp-wmi.c | 7
-rw-r--r-- drivers/platform/x86/intel-hid.c | 5
-rw-r--r-- drivers/platform/x86/intel-vbtn.c | 188
-rw-r--r-- drivers/platform/x86/intel_pmc_core.c | 54
-rw-r--r-- drivers/platform/x86/intel_pmc_core.h | 3
-rw-r--r-- drivers/platform/x86/intel_telemetry_debugfs.c | 5
-rw-r--r-- drivers/platform/x86/intel_telemetry_pltdrv.c | 5
-rw-r--r-- drivers/platform/x86/toshiba_acpi.c | 136
-rw-r--r-- drivers/pnp/isapnp/proc.c | 2
-rw-r--r-- drivers/pnp/pnpbios/core.c | 3
-rw-r--r-- drivers/power/Kconfig | 1
-rw-r--r-- drivers/power/axp288_charger.c | 77
-rw-r--r-- drivers/power/bq27xxx_battery.c | 5
-rw-r--r-- drivers/powercap/intel_rapl.c | 157
-rw-r--r-- drivers/pps/clients/pps_parport.c | 2
-rw-r--r-- drivers/regulator/qcom_smd-regulator.c | 1
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 4
-rw-r--r-- drivers/s390/block/dasd_genhd.c | 3
-rw-r--r-- drivers/s390/block/dcssblk.c | 4
-rw-r--r-- drivers/s390/block/scm_blk.c | 3
-rw-r--r-- drivers/s390/char/keyboard.c | 15
-rw-r--r-- drivers/s390/char/sclp_con.c | 3
-rw-r--r-- drivers/s390/char/sclp_config.c | 2
-rw-r--r-- drivers/s390/char/zcore.c | 2
-rw-r--r-- drivers/s390/cio/chp.c | 22
-rw-r--r-- drivers/s390/cio/chp.h | 2
-rw-r--r-- drivers/s390/cio/chsc.c | 25
-rw-r--r-- drivers/s390/cio/chsc.h | 5
-rw-r--r-- drivers/s390/cio/chsc_sch.c | 2
-rw-r--r-- drivers/s390/cio/cmf.c | 44
-rw-r--r-- drivers/s390/cio/device_ops.c | 22
-rw-r--r-- drivers/s390/cio/idset.h | 2
-rw-r--r-- drivers/s390/cio/ioasm.c | 91
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 77
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 1
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 1
-rw-r--r-- drivers/scsi/ipr.c | 1
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 11
-rw-r--r-- drivers/scsi/osd/osd_initiator.c | 37
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 2
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 10
-rw-r--r-- drivers/scsi/sd.c | 26
-rw-r--r-- drivers/scsi/sr.c | 3
-rw-r--r-- drivers/sh/pm_runtime.c | 9
-rw-r--r-- drivers/spi/spi.c | 100
-rw-r--r-- drivers/staging/Kconfig | 2
-rw-r--r-- drivers/staging/Makefile | 1
-rw-r--r-- drivers/staging/android/Kconfig | 17
-rw-r--r-- drivers/staging/android/Makefile | 3
-rw-r--r-- drivers/staging/android/sw_sync.c | 341
-rw-r--r-- drivers/staging/android/sw_sync.h | 59
-rw-r--r-- drivers/staging/android/sync.c | 221
-rw-r--r-- drivers/staging/android/sync.h | 154
-rw-r--r-- drivers/staging/android/sync_debug.c | 165
-rw-r--r-- drivers/staging/android/sync_debug.h | 83
-rw-r--r-- drivers/staging/android/trace/sync.h | 14
-rw-r--r-- drivers/staging/android/uapi/sw_sync.h | 32
-rw-r--r-- drivers/staging/comedi/comedi.h | 2
-rw-r--r-- drivers/staging/comedi/comedi_fops.c | 21
-rw-r--r-- drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c | 187
-rw-r--r-- drivers/staging/comedi/drivers/addi_apci_1564.c | 305
-rw-r--r-- drivers/staging/comedi/drivers/adl_pci9118.c | 8
-rw-r--r-- drivers/staging/comedi/drivers/cb_pcidas64.c | 209
-rw-r--r-- drivers/staging/comedi/drivers/comedi_bond.c | 10
-rw-r--r-- drivers/staging/comedi/drivers/daqboard2000.c | 380
-rw-r--r-- drivers/staging/comedi/drivers/das16.c | 39
-rw-r--r-- drivers/staging/comedi/drivers/das16m1.c | 482
-rw-r--r-- drivers/staging/comedi/drivers/das6402.c | 74
-rw-r--r-- drivers/staging/comedi/drivers/das800.c | 106
-rw-r--r-- drivers/staging/comedi/drivers/dmm32at.c | 98
-rw-r--r-- drivers/staging/comedi/drivers/dt2801.c | 95
-rw-r--r-- drivers/staging/comedi/drivers/dt2811.c | 852
-rw-r--r-- drivers/staging/comedi/drivers/dt2814.c | 72
-rw-r--r-- drivers/staging/comedi/drivers/dt2815.c | 140
-rw-r--r-- drivers/staging/comedi/drivers/dt2817.c | 64
-rw-r--r-- drivers/staging/comedi/drivers/gsc_hpdi.c | 87
-rw-r--r-- drivers/staging/comedi/drivers/jr3_pci.c | 36
-rw-r--r-- drivers/staging/comedi/drivers/me_daq.c | 2
-rw-r--r-- drivers/staging/comedi/drivers/mpc624.c | 4
-rw-r--r-- drivers/staging/comedi/drivers/ni_65xx.c | 18
-rw-r--r-- drivers/staging/comedi/drivers/ni_pcidio.c | 4
-rw-r--r-- drivers/staging/comedi/drivers/ni_pcimio.c | 8
-rw-r--r-- drivers/staging/comedi/drivers/pcmmio.c | 40
-rw-r--r-- drivers/staging/comedi/drivers/pcmuio.c | 2
-rw-r--r-- drivers/staging/comedi/drivers/plx9080.h | 957
-rw-r--r-- drivers/staging/comedi/drivers/quatech_daqp_cs.c | 2
-rw-r--r-- drivers/staging/comedi/drivers/rtd520.c | 15
-rw-r--r-- drivers/staging/comedi/drivers/s626.c | 8
-rw-r--r-- drivers/staging/comedi/drivers/s626.h | 356
-rw-r--r-- drivers/staging/comedi/drivers/serial2002.c | 7
-rw-r--r-- drivers/staging/fsl-mc/bus/dpbp.c | 132
-rw-r--r-- drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 86
-rw-r--r-- drivers/staging/fsl-mc/bus/dpmcp.c | 89
-rw-r--r-- drivers/staging/fsl-mc/bus/dpmng-cmd.h | 12
-rw-r--r-- drivers/staging/fsl-mc/bus/dpmng.c | 15
-rw-r--r-- drivers/staging/fsl-mc/bus/dprc-cmd.h | 379
-rw-r--r-- drivers/staging/fsl-mc/bus/dprc-driver.c | 20
-rw-r--r-- drivers/staging/fsl-mc/bus/dprc.c | 715
-rw-r--r-- drivers/staging/fsl-mc/bus/mc-allocator.c | 2
-rw-r--r-- drivers/staging/fsl-mc/bus/mc-bus.c | 71
-rw-r--r-- drivers/staging/fsl-mc/bus/mc-msi.c | 17
-rw-r--r-- drivers/staging/fsl-mc/bus/mc-sys.c | 46
-rw-r--r-- drivers/staging/fsl-mc/include/dpbp-cmd.h | 125
-rw-r--r-- drivers/staging/fsl-mc/include/mc-cmd.h | 91
-rw-r--r-- drivers/staging/fsl-mc/include/mc.h | 21
-rw-r--r-- drivers/staging/iio/accel/Kconfig | 14
-rw-r--r-- drivers/staging/iio/accel/Makefile | 4
-rw-r--r-- drivers/staging/iio/accel/lis3l02dq.h | 217
-rw-r--r-- drivers/staging/iio/accel/lis3l02dq_core.c | 814
-rw-r--r-- drivers/staging/iio/accel/lis3l02dq_ring.c | 428
-rw-r--r-- drivers/staging/iio/accel/sca3000_core.c | 4
-rw-r--r-- drivers/staging/iio/adc/ad7280a.c | 8
-rw-r--r-- drivers/staging/iio/adc/ad7606_ring.c | 3
-rw-r--r-- drivers/staging/iio/adc/ad7816.c | 3
-rw-r--r-- drivers/staging/iio/addac/adt7316.c | 4
-rw-r--r-- drivers/staging/iio/cdc/ad7150.c | 2
-rw-r--r-- drivers/staging/iio/light/tsl2x7x_core.c | 2
-rw-r--r-- drivers/staging/ks7010/Kconfig | 10
-rw-r--r-- drivers/staging/ks7010/Makefile | 4
-rw-r--r-- drivers/staging/ks7010/TODO | 36
-rw-r--r-- drivers/staging/ks7010/eap_packet.h | 129
-rw-r--r-- drivers/staging/ks7010/ks7010_sdio.c | 1236
-rw-r--r-- drivers/staging/ks7010/ks7010_sdio.h | 147
-rw-r--r-- drivers/staging/ks7010/ks_hostif.c | 2760
-rw-r--r-- drivers/staging/ks7010/ks_hostif.h | 644
-rw-r--r-- drivers/staging/ks7010/ks_wlan.h | 505
-rw-r--r-- drivers/staging/ks7010/ks_wlan_ioctl.h | 67
-rw-r--r-- drivers/staging/ks7010/ks_wlan_net.c | 3528
-rw-r--r-- drivers/staging/ks7010/michael_mic.c | 139
-rw-r--r-- drivers/staging/ks7010/michael_mic.h | 26
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/curproc.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/libcfs.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h | 4
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/libcfs_private.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/libcfs_string.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/libcfs_time.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h | 6
-rw-r--r-- drivers/staging/lustre/include/linux/lnet/lib-dlc.h | 2
-rw-r--r-- drivers/staging/lustre/include/linux/lnet/lib-types.h | 3
-rw-r--r-- drivers/staging/lustre/include/linux/lnet/types.h | 4
-rw-r--r-- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 354
-rw-r--r-- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h | 307
-rw-r--r-- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 384
-rw-r--r-- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c | 8
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c | 308
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h | 209
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | 204
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c | 40
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c | 2
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c | 74
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/debug.c | 8
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/fail.c | 4
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/hash.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/libcfs_string.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c | 1
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c | 2
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/linux/linux-module.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/module.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/prng.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/tracefile.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/tracefile.h | 6
-rw-r--r-- drivers/staging/lustre/lnet/libcfs/workitem.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/acceptor.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/api-ni.c | 8
-rw-r--r-- drivers/staging/lustre/lnet/lnet/config.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/lib-eq.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/lib-md.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/lib-me.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/lib-move.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/lib-msg.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/lo.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/module.c | 8
-rw-r--r-- drivers/staging/lustre/lnet/lnet/net_fault.c | 4
-rw-r--r-- drivers/staging/lustre/lnet/lnet/nidstrings.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/peer.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/lnet/router.c | 9
-rw-r--r-- drivers/staging/lustre/lnet/selftest/brw_test.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/conctl.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/conrpc.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/conrpc.h | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/console.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/console.h | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/framework.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/module.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/ping_test.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/rpc.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/rpc.h | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/selftest.h | 7
-rw-r--r-- drivers/staging/lustre/lnet/selftest/timer.c | 6
-rw-r--r-- drivers/staging/lustre/lnet/selftest/timer.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/Kconfig | 6
-rw-r--r-- drivers/staging/lustre/lustre/fid/fid_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/fid/fid_lib.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/fid/fid_request.c | 12
-rw-r--r-- drivers/staging/lustre/lustre/fid/lproc_fid.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/fld/fld_cache.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/fld/fld_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/fld/fld_request.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/fld/lproc_fld.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/cl_object.h | 16
-rw-r--r-- drivers/staging/lustre/lustre/include/interval_tree.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/linux/lustre_compat25.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/linux/lustre_lite.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/linux/lustre_user.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lprocfs_status.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lu_object.h | 8
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre/lustre_idl.h | 50
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre/lustre_user.h | 21
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_acl.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_cfg.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_debug.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_disk.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_dlm.h | 24
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_eacl.h | 17
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_export.h | 19
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_fid.h | 8
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_fld.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_ha.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_handles.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_import.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_intent.h | 36
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_lib.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_lite.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_log.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_mdc.h | 9
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_mds.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_net.h | 421
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_param.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_req_layout.h | 8
-rw-r--r-- drivers/staging/lustre/lustre/include/lustre_sec.h | 12
-rw-r--r-- drivers/staging/lustre/lustre/include/obd.h | 17
-rw-r--r-- drivers/staging/lustre/lustre/include/obd_cksum.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/include/obd_class.h | 16
-rw-r--r-- drivers/staging/lustre/lustre/include/obd_support.h | 9
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/interval_tree.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/l_lock.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_extent.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_flock.c | 15
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_lib.c | 9
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_lock.c | 22
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c | 19
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_plain.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_pool.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_request.c | 12
-rw-r--r-- drivers/staging/lustre/lustre/ldlm/ldlm_resource.c | 8
-rw-r--r-- drivers/staging/lustre/lustre/llite/Makefile | 6
-rw-r--r-- drivers/staging/lustre/lustre/llite/dcache.c | 47
-rw-r--r-- drivers/staging/lustre/lustre/llite/dir.c | 55
-rw-r--r-- drivers/staging/lustre/lustre/llite/file.c | 97
-rw-r--r-- drivers/staging/lustre/lustre/llite/glimpse.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/llite/lcommon_cl.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/llite/lcommon_misc.c | 14
-rw-r--r-- drivers/staging/lustre/lustre/llite/llite_close.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/llite/llite_internal.h | 132
-rw-r--r-- drivers/staging/lustre/lustre/llite/llite_lib.c | 125
-rw-r--r-- drivers/staging/lustre/lustre/llite/llite_mmap.c | 18
-rw-r--r-- drivers/staging/lustre/lustre/llite/llite_nfs.c | 24
-rw-r--r-- drivers/staging/lustre/lustre/llite/llite_rmtacl.c | 299
-rw-r--r-- drivers/staging/lustre/lustre/llite/lloop.c | 883
-rw-r--r-- drivers/staging/lustre/lustre/llite/lproc_llite.c | 18
-rw-r--r-- drivers/staging/lustre/lustre/llite/namei.c | 34
-rw-r--r-- drivers/staging/lustre/lustre/llite/remote_perm.c | 324
-rw-r--r-- drivers/staging/lustre/lustre/llite/rw.c | 151
-rw-r--r-- drivers/staging/lustre/lustre/llite/rw26.c | 23
-rw-r--r-- drivers/staging/lustre/lustre/llite/statahead.c | 16
-rw-r--r-- drivers/staging/lustre/lustre/llite/super25.c | 25
-rw-r--r-- drivers/staging/lustre/lustre/llite/symlink.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/llite/vvp_dev.c | 16
-rw-r--r-- drivers/staging/lustre/lustre/llite/vvp_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/llite/vvp_io.c | 15
-rw-r--r-- drivers/staging/lustre/lustre/llite/vvp_lock.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/llite/vvp_object.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/llite/vvp_page.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/llite/vvp_req.c | 12
-rw-r--r-- drivers/staging/lustre/lustre/llite/xattr.c | 103
-rw-r--r-- drivers/staging/lustre/lustre/llite/xattr_cache.c | 16
-rw-r--r-- drivers/staging/lustre/lustre/lmv/lmv_fld.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lmv/lmv_intent.c | 32
-rw-r--r-- drivers/staging/lustre/lustre/lmv/lmv_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/lmv/lmv_obd.c | 39
-rw-r--r-- drivers/staging/lustre/lustre/lmv/lproc_lmv.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_cl_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_dev.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_ea.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_io.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_lock.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_merge.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_obd.c | 22
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_object.c | 10
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_offset.c | 8
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_pack.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_page.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_pool.c | 8
-rw-r--r-- drivers/staging/lustre/lustre/lov/lov_request.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lovsub_dev.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lovsub_io.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lovsub_lock.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lovsub_object.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lovsub_page.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/lov/lproc_lov.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/mdc/lproc_mdc.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/mdc/mdc_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/mdc/mdc_lib.c | 18
-rw-r--r-- drivers/staging/lustre/lustre/mdc/mdc_locks.c | 120
-rw-r--r-- drivers/staging/lustre/lustre/mdc/mdc_reint.c | 8
-rw-r--r-- drivers/staging/lustre/lustre/mdc/mdc_request.c | 88
-rw-r--r-- drivers/staging/lustre/lustre/mgc/lproc_mgc.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/mgc/mgc_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/mgc/mgc_request.c | 14
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/Makefile | 3
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/acl.c | 415
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/cl_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/cl_io.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/cl_lock.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/cl_object.c | 12
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/cl_page.c | 52
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/class_obd.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/debug.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/genops.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/kernelcomm.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/linux/linux-module.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/llog.c | 12
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/llog_cat.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/llog_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/llog_obd.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/llog_swab.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/lprocfs_status.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/lu_object.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/lu_ref.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/lustre_handles.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/lustre_peer.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/obd_config.c | 12
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/obd_mount.c | 8
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/obdo.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/statfs_pack.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdclass/uuid.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/obdecho/echo_client.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/osc/lproc_osc.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/osc/osc_cache.c | 21
-rw-r--r-- drivers/staging/lustre/lustre/osc/osc_cl_internal.h | 13
-rw-r--r-- drivers/staging/lustre/lustre/osc/osc_dev.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/osc/osc_internal.h | 6
-rw-r--r-- drivers/staging/lustre/lustre/osc/osc_io.c | 18
-rw-r--r-- drivers/staging/lustre/lustre/osc/osc_lock.c | 27
-rw-r--r-- drivers/staging/lustre/lustre/osc/osc_object.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/osc/osc_page.c | 36
-rw-r--r-- drivers/staging/lustre/lustre/osc/osc_request.c | 24
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/client.c | 167
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/connection.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/events.c | 36
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/import.c | 12
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/layout.c | 18
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/llog_client.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/llog_net.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c | 10
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/niobuf.c | 26
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/nrs.c | 2
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/pack_generic.c | 19
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/pers.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/pinger.c | 9
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h | 49
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c | 10
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/recover.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/sec.c | 22
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c | 8
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/sec_config.c | 10
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/sec_gc.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/sec_null.c | 13
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/sec_plain.c | 24
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/service.c | 6
-rw-r--r-- drivers/staging/lustre/lustre/ptlrpc/wiretest.c | 54
-rw-r--r-- drivers/staging/lustre/sysfs-fs-lustre | 8
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211.h | 4
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c | 54
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c | 34
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c | 6
-rw-r--r-- drivers/staging/rtl8192u/r8180_93cx6.c | 30
-rw-r--r-- drivers/staging/rtl8192u/r8180_93cx6.h | 2
-rw-r--r-- drivers/staging/rtl8192u/r8192U.h | 11
-rw-r--r-- drivers/staging/rtl8192u/r8192U_core.c | 198
-rw-r--r-- drivers/staging/rtl8192u/r8192U_wx.c | 80
-rw-r--r-- drivers/staging/unisys/visorbus/iovmcall_gnuc.h | 4
-rw-r--r-- drivers/staging/unisys/visorbus/visorbus_main.c | 12
-rw-r--r-- drivers/staging/unisys/visorbus/visorchipset.c | 2
-rw-r--r-- drivers/staging/unisys/visorhba/visorhba_main.c | 394
-rw-r--r-- drivers/staging/unisys/visorinput/visorinput.c | 2
-rw-r--r-- drivers/staging/unisys/visornic/visornic_main.c | 73
-rw-r--r-- drivers/staging/wilc1000/Makefile | 1
-rw-r--r-- drivers/staging/wilc1000/TODO | 5
-rw-r--r-- drivers/staging/wilc1000/host_interface.c | 474
-rw-r--r-- drivers/staging/wilc1000/host_interface.h | 4
-rw-r--r-- drivers/staging/wilc1000/linux_wlan.c | 37
-rw-r--r-- drivers/staging/wilc1000/wilc_msgqueue.c | 144
-rw-r--r-- drivers/staging/wilc1000/wilc_msgqueue.h | 28
-rw-r--r-- drivers/staging/wilc1000/wilc_sdio.c | 2
-rw-r--r-- drivers/staging/wilc1000/wilc_spi.c | 2
-rw-r--r-- drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 3
-rw-r--r-- drivers/staging/wilc1000/wilc_wfi_netdevice.h | 10
-rw-r--r-- drivers/staging/wilc1000/wilc_wlan.c | 34
-rw-r--r-- drivers/target/target_core_iblock.c | 41
-rw-r--r-- drivers/target/target_core_pscsi.c | 89
-rw-r--r-- drivers/thermal/cpu_cooling.c | 24
-rw-r--r-- drivers/thermal/intel_soc_dts_thermal.c | 4
-rw-r--r-- drivers/tty/cyclades.c | 4
-rw-r--r-- drivers/tty/ipwireless/tty.c | 11
-rw-r--r-- drivers/tty/metag_da.c | 4
-rw-r--r-- drivers/tty/mips_ejtag_fdc.c | 4
-rw-r--r-- drivers/tty/mxser.c | 1
-rw-r--r-- drivers/tty/serial/8250/8250.h | 51
-rw-r--r-- drivers/tty/serial/8250/8250_core.c | 19
-rw-r--r-- drivers/tty/serial/8250/8250_dma.c | 1
-rw-r--r-- drivers/tty/serial/8250/8250_early.c | 1
-rw-r--r-- drivers/tty/serial/8250/8250_fintek.c | 36
-rw-r--r-- drivers/tty/serial/8250/8250_ingenic.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_mid.c | 24
-rw-r--r-- drivers/tty/serial/8250/8250_mtk.c | 2
-rw-r--r-- drivers/tty/serial/8250/8250_omap.c | 35
-rw-r--r-- drivers/tty/serial/8250/8250_pci.c | 28
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 71
-rw-r--r-- drivers/tty/serial/8250/8250_uniphier.c | 2
-rw-r--r-- drivers/tty/serial/8250/Kconfig | 6
-rw-r--r-- drivers/tty/serial/Kconfig | 3
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 8
-rw-r--r-- drivers/tty/serial/atmel_serial.c | 143
-rw-r--r-- drivers/tty/serial/bcm63xx_uart.c | 8
-rw-r--r-- drivers/tty/serial/fsl_lpuart.c | 8
-rw-r--r-- drivers/tty/serial/m32r_sio.c | 23
-rw-r--r-- drivers/tty/serial/max310x.c | 176
-rw-r--r-- drivers/tty/serial/mps2-uart.c | 29
-rw-r--r-- drivers/tty/serial/msm_serial.c | 197
-rw-r--r-- drivers/tty/serial/msm_serial.h | 184
-rw-r--r-- drivers/tty/serial/mvebu-uart.c | 2
-rw-r--r-- drivers/tty/serial/pic32_uart.c | 1
-rw-r--r-- drivers/tty/serial/pmac_zilog.c | 2
-rw-r--r-- drivers/tty/serial/pxa.c | 30
-rw-r--r-- drivers/tty/serial/samsung.c | 38
-rw-r--r-- drivers/tty/serial/samsung.h | 38
-rw-r--r-- drivers/tty/serial/serial-tegra.c | 7
-rw-r--r-- drivers/tty/serial/serial_core.c | 8
-rw-r--r-- drivers/tty/serial/serial_mctrl_gpio.c | 36
-rw-r--r-- drivers/tty/serial/serial_mctrl_gpio.h | 15
-rw-r--r-- drivers/tty/serial/sh-sci.c | 196
-rw-r--r-- drivers/tty/serial/sh-sci.h | 25
-rw-r--r-- drivers/tty/serial/sirfsoc_uart.h | 4
-rw-r--r-- drivers/tty/serial/vt8500_serial.c | 30
-rw-r--r-- drivers/tty/serial/xilinx_uartps.c | 30
-rw-r--r-- drivers/tty/vt/consolemap.c | 13
-rw-r--r-- drivers/tty/vt/keyboard.c | 46
-rw-r--r-- drivers/tty/vt/vt.c | 431
-rw-r--r-- drivers/tty/vt/vt_ioctl.c | 8
-rw-r--r-- drivers/usb/chipidea/Kconfig | 5
-rw-r--r-- drivers/usb/class/cdc-acm.c | 103
-rw-r--r-- drivers/usb/class/cdc-wdm.c | 30
-rw-r--r-- drivers/usb/common/common.c | 26
-rw-r--r-- drivers/usb/core/message.c | 153
-rw-r--r-- drivers/usb/core/quirks.c | 3
-rw-r--r-- drivers/usb/dwc2/Kconfig | 1
-rw-r--r-- drivers/usb/dwc2/core.h | 8
-rw-r--r-- drivers/usb/dwc2/gadget.c | 574
-rw-r--r-- drivers/usb/dwc2/hcd_queue.c | 3
-rw-r--r-- drivers/usb/dwc2/hw.h | 14
-rw-r--r-- drivers/usb/dwc3/core.c | 388
-rw-r--r-- drivers/usb/dwc3/core.h | 59
-rw-r--r-- drivers/usb/dwc3/debug.h | 140
-rw-r--r-- drivers/usb/dwc3/debugfs.c | 191
-rw-r--r-- drivers/usb/dwc3/dwc3-omap.c | 53
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 161
-rw-r--r-- drivers/usb/dwc3/ep0.c | 26
-rw-r--r-- drivers/usb/dwc3/gadget.c | 863
-rw-r--r-- drivers/usb/dwc3/gadget.h | 4
-rw-r--r-- drivers/usb/dwc3/host.c | 61
-rw-r--r-- drivers/usb/dwc3/io.h | 7
-rw-r--r-- drivers/usb/dwc3/platform_data.h | 53
-rw-r--r-- drivers/usb/dwc3/trace.h | 96
-rw-r--r-- drivers/usb/early/ehci-dbgp.c | 4
-rw-r--r-- drivers/usb/gadget/Kconfig | 2
-rw-r--r-- drivers/usb/gadget/config.c | 2
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 192
-rw-r--r-- drivers/usb/gadget/function/f_mass_storage.c | 22
-rw-r--r-- drivers/usb/gadget/function/u_serial.c | 3
-rw-r--r-- drivers/usb/gadget/legacy/g_ffs.c | 15
-rw-r--r-- drivers/usb/gadget/udc/Kconfig | 4
-rw-r--r-- drivers/usb/gadget/udc/Makefile | 5
-rw-r--r-- drivers/usb/gadget/udc/amd5536udc.c | 9
-rw-r--r-- drivers/usb/gadget/udc/atmel_usba_udc.c | 2
-rw-r--r-- drivers/usb/gadget/udc/bdc/bdc_cmd.c | 3
-rw-r--r-- drivers/usb/gadget/udc/bdc/bdc_ep.c | 6
-rw-r--r-- drivers/usb/gadget/udc/core.c | 1523
-rw-r--r-- drivers/usb/gadget/udc/dummy_hcd.c | 5
-rw-r--r-- drivers/usb/gadget/udc/m66592-udc.c | 24
-rw-r--r-- drivers/usb/gadget/udc/mv_u3d_core.c | 23
-rw-r--r-- drivers/usb/gadget/udc/mv_udc_core.c | 9
-rw-r--r-- drivers/usb/gadget/udc/net2272.c | 4
-rw-r--r-- drivers/usb/gadget/udc/net2280.c | 51
-rw-r--r-- drivers/usb/gadget/udc/net2280.h | 1
-rw-r--r-- drivers/usb/gadget/udc/pch_udc.c | 36
-rw-r--r-- drivers/usb/gadget/udc/pxa27x_udc.c | 9
-rw-r--r-- drivers/usb/gadget/udc/r8a66597-udc.c | 24
-rw-r--r-- drivers/usb/gadget/udc/trace.c | 18
-rw-r--r-- drivers/usb/gadget/udc/trace.h | 298
-rw-r--r-- drivers/usb/gadget/udc/udc-core.c | 800
-rw-r--r-- drivers/usb/gadget/udc/udc-xilinx.c | 3
-rw-r--r-- drivers/usb/host/Kconfig | 2
-rw-r--r-- drivers/usb/host/ehci-platform.c | 37
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 1
-rw-r--r-- drivers/usb/host/ohci-platform.c | 43
-rw-r--r-- drivers/usb/host/xhci-mem.c | 74
-rw-r--r-- drivers/usb/host/xhci-plat.c | 8
-rw-r--r-- drivers/usb/host/xhci-ring.c | 458
-rw-r--r-- drivers/usb/host/xhci.c | 7
-rw-r--r-- drivers/usb/host/xhci.h | 10
-rw-r--r-- drivers/usb/image/microtek.h | 6
-rw-r--r-- drivers/usb/misc/Kconfig | 11
-rw-r--r-- drivers/usb/misc/chaoskey.c | 21
-rw-r--r-- drivers/usb/misc/sisusbvga/sisusb.c | 28
-rw-r--r-- drivers/usb/misc/sisusbvga/sisusb_con.c | 87
-rw-r--r-- drivers/usb/misc/sisusbvga/sisusb_init.h | 2
-rw-r--r-- drivers/usb/misc/usb3503.c | 25
-rw-r--r-- drivers/usb/musb/Makefile | 5
-rw-r--r-- drivers/usb/musb/cppi_dma.c | 51
-rw-r--r-- drivers/usb/musb/cppi_dma.h | 31
-rw-r--r-- drivers/usb/musb/musb_core.c | 87
-rw-r--r-- drivers/usb/musb/musb_cppi41.c | 47
-rw-r--r-- drivers/usb/musb/musb_debug.h | 2
-rw-r--r-- drivers/usb/musb/musb_dsps.c | 112
-rw-r--r-- drivers/usb/musb/musb_gadget.c | 122
-rw-r--r-- drivers/usb/musb/musb_gadget_ep0.c | 22
-rw-r--r-- drivers/usb/musb/musb_host.c | 133
-rw-r--r-- drivers/usb/musb/musb_trace.c | 33
-rw-r--r-- drivers/usb/musb/musb_trace.h | 371
-rw-r--r-- drivers/usb/musb/musb_virthub.c | 24
-rw-r--r-- drivers/usb/musb/musbhsdma.c | 10
-rw-r--r-- drivers/usb/musb/sunxi.c | 74
-rw-r--r-- drivers/usb/phy/Kconfig | 11
-rw-r--r-- drivers/usb/phy/phy-am335x.c | 2
-rw-r--r-- drivers/usb/phy/phy-msm-usb.c | 178
-rw-r--r-- drivers/usb/phy/phy-omap-otg.c | 2
-rw-r--r-- drivers/usb/renesas_usbhs/common.c | 2
-rw-r--r-- drivers/usb/renesas_usbhs/fifo.c | 18
-rw-r--r-- drivers/usb/renesas_usbhs/mod_gadget.c | 9
-rw-r--r-- drivers/usb/renesas_usbhs/rcar3.c | 2
-rw-r--r-- drivers/usb/usbip/usbip_common.h | 2
-rw-r--r-- drivers/usb/usbip/vudc_sysfs.c | 2
-rw-r--r-- drivers/video/console/dummycon.c | 3
-rw-r--r-- drivers/video/console/fbcon.c | 53
-rw-r--r-- drivers/video/console/mdacon.c | 45
-rw-r--r-- drivers/video/console/newport_con.c | 42
-rw-r--r-- drivers/video/console/sticon.c | 29
-rw-r--r-- drivers/video/console/vgacon.c | 29
-rw-r--r-- drivers/xen/xen-acpi-processor.c | 35
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 14
-rw-r--r-- drivers/xen/xenbus/xenbus_xs.c | 10
1271 files changed, 63376 insertions, 26791 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 0b6f3d60193d..a7187b999c5e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -128,7 +128,6 @@ obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
-obj-$(CONFIG_ARCH_SHMOBILE) += sh/
ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
obj-y += clocksource/
endif
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b7e2e776397d..acad70a0bb0d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -213,6 +213,10 @@ config ACPI_CPU_FREQ_PSS
bool
select THERMAL
+config ACPI_PROCESSOR_CSTATE
+ def_bool y
+ depends on IA64 || X86
+
config ACPI_PROCESSOR_IDLE
bool
select CPU_IDLE
@@ -234,7 +238,7 @@ config ACPI_CPPC_LIB
config ACPI_PROCESSOR
tristate "Processor"
depends on X86 || IA64 || ARM64
- select ACPI_PROCESSOR_IDLE if X86 || IA64
+ select ACPI_PROCESSOR_IDLE
select ACPI_CPU_FREQ_PSS if X86 || IA64
default y
help
@@ -291,8 +295,8 @@ config ACPI_THERMAL
config ACPI_NUMA
bool "NUMA support"
depends on NUMA
- depends on (X86 || IA64)
- default y if IA64_GENERIC || IA64_SGI_SN2
+ depends on (X86 || IA64 || ARM64)
+ default y if IA64_GENERIC || IA64_SGI_SN2 || ARM64
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"
@@ -311,9 +315,12 @@ config ACPI_CUSTOM_DSDT
bool
default ACPI_CUSTOM_DSDT_FILE != ""
+config ARCH_HAS_ACPI_TABLE_UPGRADE
+ def_bool n
+
config ACPI_TABLE_UPGRADE
bool "Allow upgrading ACPI tables via initrd"
- depends on BLK_DEV_INITRD && X86
+ depends on BLK_DEV_INITRD && ARCH_HAS_ACPI_TABLE_UPGRADE
default y
help
This option provides functionality to upgrade arbitrary ACPI tables
@@ -475,6 +482,7 @@ config ACPI_NFIT_DEBUG
issue.
source "drivers/acpi/apei/Kconfig"
+source "drivers/acpi/dptf/Kconfig"
config ACPI_EXTLOG
tristate "Extended Error Log support"
@@ -519,6 +527,20 @@ config XPOWER_PMIC_OPREGION
help
This config adds ACPI operation region support for XPower AXP288 PMIC.
+config BXT_WC_PMIC_OPREGION
+ bool "ACPI operation region support for BXT WhiskeyCove PMIC"
+ depends on INTEL_SOC_PMIC
+ help
+ This config adds ACPI operation region support for BXT WhiskeyCove PMIC.
+
endif
+config ACPI_CONFIGFS
+ tristate "ACPI configfs support"
+ select CONFIGFS_FS
+ help
+ Select this option to enable support for ACPI configuration from
+ userspace. The configurable ACPI groups will be visible under
+ /config/acpi, assuming configfs is mounted under /config.
+
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 251ce85a66fb..88f54f03e3d2 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -44,7 +44,6 @@ acpi-y += acpi_lpss.o acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
-acpi-y += int340x_thermal.o
acpi-y += power.o
acpi-y += event.o
acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
@@ -99,5 +98,9 @@ obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o
obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o
obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o
obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
+obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o
+
+obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o
video-objs += acpi_video.o video_detect.o
+obj-y += dptf/
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
new file mode 100644
index 000000000000..146a77fb762d
--- /dev/null
+++ b/drivers/acpi/acpi_configfs.c
@@ -0,0 +1,267 @@
+/*
+ * ACPI configfs support
+ *
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "ACPI configfs: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/configfs.h>
+#include <linux/acpi.h>
+
+static struct config_group *acpi_table_group;
+
+struct acpi_table {
+ struct config_item cfg;
+ struct acpi_table_header *header;
+};
+
+static ssize_t acpi_table_aml_write(struct config_item *cfg,
+ const void *data, size_t size)
+{
+ const struct acpi_table_header *header = data;
+ struct acpi_table *table;
+ int ret;
+
+ table = container_of(cfg, struct acpi_table, cfg);
+
+ if (table->header) {
+ pr_err("table already loaded\n");
+ return -EBUSY;
+ }
+
+ if (header->length != size) {
+ pr_err("invalid table length\n");
+ return -EINVAL;
+ }
+
+ if (memcmp(header->signature, ACPI_SIG_SSDT, 4)) {
+ pr_err("invalid table signature\n");
+ return -EINVAL;
+ }
+
+ table->header = kmemdup(header, header->length, GFP_KERNEL);
+ if (!table->header)
+ return -ENOMEM;
+
+ ret = acpi_load_table(table->header);
+ if (ret) {
+ kfree(table->header);
+ table->header = NULL;
+ }
+
+ return ret;
+}
+
+static inline struct acpi_table_header *get_header(struct config_item *cfg)
+{
+ struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
+
+ if (!table->header)
+ pr_err("table not loaded\n");
+
+ return table->header;
+}
+
+static ssize_t acpi_table_aml_read(struct config_item *cfg,
+ void *data, size_t size)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ if (data)
+ memcpy(data, h, h->length);
+
+ return h->length;
+}
+
+#define MAX_ACPI_TABLE_SIZE (128 * 1024)
+
+CONFIGFS_BIN_ATTR(acpi_table_, aml, NULL, MAX_ACPI_TABLE_SIZE);
+
+struct configfs_bin_attribute *acpi_table_bin_attrs[] = {
+ &acpi_table_attr_aml,
+ NULL,
+};
+
+ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->signature);
+}
+
+ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%d\n", h->length);
+}
+
+ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%d\n", h->revision);
+}
+
+ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id);
+}
+
+ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id);
+}
+
+ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%d\n", h->oem_revision);
+}
+
+ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->asl_compiler_id);
+}
+
+ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
+ char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%d\n", h->asl_compiler_revision);
+}
+
+CONFIGFS_ATTR_RO(acpi_table_, signature);
+CONFIGFS_ATTR_RO(acpi_table_, length);
+CONFIGFS_ATTR_RO(acpi_table_, revision);
+CONFIGFS_ATTR_RO(acpi_table_, oem_id);
+CONFIGFS_ATTR_RO(acpi_table_, oem_table_id);
+CONFIGFS_ATTR_RO(acpi_table_, oem_revision);
+CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_id);
+CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_revision);
+
+struct configfs_attribute *acpi_table_attrs[] = {
+ &acpi_table_attr_signature,
+ &acpi_table_attr_length,
+ &acpi_table_attr_revision,
+ &acpi_table_attr_oem_id,
+ &acpi_table_attr_oem_table_id,
+ &acpi_table_attr_oem_revision,
+ &acpi_table_attr_asl_compiler_id,
+ &acpi_table_attr_asl_compiler_revision,
+ NULL,
+};
+
+static struct config_item_type acpi_table_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_bin_attrs = acpi_table_bin_attrs,
+ .ct_attrs = acpi_table_attrs,
+};
+
+static struct config_item *acpi_table_make_item(struct config_group *group,
+ const char *name)
+{
+ struct acpi_table *table;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&table->cfg, name, &acpi_table_type);
+ return &table->cfg;
+}
+
+struct configfs_group_operations acpi_table_group_ops = {
+ .make_item = acpi_table_make_item,
+};
+
+static struct config_item_type acpi_tables_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_group_ops = &acpi_table_group_ops,
+};
+
+static struct config_item_type acpi_root_group_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem acpi_configfs = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "acpi",
+ .ci_type = &acpi_root_group_type,
+ },
+ },
+ .su_mutex = __MUTEX_INITIALIZER(acpi_configfs.su_mutex),
+};
+
+static int __init acpi_configfs_init(void)
+{
+ int ret;
+ struct config_group *root = &acpi_configfs.su_group;
+
+ config_group_init(root);
+
+ ret = configfs_register_subsystem(&acpi_configfs);
+ if (ret)
+ return ret;
+
+ acpi_table_group = configfs_register_default_group(root, "table",
+ &acpi_tables_type);
+ return PTR_ERR_OR_ZERO(acpi_table_group);
+}
+module_init(acpi_configfs_init);
+
+static void __exit acpi_configfs_exit(void)
+{
+ configfs_unregister_default_group(acpi_table_group);
+ configfs_unregister_subsystem(&acpi_configfs);
+}
+module_exit(acpi_configfs_exit);
+
+MODULE_AUTHOR("Octavian Purdila <octavian.purdila@intel.com>");
+MODULE_DESCRIPTION("ACPI configfs support");
+MODULE_LICENSE("GPL v2");
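For context, loading a table through the new interface amounts to creating an item under /config/acpi/table/ (which invokes acpi_table_make_item() above) and streaming the AML blob into its "aml" attribute. A minimal user-space sketch, assuming configfs is mounted at /config and using an arbitrary item name ("my_ssdt"):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	static char buf[128 * 1024];	/* matches MAX_ACPI_TABLE_SIZE */
	ssize_t len;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <ssdt.aml>\n", argv[0]);
		return 1;
	}

	/* mkdir under /config/acpi/table invokes acpi_table_make_item() */
	if (mkdir("/config/acpi/table/my_ssdt", 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open aml file");
		return 1;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0)
		return 1;

	/* write the complete blob: acpi_table_aml_write() rejects a buffer
	   whose size disagrees with the table header's length field */
	fd = open("/config/acpi/table/my_ssdt/aml", O_WRONLY);
	if (fd < 0) {
		perror("open aml attribute");
		return 1;
	}
	if (write(fd, buf, len) != len) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

After a successful load, the read-only attributes (signature, length, oem_id, ...) report the header fields of the staged table.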
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 1f4128487dd4..dee86925a9a1 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
+ acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
return ret;
}
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
+ acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
return n;
}
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
index feb61c1630eb..c1c4877ca96c 100644
--- a/drivers/acpi/acpi_lpat.c
+++ b/drivers/acpi/acpi_lpat.c
@@ -13,7 +13,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/acpi.h>
#include <acpi/acpi_lpat.h>
@@ -157,5 +157,3 @@ void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
}
}
EXPORT_SYMBOL_GPL(acpi_lpat_free_conversion_table);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 0872d5fecb82..357a0b8f860b 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -29,6 +29,7 @@ ACPI_MODULE_NAME("acpi_lpss");
#ifdef CONFIG_X86_INTEL_LPSS
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
#include <asm/pmc_atom.h>
@@ -229,8 +230,8 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(0x37), /* Valleyview, Bay Trail */
- ICPU(0x4c), /* Braswell, Cherry Trail */
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT1), /* Valleyview, Bay Trail */
+ ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
{}
};
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index c1d138e128cb..c5557d070954 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1246,6 +1246,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
union acpi_object *dod = NULL;
union acpi_object *obj;
+ if (!video->cap._DOD)
+ return AE_NOT_EXIST;
+
status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
if (!ACPI_SUCCESS(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 21932d640a41..a1d177d58254 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -108,9 +108,7 @@ acpi_ex_add_table(u32 table_index,
/* Add the table to the namespace */
- acpi_ex_exit_interpreter();
status = acpi_ns_load_table(table_index, parent_node);
- acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(obj_desc);
*ddb_handle = NULL;
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 1783cd7e1446..f631a47724f0 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -47,7 +47,6 @@
#include "acparser.h"
#include "acdispat.h"
#include "actables.h"
-#include "acinterp.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsparse")
@@ -171,8 +170,6 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
ACPI_FUNCTION_TRACE(ns_parse_table);
- acpi_ex_enter_interpreter();
-
/*
* AML Parse, pass 1
*
@@ -188,7 +185,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
table_index, start_node);
if (ACPI_FAILURE(status)) {
- goto error_exit;
+ return_ACPI_STATUS(status);
}
/*
@@ -204,10 +201,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
table_index, start_node);
if (ACPI_FAILURE(status)) {
- goto error_exit;
+ return_ACPI_STATUS(status);
}
-error_exit:
- acpi_ex_exit_interpreter();
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
index 5d575a955940..e50573de25f1 100644
--- a/drivers/acpi/apei/Makefile
+++ b/drivers/acpi/apei/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
obj-$(CONFIG_ACPI_APEI_ERST_DEBUG) += erst-dbg.o
-apei-y := apei-base.o hest.o erst.o
+apei-y := apei-base.o hest.o erst.o bert.o
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 16129c78b489..6e9f14c0a71b 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -1,6 +1,6 @@
/*
* apei-internal.h - ACPI Platform Error Interface internal
- * definations.
+ * definitions.
*/
#ifndef APEI_INTERNAL_H
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
new file mode 100644
index 000000000000..a05b5c0cf181
--- /dev/null
+++ b/drivers/acpi/apei/bert.c
@@ -0,0 +1,150 @@
+/*
+ * APEI Boot Error Record Table (BERT) support
+ *
+ * Copyright 2011 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * Under normal circumstances, when a hardware error occurs, the error
+ * handler receives control and processes the error. This gives OSPM a
+ * chance to process the error condition, report it, and optionally attempt
+ * recovery. In some cases, the system is unable to process an error.
+ * For example, system firmware or a management controller may choose to
+ * reset the system or the system might experience an uncontrolled crash
+ * or reset. The boot error source is used to report unhandled errors that
+ * occurred in a previous boot. This mechanism is described in the BERT
+ * table.
+ *
+ * For more information about BERT, please refer to ACPI Specification
+ * version 4.0, section 17.3.1
+ *
+ * This file is licensed under GPLv2.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+
+#include "apei-internal.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "BERT: " fmt
+
+static int bert_disable;
+
+static void __init bert_print_all(struct acpi_bert_region *region,
+ unsigned int region_len)
+{
+ struct acpi_hest_generic_status *estatus =
+ (struct acpi_hest_generic_status *)region;
+ int remain = region_len;
+ u32 estatus_len;
+
+ if (!estatus->block_status)
+ return;
+
+ while (remain > sizeof(struct acpi_bert_region)) {
+ if (cper_estatus_check(estatus)) {
+ pr_err(FW_BUG "Invalid error record.\n");
+ return;
+ }
+
+ estatus_len = cper_estatus_len(estatus);
+ if (remain < estatus_len) {
+ pr_err(FW_BUG "Truncated status block (length: %u).\n",
+ estatus_len);
+ return;
+ }
+
+ pr_info_once("Error records from previous boot:\n");
+
+ cper_estatus_print(KERN_INFO HW_ERR, estatus);
+
+ /*
+ * Because the boot error source is "one-time polled" type,
+ * clear Block Status of current Generic Error Status Block,
+ * once it's printed.
+ */
+ estatus->block_status = 0;
+
+ estatus = (void *)estatus + estatus_len;
+ /* No more error records. */
+ if (!estatus->block_status)
+ return;
+
+ remain -= estatus_len;
+ }
+}
+
+static int __init setup_bert_disable(char *str)
+{
+ bert_disable = 1;
+
+ return 0;
+}
+__setup("bert_disable", setup_bert_disable);
+
+static int __init bert_check_table(struct acpi_table_bert *bert_tab)
+{
+ if (bert_tab->header.length < sizeof(struct acpi_table_bert) ||
+ bert_tab->region_length < sizeof(struct acpi_bert_region))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init bert_init(void)
+{
+ struct acpi_bert_region *boot_error_region;
+ struct acpi_table_bert *bert_tab;
+ unsigned int region_len;
+ acpi_status status;
+ int rc = 0;
+
+ if (acpi_disabled)
+ return 0;
+
+ if (bert_disable) {
+ pr_info("Boot Error Record Table support is disabled.\n");
+ return 0;
+ }
+
+ status = acpi_get_table(ACPI_SIG_BERT, 0, (struct acpi_table_header **)&bert_tab);
+ if (status == AE_NOT_FOUND)
+ return 0;
+
+ if (ACPI_FAILURE(status)) {
+ pr_err("get table failed, %s.\n", acpi_format_exception(status));
+ return -EINVAL;
+ }
+
+ rc = bert_check_table(bert_tab);
+ if (rc) {
+ pr_err(FW_BUG "table invalid.\n");
+ return rc;
+ }
+
+ region_len = bert_tab->region_length;
+ if (!request_mem_region(bert_tab->address, region_len, "APEI BERT")) {
+ pr_err("Can't request iomem region <%016llx-%016llx>.\n",
+ (unsigned long long)bert_tab->address,
+ (unsigned long long)bert_tab->address + region_len - 1);
+ return -EIO;
+ }
+
+ boot_error_region = ioremap_cache(bert_tab->address, region_len);
+ if (boot_error_region) {
+ bert_print_all(boot_error_region, region_len);
+ iounmap(boot_error_region);
+ } else {
+ rc = -ENOMEM;
+ }
+
+ release_mem_region(bert_tab->address, region_len);
+
+ return rc;
+}
+
+late_initcall(bert_init);
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 559c1173de1c..eebb7e39c49c 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -33,7 +33,8 @@
#include "apei-internal.h"
-#define EINJ_PFX "EINJ: "
+#undef pr_fmt
+#define pr_fmt(fmt) "EINJ: " fmt
#define SPIN_UNIT 100 /* 100ns */
/* Firmware should respond within 1 milliseconds */
@@ -179,8 +180,7 @@ static int einj_get_available_error_type(u32 *type)
static int einj_timedout(u64 *t)
{
if ((s64)*t < SPIN_UNIT) {
- pr_warning(FW_WARN EINJ_PFX
- "Firmware does not respond in time\n");
+ pr_warning(FW_WARN "Firmware does not respond in time\n");
return 1;
}
*t -= SPIN_UNIT;
@@ -307,8 +307,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
- pr_err(EINJ_PFX
- "Can not request [mem %#010llx-%#010llx] for Trigger table\n",
+ pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n",
(unsigned long long)trigger_paddr,
(unsigned long long)trigger_paddr +
sizeof(*trigger_tab) - 1);
@@ -316,13 +315,12 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
}
trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
if (!trigger_tab) {
- pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ pr_err("Failed to map trigger table!\n");
goto out_rel_header;
}
rc = einj_check_trigger_header(trigger_tab);
if (rc) {
- pr_warning(FW_BUG EINJ_PFX
- "The trigger error action table is invalid\n");
+ pr_warning(FW_BUG "Invalid trigger error action table.\n");
goto out_rel_header;
}
@@ -336,8 +334,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
table_size - sizeof(*trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
- pr_err(EINJ_PFX
-"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
+ pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
(unsigned long long)trigger_paddr + sizeof(*trigger_tab),
(unsigned long long)trigger_paddr + table_size - 1);
goto out_rel_header;
@@ -345,7 +342,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
iounmap(trigger_tab);
trigger_tab = ioremap_cache(trigger_paddr, table_size);
if (!trigger_tab) {
- pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ pr_err("Failed to map trigger table!\n");
goto out_rel_entry;
}
trigger_entry = (struct acpi_whea_header *)
@@ -695,34 +692,42 @@ static int __init einj_init(void)
struct dentry *fentry;
struct apei_exec_context ctx;
- if (acpi_disabled)
+ if (acpi_disabled) {
+ pr_warn("ACPI disabled.\n");
return -ENODEV;
+ }
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
- if (status == AE_NOT_FOUND)
+ if (status == AE_NOT_FOUND) {
+ pr_warn("EINJ table not found.\n");
return -ENODEV;
+ }
else if (ACPI_FAILURE(status)) {
- const char *msg = acpi_format_exception(status);
- pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
+ pr_err("Failed to get EINJ table: %s\n",
+ acpi_format_exception(status));
return -EINVAL;
}
rc = einj_check_table(einj_tab);
if (rc) {
- pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
+ pr_warn(FW_BUG "Invalid EINJ table.\n");
return -EINVAL;
}
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
- if (!einj_debug_dir)
+ if (!einj_debug_dir) {
+ pr_err("Error creating debugfs node.\n");
goto err_cleanup;
+ }
+
fentry = debugfs_create_file("available_error_type", S_IRUSR,
einj_debug_dir, NULL,
&available_error_type_fops);
if (!fentry)
goto err_cleanup;
+
fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
einj_debug_dir, NULL, &error_type_fops);
if (!fentry)
@@ -735,14 +740,22 @@ static int __init einj_init(void)
apei_resources_init(&einj_resources);
einj_exec_ctx_init(&ctx);
rc = apei_exec_collect_resources(&ctx, &einj_resources);
- if (rc)
+ if (rc) {
+ pr_err("Error collecting EINJ resources.\n");
goto err_fini;
+ }
+
rc = apei_resources_request(&einj_resources, "APEI EINJ");
- if (rc)
+ if (rc) {
+ pr_err("Error requesting memory/port resources.\n");
goto err_fini;
+ }
+
rc = apei_exec_pre_map_gars(&ctx);
- if (rc)
+ if (rc) {
+ pr_err("Error pre-mapping GARs.\n");
goto err_release;
+ }
rc = -ENOMEM;
einj_param = einj_get_parameter_address();
@@ -787,7 +800,7 @@ static int __init einj_init(void)
goto err_unmap;
}
- pr_info(EINJ_PFX "Error INJection is initialized.\n");
+ pr_info("Error INJection is initialized.\n");
return 0;
@@ -798,6 +811,7 @@ err_unmap:
sizeof(struct einj_parameter);
acpi_os_unmap_iomem(einj_param, size);
+ pr_err("Error creating param extension debugfs nodes.\n");
}
apei_exec_post_unmap_gars(&ctx);
err_release:
@@ -805,6 +819,7 @@ err_release:
err_fini:
apei_resources_fini(&einj_resources);
err_cleanup:
+ pr_err("Error creating primary debugfs nodes.\n");
debugfs_remove_recursive(einj_debug_dir);
return rc;
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 006c3894c6ea..f096ab3cb54d 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -927,7 +927,8 @@ static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *time, char **buf,
- bool *compressed, struct pstore_info *psi);
+ bool *compressed, ssize_t *ecc_notice_size,
+ struct pstore_info *psi);
static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
u64 *id, unsigned int part, int count, bool compressed,
size_t size, struct pstore_info *psi);
@@ -987,7 +988,8 @@ static int erst_close_pstore(struct pstore_info *psi)
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *time, char **buf,
- bool *compressed, struct pstore_info *psi)
+ bool *compressed, ssize_t *ecc_notice_size,
+ struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
@@ -1033,6 +1035,7 @@ skip:
memcpy(*buf, rcd->data, len - sizeof(*rcd));
*id = record_id;
*compressed = false;
+ *ecc_notice_size = 0;
if (uuid_le_cmp(rcd->sec_hdr.section_type,
CPER_SECTION_TYPE_DMESG_Z) == 0) {
*type = PSTORE_TYPE_DMESG;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 262ca31b86d9..85b7d07fe5c8 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -30,6 +30,9 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/regulator/machine.h>
+#include <linux/workqueue.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
#ifdef CONFIG_X86
#include <asm/mpspec.h>
#endif
@@ -174,22 +177,17 @@ void acpi_bus_detach_private_data(acpi_handle handle)
EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data);
static void acpi_print_osc_error(acpi_handle handle,
- struct acpi_osc_context *context, char *error)
+ struct acpi_osc_context *context, char *error)
{
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
int i;
- if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer)))
- printk(KERN_DEBUG "%s: %s\n", context->uuid_str, error);
- else {
- printk(KERN_DEBUG "%s (%s): %s\n",
- (char *)buffer.pointer, context->uuid_str, error);
- kfree(buffer.pointer);
- }
- printk(KERN_DEBUG "_OSC request data:");
+ acpi_handle_debug(handle, "(%s): %s\n", context->uuid_str, error);
+
+ pr_debug("_OSC request data:");
for (i = 0; i < context->cap.length; i += sizeof(u32))
- printk(" %x", *((u32 *)(context->cap.pointer + i)));
- printk("\n");
+ pr_debug(" %x", *((u32 *)(context->cap.pointer + i)));
+
+ pr_debug("\n");
}
acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
@@ -302,6 +300,14 @@ out_kfree:
EXPORT_SYMBOL(acpi_run_osc);
bool osc_sb_apei_support_acked;
+
+/*
+ * ACPI 6.0 Section 8.4.4.2 Idle State Coordination
+ * OSPM supports platform coordinated low power idle(LPI) states
+ */
+bool osc_pc_lpi_support_confirmed;
+EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed);
+
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
static void acpi_bus_osc_support(void)
{
@@ -322,6 +328,7 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
if (!ghes_disable)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
@@ -329,9 +336,12 @@ static void acpi_bus_osc_support(void)
return;
if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
u32 *capbuf_ret = context.ret.pointer;
- if (context.ret.length > OSC_SUPPORT_DWORD)
+ if (context.ret.length > OSC_SUPPORT_DWORD) {
osc_sb_apei_support_acked =
capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+ osc_pc_lpi_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+ }
kfree(context.ret.pointer);
}
/* do we need to check other returned cap? Sounds no */
@@ -475,6 +485,56 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
acpi_device_notify);
}
+/* Handle events targeting \_SB device (at present only graceful shutdown) */
+
+#define ACPI_SB_NOTIFY_SHUTDOWN_REQUEST 0x81
+#define ACPI_SB_INDICATE_INTERVAL 10000
+
+static void sb_notify_work(struct work_struct *dummy)
+{
+ acpi_handle sb_handle;
+
+ orderly_poweroff(true);
+
+ /*
+ * After initiating graceful shutdown, the ACPI spec requires OSPM
+ * to evaluate _OST method once every 10seconds to indicate that
+ * the shutdown is in progress
+ */
+ acpi_get_handle(NULL, "\\_SB", &sb_handle);
+ while (1) {
+ pr_info("Graceful shutdown in progress.\n");
+ acpi_evaluate_ost(sb_handle, ACPI_OST_EC_OSPM_SHUTDOWN,
+ ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS, NULL);
+ msleep(ACPI_SB_INDICATE_INTERVAL);
+ }
+}
+
+static void acpi_sb_notify(acpi_handle handle, u32 event, void *data)
+{
+ static DECLARE_WORK(acpi_sb_work, sb_notify_work);
+
+ if (event == ACPI_SB_NOTIFY_SHUTDOWN_REQUEST) {
+ if (!work_busy(&acpi_sb_work))
+ schedule_work(&acpi_sb_work);
+ } else
+ pr_warn("event %x is not supported by \\_SB device\n", event);
+}
+
+static int __init acpi_setup_sb_notify_handler(void)
+{
+ acpi_handle sb_handle;
+
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &sb_handle)))
+ return -ENXIO;
+
+ if (ACPI_FAILURE(acpi_install_notify_handler(sb_handle, ACPI_DEVICE_NOTIFY,
+ acpi_sb_notify, NULL)))
+ return -EINVAL;
+
+ return 0;
+}
+
/* --------------------------------------------------------------------------
Device Matching
-------------------------------------------------------------------------- */
@@ -961,8 +1021,7 @@ void __init acpi_early_init(void)
/**
* acpi_subsystem_init - Finalize the early initialization of ACPI.
*
- * Switch over the platform to the ACPI mode (if possible), initialize the
- * handling of ACPI events, install the interrupt and global lock handlers.
+ * Switch over the platform to the ACPI mode (if possible).
*
* Doing this too early is generally unsafe, but at the same time it needs to be
* done before all things that really depend on ACPI. The right spot appears to
@@ -990,6 +1049,13 @@ void __init acpi_subsystem_init(void)
}
}
+static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context)
+{
+ acpi_scan_table_handler(event, table, context);
+
+ return acpi_sysfs_table_handler(event, table, context);
+}
+
static int __init acpi_bus_init(void)
{
int result;
@@ -1043,6 +1109,8 @@ static int __init acpi_bus_init(void)
* _PDC control method may load dynamic SSDT tables,
* and we need to install the table handler before that.
*/
+ status = acpi_install_table_handler(acpi_bus_table_handler, NULL);
+
acpi_sysfs_init();
acpi_early_processor_set_pdc();
@@ -1124,6 +1192,7 @@ static int __init acpi_init(void)
acpi_sleep_proc_init();
acpi_wakeup_device_init();
acpi_debugger_init();
+ acpi_setup_sb_notify_handler();
return 0;
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 5c3b0918d5fd..148f4e5ca104 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -53,6 +53,10 @@
#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch"
#define ACPI_BUTTON_TYPE_LID 0x05
+#define ACPI_BUTTON_LID_INIT_IGNORE 0x00
+#define ACPI_BUTTON_LID_INIT_OPEN 0x01
+#define ACPI_BUTTON_LID_INIT_METHOD 0x02
+
#define _COMPONENT ACPI_BUTTON_COMPONENT
ACPI_MODULE_NAME("button");
@@ -105,6 +109,7 @@ struct acpi_button {
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
static struct acpi_device *lid_device;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
/* --------------------------------------------------------------------------
FS Interface (/proc)
@@ -113,16 +118,52 @@ static struct acpi_device *lid_device;
static struct proc_dir_entry *acpi_button_dir;
static struct proc_dir_entry *acpi_lid_dir;
+static int acpi_lid_evaluate_state(struct acpi_device *device)
+{
+ unsigned long long lid_state;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "_LID", NULL, &lid_state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return lid_state ? 1 : 0;
+}
+
+static int acpi_lid_notify_state(struct acpi_device *device, int state)
+{
+ struct acpi_button *button = acpi_driver_data(device);
+ int ret;
+
+ /* input layer checks if event is redundant */
+ input_report_switch(button->input, SW_LID, !state);
+ input_sync(button->input);
+
+ if (state)
+ pm_wakeup_event(&device->dev, 0);
+
+ ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
+ if (ret == NOTIFY_DONE)
+ ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
+ device);
+ if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
+ /*
+ * It is also regarded as success if the notifier_chain
+ * returns NOTIFY_OK or NOTIFY_DONE.
+ */
+ ret = 0;
+ }
+ return ret;
+}
+
static int acpi_button_state_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_device *device = seq->private;
- acpi_status status;
- unsigned long long state;
+ int state;
- status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state);
+ state = acpi_lid_evaluate_state(device);
seq_printf(seq, "state: %s\n",
- ACPI_FAILURE(status) ? "unsupported" :
- (state ? "open" : "closed"));
+ state < 0 ? "unsupported" : (state ? "open" : "closed"));
return 0;
}
@@ -231,51 +272,37 @@ EXPORT_SYMBOL(acpi_lid_notifier_unregister);
int acpi_lid_open(void)
{
- acpi_status status;
- unsigned long long state;
-
if (!lid_device)
return -ENODEV;
- status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
- &state);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- return !!state;
+ return acpi_lid_evaluate_state(lid_device);
}
EXPORT_SYMBOL(acpi_lid_open);
-static int acpi_lid_send_state(struct acpi_device *device)
+static int acpi_lid_update_state(struct acpi_device *device)
{
- struct acpi_button *button = acpi_driver_data(device);
- unsigned long long state;
- acpi_status status;
- int ret;
-
- status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state);
- if (ACPI_FAILURE(status))
- return -ENODEV;
+ int state;
- /* input layer checks if event is redundant */
- input_report_switch(button->input, SW_LID, !state);
- input_sync(button->input);
+ state = acpi_lid_evaluate_state(device);
+ if (state < 0)
+ return state;
- if (state)
- pm_wakeup_event(&device->dev, 0);
+ return acpi_lid_notify_state(device, state);
+}
- ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
- if (ret == NOTIFY_DONE)
- ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
- device);
- if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
- /*
- * It is also regarded as success if the notifier_chain
- * returns NOTIFY_OK or NOTIFY_DONE.
- */
- ret = 0;
+static void acpi_lid_initialize_state(struct acpi_device *device)
+{
+ switch (lid_init_state) {
+ case ACPI_BUTTON_LID_INIT_OPEN:
+ (void)acpi_lid_notify_state(device, 1);
+ break;
+ case ACPI_BUTTON_LID_INIT_METHOD:
+ (void)acpi_lid_update_state(device);
+ break;
+ case ACPI_BUTTON_LID_INIT_IGNORE:
+ default:
+ break;
}
- return ret;
}
static void acpi_button_notify(struct acpi_device *device, u32 event)
@@ -290,7 +317,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
case ACPI_BUTTON_NOTIFY_STATUS:
input = button->input;
if (button->type == ACPI_BUTTON_TYPE_LID) {
- acpi_lid_send_state(device);
+ acpi_lid_update_state(device);
} else {
int keycode;
@@ -335,7 +362,7 @@ static int acpi_button_resume(struct device *dev)
button->suspended = false;
if (button->type == ACPI_BUTTON_TYPE_LID)
- return acpi_lid_send_state(device);
+ acpi_lid_initialize_state(device);
return 0;
}
#endif
@@ -416,7 +443,7 @@ static int acpi_button_add(struct acpi_device *device)
if (error)
goto err_remove_fs;
if (button->type == ACPI_BUTTON_TYPE_LID) {
- acpi_lid_send_state(device);
+ acpi_lid_initialize_state(device);
/*
* This assumes there's only one lid device, or if there are
* more we only care about the last one...
@@ -446,4 +473,42 @@ static int acpi_button_remove(struct acpi_device *device)
return 0;
}
+static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
+{
+ int result = 0;
+
+ if (!strncmp(val, "open", sizeof("open") - 1)) {
+ lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+ pr_info("Notify initial lid state as open\n");
+ } else if (!strncmp(val, "method", sizeof("method") - 1)) {
+ lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+ pr_info("Notify initial lid state with _LID return value\n");
+ } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) {
+ lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE;
+ pr_info("Do not notify initial lid state\n");
+ } else
+ result = -EINVAL;
+ return result;
+}
+
+static int param_get_lid_init_state(char *buffer, struct kernel_param *kp)
+{
+ switch (lid_init_state) {
+ case ACPI_BUTTON_LID_INIT_OPEN:
+ return sprintf(buffer, "open");
+ case ACPI_BUTTON_LID_INIT_METHOD:
+ return sprintf(buffer, "method");
+ case ACPI_BUTTON_LID_INIT_IGNORE:
+ return sprintf(buffer, "ignore");
+ default:
+ return sprintf(buffer, "invalid");
+ }
+ return 0;
+}
+
+module_param_call(lid_init_state,
+ param_set_lid_init_state, param_get_lid_init_state,
+ NULL, 0644);
+MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
+
module_acpi_driver(acpi_button_driver);
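The new parameter can be chosen at boot time (button.lid_init_state=open on the kernel command line) or changed later through sysfs, since it is registered with mode 0644. A small sketch of the run-time path, assuming the driver is built as the usual "button" module so the file lives under /sys/module/button/parameters/:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* path assumes the driver is built as/loaded from the "button" module */
#define LID_PARAM "/sys/module/button/parameters/lid_init_state"

int main(void)
{
	char cur[16] = "";
	ssize_t n;
	int fd;

	fd = open(LID_PARAM, O_RDONLY);
	if (fd < 0) {
		perror(LID_PARAM);
		return 1;
	}
	/* param_get_lid_init_state() reports "open", "method" or "ignore" */
	n = read(fd, cur, sizeof(cur) - 1);
	close(fd);
	if (n > 0)
		printf("current: %.*s\n", (int)n, cur);

	/* param_set_lid_init_state() matches the keyword by prefix */
	fd = open(LID_PARAM, O_WRONLY);
	if (fd < 0) {
		perror(LID_PARAM);
		return 1;
	}
	if (write(fd, "open", 4) != 4)
		perror("write");
	close(fd);
	return 0;
}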
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 8adac69dba3d..2e981732805b 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -299,8 +299,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
cpc_ptr = per_cpu(cpc_desc_ptr, i);
- if (!cpc_ptr)
- continue;
+ if (!cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
pdomain = &(cpc_ptr->domain_info);
cpumask_set_cpu(i, pr->shared_cpu_map);
@@ -322,8 +324,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr)
- continue;
+ if (!match_cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
match_pdomain = &(match_cpc_ptr->domain_info);
if (match_pdomain->domain != pdomain->domain)
@@ -353,8 +357,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr)
- continue;
+ if (!match_cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
match_pdomain = &(match_cpc_ptr->domain_info);
if (match_pdomain->domain != pdomain->domain)
@@ -595,9 +601,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
- /* Plug it into this CPUs CPC descriptor. */
- per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
-
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
if (ret)
@@ -610,6 +613,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
+ /* Plug PSD data into this CPUs CPC descriptor. */
+ per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
+
/* Everything looks okay */
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index e8e128dede29..0c00208b423e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -21,7 +21,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -33,12 +33,7 @@
#include "internal.h"
-#define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
-
ACPI_MODULE_NAME("dock");
-MODULE_AUTHOR("Kristen Carlson Accardi");
-MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_DESCRIPTION);
-MODULE_LICENSE("GPL");
static bool immediate_undock = 1;
module_param(immediate_undock, bool, 0644);
diff --git a/drivers/acpi/dptf/Kconfig b/drivers/acpi/dptf/Kconfig
new file mode 100644
index 000000000000..ac0a6ed0cf46
--- /dev/null
+++ b/drivers/acpi/dptf/Kconfig
@@ -0,0 +1,15 @@
+config DPTF_POWER
+ tristate "DPTF Platform Power Participant"
+ depends on X86
+ help
+ This driver adds support for the Dynamic Platform and Thermal
+ Framework (DPTF) Platform Power Participant device (INT3407).
+ This participant is responsible for exposing platform telemetry:
+ max_platform_power
+ platform_power_source
+ adapter_rating
+ battery_steady_power
+ charger_type
+
+ To compile this driver as a module, choose M here:
+ the module will be called dptf_power.
diff --git a/drivers/acpi/dptf/Makefile b/drivers/acpi/dptf/Makefile
new file mode 100644
index 000000000000..06ea8809583d
--- /dev/null
+++ b/drivers/acpi/dptf/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_ACPI) += int340x_thermal.o
+obj-$(CONFIG_DPTF_POWER) += dptf_power.o
+
+ccflags-y += -Idrivers/acpi
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
new file mode 100644
index 000000000000..734642dc5008
--- /dev/null
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -0,0 +1,128 @@
+/*
+ * dptf_power: DPTF platform power driver
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+
+/*
+ * Presentation of attributes which are defined for INT3407. They are:
+ * PMAX : Maximum platform power
+ * PSRC : Platform power source
+ * ARTG : Adapter rating
+ * CTYP : Charger type
+ * PBSS : Battery steady power
+ */
+#define DPTF_POWER_SHOW(name, object) \
+static ssize_t name##_show(struct device *dev,\
+ struct device_attribute *attr,\
+ char *buf)\
+{\
+ struct platform_device *pdev = to_platform_device(dev);\
+ struct acpi_device *acpi_dev = platform_get_drvdata(pdev);\
+ unsigned long long val;\
+ acpi_status status;\
+\
+ status = acpi_evaluate_integer(acpi_dev->handle, #object,\
+ NULL, &val);\
+ if (ACPI_SUCCESS(status))\
+ return sprintf(buf, "%d\n", (int)val);\
+ else \
+ return -EINVAL;\
+}
+
+DPTF_POWER_SHOW(max_platform_power_mw, PMAX)
+DPTF_POWER_SHOW(platform_power_source, PSRC)
+DPTF_POWER_SHOW(adapter_rating_mw, ARTG)
+DPTF_POWER_SHOW(battery_steady_power_mw, PBSS)
+DPTF_POWER_SHOW(charger_type, CTYP)
+
+static DEVICE_ATTR_RO(max_platform_power_mw);
+static DEVICE_ATTR_RO(platform_power_source);
+static DEVICE_ATTR_RO(adapter_rating_mw);
+static DEVICE_ATTR_RO(battery_steady_power_mw);
+static DEVICE_ATTR_RO(charger_type);
+
+static struct attribute *dptf_power_attrs[] = {
+ &dev_attr_max_platform_power_mw.attr,
+ &dev_attr_platform_power_source.attr,
+ &dev_attr_adapter_rating_mw.attr,
+ &dev_attr_battery_steady_power_mw.attr,
+ &dev_attr_charger_type.attr,
+ NULL
+};
+
+static struct attribute_group dptf_power_attribute_group = {
+ .attrs = dptf_power_attrs,
+ .name = "dptf_power"
+};
+
+static int dptf_power_add(struct platform_device *pdev)
+{
+ struct acpi_device *acpi_dev;
+ acpi_status status;
+ unsigned long long ptype;
+ int result;
+
+ acpi_dev = ACPI_COMPANION(&(pdev->dev));
+ if (!acpi_dev)
+ return -ENODEV;
+
+ status = acpi_evaluate_integer(acpi_dev->handle, "PTYP", NULL, &ptype);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (ptype != 0x11)
+ return -ENODEV;
+
+ result = sysfs_create_group(&pdev->dev.kobj,
+ &dptf_power_attribute_group);
+ if (result)
+ return result;
+
+ platform_set_drvdata(pdev, acpi_dev);
+
+ return 0;
+}
+
+static int dptf_power_remove(struct platform_device *pdev)
+{
+
+ sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group);
+
+ return 0;
+}
+
+static const struct acpi_device_id int3407_device_ids[] = {
+ {"INT3407", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
+
+static struct platform_driver dptf_power_driver = {
+ .probe = dptf_power_add,
+ .remove = dptf_power_remove,
+ .driver = {
+ .name = "DPTF Platform Power",
+ .acpi_match_table = int3407_device_ids,
+ },
+};
+
+module_platform_driver(dptf_power_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ACPI DPTF platform power driver");
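Since DPTF_POWER_SHOW() stamps out five nearly identical callbacks, it may help to see one expansion written out; the PSRC instance is equivalent to:

static ssize_t platform_power_source_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct acpi_device *acpi_dev = platform_get_drvdata(pdev);
	unsigned long long val;
	acpi_status status;

	/* "PSRC" is the ACPI object named in DPTF_POWER_SHOW(..., PSRC) */
	status = acpi_evaluate_integer(acpi_dev->handle, "PSRC", NULL, &val);
	if (ACPI_SUCCESS(status))
		return sprintf(buf, "%d\n", (int)val);
	else
		return -EINVAL;
}

The resulting files appear in the "dptf_power" attribute group of the participant's platform device, e.g. /sys/bus/platform/devices/INT3407:00/dptf_power/platform_power_source (the :00 instance suffix here is illustrative).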
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 33505c651f62..33505c651f62 100644
--- a/drivers/acpi/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 73c76d646064..999a10914678 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1331,8 +1331,6 @@ static int ec_install_handlers(struct acpi_ec *ec)
static void ec_remove_handlers(struct acpi_ec *ec)
{
- acpi_ec_stop(ec, false);
-
if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
@@ -1340,6 +1338,19 @@ static void ec_remove_handlers(struct acpi_ec *ec)
clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
}
+ /*
+ * Stops handling the EC transactions after removing the operation
+ * region handler. This is required because _REG(DISCONNECT)
+ * invoked during the removal can result in new EC transactions.
+ *
+ * Flushes the EC requests and thus disables the GPE before
+ * removing the GPE handler. This is required by the current ACPICA
+ * GPE core. ACPICA GPE core will automatically disable a GPE when
+ * it is indicated but there is no way to handle it. So the drivers
+ * must disable the GPEs prior to removing the GPE handlers.
+ */
+ acpi_ec_stop(ec, false);
+
if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler)))
@@ -1348,13 +1359,9 @@ static void ec_remove_handlers(struct acpi_ec *ec)
}
}
-static int acpi_ec_add(struct acpi_device *device)
+static struct acpi_ec *acpi_ec_alloc(void)
{
- struct acpi_ec *ec = NULL;
- int ret;
-
- strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_EC_CLASS);
+ struct acpi_ec *ec;
/* Check for boot EC */
if (boot_ec) {
@@ -1365,9 +1372,21 @@ static int acpi_ec_add(struct acpi_device *device)
first_ec = NULL;
} else {
ec = make_acpi_ec();
- if (!ec)
- return -ENOMEM;
}
+ return ec;
+}
+
+static int acpi_ec_add(struct acpi_device *device)
+{
+ struct acpi_ec *ec = NULL;
+ int ret;
+
+ strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_EC_CLASS);
+
+ ec = acpi_ec_alloc();
+ if (!ec)
+ return -ENOMEM;
if (ec_parse_device(device->handle, 0, ec, NULL) !=
AE_CTRL_TERMINATE) {
kfree(ec);
@@ -1454,27 +1473,31 @@ static const struct acpi_device_id ec_device_ids[] = {
int __init acpi_ec_dsdt_probe(void)
{
acpi_status status;
+ struct acpi_ec *ec;
+ int ret;
- if (boot_ec)
- return 0;
-
+ ec = acpi_ec_alloc();
+ if (!ec)
+ return -ENOMEM;
/*
* Finding EC from DSDT if there is no ECDT EC available. When this
* function is invoked, ACPI tables have been fully loaded, we can
* walk namespace now.
*/
- boot_ec = make_acpi_ec();
- if (!boot_ec)
- return -ENOMEM;
status = acpi_get_devices(ec_device_ids[0].id,
- ec_parse_device, boot_ec, NULL);
- if (ACPI_FAILURE(status) || !boot_ec->handle)
- return -ENODEV;
- if (!ec_install_handlers(boot_ec)) {
- first_ec = boot_ec;
- return 0;
+ ec_parse_device, ec, NULL);
+ if (ACPI_FAILURE(status) || !ec->handle) {
+ ret = -ENODEV;
+ goto error;
}
- return -EFAULT;
+ ret = ec_install_handlers(ec);
+
+error:
+ if (ret)
+ kfree(ec);
+ else
+ first_ec = boot_ec = ec;
+ return ret;
}
#if 0
@@ -1518,6 +1541,11 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
return 0;
}
+/*
+ * Some ECDTs contain wrong register addresses.
+ * MSI MS-171F
+ * https://bugzilla.kernel.org/show_bug.cgi?id=12461
+ */
static int ec_correct_ecdt(const struct dmi_system_id *id)
{
pr_debug("Detected system needing ECDT address correction.\n");
@@ -1527,16 +1555,6 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
static struct dmi_system_id ec_dmi_table[] __initdata = {
{
- ec_correct_ecdt, "Asus L4R", {
- DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
- DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
- DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
- {
- ec_correct_ecdt, "Asus M6R", {
- DMI_MATCH(DMI_BIOS_VERSION, "0207"),
- DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
- DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
- {
ec_correct_ecdt, "MSI MS-171F", {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
@@ -1548,12 +1566,13 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
int __init acpi_ec_ecdt_probe(void)
{
- int ret = 0;
+ int ret;
acpi_status status;
struct acpi_table_ecdt *ecdt_ptr;
+ struct acpi_ec *ec;
- boot_ec = make_acpi_ec();
- if (!boot_ec)
+ ec = acpi_ec_alloc();
+ if (!ec)
return -ENOMEM;
/*
* Generate a boot ec context
@@ -1577,28 +1596,20 @@ int __init acpi_ec_ecdt_probe(void)
pr_info("EC description table is found, configuring boot EC\n");
if (EC_FLAGS_CORRECT_ECDT) {
- /*
- * Asus L4R, Asus M6R
- * https://bugzilla.kernel.org/show_bug.cgi?id=9399
- * MSI MS-171F
- * https://bugzilla.kernel.org/show_bug.cgi?id=12461
- */
- boot_ec->command_addr = ecdt_ptr->data.address;
- boot_ec->data_addr = ecdt_ptr->control.address;
+ ec->command_addr = ecdt_ptr->data.address;
+ ec->data_addr = ecdt_ptr->control.address;
} else {
- boot_ec->command_addr = ecdt_ptr->control.address;
- boot_ec->data_addr = ecdt_ptr->data.address;
+ ec->command_addr = ecdt_ptr->control.address;
+ ec->data_addr = ecdt_ptr->data.address;
}
- boot_ec->gpe = ecdt_ptr->gpe;
- boot_ec->handle = ACPI_ROOT_OBJECT;
- ret = ec_install_handlers(boot_ec);
- if (!ret)
- first_ec = boot_ec;
+ ec->gpe = ecdt_ptr->gpe;
+ ec->handle = ACPI_ROOT_OBJECT;
+ ret = ec_install_handlers(ec);
error:
- if (ret) {
- kfree(boot_ec);
- boot_ec = NULL;
- }
+ if (ret)
+ kfree(ec);
+ else
+ first_ec = boot_ec = ec;
return ret;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 27cc7feabfe4..940218ff0193 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -87,6 +87,9 @@ bool acpi_queue_hotplug_work(struct work_struct *work);
void acpi_device_hotplug(struct acpi_device *adev, u32 src);
bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
+acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context);
+void acpi_scan_table_handler(u32 event, void *table, void *context);
+
/* --------------------------------------------------------------------------
Device Node Initialization / Removal
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index ac6ddcc080d4..1f0e06065ae6 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -1131,11 +1131,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
/*
* Until standardization materializes we need to consider up to 3
- * different command sets. Note, that checking for zero functions
- * tells us if any commands might be reachable through this uuid.
+ * different command sets. Note, that checking for function0 (bit0)
+ * tells us if any commands are reachable through this uuid.
*/
for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
- if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 0))
+ if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
break;
/* limit the supported commands to those that are publicly documented */
@@ -1151,9 +1151,10 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
if (disable_vendor_specific)
dsm_mask &= ~(1 << 8);
} else {
- dev_err(dev, "unknown dimm command family\n");
+ dev_dbg(dev, "unknown dimm command family\n");
nfit_mem->family = -1;
- return force_enable_dimms ? 0 : -ENODEV;
+ /* DSMs are optional, continue loading the driver... */
+ return 0;
}
uuid = to_nfit_uuid(nfit_mem->family);
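The function0 check above leans on the _DSM convention that function 0 returns a bitmask of implemented functions, so bit 0 distinguishes "this UUID actually does something" from "a stub _DSM merely exists". A sketch of that probe using acpi_evaluate_dsm() directly (family_has_functions is a hypothetical helper, not part of this patch):

/*
 * Illustrative sketch: evaluate _DSM function 0 for the given UUID at
 * revision 1 and test bit 0 of the returned function bitmask.
 */
static bool family_has_functions(acpi_handle handle, const u8 *uuid)
{
	union acpi_object *obj;
	bool supported = false;

	obj = acpi_evaluate_dsm(handle, uuid, 1, 0, NULL);
	if (!obj)
		return false;

	if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0)
		supported = obj->buffer.pointer[0] & 1;

	ACPI_FREE(obj);
	return supported;
}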
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d176e0ece470..ce3a7a16f03f 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -18,22 +18,21 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
-#define PREFIX "ACPI: "
-
-#define ACPI_NUMA 0x80000000
-#define _COMPONENT ACPI_NUMA
-ACPI_MODULE_NAME("numa");
-
static nodemask_t nodes_found_map = NODE_MASK_NONE;
/* maps to convert between proximity domain and logical node ID */
@@ -43,6 +42,7 @@ static int node_to_pxm_map[MAX_NUMNODES]
= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
unsigned char acpi_srat_revision __initdata;
+int acpi_numa __initdata;
int pxm_to_node(int pxm)
{
@@ -128,68 +128,63 @@ EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
-
- ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
-
- if (!header)
- return;
-
switch (header->type) {
-
case ACPI_SRAT_TYPE_CPU_AFFINITY:
-#ifdef ACPI_DEBUG_OUTPUT
{
struct acpi_srat_cpu_affinity *p =
(struct acpi_srat_cpu_affinity *)header;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
- p->apic_id, p->local_sapic_eid,
- p->proximity_domain_lo,
- (p->flags & ACPI_SRAT_CPU_ENABLED)?
- "enabled" : "disabled"));
+ pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
+ p->apic_id, p->local_sapic_eid,
+ p->proximity_domain_lo,
+ (p->flags & ACPI_SRAT_CPU_ENABLED) ?
+ "enabled" : "disabled");
}
-#endif /* ACPI_DEBUG_OUTPUT */
break;
case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
-#ifdef ACPI_DEBUG_OUTPUT
{
struct acpi_srat_mem_affinity *p =
(struct acpi_srat_mem_affinity *)header;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
- (unsigned long)p->base_address,
- (unsigned long)p->length,
- p->proximity_domain,
- (p->flags & ACPI_SRAT_MEM_ENABLED)?
- "enabled" : "disabled",
- (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
- " hot-pluggable" : "",
- (p->flags & ACPI_SRAT_MEM_NON_VOLATILE)?
- " non-volatile" : ""));
+ pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
+ (unsigned long)p->base_address,
+ (unsigned long)p->length,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_MEM_ENABLED) ?
+ "enabled" : "disabled",
+ (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
+ " hot-pluggable" : "",
+ (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
+ " non-volatile" : "");
}
-#endif /* ACPI_DEBUG_OUTPUT */
break;
case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
-#ifdef ACPI_DEBUG_OUTPUT
{
struct acpi_srat_x2apic_cpu_affinity *p =
(struct acpi_srat_x2apic_cpu_affinity *)header;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Processor (x2apicid[0x%08x]) in"
- " proximity domain %d %s\n",
- p->apic_id,
- p->proximity_domain,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled"));
+ pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
+ p->apic_id,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_CPU_ENABLED) ?
+ "enabled" : "disabled");
}
-#endif /* ACPI_DEBUG_OUTPUT */
break;
+
+ case ACPI_SRAT_TYPE_GICC_AFFINITY:
+ {
+ struct acpi_srat_gicc_affinity *p =
+ (struct acpi_srat_gicc_affinity *)header;
+ pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
+ p->acpi_processor_uid,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_GICC_ENABLED) ?
+ "enabled" : "disabled");
+ }
+ break;
+
default:
- printk(KERN_WARNING PREFIX
- "Found unsupported SRAT entry (type = 0x%x)\n",
- header->type);
+ pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
+ header->type);
break;
}
}
@@ -217,12 +212,117 @@ static int __init slit_valid(struct acpi_table_slit *slit)
return 1;
}
+void __init bad_srat(void)
+{
+ pr_err("SRAT: SRAT not used.\n");
+ acpi_numa = -1;
+}
+
+int __init srat_disabled(void)
+{
+ return acpi_numa < 0;
+}
+
+#if defined(CONFIG_X86) || defined(CONFIG_ARM64)
+/*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them. I/O localities are
+ * not supported at this point.
+ */
+void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
+{
+ int i, j;
+
+ for (i = 0; i < slit->locality_count; i++) {
+ const int from_node = pxm_to_node(i);
+
+ if (from_node == NUMA_NO_NODE)
+ continue;
+
+ for (j = 0; j < slit->locality_count; j++) {
+ const int to_node = pxm_to_node(j);
+
+ if (to_node == NUMA_NO_NODE)
+ continue;
+
+ numa_set_distance(from_node, to_node,
+ slit->entry[slit->locality_count * i + j]);
+ }
+ }
+}
+
+/*
+ * Default callback for parsing of the Proximity Domain <-> Memory
+ * Area mappings
+ */
+int __init
+acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
+{
+ u64 start, end;
+ u32 hotpluggable;
+ int node, pxm;
+
+ if (srat_disabled())
+ goto out_err;
+ if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) {
+ pr_err("SRAT: Unexpected header length: %d\n",
+ ma->header.length);
+ goto out_err_bad_srat;
+ }
+ if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
+ goto out_err;
+ hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
+ if (hotpluggable && !IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
+ goto out_err;
+
+ start = ma->base_address;
+ end = start + ma->length;
+ pxm = ma->proximity_domain;
+ if (acpi_srat_revision <= 1)
+ pxm &= 0xff;
+
+ node = acpi_map_pxm_to_node(pxm);
+ if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+ pr_err("SRAT: Too many proximity domains.\n");
+ goto out_err_bad_srat;
+ }
+
+ if (numa_add_memblk(node, start, end) < 0) {
+ pr_err("SRAT: Failed to add memblk to node %u [mem %#010Lx-%#010Lx]\n",
+ node, (unsigned long long) start,
+ (unsigned long long) end - 1);
+ goto out_err_bad_srat;
+ }
+
+ node_set(node, numa_nodes_parsed);
+
+ pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
+ node, pxm,
+ (unsigned long long) start, (unsigned long long) end - 1,
+ hotpluggable ? " hotplug" : "",
+ ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");
+
+ /* Mark hotplug range in memblock. */
+ if (hotpluggable && memblock_mark_hotplug(start, ma->length))
+ pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
+ (unsigned long long)start, (unsigned long long)end - 1);
+
+ max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));
+
+ return 0;
+out_err_bad_srat:
+ bad_srat();
+out_err:
+ return -EINVAL;
+}
+#endif /* defined(CONFIG_X86) || defined(CONFIG_ARM64) */
+
static int __init acpi_parse_slit(struct acpi_table_header *table)
{
struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
if (!slit_valid(slit)) {
- printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
+ pr_info("SLIT table looks invalid. Not used.\n");
return -EINVAL;
}
acpi_numa_slit_init(slit);
@@ -233,12 +333,9 @@ static int __init acpi_parse_slit(struct acpi_table_header *table)
void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
- printk(KERN_WARNING PREFIX
- "Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
- return;
+ pr_warn("Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
}
-
static int __init
acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
const unsigned long end)
@@ -275,6 +372,24 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
return 0;
}
+static int __init
+acpi_parse_gicc_affinity(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_srat_gicc_affinity *processor_affinity;
+
+ processor_affinity = (struct acpi_srat_gicc_affinity *)header;
+ if (!processor_affinity)
+ return -EINVAL;
+
+ acpi_table_print_srat_entry(header);
+
+	/* let the architecture-dependent part do it */
+ acpi_numa_gicc_affinity_init(processor_affinity);
+
+ return 0;
+}
+
static int __initdata parsed_numa_memblks;
static int __init
@@ -319,6 +434,9 @@ int __init acpi_numa_init(void)
{
int cnt = 0;
+ if (acpi_disabled)
+ return -EINVAL;
+
/*
* Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
* SRAT cpu entries could have different order with that in MADT.
@@ -327,13 +445,15 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- struct acpi_subtable_proc srat_proc[2];
+ struct acpi_subtable_proc srat_proc[3];
memset(srat_proc, 0, sizeof(srat_proc));
srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
srat_proc[0].handler = acpi_parse_processor_affinity;
srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
srat_proc[1].handler = acpi_parse_x2apic_affinity;
+ srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY;
+ srat_proc[2].handler = acpi_parse_gicc_affinity;
acpi_table_parse_entries_array(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),
@@ -347,8 +467,6 @@ int __init acpi_numa_init(void)
/* SLIT: System Locality Information Table */
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
- acpi_numa_arch_fixup();
-
if (cnt < 0)
return cnt;
else if (!parsed_numa_memblks)
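The SLIT walk in acpi_numa_slit_init() above relies on the table's flattened row-major layout: the distance from proximity domain i to domain j sits at entry[locality_count * i + j]. A minimal lookup sketch (the helper name is illustrative):

	/* Illustrative lookup into the flattened N x N SLIT distance matrix */
	static u8 slit_distance(struct acpi_table_slit *slit, int i, int j)
	{
		return slit->entry[slit->locality_count * i + j];
	}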
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 4ed4061813e6..c983bf733ad3 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -470,6 +470,7 @@ static int acpi_irq_pci_sharing_penalty(int irq)
{
struct acpi_pci_link *link;
int penalty = 0;
+ int i;
list_for_each_entry(link, &acpi_link_list, list) {
/*
@@ -478,18 +479,14 @@ static int acpi_irq_pci_sharing_penalty(int irq)
*/
if (link->irq.active && link->irq.active == irq)
penalty += PIRQ_PENALTY_PCI_USING;
- else {
- int i;
-
- /*
- * If a link is inactive, penalize the IRQs it
- * might use, but not as severely.
- */
- for (i = 0; i < link->irq.possible_count; i++)
- if (link->irq.possible[i] == irq)
- penalty += PIRQ_PENALTY_PCI_POSSIBLE /
- link->irq.possible_count;
- }
+
+ /*
+	 * Penalize the IRQs this link might use, but not as severely.
+ */
+ for (i = 0; i < link->irq.possible_count; i++)
+ if (link->irq.possible[i] == irq)
+ penalty += PIRQ_PENALTY_PCI_POSSIBLE /
+ link->irq.possible_count;
}
return penalty;
@@ -499,9 +496,6 @@ static int acpi_irq_get_penalty(int irq)
{
int penalty = 0;
- if (irq < ACPI_MAX_ISA_IRQS)
- penalty += acpi_isa_irq_penalty[irq];
-
/*
* Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
* with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
@@ -516,10 +510,49 @@ static int acpi_irq_get_penalty(int irq)
penalty += PIRQ_PENALTY_PCI_USING;
}
+ if (irq < ACPI_MAX_ISA_IRQS)
+ return penalty + acpi_isa_irq_penalty[irq];
+
penalty += acpi_irq_pci_sharing_penalty(irq);
return penalty;
}
+int __init acpi_irq_penalty_init(void)
+{
+ struct acpi_pci_link *link;
+ int i;
+
+ /*
+ * Update penalties to facilitate IRQ balancing.
+ */
+ list_for_each_entry(link, &acpi_link_list, list) {
+
+ /*
+ * reflect the possible and active irqs in the penalty table --
+ * useful for breaking ties.
+ */
+ if (link->irq.possible_count) {
+ int penalty =
+ PIRQ_PENALTY_PCI_POSSIBLE /
+ link->irq.possible_count;
+
+ for (i = 0; i < link->irq.possible_count; i++) {
+ if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
+ acpi_isa_irq_penalty[link->irq.
+ possible[i]] +=
+ penalty;
+ }
+
+ } else if (link->irq.active &&
+ (link->irq.active < ACPI_MAX_ISA_IRQS)) {
+ acpi_isa_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_POSSIBLE;
+ }
+ }
+
+ return 0;
+}
+
static int acpi_irq_balance = -1; /* 0: static, 1: balance */
static int acpi_pci_link_allocate(struct acpi_pci_link *link)
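Net effect of the pci_link.c hunks above: IRQs below ACPI_MAX_ISA_IRQS now return early with the SCI penalty plus the static acpi_isa_irq_penalty[] table, which acpi_irq_penalty_init() pre-loads from the link objects, while only higher IRQs pay the dynamic PCI sharing penalty. A sketch of the even split each link contributes, using a hypothetical helper:

	/* Hypothetical helper: spread a link's weight across its possible IRQs */
	static void spread_possible_penalty(int *table, const int *possible,
					    int count, int weight)
	{
		int i;

		for (i = 0; i < count; i++)
			table[possible[i]] += weight / count;
	}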
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 7188e53b6b7c..f62c68e24317 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -22,8 +22,9 @@
* General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -33,30 +34,11 @@
#include <linux/dmi.h>
#include <linux/pci-acpi.h>
-static bool debug;
static int check_sta_before_sun;
-#define DRIVER_VERSION "0.1"
-#define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>"
-#define DRIVER_DESC "ACPI PCI Slot Detection Driver"
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-module_param(debug, bool, 0644);
-
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_slot");
-#define MY_NAME "pci_slot"
-#define err(format, arg...) pr_err("%s: " format , MY_NAME , ## arg)
-#define info(format, arg...) pr_info("%s: " format , MY_NAME , ## arg)
-#define dbg(format, arg...) \
- do { \
- if (debug) \
- pr_debug("%s: " format, MY_NAME , ## arg); \
- } while (0)
-
#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */
struct acpi_pci_slot {
@@ -76,7 +58,7 @@ check_slot(acpi_handle handle, unsigned long long *sun)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- dbg("Checking slot on path: %s\n", (char *)buffer.pointer);
+ pr_debug("Checking slot on path: %s\n", (char *)buffer.pointer);
if (check_sta_before_sun) {
/* If SxFy doesn't have _STA, we just assume it's there */
@@ -87,14 +69,16 @@ check_slot(acpi_handle handle, unsigned long long *sun)
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
- dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer);
+ pr_debug("_ADR returned %d on %s\n",
+ status, (char *)buffer.pointer);
goto out;
}
/* No _SUN == not a slot == bail */
status = acpi_evaluate_integer(handle, "_SUN", NULL, sun);
if (ACPI_FAILURE(status)) {
- dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer);
+ pr_debug("_SUN returned %d on %s\n",
+ status, (char *)buffer.pointer);
goto out;
}
@@ -132,15 +116,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
}
slot = kmalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot) {
- err("%s: cannot allocate memory\n", __func__);
+ if (!slot)
return AE_OK;
- }
snprintf(name, sizeof(name), "%llu", sun);
pci_slot = pci_create_slot(pci_bus, device, name, NULL);
if (IS_ERR(pci_slot)) {
- err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
+ pr_err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
kfree(slot);
return AE_OK;
}
@@ -150,8 +132,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
get_device(&pci_bus->dev);
- dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n",
- pci_slot, pci_bus->number, device, name);
+ pr_debug("%p, pci_bus: %x, device: %d, name: %s\n",
+ pci_slot, pci_bus->number, device, name);
return AE_OK;
}
@@ -186,7 +168,8 @@ void acpi_pci_slot_remove(struct pci_bus *bus)
static int do_sta_before_sun(const struct dmi_system_id *d)
{
- info("%s detected: will evaluate _STA before calling _SUN\n", d->ident);
+ pr_info("%s detected: will evaluate _STA before calling _SUN\n",
+ d->ident);
check_sta_before_sun = 1;
return 0;
}
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index bd772cd56494..ca18e0d23df9 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -13,7 +13,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/regmap.h>
#include <acpi/acpi_lpat.h>
@@ -21,12 +21,19 @@
#define PMIC_POWER_OPREGION_ID 0x8d
#define PMIC_THERMAL_OPREGION_ID 0x8c
+#define PMIC_REGS_OPREGION_ID 0x8f
+
+struct intel_pmic_regs_handler_ctx {
+ unsigned int val;
+ u16 addr;
+};
struct intel_pmic_opregion {
struct mutex lock;
struct acpi_lpat_conversion_table *lpat_table;
struct regmap *regmap;
struct intel_pmic_opregion_data *data;
+ struct intel_pmic_regs_handler_ctx ctx;
};
static int pmic_get_reg_bit(int address, struct pmic_table *table,
@@ -131,7 +138,7 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg,
}
static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg,
- u32 function, u64 *value)
+ int bit, u32 function, u64 *value)
{
struct intel_pmic_opregion_data *d = opregion->data;
struct regmap *regmap = opregion->regmap;
@@ -140,12 +147,12 @@ static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg,
return -ENXIO;
if (function == ACPI_READ)
- return d->get_policy(regmap, reg, value);
+ return d->get_policy(regmap, reg, bit, value);
if (*value != 0 && *value != 1)
return -EINVAL;
- return d->update_policy(regmap, reg, *value);
+ return d->update_policy(regmap, reg, bit, *value);
}
static bool pmic_thermal_is_temp(int address)
@@ -170,13 +177,13 @@ static acpi_status intel_pmic_thermal_handler(u32 function,
{
struct intel_pmic_opregion *opregion = region_context;
struct intel_pmic_opregion_data *d = opregion->data;
- int reg, result;
+ int reg, bit, result;
if (bits != 32 || !value64)
return AE_BAD_PARAMETER;
result = pmic_get_reg_bit(address, d->thermal_table,
- d->thermal_table_count, &reg, NULL);
+ d->thermal_table_count, &reg, &bit);
if (result == -ENOENT)
return AE_BAD_PARAMETER;
@@ -187,7 +194,8 @@ static acpi_status intel_pmic_thermal_handler(u32 function,
else if (pmic_thermal_is_aux(address))
result = pmic_thermal_aux(opregion, reg, function, value64);
else if (pmic_thermal_is_pen(address))
- result = pmic_thermal_pen(opregion, reg, function, value64);
+ result = pmic_thermal_pen(opregion, reg, bit,
+ function, value64);
else
result = -EINVAL;
@@ -203,6 +211,48 @@ static acpi_status intel_pmic_thermal_handler(u32 function,
return AE_OK;
}
+static acpi_status intel_pmic_regs_handler(u32 function,
+ acpi_physical_address address, u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct intel_pmic_opregion *opregion = region_context;
+ int result = 0;
+
+ switch (address) {
+ case 0:
+ return AE_OK;
+ case 1:
+ opregion->ctx.addr |= (*value64 & 0xff) << 8;
+ return AE_OK;
+ case 2:
+ opregion->ctx.addr |= *value64 & 0xff;
+ return AE_OK;
+ case 3:
+ opregion->ctx.val = *value64 & 0xff;
+ return AE_OK;
+ case 4:
+ if (*value64) {
+ result = regmap_write(opregion->regmap, opregion->ctx.addr,
+ opregion->ctx.val);
+ } else {
+ result = regmap_read(opregion->regmap, opregion->ctx.addr,
+ &opregion->ctx.val);
+ if (result == 0)
+ *value64 = opregion->ctx.val;
+ }
+ memset(&opregion->ctx, 0x00, sizeof(opregion->ctx));
+ }
+
+ if (result < 0) {
+ if (result == -EINVAL)
+ return AE_BAD_PARAMETER;
+ else
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
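The new regs opregion is a small mailbox: offsets 1 and 2 latch the high and low bytes of the target PMIC register, offset 3 latches a value, and a write to offset 4 commits, nonzero meaning regmap_write() and zero meaning regmap_read() with the result returned through *value64, after which the context resets. A sketch of the sequence the firmware's AML performs, expressed in C with a hypothetical opregion_write() accessor:

	/* Hypothetical accessor mirroring the AML field writes described above */
	static void pmic_reg_write(u16 addr, u8 val)
	{
		opregion_write(1, addr >> 8);	/* address high byte */
		opregion_write(2, addr & 0xff);	/* address low byte */
		opregion_write(3, val);		/* value to store */
		opregion_write(4, 1);		/* nonzero commit -> regmap_write() */
	}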
+
int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
struct regmap *regmap,
struct intel_pmic_opregion_data *d)
@@ -242,16 +292,30 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
intel_pmic_power_handler);
ret = -ENODEV;
- goto out_error;
+ goto out_remove_power_handler;
+ }
+
+ status = acpi_install_address_space_handler(handle,
+ PMIC_REGS_OPREGION_ID, intel_pmic_regs_handler, NULL,
+ opregion);
+ if (ACPI_FAILURE(status)) {
+ ret = -ENODEV;
+ goto out_remove_thermal_handler;
}
opregion->data = d;
return 0;
+out_remove_thermal_handler:
+ acpi_remove_address_space_handler(handle, PMIC_THERMAL_OPREGION_ID,
+ intel_pmic_thermal_handler);
+
+out_remove_power_handler:
+ acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
+ intel_pmic_power_handler);
+
out_error:
acpi_lpat_free_conversion_table(opregion->lpat_table);
return ret;
}
EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h
index d4e90af8f0dd..e8bfa7b865a5 100644
--- a/drivers/acpi/pmic/intel_pmic.h
+++ b/drivers/acpi/pmic/intel_pmic.h
@@ -12,8 +12,8 @@ struct intel_pmic_opregion_data {
int (*update_power)(struct regmap *r, int reg, int bit, bool on);
int (*get_raw_temp)(struct regmap *r, int reg);
int (*update_aux)(struct regmap *r, int reg, int raw_temp);
- int (*get_policy)(struct regmap *r, int reg, u64 *value);
- int (*update_policy)(struct regmap *r, int reg, int enable);
+ int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value);
+ int (*update_policy)(struct regmap *r, int reg, int bit, int enable);
struct pmic_table *power_table;
int power_table_count;
struct pmic_table *thermal_table;
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
new file mode 100644
index 000000000000..90011aad4d20
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -0,0 +1,420 @@
+/*
+ * intel_pmic_bxtwc.c - Intel BXT WhiskeyCove PMIC operation region driver
+ *
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include "intel_pmic.h"
+
+#define WHISKEY_COVE_ALRT_HIGH_BIT_MASK 0x0F
+#define WHISKEY_COVE_ADC_HIGH_BIT(x) (((x & 0x0F) << 8))
+#define WHISKEY_COVE_ADC_CURSRC(x) (((x & 0xF0) >> 4))
+#define VR_MODE_DISABLED 0
+#define VR_MODE_AUTO BIT(0)
+#define VR_MODE_NORMAL BIT(1)
+#define VR_MODE_SWITCH BIT(2)
+#define VR_MODE_ECO (BIT(0)|BIT(1))
+#define VSWITCH2_OUTPUT BIT(5)
+#define VSWITCH1_OUTPUT BIT(4)
+#define VUSBPHY_CHARGE BIT(1)
+
+static struct pmic_table power_table[] = {
+ {
+ .address = 0x0,
+ .reg = 0x63,
+ .bit = VR_MODE_AUTO,
+ }, /* VDD1 -> VDD1CNT */
+ {
+ .address = 0x04,
+ .reg = 0x65,
+ .bit = VR_MODE_AUTO,
+ }, /* VDD2 -> VDD2CNT */
+ {
+ .address = 0x08,
+ .reg = 0x67,
+ .bit = VR_MODE_AUTO,
+ }, /* VDD3 -> VDD3CNT */
+ {
+ .address = 0x0c,
+ .reg = 0x6d,
+ .bit = VR_MODE_AUTO,
+ }, /* VLFX -> VFLEXCNT */
+ {
+ .address = 0x10,
+ .reg = 0x6f,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1A -> VPROG1ACNT */
+ {
+ .address = 0x14,
+ .reg = 0x70,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1B -> VPROG1BCNT */
+ {
+ .address = 0x18,
+ .reg = 0x71,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1C -> VPROG1CCNT */
+ {
+ .address = 0x1c,
+ .reg = 0x72,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1D -> VPROG1DCNT */
+ {
+ .address = 0x20,
+ .reg = 0x73,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP2A -> VPROG2ACNT */
+ {
+ .address = 0x24,
+ .reg = 0x74,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP2B -> VPROG2BCNT */
+ {
+ .address = 0x28,
+ .reg = 0x75,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP2C -> VPROG2CCNT */
+ {
+ .address = 0x2c,
+ .reg = 0x76,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP3A -> VPROG3ACNT */
+ {
+ .address = 0x30,
+ .reg = 0x77,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP3B -> VPROG3BCNT */
+ {
+ .address = 0x34,
+ .reg = 0x78,
+ .bit = VSWITCH2_OUTPUT,
+	}, /* VSW2 -> VLD0CNT Bit 5 */
+ {
+ .address = 0x38,
+ .reg = 0x78,
+ .bit = VSWITCH1_OUTPUT,
+ }, /* VSW1 -> VLD0CNT Bit 4 */
+ {
+ .address = 0x3c,
+ .reg = 0x78,
+ .bit = VUSBPHY_CHARGE,
+ }, /* VUPY -> VLDOCNT Bit 1 */
+ {
+ .address = 0x40,
+ .reg = 0x7b,
+ .bit = VR_MODE_NORMAL,
+	}, /* VRSO -> VREFSOCCNT */
+ {
+ .address = 0x44,
+ .reg = 0xA0,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1E -> VPROG1ECNT */
+ {
+ .address = 0x48,
+ .reg = 0xA1,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1F -> VPROG1FCNT */
+ {
+ .address = 0x4c,
+ .reg = 0xA2,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP2D -> VPROG2DCNT */
+ {
+ .address = 0x50,
+ .reg = 0xA3,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP4A -> VPROG4ACNT */
+ {
+ .address = 0x54,
+ .reg = 0xA4,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP4B -> VPROG4BCNT */
+ {
+ .address = 0x58,
+ .reg = 0xA5,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP4C -> VPROG4CCNT */
+ {
+ .address = 0x5c,
+ .reg = 0xA6,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP4D -> VPROG4DCNT */
+ {
+ .address = 0x60,
+ .reg = 0xA7,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP5A -> VPROG5ACNT */
+ {
+ .address = 0x64,
+ .reg = 0xA8,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP5B -> VPROG5BCNT */
+ {
+ .address = 0x68,
+ .reg = 0xA9,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP6A -> VPROG6ACNT */
+ {
+ .address = 0x6c,
+ .reg = 0xAA,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP6B -> VPROG6BCNT */
+ {
+ .address = 0x70,
+ .reg = 0x36,
+ .bit = BIT(2),
+ }, /* SDWN_N -> MODEMCTRL Bit 2 */
+ {
+ .address = 0x74,
+ .reg = 0x36,
+ .bit = BIT(0),
+ } /* MOFF -> MODEMCTRL Bit 0 */
+};
+
+static struct pmic_table thermal_table[] = {
+ {
+ .address = 0x00,
+ .reg = 0x4F39
+ },
+ {
+ .address = 0x04,
+ .reg = 0x4F24
+ },
+ {
+ .address = 0x08,
+ .reg = 0x4F26
+ },
+ {
+ .address = 0x0c,
+ .reg = 0x4F3B
+ },
+ {
+ .address = 0x10,
+ .reg = 0x4F28
+ },
+ {
+ .address = 0x14,
+ .reg = 0x4F2A
+ },
+ {
+ .address = 0x18,
+ .reg = 0x4F3D
+ },
+ {
+ .address = 0x1c,
+ .reg = 0x4F2C
+ },
+ {
+ .address = 0x20,
+ .reg = 0x4F2E
+ },
+ {
+ .address = 0x24,
+ .reg = 0x4F3F
+ },
+ {
+ .address = 0x28,
+ .reg = 0x4F30
+ },
+ {
+ .address = 0x30,
+ .reg = 0x4F41
+ },
+ {
+ .address = 0x34,
+ .reg = 0x4F32
+ },
+ {
+ .address = 0x3c,
+ .reg = 0x4F43
+ },
+ {
+ .address = 0x40,
+ .reg = 0x4F34
+ },
+ {
+ .address = 0x48,
+ .reg = 0x4F6A,
+ .bit = 0,
+ },
+ {
+ .address = 0x4C,
+ .reg = 0x4F6A,
+ .bit = 1
+ },
+ {
+ .address = 0x50,
+ .reg = 0x4F6A,
+ .bit = 2
+ },
+ {
+ .address = 0x54,
+ .reg = 0x4F6A,
+ .bit = 4
+ },
+ {
+ .address = 0x58,
+ .reg = 0x4F6A,
+ .bit = 5
+ },
+ {
+ .address = 0x5C,
+ .reg = 0x4F6A,
+ .bit = 3
+ },
+};
+
+static int intel_bxtwc_pmic_get_power(struct regmap *regmap, int reg,
+ int bit, u64 *value)
+{
+ int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = (data & bit) ? 1 : 0;
+ return 0;
+}
+
+static int intel_bxtwc_pmic_update_power(struct regmap *regmap, int reg,
+ int bit, bool on)
+{
+ u8 val, mask = bit;
+
+ if (on)
+ val = 0xFF;
+ else
+ val = 0x0;
+
+ return regmap_update_bits(regmap, reg, mask, val);
+}
+
+static int intel_bxtwc_pmic_get_raw_temp(struct regmap *regmap, int reg)
+{
+ unsigned int val, adc_val, reg_val;
+ u8 temp_l, temp_h, cursrc;
+ unsigned long rlsb;
+ static const unsigned long rlsb_array[] = {
+ 0, 260420, 130210, 65100, 32550, 16280,
+ 8140, 4070, 2030, 0, 260420, 130210 };
+
+ if (regmap_read(regmap, reg, &val))
+ return -EIO;
+ temp_l = (u8) val;
+
+ if (regmap_read(regmap, (reg - 1), &val))
+ return -EIO;
+ temp_h = (u8) val;
+
+ reg_val = temp_l | WHISKEY_COVE_ADC_HIGH_BIT(temp_h);
+ cursrc = WHISKEY_COVE_ADC_CURSRC(temp_h);
+ rlsb = rlsb_array[cursrc];
+ adc_val = reg_val * rlsb / 1000;
+
+ return adc_val;
+}
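The read above reassembles a 12-bit ADC sample split across two registers: the low byte lives at reg, the high nibble in bits 3:0 of reg - 1, and bits 7:4 of that same byte select the current source, which indexes rlsb_array (a resolution table in micro-units, hence the final division by 1000). Worked through for illustration: temp_h = 0x23 and temp_l = 0x45 give reg_val = 0x345 = 837, cursrc = 2, rlsb = 130210, so adc_val = 837 * 130210 / 1000 = 108985.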
+
+static int
+intel_bxtwc_pmic_update_aux(struct regmap *regmap, int reg, int raw)
+{
+ u32 bsr_num;
+ u16 resi_val, count = 0, thrsh = 0;
+ u8 alrt_h, alrt_l, cursel = 0;
+
+ bsr_num = raw;
+ bsr_num /= (1 << 5);
+
+ count = fls(bsr_num) - 1;
+
+ cursel = clamp_t(s8, (count - 7), 0, 7);
+ thrsh = raw / (1 << (4 + cursel));
+
+ resi_val = (cursel << 9) | thrsh;
+ alrt_h = (resi_val >> 8) & WHISKEY_COVE_ALRT_HIGH_BIT_MASK;
+ if (regmap_update_bits(regmap,
+ reg - 1,
+ WHISKEY_COVE_ALRT_HIGH_BIT_MASK,
+ alrt_h))
+ return -EIO;
+
+ alrt_l = (u8)resi_val;
+ return regmap_write(regmap, reg, alrt_l);
+}
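The alert-threshold encoding runs the same mapping in reverse: the raw value is scaled down by 2^5 to pick the smallest current source whose range covers it, then requantized at that source's resolution. Worked through for raw = 10000: bsr_num = 312, count = fls(312) - 1 = 8, cursel = clamp(8 - 7, 0, 7) = 1, thrsh = 10000 / 2^(4 + 1) = 312, resi_val = (1 << 9) | 312 = 824, so alrt_h = 3 and alrt_l = 0x38.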
+
+static int
+intel_bxtwc_pmic_get_policy(struct regmap *regmap, int reg, int bit, u64 *value)
+{
+ u8 mask = BIT(bit);
+ unsigned int val;
+
+ if (regmap_read(regmap, reg, &val))
+ return -EIO;
+
+ *value = (val & mask) >> bit;
+ return 0;
+}
+
+static int
+intel_bxtwc_pmic_update_policy(struct regmap *regmap,
+ int reg, int bit, int enable)
+{
+ u8 mask = BIT(bit), val = enable << bit;
+
+ return regmap_update_bits(regmap, reg, mask, val);
+}
+
+static struct intel_pmic_opregion_data intel_bxtwc_pmic_opregion_data = {
+ .get_power = intel_bxtwc_pmic_get_power,
+ .update_power = intel_bxtwc_pmic_update_power,
+ .get_raw_temp = intel_bxtwc_pmic_get_raw_temp,
+ .update_aux = intel_bxtwc_pmic_update_aux,
+ .get_policy = intel_bxtwc_pmic_get_policy,
+ .update_policy = intel_bxtwc_pmic_update_policy,
+ .power_table = power_table,
+ .power_table_count = ARRAY_SIZE(power_table),
+ .thermal_table = thermal_table,
+ .thermal_table_count = ARRAY_SIZE(thermal_table),
+};
+
+static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+
+ return intel_pmic_install_opregion_handler(&pdev->dev,
+ ACPI_HANDLE(pdev->dev.parent),
+ pmic->regmap,
+ &intel_bxtwc_pmic_opregion_data);
+}
+
+static struct platform_device_id bxt_wc_opregion_id_table[] = {
+ { .name = "bxt_wcove_region" },
+ {},
+};
+
+static struct platform_driver intel_bxtwc_pmic_opregion_driver = {
+ .probe = intel_bxtwc_pmic_opregion_probe,
+ .driver = {
+ .name = "bxt_whiskey_cove_pmic",
+ },
+ .id_table = bxt_wc_opregion_id_table,
+};
+
+static int __init intel_bxtwc_pmic_opregion_driver_init(void)
+{
+ return platform_driver_register(&intel_bxtwc_pmic_opregion_driver);
+}
+device_initcall(intel_bxtwc_pmic_opregion_driver_init);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index fcd1852dcdee..d7f1761ab1bc 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -141,7 +141,8 @@ static int intel_crc_pmic_update_aux(struct regmap *regmap, int reg, int raw)
regmap_update_bits(regmap, reg - 1, 0x3, raw >> 8) ? -EIO : 0;
}
-static int intel_crc_pmic_get_policy(struct regmap *regmap, int reg, u64 *value)
+static int intel_crc_pmic_get_policy(struct regmap *regmap,
+ int reg, int bit, u64 *value)
{
int pen;
@@ -152,7 +153,7 @@ static int intel_crc_pmic_get_policy(struct regmap *regmap, int reg, u64 *value)
}
static int intel_crc_pmic_update_policy(struct regmap *regmap,
- int reg, int enable)
+ int reg, int bit, int enable)
{
int alert0;
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 6a082d4de12c..e6e991ac20f3 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -13,7 +13,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/mfd/axp20x.h>
#include <linux/regmap.h>
@@ -262,7 +262,4 @@ static int __init intel_xpower_pmic_opregion_driver_init(void)
{
return platform_driver_register(&intel_xpower_pmic_opregion_driver);
}
-module_init(intel_xpower_pmic_opregion_driver_init);
-
-MODULE_DESCRIPTION("XPower AXP288 ACPI operation region driver");
-MODULE_LICENSE("GPL");
+device_initcall(intel_xpower_pmic_opregion_driver_init);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 33a38d604630..9125d7d96372 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -108,13 +108,12 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
return -EINVAL;
}
-static phys_cpuid_t map_madt_entry(int type, u32 acpi_id)
+static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
+ int type, u32 acpi_id)
{
unsigned long madt_end, entry;
phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */
- struct acpi_table_madt *madt;
- madt = get_madt_table();
if (!madt)
return phys_id;
@@ -145,6 +144,25 @@ static phys_cpuid_t map_madt_entry(int type, u32 acpi_id)
return phys_id;
}
+phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
+{
+ struct acpi_table_madt *madt = NULL;
+ acpi_size tbl_size;
+ phys_cpuid_t rv;
+
+ acpi_get_table_with_size(ACPI_SIG_MADT, 0,
+ (struct acpi_table_header **)&madt,
+ &tbl_size);
+ if (!madt)
+ return PHYS_CPUID_INVALID;
+
+ rv = map_madt_entry(madt, 1, acpi_id);
+
+ early_acpi_os_unmap_memory(madt, tbl_size);
+
+ return rv;
+}
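acpi_map_madt_entry() lets early boot code translate an ACPI processor UID into a physical CPU id before the ACPICA table manager is fully initialized: it maps the MADT itself via acpi_get_table_with_size() and unmaps it right after the lookup. The intended call pattern, sketched (not copied from a specific caller):

	phys_cpuid_t phys_id = acpi_map_madt_entry(acpi_id);

	if (invalid_phys_cpuid(phys_id))
		pr_warn("acpi_id %u has no MADT entry\n", acpi_id);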
+
static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -185,7 +203,7 @@ phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
phys_id = map_mat_entry(handle, type, acpi_id);
if (invalid_phys_cpuid(phys_id))
- phys_id = map_madt_entry(type, acpi_id);
+ phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
return phys_id;
}
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index d2fa8cb82d2b..0ca14ac7bb28 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -90,7 +90,7 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
pr->performance_platform_limit);
break;
case ACPI_PROCESSOR_NOTIFY_POWER:
- acpi_processor_cst_has_changed(pr);
+ acpi_processor_power_state_has_changed(pr);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 444e3745c8b3..cea52528aa18 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -59,6 +59,12 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
+struct cpuidle_driver acpi_idle_driver = {
+ .name = "acpi_idle",
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
@@ -297,7 +303,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *cst;
-
if (nocst)
return -ENODEV;
@@ -570,7 +575,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
return (working);
}
-static int acpi_processor_get_power_info(struct acpi_processor *pr)
+static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
unsigned int i;
int result;
@@ -804,36 +809,12 @@ static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
acpi_idle_do_entry(cx);
}
-struct cpuidle_driver acpi_idle_driver = {
- .name = "acpi_idle",
- .owner = THIS_MODULE,
-};
-
-/**
- * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
- * device i.e. per-cpu data
- *
- * @pr: the ACPI processor
- * @dev : the cpuidle device
- */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
struct cpuidle_device *dev)
{
int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
- if (!pr->flags.power_setup_done)
- return -EINVAL;
-
- if (pr->flags.power == 0) {
- return -EINVAL;
- }
-
- if (!dev)
- return -EINVAL;
-
- dev->cpu = pr->id;
-
if (max_cstate == 0)
max_cstate = 1;
@@ -856,31 +837,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
return 0;
}
-/**
- * acpi_processor_setup_cpuidle states- prepares and configures cpuidle
- * global state data i.e. idle routines
- *
- * @pr: the ACPI processor
- */
-static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
struct cpuidle_driver *drv = &acpi_idle_driver;
- if (!pr->flags.power_setup_done)
- return -EINVAL;
-
- if (pr->flags.power == 0)
- return -EINVAL;
-
- drv->safe_state_index = -1;
- for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
- drv->states[i].name[0] = '\0';
- drv->states[i].desc[0] = '\0';
- }
-
if (max_cstate == 0)
max_cstate = 1;
@@ -892,7 +855,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
state = &drv->states[count];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
- strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor;
state->enter = acpi_idle_enter;
@@ -925,6 +888,450 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
return 0;
}
+static inline void acpi_processor_cstate_first_run_checks(void)
+{
+ acpi_status status;
+ static int first_run;
+
+ if (first_run)
+ return;
+ dmi_check_system(processor_power_dmi_table);
+ max_cstate = acpi_processor_cstate_check(max_cstate);
+ if (max_cstate < ACPI_C_STATES_MAX)
+ pr_notice("ACPI: processor limited to max C-state %d\n",
+ max_cstate);
+ first_run++;
+
+ if (acpi_gbl_FADT.cst_control && !nocst) {
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ acpi_gbl_FADT.cst_control, 8);
+ if (ACPI_FAILURE(status))
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Notifying BIOS of _CST ability failed"));
+ }
+}
+#else
+
+static inline int disabled_by_idle_boot_param(void) { return 0; }
+static inline void acpi_processor_cstate_first_run_checks(void) { }
+static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
+
+static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
+ struct cpuidle_device *dev)
+{
+ return -EINVAL;
+}
+
+static int acpi_processor_setup_cstates(struct acpi_processor *pr)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
+
+struct acpi_lpi_states_array {
+ unsigned int size;
+ unsigned int composite_states_size;
+ struct acpi_lpi_state *entries;
+ struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
+};
+
+static int obj_get_integer(union acpi_object *obj, u32 *value)
+{
+ if (obj->type != ACPI_TYPE_INTEGER)
+ return -EINVAL;
+
+ *value = obj->integer.value;
+ return 0;
+}
+
+static int acpi_processor_evaluate_lpi(acpi_handle handle,
+ struct acpi_lpi_states_array *info)
+{
+ acpi_status status;
+ int ret = 0;
+ int pkg_count, state_idx = 1, loop;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *lpi_data;
+ struct acpi_lpi_state *lpi_state;
+
+ status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
+ return -ENODEV;
+ }
+
+ lpi_data = buffer.pointer;
+
+ /* There must be at least 4 elements = 3 elements + 1 package */
+ if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
+ lpi_data->package.count < 4) {
+ pr_debug("not enough elements in _LPI\n");
+ ret = -ENODATA;
+ goto end;
+ }
+
+ pkg_count = lpi_data->package.elements[2].integer.value;
+
+ /* Validate number of power states. */
+ if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
+ pr_debug("count given by _LPI is not valid\n");
+ ret = -ENODATA;
+ goto end;
+ }
+
+ lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
+ if (!lpi_state) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ info->size = pkg_count;
+ info->entries = lpi_state;
+
+ /* LPI States start at index 3 */
+ for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
+ union acpi_object *element, *pkg_elem, *obj;
+
+ element = &lpi_data->package.elements[loop];
+ if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
+ continue;
+
+ pkg_elem = element->package.elements;
+
+ obj = pkg_elem + 6;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ struct acpi_power_register *reg;
+
+ reg = (struct acpi_power_register *)obj->buffer.pointer;
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
+ reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
+ continue;
+
+ lpi_state->address = reg->address;
+ lpi_state->entry_method =
+ reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
+ ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ lpi_state->entry_method = ACPI_CSTATE_INTEGER;
+ lpi_state->address = obj->integer.value;
+ } else {
+ continue;
+ }
+
+	/* elements[7,8] skipped for now, i.e. the Residency/Usage counters */
+
+ obj = pkg_elem + 9;
+ if (obj->type == ACPI_TYPE_STRING)
+ strlcpy(lpi_state->desc, obj->string.pointer,
+ ACPI_CX_DESC_LEN);
+
+ lpi_state->index = state_idx;
+ if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
+ pr_debug("No min. residency found, assuming 10 us\n");
+ lpi_state->min_residency = 10;
+ }
+
+ if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
+ pr_debug("No wakeup residency found, assuming 10 us\n");
+ lpi_state->wake_latency = 10;
+ }
+
+ if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
+ lpi_state->flags = 0;
+
+ if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
+ lpi_state->arch_flags = 0;
+
+ if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
+ lpi_state->res_cnt_freq = 1;
+
+ if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
+ lpi_state->enable_parent_state = 0;
+ }
+
+ acpi_handle_debug(handle, "Found %d power states\n", state_idx);
+end:
+ kfree(buffer.pointer);
+ return ret;
+}
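For orientation, the _LPI object parsed above has the ACPI 6.0 shape: three header integers (revision, level ID, state count) followed by one sub-package per state. The element indices the code touches map as sketched below; elements 7 and 8, the residency and usage counters, are skipped:

	/*
	 * _LPI state sub-package layout (ACPI 6.0), as indexed above:
	 *   [0] minimum residency (us)   [1] worst-case wake latency (us)
	 *   [2] flags                    [3] arch context-lost flags
	 *   [4] residency counter freq   [5] enabled parent state
	 *   [6] entry method: register Buffer (SYSTEM_IO/FFH) or Integer
	 *   [9] name String, copied into lpi_state->desc
	 */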
+
+/*
+ * flat_state_cnt - the number of composite LPI states after the process of flattening
+ */
+static int flat_state_cnt;
+
+/**
+ * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
+ *
+ * @local: local LPI state
+ * @parent: parent LPI state
+ * @result: composite LPI state
+ */
+static bool combine_lpi_states(struct acpi_lpi_state *local,
+ struct acpi_lpi_state *parent,
+ struct acpi_lpi_state *result)
+{
+ if (parent->entry_method == ACPI_CSTATE_INTEGER) {
+ if (!parent->address) /* 0 means autopromotable */
+ return false;
+ result->address = local->address + parent->address;
+ } else {
+ result->address = parent->address;
+ }
+
+ result->min_residency = max(local->min_residency, parent->min_residency);
+ result->wake_latency = local->wake_latency + parent->wake_latency;
+ result->enable_parent_state = parent->enable_parent_state;
+ result->entry_method = local->entry_method;
+
+ result->flags = parent->flags;
+ result->arch_flags = parent->arch_flags;
+ result->index = parent->index;
+
+ strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
+ strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
+ strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
+ return true;
+}
+
+#define ACPI_LPI_STATE_FLAGS_ENABLED BIT(0)
+
+static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
+ struct acpi_lpi_state *t)
+{
+ curr_level->composite_states[curr_level->composite_states_size++] = t;
+}
+
+static int flatten_lpi_states(struct acpi_processor *pr,
+ struct acpi_lpi_states_array *curr_level,
+ struct acpi_lpi_states_array *prev_level)
+{
+ int i, j, state_count = curr_level->size;
+ struct acpi_lpi_state *p, *t = curr_level->entries;
+
+ curr_level->composite_states_size = 0;
+ for (j = 0; j < state_count; j++, t++) {
+ struct acpi_lpi_state *flpi;
+
+ if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
+ continue;
+
+ if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
+ pr_warn("Limiting number of LPI states to max (%d)\n",
+ ACPI_PROCESSOR_MAX_POWER);
+ pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
+ break;
+ }
+
+ flpi = &pr->power.lpi_states[flat_state_cnt];
+
+ if (!prev_level) { /* leaf/processor node */
+ memcpy(flpi, t, sizeof(*t));
+ stash_composite_state(curr_level, flpi);
+ flat_state_cnt++;
+ continue;
+ }
+
+ for (i = 0; i < prev_level->composite_states_size; i++) {
+ p = prev_level->composite_states[i];
+ if (t->index <= p->enable_parent_state &&
+ combine_lpi_states(p, t, flpi)) {
+ stash_composite_state(curr_level, flpi);
+ flat_state_cnt++;
+ flpi++;
+ }
+ }
+ }
+
+ kfree(curr_level->entries);
+ return 0;
+}
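To make the flattening concrete: if a core advertises C1 with enable_parent_state = 1 and its cluster advertises a state of index 1, the leaf pass stashes C1 as a composite, and the cluster pass combines the two into "C1+<cluster state>", taking the larger min_residency, the summed wake_latency, and the parent's flags, exactly as combine_lpi_states() does above. A cluster state whose index exceeds the child's enable_parent_state is never combined with that child.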
+
+static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
+{
+ int ret, i;
+ acpi_status status;
+ acpi_handle handle = pr->handle, pr_ahandle;
+ struct acpi_device *d = NULL;
+ struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+
+ if (!osc_pc_lpi_support_confirmed)
+ return -EOPNOTSUPP;
+
+ if (!acpi_has_method(handle, "_LPI"))
+ return -EINVAL;
+
+ flat_state_cnt = 0;
+ prev = &info[0];
+ curr = &info[1];
+ handle = pr->handle;
+ ret = acpi_processor_evaluate_lpi(handle, prev);
+ if (ret)
+ return ret;
+ flatten_lpi_states(pr, prev, NULL);
+
+ status = acpi_get_parent(handle, &pr_ahandle);
+ while (ACPI_SUCCESS(status)) {
+ acpi_bus_get_device(pr_ahandle, &d);
+ handle = pr_ahandle;
+
+ if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
+ break;
+
+		/* can be optional? */
+ if (!acpi_has_method(handle, "_LPI"))
+ break;
+
+ ret = acpi_processor_evaluate_lpi(handle, curr);
+ if (ret)
+ break;
+
+ /* flatten all the LPI states in this level of hierarchy */
+ flatten_lpi_states(pr, curr, prev);
+
+ tmp = prev, prev = curr, curr = tmp;
+
+ status = acpi_get_parent(handle, &pr_ahandle);
+ }
+
+ pr->power.count = flat_state_cnt;
+ /* reset the index after flattening */
+ for (i = 0; i < pr->power.count; i++)
+ pr->power.lpi_states[i].index = i;
+
+ /* Tell driver that _LPI is supported. */
+ pr->flags.has_lpi = 1;
+ pr->flags.power = 1;
+
+ return 0;
+}
+
+int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return -ENODEV;
+}
+
+int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+{
+ return -ENODEV;
+}
+
+/**
+ * acpi_idle_lpi_enter - enters an ACPI LPI state
+ * @dev: the target CPU
+ * @drv: cpuidle driver containing cpuidle state info
+ * @index: index of target state
+ *
+ * Return: 0 for success or negative value for error
+ */
+static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ struct acpi_processor *pr;
+ struct acpi_lpi_state *lpi;
+
+ pr = __this_cpu_read(processors);
+
+ if (unlikely(!pr))
+ return -EINVAL;
+
+ lpi = &pr->power.lpi_states[index];
+ if (lpi->entry_method == ACPI_CSTATE_FFH)
+ return acpi_processor_ffh_lpi_enter(lpi);
+
+ return -EINVAL;
+}
+
+static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
+{
+ int i;
+ struct acpi_lpi_state *lpi;
+ struct cpuidle_state *state;
+ struct cpuidle_driver *drv = &acpi_idle_driver;
+
+ if (!pr->flags.has_lpi)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
+ lpi = &pr->power.lpi_states[i];
+
+ state = &drv->states[i];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
+ strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
+ state->exit_latency = lpi->wake_latency;
+ state->target_residency = lpi->min_residency;
+ if (lpi->arch_flags)
+ state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ state->enter = acpi_idle_lpi_enter;
+ drv->safe_state_index = i;
+ }
+
+ drv->state_count = i;
+
+ return 0;
+}
+
+/**
+ * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
+ * global state data i.e. idle routines
+ *
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+{
+ int i;
+ struct cpuidle_driver *drv = &acpi_idle_driver;
+
+ if (!pr->flags.power_setup_done || !pr->flags.power)
+ return -EINVAL;
+
+ drv->safe_state_index = -1;
+ for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
+ drv->states[i].name[0] = '\0';
+ drv->states[i].desc[0] = '\0';
+ }
+
+ if (pr->flags.has_lpi)
+ return acpi_processor_setup_lpi_states(pr);
+
+ return acpi_processor_setup_cstates(pr);
+}
+
+/**
+ * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
+ * device i.e. per-cpu data
+ *
+ * @pr: the ACPI processor
+ * @dev : the cpuidle device
+ */
+static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
+ struct cpuidle_device *dev)
+{
+ if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
+ return -EINVAL;
+
+ dev->cpu = pr->id;
+ if (pr->flags.has_lpi)
+ return acpi_processor_ffh_lpi_probe(pr->id);
+
+ return acpi_processor_setup_cpuidle_cx(pr, dev);
+}
+
+static int acpi_processor_get_power_info(struct acpi_processor *pr)
+{
+ int ret;
+
+ ret = acpi_processor_get_lpi_info(pr);
+ if (ret)
+ ret = acpi_processor_get_cstate_info(pr);
+
+ return ret;
+}
+
int acpi_processor_hotplug(struct acpi_processor *pr)
{
int ret = 0;
@@ -933,18 +1340,15 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return 0;
- if (nocst)
- return -ENODEV;
-
if (!pr->flags.power_setup_done)
return -ENODEV;
dev = per_cpu(acpi_cpuidle_device, pr->id);
cpuidle_pause_and_lock();
cpuidle_disable_device(dev);
- acpi_processor_get_power_info(pr);
- if (pr->flags.power) {
- acpi_processor_setup_cpuidle_cx(pr, dev);
+ ret = acpi_processor_get_power_info(pr);
+ if (!ret && pr->flags.power) {
+ acpi_processor_setup_cpuidle_dev(pr, dev);
ret = cpuidle_enable_device(dev);
}
cpuidle_resume_and_unlock();
@@ -952,7 +1356,7 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
return ret;
}
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
int cpu;
struct acpi_processor *_pr;
@@ -961,9 +1365,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return 0;
- if (nocst)
- return -ENODEV;
-
if (!pr->flags.power_setup_done)
return -ENODEV;
@@ -1000,7 +1401,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
acpi_processor_get_power_info(_pr);
if (_pr->flags.power) {
dev = per_cpu(acpi_cpuidle_device, cpu);
- acpi_processor_setup_cpuidle_cx(_pr, dev);
+ acpi_processor_setup_cpuidle_dev(_pr, dev);
cpuidle_enable_device(dev);
}
}
@@ -1015,35 +1416,16 @@ static int acpi_processor_registered;
int acpi_processor_power_init(struct acpi_processor *pr)
{
- acpi_status status;
int retval;
struct cpuidle_device *dev;
- static int first_run;
if (disabled_by_idle_boot_param())
return 0;
- if (!first_run) {
- dmi_check_system(processor_power_dmi_table);
- max_cstate = acpi_processor_cstate_check(max_cstate);
- if (max_cstate < ACPI_C_STATES_MAX)
- printk(KERN_NOTICE
- "ACPI: processor limited to max C-state %d\n",
- max_cstate);
- first_run++;
- }
-
- if (acpi_gbl_FADT.cst_control && !nocst) {
- status =
- acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Notifying BIOS of _CST ability failed"));
- }
- }
+ acpi_processor_cstate_first_run_checks();
- acpi_processor_get_power_info(pr);
- pr->flags.power_setup_done = 1;
+ if (!acpi_processor_get_power_info(pr))
+ pr->flags.power_setup_done = 1;
/*
* Install the idle handler if processor power management is supported.
@@ -1066,7 +1448,7 @@ int acpi_processor_power_init(struct acpi_processor *pr)
return -ENOMEM;
per_cpu(acpi_cpuidle_device, pr->id) = dev;
- acpi_processor_setup_cpuidle_cx(pr, dev);
+ acpi_processor_setup_cpuidle_dev(pr, dev);
/* Register per-cpu cpuidle_device. Cpuidle driver
* must already be registered before registering device
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5f28cf778349..405056b95b05 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -494,6 +494,8 @@ static void acpi_device_del(struct acpi_device *device)
device_del(&device->dev);
}
+static BLOCKING_NOTIFIER_HEAD(acpi_reconfig_chain);
+
static LIST_HEAD(acpi_device_del_list);
static DEFINE_MUTEX(acpi_device_del_lock);
@@ -514,6 +516,9 @@ static void acpi_device_del_work_fn(struct work_struct *work_not_used)
mutex_unlock(&acpi_device_del_lock);
+ blocking_notifier_call_chain(&acpi_reconfig_chain,
+ ACPI_RECONFIG_DEVICE_REMOVE, adev);
+
acpi_device_del(adev);
/*
* Drop references to all power resources that might have been
@@ -1406,7 +1411,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
acpi_bus_get_flags(device);
device->flags.match_driver = false;
device->flags.initialized = true;
- device->flags.visited = false;
+ acpi_device_clear_enumerated(device);
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
acpi_init_coherency(device);
@@ -1676,15 +1681,20 @@ static void acpi_default_enumeration(struct acpi_device *device)
bool is_spi_i2c_slave = false;
/*
- * Do not enemerate SPI/I2C slaves as they will be enuerated by their
+ * Do not enumerate SPI/I2C slaves as they will be enumerated by their
* respective parents.
*/
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
&is_spi_i2c_slave);
acpi_dev_free_resource_list(&resource_list);
- if (!is_spi_i2c_slave)
+ if (!is_spi_i2c_slave) {
acpi_create_platform_device(device);
+ acpi_device_set_enumerated(device);
+ } else {
+ blocking_notifier_call_chain(&acpi_reconfig_chain,
+ ACPI_RECONFIG_DEVICE_ADD, device);
+ }
}
static const struct acpi_device_id generic_device_ids[] = {
@@ -1751,7 +1761,7 @@ static void acpi_bus_attach(struct acpi_device *device)
acpi_bus_get_status(device);
/* Skip devices that are not present. */
if (!acpi_device_is_present(device)) {
- device->flags.visited = false;
+ acpi_device_clear_enumerated(device);
device->flags.power_manageable = 0;
return;
}
@@ -1766,7 +1776,7 @@ static void acpi_bus_attach(struct acpi_device *device)
device->flags.initialized = true;
}
- device->flags.visited = false;
+
ret = acpi_scan_attach_handler(device);
if (ret < 0)
return;
@@ -1780,7 +1790,6 @@ static void acpi_bus_attach(struct acpi_device *device)
if (!ret && device->pnp.type.platform_id)
acpi_default_enumeration(device);
}
- device->flags.visited = true;
ok:
list_for_each_entry(child, &device->children, node)
@@ -1872,7 +1881,7 @@ void acpi_bus_trim(struct acpi_device *adev)
*/
acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
adev->flags.initialized = false;
- adev->flags.visited = false;
+ acpi_device_clear_enumerated(adev);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
@@ -1916,6 +1925,8 @@ static int acpi_bus_scan_fixed(void)
return result < 0 ? result : 0;
}
+static bool acpi_scan_initialized;
+
int __init acpi_scan_init(void)
{
int result;
@@ -1960,6 +1971,8 @@ int __init acpi_scan_init(void)
acpi_update_all_gpes();
+ acpi_scan_initialized = true;
+
out:
mutex_unlock(&acpi_scan_lock);
return result;
@@ -2003,3 +2016,57 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
return count;
}
+
+struct acpi_table_events_work {
+ struct work_struct work;
+ void *table;
+ u32 event;
+};
+
+static void acpi_table_events_fn(struct work_struct *work)
+{
+ struct acpi_table_events_work *tew;
+
+ tew = container_of(work, struct acpi_table_events_work, work);
+
+ if (tew->event == ACPI_TABLE_EVENT_LOAD) {
+ acpi_scan_lock_acquire();
+ acpi_bus_scan(ACPI_ROOT_OBJECT);
+ acpi_scan_lock_release();
+ }
+
+ kfree(tew);
+}
+
+void acpi_scan_table_handler(u32 event, void *table, void *context)
+{
+ struct acpi_table_events_work *tew;
+
+ if (!acpi_scan_initialized)
+ return;
+
+ if (event != ACPI_TABLE_EVENT_LOAD)
+ return;
+
+ tew = kmalloc(sizeof(*tew), GFP_KERNEL);
+ if (!tew)
+ return;
+
+ INIT_WORK(&tew->work, acpi_table_events_fn);
+ tew->table = table;
+ tew->event = event;
+
+ schedule_work(&tew->work);
+}
+
+int acpi_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_reconfig_chain, nb);
+}
+EXPORT_SYMBOL(acpi_reconfig_notifier_register);
+
+int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&acpi_reconfig_chain, nb);
+}
+EXPORT_SYMBOL(acpi_reconfig_notifier_unregister);
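Together with acpi_scan_table_handler(), these notifiers let interested drivers react when a runtime table load adds or removes devices; the rescan is deferred to a workqueue rather than run from the table-handler context. A usage sketch, with my_reconfig_cb as a hypothetical callback rather than an existing kernel symbol:

	static int my_reconfig_cb(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		struct acpi_device *adev = data;

		if (event == ACPI_RECONFIG_DEVICE_ADD)
			dev_info(&adev->dev, "enumerated via runtime table load\n");
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = { .notifier_call = my_reconfig_cb };
	/* ... acpi_reconfig_notifier_register(&my_nb); */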
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 7a2e4d45b266..2b38c1bb0446 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -47,15 +47,32 @@ static void acpi_sleep_tts_switch(u32 acpi_state)
}
}
-static int tts_notify_reboot(struct notifier_block *this,
+static void acpi_sleep_pts_switch(u32 acpi_state)
+{
+ acpi_status status;
+
+ status = acpi_execute_simple_method(NULL, "\\_PTS", acpi_state);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ /*
+	 * The OS can't evaluate the _PTS object correctly. A warning
+	 * message will be printed, but that won't break anything.
+ */
+ printk(KERN_NOTICE "Failure in evaluating _PTS object\n");
+ }
+}
+
+static int sleep_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
acpi_sleep_tts_switch(ACPI_STATE_S5);
+
+ acpi_sleep_pts_switch(ACPI_STATE_S5);
+
return NOTIFY_DONE;
}
-static struct notifier_block tts_notifier = {
- .notifier_call = tts_notify_reboot,
+static struct notifier_block sleep_notifier = {
+ .notifier_call = sleep_notify_reboot,
.next = NULL,
.priority = 0,
};
@@ -899,9 +916,9 @@ int __init acpi_sleep_init(void)
pr_info(PREFIX "(supports%s)\n", supported);
/*
- * Register the tts_notifier to reboot notifier list so that the _TTS
- * object can also be evaluated when the system enters S5.
+	 * Register sleep_notifier on the reboot notifier list so that the
+	 * _TTS and _PTS objects can also be evaluated when the system enters S5.
*/
- register_reboot_notifier(&tts_notifier);
+ register_reboot_notifier(&sleep_notifier);
return 0;
}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 4b3a9e27f1b6..358165e9f5b8 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -378,8 +378,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
return;
}
-static acpi_status
-acpi_sysfs_table_handler(u32 event, void *table, void *context)
+acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
struct acpi_table_attr *table_attr;
@@ -452,9 +451,8 @@ static int acpi_tables_sysfs_init(void)
kobject_uevent(tables_kobj, KOBJ_ADD);
kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
- status = acpi_install_table_handler(acpi_sysfs_table_handler, NULL);
- return ACPI_FAILURE(status) ? -EINVAL : 0;
+ return 0;
err_dynamic_tables:
kobject_put(tables_kobj);
err:
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index a372f9eaa15d..9f0ad6ebb368 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -34,6 +34,8 @@
#include <linux/bootmem.h>
#include <linux/earlycpio.h>
#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/acpi.h>
#include "internal.h"
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -481,8 +483,10 @@ static DECLARE_BITMAP(acpi_initrd_installed, NR_ACPI_INITRD_TABLES);
#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
-static void __init acpi_table_initrd_init(void *data, size_t size)
+void __init acpi_table_upgrade(void)
{
+ void *data = (void *)initrd_start;
+ size_t size = initrd_end - initrd_start;
int sig, no, table_nr = 0, total_offset = 0;
long offset = 0;
struct acpi_table_header *table;
@@ -540,7 +544,7 @@ static void __init acpi_table_initrd_init(void *data, size_t size)
return;
acpi_tables_addr =
- memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
+ memblock_find_in_range(0, ACPI_TABLE_UPGRADE_MAX_PHYS,
all_tables_size, PAGE_SIZE);
if (!acpi_tables_addr) {
WARN_ON(1);
@@ -578,10 +582,10 @@ static void __init acpi_table_initrd_init(void *data, size_t size)
clen = size;
if (clen > MAP_CHUNK_SIZE - slop)
clen = MAP_CHUNK_SIZE - slop;
- dest_p = early_ioremap(dest_addr & PAGE_MASK,
- clen + slop);
+ dest_p = early_memremap(dest_addr & PAGE_MASK,
+ clen + slop);
memcpy(dest_p + slop, src_p, clen);
- early_iounmap(dest_p, clen + slop);
+ early_memunmap(dest_p, clen + slop);
src_p += clen;
dest_addr += clen;
size -= clen;
@@ -696,10 +700,6 @@ next_table:
}
}
#else
-static void __init acpi_table_initrd_init(void *data, size_t size)
-{
-}
-
static acpi_status
acpi_table_initrd_override(struct acpi_table_header *existing_table,
acpi_physical_address *address,
@@ -742,11 +742,6 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
return AE_OK;
}
-void __init early_acpi_table_init(void *data, size_t size)
-{
- acpi_table_initrd_init(data, size);
-}
-
/*
* acpi_table_init()
*
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 82707f9824ca..f4ebe39539af 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1259,7 +1259,8 @@ static int __init acpi_thermal_init(void)
return -ENODEV;
}
- acpi_thermal_pm_queue = create_workqueue("acpi_thermal_pm");
+ acpi_thermal_pm_queue = alloc_workqueue("acpi_thermal_pm",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
if (!acpi_thermal_pm_queue)
return -ENODEV;
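A sketch of the allocation pattern adopted above, with hypothetical names: alloc_workqueue() replaces the legacy create_workqueue() so that flags can be passed explicitly.

#include <linux/init.h>
#include <linux/workqueue.h>

/* Hypothetical example mirroring the change above: WQ_MEM_RECLAIM
 * gives the queue a rescuer thread so it can make forward progress
 * under memory pressure; WQ_HIGHPRI queues its work at high priority. */
static struct workqueue_struct *example_pm_queue;

static int __init example_init(void)
{
        example_pm_queue = alloc_workqueue("example_pm",
                                           WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
        if (!example_pm_queue)
                return -ENOMEM;
        return 0;
}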
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index b4de130f2d57..22c09952e177 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -680,6 +680,9 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
u64 mask = 0;
union acpi_object *obj;
+ if (funcs == 0)
+ return false;
+
obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
if (!obj)
return false;
@@ -692,9 +695,6 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
ACPI_FREE(obj);
- if (funcs == 0)
- return true;
-
/*
* Bit 0 indicates whether there's support for any functions other than
* function 0 for the specified UUID and revision.
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 3d1327615f72..a6b36fc53aec 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -167,6 +167,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
},
},
+ {
+ .callback = video_detect_force_video,
+ .ident = "ThinkPad X201T",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"),
+ },
+ },
/* The native backlight controls do not work on some older machines */
{
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e2dc4c045146..2c8be74f401d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -98,12 +98,12 @@ config SATA_AHCI_PLATFORM
If unsure, say N.
-config AHCI_BRCMSTB
- tristate "Broadcom STB AHCI SATA support"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC
+config AHCI_BRCM
+ tristate "Broadcom AHCI SATA support"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP
help
This option enables support for the AHCI SATA3 controller found on
- STB SoC's.
+ Broadcom SoCs.
If unsure, say N.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 0b2afb7e5f35..a46e6b784bda 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
-obj-$(CONFIG_AHCI_BRCMSTB) += ahci_brcmstb.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_BRCM) += ahci_brcm.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index a83bbcc58b4c..90eabaf81215 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -580,7 +580,7 @@ static struct pci_driver ahci_pci_driver = {
},
};
-#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
+#if IS_ENABLED(CONFIG_PATA_MARVELL)
static int marvell_enable;
#else
static int marvell_enable = 1;
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcm.c
index e87bcec0fd7c..6f8a7341fa08 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcm.c
@@ -71,6 +71,12 @@
(DATA_ENDIAN << DMADESC_ENDIAN_SHIFT) | \
(MMIO_ENDIAN << MMIO_ENDIAN_SHIFT))
+enum brcm_ahci_version {
+ BRCM_SATA_BCM7425 = 1,
+ BRCM_SATA_BCM7445,
+ BRCM_SATA_NSP,
+};
+
enum brcm_ahci_quirks {
BRCM_AHCI_QUIRK_NO_NCQ = BIT(0),
BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1),
@@ -81,6 +87,7 @@ struct brcm_ahci_priv {
void __iomem *top_ctrl;
u32 port_mask;
u32 quirks;
+ enum brcm_ahci_version version;
};
static const struct ata_port_info ahci_brcm_port_info = {
@@ -247,9 +254,19 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
static void brcm_sata_init(struct brcm_ahci_priv *priv)
{
+ void __iomem *ctrl = priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL;
+
/* Configure endianness */
- brcm_sata_writereg(BUS_CTRL_ENDIAN_CONF,
- priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
+ if (priv->version == BRCM_SATA_NSP) {
+ u32 data = brcm_sata_readreg(ctrl);
+
+ data &= ~((0x03 << DMADATA_ENDIAN_SHIFT) |
+ (0x03 << DMADESC_ENDIAN_SHIFT));
+ data |= (0x02 << DMADATA_ENDIAN_SHIFT) |
+ (0x02 << DMADESC_ENDIAN_SHIFT);
+ brcm_sata_writereg(data, ctrl);
+ } else
+ brcm_sata_writereg(BUS_CTRL_ENDIAN_CONF, ctrl);
}
#ifdef CONFIG_PM_SLEEP
@@ -282,8 +299,17 @@ static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
+static const struct of_device_id ahci_of_match[] = {
+ {.compatible = "brcm,bcm7425-ahci", .data = (void *)BRCM_SATA_BCM7425},
+ {.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
+ {.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
static int brcm_ahci_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct brcm_ahci_priv *priv;
struct ahci_host_priv *hpriv;
@@ -293,6 +319,12 @@ static int brcm_ahci_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+
+ of_id = of_match_node(ahci_of_match, pdev->dev.of_node);
+ if (!of_id)
+ return -ENODEV;
+
+ priv->version = (enum brcm_ahci_version)of_id->data;
priv->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
@@ -300,7 +332,8 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
- if (of_device_is_compatible(dev->of_node, "brcm,bcm7425-ahci")) {
+ if ((priv->version == BRCM_SATA_BCM7425) ||
+ (priv->version == BRCM_SATA_NSP)) {
priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
}
@@ -354,13 +387,6 @@ static int brcm_ahci_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ahci_of_match[] = {
- {.compatible = "brcm,bcm7425-ahci"},
- {.compatible = "brcm,bcm7445-ahci"},
- {},
-};
-MODULE_DEVICE_TABLE(of, ahci_of_match);
-
static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
static struct platform_driver brcm_ahci_driver = {
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 71b07198e207..7461a587b39b 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1975,7 +1975,7 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
*/
pp->active_link = qc->dev->link;
- if (qc->tf.protocol == ATA_PROT_NCQ)
+ if (ata_is_ncq(qc->tf.protocol))
writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
@@ -2392,12 +2392,20 @@ static int ahci_port_start(struct ata_port *ap)
static void ahci_port_stop(struct ata_port *ap)
{
const char *emsg = NULL;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *host_mmio = hpriv->mmio;
int rc;
/* de-initialize port */
rc = ahci_deinit_port(ap, &emsg);
if (rc)
ata_port_warn(ap, "%s (%d)\n", emsg, rc);
+
+ /*
+ * Clear GHC.IS to prevent stuck INTx after disabling MSI and
+ * re-enabling INTx.
+ */
+ writel(1 << ap->port_no, host_mmio + HOST_IRQ_STAT);
}
void ahci_print_info(struct ata_host *host, const char *scc_s)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6be7770f68e9..67339b4f92f1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1238,7 +1238,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
} else
tf.command = ATA_CMD_READ_NATIVE_MAX;
- tf.protocol |= ATA_PROT_NODATA;
+ tf.protocol = ATA_PROT_NODATA;
tf.device |= ATA_LBA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
@@ -1297,7 +1297,7 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
tf.device |= (new_sectors >> 24) & 0xf;
}
- tf.protocol |= ATA_PROT_NODATA;
+ tf.protocol = ATA_PROT_NODATA;
tf.device |= ATA_LBA;
tf.lbal = (new_sectors >> 0) & 0xff;
@@ -4314,6 +4314,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+ /*
+ * Device times out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ */
+ { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
@@ -4842,7 +4848,7 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_link *link = qc->dev->link;
- if (qc->tf.protocol == ATA_PROT_NCQ) {
+ if (ata_is_ncq(qc->tf.protocol)) {
if (!ata_tag_valid(link->active_tag))
return 0;
} else {
@@ -5007,7 +5013,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
ata_sg_clean(qc);
/* command should be marked inactive atomically with qc completion */
- if (qc->tf.protocol == ATA_PROT_NCQ) {
+ if (ata_is_ncq(qc->tf.protocol)) {
link->sactive &= ~(1 << qc->tag);
if (!link->sactive)
ap->nr_active_links--;
@@ -5044,7 +5050,7 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
struct ata_device *dev = qc->dev;
- if (ata_is_nodata(qc->tf.protocol))
+ if (!ata_is_data(qc->tf.protocol))
return;
if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
@@ -5127,7 +5133,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
switch (qc->tf.command) {
case ATA_CMD_SET_FEATURES:
if (qc->tf.feature != SETFEATURES_WC_ON &&
- qc->tf.feature != SETFEATURES_WC_OFF)
+ qc->tf.feature != SETFEATURES_WC_OFF &&
+ qc->tf.feature != SETFEATURES_RA_ON &&
+ qc->tf.feature != SETFEATURES_RA_OFF)
break;
/* fall through */
case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c6f017458958..0e1ec37070d1 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2607,9 +2607,13 @@ static void ata_eh_link_report(struct ata_link *link)
[DMA_FROM_DEVICE] = "in",
};
static const char *prot_str[] = {
+ [ATA_PROT_UNKNOWN] = "unknown",
+ [ATA_PROT_NODATA] = "nodata",
[ATA_PROT_PIO] = "pio",
[ATA_PROT_DMA] = "dma",
- [ATA_PROT_NCQ] = "ncq",
+ [ATA_PROT_NCQ] = "ncq dma",
+ [ATA_PROT_NCQ_NODATA] = "ncq nodata",
+ [ATAPI_PROT_NODATA] = "nodata",
[ATAPI_PROT_PIO] = "pio",
[ATAPI_PROT_DMA] = "dma",
};
@@ -3177,7 +3181,7 @@ static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
}
tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
- tf.protocol |= ATA_PROT_NODATA;
+ tf.protocol = ATA_PROT_NODATA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (park && (err_mask || tf.lbal != 0xc4)) {
ata_dev_err(dev, "head unload failed!\n");
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index bfec66fb26e2..e207b33e4ce9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -304,7 +304,7 @@ static void ata_scsi_set_invalid_field(struct ata_device *dev,
struct scsi_cmnd *cmd, u16 field, u8 bit)
{
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0);
- /* "Invalid field in cbd" */
+ /* "Invalid field in CDB" */
scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
field, bit, 1);
}
@@ -1190,7 +1190,7 @@ static int atapi_drain_needed(struct request *rq)
if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
return 0;
- if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
+ if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
@@ -2075,8 +2075,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
0x03,
0x20, /* SBC-2 (no version claimed) */
- 0x02,
- 0x60 /* SPC-3 (no version claimed) */
+ 0x03,
+ 0x00 /* SPC-3 (no version claimed) */
};
const u8 versions_zbc[] = {
0x00,
@@ -2097,7 +2097,10 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
0,
0x5, /* claim SPC-3 version compatibility */
2,
- 95 - 4
+ 95 - 4,
+ 0,
+ 0,
+ 2
};
VPRINTK("ENTER\n");
@@ -2109,8 +2112,10 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
(args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
hdr[1] |= (1 << 7);
- if (args->dev->class == ATA_DEV_ZAC)
+ if (args->dev->class == ATA_DEV_ZAC) {
hdr[0] = TYPE_ZBC;
+ hdr[2] = 0x7; /* claim SPC-5 version compatibility */
+ }
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
@@ -2314,7 +2319,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
* with the unmap bit set.
*/
if (ata_id_has_trim(args->id)) {
- put_unaligned_be64(65535 * 512 / 8, &rbuf[36]);
+ put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]);
put_unaligned_be32(1, &rbuf[28]);
}
@@ -2424,15 +2429,17 @@ static void modecpy(u8 *dest, const u8 *src, int n, bool changeable)
static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
{
modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable);
- if (changeable || ata_id_wcache_enabled(id))
- buf[2] |= (1 << 2); /* write cache enable */
- if (!changeable && !ata_id_rahead_enabled(id))
- buf[12] |= (1 << 5); /* disable read ahead */
+ if (changeable) {
+ buf[2] |= (1 << 2); /* ata_mselect_caching() */
+ } else {
+ buf[2] |= (ata_id_wcache_enabled(id) << 2); /* write cache enable */
+ buf[12] |= (!ata_id_rahead_enabled(id) << 5); /* disable read ahead */
+ }
return sizeof(def_cache_mpage);
}
/**
- * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
+ * ata_msense_control - Simulate MODE SENSE control mode page
* @dev: ATA device of interest
* @buf: output buffer
* @changeable: whether changeable parameters are requested
@@ -2442,12 +2449,17 @@ static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
* LOCKING:
* None.
*/
-static unsigned int ata_msense_ctl_mode(struct ata_device *dev, u8 *buf,
+static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
bool changeable)
{
modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable);
- if (changeable && (dev->flags & ATA_DFLAG_D_SENSE))
- buf[2] |= (1 << 2); /* Descriptor sense requested */
+ if (changeable) {
+ buf[2] |= (1 << 2); /* ata_mselect_control() */
+ } else {
+ bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
+
+ buf[2] |= (d_sense << 2); /* descriptor format sense data */
+ }
return sizeof(def_control_mpage);
}
@@ -2566,13 +2578,13 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
break;
case CONTROL_MPAGE:
- p += ata_msense_ctl_mode(args->dev, p, page_control == 1);
+ p += ata_msense_control(args->dev, p, page_control == 1);
break;
case ALL_MPAGES:
p += ata_msense_rw_recovery(p, page_control == 1);
p += ata_msense_caching(args->id, p, page_control == 1);
- p += ata_msense_ctl_mode(args->dev, p, page_control == 1);
+ p += ata_msense_control(args->dev, p, page_control == 1);
break;
default: /* invalid page code */
@@ -3077,6 +3089,9 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
goto invalid_fld;
}
+ if (ata_is_ncq(tf->protocol) && (cdb[2] & 0x3) == 0)
+ tf->protocol = ATA_PROT_NCQ_NODATA;
+
/* enable LBA */
tf->flags |= ATA_TFLAG_LBA;
@@ -3125,8 +3140,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->command = cdb[9];
}
- /* For NCQ commands with FPDMA protocol, copy the tag value */
- if (tf->protocol == ATA_PROT_NCQ)
+ /* For NCQ commands, copy the tag value */
+ if (ata_is_ncq(tf->protocol))
tf->nsect = qc->tag << 3;
/* enforce correct master/slave bit */
@@ -3305,7 +3320,13 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
goto invalid_param_len;
buf = page_address(sg_page(scsi_sglist(scmd)));
- size = ata_set_lba_range_entries(buf, 512, block, n_block);
+
+ if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) {
+ size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block);
+ } else {
+ fp = 2;
+ goto invalid_fld;
+ }
if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
/* Newer devices support queued TRIM commands */
@@ -3454,7 +3475,7 @@ static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
goto invalid_param_len;
}
sect = n_block / 512;
- options = cdb[14];
+ options = cdb[14] & 0xbf;
if (ata_ncq_enabled(qc->dev) &&
ata_fpdma_zac_mgmt_in_supported(qc->dev)) {
@@ -3464,7 +3485,7 @@ static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
tf->nsect = qc->tag << 3;
tf->feature = sect & 0xff;
tf->hob_feature = (sect >> 8) & 0xff;
- tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
+ tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8);
} else {
tf->command = ATA_CMD_ZAC_MGMT_IN;
tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
@@ -3506,7 +3527,7 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
- u8 reset_all, sa;
+ u8 all, sa;
u64 block;
u32 n_block;
u16 fp = (u16)-1;
@@ -3533,20 +3554,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
if (block > dev->n_sectors)
goto out_of_range;
- reset_all = cdb[14] & 0x1;
+ all = cdb[14] & 0x1;
if (ata_ncq_enabled(qc->dev) &&
ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
- tf->protocol = ATA_PROT_NCQ;
+ tf->protocol = ATA_PROT_NCQ_NODATA;
tf->command = ATA_CMD_NCQ_NON_DATA;
- tf->hob_nsect = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
+ tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
tf->nsect = qc->tag << 3;
- tf->auxiliary = sa | (reset_all & 0x1) << 8;
+ tf->auxiliary = sa | ((u16)all << 8);
} else {
tf->protocol = ATA_PROT_NODATA;
tf->command = ATA_CMD_ZAC_MGMT_OUT;
tf->feature = sa;
- tf->hob_feature = reset_all & 0x1;
+ tf->hob_feature = all;
}
tf->lbah = (block >> 16) & 0xff;
tf->lbam = (block >> 8) & 0xff;
@@ -3667,7 +3688,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
/*
* Check that read-only bits are not modified.
*/
- ata_msense_ctl_mode(dev, mpage, false);
+ ata_msense_control(dev, mpage, false);
for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) {
if (i == 0)
continue;
@@ -4039,11 +4060,6 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
args.done = cmd->scsi_done;
switch(scsicmd[0]) {
- /* TODO: worth improving? */
- case FORMAT_UNIT:
- ata_scsi_invalid_field(dev, cmd, 0);
- break;
-
case INQUIRY:
if (scsicmd[1] & 2) /* is CmdDt set? */
ata_scsi_invalid_field(dev, cmd, 1);
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index e2d94972962d..7ef16c085058 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -495,12 +495,13 @@ struct ata_show_ering_arg {
static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg)
{
struct ata_show_ering_arg* arg = void_arg;
- struct timespec time;
+ u64 seconds;
+ u32 rem;
- jiffies_to_timespec(ent->timestamp,&time);
+ seconds = div_u64_rem(ent->timestamp, HZ, &rem);
arg->written += sprintf(arg->buf + arg->written,
- "[%5lu.%06lu]",
- time.tv_sec, time.tv_nsec);
+ "[%5llu.%09lu]", seconds,
+ rem * NSEC_PER_SEC / HZ);
arg->written += get_ata_err_names(ent->err_mask,
arg->buf + arg->written);
return 0;
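A worked sketch of the jiffies-to-timestamp conversion introduced above (hypothetical helper; with HZ == 250, a timestamp of 2625 jiffies prints "[   10.500000000]"):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/time64.h>

/* Split a jiffies count into whole seconds plus a sub-HZ remainder,
 * then scale the remainder to nanoseconds, as ata_show_ering does. */
static int example_format_timestamp(char *buf, u64 timestamp)
{
        u32 rem;
        u64 seconds = div_u64_rem(timestamp, HZ, &rem);

        return sprintf(buf, "[%5llu.%09lu]", seconds,
                       rem * NSEC_PER_SEC / HZ);
}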
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 80fe0f6fed29..b4d54771c9fe 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -565,7 +565,7 @@ chan_request_fail:
qc->ap->hsm_task_state = HSM_ST_ERR;
cf_ctrl_reset(acdev);
- spin_unlock_irqrestore(qc->ap->lock, flags);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
sff_intr:
dma_complete(acdev);
}
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 970f7767e5fd..49d705c9f0f7 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -183,8 +183,8 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
* We must now look at the PIO mode situation. We may need to
* adjust the PIO mode to keep the timings acceptable
*/
- if (adev->dma_mode >= XFER_MW_DMA_2)
- wanted_pio = 4;
+ if (adev->dma_mode >= XFER_MW_DMA_2)
+ wanted_pio = 4;
else if (adev->dma_mode == XFER_MW_DMA_1)
wanted_pio = 3;
else if (adev->dma_mode == XFER_MW_DMA_0)
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index e5fb7525a5df..a219a503c229 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -368,7 +368,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* PCI clocking determines the ATA timing values to use */
/* info_hpt366 is safe against re-entry so we can scribble on it */
- switch ((reg1 & 0x700) >> 8) {
+ switch ((reg1 & 0xf00) >> 8) {
case 9:
hpriv = &hpt366_40;
break;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index ae9feb1ba8f7..ff468a6fd8dd 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -146,7 +146,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
if (pdev->device == 0x6101)
ppi[1] = &ata_dummy_port_info;
-#if defined(CONFIG_SATA_AHCI) || defined(CONFIG_SATA_AHCI_MODULE)
+#if IS_ENABLED(CONFIG_SATA_AHCI)
if (!marvell_pata_active(pdev)) {
printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n");
return -ENODEV;
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 00c2af1d211b..e0939bd5ea73 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -259,11 +259,8 @@ static int sata_dwc_dma_init_old(struct platform_device *pdev,
/* Get physical SATA DMA register base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hsdev->dma->regs)) {
- dev_err(&pdev->dev,
- "ioremap failed for AHBDMA register address\n");
+ if (IS_ERR(hsdev->dma->regs))
return PTR_ERR(hsdev->dma->regs);
- }
/* Initialize AHB DMAC */
return dw_dma_probe(hsdev->dma);
@@ -281,7 +278,7 @@ static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
static const char *get_prot_descript(u8 protocol)
{
- switch ((enum ata_tf_protocols)protocol) {
+ switch (protocol) {
case ATA_PROT_NODATA:
return "ATA no data";
case ATA_PROT_PIO:
@@ -290,6 +287,8 @@ static const char *get_prot_descript(u8 protocol)
return "ATA DMA";
case ATA_PROT_NCQ:
return "ATA NCQ";
+ case ATA_PROT_NCQ_NODATA:
+ return "ATA NCQ no data";
case ATAPI_PROT_NODATA:
return "ATAPI no data";
case ATAPI_PROT_PIO:
@@ -1225,11 +1224,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
/* Ioremap SATA registers */
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&ofdev->dev, res);
- if (IS_ERR(base)) {
- dev_err(&ofdev->dev,
- "ioremap failed for SATA register address\n");
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
/* Synopsys DWC SATA specific Registers */
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 3657ac1cb801..8e2e4757adcb 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -121,6 +121,7 @@ int pm_clk_add(struct device *dev, const char *con_id)
{
return __pm_clk_add(dev, con_id, NULL);
}
+EXPORT_SYMBOL_GPL(pm_clk_add);
/**
* pm_clk_add_clk - Start using a device clock for power management.
@@ -136,9 +137,42 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
return __pm_clk_add(dev, NULL, clk);
}
+EXPORT_SYMBOL_GPL(pm_clk_add_clk);
/**
+ * of_pm_clk_add_clk - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @name: Name of clock that is going to be used for power management.
+ *
+ * Add the clock described in the 'clocks' device-tree node that matches
+ * the 'name' provided, to the list of clocks used for the power
+ * management of @dev. On success, returns 0. Returns a negative error
+ * code if the clock is not found or cannot be added.
+ */
+int of_pm_clk_add_clk(struct device *dev, const char *name)
+{
+ struct clk *clk;
+ int ret;
+
+ if (!dev || !dev->of_node || !name)
+ return -EINVAL;
+
+ clk = of_clk_get_by_name(dev->of_node, name);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = pm_clk_add_clk(dev, clk);
+ if (ret) {
+ clk_put(clk);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
+
+/**
* of_pm_clk_add_clks - Start using device clock(s) for power management.
* @dev: Device whose clock(s) is going to be used for power management.
*
@@ -192,6 +226,7 @@ error:
return ret;
}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
/**
* __pm_clk_remove - Destroy PM clock entry.
@@ -252,6 +287,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
__pm_clk_remove(ce);
}
+EXPORT_SYMBOL_GPL(pm_clk_remove);
/**
* pm_clk_remove_clk - Stop using a device clock for power management.
@@ -285,6 +321,7 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk)
__pm_clk_remove(ce);
}
+EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
/**
* pm_clk_init - Initialize a device's list of power management clocks.
@@ -299,6 +336,7 @@ void pm_clk_init(struct device *dev)
if (psd)
INIT_LIST_HEAD(&psd->clock_list);
}
+EXPORT_SYMBOL_GPL(pm_clk_init);
/**
* pm_clk_create - Create and initialize a device's list of PM clocks.
@@ -311,6 +349,7 @@ int pm_clk_create(struct device *dev)
{
return dev_pm_get_subsys_data(dev);
}
+EXPORT_SYMBOL_GPL(pm_clk_create);
/**
* pm_clk_destroy - Destroy a device's list of power management clocks.
@@ -345,6 +384,7 @@ void pm_clk_destroy(struct device *dev)
__pm_clk_remove(ce);
}
}
+EXPORT_SYMBOL_GPL(pm_clk_destroy);
/**
* pm_clk_suspend - Disable clocks in a device's PM clock list.
@@ -375,6 +415,7 @@ int pm_clk_suspend(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(pm_clk_suspend);
/**
* pm_clk_resume - Enable clocks in a device's PM clock list.
@@ -400,6 +441,7 @@ int pm_clk_resume(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(pm_clk_resume);
/**
* pm_clk_notify - Notify routine for device addition and removal.
@@ -480,6 +522,7 @@ int pm_clk_runtime_suspend(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
int pm_clk_runtime_resume(struct device *dev)
{
@@ -495,6 +538,7 @@ int pm_clk_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
+EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
#else /* !CONFIG_PM_CLK */
@@ -598,3 +642,4 @@ void pm_clk_add_notifier(struct bus_type *bus,
clknb->nb.notifier_call = pm_clk_notify;
bus_register_notifier(bus, &clknb->nb);
}
+EXPORT_SYMBOL_GPL(pm_clk_add_notifier);
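With these helpers now exported, modules can use them directly; a hypothetical driver probe built on of_pm_clk_add_clk() (the "bus" clock name is illustrative):

#include <linux/platform_device.h>
#include <linux/pm_clock.h>

/* Hypothetical consumer of the newly exported helpers: create the
 * device's PM clock list, then attach the DT clock named "bus". */
static int example_probe(struct platform_device *pdev)
{
        int ret;

        ret = pm_clk_create(&pdev->dev);
        if (ret)
                return ret;

        ret = of_pm_clk_add_clk(&pdev->dev, "bus");
        if (ret)
                pm_clk_destroy(&pdev->dev);

        return ret;
}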
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index de23b648fce3..a1f2aff33997 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -187,8 +187,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
struct gpd_link *link;
int ret = 0;
- if (genpd->status == GPD_STATE_ACTIVE
- || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+ if (genpd->status == GPD_STATE_ACTIVE)
return 0;
/*
@@ -735,82 +734,24 @@ static int pm_genpd_prepare(struct device *dev)
mutex_lock(&genpd->lock);
- if (genpd->prepared_count++ == 0) {
+ if (genpd->prepared_count++ == 0)
genpd->suspended_count = 0;
- genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
- }
mutex_unlock(&genpd->lock);
- if (genpd->suspend_power_off)
- return 0;
-
- /*
- * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
- * so genpd_poweron() will return immediately, but if the device
- * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
- * to make it operational.
- */
- pm_runtime_resume(dev);
- __pm_runtime_disable(dev, false);
-
ret = pm_generic_prepare(dev);
if (ret) {
mutex_lock(&genpd->lock);
- if (--genpd->prepared_count == 0)
- genpd->suspend_power_off = false;
+ genpd->prepared_count--;
mutex_unlock(&genpd->lock);
- pm_runtime_enable(dev);
}
return ret;
}
/**
- * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Suspend a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_suspend(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
-}
-
-/**
- * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late suspend of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_suspend_late(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
-}
-
-/**
* pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
* @dev: Device to suspend.
*
@@ -820,6 +761,7 @@ static int pm_genpd_suspend_late(struct device *dev)
static int pm_genpd_suspend_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -827,11 +769,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off
- || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+ if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
return 0;
- genpd_stop_dev(genpd, dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start) {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+ }
/*
* Since all of the "noirq" callbacks are executed sequentially, it is
@@ -853,6 +798,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
static int pm_genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -860,8 +806,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off
- || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+ if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
return 0;
/*
@@ -872,93 +817,10 @@ static int pm_genpd_resume_noirq(struct device *dev)
pm_genpd_sync_poweron(genpd, true);
genpd->suspended_count--;
- return genpd_start_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Carry out an early resume of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_resume_early(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
-}
-
-/**
- * pm_genpd_resume - Resume of device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Resume a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_resume(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
-}
-
-/**
- * pm_genpd_freeze - Freezing a device in an I/O PM domain.
- * @dev: Device to freeze.
- *
- * Freeze a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_freeze(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
-}
+ if (genpd->dev_ops.stop && genpd->dev_ops.start)
+ ret = pm_runtime_force_resume(dev);
-/**
- * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
- * @dev: Device to freeze.
- *
- * Carry out a late freeze of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_freeze_late(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
+ return ret;
}
/**
@@ -973,6 +835,7 @@ static int pm_genpd_freeze_late(struct device *dev)
static int pm_genpd_freeze_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -980,7 +843,10 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start)
+ ret = pm_runtime_force_suspend(dev);
+
+ return ret;
}
/**
@@ -993,6 +859,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
static int pm_genpd_thaw_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -1000,51 +867,10 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ?
- 0 : genpd_start_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
- * @dev: Device to thaw.
- *
- * Carry out an early thaw of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_thaw_early(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
-}
-
-/**
- * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
- * @dev: Device to thaw.
- *
- * Thaw a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_thaw(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start)
+ ret = pm_runtime_force_resume(dev);
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
+ return ret;
}
/**
@@ -1057,6 +883,7 @@ static int pm_genpd_thaw(struct device *dev)
static int pm_genpd_restore_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -1072,30 +899,20 @@ static int pm_genpd_restore_noirq(struct device *dev)
* At this point suspended_count == 0 means we are being run for the
* first time for the given domain in the present cycle.
*/
- if (genpd->suspended_count++ == 0) {
+ if (genpd->suspended_count++ == 0)
/*
* The boot kernel might put the domain into an arbitrary state,
* so make it appear as powered off to pm_genpd_sync_poweron(),
* so that it tries to power it on in case it was really off.
*/
genpd->status = GPD_STATE_POWER_OFF;
- if (genpd->suspend_power_off) {
- /*
- * If the domain was off before the hibernation, make
- * sure it will be off going forward.
- */
- genpd_power_off(genpd, true);
-
- return 0;
- }
- }
-
- if (genpd->suspend_power_off)
- return 0;
pm_genpd_sync_poweron(genpd, true);
- return genpd_start_dev(genpd, dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start)
+ ret = pm_runtime_force_resume(dev);
+
+ return ret;
}
/**
@@ -1110,7 +927,6 @@ static int pm_genpd_restore_noirq(struct device *dev)
static void pm_genpd_complete(struct device *dev)
{
struct generic_pm_domain *genpd;
- bool run_complete;
dev_dbg(dev, "%s()\n", __func__);
@@ -1118,20 +934,15 @@ static void pm_genpd_complete(struct device *dev)
if (IS_ERR(genpd))
return;
+ pm_generic_complete(dev);
+
mutex_lock(&genpd->lock);
- run_complete = !genpd->suspend_power_off;
- if (--genpd->prepared_count == 0)
- genpd->suspend_power_off = false;
+ genpd->prepared_count--;
+ if (!genpd->prepared_count)
+ genpd_queue_power_off_work(genpd);
mutex_unlock(&genpd->lock);
-
- if (run_complete) {
- pm_generic_complete(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- pm_request_idle(dev);
- }
}
/**
@@ -1173,18 +984,10 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#else /* !CONFIG_PM_SLEEP */
#define pm_genpd_prepare NULL
-#define pm_genpd_suspend NULL
-#define pm_genpd_suspend_late NULL
#define pm_genpd_suspend_noirq NULL
-#define pm_genpd_resume_early NULL
#define pm_genpd_resume_noirq NULL
-#define pm_genpd_resume NULL
-#define pm_genpd_freeze NULL
-#define pm_genpd_freeze_late NULL
#define pm_genpd_freeze_noirq NULL
-#define pm_genpd_thaw_early NULL
#define pm_genpd_thaw_noirq NULL
-#define pm_genpd_thaw NULL
#define pm_genpd_restore_noirq NULL
#define pm_genpd_complete NULL
@@ -1455,12 +1258,14 @@ EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
* @genpd: PM domain object to initialize.
* @gov: PM domain governor to associate with the domain (may be NULL).
* @is_off: Initial value of the domain's power_is_off field.
+ *
+ * Returns 0 on successful initialization, else a negative error code.
*/
-void pm_genpd_init(struct generic_pm_domain *genpd,
- struct dev_power_governor *gov, bool is_off)
+int pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
{
if (IS_ERR_OR_NULL(genpd))
- return;
+ return -EINVAL;
INIT_LIST_HEAD(&genpd->master_links);
INIT_LIST_HEAD(&genpd->slave_links);
@@ -1476,24 +1281,24 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
genpd->domain.ops.prepare = pm_genpd_prepare;
- genpd->domain.ops.suspend = pm_genpd_suspend;
- genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
+ genpd->domain.ops.suspend = pm_generic_suspend;
+ genpd->domain.ops.suspend_late = pm_generic_suspend_late;
genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
- genpd->domain.ops.resume_early = pm_genpd_resume_early;
- genpd->domain.ops.resume = pm_genpd_resume;
- genpd->domain.ops.freeze = pm_genpd_freeze;
- genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
+ genpd->domain.ops.resume_early = pm_generic_resume_early;
+ genpd->domain.ops.resume = pm_generic_resume;
+ genpd->domain.ops.freeze = pm_generic_freeze;
+ genpd->domain.ops.freeze_late = pm_generic_freeze_late;
genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
- genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
- genpd->domain.ops.thaw = pm_genpd_thaw;
- genpd->domain.ops.poweroff = pm_genpd_suspend;
- genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
+ genpd->domain.ops.thaw_early = pm_generic_thaw_early;
+ genpd->domain.ops.thaw = pm_generic_thaw;
+ genpd->domain.ops.poweroff = pm_generic_poweroff;
+ genpd->domain.ops.poweroff_late = pm_generic_poweroff_late;
genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.restore_early = pm_genpd_resume_early;
- genpd->domain.ops.restore = pm_genpd_resume;
+ genpd->domain.ops.restore_early = pm_generic_restore_early;
+ genpd->domain.ops.restore = pm_generic_restore;
genpd->domain.ops.complete = pm_genpd_complete;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
@@ -1518,6 +1323,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
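Since pm_genpd_init() now returns an error code, providers are expected to check it; a minimal, hypothetical caller:

#include <linux/pm_domain.h>
#include <linux/printk.h>

static struct generic_pm_domain example_pd = {
        .name = "example",
};

/* Hypothetical provider setup: initialize the domain as powered off
 * and propagate the new error return instead of ignoring it. */
static int __init example_pd_setup(void)
{
        int ret = pm_genpd_init(&example_pd, NULL, true);

        if (ret)
                pr_err("pm_genpd_init() failed: %d\n", ret);
        return ret;
}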
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b74690418504..e097d355cc04 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1045,10 +1045,14 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
*/
if (!parent->power.disable_depth
&& !parent->power.ignore_children
- && parent->power.runtime_status != RPM_ACTIVE)
+ && parent->power.runtime_status != RPM_ACTIVE) {
+ dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
+ dev_name(dev),
+ dev_name(parent));
error = -EBUSY;
- else if (dev->power.runtime_status == RPM_SUSPENDED)
+ } else if (dev->power.runtime_status == RPM_SUSPENDED) {
atomic_inc(&parent->power.child_count);
+ }
spin_unlock(&parent->power.lock);
@@ -1256,7 +1260,7 @@ void pm_runtime_allow(struct device *dev)
dev->power.runtime_auto = true;
if (atomic_dec_and_test(&dev->power.usage_count))
- rpm_idle(dev, RPM_AUTO);
+ rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1506,6 +1510,9 @@ int pm_runtime_force_resume(struct device *dev)
goto out;
}
+ if (!pm_runtime_status_suspended(dev))
+ goto out;
+
ret = pm_runtime_set_active(dev);
if (ret)
goto out;
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8b7d7f8e5851..df3c97cb4c99 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -77,6 +77,14 @@ static DEVICE_ATTR_RO(book_siblings);
static DEVICE_ATTR_RO(book_siblings_list);
#endif
+#ifdef CONFIG_SCHED_DRAWER
+define_id_show_func(drawer_id);
+static DEVICE_ATTR_RO(drawer_id);
+define_siblings_show_func(drawer_siblings, drawer_cpumask);
+static DEVICE_ATTR_RO(drawer_siblings);
+static DEVICE_ATTR_RO(drawer_siblings_list);
+#endif
+
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
&dev_attr_core_id.attr,
@@ -89,6 +97,11 @@ static struct attribute *default_attrs[] = {
&dev_attr_book_siblings.attr,
&dev_attr_book_siblings_list.attr,
#endif
+#ifdef CONFIG_SCHED_DRAWER
+ &dev_attr_drawer_id.attr,
+ &dev_attr_drawer_siblings.attr,
+ &dev_attr_drawer_siblings_list.attr,
+#endif
NULL
};
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index eda09090cb52..f642c4264c27 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -8,8 +8,6 @@
#include <linux/bcma/bcma.h>
#include <linux/delay.h>
-#define BCMA_CORE_SIZE 0x1000
-
#define bcma_err(bus, fmt, ...) \
pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_warn(bus, fmt, ...) \
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c04bd9bc39fd..ba5145d384d8 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -339,7 +339,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto io_error;
- if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
bio->bi_iter.bi_size & ~PAGE_MASK)
goto io_error;
@@ -347,9 +347,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
goto out;
}
- rw = bio_rw(bio);
- if (rw == READA)
- rw = READ;
+ rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
@@ -509,7 +507,9 @@ static struct brd_device *brd_alloc(int i)
blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
brd->brd_queue->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
-
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
+#endif
disk = brd->brd_disk = alloc_disk(max_part);
if (!disk)
goto out_free_queue;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 63c2064689f8..db9d6bb6352d 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1951,7 +1951,6 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
if (cciss_create_ld_sysfs_entry(h, drv_index))
goto cleanup_queue;
disk->private_data = h->drv[drv_index];
- disk->driverfs_dev = &h->drv[drv_index]->dev;
/* Set up queue information */
blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1973,7 +1972,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
/* allows the interrupt handler to start the queue */
wmb();
h->drv[drv_index]->queue = disk->queue;
- add_disk(disk);
+ device_add_disk(&h->drv[drv_index]->dev, disk);
return 0;
cleanup_queue:
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 10459a145062..0a1aaf8c24c4 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -137,19 +137,19 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b
static int _drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev,
- sector_t sector, int rw)
+ sector_t sector, int op)
{
struct bio *bio;
/* we do all our meta data IO in aligned 4k blocks. */
const int size = 4096;
- int err;
+ int err, op_flags = 0;
device->md_io.done = 0;
device->md_io.error = -ENODEV;
- if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags))
- rw |= REQ_FUA | REQ_FLUSH;
- rw |= REQ_SYNC | REQ_NOIDLE;
+ if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
+ op_flags |= REQ_FUA | REQ_PREFLUSH;
+ op_flags |= REQ_SYNC | REQ_NOIDLE;
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
@@ -159,9 +159,9 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
goto out;
bio->bi_private = device;
bio->bi_end_io = drbd_md_endio;
- bio->bi_rw = rw;
+ bio_set_op_attrs(bio, op, op_flags);
- if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
+ if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
;
else if (!get_ldev_if_state(device, D_ATTACHING)) {
@@ -174,10 +174,10 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
bio_get(bio); /* one bio_put() is in the completion handler */
atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
device->md_io.submit_jif = jiffies;
- if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+ if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_io_error(bio);
else
- submit_bio(rw, bio);
+ submit_bio(bio);
wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
if (!bio->bi_error)
err = device->md_io.error;
@@ -188,7 +188,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
}
int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
- sector_t sector, int rw)
+ sector_t sector, int op)
{
int err;
D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
@@ -197,19 +197,21 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd
dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
current->comm, current->pid, __func__,
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
+ (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ",
(void*)_RET_IP_ );
if (sector < drbd_md_first_sector(bdev) ||
sector + 7 > drbd_md_last_sector(bdev))
drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
current->comm, current->pid, __func__,
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
+ (unsigned long long)sector,
+ (op == REQ_OP_WRITE) ? "WRITE" : "READ");
- err = _drbd_md_sync_page_io(device, bdev, sector, rw);
+ err = _drbd_md_sync_page_io(device, bdev, sector, op);
if (err) {
drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
+ (unsigned long long)sector,
+ (op == REQ_OP_WRITE) ? "WRITE" : "READ", err);
}
return err;
}
@@ -256,7 +258,7 @@ bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
- D_ASSERT(device, (unsigned)(last - first) <= 1);
+ D_ASSERT(device, first <= last);
D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
/* FIXME figure out a fast path for bios crossing AL extent boundaries */
@@ -339,6 +341,8 @@ static int __al_write_transaction(struct drbd_device *device, struct al_transact
i = 0;
+ drbd_bm_reset_al_hints(device);
+
/* Even though no one can start to change this list
* once we set the LC_LOCKED -- from drbd_al_begin_io(),
* lc_try_lock_for_transaction() --, someone may still
@@ -768,10 +772,18 @@ static bool lazy_bitmap_update_due(struct drbd_device *device)
static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done)
{
- if (rs_done)
- set_bit(RS_DONE, &device->flags);
- /* and also set RS_PROGRESS below */
- else if (!lazy_bitmap_update_due(device))
+ if (rs_done) {
+ struct drbd_connection *connection = first_peer_device(device)->connection;
+ if (connection->agreed_pro_version <= 95 ||
+ is_sync_target_state(device->state.conn))
+ set_bit(RS_DONE, &device->flags);
+ /* and also set RS_PROGRESS below */
+
+ /* Else: rather wait for explicit notification via receive_state,
+ * to avoid uuids-rotated-too-fast causing full resync
+ * in next handshake, in case the replication link breaks
+ * at the most unfortunate time... */
+ } else if (!lazy_bitmap_update_due(device))
return;
drbd_device_post_work(device, RS_PROGRESS);
@@ -830,6 +842,13 @@ static int update_sync_bits(struct drbd_device *device,
return count;
}
+static bool plausible_request_size(int size)
+{
+ return size > 0
+ && size <= DRBD_MAX_BATCH_BIO_SIZE
+ && IS_ALIGNED(size, 512);
+}
+
/* clear the bit corresponding to the piece of storage in question:
* size bytes of data starting from sector. Only clear the bits of the affected
* one or more _aligned_ BM_BLOCK_SIZE blocks.
@@ -845,11 +864,11 @@ int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
unsigned long count = 0;
sector_t esector, nr_sectors;
- /* This would be an empty REQ_FLUSH, be silent. */
+ /* This would be an empty REQ_PREFLUSH, be silent. */
if ((mode == SET_OUT_OF_SYNC) && size == 0)
return 0;
- if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) {
+ if (!plausible_request_size(size)) {
drbd_err(device, "%s: sector=%llus size=%d nonsense!\n",
drbd_change_sync_fname[mode],
(unsigned long long)sector, size);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 92d6fc020a65..ab62b81c2ca7 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -96,6 +96,13 @@ struct drbd_bitmap {
struct page **bm_pages;
spinlock_t bm_lock;
+ /* exclusively to be used by __al_write_transaction(),
+ * drbd_bm_mark_for_writeout() and
+ * drbd_bm_write_hinted() -> bm_rw() called from there.
+ */
+ unsigned int n_bitmap_hints;
+ unsigned int al_bitmap_hints[AL_UPDATES_PER_TRANSACTION];
+
/* see LIMITATIONS: above */
unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */
@@ -242,6 +249,11 @@ static void bm_set_page_need_writeout(struct page *page)
set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
+void drbd_bm_reset_al_hints(struct drbd_device *device)
+{
+ device->bitmap->n_bitmap_hints = 0;
+}
+
/**
* drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
* @device: DRBD device.
@@ -253,6 +265,7 @@ static void bm_set_page_need_writeout(struct page *page)
*/
void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
+ struct drbd_bitmap *b = device->bitmap;
struct page *page;
if (page_nr >= device->bitmap->bm_number_of_pages) {
drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
@@ -260,7 +273,9 @@ void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
return;
}
page = device->bitmap->bm_pages[page_nr];
- set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
+ BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints));
+ if (!test_and_set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)))
+ b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr;
}
static int bm_test_page_unchanged(struct page *page)
@@ -427,8 +442,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
}
/*
- * called on driver init only. TODO call when a device is created.
- * allocates the drbd_bitmap, and stores it in device->bitmap.
+ * allocates the drbd_bitmap and stores it in device->bitmap.
*/
int drbd_bm_init(struct drbd_device *device)
{
@@ -633,7 +647,8 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
unsigned long bits, words, owords, obits;
unsigned long want, have, onpages; /* number of pages */
struct page **npages, **opages = NULL;
- int err = 0, growing;
+ int err = 0;
+ bool growing;
if (!expect(b))
return -ENOMEM;
@@ -980,7 +995,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
struct drbd_bitmap *b = device->bitmap;
struct page *page;
unsigned int len;
- unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE;
+ unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
sector_t on_disk_sector =
device->ldev->md.md_offset + device->ldev->md.bm_offset;
@@ -1011,12 +1026,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = drbd_bm_endio;
+ bio_set_op_attrs(bio, op, 0);
- if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
- bio->bi_rw |= rw;
+ if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio_io_error(bio);
} else {
- submit_bio(rw, bio);
+ submit_bio(bio);
/* this should not count as user activity and cause the
* resync to throttle -- see drbd_rs_should_slow_down(). */
atomic_add(len >> 9, &device->rs_sect_ev);
@@ -1030,7 +1045,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
{
struct drbd_bm_aio_ctx *ctx;
struct drbd_bitmap *b = device->bitmap;
- int num_pages, i, count = 0;
+ unsigned int num_pages, i, count = 0;
unsigned long now;
char ppb[10];
int err = 0;
@@ -1078,16 +1093,37 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
now = jiffies;
/* let the layers below us try to merge these bios... */
- for (i = 0; i < num_pages; i++) {
- /* ignore completely unchanged pages */
- if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
- break;
- if (!(flags & BM_AIO_READ)) {
- if ((flags & BM_AIO_WRITE_HINTED) &&
- !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
- &page_private(b->bm_pages[i])))
- continue;
+ if (flags & BM_AIO_READ) {
+ for (i = 0; i < num_pages; i++) {
+ atomic_inc(&ctx->in_flight);
+ bm_page_io_async(ctx, i);
+ ++count;
+ cond_resched();
+ }
+ } else if (flags & BM_AIO_WRITE_HINTED) {
+ /* ASSERT: BM_AIO_WRITE_ALL_PAGES is not set. */
+ unsigned int hint;
+ for (hint = 0; hint < b->n_bitmap_hints; hint++) {
+ i = b->al_bitmap_hints[hint];
+ if (i >= num_pages) /* == -1U: no hint here. */
+ continue;
+ /* Several AL-extents may point to the same page. */
+ if (!test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
+ &page_private(b->bm_pages[i])))
+ continue;
+ /* Has it even changed? */
+ if (bm_test_page_unchanged(b->bm_pages[i]))
+ continue;
+ atomic_inc(&ctx->in_flight);
+ bm_page_io_async(ctx, i);
+ ++count;
+ }
+ } else {
+ for (i = 0; i < num_pages; i++) {
+ /* ignore completely unchanged pages */
+ if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
+ break;
if (!(flags & BM_AIO_WRITE_ALL_PAGES) &&
bm_test_page_unchanged(b->bm_pages[i])) {
dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
@@ -1100,11 +1136,11 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
continue;
}
+ atomic_inc(&ctx->in_flight);
+ bm_page_io_async(ctx, i);
+ ++count;
+ cond_resched();
}
- atomic_inc(&ctx->in_flight);
- bm_page_io_async(ctx, i);
- ++count;
- cond_resched();
}
/*
@@ -1121,10 +1157,14 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
/* summary for global bitmap IO */
- if (flags == 0)
- drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n",
- (flags & BM_AIO_READ) ? "READ" : "WRITE",
- count, jiffies - now);
+ if (flags == 0) {
+ unsigned int ms = jiffies_to_msecs(jiffies - now);
+ if (ms > 5) {
+ drbd_info(device, "bitmap %s of %u pages took %u ms\n",
+ (flags & BM_AIO_READ) ? "READ" : "WRITE",
+ count, ms);
+ }
+ }
if (ctx->error) {
drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
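
The hunk above replaces the raw "took %lu jiffies" report with jiffies_to_msecs() and only logs when the bitmap I/O exceeded 5 ms. A minimal userspace sketch of the same measure-and-threshold pattern (hypothetical names, not DRBD code):

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

static long elapsed_ms(struct timespec a, struct timespec b)
{
	return (b.tv_sec - a.tv_sec) * 1000L + (b.tv_nsec - a.tv_nsec) / 1000000L;
}

int main(void)
{
	struct timespec start, end;
	long ms;

	clock_gettime(CLOCK_MONOTONIC, &start);
	/* ... the bitmap I/O would happen here ... */
	clock_gettime(CLOCK_MONOTONIC, &end);

	ms = elapsed_ms(start, end);
	if (ms > 5)	/* same threshold as the hunk above */
		printf("bitmap IO took %ld ms\n", ms);
	return 0;
}
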
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index 4de95bbff486..be91a8d7c22a 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -237,14 +237,9 @@ static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_re
seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");
- if (f & EE_IS_TRIM) {
- seq_putc(m, sep);
- sep = '|';
- if (f & EE_IS_TRIM_USE_ZEROOUT)
- seq_puts(m, "zero-out");
- else
- seq_puts(m, "trim");
- }
+ if (f & EE_IS_TRIM)
+ __seq_print_rq_state_bit(m, f & EE_IS_TRIM_USE_ZEROOUT, &sep, "zero-out", "trim");
+ seq_print_rq_state_bit(m, f & EE_WRITE_SAME, &sep, "write-same");
seq_putc(m, '\n');
}
@@ -908,7 +903,7 @@ static int drbd_version_open(struct inode *inode, struct file *file)
return single_open(file, drbd_version_show, NULL);
}
-static struct file_operations drbd_version_fops = {
+static const struct file_operations drbd_version_fops = {
.owner = THIS_MODULE,
.open = drbd_version_open,
.llseek = seq_lseek,
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 7a1cf7eaa71d..7b54354976a5 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -468,9 +468,15 @@ enum {
/* this is/was a write request */
__EE_WRITE,
+ /* this is/was a write same request */
+ __EE_WRITE_SAME,
+
/* this originates from application on peer
* (not some resync or verify or other DRBD internal request) */
__EE_APPLICATION,
+
+ /* If it contains only 0 bytes, send back P_RS_DEALLOCATED */
+ __EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
@@ -484,7 +490,9 @@ enum {
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
#define EE_WRITE (1<<__EE_WRITE)
+#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
#define EE_APPLICATION (1<<__EE_APPLICATION)
+#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
/* flag bits per device */
enum {
@@ -1123,6 +1131,7 @@ extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int
extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
+extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);
@@ -1327,14 +1336,14 @@ struct bm_extent {
#endif
#endif
-/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+/* Estimate max bio size as 256 * PAGE_SIZE,
 * so for typical PAGE_SIZE of 4k, that is (1<<20) bytes.
 * Since we may live in a mixed-platform cluster,
 * we limit ourselves to a platform-agnostic constant here for now.
 * A follow-up commit may allow even bigger BIO sizes,
* once we thought that through. */
#define DRBD_MAX_BIO_SIZE (1U << 20)
-#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
+#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
@@ -1342,11 +1351,11 @@ struct bm_extent {
#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
-/* For now, don't allow more than one activity log extent worth of data
- * to be discarded in one go. We may need to rework drbd_al_begin_io()
- * to allow for even larger discard ranges */
-#define DRBD_MAX_DISCARD_SIZE AL_EXTENT_SIZE
-#define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9)
+/* For now, don't allow more than half of what we can "activate" in one
+ * activity log transaction to be discarded in one go. We may need to rework
+ * drbd_al_begin_io() to allow for even larger discard ranges */
+#define DRBD_MAX_BATCH_BIO_SIZE (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
+#define DRBD_MAX_BBIO_SECTORS (DRBD_MAX_BATCH_BIO_SIZE >> 9)
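
For a sense of scale: assuming AL_EXTENT_SIZE is 4 MiB and AL_UPDATES_PER_TRANSACTION is 64 (consistent with the "128 MiB batch bios" note in drbd_protocol.h further down), the new limit works out to 64/2 * 4 MiB = 128 MiB, or 262144 sectors of 512 bytes. A quick standalone check:

#include <stdio.h>

#define AL_EXTENT_SIZE (4U << 20)	/* assumed: 4 MiB per AL extent */
#define AL_UPDATES_PER_TRANSACTION 64U	/* assumed */
#define DRBD_MAX_BATCH_BIO_SIZE (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)

int main(void)
{
	printf("%u bytes = %u MiB = %u sectors\n",
	       DRBD_MAX_BATCH_BIO_SIZE,
	       DRBD_MAX_BATCH_BIO_SIZE >> 20,
	       DRBD_MAX_BATCH_BIO_SIZE >> 9);	/* 134217728 = 128 MiB = 262144 */
	return 0;
}
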
extern int drbd_bm_init(struct drbd_device *device);
extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
@@ -1369,6 +1378,7 @@ extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
+extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
@@ -1483,12 +1493,14 @@ enum determine_dev_size {
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_device *);
-extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev);
+extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
+ struct drbd_backing_dev *bdev, struct o_qlim *o);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
enum drbd_role new_role,
int force);
extern bool conn_try_outdate_peer(struct drbd_connection *connection);
extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
+extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
extern int drbd_khelper(struct drbd_device *device, char *cmd);
/* drbd_worker.c */
@@ -1507,7 +1519,7 @@ extern int drbd_resync_finished(struct drbd_device *device);
extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
- struct drbd_backing_dev *bdev, sector_t sector, int rw);
+ struct drbd_backing_dev *bdev, sector_t sector, int op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
@@ -1548,6 +1560,8 @@ extern void start_resync_timer_fn(unsigned long data);
extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
/* drbd_receiver.c */
+extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
+ sector_t start, unsigned int nr_sectors, bool discard);
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_ack_receiver(struct drbd_thread *thi);
extern void drbd_send_ping_wf(struct work_struct *ws);
@@ -1557,11 +1571,11 @@ extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector
bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
struct drbd_peer_request *, const unsigned,
- const int);
+ const unsigned, const int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
sector_t, unsigned int,
- bool,
+ unsigned int,
gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
int);
@@ -1635,8 +1649,6 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
extern const struct file_operations drbd_proc_fops;
-extern const char *drbd_conn_str(enum drbd_conns s);
-extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
@@ -2095,13 +2107,22 @@ static inline void _sub_unacked(struct drbd_device *device, int n, const char *f
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
+static inline bool is_sync_target_state(enum drbd_conns connection_state)
+{
+ return connection_state == C_SYNC_TARGET ||
+ connection_state == C_PAUSED_SYNC_T;
+}
+
+static inline bool is_sync_source_state(enum drbd_conns connection_state)
+{
+ return connection_state == C_SYNC_SOURCE ||
+ connection_state == C_PAUSED_SYNC_S;
+}
+
static inline bool is_sync_state(enum drbd_conns connection_state)
{
- return
- (connection_state == C_SYNC_SOURCE
- || connection_state == C_SYNC_TARGET
- || connection_state == C_PAUSED_SYNC_S
- || connection_state == C_PAUSED_SYNC_T);
+ return is_sync_source_state(connection_state) ||
+ is_sync_target_state(connection_state);
}
/**
diff --git a/drivers/block/drbd/drbd_interval.h b/drivers/block/drbd/drbd_interval.h
index f210543f05f4..23c5a94428d2 100644
--- a/drivers/block/drbd/drbd_interval.h
+++ b/drivers/block/drbd/drbd_interval.h
@@ -6,13 +6,13 @@
struct drbd_interval {
struct rb_node rb;
- sector_t sector; /* start sector of the interval */
- unsigned int size; /* size in bytes */
- sector_t end; /* highest interval end in subtree */
- int local:1 /* local or remote request? */;
- int waiting:1; /* someone is waiting for this to complete */
- int completed:1; /* this has been completed already;
- * ignore for conflict detection */
+ sector_t sector; /* start sector of the interval */
+ unsigned int size; /* size in bytes */
+ sector_t end; /* highest interval end in subtree */
+ unsigned int local:1 /* local or remote request? */;
+ unsigned int waiting:1; /* someone is waiting for completion */
+ unsigned int completed:1; /* this has been completed already;
+ * ignore for conflict detection */
};
static inline void drbd_clear_interval(struct drbd_interval *i)
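
The bitfield change above matters beyond style: a 1-bit signed field can only hold 0 and -1 (whether a plain int bitfield is signed is implementation-defined), so tests like i.local == 1 can silently fail. A small standalone illustration:

#include <stdio.h>

struct flags_signed   { int local:1; };			/* the old layout */
struct flags_unsigned { unsigned int local:1; };	/* the new layout */

int main(void)
{
	struct flags_signed   s = { 1 };
	struct flags_unsigned u = { 1 };

	/* a 1-bit signed field stores 0 or -1, which is the trap
	 * the hunk above removes */
	printf("signed: %d, unsigned: %d\n", s.local, (int)u.local);
	printf("signed == 1? %s\n", s.local == 1 ? "yes" : "no");
	return 0;
}
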
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2ba1494b2799..0501ae0c517b 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -31,7 +31,7 @@
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
@@ -920,6 +920,31 @@ void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
}
}
+/* communicated if (agreed_features & DRBD_FF_WSAME) */
+void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct request_queue *q)
+{
+ if (q) {
+ p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
+ p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
+ p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
+ p->qlim->io_min = cpu_to_be32(queue_io_min(q));
+ p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
+ p->qlim->discard_enabled = blk_queue_discard(q);
+ p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q);
+ p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
+ } else {
+ q = device->rq_queue;
+ p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
+ p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
+ p->qlim->alignment_offset = 0;
+ p->qlim->io_min = cpu_to_be32(queue_io_min(q));
+ p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
+ p->qlim->discard_enabled = 0;
+ p->qlim->discard_zeroes_data = 0;
+ p->qlim->write_same_capable = 0;
+ }
+}
+
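
assign_p_sizes_qlim() serializes every 32-bit limit with cpu_to_be32() so that mixed-endian peers decode the same values. A minimal userspace analogue using the htonl()/ntohl() pair:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl()/ntohl(): big-endian on the wire */

int main(void)
{
	uint32_t io_opt = 65536;		/* host byte order */
	uint32_t wire   = htonl(io_opt);	/* like cpu_to_be32() */

	printf("decoded on any peer: %u\n", ntohl(wire));	/* like be32_to_cpu() */
	return 0;
}
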
int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
struct drbd_device *device = peer_device->device;
@@ -928,29 +953,37 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu
sector_t d_size, u_size;
int q_order_type;
unsigned int max_bio_size;
+ unsigned int packet_size;
+
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
+ if (!p)
+ return -EIO;
+ packet_size = sizeof(*p);
+ if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
+ packet_size += sizeof(p->qlim[0]);
+
+ memset(p, 0, packet_size);
if (get_ldev_if_state(device, D_NEGOTIATING)) {
- D_ASSERT(device, device->ldev->backing_bdev);
+ struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
d_size = drbd_get_max_capacity(device->ldev);
rcu_read_lock();
u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
q_order_type = drbd_queue_order_type(device);
- max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
+ max_bio_size = queue_max_hw_sectors(q) << 9;
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
+ assign_p_sizes_qlim(device, p, q);
put_ldev(device);
} else {
d_size = 0;
u_size = 0;
q_order_type = QUEUE_ORDERED_NONE;
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
+ assign_p_sizes_qlim(device, p, NULL);
}
- sock = &peer_device->connection->data;
- p = drbd_prepare_command(peer_device, sock);
- if (!p)
- return -EIO;
-
if (peer_device->connection->agreed_pro_version <= 94)
max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
else if (peer_device->connection->agreed_pro_version < 100)
@@ -962,7 +995,8 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu
p->max_bio_size = cpu_to_be32(max_bio_size);
p->queue_order_type = cpu_to_be16(q_order_type);
p->dds_flags = cpu_to_be16(flags);
- return drbd_send_command(peer_device, sock, P_SIZES, sizeof(*p), NULL, 0);
+
+ return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
}
/**
@@ -1377,6 +1411,22 @@ int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
cpu_to_be64(block_id));
}
+int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
+ struct drbd_peer_request *peer_req)
+{
+ struct drbd_socket *sock;
+ struct p_block_desc *p;
+
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(peer_req->i.sector);
+ p->blksize = cpu_to_be32(peer_req->i.size);
+ p->pad = 0;
+ return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
+}
+
int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
sector_t sector, int size, u64 block_id)
{
@@ -1561,6 +1611,9 @@ static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
? 0 : MSG_MORE);
if (err)
return err;
+ /* REQ_OP_WRITE_SAME has only one segment */
+ if (bio_op(bio) == REQ_OP_WRITE_SAME)
+ break;
}
return 0;
}
@@ -1579,6 +1632,9 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
if (err)
return err;
+ /* REQ_OP_WRITE_SAME has only one segment */
+ if (bio_op(bio) == REQ_OP_WRITE_SAME)
+ break;
}
return 0;
}
@@ -1603,15 +1659,17 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
return 0;
}
-static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
+static u32 bio_flags_to_wire(struct drbd_connection *connection,
+ struct bio *bio)
{
if (connection->agreed_pro_version >= 95)
- return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
- (bi_rw & REQ_FUA ? DP_FUA : 0) |
- (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
- (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
+ return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
+ (bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
+ (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
+ (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
+ (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
else
- return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+ return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}
/* Used to send write or TRIM aka REQ_DISCARD requests
@@ -1622,6 +1680,8 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
+ struct p_wsame *wsame = NULL;
+ void *digest_out;
unsigned int dp_flags = 0;
int digest_size;
int err;
@@ -1636,7 +1696,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
p->sector = cpu_to_be64(req->i.sector);
p->block_id = (unsigned long)req;
p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
- dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
+ dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
if (device->state.conn >= C_SYNC_SOURCE &&
device->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
@@ -1657,12 +1717,29 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
goto out;
}
+ if (dp_flags & DP_WSAME) {
+ /* this will only work if DRBD_FF_WSAME is set AND the
+ * handshake agreed that all nodes and backend devices are
+ * WRITE_SAME capable and agree on logical_block_size */
+ wsame = (struct p_wsame*)p;
+ digest_out = wsame + 1;
+ wsame->size = cpu_to_be32(req->i.size);
+ } else
+ digest_out = p + 1;
/* our digest is still only over the payload.
* TRIM does not carry any payload. */
if (digest_size)
- drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
- err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + digest_size, NULL, req->i.size);
+ drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
+ if (wsame) {
+ err =
+ __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
+ sizeof(*wsame) + digest_size, NULL,
+ bio_iovec(req->master_bio).bv_len);
+ } else
+ err =
+ __send_command(peer_device->connection, device->vnr, sock, P_DATA,
+ sizeof(*p) + digest_size, NULL, req->i.size);
if (!err) {
/* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away
@@ -3061,7 +3138,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
sector = device->ldev->md.md_offset;
- if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
/* this was a try anyways ... */
drbd_err(device, "meta data update failed!\n");
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
@@ -3263,7 +3340,8 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
* Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
bdev->md.md_size_sect = 8;
- if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
+ if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
+ REQ_OP_READ)) {
/* NOTE: can't do normal error processing here as this is
called BEFORE disk is attached */
drbd_err(device, "Error while reading metadata.\n");
@@ -3505,7 +3583,12 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
struct bm_io_work *work = &device->bm_io_work;
int rv = -EIO;
- D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);
+ if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
+ int cnt = atomic_read(&device->ap_bio_cnt);
+ if (cnt)
+ drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
+ cnt, work->why);
+ }
if (get_ldev(device)) {
drbd_bm_lock(device, work->why, work->flags);
@@ -3585,18 +3668,20 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags)
{
+ /* Only suspend io, if some operation is supposed to be locked out */
+ const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
int rv;
D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
- if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
+ if (do_suspend_io)
drbd_suspend_io(device);
drbd_bm_lock(device, why, flags);
rv = io_fn(device);
drbd_bm_unlock(device);
- if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
+ if (do_suspend_io)
drbd_resume_io(device);
return rv;
@@ -3635,6 +3720,8 @@ const char *cmdname(enum drbd_packet cmd)
* one PRO_VERSION */
static const char *cmdnames[] = {
[P_DATA] = "Data",
+ [P_WSAME] = "WriteSame",
+ [P_TRIM] = "Trim",
[P_DATA_REPLY] = "DataReply",
[P_RS_DATA_REPLY] = "RSDataReply",
[P_BARRIER] = "Barrier",
@@ -3679,6 +3766,8 @@ const char *cmdname(enum drbd_packet cmd)
[P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
[P_RETRY_WRITE] = "retry_write",
[P_PROTOCOL_UPDATE] = "protocol_update",
+ [P_RS_THIN_REQ] = "rs_thin_req",
+ [P_RS_DEALLOCATED] = "rs_deallocated",
/* enum drbd_packet, but not commands - obsoleted flags:
* P_MAY_IGNORE
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 0bac9c8246bc..f35db29cac76 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -343,7 +343,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
(char[20]) { }, /* address family */
(char[60]) { }, /* address */
NULL };
- char mb[12];
+ char mb[14];
char *argv[] = {usermode_helper, cmd, mb, NULL };
struct drbd_connection *connection = first_peer_device(device)->connection;
struct sib_info sib;
@@ -352,7 +352,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
if (current == connection->worker.task)
set_bit(CALLBACK_PENDING, &connection->flags);
- snprintf(mb, 12, "minor-%d", device_to_minor(device));
+ snprintf(mb, 14, "minor-%d", device_to_minor(device));
setup_khelper_env(connection, envp);
/* The helper may take some time.
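
On the mb[12] to mb[14] bump above: assuming Linux's 20-bit minor numbers (MINORBITS = 20, so minors go up to 1048575, i.e. seven decimal digits), the worst case for "minor-%d" is 6 prefix characters + 7 digits + 1 terminating NUL = 14 bytes, which the old 12-byte buffer did not cover.
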
@@ -387,7 +387,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
return ret;
}
-static int conn_khelper(struct drbd_connection *connection, char *cmd)
+enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
@@ -442,19 +442,17 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connec
}
rcu_read_unlock();
- if (fp == FP_NOT_AVAIL) {
- /* IO Suspending works on the whole resource.
- Do it only for one device. */
- vnr = 0;
- peer_device = idr_get_next(&connection->peer_devices, &vnr);
- drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0));
- }
-
return fp;
}
+static bool resource_is_suspended(struct drbd_resource *resource)
+{
+ return resource->susp || resource->susp_fen || resource->susp_nod;
+}
+
bool conn_try_outdate_peer(struct drbd_connection *connection)
{
+ struct drbd_resource * const resource = connection->resource;
unsigned int connect_cnt;
union drbd_state mask = { };
union drbd_state val = { };
@@ -462,21 +460,41 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
char *ex_to_string;
int r;
- spin_lock_irq(&connection->resource->req_lock);
+ spin_lock_irq(&resource->req_lock);
if (connection->cstate >= C_WF_REPORT_PARAMS) {
drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
- spin_unlock_irq(&connection->resource->req_lock);
+ spin_unlock_irq(&resource->req_lock);
return false;
}
connect_cnt = connection->connect_cnt;
- spin_unlock_irq(&connection->resource->req_lock);
+ spin_unlock_irq(&resource->req_lock);
fp = highest_fencing_policy(connection);
switch (fp) {
case FP_NOT_AVAIL:
drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
- goto out;
+ spin_lock_irq(&resource->req_lock);
+ if (connection->cstate < C_WF_REPORT_PARAMS) {
+ _conn_request_state(connection,
+ (union drbd_state) { { .susp_fen = 1 } },
+ (union drbd_state) { { .susp_fen = 0 } },
+ CS_VERBOSE | CS_HARD | CS_DC_SUSP);
+ /* We are no longer suspended due to the fencing policy.
+ * We may still be suspended due to the on-no-data-accessible policy.
+ * If that was OND_IO_ERROR, fail pending requests. */
+			if (!resource_is_suspended(resource))
+ _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
+ }
+ /* Else: in case we raced with a connection handshake,
+ * let the handshake figure out if we maybe can RESEND,
+ * and do not resume/fail pending requests here.
+ * Worst case is we stay suspended for now, which may be
+ * resolved by either re-establishing the replication link, or
+ * the next link failure, or eventually the administrator. */
+ spin_unlock_irq(&resource->req_lock);
+ return false;
+
case FP_DONT_CARE:
return true;
default: ;
@@ -485,17 +503,17 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
r = conn_khelper(connection, "fence-peer");
switch ((r>>8) & 0xff) {
- case 3: /* peer is inconsistent */
+ case P_INCONSISTENT: /* peer is inconsistent */
ex_to_string = "peer is inconsistent or worse";
mask.pdsk = D_MASK;
val.pdsk = D_INCONSISTENT;
break;
- case 4: /* peer got outdated, or was already outdated */
+ case P_OUTDATED: /* peer got outdated, or was already outdated */
ex_to_string = "peer was fenced";
mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED;
break;
- case 5: /* peer was down */
+ case P_DOWN: /* peer was down */
if (conn_highest_disk(connection) == D_UP_TO_DATE) {
/* we will(have) create(d) a new UUID anyways... */
ex_to_string = "peer is unreachable, assumed to be dead";
@@ -505,7 +523,7 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
}
break;
- case 6: /* Peer is primary, voluntarily outdate myself.
+ case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
* This is useful when an unconnected R_SECONDARY is asked to
* become R_PRIMARY, but finds the other peer being active. */
ex_to_string = "peer is active";
@@ -513,7 +531,9 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
mask.disk = D_MASK;
val.disk = D_OUTDATED;
break;
- case 7:
+ case P_FENCING:
+ /* THINK: do we need to handle this
+ * like case 4, or more like case 5? */
if (fp != FP_STONITH)
drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
ex_to_string = "peer was stonithed";
@@ -529,13 +549,11 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
drbd_info(connection, "fence-peer helper returned %d (%s)\n",
(r>>8) & 0xff, ex_to_string);
- out:
-
/* Not using
conn_request_state(connection, mask, val, CS_VERBOSE);
here, because we might have been able to re-establish the connection in the
meantime. */
- spin_lock_irq(&connection->resource->req_lock);
+ spin_lock_irq(&resource->req_lock);
if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
if (connection->connect_cnt != connect_cnt)
/* In case the connection was established and dropped
@@ -544,7 +562,7 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
else
_conn_request_state(connection, mask, val, CS_VERBOSE);
}
- spin_unlock_irq(&connection->resource->req_lock);
+ spin_unlock_irq(&resource->req_lock);
return conn_highest_pdsk(connection) <= D_OUTDATED;
}
@@ -1154,51 +1172,160 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
return 0;
}
+static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
+{
+ q->limits.discard_granularity = granularity;
+}
+
+static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
+{
+ /* when we introduced REQ_WRITE_SAME support, we also bumped
+ * our maximum supported batch bio size used for discards. */
+ if (connection->agreed_features & DRBD_FF_WSAME)
+ return DRBD_MAX_BBIO_SECTORS;
+ /* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
+ return AL_EXTENT_SIZE >> 9;
+}
+
+static void decide_on_discard_support(struct drbd_device *device,
+ struct request_queue *q,
+ struct request_queue *b,
+ bool discard_zeroes_if_aligned)
+{
+ /* q = drbd device queue (device->rq_queue)
+ * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
+ * or NULL if diskless
+ */
+ struct drbd_connection *connection = first_peer_device(device)->connection;
+ bool can_do = b ? blk_queue_discard(b) : true;
+
+ if (can_do && b && !b->limits.discard_zeroes_data && !discard_zeroes_if_aligned) {
+ can_do = false;
+ drbd_info(device, "discard_zeroes_data=0 and discard_zeroes_if_aligned=no: disabling discards\n");
+ }
+ if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
+ can_do = false;
+ drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
+ }
+ if (can_do) {
+ /* We don't care for the granularity, really.
+ * Stacking limits below should fix it for the local
+ * device. Whether or not it is a suitable granularity
+ * on the remote device is not our problem, really. If
+ * you care, you need to use devices with similar
+ * topology on all peers. */
+ blk_queue_discard_granularity(q, 512);
+ q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ } else {
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+ blk_queue_discard_granularity(q, 0);
+ q->limits.max_discard_sectors = 0;
+ }
+}
+
+static void fixup_discard_if_not_supported(struct request_queue *q)
+{
+ /* To avoid confusion, if this queue does not support discard, clear
+ * max_discard_sectors, which is what lsblk -D reports to the user.
+ * Older kernels got this wrong in "stack limits".
+	 */
+ if (!blk_queue_discard(q)) {
+ blk_queue_max_discard_sectors(q, 0);
+ blk_queue_discard_granularity(q, 0);
+ }
+}
+
+static void decide_on_write_same_support(struct drbd_device *device,
+ struct request_queue *q,
+ struct request_queue *b, struct o_qlim *o)
+{
+ struct drbd_peer_device *peer_device = first_peer_device(device);
+ struct drbd_connection *connection = peer_device->connection;
+ bool can_do = b ? b->limits.max_write_same_sectors : true;
+
+ if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
+ can_do = false;
+ drbd_info(peer_device, "peer does not support WRITE_SAME\n");
+ }
+
+ if (o) {
+ /* logical block size; queue_logical_block_size(NULL) is 512 */
+ unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
+ unsigned int me_lbs_b = queue_logical_block_size(b);
+ unsigned int me_lbs = queue_logical_block_size(q);
+
+ if (me_lbs_b != me_lbs) {
+ drbd_warn(device,
+ "logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
+ me_lbs, me_lbs_b);
+ /* rather disable write same than trigger some BUG_ON later in the scsi layer. */
+ can_do = false;
+ }
+ if (me_lbs_b != peer_lbs) {
+ drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
+ me_lbs, peer_lbs);
+ if (can_do) {
+ drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
+ can_do = false;
+ }
+ me_lbs = max(me_lbs, me_lbs_b);
+ /* We cannot change the logical block size of an in-use queue.
+ * We can only hope that access happens to be properly aligned.
+ * If not, the peer will likely produce an IO error, and detach. */
+ if (peer_lbs > me_lbs) {
+ if (device->state.role != R_PRIMARY) {
+ blk_queue_logical_block_size(q, peer_lbs);
+ drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
+ } else {
+ drbd_warn(peer_device,
+ "current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
+ me_lbs, peer_lbs);
+ }
+ }
+ }
+ if (can_do && !o->write_same_capable) {
+ /* If we introduce an open-coded write-same loop on the receiving side,
+ * the peer would present itself as "capable". */
+ drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
+ can_do = false;
+ }
+ }
+
+ blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
+}
+
static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
- unsigned int max_bio_size)
+ unsigned int max_bio_size, struct o_qlim *o)
{
struct request_queue * const q = device->rq_queue;
unsigned int max_hw_sectors = max_bio_size >> 9;
unsigned int max_segments = 0;
struct request_queue *b = NULL;
+ struct disk_conf *dc;
+ bool discard_zeroes_if_aligned = true;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
rcu_read_lock();
- max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
+ dc = rcu_dereference(device->ldev->disk_conf);
+ max_segments = dc->max_bio_bvecs;
+ discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
rcu_read_unlock();
blk_set_stacking_limits(&q->limits);
- blk_queue_max_write_same_sectors(q, 0);
}
- blk_queue_logical_block_size(q, 512);
blk_queue_max_hw_sectors(q, max_hw_sectors);
/* This is the workaround for "bio would need to, but cannot, be split" */
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_SIZE-1);
+ decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
+ decide_on_write_same_support(device, q, b, o);
if (b) {
- struct drbd_connection *connection = first_peer_device(device)->connection;
-
- blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);
-
- if (blk_queue_discard(b) &&
- (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) {
- /* We don't care, stacking below should fix it for the local device.
- * Whether or not it is a suitable granularity on the remote device
- * is not our problem, really. If you care, you need to
- * use devices with similar topology on all peers. */
- q->limits.discard_granularity = 512;
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
- } else {
- blk_queue_max_discard_sectors(q, 0);
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
- q->limits.discard_granularity = 0;
- }
-
blk_queue_stack_limits(q, b);
if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
@@ -1208,15 +1335,10 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
}
}
- /* To avoid confusion, if this queue does not support discard, clear
- * max_discard_sectors, which is what lsblk -D reports to the user. */
- if (!blk_queue_discard(q)) {
- blk_queue_max_discard_sectors(q, 0);
- q->limits.discard_granularity = 0;
- }
+ fixup_discard_if_not_supported(q);
}
-void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
+void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
{
unsigned int now, new, local, peer;
@@ -1259,7 +1381,7 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backin
if (new != now)
drbd_info(device, "max BIO size = %u\n", new);
- drbd_setup_queue_param(device, bdev, new);
+ drbd_setup_queue_param(device, bdev, new, o);
}
/* Starts the worker thread */
@@ -1348,6 +1470,43 @@ static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
a->disk_drain != b->disk_drain;
}
+static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
+ struct drbd_backing_dev *nbc)
+{
+ struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
+
+ if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+ disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+ if (disk_conf->al_extents > drbd_al_extents_max(nbc))
+ disk_conf->al_extents = drbd_al_extents_max(nbc);
+
+ if (!blk_queue_discard(q)
+ || (!q->limits.discard_zeroes_data && !disk_conf->discard_zeroes_if_aligned)) {
+ if (disk_conf->rs_discard_granularity) {
+ disk_conf->rs_discard_granularity = 0; /* disable feature */
+ drbd_info(device, "rs_discard_granularity feature disabled\n");
+ }
+ }
+
+ if (disk_conf->rs_discard_granularity) {
+ int orig_value = disk_conf->rs_discard_granularity;
+ int remainder;
+
+ if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
+ disk_conf->rs_discard_granularity = q->limits.discard_granularity;
+
+ remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
+ disk_conf->rs_discard_granularity += remainder;
+
+ if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
+ disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;
+
+ if (disk_conf->rs_discard_granularity != orig_value)
+ drbd_info(device, "rs_discard_granularity changed to %d\n",
+ disk_conf->rs_discard_granularity);
+ }
+}
+
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
@@ -1395,10 +1554,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (!expect(new_disk_conf->resync_rate >= 1))
new_disk_conf->resync_rate = 1;
- if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
- new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
- if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev))
- new_disk_conf->al_extents = drbd_al_extents_max(device->ldev);
+ sanitize_disk_conf(device, new_disk_conf, device->ldev);
if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
@@ -1457,6 +1613,9 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (write_ordering_changed(old_disk_conf, new_disk_conf))
drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
+ if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
+ drbd_reconsider_queue_parameters(device, device->ldev, NULL);
+
drbd_md_sync(device);
if (device->state.conn >= C_CONNECTED) {
@@ -1693,10 +1852,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto fail;
- if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
- new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
- if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
- new_disk_conf->al_extents = drbd_al_extents_max(nbc);
+ sanitize_disk_conf(device, new_disk_conf, nbc);
if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
@@ -1838,7 +1994,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
device->read_cnt = 0;
device->writ_cnt = 0;
- drbd_reconsider_max_bio_size(device, device->ldev);
+ drbd_reconsider_queue_parameters(device, device->ldev, NULL);
/* If I am currently not R_PRIMARY,
* but meta data primary indicator is set,
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 6537b25db9c1..be2b93fd2c11 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -25,7 +25,7 @@
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
@@ -122,18 +122,18 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
x = res/50;
y = 20-x;
- seq_printf(seq, "\t[");
+ seq_puts(seq, "\t[");
for (i = 1; i < x; i++)
- seq_printf(seq, "=");
- seq_printf(seq, ">");
+ seq_putc(seq, '=');
+ seq_putc(seq, '>');
for (i = 0; i < y; i++)
seq_printf(seq, ".");
- seq_printf(seq, "] ");
+ seq_puts(seq, "] ");
if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T)
- seq_printf(seq, "verified:");
+ seq_puts(seq, "verified:");
else
- seq_printf(seq, "sync'ed:");
+ seq_puts(seq, "sync'ed:");
seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);
/* if more than a few GB, display in MB */
@@ -146,7 +146,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
(unsigned long) Bit2KB(rs_left),
(unsigned long) Bit2KB(rs_total));
- seq_printf(seq, "\n\t");
+ seq_puts(seq, "\n\t");
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
@@ -175,9 +175,9 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
rt / 3600, (rt % 3600) / 60, rt % 60);
dbdt = Bit2KB(db/dt);
- seq_printf(seq, " speed: ");
+ seq_puts(seq, " speed: ");
seq_printf_with_thousands_grouping(seq, dbdt);
- seq_printf(seq, " (");
+ seq_puts(seq, " (");
/* ------------------------- ~3s average ------------------------ */
if (proc_details >= 1) {
/* this is what drbd_rs_should_slow_down() uses */
@@ -188,7 +188,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
db = device->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
- seq_printf(seq, " -- ");
+ seq_puts(seq, " -- ");
}
/* --------------------- long term average ---------------------- */
@@ -200,11 +200,11 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
db = rs_total - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
- seq_printf(seq, ")");
+ seq_putc(seq, ')');
if (state.conn == C_SYNC_TARGET ||
state.conn == C_VERIFY_S) {
- seq_printf(seq, " want: ");
+ seq_puts(seq, " want: ");
seq_printf_with_thousands_grouping(seq, device->c_sync_rate);
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
@@ -231,7 +231,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
(unsigned long long)bm_bits * BM_SECT_PER_BIT);
if (stop_sector != 0 && stop_sector != ULLONG_MAX)
seq_printf(seq, " stop sector: %llu", stop_sector);
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
}
}
@@ -276,7 +276,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
rcu_read_lock();
idr_for_each_entry(&drbd_devices, device, i) {
if (prev_i != i - 1)
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
prev_i = i;
state = device->state;
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
index ef9245363dcc..4d296800f706 100644
--- a/drivers/block/drbd/drbd_protocol.h
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -60,6 +60,15 @@ enum drbd_packet {
* which is why I chose TRIM here, to disambiguate. */
P_TRIM = 0x31,
+ /* Only use these two if both support FF_THIN_RESYNC */
+ P_RS_THIN_REQ = 0x32, /* Request a block for resync or reply P_RS_DEALLOCATED */
+ P_RS_DEALLOCATED = 0x33, /* Contains only zeros on sync source node */
+
+ /* REQ_WRITE_SAME.
+ * On a receiving side without REQ_WRITE_SAME,
+ * we may fall back to an open-coded loop instead. */
+ P_WSAME = 0x34,
+
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
@@ -106,16 +115,20 @@ struct p_header100 {
u32 pad;
} __packed;
-/* these defines must not be changed without changing the protocol version */
-#define DP_HARDBARRIER 1 /* depricated */
+/* These defines must not be changed without changing the protocol version.
+ * New defines may only be introduced together with protocol version bump or
+ * new protocol feature flags.
+ */
+#define DP_HARDBARRIER 1 /* no longer used */
#define DP_RW_SYNC 2 /* equals REQ_SYNC */
#define DP_MAY_SET_IN_SYNC 4
#define DP_UNPLUG 8 /* not used anymore */
#define DP_FUA 16 /* equals REQ_FUA */
-#define DP_FLUSH 32 /* equals REQ_FLUSH */
+#define DP_FLUSH 32 /* equals REQ_PREFLUSH */
#define DP_DISCARD 64 /* equals REQ_DISCARD */
#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
+#define DP_WSAME 512 /* equiv. REQ_WRITE_SAME */
struct p_data {
u64 sector; /* 64 bits sector number */
@@ -129,6 +142,11 @@ struct p_trim {
u32 size; /* == bio->bi_size */
} __packed;
+struct p_wsame {
+ struct p_data p_data;
+ u32 size; /* == bio->bi_size */
+} __packed;
+
/*
* commands which share a struct:
* p_block_ack:
@@ -160,7 +178,23 @@ struct p_block_req {
* ReportParams
*/
-#define FF_TRIM 1
+/* supports TRIM/DISCARD on the "wire" protocol */
+#define DRBD_FF_TRIM 1
+
+/* Detect all-zeros during resync, and TRIM/UNMAP/DISCARD those blocks
+ * instead of fully allocating a supposedly thin volume on initial resync */
+#define DRBD_FF_THIN_RESYNC 2
+
+/* supports REQ_WRITE_SAME on the "wire" protocol.
+ * Note: this flag is overloaded,
+ * its presence also
+ * - indicates support for 128 MiB "batch bios",
+ * max discard size of 128 MiB
+ * instead of 4M before that.
+ * - indicates that we exchange additional settings in p_sizes
+ * drbd_send_sizes()/receive_sizes()
+ */
+#define DRBD_FF_WSAME 4
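
These feature flags are presumably intersected during the handshake, so agreed_features ends up as the bitwise AND of both sides' advertised PRO_FEATURES. A minimal sketch of that negotiation:

#include <stdio.h>

#define DRBD_FF_TRIM        1
#define DRBD_FF_THIN_RESYNC 2
#define DRBD_FF_WSAME       4

int main(void)
{
	unsigned int mine   = DRBD_FF_TRIM | DRBD_FF_THIN_RESYNC | DRBD_FF_WSAME;
	unsigned int peer   = DRBD_FF_TRIM;		/* an older peer */
	unsigned int agreed = mine & peer;		/* intersection */

	printf("WSAME usable: %s\n",
	       (agreed & DRBD_FF_WSAME) ? "yes" : "no");	/* -> no */
	return 0;
}
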
struct p_connection_features {
u32 protocol_min;
@@ -235,6 +269,40 @@ struct p_rs_uuid {
u64 uuid;
} __packed;
+/* optional queue_limits if (agreed_features & DRBD_FF_WSAME)
+ * see also struct queue_limits, as of late 2015 */
+struct o_qlim {
+ /* we don't need it yet, but we may as well communicate it now */
+ u32 physical_block_size;
+
+ /* so the original in struct queue_limits is unsigned short,
+ * but I'd have to put in padding anyway. */
+ u32 logical_block_size;
+
+ /* One incoming bio becomes one DRBD request,
+ * which may be translated to several bio on the receiving side.
+ * We don't need to communicate chunk/boundary/segment ... limits.
+ */
+
+ /* various IO hints may be useful with "diskless client" setups */
+ u32 alignment_offset;
+ u32 io_min;
+ u32 io_opt;
+
+ /* We may need to communicate integrity stuff at some point,
+ * but let's not get ahead of ourselves. */
+
+ /* Backend discard capabilities.
+ * Receiving side uses "blkdev_issue_discard()", no need to communicate
+ * more specifics. If the backend cannot do discards, the DRBD peer
+ * may fall back to blkdev_issue_zeroout().
+ */
+ u8 discard_enabled;
+ u8 discard_zeroes_data;
+ u8 write_same_capable;
+ u8 _pad;
+} __packed;
+
struct p_sizes {
u64 d_size; /* size of disk */
u64 u_size; /* user requested size */
@@ -242,6 +310,9 @@ struct p_sizes {
u32 max_bio_size; /* Maximal size of a BIO */
u16 queue_order_type; /* not yet implemented in DRBD*/
u16 dds_flags; /* use enum dds_flags here. */
+
+ /* optional queue_limits if (agreed_features & DRBD_FF_WSAME) */
+ struct o_qlim qlim[0];
} __packed;
struct p_state {
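
The qlim[0] flexible tail is what lets drbd_send_sizes() above send sizeof(*p) bytes to old peers and sizeof(*p) + sizeof(p->qlim[0]) once DRBD_FF_WSAME was agreed: a zero-length trailing array contributes nothing to sizeof. A GNU C sketch with simplified stand-in fields:

#include <stdio.h>
#include <stdint.h>

struct o_qlim {
	uint32_t logical_block_size;
	uint8_t  flags[4];
} __attribute__((packed));

struct p_sizes {
	uint64_t d_size;
	uint32_t max_bio_size;
	struct o_qlim qlim[0];	/* optional trailer, sizeof contribution: 0 */
} __attribute__((packed));

int main(void)
{
	size_t packet_size = sizeof(struct p_sizes);	/* no qlim */
	int agreed_wsame = 1;	/* pretend the handshake agreed on DRBD_FF_WSAME */

	if (agreed_wsame)
		packet_size += sizeof(struct o_qlim);
	printf("base %zu, with qlim %zu\n", sizeof(struct p_sizes), packet_size);
	return 0;
}
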
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 050aaa1c0350..df45713dfbe8 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -25,7 +25,7 @@
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/drbd.h>
@@ -48,7 +48,7 @@
#include "drbd_req.h"
#include "drbd_vli.h"
-#define PRO_FEATURES (FF_TRIM)
+#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME)
struct packet_info {
enum drbd_packet cmd;
@@ -361,14 +361,17 @@ You must not have the req_lock:
drbd_wait_ee_list_empty()
*/
+/* normal: payload_size == request size (bi_size)
+ * w_same: payload_size == logical_block_size
+ * trim: payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
- unsigned int data_size, bool has_payload, gfp_t gfp_mask) __must_hold(local)
+ unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
struct page *page = NULL;
- unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ unsigned nr_pages = (payload_size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
return NULL;
@@ -380,7 +383,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return NULL;
}
- if (has_payload && data_size) {
+ if (nr_pages) {
page = drbd_alloc_pages(peer_device, nr_pages,
gfpflags_allow_blocking(gfp_mask));
if (!page)
@@ -390,7 +393,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
memset(peer_req, 0, sizeof(*peer_req));
INIT_LIST_HEAD(&peer_req->w.list);
drbd_clear_interval(&peer_req->i);
- peer_req->i.size = data_size;
+ peer_req->i.size = request_size;
peer_req->i.sector = sector;
peer_req->submit_jif = jiffies;
peer_req->peer_device = peer_device;
@@ -1204,13 +1207,84 @@ static int drbd_recv_header(struct drbd_connection *connection, struct packet_in
return err;
}
-static void drbd_flush(struct drbd_connection *connection)
+/* This is blkdev_issue_flush, but asynchronous.
+ * We want to submit to all component volumes in parallel,
+ * then wait for all completions.
+ */
+struct issue_flush_context {
+ atomic_t pending;
+ int error;
+ struct completion done;
+};
+struct one_flush_context {
+ struct drbd_device *device;
+ struct issue_flush_context *ctx;
+};
+
+void one_flush_endio(struct bio *bio)
{
- int rv;
- struct drbd_peer_device *peer_device;
- int vnr;
+ struct one_flush_context *octx = bio->bi_private;
+ struct drbd_device *device = octx->device;
+ struct issue_flush_context *ctx = octx->ctx;
+
+ if (bio->bi_error) {
+ ctx->error = bio->bi_error;
+ drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error);
+ }
+ kfree(octx);
+ bio_put(bio);
+
+ clear_bit(FLUSH_PENDING, &device->flags);
+ put_ldev(device);
+ kref_put(&device->kref, drbd_destroy_device);
+
+ if (atomic_dec_and_test(&ctx->pending))
+ complete(&ctx->done);
+}
+
+static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
+{
+ struct bio *bio = bio_alloc(GFP_NOIO, 0);
+ struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
+ if (!bio || !octx) {
+ drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
+ /* FIXME: what else can I do now? disconnecting or detaching
+ * really does not help to improve the state of the world, either.
+ */
+ kfree(octx);
+ if (bio)
+ bio_put(bio);
+
+ ctx->error = -ENOMEM;
+ put_ldev(device);
+ kref_put(&device->kref, drbd_destroy_device);
+ return;
+ }
+ octx->device = device;
+ octx->ctx = ctx;
+ bio->bi_bdev = device->ldev->backing_bdev;
+ bio->bi_private = octx;
+ bio->bi_end_io = one_flush_endio;
+ bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH);
+
+ device->flush_jif = jiffies;
+ set_bit(FLUSH_PENDING, &device->flags);
+ atomic_inc(&ctx->pending);
+ submit_bio(bio);
+}
+
+static void drbd_flush(struct drbd_connection *connection)
+{
if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
+ struct drbd_peer_device *peer_device;
+ struct issue_flush_context ctx;
+ int vnr;
+
+ atomic_set(&ctx.pending, 1);
+ ctx.error = 0;
+ init_completion(&ctx.done);
+
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
@@ -1220,31 +1294,24 @@ static void drbd_flush(struct drbd_connection *connection)
kref_get(&device->kref);
rcu_read_unlock();
- /* Right now, we have only this one synchronous code path
- * for flushes between request epochs.
- * We may want to make those asynchronous,
- * or at least parallelize the flushes to the volume devices.
- */
- device->flush_jif = jiffies;
- set_bit(FLUSH_PENDING, &device->flags);
- rv = blkdev_issue_flush(device->ldev->backing_bdev,
- GFP_NOIO, NULL);
- clear_bit(FLUSH_PENDING, &device->flags);
- if (rv) {
- drbd_info(device, "local disk flush failed with status %d\n", rv);
- /* would rather check on EOPNOTSUPP, but that is not reliable.
- * don't try again for ANY return value != 0
- * if (rv == -EOPNOTSUPP) */
- drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
- }
- put_ldev(device);
- kref_put(&device->kref, drbd_destroy_device);
+ submit_one_flush(device, &ctx);
rcu_read_lock();
- if (rv)
- break;
}
rcu_read_unlock();
+
+ /* Do we want to add a timeout,
+ * if disk-timeout is set? */
+ if (!atomic_dec_and_test(&ctx.pending))
+ wait_for_completion(&ctx.done);
+
+ if (ctx.error) {
+ /* would rather check on EOPNOTSUPP, but that is not reliable.
+ * don't try again for ANY return value != 0
+ * if (rv == -EOPNOTSUPP) */
+ /* Any error is already reported by bio_endio callback. */
+ drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
+ }
}
}
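
drbd_flush() above seeds ctx.pending with 1 before submitting anything, so a flush that completes immediately cannot signal the completion while later volumes are still being submitted; only the submitter's final atomic_dec_and_test() can take the count to zero. A single-threaded sketch of that "bias of one" pattern:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int pending = 1;		/* submitter holds one reference */
	int completed = 0;

	for (int i = 0; i < 3; i++)
		atomic_fetch_add(&pending, 1);	/* one per submit_one_flush() */

	for (int i = 0; i < 3; i++)		/* the endio callbacks */
		if (atomic_fetch_sub(&pending, 1) == 1)
			completed = 1;		/* never taken: bias still held */

	/* submitter drops its bias; only now can the count reach zero */
	if (atomic_fetch_sub(&pending, 1) == 1)
		completed = 1;

	printf("completed = %d\n", completed);	/* -> 1 */
	return 0;
}
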
@@ -1379,6 +1446,120 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
}
+/*
+ * We *may* ignore the discard-zeroes-data setting, if so configured.
+ *
+ * Assumption is that "discard_zeroes_data=0" is only set because the backend
+ * may ignore partial unaligned discards.
+ *
+ * LVM/DM thin as of at least
+ * LVM version: 2.02.115(2)-RHEL7 (2015-01-28)
+ * Library version: 1.02.93-RHEL7 (2015-01-28)
+ * Driver version: 4.29.0
+ * still behaves this way.
+ *
+ * For unaligned (wrt. alignment and granularity) or too small discards,
+ * we zero out the initial and/or trailing unaligned partial chunks,
+ * but discard all the aligned full chunks.
+ *
+ * At least for LVM/DM thin, the result is effectively "discard_zeroes_data=1".
+ */
+int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, bool discard)
+{
+ struct block_device *bdev = device->ldev->backing_bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ sector_t tmp, nr;
+ unsigned int max_discard_sectors, granularity;
+ int alignment;
+ int err = 0;
+
+ if (!discard)
+ goto zero_out;
+
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+ alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
+ max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22));
+ max_discard_sectors -= max_discard_sectors % granularity;
+ if (unlikely(!max_discard_sectors))
+ goto zero_out;
+
+ if (nr_sectors < granularity)
+ goto zero_out;
+
+ tmp = start;
+ if (sector_div(tmp, granularity) != alignment) {
+ if (nr_sectors < 2*granularity)
+ goto zero_out;
+ /* start + gran - (start + gran - align) % gran */
+ tmp = start + granularity - alignment;
+ tmp = start + granularity - sector_div(tmp, granularity);
+
+ nr = tmp - start;
+ err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0);
+ nr_sectors -= nr;
+ start = tmp;
+ }
+ while (nr_sectors >= granularity) {
+ nr = min_t(sector_t, nr_sectors, max_discard_sectors);
+ err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0);
+ nr_sectors -= nr;
+ start += nr;
+ }
+ zero_out:
+ if (nr_sectors) {
+ err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO, 0);
+ }
+ return err != 0;
+}
+
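
The head-alignment arithmetic in drbd_issue_discard_or_zero_out() is easiest to see with numbers: with a granularity of 8 sectors, alignment 0 and start sector 5, the first aligned boundary is sector 8, so 3 sectors are zeroed out before discards begin. A userspace re-run of that computation (using % in place of sector_div()):

#include <stdio.h>

int main(void)
{
	unsigned long start = 5, granularity = 8, alignment = 0;
	unsigned long tmp = start + granularity - alignment;
	/* first boundary at or after start that matches the alignment */
	unsigned long aligned = start + granularity - (tmp % granularity);

	printf("zero out %lu head sectors, first discard at sector %lu\n",
	       aligned - start, aligned);	/* -> 3 sectors, sector 8 */
	return 0;
}
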
+static bool can_do_reliable_discards(struct drbd_device *device)
+{
+ struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
+ struct disk_conf *dc;
+ bool can_do;
+
+ if (!blk_queue_discard(q))
+ return false;
+
+ if (q->limits.discard_zeroes_data)
+ return true;
+
+ rcu_read_lock();
+ dc = rcu_dereference(device->ldev->disk_conf);
+ can_do = dc->discard_zeroes_if_aligned;
+ rcu_read_unlock();
+ return can_do;
+}
+
+static void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req)
+{
+ /* If the backend cannot discard, or does not guarantee
+ * read-back zeroes in discarded ranges, we fall back to
+ * zero-out. Unless configuration specifically requested
+ * otherwise. */
+ if (!can_do_reliable_discards(device))
+ peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
+
+ if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
+ peer_req->i.size >> 9, !(peer_req->flags & EE_IS_TRIM_USE_ZEROOUT)))
+ peer_req->flags |= EE_WAS_ERROR;
+ drbd_endio_write_sec_final(peer_req);
+}
+
+static void drbd_issue_peer_wsame(struct drbd_device *device,
+ struct drbd_peer_request *peer_req)
+{
+ struct block_device *bdev = device->ldev->backing_bdev;
+ sector_t s = peer_req->i.sector;
+ sector_t nr = peer_req->i.size >> 9;
+ if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
+ peer_req->flags |= EE_WAS_ERROR;
+ drbd_endio_write_sec_final(peer_req);
+}
+
+
/**
* drbd_submit_peer_request()
* @device: DRBD device.
@@ -1398,7 +1579,8 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_device *device,
struct drbd_peer_request *peer_req,
- const unsigned rw, const int fault_type)
+ const unsigned op, const unsigned op_flags,
+ const int fault_type)
{
struct bio *bios = NULL;
struct bio *bio;
@@ -1409,7 +1591,13 @@ int drbd_submit_peer_request(struct drbd_device *device,
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
int err = -ENOMEM;
- if (peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) {
+ /* TRIM/DISCARD and WRITE_SAME: for now, handled synchronously by the
+ * helpers above, which do the right thing wrt. bio splitting.
+ * Correctness first, performance later. Next step is to code an
+ * asynchronous variant of the same.
+ */
+ if (peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) {
/* wait for all pending IO completions, before we start
* zeroing things out. */
conn_wait_active_ee_empty(peer_req->peer_device->connection);
@@ -1417,22 +1605,22 @@ int drbd_submit_peer_request(struct drbd_device *device,
* so we can find it to present it in debugfs */
peer_req->submit_jif = jiffies;
peer_req->flags |= EE_SUBMITTED;
- spin_lock_irq(&device->resource->req_lock);
- list_add_tail(&peer_req->w.list, &device->active_ee);
- spin_unlock_irq(&device->resource->req_lock);
- if (blkdev_issue_zeroout(device->ldev->backing_bdev,
- sector, data_size >> 9, GFP_NOIO, false))
- peer_req->flags |= EE_WAS_ERROR;
- drbd_endio_write_sec_final(peer_req);
+
+ /* If this was a resync request from receive_rs_deallocated(),
+ * it is already on the sync_ee list */
+ if (list_empty(&peer_req->w.list)) {
+ spin_lock_irq(&device->resource->req_lock);
+ list_add_tail(&peer_req->w.list, &device->active_ee);
+ spin_unlock_irq(&device->resource->req_lock);
+ }
+
+ if (peer_req->flags & EE_IS_TRIM)
+ drbd_issue_peer_discard(device, peer_req);
+ else /* EE_WRITE_SAME */
+ drbd_issue_peer_wsame(device, peer_req);
return 0;
}
- /* Discards don't have any payload.
- * But the scsi layer still expects a bio_vec it can use internally,
- * see sd_setup_discard_cmnd() and blk_add_request_payload(). */
- if (peer_req->flags & EE_IS_TRIM)
- nr_pages = 1;
-
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
* side than those of the sending peer, we may need to submit the
@@ -1450,7 +1638,7 @@ next_bio:
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = device->ldev->backing_bdev;
- bio->bi_rw = rw;
+ bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
@@ -1458,11 +1646,6 @@ next_bio:
bios = bio;
++n_bios;
- if (rw & REQ_DISCARD) {
- bio->bi_iter.bi_size = data_size;
- goto submit;
- }
-
page_chain_for_each(page) {
unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
if (!bio_add_page(bio, page, len, 0)) {
@@ -1484,7 +1667,6 @@ next_bio:
--nr_pages;
}
D_ASSERT(device, data_size == 0);
-submit:
D_ASSERT(device, page == NULL);
atomic_set(&peer_req->pending_bios, n_bios);
@@ -1608,8 +1790,26 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
return 0;
}
+/* quick wrapper in case payload size != request_size (write same):
+ * the digest covers only the payload actually received, while i.size
+ * spans the whole range to be affected, so narrow i.size around the
+ * checksum and restore it afterwards */
+static void drbd_csum_ee_size(struct crypto_ahash *h,
+ struct drbd_peer_request *r, void *d,
+ unsigned int payload_size)
+{
+ unsigned int tmp = r->i.size;
+ r->i.size = payload_size;
+ drbd_csum_ee(h, r, d);
+ r->i.size = tmp;
+}
+
/* used from receive_RSDataReply (recv_resync_read)
- * and from receive_Data */
+ * and from receive_Data.
+ * data_size: actual payload ("data in")
+ * for normal writes that is bi_size.
+ * for discards, that is zero.
+ * for write same, it is logical_block_size.
+ * both trim and write same have the bi_size ("data len to be affected")
+ * as extra argument in the packet header.
+ */
static struct drbd_peer_request *
read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
struct packet_info *pi) __must_hold(local)
@@ -1624,6 +1824,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
void *dig_vv = peer_device->connection->int_dig_vv;
unsigned long *data;
struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
+ struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL;
digest_size = 0;
if (!trim && peer_device->connection->peer_integrity_tfm) {
@@ -1638,38 +1839,60 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
data_size -= digest_size;
}
+ /* assume request_size == data_size, but special case trim and wsame. */
+ ds = data_size;
if (trim) {
- D_ASSERT(peer_device, data_size == 0);
- data_size = be32_to_cpu(trim->size);
+ if (!expect(data_size == 0))
+ return NULL;
+ ds = be32_to_cpu(trim->size);
+ } else if (wsame) {
+ if (data_size != queue_logical_block_size(device->rq_queue)) {
+ drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n",
+ data_size, queue_logical_block_size(device->rq_queue));
+ return NULL;
+ }
+ if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) {
+ drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n",
+ data_size, bdev_logical_block_size(device->ldev->backing_bdev));
+ return NULL;
+ }
+ ds = be32_to_cpu(wsame->size);
}
- if (!expect(IS_ALIGNED(data_size, 512)))
+ if (!expect(IS_ALIGNED(ds, 512)))
return NULL;
- /* prepare for larger trim requests. */
- if (!trim && !expect(data_size <= DRBD_MAX_BIO_SIZE))
+ if (trim || wsame) {
+ if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
+ return NULL;
+ } else if (!expect(ds <= DRBD_MAX_BIO_SIZE))
return NULL;
/* even though we trust our peer,
* we sometimes have to double check. */
- if (sector + (data_size>>9) > capacity) {
+ if (sector + (ds>>9) > capacity) {
drbd_err(device, "request from peer beyond end of local disk: "
"capacity: %llus < sector: %llus + size: %u\n",
(unsigned long long)capacity,
- (unsigned long long)sector, data_size);
+ (unsigned long long)sector, ds);
return NULL;
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, trim == NULL, GFP_NOIO);
+ peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
if (!peer_req)
return NULL;
peer_req->flags |= EE_WRITE;
- if (trim)
+ if (trim) {
+ peer_req->flags |= EE_IS_TRIM;
return peer_req;
+ }
+ if (wsame)
+ peer_req->flags |= EE_WRITE_SAME;
+ /* receive payload size bytes into page chain */
ds = data_size;
page = peer_req->pages;
page_chain_for_each(page) {
@@ -1689,7 +1912,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
}
if (digest_size) {
- drbd_csum_ee(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv);
+ drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
if (memcmp(dig_in, dig_vv, digest_size)) {
drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
(unsigned long long)sector, data_size);
@@ -1830,7 +2053,8 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
spin_unlock_irq(&device->resource->req_lock);
atomic_add(pi->size >> 9, &device->rs_sect_ev);
- if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
+ DRBD_FAULT_RS_WR) == 0)
return 0;
/* don't care for the reason here */
@@ -2065,13 +2289,13 @@ static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
struct drbd_peer_request *rs_req;
- bool rv = 0;
+ bool rv = false;
spin_lock_irq(&device->resource->req_lock);
list_for_each_entry(rs_req, &device->sync_ee, w.list) {
if (overlaps(peer_req->i.sector, peer_req->i.size,
rs_req->i.sector, rs_req->i.size)) {
- rv = 1;
+ rv = true;
break;
}
}
@@ -2152,12 +2376,19 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co
/* see also bio_flags_to_wire()
* DRBD_REQ_*, because we need to semantically map the flags to data packet
* flags and back. We may replicate to other kernel versions. */
-static unsigned long wire_flags_to_bio(u32 dpf)
+static unsigned long wire_flags_to_bio_flags(u32 dpf)
{
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) |
- (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
- (dpf & DP_DISCARD ? REQ_DISCARD : 0);
+ (dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
+}
+
+static unsigned long wire_flags_to_bio_op(u32 dpf)
+{
+ if (dpf & DP_DISCARD)
+ return REQ_OP_DISCARD;
+ else
+ return REQ_OP_WRITE;
}
static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
@@ -2303,7 +2534,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
struct drbd_peer_request *peer_req;
struct p_data *p = pi->data;
u32 peer_seq = be32_to_cpu(p->seq_num);
- int rw = WRITE;
+ int op, op_flags;
u32 dp_flags;
int err, tp;
@@ -2342,14 +2573,11 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
peer_req->flags |= EE_APPLICATION;
dp_flags = be32_to_cpu(p->dp_flags);
- rw |= wire_flags_to_bio(dp_flags);
+ op = wire_flags_to_bio_op(dp_flags);
+ op_flags = wire_flags_to_bio_flags(dp_flags);
if (pi->cmd == P_TRIM) {
- struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
- peer_req->flags |= EE_IS_TRIM;
- if (!blk_queue_discard(q))
- peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
D_ASSERT(peer_device, peer_req->i.size > 0);
- D_ASSERT(peer_device, rw & REQ_DISCARD);
+ D_ASSERT(peer_device, op == REQ_OP_DISCARD);
D_ASSERT(peer_device, peer_req->pages == NULL);
} else if (peer_req->pages == NULL) {
D_ASSERT(device, peer_req->i.size == 0);
@@ -2414,11 +2642,11 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
update_peer_seq(peer_device, peer_seq);
spin_lock_irq(&device->resource->req_lock);
}
- /* if we use the zeroout fallback code, we process synchronously
- * and we wait for all pending requests, respectively wait for
+ /* TRIM and WRITE_SAME are processed synchronously,
+ * we wait for all pending requests, respectively wait for
* active_ee to become empty in drbd_submit_peer_request();
* better not add ourselves here. */
- if ((peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) == 0)
+ if ((peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) == 0)
list_add_tail(&peer_req->w.list, &device->active_ee);
spin_unlock_irq(&device->resource->req_lock);
@@ -2433,7 +2661,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
}
- err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR);
+ err = drbd_submit_peer_request(device, peer_req, op, op_flags,
+ DRBD_FAULT_DT_WR);
if (!err)
return 0;
@@ -2449,7 +2678,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
}
out_interrupted:
- drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT + EV_CLEANUP);
+ drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
put_ldev(device);
drbd_free_peer_req(device, peer_req);
return err;
@@ -2574,6 +2803,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
case P_DATA_REQUEST:
drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
break;
+ case P_RS_THIN_REQ:
case P_RS_DATA_REQUEST:
case P_CSUM_RS_REQUEST:
case P_OV_REQUEST:
@@ -2599,7 +2829,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
- true /* has real payload */, GFP_NOIO);
+ size, GFP_NOIO);
if (!peer_req) {
put_ldev(device);
return -ENOMEM;
@@ -2613,6 +2843,12 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
peer_req->flags |= EE_APPLICATION;
goto submit;
+ case P_RS_THIN_REQ:
+ /* If at some point in the future we have a smart way to
+ find out if this data block is completely deallocated,
+ then we would do something smarter here than reading
+ the block... */
+ peer_req->flags |= EE_RS_THIN_REQ;
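+ /* fall through: served like a normal resync read; only the
+ * reply may be P_RS_DEALLOCATED instead of P_RS_DATA_REPLY */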
case P_RS_DATA_REQUEST:
peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
@@ -2723,7 +2959,8 @@ submit_for_resync:
submit:
update_receiver_timing_details(connection, drbd_submit_peer_request);
inc_unacked(device);
- if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+ fault_type) == 0)
return 0;
/* don't care for the reason here */
@@ -2957,7 +3194,8 @@ static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
-1091 requires proto 91
-1096 requires proto 96
*/
-static int drbd_uuid_compare(struct drbd_device *const device, int *rule_nr) __must_hold(local)
+
+static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
{
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
@@ -3037,8 +3275,39 @@ static int drbd_uuid_compare(struct drbd_device *const device, int *rule_nr) __m
* next bit (weight 2) is set when peer was primary */
*rule_nr = 40;
+ /* Neither has the "crashed primary" flag set,
+ * only a replication link hiccup. */
+ if (rct == 0)
+ return 0;
+
+ /* Current UUID equal and no bitmap uuid; does not necessarily
+ * mean this was a "simultaneous hard crash", maybe IO was
+ * frozen, so no UUID-bump happened.
+ * This is a protocol change, overload DRBD_FF_WSAME as flag
+ * for "new-enough" peer DRBD version. */
+ if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
+ *rule_nr = 41;
+ if (!(connection->agreed_features & DRBD_FF_WSAME)) {
+ drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
+ return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
+ }
+ if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
+ /* At least one has the "crashed primary" bit set,
+ * both are primary now, but neither has rotated its UUIDs?
+ * "Can not happen." */
+ drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
+ return -100;
+ }
+ if (device->state.role == R_PRIMARY)
+ return 1;
+ return -1;
+ }
+
+ /* Both are secondary.
+ * Really looks like recovery from simultaneous hard crash.
+ * Check which had been primary before, and arbitrate. */
switch (rct) {
- case 0: /* !self_pri && !peer_pri */ return 0;
+ case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
@@ -3165,7 +3434,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
drbd_uuid_dump(device, "peer", device->p_uuid,
device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
- hg = drbd_uuid_compare(device, &rule_nr);
+ hg = drbd_uuid_compare(device, peer_role, &rule_nr);
spin_unlock_irq(&device->ldev->md.uuid_lock);
drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
@@ -3174,6 +3443,15 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
drbd_alert(device, "Unrelated data, aborting!\n");
return C_MASK;
}
+ if (hg < -0x10000) {
+ int proto, fflags;
+ hg = -hg;
+ proto = hg & 0xff;
+ fflags = (hg >> 8) & 0xff;
+ drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
+ proto, fflags);
+ return C_MASK;
+ }
if (hg < -1000) {
drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
return C_MASK;
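
The sentinel returned under rule 41 packs the required protocol version and
feature-flag mask into a single negative value, decoded just above. A
self-contained round-trip check (the concrete constants are assumed for
illustration only; PRO_VERSION_MAX and DRBD_FF_WSAME are defined elsewhere):

    #include <assert.h>

    int main(void)
    {
            /* assumed: protocol max 101, WSAME feature flag 0x4 */
            int hg = -(0x10000 | 101 | (0x4 << 8));

            assert(hg < -0x10000);             /* routed to this branch */
            hg = -hg;
            assert((hg & 0xff) == 101);        /* required protocol */
            assert(((hg >> 8) & 0xff) == 0x4); /* required feature flags */
            return 0;
    }
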
@@ -3403,7 +3681,8 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
*/
peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
- if (!peer_integrity_tfm) {
+ if (IS_ERR(peer_integrity_tfm)) {
+ peer_integrity_tfm = NULL;
drbd_err(connection, "peer data-integrity-alg %s not supported\n",
integrity_alg);
goto disconnect;
@@ -3754,6 +4033,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_sizes *p = pi->data;
+ struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
enum determine_dev_size dd = DS_UNCHANGED;
sector_t p_size, p_usize, p_csize, my_usize;
int ldsc = 0; /* local disk size changed */
@@ -3773,6 +4053,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
device->p_size = p_size;
if (get_ldev(device)) {
+ sector_t new_size, cur_size;
rcu_read_lock();
my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
@@ -3789,11 +4070,13 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
- if (drbd_new_dev_size(device, device->ldev, p_usize, 0) <
- drbd_get_capacity(device->this_bdev) &&
+ new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
+ cur_size = drbd_get_capacity(device->this_bdev);
+ if (new_size < cur_size &&
device->state.disk >= D_OUTDATED &&
device->state.conn < C_CONNECTED) {
- drbd_err(device, "The peer's disk size is too small!\n");
+ drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
+ (unsigned long long)new_size, (unsigned long long)cur_size);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
put_ldev(device);
return -EIO;
@@ -3827,14 +4110,14 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
}
device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
- /* Leave drbd_reconsider_max_bio_size() before drbd_determine_dev_size().
+ /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size().
In case we cleared the QUEUE_FLAG_DISCARD from our queue in
- drbd_reconsider_max_bio_size(), we can be sure that after
+ drbd_reconsider_queue_parameters(), we can be sure that after
drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
ddsf = be16_to_cpu(p->dds_flags);
if (get_ldev(device)) {
- drbd_reconsider_max_bio_size(device, device->ldev);
+ drbd_reconsider_queue_parameters(device, device->ldev, o);
dd = drbd_determine_dev_size(device, ddsf, NULL);
put_ldev(device);
if (dd == DS_ERROR)
@@ -3854,7 +4137,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
* However, if he sends a zero current size,
* take his (user-capped or) backing disk size anyway.
*/
- drbd_reconsider_max_bio_size(device, NULL);
+ drbd_reconsider_queue_parameters(device, NULL, o);
drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size);
}
@@ -4587,9 +4870,75 @@ static int receive_out_of_sync(struct drbd_connection *connection, struct packet
return 0;
}
+static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi)
+{
+ struct drbd_peer_device *peer_device;
+ struct p_block_desc *p = pi->data;
+ struct drbd_device *device;
+ sector_t sector;
+ int size, err = 0;
+
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
+ return -EIO;
+ device = peer_device->device;
+
+ sector = be64_to_cpu(p->sector);
+ size = be32_to_cpu(p->blksize);
+
+ dec_rs_pending(device);
+
+ if (get_ldev(device)) {
+ struct drbd_peer_request *peer_req;
+ const int op = REQ_OP_DISCARD;
+
+ peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
+ size, 0, GFP_NOIO);
+ if (!peer_req) {
+ put_ldev(device);
+ return -ENOMEM;
+ }
+
+ peer_req->w.cb = e_end_resync_block;
+ peer_req->submit_jif = jiffies;
+ peer_req->flags |= EE_IS_TRIM;
+
+ spin_lock_irq(&device->resource->req_lock);
+ list_add_tail(&peer_req->w.list, &device->sync_ee);
+ spin_unlock_irq(&device->resource->req_lock);
+
+ atomic_add(pi->size >> 9, &device->rs_sect_ev);
+ err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
+
+ if (err) {
+ spin_lock_irq(&device->resource->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&device->resource->req_lock);
+
+ drbd_free_peer_req(device, peer_req);
+ put_ldev(device);
+ err = 0;
+ goto fail;
+ }
+
+ inc_unacked(device);
+
+ /* No put_ldev() here. Gets called in drbd_endio_write_sec_final(),
+ as well as drbd_rs_complete_io() */
+ } else {
+ fail:
+ drbd_rs_complete_io(device, sector);
+ drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
+ }
+
+ atomic_add(size >> 9, &device->rs_sect_in);
+
+ return err;
+}
+
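
Taken together with the P_RS_THIN_REQ handling in receive_DataRequest()
above and all_zero()/w_e_end_rsdata_req() in drbd_worker.c further down,
the thin-resync round trip looks roughly like this (editorial sketch, not
part of the patch):

    /*
     * SyncTarget                        SyncSource
     * ----------                        ----------
     * P_RS_THIN_REQ (sector, size) -->  read block, EE_RS_THIN_REQ set
     *                                   all_zero(peer_req)?
     * P_RS_DEALLOCATED             <--    yes: no payload on the wire;
     *   receive_rs_deallocated()              target discards locally
     * P_RS_DATA_REPLY + payload    <--    no: ordinary resync write
     */
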
struct data_cmd {
int expect_payload;
- size_t pkt_size;
+ unsigned int pkt_size;
int (*fn)(struct drbd_connection *, struct packet_info *);
};
@@ -4614,11 +4963,14 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
+ [P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
[P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
[P_TRIM] = { 0, sizeof(struct p_trim), receive_Data },
+ [P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
+ [P_WSAME] = { 1, sizeof(struct p_wsame), receive_Data },
};
static void drbdd(struct drbd_connection *connection)
@@ -4628,7 +4980,7 @@ static void drbdd(struct drbd_connection *connection)
int err;
while (get_t_state(&connection->receiver) == RUNNING) {
- struct data_cmd *cmd;
+ struct data_cmd const *cmd;
drbd_thread_current_set_cpu(&connection->receiver);
update_receiver_timing_details(connection, drbd_recv_header);
@@ -4643,11 +4995,18 @@ static void drbdd(struct drbd_connection *connection)
}
shs = cmd->pkt_size;
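+ /* pkt_size in the handler table is the minimum valid packet size;
+ * P_SIZES grew by an o_qlim block once DRBD_FF_WSAME is agreed,
+ * so adjust the expected size accordingly. */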
+ if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME)
+ shs += sizeof(struct o_qlim);
if (pi.size > shs && !cmd->expect_payload) {
drbd_err(connection, "No payload expected %s l:%d\n",
cmdname(pi.cmd), pi.size);
goto err_out;
}
+ if (pi.size < shs) {
+ drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n",
+ cmdname(pi.cmd), (int)shs, pi.size);
+ goto err_out;
+ }
if (shs) {
update_receiver_timing_details(connection, drbd_recv_all_warn);
@@ -4783,9 +5142,11 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
drbd_md_sync(device);
- /* serialize with bitmap writeout triggered by the state change,
- * if any. */
- wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+ if (get_ldev(device)) {
+ drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
+ "write from disconnected", BM_LOCKED_CHANGE_ALLOWED);
+ put_ldev(device);
+ }
/* tcp_close and release of sendpage pages can be deferred. I don't
* want to use SO_LINGER, because apparently it can be deferred for
@@ -4892,8 +5253,12 @@ static int drbd_do_features(struct drbd_connection *connection)
drbd_info(connection, "Handshake successful: "
"Agreed network protocol version %d\n", connection->agreed_pro_version);
- drbd_info(connection, "Agreed to%ssupport TRIM on protocol level\n",
- connection->agreed_features & FF_TRIM ? " " : " not ");
+ drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s.\n",
+ connection->agreed_features,
+ connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "",
+ connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "",
+ connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" :
+ connection->agreed_features ? "" : " none");
return 1;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 2255dcfebd2b..66b8e4bb74d8 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -47,8 +47,7 @@ static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *r
&device->vdisk->part0, req->start_jif);
}
-static struct drbd_request *drbd_req_new(struct drbd_device *device,
- struct bio *bio_src)
+static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
struct drbd_request *req;
@@ -58,10 +57,12 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
memset(req, 0, sizeof(*req));
drbd_req_make_private_bio(req, bio_src);
- req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
- req->device = device;
- req->master_bio = bio_src;
- req->epoch = 0;
+ req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
+ | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
+ | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
+ req->device = device;
+ req->master_bio = bio_src;
+ req->epoch = 0;
drbd_clear_interval(&req->i);
req->i.sector = bio_src->bi_iter.bi_sector;
@@ -218,7 +219,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
const unsigned s = req->rq_state;
struct drbd_device *device = req->device;
- int rw;
int error, ok;
/* we must not complete the master bio, while it is
@@ -242,8 +242,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
return;
}
- rw = bio_rw(req->master_bio);
-
/*
* figure out whether to report success or failure.
*
@@ -267,7 +265,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
* epoch number. If they match, increase the current_tle_nr,
* and reset the transfer log epoch write_cnt.
*/
- if (rw == WRITE &&
+ if (op_is_write(bio_op(req->master_bio)) &&
req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
start_new_tl_epoch(first_peer_device(device)->connection);
@@ -284,11 +282,14 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
* because no path was available, in which case
* it was not even added to the transfer_log.
*
- * READA may fail, and will not be retried.
+ * read-ahead may fail, and will not be retried.
*
* WRITE should have used all available paths already.
*/
- if (!ok && rw == READ && !list_empty(&req->tl_requests))
+ if (!ok &&
+ bio_op(req->master_bio) == REQ_OP_READ &&
+ !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+ !list_empty(&req->tl_requests))
req->rq_state |= RQ_POSTPONED;
if (!(req->rq_state & RQ_POSTPONED)) {
@@ -644,7 +645,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
__drbd_chk_io_error(device, DRBD_READ_ERROR);
/* fall through. */
case READ_AHEAD_COMPLETED_WITH_ERROR:
- /* it is legal to fail READA, no __drbd_chk_io_error in that case. */
+ /* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
@@ -656,7 +657,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case QUEUE_FOR_NET_READ:
- /* READ or READA, and
+ /* READ, and
* no local disk,
* or target area marked as invalid,
* or just got an io-error. */
@@ -977,16 +978,20 @@ static void complete_conflicting_writes(struct drbd_request *req)
sector_t sector = req->i.sector;
int size = req->i.size;
- i = drbd_find_overlap(&device->write_requests, sector, size);
- if (!i)
- return;
-
for (;;) {
- prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
- i = drbd_find_overlap(&device->write_requests, sector, size);
- if (!i)
+ drbd_for_each_overlap(i, &device->write_requests, sector, size) {
+ /* Ignore, if already completed to upper layers. */
+ if (i->completed)
+ continue;
+ /* Handle the first found overlap. After the schedule
+ * we have to restart the tree walk. */
+ break;
+ }
+ if (!i) /* if any */
break;
+
/* Indicate to wake up device->misc_wait on progress. */
+ prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
i->waiting = true;
spin_unlock_irq(&device->resource->req_lock);
schedule();
@@ -995,7 +1000,7 @@ static void complete_conflicting_writes(struct drbd_request *req)
finish_wait(&device->misc_wait, &wait);
}
-/* called within req_lock and rcu_read_lock() */
+/* called within req_lock */
static void maybe_pull_ahead(struct drbd_device *device)
{
struct drbd_connection *connection = first_peer_device(device)->connection;
@@ -1132,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
* replicating, in which case there is no point. */
if (unlikely(req->i.size == 0)) {
/* The only size==0 bios we expect are empty flushes. */
- D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
+ D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
if (remote)
_req_mod(req, QUEUE_AS_DRBD_BARRIER);
return remote;
@@ -1152,12 +1157,29 @@ static int drbd_process_write_request(struct drbd_request *req)
return remote;
}
+static void drbd_process_discard_req(struct drbd_request *req)
+{
+ int err = drbd_issue_discard_or_zero_out(req->device,
+ req->i.sector, req->i.size >> 9, true);
+
+ if (err)
+ req->private_bio->bi_error = -EIO;
+ bio_endio(req->private_bio);
+}
+
static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
struct drbd_device *device = req->device;
struct bio *bio = req->private_bio;
- const int rw = bio_rw(bio);
+ unsigned int type;
+
+ if (bio_op(bio) != REQ_OP_READ)
+ type = DRBD_FAULT_DT_WR;
+ else if (bio->bi_rw & REQ_RAHEAD)
+ type = DRBD_FAULT_DT_RA;
+ else
+ type = DRBD_FAULT_DT_RD;
bio->bi_bdev = device->ldev->backing_bdev;
@@ -1167,11 +1189,10 @@ drbd_submit_req_private_bio(struct drbd_request *req)
* stable storage, and this is a WRITE, we may not even submit
* this bio. */
if (get_ldev(device)) {
- if (drbd_insert_fault(device,
- rw == WRITE ? DRBD_FAULT_DT_WR
- : rw == READ ? DRBD_FAULT_DT_RD
- : DRBD_FAULT_DT_RA))
+ if (drbd_insert_fault(device, type))
bio_io_error(bio);
+ else if (bio_op(bio) == REQ_OP_DISCARD)
+ drbd_process_discard_req(req);
else
generic_make_request(bio);
put_ldev(device);
@@ -1223,24 +1244,45 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
/* Update disk stats */
_drbd_start_io_acct(device, req);
+ /* process discards always from our submitter thread */
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ goto queue_for_submitter_thread;
+
if (rw == WRITE && req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &device->flags)) {
- if (!drbd_al_begin_io_fastpath(device, &req->i)) {
- atomic_inc(&device->ap_actlog_cnt);
- drbd_queue_write(device, req);
- return NULL;
- }
+ if (!drbd_al_begin_io_fastpath(device, &req->i))
+ goto queue_for_submitter_thread;
req->rq_state |= RQ_IN_ACT_LOG;
req->in_actlog_jif = jiffies;
}
-
return req;
+
+ queue_for_submitter_thread:
+ atomic_inc(&device->ap_actlog_cnt);
+ drbd_queue_write(device, req);
+ return NULL;
+}
+
+/* Require at least one path to current data.
+ * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
+ * We would not allow to read what was written,
+ * we would not have bumped the data generation uuids,
+ * we would cause data divergence for all the wrong reasons.
+ *
+ * If we don't see at least one D_UP_TO_DATE, we will fail this request,
+ * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
+ * and queues for retry later.
+ */
+static bool may_do_writes(struct drbd_device *device)
+{
+ const union drbd_dev_state s = device->state;
+ return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
struct drbd_resource *resource = device->resource;
- const int rw = bio_rw(req->master_bio);
+ const int rw = bio_data_dir(req->master_bio);
struct bio_and_error m = { NULL, };
bool no_remote = false;
bool submit_private_bio = false;
@@ -1270,7 +1312,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
goto out;
}
- /* We fail READ/READA early, if we can not serve it.
+ /* We fail READ early, if we can not serve it.
* We must do this before req is registered on any lists.
* Otherwise, drbd_req_complete() will queue failed READ for retry. */
if (rw != WRITE) {
@@ -1291,6 +1333,12 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
}
if (rw == WRITE) {
+ if (req->private_bio && !may_do_writes(device)) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(device);
+ goto nodata;
+ }
if (!drbd_process_write_request(req))
no_remote = true;
} else {
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index bb2ef78165e5..eb49e7f2da91 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -206,6 +206,8 @@ enum drbd_req_state_bits {
/* Set when this is a write, clear for a read */
__RQ_WRITE,
+ __RQ_WSAME,
+ __RQ_UNMAP,
/* Should call drbd_al_complete_io() for this request... */
__RQ_IN_ACT_LOG,
@@ -241,10 +243,11 @@ enum drbd_req_state_bits {
#define RQ_NET_OK (1UL << __RQ_NET_OK)
#define RQ_NET_SIS (1UL << __RQ_NET_SIS)
-/* 0x1f8 */
#define RQ_NET_MASK (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)
#define RQ_WRITE (1UL << __RQ_WRITE)
+#define RQ_WSAME (1UL << __RQ_WSAME)
+#define RQ_UNMAP (1UL << __RQ_UNMAP)
#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)
#define RQ_POSTPONED (1UL << __RQ_POSTPONED)
#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 5a7ef7873b67..eea0c4aec978 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -814,7 +814,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
}
if (rv <= 0)
- /* already found a reason to abort */;
+ goto out; /* already found a reason to abort */
else if (ns.role == R_SECONDARY && device->open_cnt)
rv = SS_DEVICE_IN_USE;
@@ -862,6 +862,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
rv = SS_CONNECTED_OUTDATES;
+out:
rcu_read_unlock();
return rv;
@@ -906,6 +907,15 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_c
(ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS)))
rv = SS_IN_TRANSIENT_STATE;
+ /* Do not promote during resync handshake triggered by "force primary".
+ * This is a hack. It should really be rejected by the peer during the
+ * cluster wide state change request. */
+ if (os.role != R_PRIMARY && ns.role == R_PRIMARY
+ && ns.pdsk == D_UP_TO_DATE
+ && ns.disk != D_UP_TO_DATE && ns.disk != D_DISKLESS
+ && (ns.conn <= C_WF_SYNC_UUID || ns.conn != os.conn))
+ rv = SS_IN_TRANSIENT_STATE;
+
if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
rv = SS_NEED_CONNECTION;
@@ -1628,6 +1638,26 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
#undef REMEMBER_STATE_CHANGE
}
+/* takes old and new peer disk state */
+static bool lost_contact_to_peer_data(enum drbd_disk_state os, enum drbd_disk_state ns)
+{
+ if ((os >= D_INCONSISTENT && os != D_UNKNOWN && os != D_OUTDATED)
+ && (ns < D_INCONSISTENT || ns == D_UNKNOWN || ns == D_OUTDATED))
+ return true;
+
+ /* Scenario, starting with normal operation
+ * Connected Primary/Secondary UpToDate/UpToDate
+ * NetworkFailure Primary/Unknown UpToDate/DUnknown (frozen)
+ * ...
+ * Connected Primary/Secondary UpToDate/Diskless (resumed; needs to bump uuid!)
+ */
+ if (os == D_UNKNOWN
+ && (ns == D_DISKLESS || ns == D_FAILED || ns == D_OUTDATED))
+ return true;
+
+ return false;
+}
+
/**
* after_state_ch() - Perform after state change actions that may sleep
* @device: DRBD device.
@@ -1675,7 +1705,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
what = RESEND;
if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
- conn_lowest_disk(connection) > D_NEGOTIATING)
+ conn_lowest_disk(connection) == D_UP_TO_DATE)
what = RESTART_FROZEN_DISK_IO;
if (resource->susp_nod && what != NOTHING) {
@@ -1699,6 +1729,13 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
clear_bit(NEW_CUR_UUID, &peer_device->device->flags);
rcu_read_unlock();
+
+ /* We should actively create a new uuid, _before_
+ * we resume/resend, if the peer is diskless
+ * (recovery from a multiple error scenario).
+ * Currently, this happens with a slight delay
+ * below when checking lost_contact_to_peer_data() ...
+ */
_tl_restart(connection, RESEND);
_conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } },
@@ -1742,12 +1779,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
BM_LOCKED_TEST_ALLOWED);
/* Lost contact to peer's copy of the data */
- if ((os.pdsk >= D_INCONSISTENT &&
- os.pdsk != D_UNKNOWN &&
- os.pdsk != D_OUTDATED)
- && (ns.pdsk < D_INCONSISTENT ||
- ns.pdsk == D_UNKNOWN ||
- ns.pdsk == D_OUTDATED)) {
+ if (lost_contact_to_peer_data(os.pdsk, ns.pdsk)) {
if (get_ldev(device)) {
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
@@ -1934,12 +1966,17 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
/* This triggers bitmap writeout of potentially still unwritten pages
* if the resync finished cleanly, or aborted because of peer disk
- * failure, or because of connection loss.
+ * failure, or on transition from resync back to AHEAD/BEHIND.
+ *
+ * Connection loss is handled in drbd_disconnected() by the receiver.
+ *
* For resync aborted because of local disk failure, we cannot do
* any bitmap writeout anymore.
+ *
* No harm done if some bits change during this phase.
*/
- if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(device)) {
+ if ((os.conn > C_CONNECTED && os.conn < C_AHEAD) &&
+ (ns.conn == C_CONNECTED || ns.conn >= C_AHEAD) && get_ldev(device)) {
drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
put_ldev(device);
@@ -2160,9 +2197,7 @@ conn_set_state(struct drbd_connection *connection, union drbd_state mask, union
ns.disk = os.disk;
rv = _drbd_set_state(device, ns, flags, NULL);
- if (rv < SS_SUCCESS)
- BUG();
-
+ BUG_ON(rv < SS_SUCCESS);
ns.i = device->state.i;
ns_max.role = max_role(ns.role, ns_max.role);
ns_max.peer = max_role(ns.peer, ns_max.peer);
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index bd989536f888..6c9d5d4a8a75 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -140,7 +140,7 @@ extern void drbd_resume_al(struct drbd_device *device);
extern bool conn_all_vols_unconf(struct drbd_connection *connection);
/**
- * drbd_request_state() - Reqest a state change
+ * drbd_request_state() - Request a state change
* @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 80b0f63c7075..0eeab14776e9 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -26,7 +26,7 @@
#include <linux/drbd.h>
#include "drbd_strings.h"
-static const char *drbd_conn_s_names[] = {
+static const char * const drbd_conn_s_names[] = {
[C_STANDALONE] = "StandAlone",
[C_DISCONNECTING] = "Disconnecting",
[C_UNCONNECTED] = "Unconnected",
@@ -53,13 +53,13 @@ static const char *drbd_conn_s_names[] = {
[C_BEHIND] = "Behind",
};
-static const char *drbd_role_s_names[] = {
+static const char * const drbd_role_s_names[] = {
[R_PRIMARY] = "Primary",
[R_SECONDARY] = "Secondary",
[R_UNKNOWN] = "Unknown"
};
-static const char *drbd_disk_s_names[] = {
+static const char * const drbd_disk_s_names[] = {
[D_DISKLESS] = "Diskless",
[D_ATTACHING] = "Attaching",
[D_FAILED] = "Failed",
@@ -71,7 +71,7 @@ static const char *drbd_disk_s_names[] = {
[D_UP_TO_DATE] = "UpToDate",
};
-static const char *drbd_state_sw_errors[] = {
+static const char * const drbd_state_sw_errors[] = {
[-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config",
[-SS_NO_UP_TO_DATE_DISK] = "Need access to UpToDate data",
[-SS_NO_LOCAL_DISK] = "Can not resync without local disk",
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d87499f0d54..35dbb3dca47e 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -173,8 +173,8 @@ void drbd_peer_request_endio(struct bio *bio)
{
struct drbd_peer_request *peer_req = bio->bi_private;
struct drbd_device *device = peer_req->peer_device->device;
- int is_write = bio_data_dir(bio) == WRITE;
- int is_discard = !!(bio->bi_rw & REQ_DISCARD);
+ bool is_write = bio_data_dir(bio) == WRITE;
+ bool is_discard = bio_op(bio) == REQ_OP_DISCARD;
if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
drbd_warn(device, "%s: error=%d s=%llus\n",
@@ -248,18 +248,26 @@ void drbd_request_endio(struct bio *bio)
/* to avoid recursion in __req_mod */
if (unlikely(bio->bi_error)) {
- if (bio->bi_rw & REQ_DISCARD)
- what = (bio->bi_error == -EOPNOTSUPP)
- ? DISCARD_COMPLETED_NOTSUPP
- : DISCARD_COMPLETED_WITH_ERROR;
- else
- what = (bio_data_dir(bio) == WRITE)
- ? WRITE_COMPLETED_WITH_ERROR
- : (bio_rw(bio) == READ)
- ? READ_COMPLETED_WITH_ERROR
- : READ_AHEAD_COMPLETED_WITH_ERROR;
- } else
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ if (bio->bi_error == -EOPNOTSUPP)
+ what = DISCARD_COMPLETED_NOTSUPP;
+ else
+ what = DISCARD_COMPLETED_WITH_ERROR;
+ break;
+ case REQ_OP_READ:
+ if (bio->bi_rw & REQ_RAHEAD)
+ what = READ_AHEAD_COMPLETED_WITH_ERROR;
+ else
+ what = READ_COMPLETED_WITH_ERROR;
+ break;
+ default:
+ what = WRITE_COMPLETED_WITH_ERROR;
+ break;
+ }
+ } else {
what = COMPLETED_OK;
+ }
bio_put(req->private_bio);
req->private_bio = ERR_PTR(bio->bi_error);
@@ -320,6 +328,10 @@ void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, sg.length);
crypto_ahash_update(req);
+ /* REQ_OP_WRITE_SAME has only one segment,
+ * checksum the payload only once. */
+ if (bio_op(bio) == REQ_OP_WRITE_SAME)
+ break;
}
ahash_request_set_crypt(req, NULL, digest, 0);
crypto_ahash_final(req);
@@ -387,7 +399,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
- size, true /* has real payload */, GFP_TRY);
+ size, size, GFP_TRY);
if (!peer_req)
goto defer;
@@ -397,7 +409,8 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
spin_unlock_irq(&device->resource->req_lock);
atomic_add(size >> 9, &device->rs_sect_ev);
- if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+ DRBD_FAULT_RS_RD) == 0)
return 0;
/* If it failed because of ENOMEM, retry should help. If it failed
@@ -582,6 +595,7 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
int number, rollback_i, size;
int align, requeue = 0;
int i = 0;
+ int discard_granularity = 0;
if (unlikely(cancel))
return 0;
@@ -601,6 +615,12 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
return 0;
}
+ if (connection->agreed_features & DRBD_FF_THIN_RESYNC) {
+ rcu_read_lock();
+ discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity;
+ rcu_read_unlock();
+ }
+
max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
number = drbd_rs_number_requests(device);
if (number <= 0)
@@ -665,6 +685,9 @@ next_sector:
if (sector & ((1<<(align+3))-1))
break;
+ if (discard_granularity && size == discard_granularity)
+ break;
+
/* do not cross extent boundaries */
if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
break;
@@ -711,7 +734,8 @@ next_sector:
int err;
inc_rs_pending(device);
- err = drbd_send_drequest(peer_device, P_RS_DATA_REQUEST,
+ err = drbd_send_drequest(peer_device,
+ size == discard_granularity ? P_RS_THIN_REQ : P_RS_DATA_REQUEST,
sector, size, ID_SYNCER);
if (err) {
drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
@@ -828,6 +852,7 @@ static void ping_peer(struct drbd_device *device)
int drbd_resync_finished(struct drbd_device *device)
{
+ struct drbd_connection *connection = first_peer_device(device)->connection;
unsigned long db, dt, dbdt;
unsigned long n_oos;
union drbd_state os, ns;
@@ -849,8 +874,7 @@ int drbd_resync_finished(struct drbd_device *device)
if (dw) {
dw->w.cb = w_resync_finished;
dw->device = device;
- drbd_queue_work(&first_peer_device(device)->connection->sender_work,
- &dw->w);
+ drbd_queue_work(&connection->sender_work, &dw->w);
return 1;
}
drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n");
@@ -963,6 +987,30 @@ int drbd_resync_finished(struct drbd_device *device)
_drbd_set_state(device, ns, CS_VERBOSE, NULL);
out_unlock:
spin_unlock_irq(&device->resource->req_lock);
+
+ /* If we have been sync source, and have an effective fencing-policy,
+ * once *all* volumes are back in sync, call "unfence". */
+ if (os.conn == C_SYNC_SOURCE) {
+ enum drbd_disk_state disk_state = D_MASK;
+ enum drbd_disk_state pdsk_state = D_MASK;
+ enum drbd_fencing_p fp = FP_DONT_CARE;
+
+ rcu_read_lock();
+ fp = rcu_dereference(device->ldev->disk_conf)->fencing;
+ if (fp != FP_DONT_CARE) {
+ struct drbd_peer_device *peer_device;
+ int vnr;
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
+ pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk);
+ }
+ }
+ rcu_read_unlock();
+ if (disk_state == D_UP_TO_DATE && pdsk_state == D_UP_TO_DATE)
+ conn_khelper(connection, "unfence-peer");
+ }
+
put_ldev(device);
out:
device->rs_total = 0;
@@ -999,7 +1047,6 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
- * @device: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
@@ -1035,6 +1082,30 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
return err;
}
+static bool all_zero(struct drbd_peer_request *peer_req)
+{
+ struct page *page = peer_req->pages;
+ unsigned int len = peer_req->i.size;
+
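+ /* i.size is a multiple of 512 and the chain holds whole pages, so
+ * each chunk below is a whole number of longs; the word-wise scan
+ * skips no tail bytes. */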
+ page_chain_for_each(page) {
+ unsigned int l = min_t(unsigned int, len, PAGE_SIZE);
+ unsigned int i, words = l / sizeof(long);
+ unsigned long *d;
+
+ d = kmap_atomic(page);
+ for (i = 0; i < words; i++) {
+ if (d[i]) {
+ kunmap_atomic(d);
+ return false;
+ }
+ }
+ kunmap_atomic(d);
+ len -= l;
+ }
+
+ return true;
+}
+
/**
* w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
* @w: work object.
@@ -1063,7 +1134,10 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
if (likely(device->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(device);
- err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
+ if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req))
+ err = drbd_send_rs_deallocated(peer_device, peer_req);
+ else
+ err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
drbd_err(device, "Not sending RSDataReply, "
@@ -1633,7 +1707,7 @@ static bool use_checksum_based_resync(struct drbd_connection *connection, struct
rcu_read_unlock();
return connection->agreed_pro_version >= 89 && /* supported? */
connection->csums_tfm && /* configured? */
- (csums_after_crash_only == 0 /* use for each resync? */
+ (csums_after_crash_only == false /* use for each resync? */
|| test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */
}
@@ -1768,7 +1842,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
device->bm_resync_fo = 0;
device->use_csums = use_checksum_based_resync(connection, device);
} else {
- device->use_csums = 0;
+ device->use_csums = false;
}
/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 84708a5f8c52..c557057fe8ae 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3822,8 +3822,9 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
bio.bi_end_io = floppy_rb0_cb;
+ bio_set_op_attrs(&bio, REQ_OP_READ, 0);
- submit_bio(READ, &bio);
+ submit_bio(&bio);
process_fd_request();
init_completion(&cbdata.complete);
@@ -4349,8 +4350,7 @@ static int __init do_floppy_init(void)
/* to be cleaned up... */
disks[drive]->private_data = (void *)(long)drive;
disks[drive]->flags |= GENHD_FL_REMOVABLE;
- disks[drive]->driverfs_dev = &floppy_device[drive].dev;
- add_disk(disks[drive]);
+ device_add_disk(&floppy_device[drive].dev, disks[drive]);
}
return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1fa8cc235977..075377eee0c0 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -447,7 +447,7 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
static inline void handle_partial_read(struct loop_cmd *cmd, long bytes)
{
- if (bytes < 0 || (cmd->rq->cmd_flags & REQ_WRITE))
+ if (bytes < 0 || op_is_write(req_op(cmd->rq)))
return;
if (unlikely(bytes < blk_rq_bytes(cmd->rq))) {
@@ -541,10 +541,10 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
- if (rq->cmd_flags & REQ_WRITE) {
- if (rq->cmd_flags & REQ_FLUSH)
+ if (op_is_write(req_op(rq))) {
+ if (req_op(rq) == REQ_OP_FLUSH)
ret = lo_req_flush(lo, rq);
- else if (rq->cmd_flags & REQ_DISCARD)
+ else if (req_op(rq) == REQ_OP_DISCARD)
ret = lo_discard(lo, rq, pos);
else if (lo->transfer)
ret = lo_write_transfer(lo, rq, pos);
@@ -1659,8 +1659,8 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (lo->lo_state != Lo_bound)
return -EIO;
- if (lo->use_dio && !(cmd->rq->cmd_flags & (REQ_FLUSH |
- REQ_DISCARD)))
+ if (lo->use_dio && req_op(cmd->rq) != REQ_OP_FLUSH &&
+     req_op(cmd->rq) != REQ_OP_DISCARD)
cmd->use_aio = true;
else
cmd->use_aio = false;
@@ -1672,7 +1672,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static void loop_handle_cmd(struct loop_cmd *cmd)
{
- const bool write = cmd->rq->cmd_flags & REQ_WRITE;
+ const bool write = op_is_write(req_op(cmd->rq));
struct loop_device *lo = cmd->rq->q->queuedata;
int ret = 0;
@@ -1765,6 +1765,7 @@ static int loop_add(struct loop_device **l, int i)
*/
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+ err = -ENOMEM;
disk = lo->lo_disk = alloc_disk(1 << part_shift);
if (!disk)
goto out_free_queue;
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 145ce2aa2e78..e937fcf71769 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -687,15 +687,13 @@ static unsigned int mg_issue_req(struct request *req,
unsigned int sect_num,
unsigned int sect_cnt)
{
- switch (rq_data_dir(req)) {
- case READ:
+ if (rq_data_dir(req) == READ) {
if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
!= MG_ERR_NONE) {
mg_bad_rw_intr(host);
return host->error;
}
- break;
- case WRITE:
+ } else {
/* TODO : handler */
outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
@@ -714,7 +712,6 @@ static unsigned int mg_issue_req(struct request *req,
mod_timer(&host->timer, jiffies + 3 * HZ);
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
- break;
}
return MG_ERR_NONE;
}
@@ -1018,7 +1015,7 @@ probe_err_7:
probe_err_6:
blk_cleanup_queue(host->breq);
probe_err_5:
- unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME);
+ unregister_blkdev(host->major, MG_DISK_NAME);
probe_err_4:
if (!prv_data->use_polling)
free_irq(host->irq, host);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 6053e4659fa2..2aca98e8e427 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3765,7 +3765,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
return -ENODATA;
}
- if (rq->cmd_flags & REQ_DISCARD) {
+ if (req_op(rq) == REQ_OP_DISCARD) {
int err;
err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
@@ -3956,7 +3956,6 @@ static int mtip_block_initialize(struct driver_data *dd)
if (rv)
goto disk_index_error;
- dd->disk->driverfs_dev = &dd->pdev->dev;
dd->disk->major = dd->major;
dd->disk->first_minor = index * MTIP_MAX_MINORS;
dd->disk->minors = MTIP_MAX_MINORS;
@@ -4008,7 +4007,7 @@ skip_create_disk:
/*
* if rebuild pending, start the service thread, and delay the block
- * queue creation and add_disk()
+ * queue creation and device_add_disk()
*/
if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
goto start_service_thread;
@@ -4042,7 +4041,7 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
- add_disk(dd->disk);
+ device_add_disk(&dd->pdev->dev, dd->disk);
dd->bdev = bdget_disk(dd->disk, 0);
/*
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6a48ed41963f..6f55b262b5ce 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -282,9 +282,9 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
if (req->cmd_type == REQ_TYPE_DRV_PRIV)
type = NBD_CMD_DISC;
- else if (req->cmd_flags & REQ_DISCARD)
+ else if (req_op(req) == REQ_OP_DISCARD)
type = NBD_CMD_TRIM;
- else if (req->cmd_flags & REQ_FLUSH)
+ else if (req_op(req) == REQ_OP_FLUSH)
type = NBD_CMD_FLUSH;
else if (rq_data_dir(req) == WRITE)
type = NBD_CMD_WRITE;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index cab97593ba54..75a7f88d6717 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -448,7 +448,7 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct request *rq;
struct bio *bio = rqd->bio;
- rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+ rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
if (IS_ERR(rq))
return -ENOMEM;
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index c2854a2bfdb0..92900f5f0b47 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -321,7 +321,7 @@ static void osdblk_rq_fn(struct request_queue *q)
* driver-specific, etc.
*/
- do_flush = rq->cmd_flags & REQ_FLUSH;
+ do_flush = (req_op(rq) == REQ_OP_FLUSH);
do_write = (rq_data_dir(rq) == WRITE);
if (!do_flush) { /* osd_flush does not use a bio */
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d06c62eccdf0..9393bc730acf 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1074,7 +1074,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
BUG();
atomic_inc(&pkt->io_wait);
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
pkt_queue_bio(pd, bio);
frames_read++;
}
@@ -1336,7 +1336,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
/* Start the write request */
atomic_set(&pkt->io_wait, 1);
- pkt->w_bio->bi_rw = WRITE;
+ bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
pkt_queue_bio(pd, pkt->w_bio);
}
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 4b7e405830d7..76f33c84ce3d 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -196,7 +196,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = blk_fetch_request(q))) {
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
if (ps3disk_submit_flush_request(dev, req))
break;
} else if (req->cmd_type == REQ_TYPE_FS) {
@@ -256,7 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
read = 0;
op = "flush";
} else {
@@ -487,7 +487,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
gendisk->fops = &ps3disk_fops;
gendisk->queue = queue;
gendisk->private_data = dev;
- gendisk->driverfs_dev = &dev->sbd.core;
snprintf(gendisk->disk_name, sizeof(gendisk->disk_name), PS3DISK_NAME,
devidx+'a');
priv->blocking_factor = dev->blk_size >> 9;
@@ -499,7 +498,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
- add_disk(gendisk);
+ device_add_disk(&dev->sbd.core, gendisk);
return 0;
fail_cleanup_queue:
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 56847fcda086..456b4fe21559 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -773,14 +773,13 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
gendisk->fops = &ps3vram_fops;
gendisk->queue = queue;
gendisk->private_data = dev;
- gendisk->driverfs_dev = &dev->core;
strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
- add_disk(gendisk);
+ device_add_disk(&dev->core, gendisk);
return 0;
fail_cleanup_queue:
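Both ps3 conversions above follow the add_disk() change seen throughout this series: instead of assigning gendisk->driverfs_dev by hand, the parent device is passed to device_add_disk(). In sketch form:

	/* before */
	gendisk->driverfs_dev = &dev->core;
	add_disk(gendisk);

	/* after: the parent travels with the registration call */
	device_add_disk(&dev->core, gendisk);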
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 81666a56415e..450662055d97 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3286,9 +3286,9 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err;
}
- if (rq->cmd_flags & REQ_DISCARD)
+ if (req_op(rq) == REQ_OP_DISCARD)
op_type = OBJ_OP_DISCARD;
- else if (rq->cmd_flags & REQ_WRITE)
+ else if (req_op(rq) == REQ_OP_WRITE)
op_type = OBJ_OP_WRITE;
else
op_type = OBJ_OP_READ;
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index e1b8b7061d2f..f81d70b39d10 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -230,8 +230,7 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
set_capacity(card->gendisk, card->size8 >> 9);
else
set_capacity(card->gendisk, 0);
- add_disk(card->gendisk);
-
+ device_add_disk(CARD_TO_DEV(card), card->gendisk);
card->bdev_attached = 1;
}
@@ -308,7 +307,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
"rsxx%d", card->disk_id);
- card->gendisk->driverfs_dev = &card->dev->dev;
card->gendisk->major = card->major;
card->gendisk->first_minor = 0;
card->gendisk->fops = &rsxx_fops;
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index cf8cd293abb5..5a20385f87d0 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
dma_cnt[i] = 0;
}
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio_op(bio) == REQ_OP_DISCARD) {
bv_len = bio->bi_iter.bi_size;
while (bv_len > 0) {
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 910e065918af..3822eae102db 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -597,7 +597,7 @@ static void skd_request_fn(struct request_queue *q)
data_dir = rq_data_dir(req);
io_flags = req->cmd_flags;
- if (io_flags & REQ_FLUSH)
+ if (req_op(req) == REQ_OP_FLUSH)
flush++;
if (io_flags & REQ_FUA)
@@ -4690,10 +4690,10 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return -EIO;
}
-static int skd_bdev_attach(struct skd_device *skdev)
+static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
- add_disk(skdev->disk);
+ device_add_disk(parent, skdev->disk);
return 0;
}
@@ -4812,8 +4812,6 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, skdev);
- skdev->disk->driverfs_dev = &pdev->dev;
-
for (i = 0; i < SKD_MAX_BARS; i++) {
skdev->mem_phys[i] = pci_resource_start(pdev, i);
skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
@@ -4851,7 +4849,7 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(SKD_START_WAIT_SECONDS * HZ));
if (skdev->gendisk_on > 0) {
/* device came on-line after reset */
- skd_bdev_attach(skdev);
+ skd_bdev_attach(&pdev->dev, skdev);
rc = 0;
} else {
/* we timed out, something is wrong with the device,
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 4b911ed96ea3..cab157331c4e 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -804,7 +804,6 @@ static int probe_disk(struct vdc_port *port)
g->fops = &vdc_fops;
g->queue = q;
g->private_data = port;
- g->driverfs_dev = &port->vio.vdev->dev;
set_capacity(g, port->vdisk_size);
@@ -835,7 +834,7 @@ static int probe_disk(struct vdc_port *port)
port->vdisk_size, (port->vdisk_size >> (20 - 9)),
port->vio.ver.major, port->vio.ver.minor);
- add_disk(g);
+ device_add_disk(&port->vio.vdev->dev, g);
return 0;
}
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 7939b9f87441..d0a3e6d4515f 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -344,7 +344,6 @@ static int add_bio(struct cardinfo *card)
int offset;
struct bio *bio;
struct bio_vec vec;
- int rw;
bio = card->currentbio;
if (!bio && card->bio) {
@@ -359,7 +358,6 @@ static int add_bio(struct cardinfo *card)
if (!bio)
return 0;
- rw = bio_rw(bio);
if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
return 0;
@@ -369,7 +367,7 @@ static int add_bio(struct cardinfo *card)
vec.bv_page,
vec.bv_offset,
vec.bv_len,
- (rw == READ) ?
+ bio_op(bio) == REQ_OP_READ ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
p = &card->mm_pages[card->Ready];
@@ -398,7 +396,7 @@ static int add_bio(struct cardinfo *card)
DMASCR_CHAIN_EN |
DMASCR_SEM_EN |
pci_cmds);
- if (rw == WRITE)
+ if (bio_op(bio) == REQ_OP_WRITE)
desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
desc->sem_control_bits = desc->control_bits;
@@ -462,7 +460,7 @@ static void process_page(unsigned long data)
le32_to_cpu(desc->local_addr)>>9,
le32_to_cpu(desc->transfer_size));
dump_dmastat(card, control);
- } else if ((bio->bi_rw & REQ_WRITE) &&
+ } else if (op_is_write(bio_op(bio)) &&
le32_to_cpu(desc->local_addr) >> 9 ==
card->init_size) {
card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
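umem now derives the DMA direction from the bio's operation; op_is_write() covers every WRITE-class op (plain writes, discards, and so on), which is why it replaces the old bi_rw & REQ_WRITE test in process_page(). The direction pick in add_bio() reduces to a sketch like:

	/* choose the PCI DMA direction from the operation alone */
	int dma_dir = op_is_write(bio_op(bio)) ? PCI_DMA_TODEVICE
					       : PCI_DMA_FROMDEVICE;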
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 42758b52768c..1523e05c46fc 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -172,7 +172,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
vbr->req = req;
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
@@ -236,25 +236,22 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
struct virtio_blk *vblk = disk->private_data;
+ struct request_queue *q = vblk->disk->queue;
struct request *req;
- struct bio *bio;
int err;
- bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
- GFP_KERNEL);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
- if (IS_ERR(req)) {
- bio_put(bio);
+ req = blk_get_request(q, READ, GFP_KERNEL);
+ if (IS_ERR(req))
return PTR_ERR(req);
- }
-
req->cmd_type = REQ_TYPE_DRV_PRIV;
+
+ err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ if (err)
+ goto out;
+
err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+out:
blk_put_request(req);
-
return err;
}
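The rewritten virtblk_get_id() swaps the bio_map_kern()/blk_make_request() pair for the simpler allocate-then-map sequence. A condensed sketch of that pattern, assuming the 4.8-era request API (sketch_exec_drv_cmd() is a hypothetical name):

	static int sketch_exec_drv_cmd(struct request_queue *q,
				       struct gendisk *disk,
				       void *buf, unsigned int len)
	{
		struct request *req;
		int err;

		req = blk_get_request(q, READ, GFP_KERNEL);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->cmd_type = REQ_TYPE_DRV_PRIV;

		/* bounce the kernel buffer into the request's bio */
		err = blk_rq_map_kern(q, req, buf, len, GFP_KERNEL);
		if (!err)
			err = blk_execute_rq(q, disk, req, false);

		blk_put_request(req);
		return err;
	}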
@@ -656,7 +653,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->disk->first_minor = index_to_minor(index);
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
- vblk->disk->driverfs_dev = &vdev->dev;
vblk->disk->flags |= GENHD_FL_EXT_DEVT;
vblk->index = index;
@@ -733,7 +729,7 @@ static int virtblk_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- add_disk(vblk->disk);
+ device_add_disk(&vdev->dev, vblk->disk);
err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
if (err)
goto out_del_disk;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4809c1501d7e..4a80ee752597 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -501,7 +501,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
struct xen_vbd *vbd = &blkif->vbd;
int rc = -EACCES;
- if ((operation != READ) && vbd->readonly)
+ if ((operation != REQ_OP_READ) && vbd->readonly)
goto out;
if (likely(req->nr_sects)) {
@@ -1014,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
preq.sector_number = req->u.discard.sector_number;
preq.nr_sects = req->u.discard.nr_sectors;
- err = xen_vbd_translate(&preq, blkif, WRITE);
+ err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
if (err) {
pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
preq.sector_number,
@@ -1229,6 +1229,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct bio **biolist = pending_req->biolist;
int i, nbio = 0;
int operation;
+ int operation_flags = 0;
struct blk_plug plug;
bool drain = false;
struct grant_page **pages = pending_req->segments;
@@ -1247,17 +1248,19 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
switch (req_operation) {
case BLKIF_OP_READ:
ring->st_rd_req++;
- operation = READ;
+ operation = REQ_OP_READ;
break;
case BLKIF_OP_WRITE:
ring->st_wr_req++;
- operation = WRITE_ODIRECT;
+ operation = REQ_OP_WRITE;
+ operation_flags = WRITE_ODIRECT;
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
- operation = WRITE_FLUSH;
+ operation = REQ_OP_WRITE;
+ operation_flags = WRITE_FLUSH;
break;
default:
operation = 0; /* make gcc happy */
@@ -1269,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
nseg = req->operation == BLKIF_OP_INDIRECT ?
req->u.indirect.nr_segments : req->u.rw.nr_segments;
- if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
+ if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
unlikely((req->operation != BLKIF_OP_INDIRECT) &&
(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1310,7 +1313,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
- operation == READ ? "read" : "write",
+ operation == REQ_OP_READ ? "read" : "write",
preq.sector_number,
preq.sector_number + preq.nr_sects,
ring->blkif->vbd.pdevice);
@@ -1369,6 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
+ bio_set_op_attrs(bio, operation, operation_flags);
}
preq.sector_number += seg[i].nsec;
@@ -1376,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
/* This will be hit if the operation was a flush or discard. */
if (!bio) {
- BUG_ON(operation != WRITE_FLUSH);
+ BUG_ON(operation_flags != WRITE_FLUSH);
bio = bio_alloc(GFP_KERNEL, 0);
if (unlikely(bio == NULL))
@@ -1386,20 +1390,21 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
bio->bi_bdev = preq.bdev;
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
+ bio_set_op_attrs(bio, operation, operation_flags);
}
atomic_set(&pending_req->pendcnt, nbio);
blk_start_plug(&plug);
for (i = 0; i < nbio; i++)
- submit_bio(operation, biolist[i]);
+ submit_bio(biolist[i]);
/* Let the I/Os go.. */
blk_finish_plug(&plug);
- if (operation == READ)
+ if (operation == REQ_OP_READ)
ring->st_rd_sect += preq.nr_sects;
- else if (operation & WRITE)
+ else if (operation == REQ_OP_WRITE)
ring->st_wr_sect += preq.nr_sects;
return 0;
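xen-blkback now carries the operation and its flags in separate variables until bio_set_op_attrs() folds them into each bio, after which submit_bio() needs only the bio. Condensed, the flow in dispatch_rw_block_io() is:

	int op = REQ_OP_READ, op_flags = 0;

	switch (req_operation) {
	case BLKIF_OP_WRITE:
		op = REQ_OP_WRITE;
		op_flags = WRITE_ODIRECT;	/* now flags, no longer the op */
		break;
	case BLKIF_OP_FLUSH_DISKCACHE:
		op = REQ_OP_WRITE;
		op_flags = WRITE_FLUSH;
		break;
	}

	bio_set_op_attrs(bio, op, op_flags);
	submit_bio(bio);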
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3355f1cdd4e5..2994cfa44c8a 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -480,7 +480,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
vbd->flush_support = true;
- if (q && blk_queue_secdiscard(q))
+ if (q && blk_queue_secure_erase(q))
vbd->discard_secure = true;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2e6d1e9c3345..0b6682a33e3b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -196,6 +196,7 @@ struct blkfront_info
unsigned int nr_ring_pages;
struct request_queue *rq;
unsigned int feature_flush;
+ unsigned int feature_fua;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
unsigned int discard_granularity;
@@ -207,6 +208,9 @@ struct blkfront_info
struct blk_mq_tag_set tag_set;
struct blkfront_ring_info *rinfo;
unsigned int nr_rings;
+ /* Save incomplete reqs and bios for migration. */
+ struct list_head requests;
+ struct bio_list bio_list;
};
static unsigned int nr_minors;
@@ -544,7 +548,7 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
ring_req->u.discard.id = id;
ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
- if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
+ if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
else
ring_req->u.discard.flag = 0;
@@ -743,7 +747,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* The indirect operation can only be a BLKIF_OP_READ or
* BLKIF_OP_WRITE
*/
- BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
+ BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
ring_req->operation = BLKIF_OP_INDIRECT;
ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
@@ -755,7 +759,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
ring_req->u.rw.handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
/*
* Ideally we can do an unordered flush-to-disk.
* In case the backend only supports barriers, use that.
@@ -763,19 +767,14 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* implement it the same way. (It's also a FLUSH+FUA,
* since it is guaranteed ordered WRT previous writes.)
*/
- switch (info->feature_flush &
- ((REQ_FLUSH|REQ_FUA))) {
- case REQ_FLUSH|REQ_FUA:
+ if (info->feature_flush && info->feature_fua)
ring_req->operation =
BLKIF_OP_WRITE_BARRIER;
- break;
- case REQ_FLUSH:
+ else if (info->feature_flush)
ring_req->operation =
BLKIF_OP_FLUSH_DISKCACHE;
- break;
- default:
+ else
ring_req->operation = 0;
- }
}
ring_req->u.rw.nr_segments = num_grant;
if (unlikely(require_extra_req)) {
@@ -844,7 +843,8 @@ static int blkif_queue_request(struct request *req, struct blkfront_ring_info *r
if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
return 1;
- if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
+ if (unlikely(req_op(req) == REQ_OP_DISCARD ||
+ req_op(req) == REQ_OP_SECURE_ERASE))
return blkif_queue_discard_req(req, rinfo);
else
return blkif_queue_rw_req(req, rinfo);
@@ -864,10 +864,10 @@ static inline bool blkif_request_flush_invalid(struct request *req,
struct blkfront_info *info)
{
return ((req->cmd_type != REQ_TYPE_FS) ||
- ((req->cmd_flags & REQ_FLUSH) &&
- !(info->feature_flush & REQ_FLUSH)) ||
+ ((req_op(req) == REQ_OP_FLUSH) &&
+ !info->feature_flush) ||
((req->cmd_flags & REQ_FUA) &&
- !(info->feature_flush & REQ_FUA)));
+ !info->feature_fua));
}
static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -952,7 +952,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
rq->limits.discard_granularity = info->discard_granularity;
rq->limits.discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
+ queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -978,24 +978,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
return 0;
}
-static const char *flush_info(unsigned int feature_flush)
+static const char *flush_info(struct blkfront_info *info)
{
- switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
- case REQ_FLUSH|REQ_FUA:
+ if (info->feature_flush && info->feature_fua)
return "barrier: enabled;";
- case REQ_FLUSH:
+ else if (info->feature_flush)
return "flush diskcache: enabled;";
- default:
+ else
return "barrier or flush: disabled;";
- }
}
static void xlvbd_flush(struct blkfront_info *info)
{
- blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
- info->feature_flush & REQ_FUA);
+ blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
+ info->feature_fua ? true : false);
pr_info("blkfront: %s: %s %s %s %s %s\n",
- info->gd->disk_name, flush_info(info->feature_flush),
+ info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
"enabled;" : "disabled;", "indirect descriptors:",
info->max_indirect_segments ? "enabled;" : "disabled;");
@@ -1136,7 +1134,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
gd->first_minor = minor;
gd->fops = &xlvbd_block_fops;
gd->private_data = info;
- gd->driverfs_dev = &(info->xbdev->dev);
set_capacity(gd, capacity);
if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
@@ -1594,7 +1591,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
info->feature_discard = 0;
info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
- queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
+ queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
}
blk_mq_complete_request(req, error);
break;
@@ -1614,6 +1611,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
if (unlikely(error)) {
if (error == -EOPNOTSUPP)
error = 0;
+ info->feature_fua = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
@@ -2002,69 +2000,22 @@ static int blkif_recover(struct blkfront_info *info)
{
unsigned int i, r_index;
struct request *req, *n;
- struct blk_shadow *copy;
int rc;
struct bio *bio, *cloned_bio;
- struct bio_list bio_list, merge_bio;
unsigned int segs, offset;
int pending, size;
struct split_bio *split_bio;
- struct list_head requests;
blkfront_gather_backend_features(info);
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs);
- bio_list_init(&bio_list);
- INIT_LIST_HEAD(&requests);
for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[r_index];
- /* Stage 1: Make a safe copy of the shadow state. */
- copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
- GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
- if (!copy)
- return -ENOMEM;
-
- /* Stage 2: Set up free list. */
- memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
- for (i = 0; i < BLK_RING_SIZE(info); i++)
- rinfo->shadow[i].req.u.rw.id = i+1;
- rinfo->shadow_free = rinfo->ring.req_prod_pvt;
- rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+ struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
rc = blkfront_setup_indirect(rinfo);
- if (rc) {
- kfree(copy);
+ if (rc)
return rc;
- }
-
- for (i = 0; i < BLK_RING_SIZE(info); i++) {
- /* Not in use? */
- if (!copy[i].request)
- continue;
-
- /*
- * Get the bios in the request so we can re-queue them.
- */
- if (copy[i].request->cmd_flags &
- (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
- /*
- * Flush operations don't contain bios, so
- * we need to requeue the whole request
- */
- list_add(&copy[i].request->queuelist, &requests);
- continue;
- }
- merge_bio.head = copy[i].request->bio;
- merge_bio.tail = copy[i].request->biotail;
- bio_list_merge(&bio_list, &merge_bio);
- copy[i].request->bio = NULL;
- blk_end_request_all(copy[i].request, 0);
- }
-
- kfree(copy);
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2079,7 +2030,7 @@ static int blkif_recover(struct blkfront_info *info)
kick_pending_request_queues(rinfo);
}
- list_for_each_entry_safe(req, n, &requests, queuelist) {
+ list_for_each_entry_safe(req, n, &info->requests, queuelist) {
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
@@ -2087,7 +2038,7 @@ static int blkif_recover(struct blkfront_info *info)
}
blk_mq_kick_requeue_list(info->rq);
- while ((bio = bio_list_pop(&bio_list)) != NULL) {
+ while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
/* Traverse the list of pending bios and re-queue them */
if (bio_segments(bio) > segs) {
/*
@@ -2108,7 +2059,7 @@ static int blkif_recover(struct blkfront_info *info)
bio_trim(cloned_bio, offset, size);
cloned_bio->bi_private = split_bio;
cloned_bio->bi_end_io = split_bio_end;
- submit_bio(cloned_bio->bi_rw, cloned_bio);
+ submit_bio(cloned_bio);
}
/*
* Now we have to wait for all those smaller bios to
@@ -2117,7 +2068,7 @@ static int blkif_recover(struct blkfront_info *info)
continue;
}
/* We don't need to split this bio */
- submit_bio(bio->bi_rw, bio);
+ submit_bio(bio);
}
return 0;
@@ -2133,9 +2084,47 @@ static int blkfront_resume(struct xenbus_device *dev)
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err = 0;
+ unsigned int i, j;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
+ bio_list_init(&info->bio_list);
+ INIT_LIST_HEAD(&info->requests);
+ for (i = 0; i < info->nr_rings; i++) {
+ struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ struct bio_list merge_bio;
+ struct blk_shadow *shadow = rinfo->shadow;
+
+ for (j = 0; j < BLK_RING_SIZE(info); j++) {
+ /* Not in use? */
+ if (!shadow[j].request)
+ continue;
+
+ /*
+ * Get the bios in the request so we can re-queue them.
+ */
+ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+ req_op(shadow[j].request) == REQ_OP_DISCARD ||
+ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
+ shadow[j].request->cmd_flags & REQ_FUA) {
+ /*
+ * Flush operations don't contain bios, so
+ * we need to requeue the whole request
+ *
+ * XXX: but this doesn't make any sense for a
+ * write with the FUA flag set.
+ */
+ list_add(&shadow[j].request->queuelist, &info->requests);
+ continue;
+ }
+ merge_bio.head = shadow[j].request->bio;
+ merge_bio.tail = shadow[j].request->biotail;
+ bio_list_merge(&info->bio_list, &merge_bio);
+ shadow[j].request->bio = NULL;
+ blk_mq_end_request(shadow[j].request, 0);
+ }
+ }
+
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
err = negotiate_mq(info);
@@ -2309,6 +2298,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
unsigned int indirect_segments;
info->feature_flush = 0;
+ info->feature_fua = 0;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-barrier", "%d", &barrier,
@@ -2321,8 +2311,11 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
*
* If there are barriers, then we use flush.
*/
- if (!err && barrier)
- info->feature_flush = REQ_FLUSH | REQ_FUA;
+ if (!err && barrier) {
+ info->feature_flush = 1;
+ info->feature_fua = 1;
+ }
+
/*
* And if there is "feature-flush-cache" use that above
* barriers.
@@ -2331,8 +2324,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
"feature-flush-cache", "%d", &flush,
NULL);
- if (!err && flush)
- info->feature_flush = REQ_FLUSH;
+ if (!err && flush) {
+ info->feature_flush = 1;
+ info->feature_fua = 0;
+ }
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-discard", "%d", &discard,
@@ -2452,7 +2447,7 @@ static void blkfront_connect(struct blkfront_info *info)
for (i = 0; i < info->nr_rings; i++)
kick_pending_request_queues(&info->rinfo[i]);
- add_disk(info->gd);
+ device_add_disk(&info->xbdev->dev, info->gd);
info->is_ready = 1;
}
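With feature_flush no longer a bag of REQ_* bits, blkfront tracks flush and FUA support as two independent fields and feeds them straight into the queue's write-cache settings; blk_queue_write_cache() has taken plain booleans since 4.7. In sketch form:

	/* advertise write-cache semantics from the two feature fields */
	blk_queue_write_cache(info->rq,
			      info->feature_flush != 0,	/* volatile cache */
			      info->feature_fua != 0);	/* FUA honoured */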
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8fcad8b761f1..e5e5d19f2172 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -874,7 +874,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
offset = (bio->bi_iter.bi_sector &
(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
- if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
zram_bio_discard(zram, index, offset, bio);
bio_endio(bio);
return;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 1b257ea9776a..5d475b3a0b2e 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2032,7 +2032,7 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_SUBCHANNEL;
- cgc.cmd[1] = 2; /* MSF addressing */
+ cgc.cmd[1] = subchnl->cdsc_format; /* MSF or LBA addressing */
cgc.cmd[2] = 0x40; /* request subQ data */
cgc.cmd[3] = mcn ? 2 : 1;
cgc.cmd[8] = 16;
@@ -2041,17 +2041,27 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
return ret;
subchnl->cdsc_audiostatus = cgc.buffer[1];
- subchnl->cdsc_format = CDROM_MSF;
subchnl->cdsc_ctrl = cgc.buffer[5] & 0xf;
subchnl->cdsc_trk = cgc.buffer[6];
subchnl->cdsc_ind = cgc.buffer[7];
- subchnl->cdsc_reladdr.msf.minute = cgc.buffer[13];
- subchnl->cdsc_reladdr.msf.second = cgc.buffer[14];
- subchnl->cdsc_reladdr.msf.frame = cgc.buffer[15];
- subchnl->cdsc_absaddr.msf.minute = cgc.buffer[9];
- subchnl->cdsc_absaddr.msf.second = cgc.buffer[10];
- subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11];
+ if (subchnl->cdsc_format == CDROM_LBA) {
+ subchnl->cdsc_absaddr.lba = ((cgc.buffer[8] << 24) |
+ (cgc.buffer[9] << 16) |
+ (cgc.buffer[10] << 8) |
+ (cgc.buffer[11]));
+ subchnl->cdsc_reladdr.lba = ((cgc.buffer[12] << 24) |
+ (cgc.buffer[13] << 16) |
+ (cgc.buffer[14] << 8) |
+ (cgc.buffer[15]));
+ } else {
+ subchnl->cdsc_reladdr.msf.minute = cgc.buffer[13];
+ subchnl->cdsc_reladdr.msf.second = cgc.buffer[14];
+ subchnl->cdsc_reladdr.msf.frame = cgc.buffer[15];
+ subchnl->cdsc_absaddr.msf.minute = cgc.buffer[9];
+ subchnl->cdsc_absaddr.msf.second = cgc.buffer[10];
+ subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11];
+ }
return 0;
}
@@ -3022,7 +3032,7 @@ static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
if (!((requested == CDROM_MSF) ||
(requested == CDROM_LBA)))
return -EINVAL;
- q.cdsc_format = CDROM_MSF;
+
ret = cdrom_read_subchannel(cdi, &q, 0);
if (ret)
return ret;
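The subchannel reply encodes both addresses as big-endian 32-bit values, the absolute address in bytes 8-11 of the buffer and the relative address in bytes 12-15; the shift chains above are open-coded big-endian loads. Equivalently (sketch_be32() is a hypothetical helper; the kernel's get_unaligned_be32() would do the same):

	static u32 sketch_be32(const u8 *p)
	{
		return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
	}

	/* e.g.: subchnl->cdsc_absaddr.lba = sketch_be32(&cgc.buffer[8]); */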
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 8bf70e8c3f79..50aa9ba91f25 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -325,7 +325,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
if(get_user(bin, &binary->bin) < 0)
return -EFAULT;
- if (len == 0) {
+ if (len <= 0) {
return -EINVAL; /* nothing to upload?!? */
}
if (len > DSP56K_MAX_BINARY_LENGTH) {
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index ac51149e9777..56ad5a5936a9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -90,7 +90,7 @@ config HW_RANDOM_BCM63XX
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835 Random Number Generator support"
- depends on ARCH_BCM2835
+ depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
@@ -396,6 +396,20 @@ config HW_RANDOM_PIC32
If unsure, say Y.
+config HW_RANDOM_MESON
+ tristate "Amlogic Meson Random Number Generator support"
+ depends on HW_RANDOM
+ depends on ARCH_MESON || COMPILE_TEST
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Amlogic Meson SoCs.
+
+ To compile this driver as a module, choose M here. The
+ module will be called meson-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 63022b49f160..04bb0b03356f 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
+obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 7192ec25f667..af2149273fe0 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -19,6 +19,7 @@
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
#define RNG_DATA 0x8
+#define RNG_INT_MASK 0x10
/* enable rng */
#define RNG_RBGEN 0x1
@@ -26,10 +27,24 @@
/* the initial numbers generated are "less random" so will be discarded */
#define RNG_WARMUP_COUNT 0x40000
+#define RNG_INT_OFF 0x1
+
+static void __init nsp_rng_init(void __iomem *base)
+{
+ u32 val;
+
+ /* mask the interrupt */
+ val = readl(base + RNG_INT_MASK);
+ val |= RNG_INT_OFF;
+ writel(val, base + RNG_INT_MASK);
+}
+
static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
void __iomem *rng_base = (void __iomem *)rng->priv;
+ u32 max_words = max / sizeof(u32);
+ u32 num_words, count;
while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
if (!wait)
@@ -37,8 +52,14 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
cpu_relax();
}
- *(u32 *)buf = __raw_readl(rng_base + RNG_DATA);
- return sizeof(u32);
+ num_words = readl(rng_base + RNG_STATUS) >> 24;
+ if (num_words > max_words)
+ num_words = max_words;
+
+ for (count = 0; count < num_words; count++)
+ ((u32 *)buf)[count] = readl(rng_base + RNG_DATA);
+
+ return num_words * sizeof(u32);
}
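The rewritten read path drains as many words as the hardware reports ready: the top byte of RNG_STATUS counts available 32-bit words, so the loop copies min(available, max / 4) of them instead of a single word per call. The core of the hunk above, restated as a sketch:

	/* top byte of RNG_STATUS is the number of 32-bit words ready */
	u32 avail = readl(rng_base + RNG_STATUS) >> 24;
	u32 words = min_t(u32, avail, max / sizeof(u32));
	u32 i;

	for (i = 0; i < words; i++)
		((u32 *)buf)[i] = readl(rng_base + RNG_DATA);

	return words * sizeof(u32);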
static struct hwrng bcm2835_rng_ops = {
@@ -46,10 +67,19 @@ static struct hwrng bcm2835_rng_ops = {
.read = bcm2835_rng_read,
};
+static const struct of_device_id bcm2835_rng_of_match[] = {
+ { .compatible = "brcm,bcm2835-rng"},
+ { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init},
+ { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init},
+ {},
+};
+
static int bcm2835_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ void (*rng_setup)(void __iomem *base);
+ const struct of_device_id *rng_id;
void __iomem *rng_base;
int err;
@@ -61,6 +91,15 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
}
bcm2835_rng_ops.priv = (unsigned long)rng_base;
+ rng_id = of_match_node(bcm2835_rng_of_match, np);
+ if (!rng_id)
+ return -EINVAL;
+
+ /* Check for rng init function, execute it */
+ rng_setup = rng_id->data;
+ if (rng_setup)
+ rng_setup(rng_base);
+
/* set warm-up count & enable */
__raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
__raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
@@ -90,10 +129,6 @@ static int bcm2835_rng_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id bcm2835_rng_of_match[] = {
- { .compatible = "brcm,bcm2835-rng", },
- {},
-};
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
static struct platform_driver bcm2835_rng_driver = {
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index ed44561ea647..23d358553b21 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -45,12 +45,12 @@ struct exynos_rng {
static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset)
{
- return __raw_readl(rng->mem + offset);
+ return readl_relaxed(rng->mem + offset);
}
static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset)
{
- __raw_writel(val, rng->mem + offset);
+ writel_relaxed(val, rng->mem + offset);
}
static int exynos_rng_configure(struct exynos_rng *exynos_rng)
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
new file mode 100644
index 000000000000..0cfd81bcaeac
--- /dev/null
+++ b/drivers/char/hw_random/meson-rng.c
@@ -0,0 +1,131 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of.h>
+
+#define RNG_DATA 0x00
+
+struct meson_rng_data {
+ void __iomem *base;
+ struct platform_device *pdev;
+ struct hwrng rng;
+};
+
+static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct meson_rng_data *data =
+ container_of(rng, struct meson_rng_data, rng);
+
+ if (max < sizeof(u32))
+ return 0;
+
+ *(u32 *)buf = readl_relaxed(data->base + RNG_DATA);
+
+ return sizeof(u32);
+}
+
+static int meson_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_rng_data *data;
+ struct resource *res;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->rng.name = pdev->name;
+ data->rng.read = meson_rng_read;
+
+ platform_set_drvdata(pdev, data);
+
+ return devm_hwrng_register(dev, &data->rng);
+}
+
+static const struct of_device_id meson_rng_of_match[] = {
+ { .compatible = "amlogic,meson-rng", },
+ {},
+};
+
+static struct platform_driver meson_rng_driver = {
+ .probe = meson_rng_probe,
+ .driver = {
+ .name = "meson-rng",
+ .of_match_table = meson_rng_of_match,
+ },
+};
+
+module_platform_driver(meson_rng_driver);
+
+MODULE_ALIAS("platform:meson-rng");
+MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
+MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 8a1432e8bb80..01d4be2c354b 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -384,7 +384,12 @@ static int omap_rng_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
+ pm_runtime_put_noidle(&pdev->dev);
+ goto err_ioremap;
+ }
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
get_omap_rng_device_details(priv);
@@ -435,8 +440,15 @@ static int __maybe_unused omap_rng_suspend(struct device *dev)
static int __maybe_unused omap_rng_resume(struct device *dev)
{
struct omap_rng_dev *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to runtime_get device: %d\n", ret);
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
- pm_runtime_get_sync(dev);
priv->pdata->init(priv);
return 0;
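One subtlety behind the checks above: pm_runtime_get_sync() returns 1, not 0, when the device was already active, so only negative values are errors, and the usage count must still be dropped on failure, hence the put_noidle in the error paths. The canonical shape is:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count */
		return ret;
	}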
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 92a810648bd0..63d84e6f1891 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -69,8 +69,12 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
}
/* If error detected or data not ready... */
- if (sr != RNG_SR_DRDY)
+ if (sr != RNG_SR_DRDY) {
+ if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS),
+ "bad RNG status - %x\n", sr))
+ writel_relaxed(0, priv->base + RNG_SR);
break;
+ }
*(u32 *)data = readl_relaxed(priv->base + RNG_DR);
@@ -79,10 +83,6 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
max -= sizeof(u32);
}
- if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS),
- "bad RNG status - %x\n", sr))
- writel_relaxed(0, priv->base + RNG_SR);
-
pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 71025c2f6bbb..d633974e7f8b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -66,12 +66,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
u64 cursor = from;
while (cursor < to) {
- if (!devmem_is_allowed(pfn)) {
- printk(KERN_INFO
- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
- current->comm, from, to);
+ if (!devmem_is_allowed(pfn))
return 0;
- }
cursor += PAGE_SIZE;
pfn++;
}
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 10f846cc8db1..25d5906640c3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -99,7 +99,7 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
struct clk_programmable *prog = to_clk_programmable(hw);
const struct clk_programmable_layout *layout = prog->layout;
unsigned int mask = layout->css_mask;
- unsigned int pckr = 0;
+ unsigned int pckr = index;
if (layout->have_slck_mck)
mask |= AT91_PMC_CSSMCK_MCK;
diff --git a/drivers/clk/sunxi/clk-sun4i-display.c b/drivers/clk/sunxi/clk-sun4i-display.c
index 445a7498d6df..9780fac6d029 100644
--- a/drivers/clk/sunxi/clk-sun4i-display.c
+++ b/drivers/clk/sunxi/clk-sun4i-display.c
@@ -33,6 +33,8 @@ struct sun4i_a10_display_clk_data {
u8 width_div;
u8 width_mux;
+
+ u32 flags;
};
struct reset_data {
@@ -166,7 +168,7 @@ static void __init sun4i_a10_display_init(struct device_node *node,
data->has_div ? &div->hw : NULL,
data->has_div ? &clk_divider_ops : NULL,
&gate->hw, &clk_gate_ops,
- 0);
+ data->flags);
if (IS_ERR(clk)) {
pr_err("%s: Couldn't register the clock\n", clk_name);
goto free_div;
@@ -232,6 +234,7 @@ static const struct sun4i_a10_display_clk_data sun4i_a10_tcon_ch0_data __initcon
.offset_rst = 29,
.offset_mux = 24,
.width_mux = 2,
+ .flags = CLK_SET_RATE_PARENT,
};
static void __init sun4i_a10_tcon_ch0_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
index 98a4582de56a..b6d29d1bedca 100644
--- a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
+++ b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
@@ -79,15 +79,11 @@ static int tcon_ch1_is_enabled(struct clk_hw *hw)
static u8 tcon_ch1_get_parent(struct clk_hw *hw)
{
struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
- int num_parents = clk_hw_get_num_parents(hw);
u32 reg;
reg = readl(tclk->reg) >> TCON_CH1_SCLK2_MUX_SHIFT;
reg &= reg >> TCON_CH1_SCLK2_MUX_MASK;
- if (reg >= num_parents)
- return -EINVAL;
-
return reg;
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 47352d25c15e..567788664723 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -27,6 +27,20 @@ config CLKBLD_I8253
config CLKSRC_MMIO
bool
+config BCM2835_TIMER
+ bool "BCM2835 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
Enables support for the BCM2835 timer.
+
+config BCM_KONA_TIMER
+ bool "BCM mobile timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
Enables support for the BCM Kona mobile timer.
+
config DIGICOLOR_TIMER
bool "Digicolor timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
@@ -141,6 +155,72 @@ config CLKSRC_DBX500_PRCMU
help
Use the always on PRCMU Timer as clocksource
+config CLPS711X_TIMER
+ bool "Cirrus logic timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Cirrus Logic CLPS711X timer.
+
+config ATLAS7_TIMER
+ bool "Atlas7 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Atlas7 timer.
+
+config MOXART_TIMER
+ bool "Moxart timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Moxart timer.
+
+config MXS_TIMER
+ bool "Mxs timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ select STMP_DEVICE
+ help
+ Enables support for the MXS timer.
+
+config PRIMA2_TIMER
+ bool "Prima2 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Prima2 timer.
+
+config U300_TIMER
+ bool "U300 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ depends on ARM
+ select CLKSRC_MMIO
+ help
+ Enables support for the U300 timer.
+
+config NSPIRE_TIMER
+ bool "NSpire timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the NSpire timer.
+
+config KEYSTONE_TIMER
+ bool "Keystone timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ depends on ARM || ARM64
+ select CLKSRC_MMIO
+ help
+ Enables support for the Keystone timer.
+
+config INTEGRATOR_AP_TIMER
+ bool "Integrator-ap timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Integrator/AP timer.
+
config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
bool "Clocksource PRCMU Timer sched_clock"
depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK)
@@ -208,14 +288,16 @@ config ARM_ARCH_TIMER
select CLKSRC_ACPI if ACPI
config ARM_ARCH_TIMER_EVTSTREAM
- bool "Support for ARM architected timer event stream generation"
+ bool "Enable ARM architected timer event stream generation by default"
default y if ARM_ARCH_TIMER
depends on ARM_ARCH_TIMER
help
- This option enables support for event stream generation based on
- the ARM architected timer. It is used for waking up CPUs executing
- the wfe instruction at a frequency represented as a power-of-2
- divisor of the clock rate.
+ This option enables support by default for event stream generation
+ based on the ARM architected timer. It is used for waking up CPUs
+ executing the wfe instruction at a frequency represented as a
+ power-of-2 divisor of the clock rate. The behaviour can also be
+ overridden on the command line using the
+ clocksource.arm_arch_timer.evtstream parameter.
The main use of the event stream is wfe-based timeouts of userspace
locking implementations. It might also be useful for imposing timeout
on wfe to safeguard against any programming errors in case an expected
@@ -224,8 +306,9 @@ config ARM_ARCH_TIMER_EVTSTREAM
hardware anomalies of missing events.
config ARM_GLOBAL_TIMER
- bool
+ bool "Support for the ARM global timer" if COMPILE_TEST
select CLKSRC_OF if OF
+ depends on ARM
help
This option enables support for the ARM global timer unit.
@@ -243,7 +326,7 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
Use ARM global timer clock source as sched_clock
config ARMV7M_SYSTICK
- bool
+ bool "Support for the ARMv7M system time" if COMPILE_TEST
select CLKSRC_OF if OF
select CLKSRC_MMIO
help
@@ -254,9 +337,12 @@ config ATMEL_PIT
def_bool SOC_AT91SAM9 || SOC_SAMA5
config ATMEL_ST
- bool
+ bool "Atmel ST timer support" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
select CLKSRC_OF
select MFD_SYSCON
+ help
+ Support for the Atmel ST timer.
config CLKSRC_METAG_GENERIC
def_bool y if METAG
@@ -270,7 +356,7 @@ config CLKSRC_EXYNOS_MCT
Support for Multi Core Timer controller on Exynos SoCs.
config CLKSRC_SAMSUNG_PWM
- bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
+ bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
help
@@ -293,6 +379,14 @@ config VF_PIT_TIMER
help
Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
+config OXNAS_RPS_TIMER
+ bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_OF
+ select CLKSRC_MMIO
+ help
+ This enables support for the Oxford Semiconductor OXNAS RPS timers.
+
config SYS_SUPPORTS_SH_CMT
bool
@@ -361,8 +455,8 @@ config CLKSRC_QCOM
Qualcomm SoCs.
config CLKSRC_VERSATILE
- bool "ARM Versatile (Express) reference platforms clock source"
- depends on PLAT_VERSATILE && GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
+ bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST
+ depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
select CLKSRC_OF
default y if MFD_VEXPRESS_SYSREG
help
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 473974f9590a..fd9d6df0bbc0 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -19,21 +19,21 @@ obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ORION_TIMER) += time-orion.o
-obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
-obj-$(CONFIG_ARCH_CLPS711X) += clps711x-timer.o
-obj-$(CONFIG_ARCH_ATLAS7) += timer-atlas7.o
-obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
-obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
+obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
+obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
+obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
+obj-$(CONFIG_MOXART_TIMER) += moxart_timer.o
+obj-$(CONFIG_MXS_TIMER) += mxs_timer.o
obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o
-obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
-obj-$(CONFIG_ARCH_U300) += timer-u300.o
+obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o
+obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o
obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o
obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
-obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
-obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm_kona_timer.o
+obj-$(CONFIG_NSPIRE_TIMER) += zevio-timer.o
+obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
@@ -48,6 +48,7 @@ obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
+obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
@@ -55,8 +56,8 @@ obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
-obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
-obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
+obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
+obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4814446a0024..5effd3027319 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -79,6 +79,14 @@ static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
+static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
+
+static int __init early_evtstrm_cfg(char *buf)
+{
+ return strtobool(buf, &evtstrm_enable);
+}
+early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
+
/*
* Architected system timer support.
*/
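The evtstrm override is a standard early_param: the token after '=' on the kernel command line is parsed with strtobool() before any timer probes run. A generic sketch of the same hook (the "example.feature" name and variables are hypothetical):

	static bool example_enable = true;

	static int __init example_enable_cfg(char *buf)
	{
		/* accepts y/n, 1/0, on/off-style boot arguments */
		return strtobool(buf, &example_enable);
	}
	early_param("example.feature", example_enable_cfg);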
@@ -372,7 +380,7 @@ static int arch_timer_setup(struct clock_event_device *clk)
enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
arch_counter_set_user_access();
- if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
+ if (evtstrm_enable)
arch_timer_configure_evtstream();
return 0;
@@ -693,25 +701,26 @@ arch_timer_needs_probing(int type, const struct of_device_id *matches)
return needs_probing;
}
-static void __init arch_timer_common_init(void)
+static int __init arch_timer_common_init(void)
{
unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
/* Wait until both nodes are probed if we have two timers */
if ((arch_timers_present & mask) != mask) {
if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
- return;
+ return 0;
if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
- return;
+ return 0;
}
arch_timer_banner(arch_timers_present);
arch_counter_register(arch_timers_present);
- arch_timer_arch_init();
+ return arch_timer_arch_init();
}
-static void __init arch_timer_init(void)
+static int __init arch_timer_init(void)
{
+ int ret;
/*
* If HYP mode is available, we know that the physical timer
* has been configured to be accessible from PL1. Use it, so
@@ -739,23 +748,30 @@ static void __init arch_timer_init(void)
if (!has_ppi) {
pr_warn("arch_timer: No interrupt available, giving up\n");
- return;
+ return -EINVAL;
}
}
- arch_timer_register();
- arch_timer_common_init();
+ ret = arch_timer_register();
+ if (ret)
+ return ret;
+
+ ret = arch_timer_common_init();
+ if (ret)
+ return ret;
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
+
+ return 0;
}
-static void __init arch_timer_of_init(struct device_node *np)
+static int __init arch_timer_of_init(struct device_node *np)
{
int i;
if (arch_timers_present & ARCH_CP15_TIMER) {
pr_warn("arch_timer: multiple nodes in dt, skipping\n");
- return;
+ return 0;
}
arch_timers_present |= ARCH_CP15_TIMER;
@@ -774,23 +790,23 @@ static void __init arch_timer_of_init(struct device_node *np)
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
arch_timer_uses_ppi = PHYS_SECURE_PPI;
- arch_timer_init();
+ return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
-static void __init arch_timer_mem_init(struct device_node *np)
+static int __init arch_timer_mem_init(struct device_node *np)
{
struct device_node *frame, *best_frame = NULL;
void __iomem *cntctlbase, *base;
- unsigned int irq;
+ unsigned int irq;
+ int ret = -EINVAL;
u32 cnttidr;
arch_timers_present |= ARCH_MEM_TIMER;
cntctlbase = of_iomap(np, 0);
if (!cntctlbase) {
pr_err("arch_timer: Can't find CNTCTLBase\n");
- return;
+ return -ENXIO;
}
cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
@@ -830,6 +846,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
best_frame = of_node_get(frame);
}
+ ret = -ENXIO;
base = arch_counter_base = of_iomap(best_frame, 0);
if (!base) {
pr_err("arch_timer: Can't map frame's registers\n");
@@ -841,6 +858,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
else
irq = irq_of_parse_and_map(best_frame, 0);
+ ret = -EINVAL;
if (!irq) {
pr_err("arch_timer: Frame missing %s irq",
arch_timer_mem_use_virtual ? "virt" : "phys");
@@ -848,11 +866,15 @@ static void __init arch_timer_mem_init(struct device_node *np)
}
arch_timer_detect_rate(base, np);
- arch_timer_mem_register(base, irq);
- arch_timer_common_init();
+ ret = arch_timer_mem_register(base, irq);
+ if (ret)
+ goto out;
+
+ return arch_timer_common_init();
out:
iounmap(cntctlbase);
of_node_put(best_frame);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
arch_timer_mem_init);
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 9df0d1699d22..2a9ceb6e93f9 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -238,7 +238,7 @@ static void __init gt_delay_timer_init(void)
register_current_timer_delay(&gt_delay_timer);
}
-static void __init gt_clocksource_init(void)
+static int __init gt_clocksource_init(void)
{
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
@@ -249,7 +249,7 @@ static void __init gt_clocksource_init(void)
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
#endif
- clocksource_register_hz(&gt_clocksource, gt_clk_rate);
+ return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
}
static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
@@ -270,7 +270,7 @@ static struct notifier_block gt_cpu_nb = {
.notifier_call = gt_cpu_notify,
};
-static void __init global_timer_of_register(struct device_node *np)
+static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
int err = 0;
@@ -283,19 +283,19 @@ static void __init global_timer_of_register(struct device_node *np)
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
&& (read_cpuid_id() & 0xf0000f) < 0x200000) {
pr_warn("global-timer: non support for this cpu version.\n");
- return;
+ return -ENOSYS;
}
gt_ppi = irq_of_parse_and_map(np, 0);
if (!gt_ppi) {
pr_warn("global-timer: unable to parse irq\n");
- return;
+ return -EINVAL;
}
gt_base = of_iomap(np, 0);
if (!gt_base) {
pr_warn("global-timer: invalid base address\n");
- return;
+ return -ENXIO;
}
gt_clk = of_clk_get(np, 0);
@@ -332,11 +332,17 @@ static void __init global_timer_of_register(struct device_node *np)
}
/* Immediately configure the timer on the boot CPU */
- gt_clocksource_init();
- gt_clockevents_init(this_cpu_ptr(gt_evt));
+ err = gt_clocksource_init();
+ if (err)
+ goto out_irq;
+
+ err = gt_clockevents_init(this_cpu_ptr(gt_evt));
+ if (err)
+ goto out_irq;
+
gt_delay_timer_init();
- return;
+ return 0;
out_irq:
free_percpu_irq(gt_ppi, gt_evt);
@@ -347,6 +353,8 @@ out_clk:
out_unmap:
iounmap(gt_base);
WARN(err, "ARM Global timer register failed (%d)\n", err);
+
+ return err;
}
/* Only tested on r2p2 and r3p0 */
diff --git a/drivers/clocksource/armv7m_systick.c b/drivers/clocksource/armv7m_systick.c
index addfd2c64f54..a315491b7047 100644
--- a/drivers/clocksource/armv7m_systick.c
+++ b/drivers/clocksource/armv7m_systick.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk.h>
@@ -21,7 +22,7 @@
#define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF
-static void __init system_timer_of_register(struct device_node *np)
+static int __init system_timer_of_register(struct device_node *np)
{
struct clk *clk = NULL;
void __iomem *base;
@@ -31,22 +32,26 @@ static void __init system_timer_of_register(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
pr_warn("system-timer: invalid base address\n");
- return;
+ return -ENXIO;
}
ret = of_property_read_u32(np, "clock-frequency", &rate);
if (ret) {
clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
goto out_unmap;
+ }
ret = clk_prepare_enable(clk);
if (ret)
goto out_clk_put;
rate = clk_get_rate(clk);
- if (!rate)
+ if (!rate) {
+ ret = -EINVAL;
goto out_clk_disable;
+ }
}
writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR);
@@ -64,7 +69,7 @@ static void __init system_timer_of_register(struct device_node *np)
pr_info("ARM System timer initialized as clocksource\n");
- return;
+ return 0;
out_clk_disable:
clk_disable_unprepare(clk);
@@ -73,6 +78,8 @@ out_clk_put:
out_unmap:
iounmap(base);
pr_warn("ARM System timer register failed (%d)\n", ret);
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick",
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 217438d39eb3..1ba871b7fe11 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -184,7 +184,7 @@ static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id)
* Timer initialization
* ---------------------------------------------------------------------------
*/
-static void __init asm9260_timer_init(struct device_node *np)
+static int __init asm9260_timer_init(struct device_node *np)
{
int irq;
struct clk *clk;
@@ -192,20 +192,26 @@ static void __init asm9260_timer_init(struct device_node *np)
unsigned long rate;
priv.base = of_io_request_and_map(np, 0, np->name);
- if (IS_ERR(priv.base))
- panic("%s: unable to map resource", np->name);
+ if (IS_ERR(priv.base)) {
+ pr_err("%s: unable to map resource", np->name);
+ return PTR_ERR(priv.base);
+ }
clk = of_clk_get(np, 0);
ret = clk_prepare_enable(clk);
- if (ret)
- panic("Failed to enable clk!\n");
+ if (ret) {
+ pr_err("Failed to enable clk!\n");
+ return ret;
+ }
irq = irq_of_parse_and_map(np, 0);
ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER,
DRIVER_NAME, &event_dev);
- if (ret)
- panic("Failed to setup irq!\n");
+ if (ret) {
+ pr_err("Failed to setup irq!\n");
+ return ret;
+ }
/* set all timers for count-up */
writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR);
@@ -229,6 +235,8 @@ static void __init asm9260_timer_init(struct device_node *np)
priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
event_dev.cpumask = cpumask_of(0);
clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer",
asm9260_timer_init);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 6f2822928963..e71acf231c89 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -80,19 +80,24 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
}
}
-static void __init bcm2835_timer_init(struct device_node *node)
+static int __init bcm2835_timer_init(struct device_node *node)
{
void __iomem *base;
u32 freq;
- int irq;
+ int irq, ret;
struct bcm2835_timer *timer;
base = of_iomap(node, 0);
- if (!base)
- panic("Can't remap registers");
+ if (!base) {
+ pr_err("Can't remap registers");
+ return -ENXIO;
+ }
- if (of_property_read_u32(node, "clock-frequency", &freq))
- panic("Can't read clock-frequency");
+ ret = of_property_read_u32(node, "clock-frequency", &freq);
+ if (ret) {
+ pr_err("Can't read clock-frequency");
+ return ret;
+ }
system_clock = base + REG_COUNTER_LO;
sched_clock_register(bcm2835_sched_read, 32, freq);
@@ -101,12 +106,16 @@ static void __init bcm2835_timer_init(struct device_node *node)
freq, 300, 32, clocksource_mmio_readl_up);
irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return -EINVAL;
+ }
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
- if (!timer)
- panic("Can't allocate timer struct\n");
+ if (!timer) {
+ pr_err("Can't allocate timer struct\n");
+ return -ENOMEM;
+ }
timer->control = base + REG_CONTROL;
timer->compare = base + REG_COMPARE(DEFAULT_TIMER);
@@ -121,12 +130,17 @@ static void __init bcm2835_timer_init(struct device_node *node)
timer->act.dev_id = timer;
timer->act.handler = bcm2835_time_interrupt;
- if (setup_irq(irq, &timer->act))
- panic("Can't set up timer IRQ\n");
+ ret = setup_irq(irq, &timer->act);
+ if (ret) {
+ pr_err("Can't set up timer IRQ\n");
+ return ret;
+ }
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
pr_info("bcm2835: system timer (irq = %d)\n", irq);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
bcm2835_timer_init);
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index e717e87df9bc..7e3fd375a627 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -20,7 +20,6 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <asm/mach/time.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -163,16 +162,11 @@ static struct irqaction kona_timer_irq = {
.handler = kona_timer_interrupt,
};
-static void __init kona_timer_init(struct device_node *node)
+static int __init kona_timer_init(struct device_node *node)
{
u32 freq;
struct clk *external_clk;
- if (!of_device_is_available(node)) {
- pr_info("Kona Timer v1 marked as disabled in device tree\n");
- return;
- }
-
external_clk = of_clk_get_by_name(node, NULL);
if (!IS_ERR(external_clk)) {
@@ -182,7 +176,7 @@ static void __init kona_timer_init(struct device_node *node)
arch_timer_rate = freq;
} else {
pr_err("Kona Timer v1 unable to determine clock-frequency");
- return;
+ return -EINVAL;
}
/* Setup IRQ numbers */
@@ -196,6 +190,8 @@ static void __init kona_timer_init(struct device_node *node)
kona_timer_clockevents_init();
setup_irq(timers.tmr_irq, &kona_timer_irq);
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 9be6018bd2b8..fbfbdec13b08 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -322,22 +322,22 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
+static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
u32 timer_width)
{
struct ttc_timer_clocksource *ttccs;
int err;
ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
- if (WARN_ON(!ttccs))
- return;
+ if (!ttccs)
+ return -ENOMEM;
ttccs->ttc.clk = clk;
err = clk_prepare_enable(ttccs->ttc.clk);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttccs);
- return;
+ return err;
}
ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
@@ -345,8 +345,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
ttccs->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clocksource_cb;
ttccs->ttc.clk_rate_change_nb.next = NULL;
- if (clk_notifier_register(ttccs->ttc.clk,
- &ttccs->ttc.clk_rate_change_nb))
+
+ err = clk_notifier_register(ttccs->ttc.clk,
+ &ttccs->ttc.clk_rate_change_nb);
+ if (err)
pr_warn("Unable to register clock notifier.\n");
ttccs->ttc.base_addr = base;
@@ -368,14 +370,16 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttccs);
- return;
+ return err;
}
ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
sched_clock_register(ttc_sched_clock_read, timer_width,
ttccs->ttc.freq / PRESCALE);
+
+ return 0;
}
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@@ -401,30 +405,35 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
}
}
-static void __init ttc_setup_clockevent(struct clk *clk,
- void __iomem *base, u32 irq)
+static int __init ttc_setup_clockevent(struct clk *clk,
+ void __iomem *base, u32 irq)
{
struct ttc_timer_clockevent *ttcce;
int err;
ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
- if (WARN_ON(!ttcce))
- return;
+ if (!ttcce)
+ return -ENOMEM;
ttcce->ttc.clk = clk;
err = clk_prepare_enable(ttcce->ttc.clk);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttcce);
- return;
+ return err;
}
ttcce->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clockevent_cb;
ttcce->ttc.clk_rate_change_nb.next = NULL;
- if (clk_notifier_register(ttcce->ttc.clk,
- &ttcce->ttc.clk_rate_change_nb))
+
+ err = clk_notifier_register(ttcce->ttc.clk,
+ &ttcce->ttc.clk_rate_change_nb);
+ if (err) {
pr_warn("Unable to register clock notifier.\n");
+ return err;
+ }
+
ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
ttcce->ttc.base_addr = base;
@@ -451,13 +460,15 @@ static void __init ttc_setup_clockevent(struct clk *clk,
err = request_irq(irq, ttc_clock_event_interrupt,
IRQF_TIMER, ttcce->ce.name, ttcce);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttcce);
- return;
+ return err;
}
clockevents_config_and_register(&ttcce->ce,
ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
+
+ return 0;
}
/**
@@ -466,17 +477,17 @@ static void __init ttc_setup_clockevent(struct clk *clk,
* Initializes the timer hardware and registers the clock source and clock event
* timers with the Linux kernel timer framework
*/
-static void __init ttc_timer_init(struct device_node *timer)
+static int __init ttc_timer_init(struct device_node *timer)
{
unsigned int irq;
void __iomem *timer_baseaddr;
struct clk *clk_cs, *clk_ce;
static int initialized;
- int clksel;
+ int clksel, ret;
u32 timer_width = 16;
if (initialized)
- return;
+ return 0;
initialized = 1;
@@ -488,13 +499,13 @@ static void __init ttc_timer_init(struct device_node *timer)
timer_baseaddr = of_iomap(timer, 0);
if (!timer_baseaddr) {
pr_err("ERROR: invalid timer base address\n");
- BUG();
+ return -ENXIO;
}
irq = irq_of_parse_and_map(timer, 1);
if (irq <= 0) {
pr_err("ERROR: invalid interrupt number\n");
- BUG();
+ return -EINVAL;
}
of_property_read_u32(timer, "timer-width", &timer_width);
@@ -504,7 +515,7 @@ static void __init ttc_timer_init(struct device_node *timer)
clk_cs = of_clk_get(timer, clksel);
if (IS_ERR(clk_cs)) {
pr_err("ERROR: timer input clock not found\n");
- BUG();
+ return PTR_ERR(clk_cs);
}
clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
@@ -512,13 +523,20 @@ static void __init ttc_timer_init(struct device_node *timer)
clk_ce = of_clk_get(timer, clksel);
if (IS_ERR(clk_ce)) {
pr_err("ERROR: timer input clock not found\n");
- BUG();
+ return PTR_ERR(clk_ce);
}
- ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
- ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+ ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
+ if (ret)
+ return ret;
+
+ ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+ if (ret)
+ return ret;
pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index dfad6eb99662..77a365f573d7 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -64,7 +64,7 @@ static u64 notrace dbx500_prcmu_sched_clock_read(void)
#endif
-static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
+static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
{
clksrc_dbx500_timer_base = of_iomap(node, 0);
@@ -84,7 +84,7 @@ static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
#endif
- clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
+ return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}
CLOCKSOURCE_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4",
clksrc_dbx500_prcmu_init);
diff --git a/drivers/clocksource/clksrc-probe.c b/drivers/clocksource/clksrc-probe.c
index 7cb6c923a836..bc62be97f0a8 100644
--- a/drivers/clocksource/clksrc-probe.c
+++ b/drivers/clocksource/clksrc-probe.c
@@ -28,15 +28,23 @@ void __init clocksource_probe(void)
{
struct device_node *np;
const struct of_device_id *match;
- of_init_fn_1 init_func;
+ of_init_fn_1_ret init_func_ret;
unsigned clocksources = 0;
+ int ret;
for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
if (!of_device_is_available(np))
continue;
- init_func = match->data;
- init_func(np);
+ init_func_ret = match->data;
+
+ ret = init_func_ret(np);
+ if (ret) {
+ pr_err("Failed to initialize '%s': %d",
+ of_node_full_name(np), ret);
+ continue;
+ }
+
clocksources++;
}
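For context, the probe loop above dispatches through an int-returning callback type. Its declaration is not part of this hunk; inferred from the usage, it presumably lives in include/linux/of.h alongside of_init_fn_1 and looks like this:

struct device_node;

/* Assumed shape, reconstructed from the loop above. */
typedef int (*of_init_fn_1_ret)(struct device_node *);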
diff --git a/drivers/clocksource/clksrc_st_lpc.c b/drivers/clocksource/clksrc_st_lpc.c
index 65ec4674416d..03cc49217bb4 100644
--- a/drivers/clocksource/clksrc_st_lpc.c
+++ b/drivers/clocksource/clksrc_st_lpc.c
@@ -92,7 +92,7 @@ static int __init st_clksrc_setup_clk(struct device_node *np)
return 0;
}
-static void __init st_clksrc_of_register(struct device_node *np)
+static int __init st_clksrc_of_register(struct device_node *np)
{
int ret;
uint32_t mode;
@@ -100,32 +100,36 @@ static void __init st_clksrc_of_register(struct device_node *np)
ret = of_property_read_u32(np, "st,lpc-mode", &mode);
if (ret) {
pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
- return;
+ return ret;
}
/* LPC can either run as a Clocksource or in RTC or WDT mode */
if (mode != ST_LPC_MODE_CLKSRC)
- return;
+ return 0;
ddata.base = of_iomap(np, 0);
if (!ddata.base) {
pr_err("clksrc-st-lpc: Unable to map iomem\n");
- return;
+ return -ENXIO;
}
- if (st_clksrc_setup_clk(np)) {
+ ret = st_clksrc_setup_clk(np);
+ if (ret) {
iounmap(ddata.base);
- return;
+ return ret;
}
- if (st_clksrc_init()) {
+ ret = st_clksrc_init();
+ if (ret) {
clk_disable_unprepare(ddata.clk);
clk_put(ddata.clk);
iounmap(ddata.base);
- return;
+ return ret;
}
pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
clk_get_rate(ddata.clk));
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index cdd86e3525bb..84aed78261e4 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -104,7 +104,7 @@ void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
}
#ifdef CONFIG_CLKSRC_OF
-static void __init clps711x_timer_init(struct device_node *np)
+static int __init clps711x_timer_init(struct device_node *np)
{
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
@@ -112,13 +112,11 @@ static void __init clps711x_timer_init(struct device_node *np)
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
- BUG_ON(_clps711x_clksrc_init(clock, base));
- break;
+ return _clps711x_clksrc_init(clock, base);
case CLPS711X_CLKSRC_CLOCKEVENT:
- BUG_ON(_clps711x_clkevt_init(clock, base, irq));
- break;
+ return _clps711x_clkevt_init(clock, base, irq);
default:
- break;
+ return -EINVAL;
}
}
CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 860843cef572..aee6c0d39a7c 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -143,7 +143,7 @@ static struct delay_timer dw_apb_delay_timer = {
#endif
static int num_called;
-static void __init dw_apb_timer_init(struct device_node *timer)
+static int __init dw_apb_timer_init(struct device_node *timer)
{
switch (num_called) {
case 0:
@@ -164,6 +164,8 @@ static void __init dw_apb_timer_init(struct device_node *timer)
}
num_called++;
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index be09bc0b5e26..0d18dd4b3bd2 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -232,7 +232,7 @@ static cycles_t exynos4_read_current_timer(void)
return exynos4_read_count_32();
}
-static void __init exynos4_clocksource_init(void)
+static int __init exynos4_clocksource_init(void)
{
exynos4_mct_frc_start();
@@ -244,6 +244,8 @@ static void __init exynos4_clocksource_init(void)
panic("%s: can't register clocksource\n", mct_frc.name);
sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
+
+ return 0;
}
static void exynos4_mct_comp0_stop(void)
@@ -335,12 +337,14 @@ static struct irqaction mct_comp_event_irq = {
.dev_id = &mct_comp_device,
};
-static void exynos4_clockevent_init(void)
+static int exynos4_clockevent_init(void)
{
mct_comp_device.cpumask = cpumask_of(0);
clockevents_config_and_register(&mct_comp_device, clk_rate,
0xf, 0xffffffff);
setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
+
+ return 0;
}
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
@@ -516,7 +520,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
.notifier_call = exynos4_mct_cpu_notify,
};
-static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
int err, cpu;
struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
@@ -572,15 +576,17 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
/* Immediately configure the timer on the boot CPU */
exynos4_local_timer_setup(mevt);
- return;
+ return 0;
out_irq:
free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
+ return err;
}
-static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
+static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
u32 nr_irqs, i;
+ int ret;
mct_int_type = int_type;
@@ -600,18 +606,24 @@ static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
for (i = MCT_L0_IRQ; i < nr_irqs; i++)
mct_irqs[i] = irq_of_parse_and_map(np, i);
- exynos4_timer_resources(np, of_iomap(np, 0));
- exynos4_clocksource_init();
- exynos4_clockevent_init();
+ ret = exynos4_timer_resources(np, of_iomap(np, 0));
+ if (ret)
+ return ret;
+
+ ret = exynos4_clocksource_init();
+ if (ret)
+ return ret;
+
+ return exynos4_clockevent_init();
}
-static void __init mct_init_spi(struct device_node *np)
+static int __init mct_init_spi(struct device_node *np)
{
return mct_init_dt(np, MCT_INT_SPI);
}
-static void __init mct_init_ppi(struct device_node *np)
+static int __init mct_init_ppi(struct device_node *np)
{
return mct_init_dt(np, MCT_INT_PPI);
}
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 517e1c7624d4..738515b89073 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -316,15 +316,16 @@ static int __init ftm_calc_closest_round_cyc(unsigned long freq)
return 0;
}
-static void __init ftm_timer_init(struct device_node *np)
+static int __init ftm_timer_init(struct device_node *np)
{
unsigned long freq;
- int irq;
+ int ret, irq;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
- return;
+ return -ENOMEM;
+ ret = -ENXIO;
priv->clkevt_base = of_iomap(np, 0);
if (!priv->clkevt_base) {
pr_err("ftm: unable to map event timer registers\n");
@@ -337,6 +338,7 @@ static void __init ftm_timer_init(struct device_node *np)
goto err;
}
+ ret = -EINVAL;
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
@@ -349,18 +351,22 @@ static void __init ftm_timer_init(struct device_node *np)
if (!freq)
goto err;
- if (ftm_calc_closest_round_cyc(freq))
+ ret = ftm_calc_closest_round_cyc(freq);
+ if (ret)
goto err;
- if (ftm_clocksource_init(freq))
+ ret = ftm_clocksource_init(freq);
+ if (ret)
goto err;
- if (ftm_clockevent_init(freq, irq))
+ ret = ftm_clockevent_init(freq, irq);
+ if (ret)
goto err;
- return;
+ return 0;
err:
kfree(priv);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c
index 75c44079b345..07d9d5be9054 100644
--- a/drivers/clocksource/h8300_timer16.c
+++ b/drivers/clocksource/h8300_timer16.c
@@ -126,7 +126,7 @@ static struct timer16_priv timer16_priv = {
#define REG_CH 0
#define REG_COMM 1
-static void __init h8300_16timer_init(struct device_node *node)
+static int __init h8300_16timer_init(struct device_node *node)
{
void __iomem *base[2];
int ret, irq;
@@ -136,9 +136,10 @@ static void __init h8300_16timer_init(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clocksource\n");
- return;
+ return PTR_ERR(clk);
}
+ ret = -ENXIO;
base[REG_CH] = of_iomap(node, 0);
if (!base[REG_CH]) {
pr_err("failed to map registers for clocksource\n");
@@ -151,6 +152,7 @@ static void __init h8300_16timer_init(struct device_node *node)
goto unmap_ch;
}
+ ret = -EINVAL;
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("failed to get irq for clockevent\n");
@@ -174,7 +176,7 @@ static void __init h8300_16timer_init(struct device_node *node)
clocksource_register_hz(&timer16_priv.cs,
clk_get_rate(clk) / 8);
- return;
+ return 0;
unmap_comm:
iounmap(base[REG_COMM]);
@@ -182,6 +184,8 @@ unmap_ch:
iounmap(base[REG_CH]);
free_clk:
clk_put(clk);
+ return ret;
}
-CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", h8300_16timer_init);
+CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer",
+ h8300_16timer_init);
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index c151941e1956..546bb180f5a4 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -164,24 +164,26 @@ static struct timer8_priv timer8_priv = {
},
};
-static void __init h8300_8timer_init(struct device_node *node)
+static int __init h8300_8timer_init(struct device_node *node)
{
void __iomem *base;
- int irq;
+ int irq, ret;
struct clk *clk;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clockevent\n");
- return;
+ return PTR_ERR(clk);
}
+ ret = -ENXIO;
base = of_iomap(node, 0);
if (!base) {
pr_err("failed to map registers for clockevent\n");
goto free_clk;
}
+ ret = -EINVAL;
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("failed to get irq for clockevent\n");
@@ -205,11 +207,12 @@ static void __init h8300_8timer_init(struct device_node *node)
clockevents_config_and_register(&timer8_priv.ced,
timer8_priv.rate, 1, 0x0000ffff);
- return;
+ return 0;
unmap_reg:
iounmap(base);
free_clk:
clk_put(clk);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init);
diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c
index d4c1a287c262..7bdf1991c847 100644
--- a/drivers/clocksource/h8300_tpu.c
+++ b/drivers/clocksource/h8300_tpu.c
@@ -119,15 +119,16 @@ static struct tpu_priv tpu_priv = {
#define CH_L 0
#define CH_H 1
-static void __init h8300_tpu_init(struct device_node *node)
+static int __init h8300_tpu_init(struct device_node *node)
{
void __iomem *base[2];
struct clk *clk;
+ int ret = -ENXIO;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clocksource\n");
- return;
+ return PTR_ERR(clk);
}
base[CH_L] = of_iomap(node, CH_L);
@@ -144,14 +145,13 @@ static void __init h8300_tpu_init(struct device_node *node)
tpu_priv.mapbase1 = base[CH_L];
tpu_priv.mapbase2 = base[CH_H];
- clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
-
- return;
+ return clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
unmap_L:
iounmap(base[CH_H]);
free_clk:
clk_put(clk);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);
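A pattern worth calling out in the h8300 and ftm hunks: ret is preloaded with the errno of the next fallible step, so the shared goto-unwind labels can simply return whatever was pending. A condensed, hypothetical sketch of the idiom (not code from this diff):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

static int __init acme_map_init(struct device_node *np)
{
        void __iomem *a, *b;
        int ret = -ENXIO;

        a = of_iomap(np, 0);
        if (!a)
                return ret;

        b = of_iomap(np, 1);
        if (!b)
                goto unmap_a;           /* still returns -ENXIO */

        ret = -EINVAL;                  /* failure mode of the next step */
        if (!irq_of_parse_and_map(np, 0))
                goto unmap_b;

        return 0;

unmap_b:
        iounmap(b);
unmap_a:
        iounmap(a);
        return ret;
}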
diff --git a/drivers/clocksource/meson6_timer.c b/drivers/clocksource/meson6_timer.c
index 1fa22c4d2d49..52af591a9fc7 100644
--- a/drivers/clocksource/meson6_timer.c
+++ b/drivers/clocksource/meson6_timer.c
@@ -126,18 +126,22 @@ static struct irqaction meson6_timer_irq = {
.dev_id = &meson6_clockevent,
};
-static void __init meson6_timer_init(struct device_node *node)
+static int __init meson6_timer_init(struct device_node *node)
{
u32 val;
int ret, irq;
timer_base = of_io_request_and_map(node, 0, "meson6-timer");
- if (IS_ERR(timer_base))
- panic("Can't map registers");
+ if (IS_ERR(timer_base)) {
+ pr_err("Can't map registers");
+ return -ENXIO;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return -EINVAL;
+ }
/* Set 1us for timer E */
val = readl(timer_base + TIMER_ISA_MUX);
@@ -158,14 +162,17 @@ static void __init meson6_timer_init(struct device_node *node)
meson6_clkevt_time_stop(CED_ID);
ret = setup_irq(irq, &meson6_timer_irq);
- if (ret)
+ if (ret) {
pr_warn("failed to setup irq %d\n", irq);
+ return ret;
+ }
meson6_clockevent.cpumask = cpu_possible_mask;
meson6_clockevent.irq = irq;
clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
1, 0xfffe);
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(meson6, "amlogic,meson6-timer",
meson6_timer_init);
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 89d3e4d7900c..1572c7a778ab 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -146,7 +146,7 @@ static struct clocksource gic_clocksource = {
.archdata = { .vdso_clock_mode = VDSO_CLOCK_GIC },
};
-static void __init __gic_clocksource_init(void)
+static int __init __gic_clocksource_init(void)
{
int ret;
@@ -159,6 +159,8 @@ static void __init __gic_clocksource_init(void)
ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
if (ret < 0)
pr_warn("GIC: Unable to register clocksource\n");
+
+ return ret;
}
void __init gic_clocksource_init(unsigned int frequency)
@@ -179,31 +181,35 @@ void __init gic_clocksource_init(unsigned int frequency)
-static void __init gic_clocksource_of_init(struct device_node *node)
+static int __init gic_clocksource_of_init(struct device_node *node)
{
struct clk *clk;
int ret;
- if (WARN_ON(!gic_present || !node->parent ||
- !of_device_is_compatible(node->parent, "mti,gic")))
- return;
+ if (!gic_present || !node->parent ||
+ !of_device_is_compatible(node->parent, "mti,gic")) {
+ pr_warn("No DT definition for the mips gic driver");
+ return -ENXIO;
+ }
clk = of_clk_get(node, 0);
if (!IS_ERR(clk)) {
- if (clk_prepare_enable(clk) < 0) {
+ ret = clk_prepare_enable(clk);
+ if (ret < 0) {
pr_err("GIC failed to enable clock\n");
clk_put(clk);
- return;
+ return ret;
}
gic_frequency = clk_get_rate(clk);
} else if (of_property_read_u32(node, "clock-frequency",
&gic_frequency)) {
pr_err("GIC frequency not specified.\n");
- return;
+ return -EINVAL;
}
gic_timer_irq = irq_of_parse_and_map(node, 0);
if (!gic_timer_irq) {
pr_err("GIC timer IRQ not specified.\n");
- return;
+ return -EINVAL;
}
- __gic_clocksource_init();
+ ret = __gic_clocksource_init();
+ if (ret)
+ return ret;
ret = gic_clockevent_init();
if (!ret && !IS_ERR(clk)) {
@@ -213,6 +219,8 @@ static void __init gic_clocksource_of_init(struct device_node *node)
/* And finally start the counter */
gic_start_count();
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
gic_clocksource_of_init);
diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c
index 19857af651c1..841454417acd 100644
--- a/drivers/clocksource/moxart_timer.c
+++ b/drivers/clocksource/moxart_timer.c
@@ -119,34 +119,45 @@ static struct irqaction moxart_timer_irq = {
.dev_id = &moxart_clockevent,
};
-static void __init moxart_timer_init(struct device_node *node)
+static int __init moxart_timer_init(struct device_node *node)
{
int ret, irq;
unsigned long pclk;
struct clk *clk;
base = of_iomap(node, 0);
- if (!base)
- panic("%s: of_iomap failed\n", node->full_name);
+ if (!base) {
+ pr_err("%s: of_iomap failed\n", node->full_name);
+ return -ENXIO;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("%s: irq_of_parse_and_map failed\n", node->full_name);
+ if (irq <= 0) {
+ pr_err("%s: irq_of_parse_and_map failed\n", node->full_name);
+ return -EINVAL;
+ }
ret = setup_irq(irq, &moxart_timer_irq);
- if (ret)
- panic("%s: setup_irq failed\n", node->full_name);
+ if (ret) {
+ pr_err("%s: setup_irq failed\n", node->full_name);
+ return ret;
+ }
clk = of_clk_get(node, 0);
- if (IS_ERR(clk))
- panic("%s: of_clk_get failed\n", node->full_name);
+ if (IS_ERR(clk)) {
+ pr_err("%s: of_clk_get failed\n", node->full_name);
+ return PTR_ERR(clk);
+ }
pclk = clk_get_rate(clk);
- if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
- "moxart_timer", pclk, 200, 32,
- clocksource_mmio_readl_down))
- panic("%s: clocksource_mmio_init failed\n", node->full_name);
+ ret = clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
+ "moxart_timer", pclk, 200, 32,
+ clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("%s: clocksource_mmio_init failed\n", node->full_name);
+ return ret;
+ }
clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
@@ -164,5 +175,7 @@ static void __init moxart_timer_init(struct device_node *node)
*/
clockevents_config_and_register(&moxart_clockevent, pclk,
0x4, 0xfffffffe);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
diff --git a/drivers/clocksource/mps2-timer.c b/drivers/clocksource/mps2-timer.c
index 3d33a5e23dee..3e4431ed9aa9 100644
--- a/drivers/clocksource/mps2-timer.c
+++ b/drivers/clocksource/mps2-timer.c
@@ -250,7 +250,7 @@ out:
return ret;
}
-static void __init mps2_timer_init(struct device_node *np)
+static int __init mps2_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
int ret;
@@ -259,7 +259,7 @@ static void __init mps2_timer_init(struct device_node *np)
ret = mps2_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
- return;
+ return 0;
}
}
@@ -267,9 +267,11 @@ static void __init mps2_timer_init(struct device_node *np)
ret = mps2_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
- return;
+ return 0;
}
}
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 7e583f8ea5f4..90659493c59c 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -181,7 +181,7 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
evt->gpt_base + GPT_IRQ_EN_REG);
}
-static void __init mtk_timer_init(struct device_node *node)
+static int __init mtk_timer_init(struct device_node *node)
{
struct mtk_clock_event_device *evt;
struct resource res;
@@ -190,7 +190,7 @@ static void __init mtk_timer_init(struct device_node *node)
evt = kzalloc(sizeof(*evt), GFP_KERNEL);
if (!evt)
- return;
+ return -ENOMEM;
evt->dev.name = "mtk_tick";
evt->dev.rating = 300;
@@ -248,7 +248,7 @@ static void __init mtk_timer_init(struct device_node *node)
mtk_timer_enable_irq(evt, GPT_CLK_EVT);
- return;
+ return 0;
err_clk_disable:
clk_disable_unprepare(clk);
@@ -262,5 +262,7 @@ err_mem:
release_mem_region(res.start, resource_size(&res));
err_kzalloc:
kfree(evt);
+
+ return -EINVAL;
}
CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index f5ce2961c0d6..0ba0a913b41d 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -31,8 +31,6 @@
#include <linux/stmp_device.h>
#include <linux/sched_clock.h>
-#include <asm/mach/time.h>
-
/*
* There are 2 versions of the timrot on Freescale MXS-based SoCs.
* The v1 on MX23 only gets 16 bits counter, while v2 on MX28
@@ -226,10 +224,10 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
return 0;
}
-static void __init mxs_timer_init(struct device_node *np)
+static int __init mxs_timer_init(struct device_node *np)
{
struct clk *timer_clk;
- int irq;
+ int irq, ret;
mxs_timrot_base = of_iomap(np, 0);
WARN_ON(!mxs_timrot_base);
@@ -237,10 +235,12 @@ static void __init mxs_timer_init(struct device_node *np)
timer_clk = of_clk_get(np, 0);
if (IS_ERR(timer_clk)) {
pr_err("%s: failed to get clk\n", __func__);
- return;
+ return PTR_ERR(timer_clk);
}
- clk_prepare_enable(timer_clk);
+ ret = clk_prepare_enable(timer_clk);
+ if (ret)
+ return ret;
/*
* Initialize timers to a known state
@@ -278,11 +278,19 @@ static void __init mxs_timer_init(struct device_node *np)
mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
/* init and register the timer to the framework */
- mxs_clocksource_init(timer_clk);
- mxs_clockevent_init(timer_clk);
+ ret = mxs_clocksource_init(timer_clk);
+ if (ret)
+ return ret;
+
+ ret = mxs_clockevent_init(timer_clk);
+ if (ret)
+ return ret;
/* Make irqs happen */
irq = irq_of_parse_and_map(np, 0);
- setup_irq(irq, &mxs_timer_irq);
+ if (irq <= 0)
+ return -EINVAL;
+
+ return setup_irq(irq, &mxs_timer_irq);
}
CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index bc8dd443c727..3c124d1ca600 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -193,10 +193,11 @@ static struct irqaction nmdk_timer_irq = {
.dev_id = &nmdk_clkevt,
};
-static void __init nmdk_timer_init(void __iomem *base, int irq,
+static int __init nmdk_timer_init(void __iomem *base, int irq,
struct clk *pclk, struct clk *clk)
{
unsigned long rate;
+ int ret;
mtu_base = base;
@@ -226,10 +227,12 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
/* Timer 0 is the free running clocksource */
nmdk_clksrc_reset();
- if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
- rate, 200, 32, clocksource_mmio_readl_down))
- pr_err("timer: failed to initialize clock source %s\n",
- "mtu_0");
+ ret = clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
+ rate, 200, 32, clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("timer: failed to initialize clock source %s\n", "mtu_0");
+ return ret;
+ }
#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
sched_clock_register(nomadik_read_sched_clock, 32, rate);
@@ -244,9 +247,11 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
mtu_delay_timer.freq = rate;
register_current_timer_delay(&mtu_delay_timer);
+
+ return 0;
}
-static void __init nmdk_timer_of_init(struct device_node *node)
+static int __init nmdk_timer_of_init(struct device_node *node)
{
struct clk *pclk;
struct clk *clk;
@@ -254,22 +259,30 @@ static void __init nmdk_timer_of_init(struct device_node *node)
int irq;
base = of_iomap(node, 0);
- if (!base)
- panic("Can't remap registers");
+ if (!base) {
+ pr_err("Can't remap registers");
+ return -ENXIO;
+ }
pclk = of_clk_get_by_name(node, "apb_pclk");
- if (IS_ERR(pclk))
- panic("could not get apb_pclk");
+ if (IS_ERR(pclk)) {
+ pr_err("could not get apb_pclk");
+ return PTR_ERR(pclk);
+ }
clk = of_clk_get_by_name(node, "timclk");
- if (IS_ERR(clk))
- panic("could not get timclk");
+ if (IS_ERR(clk)) {
+ pr_err("could not get timclk");
+ return PTR_ERR(clk);
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return -EINVAL;
+ }
- nmdk_timer_init(base, irq, pclk, clk);
+ return nmdk_timer_init(base, irq, pclk, clk);
}
CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
nmdk_timer_of_init);
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 45b6a4999713..937e10b84d58 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -150,8 +150,10 @@ static struct irqaction pxa_ost0_irq = {
.dev_id = &ckevt_pxa_osmr0,
};
-static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
{
+ int ret;
+
timer_writel(0, OIER);
timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
@@ -159,39 +161,57 @@ static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
ckevt_pxa_osmr0.cpumask = cpumask_of(0);
- setup_irq(irq, &pxa_ost0_irq);
+ ret = setup_irq(irq, &pxa_ost0_irq);
+ if (ret) {
+ pr_err("Failed to setup irq");
+ return ret;
+ }
+
+ ret = clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
+ 32, clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("Failed to init clocksource");
+ return ret;
+ }
- clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
- 32, clocksource_mmio_readl_up);
clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
MIN_OSCR_DELTA * 2, 0x7fffffff);
+
+ return 0;
}
-static void __init pxa_timer_dt_init(struct device_node *np)
+static int __init pxa_timer_dt_init(struct device_node *np)
{
struct clk *clk;
- int irq;
+ int irq, ret;
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
- if (!timer_base)
- panic("%s: unable to map resource\n", np->name);
+ if (!timer_base) {
+ pr_err("%s: unable to map resource\n", np->name);
+ return -ENXIO;
+ }
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_crit("%s: unable to get clk\n", np->name);
- return;
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_crit("Failed to prepare clock");
+ return ret;
}
- clk_prepare_enable(clk);
/* we are only interested in OS-timer0 irq */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
- return;
+ return -EINVAL;
}
- pxa_timer_common_init(irq, clk_get_rate(clk));
+ return pxa_timer_common_init(irq, clk_get_rate(clk));
}
CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index f8e09f923651..662576339049 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -178,7 +178,7 @@ static struct delay_timer msm_delay_timer = {
.read_current_timer = msm_read_current_timer,
};
-static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
+static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
bool percpu)
{
struct clocksource *cs = &msm_clocksource;
@@ -218,12 +218,14 @@ err:
sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
msm_delay_timer.freq = dgt_hz;
register_current_timer_delay(&msm_delay_timer);
+
+ return res;
}
-static void __init msm_dt_timer_init(struct device_node *np)
+static int __init msm_dt_timer_init(struct device_node *np)
{
u32 freq;
- int irq;
+ int irq, ret;
struct resource res;
u32 percpu_offset;
void __iomem *base;
@@ -232,34 +234,35 @@ static void __init msm_dt_timer_init(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
pr_err("Failed to map event base\n");
- return;
+ return -ENXIO;
}
/* We use GPT0 for the clockevent */
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0) {
pr_err("Can't get irq\n");
- return;
+ return -EINVAL;
}
/* We use CPU0's DGT for the clocksource */
if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
percpu_offset = 0;
- if (of_address_to_resource(np, 0, &res)) {
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
pr_err("Failed to parse DGT resource\n");
- return;
+ return ret;
}
cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
if (!cpu0_base) {
pr_err("Failed to map source base\n");
- return;
+ return -EINVAL;
}
if (of_property_read_u32(np, "clock-frequency", &freq)) {
pr_err("Unknown frequency\n");
- return;
+ return -EINVAL;
}
event_base = base + 0x4;
@@ -268,7 +271,7 @@ static void __init msm_dt_timer_init(struct device_node *np)
freq /= 4;
writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
- msm_timer_init(freq, 32, irq, !!percpu_offset);
+ return msm_timer_init(freq, 32, irq, !!percpu_offset);
}
CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index b991b288c803..23e267acba25 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -19,7 +19,8 @@
#define TIMER_LOAD_COUNT0 0x00
#define TIMER_LOAD_COUNT1 0x04
-#define TIMER_CONTROL_REG 0x10
+#define TIMER_CONTROL_REG3288 0x10
+#define TIMER_CONTROL_REG3399 0x1c
#define TIMER_INT_STATUS 0x18
#define TIMER_DISABLE 0x0
@@ -31,6 +32,7 @@
struct bc_timer {
struct clock_event_device ce;
void __iomem *base;
+ void __iomem *ctrl;
u32 freq;
};
@@ -46,15 +48,20 @@ static inline void __iomem *rk_base(struct clock_event_device *ce)
return rk_timer(ce)->base;
}
+static inline void __iomem *rk_ctrl(struct clock_event_device *ce)
+{
+ return rk_timer(ce)->ctrl;
+}
+
static inline void rk_timer_disable(struct clock_event_device *ce)
{
- writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG);
+ writel_relaxed(TIMER_DISABLE, rk_ctrl(ce));
}
static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags)
{
writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags,
- rk_base(ce) + TIMER_CONTROL_REG);
+ rk_ctrl(ce));
}
static void rk_timer_update_counter(unsigned long cycles,
@@ -106,37 +113,42 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __init rk_timer_init(struct device_node *np)
+static int __init rk_timer_init(struct device_node *np, u32 ctrl_reg)
{
struct clock_event_device *ce = &bc_timer.ce;
struct clk *timer_clk;
struct clk *pclk;
- int ret, irq;
+ int ret = -EINVAL, irq;
bc_timer.base = of_iomap(np, 0);
if (!bc_timer.base) {
pr_err("Failed to get base address for '%s'\n", TIMER_NAME);
- return;
+ return -ENXIO;
}
+ bc_timer.ctrl = bc_timer.base + ctrl_reg;
pclk = of_clk_get_by_name(np, "pclk");
if (IS_ERR(pclk)) {
+ ret = PTR_ERR(pclk);
pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
goto out_unmap;
}
- if (clk_prepare_enable(pclk)) {
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
goto out_unmap;
}
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk)) {
+ ret = PTR_ERR(timer_clk);
pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
goto out_timer_clk;
}
- if (clk_prepare_enable(timer_clk)) {
+ ret = clk_prepare_enable(timer_clk);
+ if (ret) {
pr_err("Failed to enable timer clock\n");
goto out_timer_clk;
}
@@ -145,17 +157,19 @@ static void __init rk_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
+ ret = -EINVAL;
pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
goto out_irq;
}
ce->name = TIMER_NAME;
- ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ;
ce->set_next_event = rk_timer_set_next_event;
ce->set_state_shutdown = rk_timer_shutdown;
ce->set_state_periodic = rk_timer_set_periodic;
ce->irq = irq;
- ce->cpumask = cpumask_of(0);
+ ce->cpumask = cpu_possible_mask;
ce->rating = 250;
rk_timer_interrupt_clear(ce);
@@ -169,7 +183,7 @@ static void __init rk_timer_init(struct device_node *np)
clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
- return;
+ return 0;
out_irq:
clk_disable_unprepare(timer_clk);
@@ -177,6 +191,21 @@ out_timer_clk:
clk_disable_unprepare(pclk);
out_unmap:
iounmap(bc_timer.base);
+
+ return ret;
+}
+
+static int __init rk3288_timer_init(struct device_node *np)
+{
+ return rk_timer_init(np, TIMER_CONTROL_REG3288);
+}
+
+static int __init rk3399_timer_init(struct device_node *np)
+{
+ return rk_timer_init(np, TIMER_CONTROL_REG3399);
}
-CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);
+CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer",
+ rk3288_timer_init);
+CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer",
+ rk3399_timer_init);
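With the control-register offset parameterized as above, a further Rockchip SoC would presumably need only one more offset constant and a one-line wrapper. Purely illustrative — "rockchip,rk1234-timer" and the 0x20 offset are made up:

#define TIMER_CONTROL_REG1234  0x20

static int __init rk1234_timer_init(struct device_node *np)
{
        return rk_timer_init(np, TIMER_CONTROL_REG1234);
}
CLOCKSOURCE_OF_DECLARE(rk1234_timer, "rockchip,rk1234-timer",
                       rk1234_timer_init);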
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 9502bc4c3f6d..54565bd0093b 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -130,9 +130,9 @@ static void samsung_time_stop(unsigned int channel)
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = __raw_readl(pwm.base + REG_TCON);
+ tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~TCON_START(channel);
- __raw_writel(tcon, pwm.base + REG_TCON);
+ writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -148,14 +148,14 @@ static void samsung_time_setup(unsigned int channel, unsigned long tcnt)
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = __raw_readl(pwm.base + REG_TCON);
+ tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan));
tcon |= TCON_MANUALUPDATE(tcon_chan);
- __raw_writel(tcnt, pwm.base + REG_TCNTB(channel));
- __raw_writel(tcnt, pwm.base + REG_TCMPB(channel));
- __raw_writel(tcon, pwm.base + REG_TCON);
+ writel_relaxed(tcnt, pwm.base + REG_TCNTB(channel));
+ writel_relaxed(tcnt, pwm.base + REG_TCMPB(channel));
+ writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -170,7 +170,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = __raw_readl(pwm.base + REG_TCON);
+ tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~TCON_MANUALUPDATE(channel);
tcon |= TCON_START(channel);
@@ -180,7 +180,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
else
tcon &= ~TCON_AUTORELOAD(channel);
- __raw_writel(tcon, pwm.base + REG_TCON);
+ writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -333,11 +333,10 @@ static u64 notrace samsung_read_sched_clock(void)
return samsung_clocksource_read(NULL);
}
-static void __init samsung_clocksource_init(void)
+static int __init samsung_clocksource_init(void)
{
unsigned long pclk;
unsigned long clock_rate;
- int ret;
pclk = clk_get_rate(pwm.timerclk);
@@ -358,9 +357,7 @@ static void __init samsung_clocksource_init(void)
pwm.variant.bits, clock_rate);
samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
- ret = clocksource_register_hz(&samsung_clocksource, clock_rate);
- if (ret)
- panic("samsung_clocksource_timer: can't register clocksource\n");
+ return clocksource_register_hz(&samsung_clocksource, clock_rate);
}
static void __init samsung_timer_resources(void)
@@ -380,26 +377,31 @@ static void __init samsung_timer_resources(void)
/*
* PWM master driver
*/
-static void __init _samsung_pwm_clocksource_init(void)
+static int __init _samsung_pwm_clocksource_init(void)
{
u8 mask;
int channel;
mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1);
channel = fls(mask) - 1;
- if (channel < 0)
- panic("failed to find PWM channel for clocksource");
+ if (channel < 0) {
+ pr_crit("failed to find PWM channel for clocksource");
+ return -EINVAL;
+ }
pwm.source_id = channel;
mask &= ~(1 << channel);
channel = fls(mask) - 1;
- if (channel < 0)
- panic("failed to find PWM channel for clock event");
+ if (channel < 0) {
+ pr_crit("failed to find PWM channel for clock event");
+ return -EINVAL;
+ }
pwm.event_id = channel;
samsung_timer_resources();
samsung_clockevent_init();
- samsung_clocksource_init();
+
+ return samsung_clocksource_init();
}
void __init samsung_pwm_clocksource_init(void __iomem *base,
@@ -417,8 +419,8 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
}
#ifdef CONFIG_CLKSRC_OF
-static void __init samsung_pwm_alloc(struct device_node *np,
- const struct samsung_pwm_variant *variant)
+static int __init samsung_pwm_alloc(struct device_node *np,
+ const struct samsung_pwm_variant *variant)
{
struct property *prop;
const __be32 *cur;
@@ -441,14 +443,16 @@ static void __init samsung_pwm_alloc(struct device_node *np,
pwm.base = of_iomap(np, 0);
if (!pwm.base) {
pr_err("%s: failed to map PWM registers\n", __func__);
- return;
+ return -ENXIO;
}
pwm.timerclk = of_clk_get_by_name(np, "timers");
- if (IS_ERR(pwm.timerclk))
- panic("failed to get timers clock for timer");
+ if (IS_ERR(pwm.timerclk)) {
+ pr_crit("failed to get timers clock for timer");
+ return PTR_ERR(pwm.timerclk);
+ }
- _samsung_pwm_clocksource_init();
+ return _samsung_pwm_clocksource_init();
}
static const struct samsung_pwm_variant s3c24xx_variant = {
@@ -458,9 +462,9 @@ static const struct samsung_pwm_variant s3c24xx_variant = {
.tclk_mask = (1 << 4),
};
-static void __init s3c2410_pwm_clocksource_init(struct device_node *np)
+static int __init s3c2410_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s3c24xx_variant);
+ return samsung_pwm_alloc(np, &s3c24xx_variant);
}
CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);
@@ -471,9 +475,9 @@ static const struct samsung_pwm_variant s3c64xx_variant = {
.tclk_mask = (1 << 7) | (1 << 6) | (1 << 5),
};
-static void __init s3c64xx_pwm_clocksource_init(struct device_node *np)
+static int __init s3c64xx_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s3c64xx_variant);
+ return samsung_pwm_alloc(np, &s3c64xx_variant);
}
CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);
@@ -484,9 +488,9 @@ static const struct samsung_pwm_variant s5p64x0_variant = {
.tclk_mask = 0,
};
-static void __init s5p64x0_pwm_clocksource_init(struct device_node *np)
+static int __init s5p64x0_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s5p64x0_variant);
+ return samsung_pwm_alloc(np, &s5p64x0_variant);
}
CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);
@@ -497,9 +501,9 @@ static const struct samsung_pwm_variant s5p_variant = {
.tclk_mask = (1 << 5),
};
-static void __init s5p_pwm_clocksource_init(struct device_node *np)
+static int __init s5p_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s5p_variant);
+ return samsung_pwm_alloc(np, &s5p_variant);
}
CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
#endif
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 6f3719d73390..97669ee4df2a 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -146,7 +146,7 @@ static u64 notrace sun4i_timer_sched_read(void)
return ~readl(timer_base + TIMER_CNTVAL_REG(1));
}
-static void __init sun4i_timer_init(struct device_node *node)
+static int __init sun4i_timer_init(struct device_node *node)
{
unsigned long rate = 0;
struct clk *clk;
@@ -154,17 +154,28 @@ static void __init sun4i_timer_init(struct device_node *node)
u32 val;
timer_base = of_iomap(node, 0);
- if (!timer_base)
- panic("Can't map registers");
+ if (!timer_base) {
+ pr_crit("Can't map registers");
+ return -ENXIO;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_crit("Can't parse IRQ");
+ return -EINVAL;
+ }
clk = of_clk_get(node, 0);
- if (IS_ERR(clk))
- panic("Can't get timer clock");
- clk_prepare_enable(clk);
+ if (IS_ERR(clk)) {
+ pr_crit("Can't get timer clock");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("Failed to prepare clock");
+ return ret;
+ }
rate = clk_get_rate(clk);
@@ -182,8 +193,12 @@ static void __init sun4i_timer_init(struct device_node *node)
of_machine_is_compatible("allwinner,sun5i-a10s"))
sched_clock_register(sun4i_timer_sched_read, 32, rate);
- clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
- rate, 350, 32, clocksource_mmio_readl_down);
+ ret = clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
+ rate, 350, 32, clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("Failed to register clocksource");
+ return ret;
+ }
ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
@@ -200,12 +215,16 @@ static void __init sun4i_timer_init(struct device_node *node)
TIMER_SYNC_TICKS, 0xffffffff);
ret = setup_irq(irq, &sun4i_timer_irq);
- if (ret)
- pr_warn("failed to setup irq %d\n", irq);
+ if (ret) {
+ pr_err("failed to setup irq %d\n", irq);
+ return ret;
+ }
/* Enable timer0 interrupt */
val = readl(timer_base + TIMER_IRQ_EN_REG);
writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index c407c47a3232..12fcef8cf2d3 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -19,7 +19,7 @@ static u64 notrace read_sched_clock(void)
return read_xtal_counter();
}
-static void __init tango_clocksource_init(struct device_node *np)
+static int __init tango_clocksource_init(struct device_node *np)
{
struct clk *clk;
int xtal_freq, ret;
@@ -27,13 +27,13 @@ static void __init tango_clocksource_init(struct device_node *np)
xtal_in_cnt = of_iomap(np, 0);
if (xtal_in_cnt == NULL) {
pr_err("%s: invalid address\n", np->full_name);
- return;
+ return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("%s: invalid clock\n", np->full_name);
- return;
+ return PTR_ERR(clk);
}
xtal_freq = clk_get_rate(clk);
@@ -44,11 +44,13 @@ static void __init tango_clocksource_init(struct device_node *np)
32, clocksource_mmio_readl_up);
if (ret) {
pr_err("%s: registration failed\n", np->full_name);
- return;
+ return ret;
}
sched_clock_register(read_sched_clock, 32, xtal_freq);
register_current_timer_delay(&delay_timer);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 7b94ad2ab278..f960891aa04e 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -165,7 +165,7 @@ static struct irqaction tegra_timer_irq = {
.dev_id = &tegra_clockevent,
};
-static void __init tegra20_init_timer(struct device_node *np)
+static int __init tegra20_init_timer(struct device_node *np)
{
struct clk *clk;
unsigned long rate;
@@ -174,13 +174,13 @@ static void __init tegra20_init_timer(struct device_node *np)
timer_reg_base = of_iomap(np, 0);
if (!timer_reg_base) {
pr_err("Can't map timer registers\n");
- BUG();
+ return -ENXIO;
}
tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
if (tegra_timer_irq.irq <= 0) {
pr_err("Failed to map timer IRQ\n");
- BUG();
+ return -EINVAL;
}
clk = of_clk_get(np, 0);
@@ -211,10 +211,12 @@ static void __init tegra20_init_timer(struct device_node *np)
sched_clock_register(tegra_read_sched_clock, 32, 1000000);
- if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
- "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
+ ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+ "timer_us", 1000000, 300, 32,
+ clocksource_mmio_readl_up);
+ if (ret) {
pr_err("Failed to register clocksource\n");
- BUG();
+ return ret;
}
tegra_delay_timer.read_current_timer =
@@ -225,24 +227,26 @@ static void __init tegra20_init_timer(struct device_node *np)
ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
if (ret) {
pr_err("Failed to register timer IRQ: %d\n", ret);
- BUG();
+ return ret;
}
tegra_clockevent.cpumask = cpu_all_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_config_and_register(&tegra_clockevent, 1000000,
0x1, 0x1fffffff);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
-static void __init tegra20_init_rtc(struct device_node *np)
+static int __init tegra20_init_rtc(struct device_node *np)
{
struct clk *clk;
rtc_base = of_iomap(np, 0);
if (!rtc_base) {
pr_err("Can't map RTC registers");
- BUG();
+ return -ENXIO;
}
/*
@@ -255,6 +259,6 @@ static void __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
- register_persistent_clock(NULL, tegra_read_persistent_clock64);
+ return register_persistent_clock(NULL, tegra_read_persistent_clock64);
}
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d93ec3c4f139..20ec066481fe 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -246,7 +246,7 @@ static void armada_370_xp_timer_resume(void)
writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}
-struct syscore_ops armada_370_xp_timer_syscore_ops = {
+static struct syscore_ops armada_370_xp_timer_syscore_ops = {
.suspend = armada_370_xp_timer_suspend,
.resume = armada_370_xp_timer_resume,
};
@@ -260,14 +260,22 @@ static struct delay_timer armada_370_delay_timer = {
.read_current_timer = armada_370_delay_timer_read,
};
-static void __init armada_370_xp_timer_common_init(struct device_node *np)
+static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
u32 clr = 0, set = 0;
int res;
timer_base = of_iomap(np, 0);
- WARN_ON(!timer_base);
+ if (!timer_base) {
+ pr_err("Failed to iomap");
+ return -ENXIO;
+ }
+
local_base = of_iomap(np, 1);
+ if (!local_base) {
+ pr_err("Failed to iomap");
+ return -ENXIO;
+ }
if (timer25Mhz) {
set = TIMER0_25MHZ;
@@ -306,14 +314,19 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
*/
sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
- clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
- "armada_370_xp_clocksource",
- timer_clk, 300, 32, clocksource_mmio_readl_down);
+ res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
+ "armada_370_xp_clocksource",
+ timer_clk, 300, 32, clocksource_mmio_readl_down);
+ if (res) {
+ pr_err("Failed to initialize clocksource mmio");
+ return res;
+ }
register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
armada_370_xp_evt = alloc_percpu(struct clock_event_device);
-
+ if (!armada_370_xp_evt)
+ return -ENOMEM;
/*
* Setup clockevent timer (interrupt-driven).
@@ -323,33 +336,54 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
"armada_370_xp_per_cpu_tick",
armada_370_xp_evt);
/* Immediately configure the timer on the boot CPU */
- if (!res)
- armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+ if (res) {
+ pr_err("Failed to request percpu irq");
+ return res;
+ }
+
+ res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+ if (res) {
+ pr_err("Failed to setup timer");
+ return res;
+ }
register_syscore_ops(&armada_370_xp_timer_syscore_ops);
+
+ return 0;
}
-static void __init armada_xp_timer_init(struct device_node *np)
+static int __init armada_xp_timer_init(struct device_node *np)
{
struct clk *clk = of_clk_get_by_name(np, "fixed");
+ int ret;
+
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
- /* The 25Mhz fixed clock is mandatory, and must always be available */
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
timer_clk = clk_get_rate(clk);
- armada_370_xp_timer_common_init(np);
+ return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
armada_xp_timer_init);
-static void __init armada_375_timer_init(struct device_node *np)
+static int __init armada_375_timer_init(struct device_node *np)
{
struct clk *clk;
+ int ret;
clk = of_clk_get_by_name(np, "fixed");
if (!IS_ERR(clk)) {
- clk_prepare_enable(clk);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
timer_clk = clk_get_rate(clk);
} else {
@@ -360,27 +394,43 @@ static void __init armada_375_timer_init(struct device_node *np)
clk = of_clk_get(np, 0);
/* Must have at least a clock */
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
timer25Mhz = false;
}
- armada_370_xp_timer_common_init(np);
+ return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer",
armada_375_timer_init);
-static void __init armada_370_timer_init(struct device_node *np)
+static int __init armada_370_timer_init(struct device_node *np)
{
- struct clk *clk = of_clk_get(np, 0);
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
timer25Mhz = false;
- armada_370_xp_timer_common_init(np);
+ return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
armada_370_timer_init);
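The per-CPU clockevent half of the armada conversion follows the same rule: every call that can fail now has its result checked and propagated. A condensed sketch of the sequence used above (the handler and setup helper names here are hypothetical):

static int __init my_percpu_clkevt_init(int irq)
{
	struct clock_event_device __percpu *evt;
	int ret;

	evt = alloc_percpu(struct clock_event_device);
	if (!evt)
		return -ENOMEM;

	ret = request_percpu_irq(irq, my_timer_isr, "my_per_cpu_tick", evt);
	if (ret) {
		pr_err("Failed to request percpu irq\n");
		return ret;
	}

	/* Configure the boot CPU now; secondaries follow via the CPU notifier */
	return my_timer_setup(this_cpu_ptr(evt));
}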
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index b06e4c2be406..5ac344b383e1 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -233,10 +233,15 @@ static int __init efm32_clockevent_init(struct device_node *np)
DIV_ROUND_CLOSEST(rate, 1024),
0xf, 0xffff);
- setup_irq(irq, &efm32_clock_event_irq);
+ ret = setup_irq(irq, &efm32_clock_event_irq);
+ if (ret) {
+ pr_err("Failed setup irq");
+ goto err_setup_irq;
+ }
return 0;
+err_setup_irq:
err_get_irq:
iounmap(base);
@@ -255,16 +260,16 @@ err_clk_get:
* This function asserts that we have exactly one clocksource and one
* clock_event_device in the end.
*/
-static void __init efm32_timer_init(struct device_node *np)
+static int __init efm32_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
- int ret;
+ int ret = 0;
if (!has_clocksource) {
ret = efm32_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
- return;
+ return 0;
}
}
@@ -272,9 +277,11 @@ static void __init efm32_timer_init(struct device_node *np)
ret = efm32_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
- return;
+ return 0;
}
}
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
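The `int ret = 0` initialization above is not cosmetic: when both the clocksource and the clockevent roles have already been claimed by earlier nodes, neither branch assigns ret, yet the function must still return something defined. The idiom shared by efm32 and lpc32xx, reduced to its shape (helper names hypothetical):

static int __init shared_timer_init(struct device_node *np)
{
	static int has_clocksource, has_clockevent;
	int ret = 0;	/* both roles may already be taken: report success */

	if (!has_clocksource) {
		ret = my_clocksource_init(np);	/* hypothetical helper */
		if (!ret) {
			has_clocksource = 1;
			return 0;
		}
	}

	if (!has_clockevent) {
		ret = my_clockevent_init(np);	/* hypothetical helper */
		if (!ret) {
			has_clockevent = 1;
			return 0;
		}
	}

	return ret;
}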
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c
index daae61e8c820..9649cfdb9213 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/time-lpc32xx.c
@@ -288,16 +288,16 @@ err_clk_enable:
* This function asserts that we have exactly one clocksource and one
* clock_event_device in the end.
*/
-static void __init lpc32xx_timer_init(struct device_node *np)
+static int __init lpc32xx_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
- int ret;
+ int ret = 0;
if (!has_clocksource) {
ret = lpc32xx_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
- return;
+ return 0;
}
}
@@ -305,8 +305,10 @@ static void __init lpc32xx_timer_init(struct device_node *np)
ret = lpc32xx_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
- return;
+ return 0;
}
}
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 0ece7427b497..a28f496e97cf 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -104,25 +104,36 @@ static struct irqaction orion_clkevt_irq = {
.handler = orion_clkevt_irq_handler,
};
-static void __init orion_timer_init(struct device_node *np)
+static int __init orion_timer_init(struct device_node *np)
{
struct clk *clk;
- int irq;
+ int irq, ret;
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
- if (!timer_base)
- panic("%s: unable to map resource\n", np->name);
+ if (!timer_base) {
+ pr_err("%s: unable to map resource\n", np->name);
+ return -ENXIO;
+ }
clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
- panic("%s: unable to get clk\n", np->name);
- clk_prepare_enable(clk);
+ if (IS_ERR(clk)) {
+ pr_err("%s: unable to get clk\n", np->name);
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("Failed to prepare clock");
+ return ret;
+ }
/* we are only interested in timer1 irq */
irq = irq_of_parse_and_map(np, 1);
- if (irq <= 0)
- panic("%s: unable to parse timer1 irq\n", np->name);
+ if (irq <= 0) {
+ pr_err("%s: unable to parse timer1 irq\n", np->name);
+ return -EINVAL;
+ }
/* setup timer0 as free-running clocksource */
writel(~0, timer_base + TIMER0_VAL);
@@ -130,19 +141,30 @@ static void __init orion_timer_init(struct device_node *np)
atomic_io_modify(timer_base + TIMER_CTRL,
TIMER0_RELOAD_EN | TIMER0_EN,
TIMER0_RELOAD_EN | TIMER0_EN);
- clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
- clk_get_rate(clk), 300, 32,
- clocksource_mmio_readl_down);
+
+ ret = clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
+ clk_get_rate(clk), 300, 32,
+ clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("Failed to initialize mmio timer");
+ return ret;
+ }
+
sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));
/* setup timer1 as clockevent timer */
- if (setup_irq(irq, &orion_clkevt_irq))
- panic("%s: unable to setup irq\n", np->name);
+ ret = setup_irq(irq, &orion_clkevt_irq);
+ if (ret) {
+ pr_err("%s: unable to setup irq\n", np->name);
+ return ret;
+ }
ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
orion_clkevt.cpumask = cpumask_of(0);
orion_clkevt.irq = irq;
clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk),
ORION_ONESHOT_MIN, ORION_ONESHOT_MAX);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index 376e59bc5fa0..a7d9a08e4b0e 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -148,7 +148,7 @@ static struct pistachio_clocksource pcs_gpt = {
},
};
-static void __init pistachio_clksrc_of_init(struct device_node *node)
+static int __init pistachio_clksrc_of_init(struct device_node *node)
{
struct clk *sys_clk, *fast_clk;
struct regmap *periph_regs;
@@ -158,45 +158,45 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
pcs_gpt.base = of_iomap(node, 0);
if (!pcs_gpt.base) {
pr_err("cannot iomap\n");
- return;
+ return -ENXIO;
}
periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
if (IS_ERR(periph_regs)) {
pr_err("cannot get peripheral regmap (%ld)\n",
PTR_ERR(periph_regs));
- return;
+ return PTR_ERR(periph_regs);
}
/* Switch to using the fast counter clock */
ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
0xf, 0x0);
if (ret)
- return;
+ return ret;
sys_clk = of_clk_get_by_name(node, "sys");
if (IS_ERR(sys_clk)) {
pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
- return;
+ return PTR_ERR(sys_clk);
}
fast_clk = of_clk_get_by_name(node, "fast");
if (IS_ERR(fast_clk)) {
pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
- return;
+ return PTR_ERR(fast_clk);
}
ret = clk_prepare_enable(sys_clk);
if (ret < 0) {
pr_err("failed to enable clock (%d)\n", ret);
- return;
+ return ret;
}
ret = clk_prepare_enable(fast_clk);
if (ret < 0) {
pr_err("failed to enable clock (%d)\n", ret);
clk_disable_unprepare(sys_clk);
- return;
+ return ret;
}
rate = clk_get_rate(fast_clk);
@@ -212,7 +212,7 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
raw_spin_lock_init(&pcs_gpt.lock);
sched_clock_register(pistachio_read_sched_clock, 32, rate);
- clocksource_register_hz(&pcs_gpt.cs, rate);
+ return clocksource_register_hz(&pcs_gpt.cs, rate);
}
CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
pistachio_clksrc_of_init);
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 27fa13680be1..90f8fbc154a4 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -238,7 +238,7 @@ static struct notifier_block sirfsoc_cpu_nb = {
.notifier_call = sirfsoc_cpu_notify,
};
-static void __init sirfsoc_clockevent_init(void)
+static int __init sirfsoc_clockevent_init(void)
{
sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
BUG_ON(!sirfsoc_clockevent);
@@ -246,11 +246,11 @@ static void __init sirfsoc_clockevent_init(void)
BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
/* Immediately configure the timer on the boot CPU */
- sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+ return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
}
/* initialize the kernel jiffy timer source */
-static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
+static int __init sirfsoc_atlas7_timer_init(struct device_node *np)
{
struct clk *clk;
@@ -279,23 +279,29 @@ static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));
- sirfsoc_clockevent_init();
+ return sirfsoc_clockevent_init();
}
-static void __init sirfsoc_of_timer_init(struct device_node *np)
+static int __init sirfsoc_of_timer_init(struct device_node *np)
{
sirfsoc_timer_base = of_iomap(np, 0);
- if (!sirfsoc_timer_base)
- panic("unable to map timer cpu registers\n");
+ if (!sirfsoc_timer_base) {
+ pr_err("unable to map timer cpu registers\n");
+ return -ENXIO;
+ }
sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
- if (!sirfsoc_timer_irq.irq)
- panic("No irq passed for timer0 via DT\n");
+ if (!sirfsoc_timer_irq.irq) {
+ pr_err("No irq passed for timer0 via DT\n");
+ return -EINVAL;
+ }
sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
- if (!sirfsoc_timer1_irq.irq)
- panic("No irq passed for timer1 via DT\n");
+ if (!sirfsoc_timer1_irq.irq) {
+ pr_err("No irq passed for timer1 via DT\n");
+ return -EINVAL;
+ }
- sirfsoc_atlas7_timer_init(np);
+ return sirfsoc_atlas7_timer_init(np);
}
CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index d911c5dca8f1..1ffac0cb0cb7 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -177,7 +177,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
/*
* Set up both clocksource and clockevent support.
*/
-static void __init at91sam926x_pit_common_init(struct pit_data *data)
+static int __init at91sam926x_pit_common_init(struct pit_data *data)
{
unsigned long pit_rate;
unsigned bits;
@@ -204,14 +204,21 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
data->clksrc.rating = 175;
data->clksrc.read = read_pit_clk;
data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
- clocksource_register_hz(&data->clksrc, pit_rate);
+
+ ret = clocksource_register_hz(&data->clksrc, pit_rate);
+ if (ret) {
+ pr_err("Failed to register clocksource");
+ return ret;
+ }
/* Set up irq handler */
ret = request_irq(data->irq, at91sam926x_pit_interrupt,
IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
"at91_tick", data);
- if (ret)
- panic(pr_fmt("Unable to setup IRQ\n"));
+ if (ret) {
+ pr_err("Unable to setup IRQ\n");
+ return ret;
+ }
/* Set up and register clockevents */
data->clkevt.name = "pit";
@@ -226,34 +233,42 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
data->clkevt.resume = at91sam926x_pit_resume;
data->clkevt.suspend = at91sam926x_pit_suspend;
clockevents_register_device(&data->clkevt);
+
+ return 0;
}
-static void __init at91sam926x_pit_dt_init(struct device_node *node)
+static int __init at91sam926x_pit_dt_init(struct device_node *node)
{
struct pit_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- panic(pr_fmt("Unable to allocate memory\n"));
+ return -ENOMEM;
data->base = of_iomap(node, 0);
- if (!data->base)
- panic(pr_fmt("Could not map PIT address\n"));
+ if (!data->base) {
+ pr_err("Could not map PIT address\n");
+ return -ENXIO;
+ }
data->mck = of_clk_get(node, 0);
if (IS_ERR(data->mck))
/* Fallback on clkdev for !CCF-based boards */
data->mck = clk_get(NULL, "mck");
- if (IS_ERR(data->mck))
- panic(pr_fmt("Unable to get mck clk\n"));
+ if (IS_ERR(data->mck)) {
+ pr_err("Unable to get mck clk\n");
+ return PTR_ERR(data->mck);
+ }
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
- if (!data->irq)
- panic(pr_fmt("Unable to get IRQ from DT\n"));
+ if (!data->irq) {
+ pr_err("Unable to get IRQ from DT\n");
+ return -EINVAL;
+ }
- at91sam926x_pit_common_init(data);
+ return at91sam926x_pit_common_init(data);
}
CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
at91sam926x_pit_dt_init);
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index 29d21d68df5a..e90ab5b63a90 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -194,15 +194,17 @@ static struct clock_event_device clkevt = {
/*
* ST (system timer) module supports both clockevents and clocksource.
*/
-static void __init atmel_st_timer_init(struct device_node *node)
+static int __init atmel_st_timer_init(struct device_node *node)
{
struct clk *sclk;
unsigned int sclk_rate, val;
int irq, ret;
regmap_st = syscon_node_to_regmap(node);
- if (IS_ERR(regmap_st))
- panic(pr_fmt("Unable to get regmap\n"));
+ if (IS_ERR(regmap_st)) {
+ pr_err("Unable to get regmap\n");
+ return PTR_ERR(regmap_st);
+ }
/* Disable all timer interrupts, and clear any pending ones */
regmap_write(regmap_st, AT91_ST_IDR,
@@ -211,27 +213,37 @@ static void __init atmel_st_timer_init(struct device_node *node)
/* Get the interrupts property */
irq = irq_of_parse_and_map(node, 0);
- if (!irq)
- panic(pr_fmt("Unable to get IRQ from DT\n"));
+ if (!irq) {
+ pr_err("Unable to get IRQ from DT\n");
+ return -EINVAL;
+ }
/* Make IRQs happen for the system timer */
ret = request_irq(irq, at91rm9200_timer_interrupt,
IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
"at91_tick", regmap_st);
- if (ret)
- panic(pr_fmt("Unable to setup IRQ\n"));
+ if (ret) {
+ pr_err("Unable to setup IRQ\n");
+ return ret;
+ }
sclk = of_clk_get(node, 0);
- if (IS_ERR(sclk))
- panic(pr_fmt("Unable to get slow clock\n"));
+ if (IS_ERR(sclk)) {
+ pr_err("Unable to get slow clock\n");
+ return PTR_ERR(sclk);
+ }
- clk_prepare_enable(sclk);
- if (ret)
- panic(pr_fmt("Could not enable slow clock\n"));
+ ret = clk_prepare_enable(sclk);
+ if (ret) {
+ pr_err("Could not enable slow clock\n");
+ return ret;
+ }
sclk_rate = clk_get_rate(sclk);
- if (!sclk_rate)
- panic(pr_fmt("Invalid slow clock rate\n"));
+ if (!sclk_rate) {
+ pr_err("Invalid slow clock rate\n");
+ return -EINVAL;
+ }
timer_latch = (sclk_rate + HZ / 2) / HZ;
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
@@ -246,7 +258,7 @@ static void __init atmel_st_timer_init(struct device_node *node)
2, AT91_ST_ALMV);
/* register clocksource */
- clocksource_register_hz(&clk32k, sclk_rate);
+ return clocksource_register_hz(&clk32k, sclk_rate);
}
CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st",
atmel_st_timer_init);
diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c
index a536eeb634d8..10318cc99c0e 100644
--- a/drivers/clocksource/timer-digicolor.c
+++ b/drivers/clocksource/timer-digicolor.c
@@ -63,7 +63,7 @@ struct digicolor_timer {
int timer_id; /* one of TIMER_* */
};
-struct digicolor_timer *dc_timer(struct clock_event_device *ce)
+static struct digicolor_timer *dc_timer(struct clock_event_device *ce)
{
return container_of(ce, struct digicolor_timer, ce);
}
@@ -148,7 +148,7 @@ static u64 notrace digicolor_timer_sched_read(void)
return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
}
-static void __init digicolor_timer_init(struct device_node *node)
+static int __init digicolor_timer_init(struct device_node *node)
{
unsigned long rate;
struct clk *clk;
@@ -161,19 +161,19 @@ static void __init digicolor_timer_init(struct device_node *node)
dc_timer_dev.base = of_iomap(node, 0);
if (!dc_timer_dev.base) {
pr_err("Can't map registers");
- return;
+ return -ENXIO;
}
irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id);
if (irq <= 0) {
pr_err("Can't parse IRQ");
- return;
+ return -EINVAL;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("Can't get timer clock");
- return;
+ return PTR_ERR(clk);
}
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
@@ -190,13 +190,17 @@ static void __init digicolor_timer_init(struct device_node *node)
ret = request_irq(irq, digicolor_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC",
&dc_timer_dev.ce);
- if (ret)
+ if (ret) {
pr_warn("request of timer irq %d failed (%d)\n", irq, ret);
+ return ret;
+ }
dc_timer_dev.ce.cpumask = cpu_possible_mask;
dc_timer_dev.ce.irq = irq;
clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
digicolor_timer_init);
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 99ec96769dda..f595460bfc58 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -407,8 +407,10 @@ static const struct imx_gpt_data imx6dl_gpt_data = {
.set_next_event = v2_set_next_event,
};
-static void __init _mxc_timer_init(struct imx_timer *imxtm)
+static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
+ int ret;
+
switch (imxtm->type) {
case GPT_TYPE_IMX1:
imxtm->gpt = &imx1_gpt_data;
@@ -423,12 +425,12 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
imxtm->gpt = &imx6dl_gpt_data;
break;
default:
- BUG();
+ return -EINVAL;
}
if (IS_ERR(imxtm->clk_per)) {
pr_err("i.MX timer: unable to get clk\n");
- return;
+ return PTR_ERR(imxtm->clk_per);
}
if (!IS_ERR(imxtm->clk_ipg))
@@ -446,8 +448,11 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
imxtm->gpt->gpt_setup_tctl(imxtm);
/* init and register the timer to the framework */
- mxc_clocksource_init(imxtm);
- mxc_clockevent_init(imxtm);
+ ret = mxc_clocksource_init(imxtm);
+ if (ret)
+ return ret;
+
+ return mxc_clockevent_init(imxtm);
}
void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
@@ -469,21 +474,27 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
_mxc_timer_init(imxtm);
}
-static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
+static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
struct imx_timer *imxtm;
static int initialized;
+ int ret;
/* Support one instance only */
if (initialized)
- return;
+ return 0;
imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
- BUG_ON(!imxtm);
+ if (!imxtm)
+ return -ENOMEM;
imxtm->base = of_iomap(np, 0);
- WARN_ON(!imxtm->base);
+ if (!imxtm->base)
+ return -ENXIO;
+
imxtm->irq = irq_of_parse_and_map(np, 0);
+ if (imxtm->irq <= 0)
+ return -EINVAL;
imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
@@ -494,22 +505,26 @@ static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type
imxtm->type = type;
- _mxc_timer_init(imxtm);
+ ret = _mxc_timer_init(imxtm);
+ if (ret)
+ return ret;
initialized = 1;
+
+ return 0;
}
-static void __init imx1_timer_init_dt(struct device_node *np)
+static int __init imx1_timer_init_dt(struct device_node *np)
{
- mxc_timer_init_dt(np, GPT_TYPE_IMX1);
+ return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}
-static void __init imx21_timer_init_dt(struct device_node *np)
+static int __init imx21_timer_init_dt(struct device_node *np)
{
- mxc_timer_init_dt(np, GPT_TYPE_IMX21);
+ return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}
-static void __init imx31_timer_init_dt(struct device_node *np)
+static int __init imx31_timer_init_dt(struct device_node *np)
{
enum imx_gpt_type type = GPT_TYPE_IMX31;
@@ -522,12 +537,12 @@ static void __init imx31_timer_init_dt(struct device_node *np)
if (of_machine_is_compatible("fsl,imx6dl"))
type = GPT_TYPE_IMX6DL;
- mxc_timer_init_dt(np, type);
+ return mxc_timer_init_dt(np, type);
}
-static void __init imx6dl_timer_init_dt(struct device_node *np)
+static int __init imx6dl_timer_init_dt(struct device_node *np)
{
- mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
+ return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}
CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 3f59ac2180dc..df6e672afc04 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -36,11 +36,12 @@ static u64 notrace integrator_read_sched_clock(void)
return -readl(sched_clk_base + TIMER_VALUE);
}
-static void integrator_clocksource_init(unsigned long inrate,
- void __iomem *base)
+static int integrator_clocksource_init(unsigned long inrate,
+ void __iomem *base)
{
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
unsigned long rate = inrate;
+ int ret;
if (rate >= 1500000) {
rate /= 16;
@@ -50,11 +51,15 @@ static void integrator_clocksource_init(unsigned long inrate,
writel(0xffff, base + TIMER_LOAD);
writel(ctrl, base + TIMER_CTRL);
- clocksource_mmio_init(base + TIMER_VALUE, "timer2",
- rate, 200, 16, clocksource_mmio_readl_down);
+ ret = clocksource_mmio_init(base + TIMER_VALUE, "timer2",
+ rate, 200, 16, clocksource_mmio_readl_down);
+ if (ret)
+ return ret;
sched_clk_base = base;
sched_clock_register(integrator_read_sched_clock, 16, rate);
+
+ return 0;
}
static unsigned long timer_reload;
@@ -138,11 +143,12 @@ static struct irqaction integrator_timer_irq = {
.dev_id = &integrator_clockevent,
};
-static void integrator_clockevent_init(unsigned long inrate,
- void __iomem *base, int irq)
+static int integrator_clockevent_init(unsigned long inrate,
+ void __iomem *base, int irq)
{
unsigned long rate = inrate;
unsigned int ctrl = 0;
+ int ret;
clkevt_base = base;
/* Calculate and program a divisor */
@@ -156,14 +162,18 @@ static void integrator_clockevent_init(unsigned long inrate,
timer_reload = rate / HZ;
writel(ctrl, clkevt_base + TIMER_CTRL);
- setup_irq(irq, &integrator_timer_irq);
+ ret = setup_irq(irq, &integrator_timer_irq);
+ if (ret)
+ return ret;
+
clockevents_config_and_register(&integrator_clockevent,
rate,
1,
0xffffU);
+ return 0;
}
-static void __init integrator_ap_timer_init_of(struct device_node *node)
+static int __init integrator_ap_timer_init_of(struct device_node *node)
{
const char *path;
void __iomem *base;
@@ -176,12 +186,12 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
base = of_io_request_and_map(node, 0, "integrator-timer");
if (IS_ERR(base))
- return;
+ return PTR_ERR(base);
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("No clock for %s\n", node->name);
- return;
+ return PTR_ERR(clk);
}
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
@@ -189,30 +199,37 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
err = of_property_read_string(of_aliases,
"arm,timer-primary", &path);
- if (WARN_ON(err))
- return;
+ if (err) {
+ pr_warn("Failed to read property");
+ return err;
+ }
+
pri_node = of_find_node_by_path(path);
+
err = of_property_read_string(of_aliases,
"arm,timer-secondary", &path);
- if (WARN_ON(err))
- return;
+ if (err) {
+ pr_warn("Failed to read property");
+ return err;
+ }
+
sec_node = of_find_node_by_path(path);
- if (node == pri_node) {
+ if (node == pri_node)
/* The primary timer lacks IRQ, use as clocksource */
- integrator_clocksource_init(rate, base);
- return;
- }
+ return integrator_clocksource_init(rate, base);
if (node == sec_node) {
/* The secondary timer will drive the clock event */
irq = irq_of_parse_and_map(node, 0);
- integrator_clockevent_init(rate, base, irq);
- return;
+ return integrator_clockevent_init(rate, base, irq);
}
pr_info("Timer @%p unused\n", base);
clk_disable_unprepare(clk);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index 1cea08cf603e..ab68a47ab3b4 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -144,7 +144,7 @@ static int keystone_set_periodic(struct clock_event_device *evt)
return 0;
}
-static void __init keystone_timer_init(struct device_node *np)
+static int __init keystone_timer_init(struct device_node *np)
{
struct clock_event_device *event_dev = &timer.event_dev;
unsigned long rate;
@@ -154,20 +154,20 @@ static void __init keystone_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
pr_err("%s: failed to map interrupts\n", __func__);
- return;
+ return -EINVAL;
}
timer.base = of_iomap(np, 0);
if (!timer.base) {
pr_err("%s: failed to map registers\n", __func__);
- return;
+ return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("%s: failed to get clock\n", __func__);
iounmap(timer.base);
- return;
+ return PTR_ERR(clk);
}
error = clk_prepare_enable(clk);
@@ -219,11 +219,12 @@ static void __init keystone_timer_init(struct device_node *np)
clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
pr_info("keystone timer clock @%lu Hz\n", rate);
- return;
+ return 0;
err:
clk_put(clk);
iounmap(timer.base);
+ return error;
}
CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
- keystone_timer_init);
+ keystone_timer_init);
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index d46108920b2c..70c149af8ee0 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -55,8 +55,8 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
}
-static void __init nps_setup_clocksource(struct device_node *node,
- struct clk *clk)
+static int __init nps_setup_clocksource(struct device_node *node,
+ struct clk *clk)
{
int ret, cluster;
@@ -68,7 +68,7 @@ static void __init nps_setup_clocksource(struct device_node *node,
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("Couldn't enable parent clock\n");
- return;
+ return ret;
}
nps_timer_rate = clk_get_rate(clk);
@@ -79,19 +79,21 @@ static void __init nps_setup_clocksource(struct device_node *node,
pr_err("Couldn't register clock source.\n");
clk_disable_unprepare(clk);
}
+
+ return ret;
}
-static void __init nps_timer_init(struct device_node *node)
+static int __init nps_timer_init(struct device_node *node)
{
struct clk *clk;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("Can't get timer clock.\n");
- return;
+ return PTR_ERR(clk);
}
- nps_setup_clocksource(node, clk);
+ return nps_setup_clocksource(node, clk);
}
CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
new file mode 100644
index 000000000000..bd887e2a8cf8
--- /dev/null
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -0,0 +1,297 @@
+/*
+ * drivers/clocksource/timer-oxnas-rps.c
+ *
+ * Copyright (C) 2009 Oxford Semiconductor Ltd
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/clockchips.h>
+#include <linux/sched_clock.h>
+
+/* TIMER1 used as tick
+ * TIMER2 used as clocksource
+ */
+
+/* Registers definitions */
+
+#define TIMER_LOAD_REG 0x0
+#define TIMER_CURR_REG 0x4
+#define TIMER_CTRL_REG 0x8
+#define TIMER_CLRINT_REG 0xC
+
+#define TIMER_BITS 24
+
+#define TIMER_MAX_VAL (BIT(TIMER_BITS) - 1)
+
+#define TIMER_PERIODIC BIT(6)
+#define TIMER_ENABLE BIT(7)
+
+#define TIMER_DIV1 (0)
+#define TIMER_DIV16 (1 << 2)
+#define TIMER_DIV256 (2 << 2)
+
+#define TIMER1_REG_OFFSET 0
+#define TIMER2_REG_OFFSET 0x20
+
+/* Clockevent & Clocksource data */
+
+struct oxnas_rps_timer {
+ struct clock_event_device clkevent;
+ void __iomem *clksrc_base;
+ void __iomem *clkevt_base;
+ unsigned long timer_period;
+ unsigned int timer_prescaler;
+ struct clk *clk;
+ int irq;
+};
+
+static irqreturn_t oxnas_rps_timer_irq(int irq, void *dev_id)
+{
+ struct oxnas_rps_timer *rps = dev_id;
+
+ writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
+
+ rps->clkevent.event_handler(&rps->clkevent);
+
+ return IRQ_HANDLED;
+}
+
+static void oxnas_rps_timer_config(struct oxnas_rps_timer *rps,
+ unsigned long period,
+ unsigned int periodic)
+{
+ uint32_t cfg = rps->timer_prescaler;
+
+ if (period)
+ cfg |= TIMER_ENABLE;
+
+ if (periodic)
+ cfg |= TIMER_PERIODIC;
+
+ writel_relaxed(period, rps->clkevt_base + TIMER_LOAD_REG);
+ writel_relaxed(cfg, rps->clkevt_base + TIMER_CTRL_REG);
+}
+
+static int oxnas_rps_timer_shutdown(struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, 0, 0);
+
+ return 0;
+}
+
+static int oxnas_rps_timer_set_periodic(struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, rps->timer_period, 1);
+
+ return 0;
+}
+
+static int oxnas_rps_timer_set_oneshot(struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, rps->timer_period, 0);
+
+ return 0;
+}
+
+static int oxnas_rps_timer_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, delta, 0);
+
+ return 0;
+}
+
+static int __init oxnas_rps_clockevent_init(struct oxnas_rps_timer *rps)
+{
+ ulong clk_rate = clk_get_rate(rps->clk);
+ ulong timer_rate;
+
+ /* Start with prescaler 1 */
+ rps->timer_prescaler = TIMER_DIV1;
+ rps->timer_period = DIV_ROUND_UP(clk_rate, HZ);
+ timer_rate = clk_rate;
+
+ if (rps->timer_period > TIMER_MAX_VAL) {
+ rps->timer_prescaler = TIMER_DIV16;
+ timer_rate = clk_rate / 16;
+ rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
+ }
+ if (rps->timer_period > TIMER_MAX_VAL) {
+ rps->timer_prescaler = TIMER_DIV256;
+ timer_rate = clk_rate / 256;
+ rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
+ }
+
+ rps->clkevent.name = "oxnas-rps";
+ rps->clkevent.features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ;
+ rps->clkevent.tick_resume = oxnas_rps_timer_shutdown;
+ rps->clkevent.set_state_shutdown = oxnas_rps_timer_shutdown;
+ rps->clkevent.set_state_periodic = oxnas_rps_timer_set_periodic;
+ rps->clkevent.set_state_oneshot = oxnas_rps_timer_set_oneshot;
+ rps->clkevent.set_next_event = oxnas_rps_timer_next_event;
+ rps->clkevent.rating = 200;
+ rps->clkevent.cpumask = cpu_possible_mask;
+ rps->clkevent.irq = rps->irq;
+ clockevents_config_and_register(&rps->clkevent,
+ timer_rate,
+ 1,
+ TIMER_MAX_VAL);
+
+ pr_info("Registered clock event rate %luHz prescaler %x period %lu\n",
+ clk_rate,
+ rps->timer_prescaler,
+ rps->timer_period);
+
+ return 0;
+}
+
+/* Clocksource */
+
+static void __iomem *timer_sched_base;
+
+static u64 notrace oxnas_rps_read_sched_clock(void)
+{
+ return ~readl_relaxed(timer_sched_base);
+}
+
+static int __init oxnas_rps_clocksource_init(struct oxnas_rps_timer *rps)
+{
+ ulong clk_rate = clk_get_rate(rps->clk);
+ int ret;
+
+ /* use prescale 16 */
+ clk_rate = clk_rate / 16;
+
+ writel_relaxed(TIMER_MAX_VAL, rps->clksrc_base + TIMER_LOAD_REG);
+ writel_relaxed(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16,
+ rps->clksrc_base + TIMER_CTRL_REG);
+
+ timer_sched_base = rps->clksrc_base + TIMER_CURR_REG;
+ sched_clock_register(oxnas_rps_read_sched_clock,
+ TIMER_BITS, clk_rate);
+ ret = clocksource_mmio_init(timer_sched_base,
+ "oxnas_rps_clocksource_timer",
+ clk_rate, 250, TIMER_BITS,
+ clocksource_mmio_readl_down);
+ if (WARN_ON(ret)) {
+ pr_err("can't register clocksource\n");
+ return ret;
+ }
+
+ pr_info("Registered clocksource rate %luHz\n", clk_rate);
+
+ return 0;
+}
+
+static int __init oxnas_rps_timer_init(struct device_node *np)
+{
+ struct oxnas_rps_timer *rps;
+ void __iomem *base;
+ int ret;
+
+ rps = kzalloc(sizeof(*rps), GFP_KERNEL);
+ if (!rps)
+ return -ENOMEM;
+
+ rps->clk = of_clk_get(np, 0);
+ if (IS_ERR(rps->clk)) {
+ ret = PTR_ERR(rps->clk);
+ goto err_alloc;
+ }
+
+ ret = clk_prepare_enable(rps->clk);
+ if (ret)
+ goto err_clk;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -ENXIO;
+ goto err_clk_prepare;
+ }
+
+ rps->irq = irq_of_parse_and_map(np, 0);
+ if (rps->irq <= 0) {
+ ret = -EINVAL;
+ goto err_iomap;
+ }
+
+ rps->clkevt_base = base + TIMER1_REG_OFFSET;
+ rps->clksrc_base = base + TIMER2_REG_OFFSET;
+
+ /* Disable timers */
+ writel_relaxed(0, rps->clkevt_base + TIMER_CTRL_REG);
+ writel_relaxed(0, rps->clksrc_base + TIMER_CTRL_REG);
+ writel_relaxed(0, rps->clkevt_base + TIMER_LOAD_REG);
+ writel_relaxed(0, rps->clksrc_base + TIMER_LOAD_REG);
+ writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
+ writel_relaxed(0, rps->clksrc_base + TIMER_CLRINT_REG);
+
+ ret = request_irq(rps->irq, oxnas_rps_timer_irq,
+ IRQF_TIMER | IRQF_IRQPOLL,
+ "rps-timer", rps);
+ if (ret)
+ goto err_iomap;
+
+ ret = oxnas_rps_clocksource_init(rps);
+ if (ret)
+ goto err_irqreq;
+
+ ret = oxnas_rps_clockevent_init(rps);
+ if (ret)
+ goto err_irqreq;
+
+ return 0;
+
+err_irqreq:
+ free_irq(rps->irq, rps);
+err_iomap:
+ iounmap(base);
+err_clk_prepare:
+ clk_disable_unprepare(rps->clk);
+err_clk:
+ clk_put(rps->clk);
+err_alloc:
+ kfree(rps);
+
+ return ret;
+}
+
+CLOCKSOURCE_OF_DECLARE(ox810se_rps,
+ "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index 2854c663e8b5..c32148ec7a38 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -19,7 +19,6 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
-#include <asm/mach/time.h>
#define PRIMA2_CLOCK_FREQ 1000000
@@ -189,24 +188,36 @@ static void __init sirfsoc_clockevent_init(void)
}
/* initialize the kernel jiffy timer source */
-static void __init sirfsoc_prima2_timer_init(struct device_node *np)
+static int __init sirfsoc_prima2_timer_init(struct device_node *np)
{
unsigned long rate;
struct clk *clk;
+ int ret;
clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(clk));
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock");
+ return PTR_ERR(clk);
+ }
- BUG_ON(clk_prepare_enable(clk));
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("Failed to enable clock");
+ return ret;
+ }
rate = clk_get_rate(clk);
- BUG_ON(rate < PRIMA2_CLOCK_FREQ);
- BUG_ON(rate % PRIMA2_CLOCK_FREQ);
+ if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
+ pr_err("Invalid clock rate");
+ return -EINVAL;
+ }
sirfsoc_timer_base = of_iomap(np, 0);
- if (!sirfsoc_timer_base)
- panic("unable to map timer cpu registers\n");
+ if (!sirfsoc_timer_base) {
+ pr_err("unable to map timer cpu registers\n");
+ return -ENXIO;
+ }
sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
@@ -216,14 +227,23 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
- BUG_ON(clocksource_register_hz(&sirfsoc_clocksource,
- PRIMA2_CLOCK_FREQ));
+ ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ);
+ if (ret) {
+ pr_err("Failed to register clocksource");
+ return ret;
+ }
sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);
- BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
+ ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
+ if (ret) {
+ pr_err("Failed to setup irq");
+ return ret;
+ }
sirfsoc_clockevent_init();
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer,
"sirf,prima2-tick", sirfsoc_prima2_timer_init);
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 5f45b9adef60..d07863388e05 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -77,7 +77,7 @@ void __init sp804_timer_disable(void __iomem *base)
writel(0, base + TIMER_CTRL);
}
-void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
+int __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name,
struct clk *clk,
int use_sched_clock)
@@ -89,14 +89,13 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
if (IS_ERR(clk)) {
pr_err("sp804: clock not found: %d\n",
(int)PTR_ERR(clk));
- return;
+ return PTR_ERR(clk);
}
}
rate = sp804_get_clock_rate(clk);
-
if (rate < 0)
- return;
+ return -EINVAL;
/* setup timer 0 as free-running clocksource */
writel(0, base + TIMER_CTRL);
@@ -112,6 +111,8 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
sched_clock_base = base;
sched_clock_register(sp804_read, 32, rate);
}
+
+ return 0;
}
@@ -186,7 +187,7 @@ static struct irqaction sp804_timer_irq = {
.dev_id = &sp804_clockevent,
};
-void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
+int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
{
struct clock_event_device *evt = &sp804_clockevent;
long rate;
@@ -196,12 +197,12 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
if (IS_ERR(clk)) {
pr_err("sp804: %s clock not found: %d\n", name,
(int)PTR_ERR(clk));
- return;
+ return PTR_ERR(clk);
}
rate = sp804_get_clock_rate(clk);
if (rate < 0)
- return;
+ return -EINVAL;
clkevt_base = base;
clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
@@ -213,27 +214,31 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
setup_irq(irq, &sp804_timer_irq);
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
+
+ return 0;
}
-static void __init sp804_of_init(struct device_node *np)
+static int __init sp804_of_init(struct device_node *np)
{
static bool initialized = false;
void __iomem *base;
- int irq;
+ int irq, ret = -EINVAL;
u32 irq_num = 0;
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);
base = of_iomap(np, 0);
- if (WARN_ON(!base))
- return;
+ if (!base)
+ return -ENXIO;
/* Ensure timers are disabled */
writel(0, base + TIMER_CTRL);
writel(0, base + TIMER_2_BASE + TIMER_CTRL);
- if (initialized || !of_device_is_available(np))
+ if (initialized || !of_device_is_available(np)) {
+ ret = -EINVAL;
goto err;
+ }
clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
@@ -256,35 +261,53 @@ static void __init sp804_of_init(struct device_node *np)
of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
if (irq_num == 2) {
- __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
- __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+ ret = __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
+ if (ret)
+ goto err;
+
+ ret = __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+ if (ret)
+ goto err;
} else {
- __sp804_clockevents_init(base, irq, clk1 , name);
- __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
- name, clk2, 1);
+ ret = __sp804_clockevents_init(base, irq, clk1, name);
+ if (ret)
+ goto err;
+
+ ret = __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
+ name, clk2, 1);
+ if (ret)
+ goto err;
}
initialized = true;
- return;
+ return 0;
err:
iounmap(base);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
-static void __init integrator_cp_of_init(struct device_node *np)
+static int __init integrator_cp_of_init(struct device_node *np)
{
static int init_count = 0;
void __iomem *base;
- int irq;
+ int irq, ret = -EINVAL;
const char *name = of_get_property(np, "compatible", NULL);
struct clk *clk;
base = of_iomap(np, 0);
- if (WARN_ON(!base))
- return;
+ if (!base) {
+ pr_err("Failed to iomap");
+ return -ENXIO;
+ }
+
clk = of_clk_get(np, 0);
- if (WARN_ON(IS_ERR(clk)))
- return;
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock");
+ return PTR_ERR(clk);
+ }
/* Ensure timer is disabled */
writel(0, base + TIMER_CTRL);
@@ -292,19 +315,24 @@ static void __init integrator_cp_of_init(struct device_node *np)
if (init_count == 2 || !of_device_is_available(np))
goto err;
- if (!init_count)
- __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
- else {
+ if (!init_count) {
+ ret = __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
+ if (ret)
+ goto err;
+ } else {
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0)
goto err;
- __sp804_clockevents_init(base, irq, clk, name);
+ ret = __sp804_clockevents_init(base, irq, clk, name);
+ if (ret)
+ goto err;
}
init_count++;
- return;
+ return 0;
err:
iounmap(base);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index f3dcb76799b4..1b2574c4fb97 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -98,7 +98,7 @@ static struct stm32_clock_event_ddata clock_event_ddata = {
},
};
-static void __init stm32_clockevent_init(struct device_node *np)
+static int __init stm32_clockevent_init(struct device_node *np)
{
struct stm32_clock_event_ddata *data = &clock_event_ddata;
struct clk *clk;
@@ -130,12 +130,14 @@ static void __init stm32_clockevent_init(struct device_node *np)
data->base = of_iomap(np, 0);
if (!data->base) {
+ ret = -ENXIO;
pr_err("failed to map registers for clockevent\n");
goto err_iomap;
}
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
+ ret = -EINVAL;
pr_err("%s: failed to get irq.\n", np->full_name);
goto err_get_irq;
}
@@ -173,7 +175,7 @@ static void __init stm32_clockevent_init(struct device_node *np)
pr_info("%s: STM32 clockevent driver initialized (%d bits)\n",
np->full_name, bits);
- return;
+ return ret;
err_get_irq:
iounmap(data->base);
@@ -182,7 +184,7 @@ err_iomap:
err_clk_enable:
clk_put(clk);
err_clk_get:
- return;
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 24c83f9efd87..c184eb84101e 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -311,33 +311,42 @@ err_free:
return ret;
}
-static void __init sun5i_timer_init(struct device_node *node)
+static int __init sun5i_timer_init(struct device_node *node)
{
struct reset_control *rstc;
void __iomem *timer_base;
struct clk *clk;
- int irq;
+ int irq, ret;
timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(timer_base))
- panic("Can't map registers");
+ if (IS_ERR(timer_base)) {
+ pr_err("Can't map registers");
+ return PTR_ERR(timer_base);;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return -EINVAL;
+ }
clk = of_clk_get(node, 0);
- if (IS_ERR(clk))
- panic("Can't get timer clock");
+ if (IS_ERR(clk)) {
+ pr_err("Can't get timer clock");
+ return PTR_ERR(clk);
+ }
rstc = of_reset_control_get(node, NULL);
if (!IS_ERR(rstc))
reset_control_deassert(rstc);
- sun5i_setup_clocksource(node, timer_base, clk, irq);
- sun5i_setup_clockevent(node, timer_base, clk, irq);
+ ret = sun5i_setup_clocksource(node, timer_base, clk, irq);
+ if (ret)
+ return ret;
+
+ return sun5i_setup_clockevent(node, timer_base, clk, irq);
}
CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
- sun5i_timer_init);
+ sun5i_timer_init);
CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
- sun5i_timer_init);
+ sun5i_timer_init);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 8518d9dfba5c..92b7e390f6c8 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -88,14 +88,14 @@ static u64 notrace omap_32k_read_sched_clock(void)
return ti_32k_read_cycles(&ti_32k_timer.cs);
}
-static void __init ti_32k_timer_init(struct device_node *np)
+static int __init ti_32k_timer_init(struct device_node *np)
{
int ret;
ti_32k_timer.base = of_iomap(np, 0);
if (!ti_32k_timer.base) {
pr_err("Can't ioremap 32k timer base\n");
- return;
+ return -ENXIO;
}
ti_32k_timer.counter = ti_32k_timer.base;
@@ -116,11 +116,13 @@ static void __init ti_32k_timer_init(struct device_node *np)
ret = clocksource_register_hz(&ti_32k_timer.cs, 32768);
if (ret) {
pr_err("32k_counter: can't register clocksource\n");
- return;
+ return ret;
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k",
ti_32k_timer_init);
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
index 1744b243898a..704e40c6f151 100644
--- a/drivers/clocksource/timer-u300.c
+++ b/drivers/clocksource/timer-u300.c
@@ -359,27 +359,37 @@ static struct delay_timer u300_delay_timer;
/*
* This sets up the system timers, clock source and clock event.
*/
-static void __init u300_timer_init_of(struct device_node *np)
+static int __init u300_timer_init_of(struct device_node *np)
{
unsigned int irq;
struct clk *clk;
unsigned long rate;
+ int ret;
u300_timer_base = of_iomap(np, 0);
- if (!u300_timer_base)
- panic("could not ioremap system timer\n");
+ if (!u300_timer_base) {
+ pr_err("could not ioremap system timer\n");
+ return -ENXIO;
+ }
/* Get the IRQ for the GP1 timer */
irq = irq_of_parse_and_map(np, 2);
- if (!irq)
- panic("no IRQ for system timer\n");
+ if (!irq) {
+ pr_err("no IRQ for system timer\n");
+ return -EINVAL;
+ }
pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
/* Clock the interrupt controller */
clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
rate = clk_get_rate(clk);
u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
@@ -410,7 +420,9 @@ static void __init u300_timer_init_of(struct device_node *np)
u300_timer_base + U300_TIMER_APP_RGPT1);
/* Set up the IRQ handler */
- setup_irq(irq, &u300_timer_irq);
+ ret = setup_irq(irq, &u300_timer_irq);
+ if (ret)
+ return ret;
/* Reset the General Purpose timer 2 */
writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
@@ -428,9 +440,12 @@ static void __init u300_timer_init_of(struct device_node *np)
u300_timer_base + U300_TIMER_APP_EGPT2);
/* Use general purpose timer 2 as clock source */
- if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
- "GPT2", rate, 300, 32, clocksource_mmio_readl_up))
+ ret = clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
+ "GPT2", rate, 300, 32, clocksource_mmio_readl_up);
+ if (ret) {
pr_err("timer: failed to initialize U300 clock source\n");
+ return ret;
+ }
/* Configure and register the clockevent */
clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
@@ -440,6 +455,7 @@ static void __init u300_timer_init_of(struct device_node *np)
* TODO: init and register the rest of the timers too, they can be
* used by hrtimers!
*/
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/versatile.c
index 0a26d3dde6c0..220b490a8142 100644
--- a/drivers/clocksource/versatile.c
+++ b/drivers/clocksource/versatile.c
@@ -25,16 +25,18 @@ static u64 notrace versatile_sys_24mhz_read(void)
return readl(versatile_sys_24mhz);
}
-static void __init versatile_sched_clock_init(struct device_node *node)
+static int __init versatile_sched_clock_init(struct device_node *node)
{
void __iomem *base = of_iomap(node, 0);
if (!base)
- return;
+ return -ENXIO;
versatile_sys_24mhz = base + SYS_24MHZ;
sched_clock_register(versatile_sys_24mhz_read, 32, 24000000);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg",
versatile_sched_clock_init);
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index a0e6c68536a1..55d8d8402d90 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -156,15 +156,18 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
return 0;
}
-static void __init pit_timer_init(struct device_node *np)
+static int __init pit_timer_init(struct device_node *np)
{
struct clk *pit_clk;
void __iomem *timer_base;
unsigned long clk_rate;
- int irq;
+ int irq, ret;
timer_base = of_iomap(np, 0);
- BUG_ON(!timer_base);
+ if (!timer_base) {
+ pr_err("Failed to iomap");
+ return -ENXIO;
+ }
/*
* PIT0 and PIT1 can be chained to build a 64-bit timer,
@@ -175,12 +178,16 @@ static void __init pit_timer_init(struct device_node *np)
clkevt_base = timer_base + PITn_OFFSET(3);
irq = irq_of_parse_and_map(np, 0);
- BUG_ON(irq <= 0);
+ if (irq <= 0)
+ return -EINVAL;
pit_clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(pit_clk));
+ if (IS_ERR(pit_clk))
+ return PTR_ERR(pit_clk);
- BUG_ON(clk_prepare_enable(pit_clk));
+ ret = clk_prepare_enable(pit_clk);
+ if (ret)
+ return ret;
clk_rate = clk_get_rate(pit_clk);
cycle_per_jiffy = clk_rate / (HZ);
@@ -188,8 +195,10 @@ static void __init pit_timer_init(struct device_node *np)
/* enable the pit module */
__raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
- BUG_ON(pit_clocksource_init(clk_rate));
+ ret = pit_clocksource_init(clk_rate);
+ if (ret)
+ return ret;
- pit_clockevent_init(clk_rate, irq);
+ return pit_clockevent_init(clk_rate, irq);
}
CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index ddb409274f45..b15069483fbd 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -121,38 +121,48 @@ static struct irqaction irq = {
.dev_id = &clockevent,
};
-static void __init vt8500_timer_init(struct device_node *np)
+static int __init vt8500_timer_init(struct device_node *np)
{
- int timer_irq;
+ int timer_irq, ret;
regbase = of_iomap(np, 0);
if (!regbase) {
pr_err("%s: Missing iobase description in Device Tree\n",
__func__);
- return;
+ return -ENXIO;
}
+
timer_irq = irq_of_parse_and_map(np, 0);
if (!timer_irq) {
pr_err("%s: Missing irq description in Device Tree\n",
__func__);
- return;
+ return -EINVAL;
}
writel(1, regbase + TIMER_CTRL_VAL);
writel(0xf, regbase + TIMER_STATUS_VAL);
writel(~0, regbase + TIMER_MATCH_VAL);
- if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
+ ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ);
+ if (ret) {
pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
- __func__, clocksource.name);
+ __func__, clocksource.name);
+ return ret;
+ }
clockevent.cpumask = cpumask_of(0);
- if (setup_irq(timer_irq, &irq))
+ ret = setup_irq(timer_irq, &irq);
+ if (ret) {
pr_err("%s: setup_irq failed for %s\n", __func__,
clockevent.name);
+ return ret;
+ }
+
clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
MIN_OSCR_DELTA * 2, 0xf0000000);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index ceaa6133f9c2..9a53f5ef6157 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -210,9 +210,9 @@ error_free:
return ret;
}
-static void __init zevio_timer_init(struct device_node *node)
+static int __init zevio_timer_init(struct device_node *node)
{
- BUG_ON(zevio_timer_add(node));
+ return zevio_timer_add(node);
}
CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
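These int-returning callbacks only pay off because the OF probe loop in the core checks them. A simplified sketch of that probe loop — not the verbatim implementation — showing how a failed timer is now logged and skipped instead of taking the machine down:

static void __init clocksource_probe_sketch(void)
{
	struct device_node *np;
	const struct of_device_id *match;
	of_init_fn_1_ret init_func;
	int ret;

	for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
		if (!of_device_is_available(np))
			continue;

		init_func = match->data;
		ret = init_func(np);
		if (ret) {
			pr_err("Failed to initialize '%s': %d\n",
			       of_node_full_name(np), ret);
			continue;
		}
	}
}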
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index b7445b6ae5a4..c822d72629d5 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -31,23 +31,18 @@ config CPU_FREQ_BOOST_SW
depends on THERMAL
config CPU_FREQ_STAT
- tristate "CPU frequency translation statistics"
+ bool "CPU frequency transition statistics"
default y
help
- This driver exports CPU frequency statistics information through sysfs
- file system.
-
- To compile this driver as a module, choose M here: the
- module will be called cpufreq_stats.
+ Export CPU frequency statistics information through sysfs.
If in doubt, say N.
config CPU_FREQ_STAT_DETAILS
- bool "CPU frequency translation statistics details"
+ bool "CPU frequency transition statistics details"
depends on CPU_FREQ_STAT
help
- This will show detail CPU frequency translation table in sysfs file
- system.
+ Show detailed CPU frequency transition table in sysfs.
If in doubt, say N.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 32a15052f363..297e9128fe9f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -468,20 +468,17 @@ unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
struct cpufreq_frequency_table *entry;
- unsigned int next_perf_state, next_freq, freq;
+ unsigned int next_perf_state, next_freq, index;
/*
* Find the closest frequency above target_freq.
- *
- * The table is sorted in the reverse order with respect to the
- * frequency and all of the entries are valid (see the initialization).
*/
- entry = policy->freq_table;
- do {
- entry++;
- freq = entry->frequency;
- } while (freq >= target_freq && freq != CPUFREQ_TABLE_END);
- entry--;
+ if (policy->cached_target_freq == target_freq)
+ index = policy->cached_resolved_idx;
+ else
+ index = cpufreq_table_find_index_dl(policy, target_freq);
+
+ entry = &policy->freq_table[index];
next_freq = entry->frequency;
next_perf_state = entry->driver_data;
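The rewrite above leans on the resolve-then-switch contract added elsewhere in this series: the governor first calls cpufreq_driver_resolve_freq(), which caches the (target frequency, table index) pair on the policy, and the driver's fast_switch path can then reuse the cached index instead of rescanning the table. A hedged caller-side sketch (gov_fast_adjust is a hypothetical governor helper):

/* hypothetical governor-side caller */
static void gov_fast_adjust(struct cpufreq_policy *policy,
			    unsigned int target_freq)
{
	unsigned int freq;

	/* Fills policy->cached_target_freq / cached_resolved_idx */
	freq = cpufreq_driver_resolve_freq(policy, target_freq);

	/* acpi_cpufreq_fast_switch() above can now hit the cached index */
	cpufreq_driver_fast_switch(policy, freq);
}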
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 404360cad25c..042023bbbf62 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -48,9 +48,8 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *od_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = od_data->tuners;
- struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs);
- if (!od_info->freq_table)
+ if (!policy->freq_table)
return freq_next;
rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
@@ -92,10 +91,9 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
else {
unsigned int index;
- cpufreq_frequency_table_target(policy,
- od_info->freq_table, policy->cur - 1,
- CPUFREQ_RELATION_H, &index);
- freq_next = od_info->freq_table[index].frequency;
+ index = cpufreq_table_find_index_h(policy,
+ policy->cur - 1);
+ freq_next = policy->freq_table[index].frequency;
}
data->freq_prev = freq_next;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5617c7087d77..3dd4884c6f9e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -74,19 +74,12 @@ static inline bool has_target(void)
}
/* internal prototypes */
-static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
+static int cpufreq_init_governor(struct cpufreq_policy *policy);
+static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
-
-static inline void cpufreq_exit_governor(struct cpufreq_policy *policy)
-{
- (void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-}
-
-static inline void cpufreq_stop_governor(struct cpufreq_policy *policy)
-{
- (void)cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-}
+static void cpufreq_stop_governor(struct cpufreq_policy *policy);
+static void cpufreq_governor_limits(struct cpufreq_policy *policy);
/**
* Two notifier lists: the "policy" list is involved in the
@@ -133,15 +126,6 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
-struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
-{
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-
- return policy && !policy_is_inactive(policy) ?
- policy->freq_table : NULL;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
-
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
u64 idle_time;
@@ -354,6 +338,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
pr_debug("FREQ: %lu - CPU: %lu\n",
(unsigned long)freqs->new, (unsigned long)freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
+ cpufreq_stats_record_transition(policy, freqs->new);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -507,6 +492,38 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
+/**
+ * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
+ * one.
+ * @policy: associated policy to interrogate.
+ * @target_freq: target frequency to resolve.
+ *
+ * The target to driver frequency mapping is cached in the policy.
+ *
+ * Return: Lowest driver-supported frequency greater than or equal to the
+ * given target_freq, subject to policy (min/max) and driver limitations.
+ */
+unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+ policy->cached_target_freq = target_freq;
+
+ if (cpufreq_driver->target_index) {
+ int idx;
+
+ idx = cpufreq_frequency_table_target(policy, target_freq,
+ CPUFREQ_RELATION_L);
+ policy->cached_resolved_idx = idx;
+ return policy->freq_table[idx].frequency;
+ }
+
+ if (cpufreq_driver->resolve_freq)
+ return cpufreq_driver->resolve_freq(policy, target_freq);
+
+ return target_freq;
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
+
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
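A hypothetical caller, to show how a governor is expected to use the new
helper: compute a raw frequency, then let cpufreq_driver_resolve_freq()
clamp it to the policy limits, map it onto the driver's table (caching both
the target and the resolved index for the fast-switch path), or defer to the
driver's ->resolve_freq(). A sketch only, not code from this patch:

static unsigned int example_next_freq(struct cpufreq_policy *policy,
				      unsigned long util, unsigned long max)
{
	/* Raw estimate; may fall between entries of the driver's table. */
	unsigned int raw = policy->cpuinfo.max_freq * util / max;

	/*
	 * Clamps to policy->min/max, picks the lowest supported frequency
	 * >= raw, and records it in policy->cached_target_freq /
	 * cached_resolved_idx for a later fast_switch of the same value.
	 */
	return cpufreq_driver_resolve_freq(policy, raw);
}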
@@ -1115,6 +1132,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
CPUFREQ_REMOVE_POLICY, policy);
down_write(&policy->rwsem);
+ cpufreq_stats_free_table(policy);
cpufreq_remove_dev_symlink(policy);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
@@ -1265,13 +1283,12 @@ static int cpufreq_online(unsigned int cpu)
}
}
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_START, policy);
-
if (new_policy) {
ret = cpufreq_add_dev_interface(policy);
if (ret)
goto out_exit_policy;
+
+ cpufreq_stats_create_table(policy);
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
@@ -1280,6 +1297,9 @@ static int cpufreq_online(unsigned int cpu)
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
}
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_START, policy);
+
ret = cpufreq_init_policy(policy);
if (ret) {
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
@@ -1556,9 +1576,6 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
unsigned int new_freq;
- if (cpufreq_suspended)
- return 0;
-
new_freq = cpufreq_driver->get(policy->cpu);
if (!new_freq)
return 0;
@@ -1864,14 +1881,17 @@ static int __target_intermediate(struct cpufreq_policy *policy,
return ret;
}
-static int __target_index(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *freq_table, int index)
+static int __target_index(struct cpufreq_policy *policy, int index)
{
struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
unsigned int intermediate_freq = 0;
+ unsigned int newfreq = policy->freq_table[index].frequency;
int retval = -EINVAL;
bool notify;
+ if (newfreq == policy->cur)
+ return 0;
+
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
if (notify) {
/* Handle switching to intermediate frequency */
@@ -1886,7 +1906,7 @@ static int __target_index(struct cpufreq_policy *policy,
freqs.old = freqs.new;
}
- freqs.new = freq_table[index].frequency;
+ freqs.new = newfreq;
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
__func__, policy->cpu, freqs.old, freqs.new);
@@ -1923,17 +1943,13 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int old_target_freq = target_freq;
- struct cpufreq_frequency_table *freq_table;
- int index, retval;
+ int index;
if (cpufreq_disabled())
return -ENODEV;
/* Make sure that target_freq is within supported range */
- if (target_freq > policy->max)
- target_freq = policy->max;
- if (target_freq < policy->min)
- target_freq = policy->min;
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -1956,23 +1972,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (!cpufreq_driver->target_index)
return -EINVAL;
- freq_table = cpufreq_frequency_get_table(policy->cpu);
- if (unlikely(!freq_table)) {
- pr_err("%s: Unable to find freq_table\n", __func__);
- return -EINVAL;
- }
-
- retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &index);
- if (unlikely(retval)) {
- pr_err("%s: Unable to find matching freq\n", __func__);
- return retval;
- }
-
- if (freq_table[index].frequency == policy->cur)
- return 0;
+ index = cpufreq_frequency_table_target(policy, target_freq, relation);
- return __target_index(policy, freq_table, index);
+ return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1997,7 +1999,7 @@ __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
return NULL;
}
-static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
+static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
int ret;
@@ -2025,36 +2027,82 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
}
}
- if (event == CPUFREQ_GOV_POLICY_INIT)
- if (!try_module_get(policy->governor->owner))
- return -EINVAL;
-
- pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
+ if (!try_module_get(policy->governor->owner))
+ return -EINVAL;
- ret = policy->governor->governor(policy, event);
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
- if (event == CPUFREQ_GOV_POLICY_INIT) {
- if (ret)
+ if (policy->governor->init) {
+ ret = policy->governor->init(policy);
+ if (ret) {
module_put(policy->governor->owner);
- else
- policy->governor->initialized++;
- } else if (event == CPUFREQ_GOV_POLICY_EXIT) {
- policy->governor->initialized--;
- module_put(policy->governor->owner);
+ return ret;
+ }
}
- return ret;
+ return 0;
+}
+
+static void cpufreq_exit_governor(struct cpufreq_policy *policy)
+{
+ if (cpufreq_suspended || !policy->governor)
+ return;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (policy->governor->exit)
+ policy->governor->exit(policy);
+
+ module_put(policy->governor->owner);
}
static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
int ret;
+ if (cpufreq_suspended)
+ return 0;
+
+ if (!policy->governor)
+ return -EINVAL;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
cpufreq_update_current_freq(policy);
- ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
- return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ if (policy->governor->start) {
+ ret = policy->governor->start(policy);
+ if (ret)
+ return ret;
+ }
+
+ if (policy->governor->limits)
+ policy->governor->limits(policy);
+
+ return 0;
+}
+
+static void cpufreq_stop_governor(struct cpufreq_policy *policy)
+{
+ if (cpufreq_suspended || !policy->governor)
+ return;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (policy->governor->stop)
+ policy->governor->stop(policy);
+}
+
+static void cpufreq_governor_limits(struct cpufreq_policy *policy)
+{
+ if (cpufreq_suspended || !policy->governor)
+ return;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (policy->governor->limits)
+ policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
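These helpers replace the old single ->governor(policy, event) multiplexer:
the core now invokes optional per-operation hooks in a fixed order (init,
start, limits on the way up; stop, exit on the way down). For illustration
only, the shape of a governor under the new interface; the conservative,
ondemand, performance, powersave and userspace conversions below are the
real examples:

static int example_gov_init(struct cpufreq_policy *policy)
{
	/* Allocate per-policy state; runs once per governor switch. */
	return 0;
}

static void example_gov_exit(struct cpufreq_policy *policy)
{
	/* Free whatever init allocated. */
}

static int example_gov_start(struct cpufreq_policy *policy)
{
	/* Arm timers/callbacks; limits() is called right after this. */
	return 0;
}

static void example_gov_stop(struct cpufreq_policy *policy)
{
	/* Quiesce; must be safe to call start() again afterwards. */
}

static void example_gov_limits(struct cpufreq_policy *policy)
{
	/* policy->min/max changed: re-evaluate the current target. */
}

static struct cpufreq_governor example_governor = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_gov_init,
	.exit	= example_gov_exit,
	.start	= example_gov_start,
	.stop	= example_gov_stop,
	.limits	= example_gov_limits,
};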
@@ -2069,7 +2117,6 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
- governor->initialized = 0;
err = -EBUSY;
if (!find_governor(governor->name)) {
err = 0;
@@ -2184,6 +2231,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min = new_policy->min;
policy->max = new_policy->max;
+ policy->cached_target_freq = UINT_MAX;
+
pr_debug("new min and max freqs are %u - %u kHz\n",
policy->min, policy->max);
@@ -2195,7 +2244,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
if (new_policy->governor == policy->governor) {
pr_debug("cpufreq: governor limits update\n");
- return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ cpufreq_governor_limits(policy);
+ return 0;
}
pr_debug("governor switch\n");
@@ -2210,7 +2260,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
/* start new governor */
policy->governor = new_policy->governor;
- ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+ ret = cpufreq_init_governor(policy);
if (!ret) {
ret = cpufreq_start_governor(policy);
if (!ret) {
@@ -2224,7 +2274,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("starting governor %s failed\n", policy->governor->name);
if (old_gov) {
policy->governor = old_gov;
- if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+ if (cpufreq_init_governor(policy))
policy->governor = NULL;
else
cpufreq_start_governor(policy);
@@ -2309,26 +2359,25 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
*********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
- struct cpufreq_frequency_table *freq_table;
struct cpufreq_policy *policy;
int ret = -EINVAL;
for_each_active_policy(policy) {
- freq_table = cpufreq_frequency_get_table(policy->cpu);
- if (freq_table) {
- ret = cpufreq_frequency_table_cpuinfo(policy,
- freq_table);
- if (ret) {
- pr_err("%s: Policy frequency update failed\n",
- __func__);
- break;
- }
-
- down_write(&policy->rwsem);
- policy->user_policy.max = policy->max;
- cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
- up_write(&policy->rwsem);
+ if (!policy->freq_table)
+ continue;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy,
+ policy->freq_table);
+ if (ret) {
+ pr_err("%s: Policy frequency update failed\n",
+ __func__);
+ break;
}
+
+ down_write(&policy->rwsem);
+ policy->user_policy.max = policy->max;
+ cpufreq_governor_limits(policy);
+ up_write(&policy->rwsem);
}
return ret;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 316df247e00d..18da4f8051d3 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -17,7 +17,6 @@
struct cs_policy_dbs_info {
struct policy_dbs_info policy_dbs;
unsigned int down_skip;
- unsigned int requested_freq;
};
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -75,19 +74,17 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
+ unsigned int requested_freq = policy->cur;
+
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
- if (dbs_info->requested_freq == policy->max)
+ if (requested_freq == policy->max)
goto out;
- dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
-
- if (dbs_info->requested_freq > policy->max)
- dbs_info->requested_freq = policy->max;
+ requested_freq += get_freq_target(cs_tuners, policy);
- __cpufreq_driver_target(policy, dbs_info->requested_freq,
- CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
goto out;
}
@@ -98,36 +95,27 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
- unsigned int freq_target;
+ unsigned int freq_target, requested_freq = policy->cur;
/*
* if we cannot reduce the frequency anymore, break out early
*/
- if (policy->cur == policy->min)
+ if (requested_freq == policy->min)
goto out;
freq_target = get_freq_target(cs_tuners, policy);
- if (dbs_info->requested_freq > freq_target)
- dbs_info->requested_freq -= freq_target;
+ if (requested_freq > freq_target)
+ requested_freq -= freq_target;
else
- dbs_info->requested_freq = policy->min;
+ requested_freq = policy->min;
- __cpufreq_driver_target(policy, dbs_info->requested_freq,
- CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
}
out:
return dbs_data->sampling_rate;
}
-static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data);
-
-static struct notifier_block cs_cpufreq_notifier_block = {
- .notifier_call = dbs_cpufreq_notifier,
-};
-
/************************** sysfs interface ************************/
-static struct dbs_governor cs_dbs_gov;
static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
const char *buf, size_t count)
@@ -268,15 +256,13 @@ static void cs_free(struct policy_dbs_info *policy_dbs)
kfree(to_dbs_info(policy_dbs));
}
-static int cs_init(struct dbs_data *dbs_data, bool notify)
+static int cs_init(struct dbs_data *dbs_data)
{
struct cs_dbs_tuners *tuners;
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
- if (!tuners) {
- pr_err("%s: kzalloc failed\n", __func__);
+ if (!tuners)
return -ENOMEM;
- }
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
tuners->freq_step = DEF_FREQUENCY_STEP;
@@ -288,19 +274,11 @@ static int cs_init(struct dbs_data *dbs_data, bool notify)
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
jiffies_to_usecs(10);
- if (notify)
- cpufreq_register_notifier(&cs_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
return 0;
}
-static void cs_exit(struct dbs_data *dbs_data, bool notify)
+static void cs_exit(struct dbs_data *dbs_data)
{
- if (notify)
- cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
kfree(dbs_data->tuners);
}
@@ -309,16 +287,10 @@ static void cs_start(struct cpufreq_policy *policy)
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
dbs_info->down_skip = 0;
- dbs_info->requested_freq = policy->cur;
}
-static struct dbs_governor cs_dbs_gov = {
- .gov = {
- .name = "conservative",
- .governor = cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
- },
+static struct dbs_governor cs_governor = {
+ .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
.kobj_type = { .default_attrs = cs_attributes },
.gov_dbs_timer = cs_dbs_timer,
.alloc = cs_alloc,
@@ -328,33 +300,7 @@ static struct dbs_governor cs_dbs_gov = {
.start = cs_start,
};
-#define CPU_FREQ_GOV_CONSERVATIVE (&cs_dbs_gov.gov)
-
-static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct cpufreq_freqs *freq = data;
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
- struct cs_policy_dbs_info *dbs_info;
-
- if (!policy)
- return 0;
-
- /* policy isn't governed by conservative governor */
- if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
- return 0;
-
- dbs_info = to_dbs_info(policy->governor_data);
- /*
- * we only care if our internally tracked freq moves outside the 'valid'
- * ranges of frequency available to us otherwise we do not change it
- */
- if (dbs_info->requested_freq > policy->max
- || dbs_info->requested_freq < policy->min)
- dbs_info->requested_freq = freq->new;
-
- return 0;
-}
+#define CPU_FREQ_GOV_CONSERVATIVE (&cs_governor.gov)
static int __init cpufreq_gov_dbs_init(void)
{
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index be498d56dd69..e415349ab31b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -336,17 +336,6 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
synchronize_sched();
}
-static void gov_cancel_work(struct cpufreq_policy *policy)
-{
- struct policy_dbs_info *policy_dbs = policy->governor_data;
-
- gov_clear_update_util(policy_dbs->policy);
- irq_work_sync(&policy_dbs->irq_work);
- cancel_work_sync(&policy_dbs->work);
- atomic_set(&policy_dbs->work_count, 0);
- policy_dbs->work_in_progress = false;
-}
-
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
struct dbs_governor *gov)
{
@@ -389,7 +378,7 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
gov->free(policy_dbs);
}
-static int cpufreq_governor_init(struct cpufreq_policy *policy)
+int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
struct dbs_data *dbs_data;
@@ -429,7 +418,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
- ret = gov->init(dbs_data, !policy->governor->initialized);
+ ret = gov->init(dbs_data);
if (ret)
goto free_policy_dbs_info;
@@ -458,13 +447,13 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
goto out;
/* Failure, so roll back. */
- pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);
+ pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
policy->governor_data = NULL;
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
- gov->exit(dbs_data, !policy->governor->initialized);
+ gov->exit(dbs_data);
kfree(dbs_data);
free_policy_dbs_info:
@@ -474,8 +463,9 @@ out:
mutex_unlock(&gov_dbs_data_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
-static int cpufreq_governor_exit(struct cpufreq_policy *policy)
+void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -493,17 +483,17 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy)
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
- gov->exit(dbs_data, policy->governor->initialized == 1);
+ gov->exit(dbs_data);
kfree(dbs_data);
}
free_policy_dbs_info(policy_dbs, gov);
mutex_unlock(&gov_dbs_data_mutex);
- return 0;
}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);
-static int cpufreq_governor_start(struct cpufreq_policy *policy)
+int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -539,47 +529,28 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
gov_set_update_util(policy_dbs, sampling_rate);
return 0;
}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);
-static int cpufreq_governor_stop(struct cpufreq_policy *policy)
+void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
- gov_cancel_work(policy);
- return 0;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+
+ gov_clear_update_util(policy_dbs->policy);
+ irq_work_sync(&policy_dbs->irq_work);
+ cancel_work_sync(&policy_dbs->work);
+ atomic_set(&policy_dbs->work_count, 0);
+ policy_dbs->work_in_progress = false;
}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
-static int cpufreq_governor_limits(struct cpufreq_policy *policy)
+void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
mutex_lock(&policy_dbs->timer_mutex);
-
- if (policy->max < policy->cur)
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
-
+ cpufreq_policy_apply_limits(policy);
gov_update_sample_delay(policy_dbs, 0);
mutex_unlock(&policy_dbs->timer_mutex);
-
- return 0;
-}
-
-int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
-{
- if (event == CPUFREQ_GOV_POLICY_INIT) {
- return cpufreq_governor_init(policy);
- } else if (policy->governor_data) {
- switch (event) {
- case CPUFREQ_GOV_POLICY_EXIT:
- return cpufreq_governor_exit(policy);
- case CPUFREQ_GOV_START:
- return cpufreq_governor_start(policy);
- case CPUFREQ_GOV_STOP:
- return cpufreq_governor_stop(policy);
- case CPUFREQ_GOV_LIMITS:
- return cpufreq_governor_limits(policy);
- }
- }
- return -EINVAL;
}
-EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 34eb214b6d57..ef1037e9c92b 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -138,8 +138,8 @@ struct dbs_governor {
unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
struct policy_dbs_info *(*alloc)(void);
void (*free)(struct policy_dbs_info *policy_dbs);
- int (*init)(struct dbs_data *dbs_data, bool notify);
- void (*exit)(struct dbs_data *dbs_data, bool notify);
+ int (*init)(struct dbs_data *dbs_data);
+ void (*exit)(struct dbs_data *dbs_data);
void (*start)(struct cpufreq_policy *policy);
};
@@ -148,6 +148,25 @@ static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy
return container_of(policy->governor, struct dbs_governor, gov);
}
+/* Governor callback routines */
+int cpufreq_dbs_governor_init(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy);
+int cpufreq_dbs_governor_start(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
+
+#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \
+ { \
+ .name = _name_, \
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT, \
+ .owner = THIS_MODULE, \
+ .init = cpufreq_dbs_governor_init, \
+ .exit = cpufreq_dbs_governor_exit, \
+ .start = cpufreq_dbs_governor_start, \
+ .stop = cpufreq_dbs_governor_stop, \
+ .limits = cpufreq_dbs_governor_limits, \
+ }
+
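A hypothetical user of the macro, mirroring the conservative and ondemand
conversions in this patch; everything except the macro itself is made up
for illustration:

static struct dbs_governor example_dbs_governor = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("example"),
	.kobj_type = { .default_attrs = example_attributes },
	.gov_dbs_timer = example_dbs_timer,
	.alloc = example_alloc,
	.free = example_free,
	.init = example_init,
	.exit = example_exit,
};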
/* Governor specific operations */
struct od_ops {
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
@@ -155,7 +174,6 @@ struct od_ops {
};
unsigned int dbs_update(struct cpufreq_policy *policy);
-int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
unsigned int powersave_bias);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 300163430516..3a1f49f5f4c6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -65,34 +65,30 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
{
unsigned int freq_req, freq_reduc, freq_avg;
unsigned int freq_hi, freq_lo;
- unsigned int index = 0;
+ unsigned int index;
unsigned int delay_hi_us;
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct cpufreq_frequency_table *freq_table = policy->freq_table;
- if (!dbs_info->freq_table) {
+ if (!freq_table) {
dbs_info->freq_lo = 0;
dbs_info->freq_lo_delay_us = 0;
return freq_next;
}
- cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
- relation, &index);
- freq_req = dbs_info->freq_table[index].frequency;
+ index = cpufreq_frequency_table_target(policy, freq_next, relation);
+ freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
/* Find freq bounds for freq_avg in freq_table */
- index = 0;
- cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
- CPUFREQ_RELATION_H, &index);
- freq_lo = dbs_info->freq_table[index].frequency;
- index = 0;
- cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
- CPUFREQ_RELATION_L, &index);
- freq_hi = dbs_info->freq_table[index].frequency;
+ index = cpufreq_table_find_index_h(policy, freq_avg);
+ freq_lo = freq_table[index].frequency;
+ index = cpufreq_table_find_index_l(policy, freq_avg);
+ freq_hi = freq_table[index].frequency;
/* Find out how long we have to be in hi and lo freqs */
if (freq_hi == freq_lo) {
@@ -113,7 +109,6 @@ static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
- dbs_info->freq_table = cpufreq_frequency_get_table(policy->cpu);
dbs_info->freq_lo = 0;
}
@@ -361,17 +356,15 @@ static void od_free(struct policy_dbs_info *policy_dbs)
kfree(to_dbs_info(policy_dbs));
}
-static int od_init(struct dbs_data *dbs_data, bool notify)
+static int od_init(struct dbs_data *dbs_data)
{
struct od_dbs_tuners *tuners;
u64 idle_time;
int cpu;
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
- if (!tuners) {
- pr_err("%s: kzalloc failed\n", __func__);
+ if (!tuners)
return -ENOMEM;
- }
cpu = get_cpu();
idle_time = get_cpu_idle_time_us(cpu, NULL);
@@ -402,7 +395,7 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
return 0;
}
-static void od_exit(struct dbs_data *dbs_data, bool notify)
+static void od_exit(struct dbs_data *dbs_data)
{
kfree(dbs_data->tuners);
}
@@ -420,12 +413,7 @@ static struct od_ops od_ops = {
};
static struct dbs_governor od_dbs_gov = {
- .gov = {
- .name = "ondemand",
- .governor = cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
- },
+ .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
.kobj_type = { .default_attrs = od_attributes },
.gov_dbs_timer = od_dbs_timer,
.alloc = od_alloc,
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
index f0121db3cd9e..640ea4e97106 100644
--- a/drivers/cpufreq/cpufreq_ondemand.h
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -13,7 +13,6 @@
struct od_policy_dbs_info {
struct policy_dbs_info policy_dbs;
- struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
unsigned int freq_lo_delay_us;
unsigned int freq_hi_delay_us;
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index af9f4b96f5a8..dafb679adc58 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -16,27 +16,16 @@
#include <linux/init.h>
#include <linux/module.h>
-static int cpufreq_governor_performance(struct cpufreq_policy *policy,
- unsigned int event)
+static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
{
- switch (event) {
- case CPUFREQ_GOV_START:
- case CPUFREQ_GOV_LIMITS:
- pr_debug("setting to %u kHz because of event %u\n",
- policy->max, event);
- __cpufreq_driver_target(policy, policy->max,
- CPUFREQ_RELATION_H);
- break;
- default:
- break;
- }
- return 0;
+ pr_debug("setting to %u kHz\n", policy->max);
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}
static struct cpufreq_governor cpufreq_gov_performance = {
.name = "performance",
- .governor = cpufreq_governor_performance,
.owner = THIS_MODULE,
+ .limits = cpufreq_gov_performance_limits,
};
static int __init cpufreq_gov_performance_init(void)
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index b8b400232a74..78a651038faf 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -16,26 +16,15 @@
#include <linux/init.h>
#include <linux/module.h>
-static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
- unsigned int event)
+static void cpufreq_gov_powersave_limits(struct cpufreq_policy *policy)
{
- switch (event) {
- case CPUFREQ_GOV_START:
- case CPUFREQ_GOV_LIMITS:
- pr_debug("setting to %u kHz because of event %u\n",
- policy->min, event);
- __cpufreq_driver_target(policy, policy->min,
- CPUFREQ_RELATION_L);
- break;
- default:
- break;
- }
- return 0;
+ pr_debug("setting to %u kHz\n", policy->min);
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}
static struct cpufreq_governor cpufreq_gov_powersave = {
.name = "powersave",
- .governor = cpufreq_governor_powersave,
+ .limits = cpufreq_gov_powersave_limits,
.owner = THIS_MODULE,
};
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 5e370a30a964..06d3abdffd3a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <linux/cputime.h>
-static spinlock_t cpufreq_stats_lock;
+static DEFINE_SPINLOCK(cpufreq_stats_lock);
struct cpufreq_stats {
unsigned int total_trans;
@@ -52,6 +52,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
ssize_t len = 0;
int i;
+ if (policy->fast_switch_enabled)
+ return 0;
+
cpufreq_stats_update(stats);
for (i = 0; i < stats->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -68,6 +71,9 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
ssize_t len = 0;
int i, j;
+ if (policy->fast_switch_enabled)
+ return 0;
+
len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += snprintf(buf + len, PAGE_SIZE - len, " : ");
for (i = 0; i < stats->state_num; i++) {
@@ -130,7 +136,7 @@ static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
return -1;
}
-static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
+void cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
struct cpufreq_stats *stats = policy->stats;
@@ -146,39 +152,25 @@ static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
policy->stats = NULL;
}
-static void cpufreq_stats_free_table(unsigned int cpu)
-{
- struct cpufreq_policy *policy;
-
- policy = cpufreq_cpu_get(cpu);
- if (!policy)
- return;
-
- __cpufreq_stats_free_table(policy);
-
- cpufreq_cpu_put(policy);
-}
-
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
+void cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
unsigned int i = 0, count = 0, ret = -ENOMEM;
struct cpufreq_stats *stats;
unsigned int alloc_size;
- unsigned int cpu = policy->cpu;
struct cpufreq_frequency_table *pos, *table;
/* We need cpufreq table for creating stats table */
- table = cpufreq_frequency_get_table(cpu);
+ table = policy->freq_table;
if (unlikely(!table))
- return 0;
+ return;
/* stats already initialized */
if (policy->stats)
- return -EEXIST;
+ return;
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
if (!stats)
- return -ENOMEM;
+ return;
/* Find total allocation size */
cpufreq_for_each_valid_entry(pos, table)
@@ -215,80 +207,32 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
policy->stats = stats;
ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
if (!ret)
- return 0;
+ return;
/* We failed, release resources */
policy->stats = NULL;
kfree(stats->time_in_state);
free_stat:
kfree(stats);
-
- return ret;
-}
-
-static void cpufreq_stats_create_table(unsigned int cpu)
-{
- struct cpufreq_policy *policy;
-
- /*
- * "likely(!policy)" because normally cpufreq_stats will be registered
- * before cpufreq driver
- */
- policy = cpufreq_cpu_get(cpu);
- if (likely(!policy))
- return;
-
- __cpufreq_stats_create_table(policy);
-
- cpufreq_cpu_put(policy);
}
-static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
- unsigned long val, void *data)
+void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
+ unsigned int new_freq)
{
- int ret = 0;
- struct cpufreq_policy *policy = data;
-
- if (val == CPUFREQ_CREATE_POLICY)
- ret = __cpufreq_stats_create_table(policy);
- else if (val == CPUFREQ_REMOVE_POLICY)
- __cpufreq_stats_free_table(policy);
-
- return ret;
-}
-
-static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_freqs *freq = data;
- struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
- struct cpufreq_stats *stats;
+ struct cpufreq_stats *stats = policy->stats;
int old_index, new_index;
- if (!policy) {
- pr_err("%s: No policy found\n", __func__);
- return 0;
- }
-
- if (val != CPUFREQ_POSTCHANGE)
- goto put_policy;
-
- if (!policy->stats) {
+ if (!stats) {
pr_debug("%s: No stats found\n", __func__);
- goto put_policy;
+ return;
}
- stats = policy->stats;
-
old_index = stats->last_index;
- new_index = freq_table_get_index(stats, freq->new);
+ new_index = freq_table_get_index(stats, new_freq);
/* We can't do stats->time_in_state[-1]= .. */
- if (old_index == -1 || new_index == -1)
- goto put_policy;
-
- if (old_index == new_index)
- goto put_policy;
+ if (old_index == -1 || new_index == -1 || old_index == new_index)
+ return;
cpufreq_stats_update(stats);
@@ -297,61 +241,4 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
stats->trans_table[old_index * stats->max_state + new_index]++;
#endif
stats->total_trans++;
-
-put_policy:
- cpufreq_cpu_put(policy);
- return 0;
}
-
-static struct notifier_block notifier_policy_block = {
- .notifier_call = cpufreq_stat_notifier_policy
-};
-
-static struct notifier_block notifier_trans_block = {
- .notifier_call = cpufreq_stat_notifier_trans
-};
-
-static int __init cpufreq_stats_init(void)
-{
- int ret;
- unsigned int cpu;
-
- spin_lock_init(&cpufreq_stats_lock);
- ret = cpufreq_register_notifier(&notifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER);
- if (ret)
- return ret;
-
- for_each_online_cpu(cpu)
- cpufreq_stats_create_table(cpu);
-
- ret = cpufreq_register_notifier(&notifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (ret) {
- cpufreq_unregister_notifier(&notifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER);
- for_each_online_cpu(cpu)
- cpufreq_stats_free_table(cpu);
- return ret;
- }
-
- return 0;
-}
-static void __exit cpufreq_stats_exit(void)
-{
- unsigned int cpu;
-
- cpufreq_unregister_notifier(&notifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER);
- cpufreq_unregister_notifier(&notifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- for_each_online_cpu(cpu)
- cpufreq_stats_free_table(cpu);
-}
-
-MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION("Export cpufreq stats via sysfs");
-MODULE_LICENSE("GPL");
-
-module_init(cpufreq_stats_init);
-module_exit(cpufreq_stats_exit);
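With the module machinery gone, the stats code no longer registers any
notifiers; the cpufreq core calls it directly at the three points patched in
cpufreq.c earlier in this series. The resulting call flow, as a comment-only
sketch:

/*
 * cpufreq_online(cpu)
 *   -> cpufreq_stats_create_table(policy)
 *      (was the CPUFREQ_CREATE_POLICY policy notifier)
 *
 * __cpufreq_notify_transition(policy, ..., CPUFREQ_POSTCHANGE)
 *   -> cpufreq_stats_record_transition(policy, freqs->new)
 *      (was the CPUFREQ_TRANSITION_NOTIFIER callback)
 *
 * cpufreq_policy_put_kobj(policy)
 *   -> cpufreq_stats_free_table(policy)
 *      (was the CPUFREQ_REMOVE_POLICY policy notifier)
 */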
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 9f3dec9a3f36..bd897e3e134d 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -65,66 +65,66 @@ static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
return 0;
}
-static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
- unsigned int event)
+static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
+{
+ mutex_lock(&userspace_mutex);
+ kfree(policy->governor_data);
+ policy->governor_data = NULL;
+ mutex_unlock(&userspace_mutex);
+}
+
+static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
{
unsigned int *setspeed = policy->governor_data;
- unsigned int cpu = policy->cpu;
- int rc = 0;
- if (event == CPUFREQ_GOV_POLICY_INIT)
- return cpufreq_userspace_policy_init(policy);
+ BUG_ON(!policy->cur);
+ pr_debug("started managing cpu %u\n", policy->cpu);
- if (!setspeed)
- return -EINVAL;
-
- switch (event) {
- case CPUFREQ_GOV_POLICY_EXIT:
- mutex_lock(&userspace_mutex);
- policy->governor_data = NULL;
- kfree(setspeed);
- mutex_unlock(&userspace_mutex);
- break;
- case CPUFREQ_GOV_START:
- BUG_ON(!policy->cur);
- pr_debug("started managing cpu %u\n", cpu);
-
- mutex_lock(&userspace_mutex);
- per_cpu(cpu_is_managed, cpu) = 1;
- *setspeed = policy->cur;
- mutex_unlock(&userspace_mutex);
- break;
- case CPUFREQ_GOV_STOP:
- pr_debug("managing cpu %u stopped\n", cpu);
-
- mutex_lock(&userspace_mutex);
- per_cpu(cpu_is_managed, cpu) = 0;
- *setspeed = 0;
- mutex_unlock(&userspace_mutex);
- break;
- case CPUFREQ_GOV_LIMITS:
- mutex_lock(&userspace_mutex);
- pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
- cpu, policy->min, policy->max, policy->cur, *setspeed);
-
- if (policy->max < *setspeed)
- __cpufreq_driver_target(policy, policy->max,
- CPUFREQ_RELATION_H);
- else if (policy->min > *setspeed)
- __cpufreq_driver_target(policy, policy->min,
- CPUFREQ_RELATION_L);
- else
- __cpufreq_driver_target(policy, *setspeed,
- CPUFREQ_RELATION_L);
- mutex_unlock(&userspace_mutex);
- break;
- }
- return rc;
+ mutex_lock(&userspace_mutex);
+ per_cpu(cpu_is_managed, policy->cpu) = 1;
+ *setspeed = policy->cur;
+ mutex_unlock(&userspace_mutex);
+ return 0;
+}
+
+static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed = policy->governor_data;
+
+ pr_debug("managing cpu %u stopped\n", policy->cpu);
+
+ mutex_lock(&userspace_mutex);
+ per_cpu(cpu_is_managed, policy->cpu) = 0;
+ *setspeed = 0;
+ mutex_unlock(&userspace_mutex);
+}
+
+static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed = policy->governor_data;
+
+ mutex_lock(&userspace_mutex);
+
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+ policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
+
+ if (policy->max < *setspeed)
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > *setspeed)
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ else
+ __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
+
+ mutex_unlock(&userspace_mutex);
}
static struct cpufreq_governor cpufreq_gov_userspace = {
.name = "userspace",
- .governor = cpufreq_governor_userspace,
+ .init = cpufreq_userspace_policy_init,
+ .exit = cpufreq_userspace_policy_exit,
+ .start = cpufreq_userspace_policy_start,
+ .stop = cpufreq_userspace_policy_stop,
+ .limits = cpufreq_userspace_policy_limits,
.store_setspeed = cpufreq_set,
.show_setspeed = show_speed,
.owner = THIS_MODULE,
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 7e336d20c184..b95a872800ec 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -38,26 +38,6 @@ struct davinci_cpufreq {
};
static struct davinci_cpufreq cpufreq;
-static int davinci_verify_speed(struct cpufreq_policy *policy)
-{
- struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
- struct cpufreq_frequency_table *freq_table = pdata->freq_table;
- struct clk *armclk = cpufreq.armclk;
-
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
-
- if (policy->cpu)
- return -EINVAL;
-
- cpufreq_verify_within_cpu_limits(policy);
- policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
- policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
- return 0;
-}
-
static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
@@ -121,7 +101,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
static struct cpufreq_driver davinci_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = davinci_verify_speed,
+ .verify = cpufreq_generic_frequency_table_verify,
.target_index = davinci_target,
.get = cpufreq_generic_get,
.init = davinci_cpu_init,
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index a8f1daffc9bc..3bbbf9e6960c 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -63,8 +63,6 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
else
return 0;
}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
-
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
@@ -108,20 +106,16 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
*/
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
{
- struct cpufreq_frequency_table *table =
- cpufreq_frequency_get_table(policy->cpu);
- if (!table)
+ if (!policy->freq_table)
return -ENODEV;
- return cpufreq_frequency_table_verify(policy, table);
+ return cpufreq_frequency_table_verify(policy, policy->freq_table);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
-int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table,
- unsigned int target_freq,
- unsigned int relation,
- unsigned int *index)
+int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
@@ -132,7 +126,9 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
.frequency = 0,
};
struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *table = policy->freq_table;
unsigned int freq, diff, i = 0;
+ int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
target_freq, relation, policy->cpu);
@@ -196,25 +192,26 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
}
}
if (optimal.driver_data > i) {
- if (suboptimal.driver_data > i)
- return -EINVAL;
- *index = suboptimal.driver_data;
- } else
- *index = optimal.driver_data;
+ if (suboptimal.driver_data > i) {
+ WARN(1, "Invalid frequency table: %d\n", policy->cpu);
+ return 0;
+ }
- pr_debug("target index is %u, freq is:%u kHz\n", *index,
- table[*index].frequency);
+ index = suboptimal.driver_data;
+ } else
+ index = optimal.driver_data;
- return 0;
+ pr_debug("target index is %u, freq is:%u kHz\n", index,
+ table[index].frequency);
+ return index;
}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
+EXPORT_SYMBOL_GPL(cpufreq_table_index_unsorted);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq)
{
- struct cpufreq_frequency_table *pos, *table;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
- table = cpufreq_frequency_get_table(policy->cpu);
if (unlikely(!table)) {
pr_debug("%s: Unable to find frequency table\n", __func__);
return -ENOENT;
@@ -300,15 +297,72 @@ struct freq_attr *cpufreq_generic_attr[] = {
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
+static int set_freq_table_sorted(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+ struct cpufreq_frequency_table *prev = NULL;
+ int ascending = 0;
+
+ policy->freq_table_sorted = CPUFREQ_TABLE_UNSORTED;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ if (!prev) {
+ prev = pos;
+ continue;
+ }
+
+ if (pos->frequency == prev->frequency) {
+ pr_warn("Duplicate freq-table entries: %u\n",
+ pos->frequency);
+ return -EINVAL;
+ }
+
+ /* Frequency increased from prev to pos */
+ if (pos->frequency > prev->frequency) {
+ /* But frequency was decreasing earlier */
+ if (ascending < 0) {
+ pr_debug("Freq table is unsorted\n");
+ return 0;
+ }
+
+ ascending++;
+ } else {
+ /* Frequency decreased from prev to pos */
+
+ /* But frequency was increasing earlier */
+ if (ascending > 0) {
+ pr_debug("Freq table is unsorted\n");
+ return 0;
+ }
+
+ ascending--;
+ }
+
+ prev = pos;
+ }
+
+ if (ascending > 0)
+ policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_ASCENDING;
+ else
+ policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_DESCENDING;
+
+ pr_debug("Freq table is sorted in %s order\n",
+ ascending > 0 ? "ascending" : "descending");
+
+ return 0;
+}
+
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
- int ret = cpufreq_frequency_table_cpuinfo(policy, table);
+ int ret;
- if (!ret)
- policy->freq_table = table;
+ ret = cpufreq_frequency_table_cpuinfo(policy, table);
+ if (ret)
+ return ret;
- return ret;
+ policy->freq_table = table;
+ return set_freq_table_sorted(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
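For example, with a hypothetical driver table: a strictly increasing table
with no duplicates is marked CPUFREQ_TABLE_SORTED_ASCENDING by
set_freq_table_sorted(), which lets the cpufreq_table_find_index_*() helpers
take their sorted fast paths instead of cpufreq_table_index_unsorted():

static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 200000 },		/* kHz */
	{ .frequency = 400000 },
	{ .frequency = 800000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

/* In the driver's ->init() callback: */
ret = cpufreq_table_validate_and_show(policy, example_freq_table);
if (ret)
	return ret;	/* e.g. -EINVAL on duplicate entries */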
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1fa1a32928d7..9ec033b4f2d9 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -35,6 +35,7 @@
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
+#include <asm/intel-family.h>
#define ATOM_RATIOS 0x66a
#define ATOM_VIDS 0x66b
@@ -96,7 +97,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
* read from MPERF MSR between last and current sample
* @tsc: Difference of time stamp counter between last and
* current sample
- * @freq: Effective frequency calculated from APERF/MPERF
* @time: Current time from scheduler
*
* This structure is used in the cpudata structure to store performance sample
@@ -108,7 +108,6 @@ struct sample {
u64 aperf;
u64 mperf;
u64 tsc;
- int freq;
u64 time;
};
@@ -281,9 +280,9 @@ struct cpu_defaults {
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
-static struct pstate_adjust_policy pid_params;
-static struct pstate_funcs pstate_funcs;
-static int hwp_active;
+static struct pstate_adjust_policy pid_params __read_mostly;
+static struct pstate_funcs pstate_funcs __read_mostly;
+static int hwp_active __read_mostly;
#ifdef CONFIG_ACPI
static bool acpi_ppc;
@@ -807,7 +806,8 @@ static void __init intel_pstate_sysfs_expose_params(void)
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt as we don't process them */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
@@ -944,7 +944,7 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;
- tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
+ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
err = rdmsrl_safe(tdp_msr, &tdp_ratio);
if (err)
goto skip_tar;
@@ -972,7 +972,7 @@ static int core_get_turbo_pstate(void)
u64 value;
int nont, ret;
- rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+ rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
nont = core_get_max_pstate();
ret = (value) & 255;
if (ret <= nont)
@@ -1001,7 +1001,7 @@ static int knl_get_turbo_pstate(void)
u64 value;
int nont, ret;
- rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+ rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
nont = core_get_max_pstate();
ret = (((value) >> 8) & 0xFF);
if (ret <= nont)
@@ -1091,6 +1091,26 @@ static struct cpu_defaults knl_params = {
},
};
+static struct cpu_defaults bxt_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 60,
+ .p_gain_pct = 14,
+ .d_gain_pct = 0,
+ .i_gain_pct = 4,
+ },
+ .funcs = {
+ .get_max = core_get_max_pstate,
+ .get_max_physical = core_get_max_pstate_physical,
+ .get_min = core_get_min_pstate,
+ .get_turbo = core_get_turbo_pstate,
+ .get_scaling = core_get_scaling,
+ .get_val = core_get_val,
+ .get_target_pstate = get_target_pstate_use_cpu_load,
+ },
+};
+
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
int max_perf = cpu->pstate.turbo_pstate;
@@ -1113,17 +1133,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
-{
- trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
- cpu->pstate.current_pstate = pstate;
-}
-
static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
int pstate = cpu->pstate.min_pstate;
- intel_pstate_record_pstate(cpu, pstate);
+ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+ cpu->pstate.current_pstate = pstate;
/*
* Generally, there is no guarantee that this code will always run on
* the CPU being updated, so force the register update to run on the
@@ -1283,10 +1298,11 @@ static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
pstate = clamp_t(int, pstate, min_perf, max_perf);
+ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
if (pstate == cpu->pstate.current_pstate)
return;
- intel_pstate_record_pstate(cpu, pstate);
+ cpu->pstate.current_pstate = pstate;
wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
@@ -1334,29 +1350,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
(unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
- ICPU(0x2a, core_params),
- ICPU(0x2d, core_params),
- ICPU(0x37, silvermont_params),
- ICPU(0x3a, core_params),
- ICPU(0x3c, core_params),
- ICPU(0x3d, core_params),
- ICPU(0x3e, core_params),
- ICPU(0x3f, core_params),
- ICPU(0x45, core_params),
- ICPU(0x46, core_params),
- ICPU(0x47, core_params),
- ICPU(0x4c, airmont_params),
- ICPU(0x4e, core_params),
- ICPU(0x4f, core_params),
- ICPU(0x5e, core_params),
- ICPU(0x56, core_params),
- ICPU(0x57, knl_params),
+ ICPU(INTEL_FAM6_SANDYBRIDGE, core_params),
+ ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_params),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_params),
+ ICPU(INTEL_FAM6_IVYBRIDGE, core_params),
+ ICPU(INTEL_FAM6_HASWELL_CORE, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_CORE, core_params),
+ ICPU(INTEL_FAM6_IVYBRIDGE_X, core_params),
+ ICPU(INTEL_FAM6_HASWELL_X, core_params),
+ ICPU(INTEL_FAM6_HASWELL_ULT, core_params),
+ ICPU(INTEL_FAM6_HASWELL_GT3E, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_GT3E, core_params),
+ ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_params),
+ ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_X, core_params),
+ ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
+ ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_params),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_params),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
-static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
- ICPU(0x56, core_params),
+static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
+ ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
{}
};
@@ -1575,12 +1592,12 @@ static struct cpufreq_driver intel_pstate_driver = {
.name = "intel_pstate",
};
-static int __initdata no_load;
-static int __initdata no_hwp;
-static int __initdata hwp_only;
-static unsigned int force_load;
+static int no_load __initdata;
+static int no_hwp __initdata;
+static int hwp_only __initdata;
+static unsigned int force_load __initdata;
-static int intel_pstate_msrs_not_valid(void)
+static int __init intel_pstate_msrs_not_valid(void)
{
if (!pstate_funcs.get_max() ||
!pstate_funcs.get_min() ||
@@ -1590,7 +1607,7 @@ static int intel_pstate_msrs_not_valid(void)
return 0;
}
-static void copy_pid_params(struct pstate_adjust_policy *policy)
+static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
@@ -1601,7 +1618,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
pid_params.setpoint = policy->setpoint;
}
-static void copy_cpu_funcs(struct pstate_funcs *funcs)
+static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
pstate_funcs.get_max = funcs->get_max;
pstate_funcs.get_max_physical = funcs->get_max_physical;
@@ -1616,7 +1633,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
#ifdef CONFIG_ACPI
-static bool intel_pstate_no_acpi_pss(void)
+static bool __init intel_pstate_no_acpi_pss(void)
{
int i;
@@ -1645,7 +1662,7 @@ static bool intel_pstate_no_acpi_pss(void)
return true;
}
-static bool intel_pstate_has_acpi_ppc(void)
+static bool __init intel_pstate_has_acpi_ppc(void)
{
int i;
@@ -1673,7 +1690,7 @@ struct hw_vendor_info {
};
/* Hardware vendor-specific info that has its own power management modes */
-static struct hw_vendor_info vendor_info[] = {
+static struct hw_vendor_info vendor_info[] __initdata = {
{1, "HP ", "ProLiant", PSS},
{1, "ORACLE", "X4-2 ", PPC},
{1, "ORACLE", "X4-2L ", PPC},
@@ -1692,7 +1709,7 @@ static struct hw_vendor_info vendor_info[] = {
{0, "", ""},
};
-static bool intel_pstate_platform_pwr_mgmt_exists(void)
+static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
struct acpi_table_header hdr;
struct hw_vendor_info *v_info;
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index e920889b9ac2..ed915ee85dd9 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -70,7 +70,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
continue;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_err("Cannot get clock for CPU %d\n", cpu);
return PTR_ERR(clk);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index a7ecb9a84c15..3f0ce2ae35ee 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -555,8 +555,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq =
ioread32(&pcch_hdr->minimum_frequency) * 1000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-
pr_debug("init: policy->max is %d, policy->min is %d\n",
policy->max, policy->min);
out:
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 54c45368e3f1..87796e0864e9 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -64,12 +64,14 @@
/**
* struct global_pstate_info - Per policy data structure to maintain history of
* global pstates
- * @highest_lpstate: The local pstate from which we are ramping down
+ * @highest_lpstate_idx: The local pstate index from which we are
+ * ramping down
* @elapsed_time: Time in ms spent in ramping down from
- * highest_lpstate
+ * highest_lpstate_idx
* @last_sampled_time: Time from boot in ms when global pstates were
* last set
- * @last_lpstate,last_gpstate: Last set values for local and global pstates
+ * @last_lpstate_idx:		Last set local pstate, as a cpufreq table index
+ * @last_gpstate_idx:		Last set global pstate, as a cpufreq table index
* @timer: Is used for ramping down if cpu goes idle for
* a long time with global pstate held high
* @gpstate_lock: A spinlock to maintain synchronization between
@@ -77,11 +79,11 @@
* governor's target_index calls
*/
struct global_pstate_info {
- int highest_lpstate;
+ int highest_lpstate_idx;
unsigned int elapsed_time;
unsigned int last_sampled_time;
- int last_lpstate;
- int last_gpstate;
+ int last_lpstate_idx;
+ int last_gpstate_idx;
spinlock_t gpstate_lock;
struct timer_list timer;
};
@@ -124,29 +126,47 @@ static int nr_chips;
static DEFINE_PER_CPU(struct chip *, chip_info);
/*
- * Note: The set of pstates consists of contiguous integers, the
- * smallest of which is indicated by powernv_pstate_info.min, the
- * largest of which is indicated by powernv_pstate_info.max.
+ * Note:
+ * The set of pstates consists of contiguous integers.
+ * powernv_pstate_info stores the frequency table indices of the
+ * max, min and nominal frequencies. It also stores the number of
+ * available frequencies.
*
- * The nominal pstate is the highest non-turbo pstate in this
- * platform. This is indicated by powernv_pstate_info.nominal.
+ * powernv_pstate_info.nominal indicates the index to the highest
+ * non-turbo frequency.
*/
static struct powernv_pstate_info {
- int min;
- int max;
- int nominal;
- int nr_pstates;
+ unsigned int min;
+ unsigned int max;
+ unsigned int nominal;
+ unsigned int nr_pstates;
} powernv_pstate_info;
+/* Use following macros for conversions between pstate_id and index */
+static inline int idx_to_pstate(unsigned int i)
+{
+ return powernv_freqs[i].driver_data;
+}
+
+static inline unsigned int pstate_to_idx(int pstate)
+{
+ /*
+ * abs() is deliberately used so that it works with
+ * both monotonically increasing and decreasing
+ * pstate values
+ */
+ return abs(pstate - idx_to_pstate(powernv_pstate_info.max));
+}
+
static inline void reset_gpstates(struct cpufreq_policy *policy)
{
struct global_pstate_info *gpstates = policy->driver_data;
- gpstates->highest_lpstate = 0;
+ gpstates->highest_lpstate_idx = 0;
gpstates->elapsed_time = 0;
gpstates->last_sampled_time = 0;
- gpstates->last_lpstate = 0;
- gpstates->last_gpstate = 0;
+ gpstates->last_lpstate_idx = 0;
+ gpstates->last_gpstate_idx = 0;
}
/*
@@ -156,9 +176,10 @@ static inline void reset_gpstates(struct cpufreq_policy *policy)
static int init_powernv_pstates(void)
{
struct device_node *power_mgt;
- int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0;
+ int i, nr_pstates = 0;
const __be32 *pstate_ids, *pstate_freqs;
u32 len_ids, len_freqs;
+ u32 pstate_min, pstate_max, pstate_nominal;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
@@ -208,6 +229,7 @@ static int init_powernv_pstates(void)
return -ENODEV;
}
+ powernv_pstate_info.nr_pstates = nr_pstates;
pr_debug("NR PStates %d\n", nr_pstates);
for (i = 0; i < nr_pstates; i++) {
u32 id = be32_to_cpu(pstate_ids[i]);
@@ -216,15 +238,17 @@ static int init_powernv_pstates(void)
pr_debug("PState id %d freq %d MHz\n", id, freq);
powernv_freqs[i].frequency = freq * 1000; /* kHz */
powernv_freqs[i].driver_data = id;
+
+ if (id == pstate_max)
+ powernv_pstate_info.max = i;
+ else if (id == pstate_nominal)
+ powernv_pstate_info.nominal = i;
+ else if (id == pstate_min)
+ powernv_pstate_info.min = i;
}
+
/* End of list marker entry */
powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
-
- powernv_pstate_info.min = pstate_min;
- powernv_pstate_info.max = pstate_max;
- powernv_pstate_info.nominal = pstate_nominal;
- powernv_pstate_info.nr_pstates = nr_pstates;
-
return 0;
}
@@ -233,12 +257,12 @@ static unsigned int pstate_id_to_freq(int pstate_id)
{
int i;
- i = powernv_pstate_info.max - pstate_id;
+ i = pstate_to_idx(pstate_id);
if (i >= powernv_pstate_info.nr_pstates || i < 0) {
pr_warn("PState id %d outside of PState table, "
"reporting nominal id %d instead\n",
- pstate_id, powernv_pstate_info.nominal);
- i = powernv_pstate_info.max - powernv_pstate_info.nominal;
+ pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
+ i = powernv_pstate_info.nominal;
}
return powernv_freqs[i].frequency;
@@ -252,7 +276,7 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
char *buf)
{
return sprintf(buf, "%u\n",
- pstate_id_to_freq(powernv_pstate_info.nominal));
+ powernv_freqs[powernv_pstate_info.nominal].frequency);
}
struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
@@ -426,7 +450,7 @@ static void set_pstate(void *data)
*/
static inline unsigned int get_nominal_index(void)
{
- return powernv_pstate_info.max - powernv_pstate_info.nominal;
+ return powernv_pstate_info.nominal;
}
static void powernv_cpufreq_throttle_check(void *data)
@@ -435,20 +459,22 @@ static void powernv_cpufreq_throttle_check(void *data)
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
int pmsr_pmax;
+ unsigned int pmsr_pmax_idx;
pmsr = get_pmspr(SPRN_PMSR);
chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
pmsr_pmax = (s8)PMSR_MAX(pmsr);
- if (pmsr_pmax != powernv_pstate_info.max) {
+ pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
+ if (pmsr_pmax_idx != powernv_pstate_info.max) {
if (chip->throttled)
goto next;
chip->throttled = true;
- if (pmsr_pmax < powernv_pstate_info.nominal) {
- pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+ if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
+ pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n",
cpu, chip->id, pmsr_pmax,
- powernv_pstate_info.nominal);
+ idx_to_pstate(powernv_pstate_info.nominal));
chip->throttle_sub_turbo++;
} else {
chip->throttle_turbo++;
@@ -484,34 +510,35 @@ next:
/**
* calc_global_pstate - Calculate global pstate
- * @elapsed_time: Elapsed time in milliseconds
- * @local_pstate: New local pstate
- * @highest_lpstate: pstate from which its ramping down
+ * @elapsed_time: Elapsed time in milliseconds
+ * @local_pstate_idx: New local pstate index
+ * @highest_lpstate_idx: pstate index from which it's ramping down
*
* Finds the appropriate global pstate based on the pstate from which it's
* ramping down and the time elapsed in ramping down. It follows a quadratic
* equation which ensures that it reaches ramping down to pmin in 5sec.
*/
static inline int calc_global_pstate(unsigned int elapsed_time,
- int highest_lpstate, int local_pstate)
+ int highest_lpstate_idx,
+ int local_pstate_idx)
{
- int pstate_diff;
+ int index_diff;
/*
* Using ramp_down_percent we get the percentage of rampdown
* that we are expecting to be dropping. Difference between
- * highest_lpstate and powernv_pstate_info.min will give a absolute
+ * highest_lpstate_idx and powernv_pstate_info.min will give an absolute
* number of how many pstates we will drop eventually by the end of
* 5 seconds, then just scale it to get the number of pstates to be dropped.
*/
- pstate_diff = ((int)ramp_down_percent(elapsed_time) *
- (highest_lpstate - powernv_pstate_info.min)) / 100;
+ index_diff = ((int)ramp_down_percent(elapsed_time) *
+ (powernv_pstate_info.min - highest_lpstate_idx)) / 100;
/* Ensure that global pstate is >= to local pstate */
- if (highest_lpstate - pstate_diff < local_pstate)
- return local_pstate;
+ if (highest_lpstate_idx + index_diff >= local_pstate_idx)
+ return local_pstate_idx;
else
- return highest_lpstate - pstate_diff;
+ return highest_lpstate_idx + index_diff;
}
static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
@@ -530,8 +557,7 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
else
timer_interval = GPSTATE_TIMER_INTERVAL;
- mod_timer_pinned(&gpstates->timer, jiffies +
- msecs_to_jiffies(timer_interval));
+ mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
}
/**
@@ -547,7 +573,7 @@ void gpstate_timer_handler(unsigned long data)
{
struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
struct global_pstate_info *gpstates = policy->driver_data;
- int gpstate_id;
+ int gpstate_idx;
unsigned int time_diff = jiffies_to_msecs(jiffies)
- gpstates->last_sampled_time;
struct powernv_smp_call_data freq_data;
@@ -557,29 +583,29 @@ void gpstate_timer_handler(unsigned long data)
gpstates->last_sampled_time += time_diff;
gpstates->elapsed_time += time_diff;
- freq_data.pstate_id = gpstates->last_lpstate;
+ freq_data.pstate_id = idx_to_pstate(gpstates->last_lpstate_idx);
- if ((gpstates->last_gpstate == freq_data.pstate_id) ||
+ if ((gpstates->last_gpstate_idx == gpstates->last_lpstate_idx) ||
(gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
- gpstate_id = freq_data.pstate_id;
+ gpstate_idx = pstate_to_idx(freq_data.pstate_id);
reset_gpstates(policy);
- gpstates->highest_lpstate = freq_data.pstate_id;
+ gpstates->highest_lpstate_idx = gpstate_idx;
} else {
- gpstate_id = calc_global_pstate(gpstates->elapsed_time,
- gpstates->highest_lpstate,
- freq_data.pstate_id);
+ gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
+ gpstates->highest_lpstate_idx,
+ freq_data.pstate_id);
}
/*
* If local pstate is equal to global pstate, rampdown is over
* So timer is not required to be queued.
*/
- if (gpstate_id != freq_data.pstate_id)
+ if (gpstate_idx != gpstates->last_lpstate_idx)
queue_gpstate_timer(gpstates);
- freq_data.gpstate_id = gpstate_id;
- gpstates->last_gpstate = freq_data.gpstate_id;
- gpstates->last_lpstate = freq_data.pstate_id;
+ freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
+ gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id);
+ gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id);
spin_unlock(&gpstates->gpstate_lock);
@@ -596,7 +622,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
unsigned int new_index)
{
struct powernv_smp_call_data freq_data;
- unsigned int cur_msec, gpstate_id;
+ unsigned int cur_msec, gpstate_idx;
struct global_pstate_info *gpstates = policy->driver_data;
if (unlikely(rebooting) && new_index != get_nominal_index())
@@ -608,15 +634,15 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
cur_msec = jiffies_to_msecs(get_jiffies_64());
spin_lock(&gpstates->gpstate_lock);
- freq_data.pstate_id = powernv_freqs[new_index].driver_data;
+ freq_data.pstate_id = idx_to_pstate(new_index);
if (!gpstates->last_sampled_time) {
- gpstate_id = freq_data.pstate_id;
- gpstates->highest_lpstate = freq_data.pstate_id;
+ gpstate_idx = new_index;
+ gpstates->highest_lpstate_idx = new_index;
goto gpstates_done;
}
- if (gpstates->last_gpstate > freq_data.pstate_id) {
+ if (gpstates->last_gpstate_idx < new_index) {
gpstates->elapsed_time += cur_msec -
gpstates->last_sampled_time;
@@ -627,34 +653,34 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
*/
if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
reset_gpstates(policy);
- gpstates->highest_lpstate = freq_data.pstate_id;
- gpstate_id = freq_data.pstate_id;
+ gpstates->highest_lpstate_idx = new_index;
+ gpstate_idx = new_index;
} else {
/* Elapsed_time is less than 5 seconds, continue to rampdown */
- gpstate_id = calc_global_pstate(gpstates->elapsed_time,
- gpstates->highest_lpstate,
- freq_data.pstate_id);
+ gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
+ gpstates->highest_lpstate_idx,
+ new_index);
}
} else {
reset_gpstates(policy);
- gpstates->highest_lpstate = freq_data.pstate_id;
- gpstate_id = freq_data.pstate_id;
+ gpstates->highest_lpstate_idx = new_index;
+ gpstate_idx = new_index;
}
/*
* If local pstate is equal to global pstate, rampdown is over
* So timer is not required to be queued.
*/
- if (gpstate_id != freq_data.pstate_id)
+ if (gpstate_idx != new_index)
queue_gpstate_timer(gpstates);
else
del_timer_sync(&gpstates->timer);
gpstates_done:
- freq_data.gpstate_id = gpstate_id;
+ freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
gpstates->last_sampled_time = cur_msec;
- gpstates->last_gpstate = freq_data.gpstate_id;
- gpstates->last_lpstate = freq_data.pstate_id;
+ gpstates->last_gpstate_idx = gpstate_idx;
+ gpstates->last_lpstate_idx = new_index;
spin_unlock(&gpstates->gpstate_lock);
@@ -699,7 +725,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = gpstates;
/* initialize timer */
- init_timer_deferrable(&gpstates->timer);
+ init_timer_pinned_deferrable(&gpstates->timer);
gpstates->timer.data = (unsigned long)policy;
gpstates->timer.function = gpstate_timer_handler;
gpstates->timer.expires = jiffies +
@@ -760,9 +786,7 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
struct cpufreq_policy policy;
cpufreq_get_policy(&policy, cpu);
- cpufreq_frequency_table_target(&policy, policy.freq_table,
- policy.cur,
- CPUFREQ_RELATION_C, &index);
+ index = cpufreq_table_find_index_c(&policy, policy.cur);
powernv_cpufreq_target_index(&policy, index);
cpumask_andnot(&mask, &mask, policy.cpus);
}
@@ -848,8 +872,8 @@ static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
struct powernv_smp_call_data freq_data;
struct global_pstate_info *gpstates = policy->driver_data;
- freq_data.pstate_id = powernv_pstate_info.min;
- freq_data.gpstate_id = powernv_pstate_info.min;
+ freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
+ freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
del_timer_sync(&gpstates->timer);
}
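/*
 * Worked sketch of the idx_to_pstate()/pstate_to_idx() conversion used
 * throughout this file (the concrete ids are made up): if the contiguous
 * pstate ids run -5..0 with the fastest pstate 0 stored at table index 0,
 * then pstate_to_idx(-3) = abs(-3 - 0) = 3, and idx_to_pstate(3) reads the
 * id back out of powernv_freqs[3].driver_data. abs() keeps the formula
 * valid whether the ids increase or decrease along the table.
 */
static void example_roundtrip_check(unsigned int i)
{
	WARN_ON(pstate_to_idx(idx_to_pstate(i)) != i);
}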
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index 7c4cd5c634f2..dc112481a408 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -94,7 +94,7 @@ static int pmi_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- struct cpufreq_frequency_table *cbe_freqs;
+ struct cpufreq_frequency_table *cbe_freqs = policy->freq_table;
u8 node;
/* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
@@ -103,7 +103,6 @@ static int pmi_notifier(struct notifier_block *nb,
if (event == CPUFREQ_START)
return 0;
- cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
node = cbe_cpu_to_node(policy->cpu);
pr_debug("got notified, event=%lu, node=%u\n", event, node);
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index ae8eaed77b70..7b596fa38ad2 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -293,12 +293,8 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
__func__, policy, target_freq, relation);
if (ftab) {
- if (cpufreq_frequency_table_target(policy, ftab,
- target_freq, relation,
- &index)) {
- s3c_freq_dbg("%s: table failed\n", __func__);
- return -EINVAL;
- }
+ index = cpufreq_frequency_table_target(policy, target_freq,
+ relation);
s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
target_freq, index, ftab[index].frequency);
@@ -315,7 +311,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
pll = NULL;
} else {
struct cpufreq_policy tmp_policy;
- int ret;
/* we keep the cpu pll table in Hz, to ensure we get an
* accurate value for the PLL output. */
@@ -323,20 +318,14 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
tmp_policy.min = policy->min * 1000;
tmp_policy.max = policy->max * 1000;
tmp_policy.cpu = policy->cpu;
+ tmp_policy.freq_table = pll_reg;
- /* cpufreq_frequency_table_target uses a pointer to 'index'
- * which is the number of the table entry, not the value of
+ /* cpufreq_frequency_table_target returns the index
+ * of the table entry, not the value of
* the table entry's index field. */
- ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg,
- target_freq, relation,
- &index);
-
- if (ret < 0) {
- pr_err("%s: no PLL available\n", __func__);
- goto err_notpossible;
- }
-
+ index = cpufreq_frequency_table_target(&tmp_policy, target_freq,
+ relation);
pll = pll_reg + index;
s3c_freq_dbg("%s: target %u => %u\n",
@@ -346,10 +335,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
}
return s3c_cpufreq_settarget(policy, target_freq, pll);
-
- err_notpossible:
- pr_err("no compatible settings for %d\n", target_freq);
- return -EINVAL;
}
struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
@@ -571,11 +556,7 @@ static int s3c_cpufreq_build_freq(void)
{
int size, ret;
- if (!cpu_cur.info->calc_freqtable)
- return -EINVAL;
-
kfree(ftab);
- ftab = NULL;
size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
size++;
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 06d85917b6d5..9e07588ea9f5 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -246,12 +246,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
- if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
- old_freq, CPUFREQ_RELATION_H,
- &priv_index)) {
- ret = -EINVAL;
- goto exit;
- }
+ priv_index = cpufreq_table_find_index_h(policy, old_freq);
arm_volt = dvs_conf[index].arm_volt;
int_volt = dvs_conf[index].int_volt;
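/*
 * Sketch of the API change applied in the cpufreq hunks above
 * (example_pick_index is a hypothetical caller): the old
 * cpufreq_frequency_table_target() took the table and an out-parameter
 * and could fail, while the newer helpers return the index directly,
 * with the _h/_l/_c suffix selecting the highest frequency at or below,
 * the lowest at or above, or the closest match to the target.
 */
static int example_pick_index(struct cpufreq_policy *policy,
			      unsigned int target_khz)
{
	return cpufreq_table_find_index_h(policy, target_khz);
}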
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index e342565e8715..4ba3d3fe142f 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -36,26 +36,12 @@
static int arm_enter_idle_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
- int ret;
-
- if (!idx) {
- cpu_do_idle();
- return idx;
- }
-
- ret = cpu_pm_enter();
- if (!ret) {
- /*
- * Pass idle state index to cpu_suspend which in turn will
- * call the CPU ops suspend protocol with idle index as a
- * parameter.
- */
- ret = arm_cpuidle_suspend(idx);
-
- cpu_pm_exit();
- }
-
- return ret ? -1 : idx;
+ /*
+ * Pass idle state index to arm_cpuidle_suspend which in turn
+ * will call the CPU ops suspend protocol with idle index as a
+ * parameter.
+ */
+ return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
}
static struct cpuidle_driver arm_idle_driver = {
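/*
 * For reference, CPU_PM_CPU_IDLE_ENTER(fn, idx) factors out the pattern
 * that the removed lines open-coded; its behaviour is roughly equivalent
 * to the sketch below (not the exact macro body):
 */
static int example_idle_enter(int idx)
{
	int ret;

	if (!idx) {		/* state 0 is plain WFI, no PM notifiers */
		cpu_do_idle();
		return idx;
	}

	ret = cpu_pm_enter();
	if (!ret) {
		ret = arm_cpuidle_suspend(idx);
		cpu_pm_exit();
	}

	return ret ? -1 : idx;
}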
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index a4d0059e232c..c73207abb5a4 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
struct cpuidle_state *target_state = &drv->states[index];
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
- u64 time_start, time_end;
+ ktime_t time_start, time_end;
s64 diff;
/*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
sched_idle_set_state(target_state);
trace_cpu_idle_rcuidle(index, dev->cpu);
- time_start = local_clock();
+ time_start = ns_to_ktime(local_clock());
stop_critical_timings();
entered_state = target_state->enter(dev, drv, index);
start_critical_timings();
- time_end = local_clock();
+ time_end = ns_to_ktime(local_clock());
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
@@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
- /*
- * local_clock() returns the time in nanosecond, let's shift
- * by 10 (divide by 1024) to have microsecond based time.
- */
- diff = (time_end - time_start) >> 10;
+ diff = ktime_us_delta(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;
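/*
 * Why the ktime conversion above matters: the old shift approximated a
 * divide-by-1000 with a divide-by-1024, under-reporting the measured
 * residency by about 2.4%. A sketch of the two computations:
 */
static s64 example_idle_us(ktime_t start, ktime_t end)
{
	/* old: diff = (end_ns - start_ns) >> 10;  i.e. /1024, not /1000 */
	return ktime_us_delta(end, start);	/* exact microseconds */
}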
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index d77ba2f12242..1af94e2d1a25 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -159,6 +159,19 @@ config CRYPTO_GHASH_S390
It is available as of z196.
+config CRYPTO_CRC32_S390
+ tristate "CRC-32 algorithms"
+ depends on S390
+ select CRYPTO_HASH
+ select CRC32
+ help
+ Select this option if you want to use hardware accelerated
+ implementations of CRC algorithms. With this option, you
+ can optimize the computation of CRC-32 (IEEE 802.3 Ethernet)
+ and CRC-32C (Castagnoli).
+
+ It is available with IBM z13 or later.
+
config CRYPTO_DEV_MV_CESA
tristate "Marvell's Cryptographic Engine"
depends on PLAT_ORION
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 95b73968cf72..10db7df366c8 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -588,11 +588,6 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- return -ENOENT;
- }
-
crc->regs = devm_ioremap_resource(dev, res);
if (IS_ERR((void *)crc->regs)) {
dev_err(&pdev->dev, "Cannot map CRC IO\n");
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 5652a53415dc..64bf3024b680 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,6 @@
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore driver backend"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -99,6 +99,18 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
To compile this as a module, choose M here: the module
will be called caamhash.
+config CRYPTO_DEV_FSL_CAAM_PKC_API
+ tristate "Register public key cryptography implementations with Crypto API"
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select CRYPTO_RSA
+ help
+ Selecting this will enable SEC public key support for RSA.
+ Supported cryptographic primitives: encryption, decryption,
+ signature and verification.
+ To compile this as a module, choose M here: the module
+ will be called caam_pkc.
+
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
@@ -116,10 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_IMX
def_bool SOC_IMX6 || SOC_IMX7D
depends on CRYPTO_DEV_FSL_CAAM
-config CRYPTO_DEV_FSL_CAAM_LE
- def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A
- depends on CRYPTO_DEV_FSL_CAAM
-
config CRYPTO_DEV_FSL_CAAM_DEBUG
bool "Enable debug output in CAAM driver"
depends on CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 550758a333e7..08bf5515ae8a 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -2,7 +2,7 @@
# Makefile for the CAAM backend and dependent components
#
ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
- EXTRA_CFLAGS := -DDEBUG
+ ccflags-y := -DDEBUG
endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
@@ -10,6 +10,8 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
caam-objs := ctrl.o
caam_jr-objs := jr.o key_gen.o error.o
+caam_pkc-y := caampkc.o pkc_desc.o
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 5845d4a08797..f1ecc8df8d41 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -847,7 +847,7 @@ static int ahash_update_ctx(struct ahash_request *req)
*next_buflen, 0);
} else {
(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
- SEC4_SG_LEN_FIN;
+ cpu_to_caam32(SEC4_SG_LEN_FIN);
}
state->current_buf = !state->current_buf;
@@ -949,7 +949,8 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
+ cpu_to_caam32(SEC4_SG_LEN_FIN);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
new file mode 100644
index 000000000000..851015e652b8
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.c
@@ -0,0 +1,607 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "jr.h"
+#include "error.h"
+#include "desc_constr.h"
+#include "sg_sw_sec4.h"
+#include "caampkc.h"
+
+#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
+#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
+ sizeof(struct rsa_priv_f1_pdb))
+
+static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
+ struct akcipher_request *req)
+{
+ dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+
+ if (edesc->sec4_sg_bytes)
+ dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
+ DMA_TO_DEVICE);
+}
+
+static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
+ struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
+
+ dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
+}
+
+static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
+ struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
+
+ dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+}
+
+/* RSA Job Completion handler */
+static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
+{
+ struct akcipher_request *req = context;
+ struct rsa_edesc *edesc;
+
+ if (err)
+ caam_jr_strstatus(dev, err);
+
+ edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+ rsa_pub_unmap(dev, edesc, req);
+ rsa_io_unmap(dev, edesc, req);
+ kfree(edesc);
+
+ akcipher_request_complete(req, err);
+}
+
+static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
+{
+ struct akcipher_request *req = context;
+ struct rsa_edesc *edesc;
+
+ if (err)
+ caam_jr_strstatus(dev, err);
+
+ edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+ rsa_priv_f1_unmap(dev, edesc, req);
+ rsa_io_unmap(dev, edesc, req);
+ kfree(edesc);
+
+ akcipher_request_complete(req, err);
+}
+
+static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
+ size_t desclen)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct rsa_edesc *edesc;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int sgc;
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ int src_nents, dst_nents;
+
+ src_nents = sg_nents_for_len(req->src, req->src_len);
+ dst_nents = sg_nents_for_len(req->dst, req->dst_len);
+
+ if (src_nents > 1)
+ sec4_sg_len = src_nents;
+ if (dst_nents > 1)
+ sec4_sg_len += dst_nents;
+
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc, hw desc commands and link tables */
+ edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
+ GFP_DMA | flags);
+ if (!edesc)
+ return ERR_PTR(-ENOMEM);
+
+ sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(dev, "unable to map source\n");
+ goto src_fail;
+ }
+
+ sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(dev, "unable to map destination\n");
+ goto dst_fail;
+ }
+
+ edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
+
+ sec4_sg_index = 0;
+ if (src_nents > 1) {
+ sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ sec4_sg_index += src_nents;
+ }
+ if (dst_nents > 1)
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+
+ /* Save nents for later use in Job Descriptor */
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+
+ if (!sec4_sg_bytes)
+ return edesc;
+
+ edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ goto sec4_sg_fail;
+ }
+
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+ return edesc;
+
+sec4_sg_fail:
+ dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+dst_fail:
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+src_fail:
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int set_rsa_pub_pdb(struct akcipher_request *req,
+ struct rsa_edesc *edesc)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct device *dev = ctx->dev;
+ struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
+ int sec4_sg_index = 0;
+
+ pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->n_dma)) {
+ dev_err(dev, "Unable to map RSA modulus memory\n");
+ return -ENOMEM;
+ }
+
+ pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->e_dma)) {
+ dev_err(dev, "Unable to map RSA public exponent memory\n");
+ dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ if (edesc->src_nents > 1) {
+ pdb->sgf |= RSA_PDB_SGF_F;
+ pdb->f_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->src_nents;
+ } else {
+ pdb->f_dma = sg_dma_address(req->src);
+ }
+
+ if (edesc->dst_nents > 1) {
+ pdb->sgf |= RSA_PDB_SGF_G;
+ pdb->g_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ } else {
+ pdb->g_dma = sg_dma_address(req->dst);
+ }
+
+ pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
+ pdb->f_len = req->src_len;
+
+ return 0;
+}
+
+static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
+ struct rsa_edesc *edesc)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct device *dev = ctx->dev;
+ struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
+ int sec4_sg_index = 0;
+
+ pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->n_dma)) {
+ dev_err(dev, "Unable to map modulus memory\n");
+ return -ENOMEM;
+ }
+
+ pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->d_dma)) {
+ dev_err(dev, "Unable to map RSA private exponent memory\n");
+ dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ if (edesc->src_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+ pdb->g_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->src_nents;
+ } else {
+ pdb->g_dma = sg_dma_address(req->src);
+ }
+
+ if (edesc->dst_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+ pdb->f_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ } else {
+ pdb->f_dma = sg_dma_address(req->dst);
+ }
+
+ pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
+
+ return 0;
+}
+
+static int caam_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct device *jrdev = ctx->dev;
+ struct rsa_edesc *edesc;
+ int ret;
+
+ if (unlikely(!key->n || !key->e))
+ return -EINVAL;
+
+ if (req->dst_len < key->n_sz) {
+ req->dst_len = key->n_sz;
+ dev_err(jrdev, "Output buffer length less than parameter n\n");
+ return -EOVERFLOW;
+ }
+
+ /* Allocate extended descriptor */
+ edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Set RSA Encrypt Protocol Data Block */
+ ret = set_rsa_pub_pdb(req, edesc);
+ if (ret)
+ goto init_fail;
+
+ /* Initialize Job Descriptor */
+ init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
+
+ ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
+ if (!ret)
+ return -EINPROGRESS;
+
+ rsa_pub_unmap(jrdev, edesc, req);
+
+init_fail:
+ rsa_io_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ return ret;
+}
+
+static int caam_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct device *jrdev = ctx->dev;
+ struct rsa_edesc *edesc;
+ int ret;
+
+ if (unlikely(!key->n || !key->d))
+ return -EINVAL;
+
+ if (req->dst_len < key->n_sz) {
+ req->dst_len = key->n_sz;
+ dev_err(jrdev, "Output buffer length less than parameter n\n");
+ return -EOVERFLOW;
+ }
+
+ /* Allocate extended descriptor */
+ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
+ ret = set_rsa_priv_f1_pdb(req, edesc);
+ if (ret)
+ goto init_fail;
+
+ /* Initialize Job Descriptor */
+ init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
+
+ ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
+ if (!ret)
+ return -EINPROGRESS;
+
+ rsa_priv_f1_unmap(jrdev, edesc, req);
+
+init_fail:
+ rsa_io_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ return ret;
+}
+
+static void caam_rsa_free_key(struct caam_rsa_key *key)
+{
+ kzfree(key->d);
+ kfree(key->e);
+ kfree(key->n);
+ key->d = NULL;
+ key->e = NULL;
+ key->n = NULL;
+ key->d_sz = 0;
+ key->e_sz = 0;
+ key->n_sz = 0;
+}
+
+/**
+ * caam_read_raw_data - Read a raw byte stream as a positive integer.
+ * The function skips the buffer's leading zeros, copies the remaining data
+ * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
+ * the address of the new buffer.
+ *
+ * @buf : The data to read
+ * @nbytes: The amount of data to read
+ */
+static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
+{
+ u8 *val;
+
+ while (!*buf && *nbytes) {
+ buf++;
+ (*nbytes)--;
+ }
+
+ val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
+ if (!val)
+ return NULL;
+
+ memcpy(val, buf, *nbytes);
+
+ return val;
+}
+
+static int caam_rsa_check_key_length(unsigned int len)
+{
+ if (len > 4096)
+ return -EINVAL;
+ return 0;
+}
+
+static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key = {0};
+ struct caam_rsa_key *rsa_key = &ctx->key;
+ int ret;
+
+ /* Free the old RSA key if any */
+ caam_rsa_free_key(rsa_key);
+
+ ret = rsa_parse_pub_key(&raw_key, key, keylen);
+ if (ret)
+ return ret;
+
+ /* Copy key in DMA zone */
+ rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ if (!rsa_key->e)
+ goto err;
+
+ /*
+ * Skip leading zeros and copy the positive integer to a buffer
+ * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+ * expects a positive integer for the RSA modulus and uses its length as
+ * decryption output length.
+ */
+ rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
+ if (!rsa_key->n)
+ goto err;
+
+ if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
+ caam_rsa_free_key(rsa_key);
+ return -EINVAL;
+ }
+
+ rsa_key->e_sz = raw_key.e_sz;
+ rsa_key->n_sz = raw_key.n_sz;
+
+ memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
+
+ return 0;
+err:
+ caam_rsa_free_key(rsa_key);
+ return -ENOMEM;
+}
+
+static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key = {0};
+ struct caam_rsa_key *rsa_key = &ctx->key;
+ int ret;
+
+ /* Free the old RSA key if any */
+ caam_rsa_free_key(rsa_key);
+
+ ret = rsa_parse_priv_key(&raw_key, key, keylen);
+ if (ret)
+ return ret;
+
+ /* Copy key in DMA zone */
+ rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
+ if (!rsa_key->d)
+ goto err;
+
+ rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ if (!rsa_key->e)
+ goto err;
+
+ /*
+ * Skip leading zeros and copy the positive integer to a buffer
+ * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+ * expects a positive integer for the RSA modulus and uses its length as
+ * decryption output length.
+ */
+ rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
+ if (!rsa_key->n)
+ goto err;
+
+ if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
+ caam_rsa_free_key(rsa_key);
+ return -EINVAL;
+ }
+
+ rsa_key->d_sz = raw_key.d_sz;
+ rsa_key->e_sz = raw_key.e_sz;
+ rsa_key->n_sz = raw_key.n_sz;
+
+ memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
+ memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
+
+ return 0;
+
+err:
+ caam_rsa_free_key(rsa_key);
+ return -ENOMEM;
+}
+
+static int caam_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+
+ return (key->n) ? key->n_sz : -EINVAL;
+}
+
+/* Per session pkc's driver context creation function */
+static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ ctx->dev = caam_jr_alloc();
+
+ if (IS_ERR(ctx->dev)) {
+ dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->dev);
+ }
+
+ return 0;
+}
+
+/* Per session pkc's driver context cleanup function */
+static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+
+ caam_rsa_free_key(key);
+ caam_jr_free(ctx->dev);
+}
+
+static struct akcipher_alg caam_rsa = {
+ .encrypt = caam_rsa_enc,
+ .decrypt = caam_rsa_dec,
+ .sign = caam_rsa_dec,
+ .verify = caam_rsa_enc,
+ .set_pub_key = caam_rsa_set_pub_key,
+ .set_priv_key = caam_rsa_set_priv_key,
+ .max_size = caam_rsa_max_size,
+ .init = caam_rsa_init_tfm,
+ .exit = caam_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "rsa-caam",
+ .cra_priority = 3000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct caam_rsa_ctx),
+ },
+};
+
+/* Public Key Cryptography module initialization handler */
+static int __init caam_pkc_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ struct caam_drv_private *priv;
+ u32 cha_inst, pk_inst;
+ int err;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
+
+ /* Determine public key hardware accelerator presence. */
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+
+ /* Do not register algorithms if PKHA is not present. */
+ if (!pk_inst)
+ return -ENODEV;
+
+ err = crypto_register_akcipher(&caam_rsa);
+ if (err)
+ dev_warn(ctrldev, "%s alg registration failed\n",
+ caam_rsa.base.cra_driver_name);
+ else
+ dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+
+ return err;
+}
+
+static void __exit caam_pkc_exit(void)
+{
+ crypto_unregister_akcipher(&caam_rsa);
+}
+
+module_init(caam_pkc_init);
+module_exit(caam_pkc_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
new file mode 100644
index 000000000000..f595d159b112
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.h
@@ -0,0 +1,70 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+
+#ifndef _PKC_DESC_H_
+#define _PKC_DESC_H_
+#include "compat.h"
+#include "pdb.h"
+
+/**
+ * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
+ * @n : RSA modulus raw byte stream
+ * @e : RSA public exponent raw byte stream
+ * @d : RSA private exponent raw byte stream
+ * @n_sz : length in bytes of RSA modulus n
+ * @e_sz : length in bytes of RSA public exponent
+ * @d_sz : length in bytes of RSA private exponent
+ */
+struct caam_rsa_key {
+ u8 *n;
+ u8 *e;
+ u8 *d;
+ size_t n_sz;
+ size_t e_sz;
+ size_t d_sz;
+};
+
+/**
+ * caam_rsa_ctx - per session context.
+ * @key : RSA key in DMA zone
+ * @dev : device structure
+ */
+struct caam_rsa_ctx {
+ struct caam_rsa_key key;
+ struct device *dev;
+};
+
+/**
+ * rsa_edesc - s/w-extended rsa descriptor
+ * @src_nents : number of segments in input scatterlist
+ * @dst_nents : number of segments in output scatterlist
+ * @sec4_sg_bytes : length of h/w link table
+ * @sec4_sg_dma : dma address of h/w link table
+ * @sec4_sg : pointer to h/w link table
+ * @pdb : specific RSA Protocol Data Block (PDB)
+ * @hw_desc : descriptor followed by link tables if any
+ */
+struct rsa_edesc {
+ int src_nents;
+ int dst_nents;
+ int sec4_sg_bytes;
+ dma_addr_t sec4_sg_dma;
+ struct sec4_sg_entry *sec4_sg;
+ union {
+ struct rsa_pub_pdb pub;
+ struct rsa_priv_f1_pdb priv_f1;
+ } pdb;
+ u32 hw_desc[];
+};
+
+/* Descriptor construction primitives. */
+void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
+void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
+
+#endif
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index b6955ecdfb3f..7149cd2492e0 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -35,8 +35,11 @@
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
+#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 5ad5f3009ae0..0ec112ee5204 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -15,6 +15,9 @@
#include "desc_constr.h"
#include "error.h"
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+
/*
* i.MX targets tend to have clock control subsystems that can
* enable/disable clocking to our device.
@@ -106,7 +109,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
if (ctrlpriv->virt_en == 1) {
- setbits32(&ctrl->deco_rsr, DECORSR_JR0);
+ clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
--timeout)
@@ -115,7 +118,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
timeout = 100000;
}
- setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+ clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);
while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
--timeout)
@@ -123,12 +126,12 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
if (!timeout) {
dev_err(ctrldev, "failed to acquire DECO 0\n");
- clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+ clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
return -ENODEV;
}
for (i = 0; i < desc_len(desc); i++)
- wr_reg32(&deco->descbuf[i], *(desc + i));
+ wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));
flags = DECO_JQCR_WHL;
/*
@@ -139,7 +142,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
flags |= DECO_JQCR_FOUR;
/* Instruct the DECO to execute it */
- setbits32(&deco->jr_ctl_hi, flags);
+ clrsetbits_32(&deco->jr_ctl_hi, 0, flags);
timeout = 10000000;
do {
@@ -158,10 +161,10 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
DECO_OP_STATUS_HI_ERR_MASK;
if (ctrlpriv->virt_en == 1)
- clrbits32(&ctrl->deco_rsr, DECORSR_JR0);
+ clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);
/* Mark the DECO as free */
- clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+ clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
if (!timeout)
return -EAGAIN;
@@ -349,7 +352,7 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
r4tst = &ctrl->r4tst[0];
/* put RNG4 into program mode */
- setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);
/*
* Performance-wise, it does not make sense to
@@ -363,7 +366,7 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
>> RTSDCTL_ENT_DLY_SHIFT;
if (ent_delay <= val) {
/* put RNG4 into run mode */
- clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
return;
}
@@ -381,9 +384,9 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
* select raw sampling in both entropy shifter
* and statistical checker
*/
- setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC);
+ clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
/* put RNG4 into run mode */
- clrbits32(&val, RTMCTL_PRGM);
+ clrsetbits_32(&val, RTMCTL_PRGM, 0);
/* write back the control register */
wr_reg32(&r4tst->rtmctl, val);
}
@@ -406,6 +409,23 @@ int caam_get_era(void)
}
EXPORT_SYMBOL(caam_get_era);
+#ifdef CONFIG_DEBUG_FS
+static int caam_debugfs_u64_get(void *data, u64 *val)
+{
+ *val = caam64_to_cpu(*(u64 *)data);
+ return 0;
+}
+
+static int caam_debugfs_u32_get(void *data, u64 *val)
+{
+ *val = caam32_to_cpu(*(u32 *)data);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
+#endif
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -504,6 +524,10 @@ static int caam_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto disable_caam_emi_slow;
}
+
+ caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
+ (CSTA_PLEND | CSTA_ALT_PLEND));
+
/* Finding the page size for using the CTPR_MS register */
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
@@ -559,9 +583,9 @@ static int caam_probe(struct platform_device *pdev)
}
if (ctrlpriv->virt_en == 1)
- setbits32(&ctrl->jrstart, JRSTART_JR0_START |
- JRSTART_JR1_START | JRSTART_JR2_START |
- JRSTART_JR3_START);
+ clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
+ JRSTART_JR1_START | JRSTART_JR2_START |
+ JRSTART_JR3_START);
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
@@ -693,7 +717,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
/* Enable RDB bit so that RNG works faster */
- setbits32(&ctrl->scfgr, SCFGR_RDBENABLE);
+ clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
}
/* NOTE: RTIC detection ought to go here, around Si time */
@@ -719,48 +743,59 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
/* Controller-level - performance monitor counters */
+
ctrlpriv->ctl_rq_dequeued =
- debugfs_create_u64("rq_dequeued",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->req_dequeued);
+ debugfs_create_file("rq_dequeued",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->req_dequeued,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ob_enc_req =
- debugfs_create_u64("ob_rq_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_req);
+ debugfs_create_file("ob_rq_encrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_req,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ib_dec_req =
- debugfs_create_u64("ib_rq_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_req);
+ debugfs_create_file("ib_rq_decrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_req,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ob_enc_bytes =
- debugfs_create_u64("ob_bytes_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_bytes);
+ debugfs_create_file("ob_bytes_encrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_bytes,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ob_prot_bytes =
- debugfs_create_u64("ob_bytes_protected",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_prot_bytes);
+ debugfs_create_file("ob_bytes_protected",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_prot_bytes,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ib_dec_bytes =
- debugfs_create_u64("ib_bytes_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_bytes);
+ debugfs_create_file("ib_bytes_decrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_bytes,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ib_valid_bytes =
- debugfs_create_u64("ib_bytes_validated",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_valid_bytes);
+ debugfs_create_file("ib_bytes_validated",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_valid_bytes,
+ &caam_fops_u64_ro);
/* Controller level - global status values */
ctrlpriv->ctl_faultaddr =
- debugfs_create_u64("fault_addr",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultaddr);
+ debugfs_create_file("fault_addr",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultaddr,
+ &caam_fops_u32_ro);
ctrlpriv->ctl_faultdetail =
- debugfs_create_u32("fault_detail",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultdetail);
+ debugfs_create_file("fault_detail",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultdetail,
+ &caam_fops_u32_ro);
ctrlpriv->ctl_faultstatus =
- debugfs_create_u32("fault_status",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->status);
+ debugfs_create_file("fault_status",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->status,
+ &caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
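/*
 * Semantics of the setbits32()/clrbits32() -> clrsetbits_32() conversion
 * applied throughout this file: the helper does one read-modify-write
 * that clears one mask and then sets another, so setbits32(a, b) becomes
 * clrsetbits_32(a, 0, b) and clrbits32(a, b) becomes clrsetbits_32(a, b, 0).
 * A sketch of the presumed behaviour in terms of the driver's accessors:
 */
static inline void example_clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
{
	wr_reg32(reg, (rd_reg32(reg) & ~clear) | set);
}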
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 1e93c6af2275..26427c11ad87 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -20,19 +20,18 @@
#define SEC4_SG_BPID_MASK 0x000000ff
#define SEC4_SG_BPID_SHIFT 16
#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
-#define SEC4_SG_OFFS_MASK 0x00001fff
+#define SEC4_SG_OFFSET_MASK 0x00001fff
struct sec4_sg_entry {
-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \
+ defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
u32 rsvd1;
dma_addr_t ptr;
#else
u64 ptr;
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
u32 len;
- u8 rsvd2;
- u8 buf_pool_id;
- u16 offset;
+ u32 bpid_offset;
};
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
@@ -454,6 +453,8 @@ struct sec4_sg_entry {
#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
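/*
 * The sec4_sg_entry change above folds the old buf_pool_id/offset pair
 * into a single 32-bit bpid_offset word so it can be endian-swapped as a
 * unit with cpu_to_caam32(). A sketch of the presumed packing, derived
 * from the masks defined above:
 */
static inline u32 example_pack_bpid_offset(u32 bpid, u32 offset)
{
	return ((bpid & SEC4_SG_BPID_MASK) << SEC4_SG_BPID_SHIFT) |
	       (offset & SEC4_SG_OFFSET_MASK);
}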
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 98d07de24fc4..d3869b95e7b1 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -5,6 +5,7 @@
*/
#include "desc.h"
+#include "regs.h"
#define IMMEDIATE (1 << 23)
#define CAAM_CMD_SZ sizeof(u32)
@@ -30,9 +31,11 @@
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+extern bool caam_little_end;
+
static inline int desc_len(u32 *desc)
{
- return *desc & HDR_DESCLEN_MASK;
+ return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
}
static inline int desc_bytes(void *desc)
@@ -52,7 +55,7 @@ static inline void *sh_desc_pdb(u32 *desc)
static inline void init_desc(u32 *desc, u32 options)
{
- *desc = (options | HDR_ONE) + 1;
+ *desc = cpu_to_caam32((options | HDR_ONE) + 1);
}
static inline void init_sh_desc(u32 *desc, u32 options)
@@ -74,13 +77,21 @@ static inline void init_job_desc(u32 *desc, u32 options)
init_desc(desc, CMD_DESC_HDR | options);
}
+static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+{
+ u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+
+ init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
+}
+
static inline void append_ptr(u32 *desc, dma_addr_t ptr)
{
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
- *offset = ptr;
+ *offset = cpu_to_caam_dma(ptr);
- (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
+ CAAM_PTR_SZ / CAAM_CMD_SZ);
}
static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
@@ -99,16 +110,17 @@ static inline void append_data(u32 *desc, void *data, int len)
if (len) /* avoid sparse warning: memcpy with byte count of 0 */
memcpy(offset, data, len);
- (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
+ (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
}
static inline void append_cmd(u32 *desc, u32 command)
{
u32 *cmd = desc_end(desc);
- *cmd = command;
+ *cmd = cpu_to_caam32(command);
- (*desc)++;
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 1);
}
#define append_u32 append_cmd
@@ -117,16 +129,22 @@ static inline void append_u64(u32 *desc, u64 data)
{
u32 *offset = desc_end(desc);
- *offset = upper_32_bits(data);
- *(++offset) = lower_32_bits(data);
+ /* Only 32-bit alignment is guaranteed in descriptor buffer */
+ if (caam_little_end) {
+ *offset = cpu_to_caam32(lower_32_bits(data));
+ *(++offset) = cpu_to_caam32(upper_32_bits(data));
+ } else {
+ *offset = cpu_to_caam32(upper_32_bits(data));
+ *(++offset) = cpu_to_caam32(lower_32_bits(data));
+ }
- (*desc) += 2;
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 2);
}
/* Write command without affecting header, and return pointer to next word */
static inline u32 *write_cmd(u32 *desc, u32 command)
{
- *desc = command;
+ *desc = cpu_to_caam32(command);
return desc + 1;
}
@@ -168,14 +186,17 @@ APPEND_CMD_RET(move, MOVE)
static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
{
- *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
+ *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
+ (desc_len(desc) - (jump_cmd - desc)));
}
static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
{
- *move_cmd &= ~MOVE_OFFSET_MASK;
- *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) &
- MOVE_OFFSET_MASK);
+ u32 val = caam32_to_cpu(*move_cmd);
+
+ val &= ~MOVE_OFFSET_MASK;
+ val |= (desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
+ *move_cmd = cpu_to_caam32(val);
}
#define APPEND_CMD(cmd, op) \
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 5ef4be22eb80..a81f551ac222 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -31,7 +31,7 @@ static int caam_reset_hw_jr(struct device *dev)
* mask interrupts since we are going to poll
* for reset completion status
*/
- setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
/* initiate flush (required prior to reset) */
wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
@@ -57,7 +57,7 @@ static int caam_reset_hw_jr(struct device *dev)
}
/* unmask interrupts */
- clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
return 0;
}
@@ -147,7 +147,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
}
/* mask valid interrupts */
- setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
@@ -182,7 +182,7 @@ static void caam_jr_dequeue(unsigned long devarg)
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
if (jrp->outring[hw_idx].desc ==
- jrp->entinfo[sw_idx].desc_addr_dma)
+ caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
break; /* found */
}
/* we should never fail to find a matching descriptor */
@@ -200,7 +200,7 @@ static void caam_jr_dequeue(unsigned long devarg)
usercall = jrp->entinfo[sw_idx].callbk;
userarg = jrp->entinfo[sw_idx].cbkarg;
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
- userstatus = jrp->outring[hw_idx].jrstatus;
+ userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);
/*
* Make sure all information from the job has been obtained
@@ -236,7 +236,7 @@ static void caam_jr_dequeue(unsigned long devarg)
}
/* reenable / unmask IRQs */
- clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
/**
@@ -330,7 +330,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
int head, tail, desc_size;
dma_addr_t desc_dma;
- desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
+ desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, desc_dma)) {
dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
@@ -356,7 +356,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;
- jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+ jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
/*
* Guarantee that the descriptor's DMA address has been written to
@@ -444,9 +444,9 @@ static int caam_jr_init(struct device *dev)
spin_lock_init(&jrp->outlock);
/* Select interrupt coalescing parameters */
- setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
- (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
- (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
+ (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+ (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
return 0;
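For reference, the mapping between the old PowerPC-style bit helpers and the endianness-agnostic one used throughout this file is mechanical. A minimal sketch of the equivalence, assuming only the clrsetbits_32() semantics introduced in regs.h further below (the helper name here is illustrative, little-endian variant shown):

/*
 * setbits32(addr, v)  ==  clrsetbits_32(addr, 0, v)   - set bits, clear none
 * clrbits32(addr, v)  ==  clrsetbits_32(addr, v, 0)   - clear bits, set none
 * Both collapse into a single read-modify-write cycle:
 */
static inline void clrsetbits_sketch(void __iomem *addr, u32 clear, u32 set)
{
	u32 v = ioread32(addr);			/* read */

	v &= ~clear;				/* clear requested bits */
	v |= set;				/* set requested bits */
	iowrite32(v, addr);			/* write back */
}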
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 3a87c0cf879a..aaa00dd1c601 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -1,18 +1,19 @@
/*
* CAAM Protocol Data Block (PDB) definition header file
*
- * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2008-2016 Freescale Semiconductor, Inc.
*
*/
#ifndef CAAM_PDB_H
#define CAAM_PDB_H
+#include "compat.h"
/*
* PDB- IPSec ESP Header Modification Options
*/
-#define PDBHMO_ESP_DECAP_SHIFT 12
-#define PDBHMO_ESP_ENCAP_SHIFT 4
+#define PDBHMO_ESP_DECAP_SHIFT 28
+#define PDBHMO_ESP_ENCAP_SHIFT 28
/*
* Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the
* Options Byte IP version (IPvsn) field:
@@ -32,12 +33,23 @@
*/
#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+#define PDBNH_ESP_ENCAP_SHIFT 16
+#define PDBNH_ESP_ENCAP_MASK (0xff << PDBNH_ESP_ENCAP_SHIFT)
+
+#define PDBHDRLEN_ESP_DECAP_SHIFT 16
+#define PDBHDRLEN_MASK (0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT)
+
+#define PDB_NH_OFFSET_SHIFT 8
+#define PDB_NH_OFFSET_MASK (0xff << PDB_NH_OFFSET_SHIFT)
+
/*
* PDB - IPSec ESP Encap/Decap Options
*/
#define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */
#define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */
+#define PDBOPTS_ESP_ARS128 0x80 /* 128-entry antireplay window */
#define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */
+#define PDBOPTS_ESP_ARS_MASK 0xc0 /* antireplay window mask */
#define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */
#define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */
#define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */
@@ -54,35 +66,73 @@
/*
* General IPSec encap/decap PDB definitions
*/
+
+/**
+ * ipsec_encap_cbc - PDB part for IPsec CBC encapsulation
+ * @iv: 16-byte array initialization vector
+ */
struct ipsec_encap_cbc {
- u32 iv[4];
+ u8 iv[16];
};
+/**
+ * ipsec_encap_ctr - PDB part for IPsec CTR encapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ * @iv: initialization vector
+ */
struct ipsec_encap_ctr {
- u32 ctr_nonce;
+ u8 ctr_nonce[4];
u32 ctr_initial;
- u32 iv[2];
+ u64 iv;
};
+/**
+ * ipsec_encap_ccm - PDB part for IPsec CCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ * @iv: initialization vector
+ */
struct ipsec_encap_ccm {
- u32 salt; /* lower 24 bits */
- u8 b0_flags;
- u8 ctr_flags;
- u16 ctr_initial;
- u32 iv[2];
+ u8 salt[4];
+ u32 ccm_opt;
+ u64 iv;
};
+/**
+ * ipsec_encap_gcm - PDB part for IPsec GCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @rsvd1: reserved, do not use
+ * @iv: initialization vector
+ */
struct ipsec_encap_gcm {
- u32 salt; /* lower 24 bits */
+ u8 salt[4];
u32 rsvd1;
- u32 iv[2];
+ u64 iv;
};
+/**
+ * ipsec_encap_pdb - PDB for IPsec encapsulation
+ * @options: MSB-LSB description
+ * hmo (header manipulation options) - 4b
+ * reserved - 4b
+ * next header - 8b
+ * next header offset - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @spi: IPsec SPI (Security Parameters Index)
+ * @ip_hdr_len: optional IP Header length (in bytes)
+ * reserved - 16b
+ * Opt. IP Hdr Len - 16b
+ * @ip_hdr: optional IP Header content
+ */
struct ipsec_encap_pdb {
- u8 hmo_rsvd;
- u8 ip_nh;
- u8 ip_nh_offset;
- u8 options;
+ u32 options;
u32 seq_num_ext_hi;
u32 seq_num;
union {
@@ -92,36 +142,65 @@ struct ipsec_encap_pdb {
struct ipsec_encap_gcm gcm;
};
u32 spi;
- u16 rsvd1;
- u16 ip_hdr_len;
- u32 ip_hdr[0]; /* optional IP Header content */
+ u32 ip_hdr_len;
+ u32 ip_hdr[0];
};
+/**
+ * ipsec_decap_cbc - PDB part for IPsec CBC decapsulation
+ * @rsvd: reserved, do not use
+ */
struct ipsec_decap_cbc {
u32 rsvd[2];
};
+/**
+ * ipsec_decap_ctr - PDB part for IPsec CTR decapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ */
struct ipsec_decap_ctr {
- u32 salt;
+ u8 ctr_nonce[4];
u32 ctr_initial;
};
+/**
+ * ipsec_decap_ccm - PDB part for IPsec CCM decapsulation
+ * @salt: 3-byte salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ */
struct ipsec_decap_ccm {
- u32 salt;
- u8 iv_flags;
- u8 ctr_flags;
- u16 ctr_initial;
+ u8 salt[4];
+ u32 ccm_opt;
};
+/**
+ * ipsec_decap_gcm - PDB part for IPsec GCM decapsulation
+ * @salt: 4-byte salt
+ * @resvd: reserved, do not use
+ */
struct ipsec_decap_gcm {
- u32 salt;
+ u8 salt[4];
u32 resvd;
};
+/**
+ * ipsec_decap_pdb - PDB for IPsec decapsulation
+ * @options: MSB-LSB description
+ * hmo (header manipulation options) - 4b
+ * IP header length - 12b
+ * next header offset - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @anti_replay: Anti-replay window; size depends on ARS (option flags)
+ */
struct ipsec_decap_pdb {
- u16 hmo_ip_hdr_len;
- u8 ip_nh_offset;
- u8 options;
+ u32 options;
union {
struct ipsec_decap_cbc cbc;
struct ipsec_decap_ctr ctr;
@@ -130,8 +209,7 @@ struct ipsec_decap_pdb {
};
u32 seq_num_ext_hi;
u32 seq_num;
- u32 anti_replay[2];
- u32 end_index[0];
+ __be32 anti_replay[4];
};
/*
@@ -399,4 +477,52 @@ struct dsa_verify_pdb {
u8 *ab; /* only used if ECC processing */
};
+/* RSA Protocol Data Block */
+#define RSA_PDB_SGF_SHIFT 28
+#define RSA_PDB_E_SHIFT 12
+#define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
+#define RSA_PDB_D_SHIFT 12
+#define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
+
+#define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
+#define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
+#define RSA_PRIV_PDB_SGF_F (0x4 << RSA_PDB_SGF_SHIFT)
+#define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
+
+#define RSA_PRIV_KEY_FRM_1 0
+
+/**
+ * rsa_pub_pdb - RSA Encrypt Protocol Data Block
+ * @sgf: scatter-gather field
+ * @f_dma: dma address of input data
+ * @g_dma: dma address of encrypted output data
+ * @n_dma: dma address of RSA modulus
+ * @e_dma: dma address of RSA public exponent
+ * @f_len: length in octets of the input data
+ */
+struct rsa_pub_pdb {
+ u32 sgf;
+ dma_addr_t f_dma;
+ dma_addr_t g_dma;
+ dma_addr_t n_dma;
+ dma_addr_t e_dma;
+ u32 f_len;
+} __packed;
+
+/**
+ * rsa_priv_f1_pdb - RSA Decrypt PDB (Private Key Form #1)
+ * @sgf: scatter-gather field
+ * @g_dma: dma address of encrypted input data
+ * @f_dma: dma address of output data
+ * @n_dma: dma address of RSA modulus
+ * @d_dma: dma address of RSA private exponent
+ */
+struct rsa_priv_f1_pdb {
+ u32 sgf;
+ dma_addr_t g_dma;
+ dma_addr_t f_dma;
+ dma_addr_t n_dma;
+ dma_addr_t d_dma;
+} __packed;
+
#endif
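Since the encap PDB now packs four former byte-wide fields into a single 32-bit @options word, a short hedged sketch of the packing may help; the helper name is hypothetical, only the shift/mask macros come from this header:

/* Hypothetical helper: assemble the ipsec_encap_pdb options word from
 * its documented MSB-LSB layout (hmo 4b | rsvd 4b | next header 8b |
 * next header offset 8b | option flags 8b).
 */
static inline u32 ipsec_encap_pack_options(u32 hmo, u8 next_hdr,
					   u8 nh_offset, u8 opt_flags)
{
	u32 options = opt_flags;

	options |= hmo;		/* pre-shifted via PDBHMO_ESP_ENCAP_SHIFT */
	options |= ((u32)next_hdr << PDBNH_ESP_ENCAP_SHIFT) &
		   PDBNH_ESP_ENCAP_MASK;
	options |= ((u32)nh_offset << PDB_NH_OFFSET_SHIFT) &
		   PDB_NH_OFFSET_MASK;

	return options;
}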
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
new file mode 100644
index 000000000000..4e4183e615ea
--- /dev/null
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -0,0 +1,36 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+#include "caampkc.h"
+#include "desc_constr.h"
+
+/* Descriptor for RSA Public operation */
+void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
+{
+ init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ append_cmd(desc, pdb->sgf);
+ append_ptr(desc, pdb->f_dma);
+ append_ptr(desc, pdb->g_dma);
+ append_ptr(desc, pdb->n_dma);
+ append_ptr(desc, pdb->e_dma);
+ append_cmd(desc, pdb->f_len);
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSAENC_PUBKEY);
+}
+
+/* Descriptor for RSA Private operation - Private Key Form #1 */
+void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
+{
+ init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ append_cmd(desc, pdb->sgf);
+ append_ptr(desc, pdb->g_dma);
+ append_ptr(desc, pdb->f_dma);
+ append_ptr(desc, pdb->n_dma);
+ append_ptr(desc, pdb->d_dma);
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+ RSA_PRIV_KEY_FRM_1);
+}
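A rough usage sketch for the descriptors above; the allocation size and completion handling are assumptions, only init_rsa_pub_desc() and caam_jr_enqueue() come from this series:

/* Sketch: build and enqueue an RSA public-key job.  Assumes the PDB DMA
 * addresses (f_dma, g_dma, n_dma, e_dma) were set up beforehand with
 * dma_map_single().
 */
static int rsa_pub_enqueue_sketch(struct device *jrdev,
				  struct rsa_pub_pdb *pdb,
				  void (*done)(struct device *, u32 *, u32,
					       void *),
				  void *ctx)
{
	u32 *desc;
	int ret;

	/* job header + embedded PDB + OPERATION command */
	desc = kzalloc(2 * CAAM_CMD_SZ + sizeof(*pdb), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	init_rsa_pub_desc(desc, pdb);

	ret = caam_jr_enqueue(jrdev, desc, done, ctx);
	if (ret) {		/* not enqueued, free immediately */
		kfree(desc);
		return ret;
	}

	return -EINPROGRESS;	/* result arrives through done() */
}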
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 0ba9c40597dc..b3c5016f6458 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -8,6 +8,7 @@
#define REGS_H
#include <linux/types.h>
+#include <linux/bitops.h>
#include <linux/io.h>
/*
@@ -65,46 +66,56 @@
*
*/
-#ifdef CONFIG_ARM
-/* These are common macros for Power, put here for ARM */
-#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
-#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
+extern bool caam_little_end;
-#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a)
-#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a))
+#define caam_to_cpu(len) \
+static inline u##len caam##len ## _to_cpu(u##len val) \
+{ \
+ if (caam_little_end) \
+ return le##len ## _to_cpu(val); \
+ else \
+ return be##len ## _to_cpu(val); \
+}
-#define out_le32(a, v) out_arch(l, le32, a, v)
-#define in_le32(a) in_arch(l, le32, a)
+#define cpu_to_caam(len) \
+static inline u##len cpu_to_caam##len(u##len val) \
+{ \
+ if (caam_little_end) \
+ return cpu_to_le##len(val); \
+ else \
+ return cpu_to_be##len(val); \
+}
-#define out_be32(a, v) out_arch(l, be32, a, v)
-#define in_be32(a) in_arch(l, be32, a)
+caam_to_cpu(16)
+caam_to_cpu(32)
+caam_to_cpu(64)
+cpu_to_caam(16)
+cpu_to_caam(32)
+cpu_to_caam(64)
-#define clrsetbits(type, addr, clear, set) \
- out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+static inline void wr_reg32(void __iomem *reg, u32 data)
+{
+ if (caam_little_end)
+ iowrite32(data, reg);
+ else
+ iowrite32be(data, reg);
+}
-#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
-#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
-#endif
+static inline u32 rd_reg32(void __iomem *reg)
+{
+ if (caam_little_end)
+ return ioread32(reg);
-#ifdef __BIG_ENDIAN
-#define wr_reg32(reg, data) out_be32(reg, data)
-#define rd_reg32(reg) in_be32(reg)
-#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set)
-#ifdef CONFIG_64BIT
-#define wr_reg64(reg, data) out_be64(reg, data)
-#define rd_reg64(reg) in_be64(reg)
-#endif
-#else
-#ifdef __LITTLE_ENDIAN
-#define wr_reg32(reg, data) __raw_writel(data, reg)
-#define rd_reg32(reg) __raw_readl(reg)
-#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set)
-#ifdef CONFIG_64BIT
-#define wr_reg64(reg, data) __raw_writeq(data, reg)
-#define rd_reg64(reg) __raw_readq(reg)
-#endif
-#endif
-#endif
+ return ioread32be(reg);
+}
+
+static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
+{
+ if (caam_little_end)
+ iowrite32((ioread32(reg) & ~clear) | set, reg);
+ else
+ iowrite32be((ioread32be(reg) & ~clear) | set, reg);
+}
/*
* The only users of these wr/rd_reg64 functions is the Job Ring (JR).
@@ -123,29 +134,67 @@
* base + 0x0000 : least-significant 32 bits
* base + 0x0004 : most-significant 32 bits
*/
+#ifdef CONFIG_64BIT
+static inline void wr_reg64(void __iomem *reg, u64 data)
+{
+ if (caam_little_end)
+ iowrite64(data, reg);
+ else
+ iowrite64be(data, reg);
+}
-#ifndef CONFIG_64BIT
-#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \
- defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
-#define REG64_MS32(reg) ((u32 __iomem *)(reg))
-#define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
-#else
-#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1)
-#define REG64_LS32(reg) ((u32 __iomem *)(reg))
-#endif
-
-static inline void wr_reg64(u64 __iomem *reg, u64 data)
+static inline u64 rd_reg64(void __iomem *reg)
{
- wr_reg32(REG64_MS32(reg), data >> 32);
- wr_reg32(REG64_LS32(reg), data);
+ if (caam_little_end)
+ return ioread64(reg);
+ else
+ return ioread64be(reg);
}
-static inline u64 rd_reg64(u64 __iomem *reg)
+#else /* CONFIG_64BIT */
+static inline void wr_reg64(void __iomem *reg, u64 data)
{
- return ((u64)rd_reg32(REG64_MS32(reg)) << 32 |
- (u64)rd_reg32(REG64_LS32(reg)));
+#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+ if (caam_little_end) {
+ wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
+ wr_reg32((u32 __iomem *)(reg), data);
+ } else
+#endif
+ {
+ wr_reg32((u32 __iomem *)(reg), data >> 32);
+ wr_reg32((u32 __iomem *)(reg) + 1, data);
+ }
}
+
+static inline u64 rd_reg64(void __iomem *reg)
+{
+#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+ if (caam_little_end)
+ return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
+ (u64)rd_reg32((u32 __iomem *)(reg)));
+ else
#endif
+ return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
+ (u64)rd_reg32((u32 __iomem *)(reg) + 1));
+}
+#endif /* CONFIG_64BIT */
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#ifdef CONFIG_SOC_IMX7D
+#define cpu_to_caam_dma(value) \
+ (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
+ (u64)cpu_to_caam32(upper_32_bits(value)))
+#define caam_dma_to_cpu(value) \
+ (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
+ (u64)caam32_to_cpu(upper_32_bits(value)))
+#else
+#define cpu_to_caam_dma(value) cpu_to_caam64(value)
+#define caam_dma_to_cpu(value) caam64_to_cpu(value)
+#endif /* CONFIG_SOC_IMX7D */
+#else
+#define cpu_to_caam_dma(value) cpu_to_caam32(value)
+#define caam_dma_to_cpu(value) caam32_to_cpu(value)
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
/*
* jr_outentry
@@ -249,6 +298,8 @@ struct caam_perfmon {
u32 faultliodn; /* FALR - Fault Address LIODN */
u32 faultdetail; /* FADR - Fault Addr Detail */
u32 rsvd2;
+#define CSTA_PLEND BIT(10)
+#define CSTA_ALT_PLEND BIT(18)
u32 status; /* CSTA - CAAM Status */
u64 rsvd3;
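The net effect of the regs.h rework is that CAAM endianness is resolved once at probe time through caam_little_end instead of at build time. A minimal round-trip sketch, purely illustrative:

/* Sketch: a CPU-order value converted to CAAM descriptor order and
 * back must be unchanged, whichever endianness the controller uses.
 */
static void caam_endian_roundtrip_sketch(u32 *desc_word, u32 value)
{
	*desc_word = cpu_to_caam32(value);	/* CPU -> CAAM layout */
	WARN_ON(caam32_to_cpu(*desc_word) != value);
}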
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 12ec6616e89d..19dc64fede0d 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -5,18 +5,19 @@
*
*/
+#include "regs.h"
+
struct sec4_sg_entry;
/*
* convert single dma address to h/w link table format
*/
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
- dma_addr_t dma, u32 len, u32 offset)
+ dma_addr_t dma, u32 len, u16 offset)
{
- sec4_sg_ptr->ptr = dma;
- sec4_sg_ptr->len = len;
- sec4_sg_ptr->buf_pool_id = 0;
- sec4_sg_ptr->offset = offset;
+ sec4_sg_ptr->ptr = cpu_to_caam_dma(dma);
+ sec4_sg_ptr->len = cpu_to_caam32(len);
+ sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
@@ -30,7 +31,7 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
*/
static inline struct sec4_sg_entry *
sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
- struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
+ struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
{
while (sg_count) {
dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
@@ -48,10 +49,10 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
*/
static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
struct sec4_sg_entry *sec4_sg_ptr,
- u32 offset)
+ u16 offset)
{
sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
- sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
+ sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
}
static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
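A hedged expansion of what the converters above do for a mapped scatterlist; the function name is illustrative, everything it calls exists in this header or <linux/scatterlist.h>:

/* Sketch: one hardware sec4 entry per mapped S/G element, with
 * SEC4_SG_LEN_FIN set on the final entry.  Assumes @sg was already
 * mapped with dma_map_sg() and @tbl points to DMA-able table memory.
 */
static void build_sec4_table_sketch(struct scatterlist *sg, int sg_count,
				    struct sec4_sg_entry *tbl)
{
	struct sec4_sg_entry *entry = tbl;

	for (; sg_count; sg_count--, sg = sg_next(sg), entry++)
		dma_to_sec4_sg_one(entry, sg_dma_address(sg),
				   sg_dma_len(sg), 0);

	/* terminator flag is stored in CAAM endianness, like the rest */
	(entry - 1)->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
}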
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 0d0d4529ee36..58a4244b4752 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -14,9 +14,8 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include "ccp-crypto.h"
@@ -110,15 +109,12 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
- return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
- key_len);
+ return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
unsigned int encrypt)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
unsigned int unit;
@@ -146,14 +142,19 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
+
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
*/
- ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
- ret = (encrypt) ? crypto_ablkcipher_encrypt(req) :
- crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
-
+ skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
return ret;
}
@@ -192,23 +193,21 @@ static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_ablkcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
ctx->u.aes.key_len = 0;
- fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
- pr_warn("could not load fallback driver %s\n",
- crypto_tfm_alg_name(tfm));
+ pr_warn("could not load fallback driver xts(aes)\n");
return PTR_ERR(fallback_tfm);
}
- ctx->u.aes.tfm_ablkcipher = fallback_tfm;
+ ctx->u.aes.tfm_skcipher = fallback_tfm;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) +
- fallback_tfm->base.crt_ablkcipher.reqsize;
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
return 0;
}
@@ -217,9 +216,7 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->u.aes.tfm_ablkcipher)
- crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
- ctx->u.aes.tfm_ablkcipher = NULL;
+ crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}
static int ccp_register_aes_xts_alg(struct list_head *head,
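The ccp conversion above is an instance of the standard on-stack sub-request pattern for skcipher fallbacks; a generic hedged sketch of that pattern (names illustrative), useful for comparing against the hunk above:

/* Sketch: delegate one request to a fallback skcipher without any
 * allocation, wiping the on-stack sub-request afterwards.
 */
static int fallback_crypt_sketch(struct crypto_skcipher *fallback,
				 struct skcipher_request *req, bool enc)
{
	SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
	int ret;

	skcipher_request_set_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);
	ret = enc ? crypto_skcipher_encrypt(subreq) :
		    crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);	/* scrub keys/IV from the stack */

	return ret;
}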
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index a326ec20bfa8..8335b32e815e 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -17,7 +17,6 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/ccp.h>
-#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
@@ -69,7 +68,7 @@ static inline struct ccp_crypto_ahash_alg *
/***** AES related defines *****/
struct ccp_aes_ctx {
/* Fallback cipher for XTS with unsupported unit sizes */
- struct crypto_ablkcipher *tfm_ablkcipher;
+ struct crypto_skcipher *tfm_skcipher;
/* Cipher used to generate CMAC K1/K2 keys */
struct crypto_cipher *tfm_cipher;
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index e8ef9fd24a16..e373cc6557c6 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -31,22 +31,42 @@
#include "cesa.h"
+/* Limit of the crypto queue before reaching the backlog */
+#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
+
static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
module_param_named(allhwsupport, allhwsupport, int, 0444);
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");
struct mv_cesa_dev *cesa_dev;
-static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine)
+struct crypto_async_request *
+mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
+ struct crypto_async_request **backlog)
{
- struct crypto_async_request *req, *backlog;
+ struct crypto_async_request *req;
+
+ *backlog = crypto_get_backlog(&engine->queue);
+ req = crypto_dequeue_request(&engine->queue);
+
+ return req;
+}
+
+static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
+{
+ struct crypto_async_request *req = NULL, *backlog = NULL;
struct mv_cesa_ctx *ctx;
- spin_lock_bh(&cesa_dev->lock);
- backlog = crypto_get_backlog(&cesa_dev->queue);
- req = crypto_dequeue_request(&cesa_dev->queue);
- engine->req = req;
- spin_unlock_bh(&cesa_dev->lock);
+
+ spin_lock_bh(&engine->lock);
+ if (!engine->req) {
+ req = mv_cesa_dequeue_req_locked(engine, &backlog);
+ engine->req = req;
+ }
+ spin_unlock_bh(&engine->lock);
if (!req)
return;
@@ -55,8 +75,47 @@ static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine)
backlog->complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(req->tfm);
- ctx->ops->prepare(req, engine);
ctx->ops->step(req);
+}
+
+static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
+{
+ struct crypto_async_request *req;
+ struct mv_cesa_ctx *ctx;
+ int res;
+
+ req = engine->req;
+ ctx = crypto_tfm_ctx(req->tfm);
+ res = ctx->ops->process(req, status);
+
+ if (res == 0) {
+ ctx->ops->complete(req);
+ mv_cesa_engine_enqueue_complete_request(engine, req);
+ } else if (res == -EINPROGRESS) {
+ ctx->ops->step(req);
+ }
+
+ return res;
+}
+
+static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
+{
+ if (engine->chain.first && engine->chain.last)
+ return mv_cesa_tdma_process(engine, status);
+
+ return mv_cesa_std_process(engine, status);
+}
+
+static inline void
+mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
+ int res)
+{
+ ctx->ops->cleanup(req);
+ local_bh_disable();
+ req->complete(req, res);
+ local_bh_enable();
}
static irqreturn_t mv_cesa_int(int irq, void *priv)
@@ -83,49 +142,54 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
writel(~status, engine->regs + CESA_SA_INT_STATUS);
+ /* Process fetched requests */
+ res = mv_cesa_int_process(engine, status & mask);
ret = IRQ_HANDLED;
+
spin_lock_bh(&engine->lock);
req = engine->req;
+ if (res != -EINPROGRESS)
+ engine->req = NULL;
spin_unlock_bh(&engine->lock);
- if (req) {
- ctx = crypto_tfm_ctx(req->tfm);
- res = ctx->ops->process(req, status & mask);
- if (res != -EINPROGRESS) {
- spin_lock_bh(&engine->lock);
- engine->req = NULL;
- mv_cesa_dequeue_req_unlocked(engine);
- spin_unlock_bh(&engine->lock);
- ctx->ops->cleanup(req);
- local_bh_disable();
- req->complete(req, res);
- local_bh_enable();
- } else {
- ctx->ops->step(req);
- }
+
+ ctx = crypto_tfm_ctx(req->tfm);
+
+ if (res && res != -EINPROGRESS)
+ mv_cesa_complete_req(ctx, req, res);
+
+ /* Launch the next pending request */
+ mv_cesa_rearm_engine(engine);
+
+ /* Iterate over the complete queue */
+ while (true) {
+ req = mv_cesa_engine_dequeue_complete_request(engine);
+ if (!req)
+ break;
+
+ mv_cesa_complete_req(ctx, req, 0);
}
}
return ret;
}
-int mv_cesa_queue_req(struct crypto_async_request *req)
+int mv_cesa_queue_req(struct crypto_async_request *req,
+ struct mv_cesa_req *creq)
{
int ret;
- int i;
+ struct mv_cesa_engine *engine = creq->engine;
+
+ spin_lock_bh(&engine->lock);
+ if (mv_cesa_req_get_type(creq) == CESA_DMA_REQ)
+ mv_cesa_tdma_chain(engine, creq);
- spin_lock_bh(&cesa_dev->lock);
- ret = crypto_enqueue_request(&cesa_dev->queue, req);
- spin_unlock_bh(&cesa_dev->lock);
+ ret = crypto_enqueue_request(&engine->queue, req);
+ spin_unlock_bh(&engine->lock);
if (ret != -EINPROGRESS)
return ret;
- for (i = 0; i < cesa_dev->caps->nengines; i++) {
- spin_lock_bh(&cesa_dev->engines[i].lock);
- if (!cesa_dev->engines[i].req)
- mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]);
- spin_unlock_bh(&cesa_dev->engines[i].lock);
- }
+ mv_cesa_rearm_engine(engine);
return -EINPROGRESS;
}
@@ -309,6 +373,10 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
if (!dma->padding_pool)
return -ENOMEM;
+ dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
+ if (!dma->iv_pool)
+ return -ENOMEM;
+
cesa->dma = dma;
return 0;
@@ -416,7 +484,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&cesa->lock);
- crypto_init_queue(&cesa->queue, 50);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
cesa->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cesa->regs))
@@ -489,6 +557,10 @@ static int mv_cesa_probe(struct platform_device *pdev)
engine);
if (ret)
goto err_cleanup;
+
+ crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
+ atomic_set(&engine->load, 0);
+ INIT_LIST_HEAD(&engine->complete_queue);
}
cesa_dev = cesa;
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index 74071e45ada0..e423d33decd4 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -271,10 +271,13 @@ struct mv_cesa_op_ctx {
/* TDMA descriptor flags */
#define CESA_TDMA_DST_IN_SRAM BIT(31)
#define CESA_TDMA_SRC_IN_SRAM BIT(30)
-#define CESA_TDMA_TYPE_MSK GENMASK(29, 0)
+#define CESA_TDMA_END_OF_REQ BIT(29)
+#define CESA_TDMA_BREAK_CHAIN BIT(28)
+#define CESA_TDMA_TYPE_MSK GENMASK(27, 0)
#define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2
+#define CESA_TDMA_IV 3
/**
* struct mv_cesa_tdma_desc - TDMA descriptor
@@ -390,6 +393,7 @@ struct mv_cesa_dev_dma {
struct dma_pool *op_pool;
struct dma_pool *cache_pool;
struct dma_pool *padding_pool;
+ struct dma_pool *iv_pool;
};
/**
@@ -398,7 +402,6 @@ struct mv_cesa_dev_dma {
* @regs: device registers
* @sram_size: usable SRAM size
* @lock: device lock
- * @queue: crypto request queue
* @engines: array of engines
* @dma: dma pools
*
@@ -410,7 +413,6 @@ struct mv_cesa_dev {
struct device *dev;
unsigned int sram_size;
spinlock_t lock;
- struct crypto_queue queue;
struct mv_cesa_engine *engines;
struct mv_cesa_dev_dma *dma;
};
@@ -429,6 +431,11 @@ struct mv_cesa_dev {
* @int_mask: interrupt mask cache
* @pool: memory pool pointing to the memory region reserved in
* SRAM
+ * @queue: fifo of the pending crypto requests
+ * @load: engine load counter, useful for load balancing
+ * @chain: list of the current tdma descriptors being processed
+ * by this engine.
+ * @complete_queue: fifo of the processed requests by the engine
*
* Structure storing CESA engine information.
*/
@@ -444,23 +451,27 @@ struct mv_cesa_engine {
size_t max_req_len;
u32 int_mask;
struct gen_pool *pool;
+ struct crypto_queue queue;
+ atomic_t load;
+ struct mv_cesa_tdma_chain chain;
+ struct list_head complete_queue;
};
/**
* struct mv_cesa_req_ops - CESA request operations
- * @prepare: prepare a request to be executed on the specified engine
* @process: process a request chunk result (should return 0 if the
* operation, -EINPROGRESS if it needs more steps or an error
* code)
* @step: launch the crypto operation on the next chunk
* @cleanup: cleanup the crypto request (release associated data)
+ * @complete: complete the request, i.e. copy result or context from SRAM
+ * when needed.
*/
struct mv_cesa_req_ops {
- void (*prepare)(struct crypto_async_request *req,
- struct mv_cesa_engine *engine);
int (*process)(struct crypto_async_request *req, u32 status);
void (*step)(struct crypto_async_request *req);
void (*cleanup)(struct crypto_async_request *req);
+ void (*complete)(struct crypto_async_request *req);
};
/**
@@ -507,21 +518,11 @@ enum mv_cesa_req_type {
/**
* struct mv_cesa_req - CESA request
- * @type: request type
* @engine: engine associated with this request
+ * @chain: list of tdma descriptors associated with this request
*/
struct mv_cesa_req {
- enum mv_cesa_req_type type;
struct mv_cesa_engine *engine;
-};
-
-/**
- * struct mv_cesa_tdma_req - CESA TDMA request
- * @base: base information
- * @chain: TDMA chain
- */
-struct mv_cesa_tdma_req {
- struct mv_cesa_req base;
struct mv_cesa_tdma_chain chain;
};
@@ -538,13 +539,11 @@ struct mv_cesa_sg_std_iter {
/**
* struct mv_cesa_ablkcipher_std_req - cipher standard request
- * @base: base information
* @op: operation context
* @offset: current operation offset
* @size: size of the crypto operation
*/
struct mv_cesa_ablkcipher_std_req {
- struct mv_cesa_req base;
struct mv_cesa_op_ctx op;
unsigned int offset;
unsigned int size;
@@ -558,34 +557,27 @@ struct mv_cesa_ablkcipher_std_req {
* @dst_nents: number of entries in the dest sg list
*/
struct mv_cesa_ablkcipher_req {
- union {
- struct mv_cesa_req base;
- struct mv_cesa_tdma_req dma;
- struct mv_cesa_ablkcipher_std_req std;
- } req;
+ struct mv_cesa_req base;
+ struct mv_cesa_ablkcipher_std_req std;
int src_nents;
int dst_nents;
};
/**
* struct mv_cesa_ahash_std_req - standard hash request
- * @base: base information
* @offset: current operation offset
*/
struct mv_cesa_ahash_std_req {
- struct mv_cesa_req base;
unsigned int offset;
};
/**
* struct mv_cesa_ahash_dma_req - DMA hash request
- * @base: base information
* @padding: padding buffer
* @padding_dma: DMA address of the padding buffer
* @cache_dma: DMA address of the cache buffer
*/
struct mv_cesa_ahash_dma_req {
- struct mv_cesa_tdma_req base;
u8 *padding;
dma_addr_t padding_dma;
u8 *cache;
@@ -604,8 +596,8 @@ struct mv_cesa_ahash_dma_req {
* @state: hash state
*/
struct mv_cesa_ahash_req {
+ struct mv_cesa_req base;
union {
- struct mv_cesa_req base;
struct mv_cesa_ahash_dma_req dma;
struct mv_cesa_ahash_std_req std;
} req;
@@ -623,6 +615,35 @@ struct mv_cesa_ahash_req {
extern struct mv_cesa_dev *cesa_dev;
+
+static inline void
+mv_cesa_engine_enqueue_complete_request(struct mv_cesa_engine *engine,
+ struct crypto_async_request *req)
+{
+ list_add_tail(&req->list, &engine->complete_queue);
+}
+
+static inline struct crypto_async_request *
+mv_cesa_engine_dequeue_complete_request(struct mv_cesa_engine *engine)
+{
+ struct crypto_async_request *req;
+
+ req = list_first_entry_or_null(&engine->complete_queue,
+ struct crypto_async_request,
+ list);
+ if (req)
+ list_del(&req->list);
+
+ return req;
+}
+
+static inline enum mv_cesa_req_type
+mv_cesa_req_get_type(struct mv_cesa_req *req)
+{
+ return req->chain.first ? CESA_DMA_REQ : CESA_STD_REQ;
+}
+
static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op,
u32 cfg, u32 mask)
{
@@ -695,7 +716,32 @@ static inline bool mv_cesa_mac_op_is_first_frag(const struct mv_cesa_op_ctx *op)
CESA_SA_DESC_CFG_FIRST_FRAG;
}
-int mv_cesa_queue_req(struct crypto_async_request *req);
+int mv_cesa_queue_req(struct crypto_async_request *req,
+ struct mv_cesa_req *creq);
+
+struct crypto_async_request *
+mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
+ struct crypto_async_request **backlog);
+
+static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
+{
+ int i;
+ u32 min_load = U32_MAX;
+ struct mv_cesa_engine *selected = NULL;
+
+ for (i = 0; i < cesa_dev->caps->nengines; i++) {
+ struct mv_cesa_engine *engine = cesa_dev->engines + i;
+ u32 load = atomic_read(&engine->load);
+ if (load < min_load) {
+ min_load = load;
+ selected = engine;
+ }
+ }
+
+ atomic_add(weight, &selected->load);
+
+ return selected;
+}
/*
* Helper function that indicates whether a crypto request needs to be
@@ -765,9 +811,9 @@ static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter)
return iter->op_len;
}
-void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq);
+void mv_cesa_dma_step(struct mv_cesa_req *dreq);
-static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq,
+static inline int mv_cesa_dma_process(struct mv_cesa_req *dreq,
u32 status)
{
if (!(status & CESA_SA_INT_ACC0_IDMA_DONE))
@@ -779,10 +825,13 @@ static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq,
return 0;
}
-void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
+void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
struct mv_cesa_engine *engine);
+void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq);
+void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
+ struct mv_cesa_req *dreq);
+int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status);
-void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq);
static inline void
mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
@@ -790,6 +839,9 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
memset(chain, 0, sizeof(*chain));
}
+int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+ u32 size, u32 flags, gfp_t gfp_flags);
+
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
const struct mv_cesa_op_ctx *op_templ,
bool skip_ctx,
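From a caller's point of view, the per-engine queueing introduced above boils down to three steps; a minimal sketch, assuming only mv_cesa_select_engine() and mv_cesa_queue_req() from this patch (the wrapper itself is hypothetical):

/* Sketch: the submission flow every algorithm now shares.
 * 1. pick the least-loaded engine (this also bumps its load counter),
 * 2. bind the request to that engine,
 * 3. enqueue; DMA requests get chained to the engine's TDMA list
 *    inside mv_cesa_queue_req().
 */
static int cesa_submit_sketch(struct crypto_async_request *req,
			      struct mv_cesa_req *creq, int weight)
{
	struct mv_cesa_engine *engine = mv_cesa_select_engine(weight);

	creq->engine = engine;

	return mv_cesa_queue_req(req, creq);
}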
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index dcf1fceb9336..48df03a06066 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -70,25 +70,28 @@ mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_BIDIRECTIONAL);
}
- mv_cesa_dma_cleanup(&creq->req.dma);
+ mv_cesa_dma_cleanup(&creq->base);
}
static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ablkcipher_dma_cleanup(req);
}
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
+ struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_engine *engine = creq->base.engine;
size_t len = min_t(size_t, req->nbytes - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
+ mv_cesa_adjust_op(engine, &sreq->op);
+ memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
+
len = sg_pcopy_to_buffer(req->src, creq->src_nents,
engine->sram + CESA_SA_DATA_SRAM_OFFSET,
len, sreq->offset);
@@ -106,6 +109,8 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
+ BUG_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -113,8 +118,8 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
u32 status)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
+ struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_engine *engine = creq->base.engine;
size_t len;
len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
@@ -133,21 +138,19 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
+ struct mv_cesa_req *basereq = &creq->base;
+ unsigned int ivsize;
int ret;
- if (creq->req.base.type == CESA_DMA_REQ)
- ret = mv_cesa_dma_process(&creq->req.dma, status);
- else
- ret = mv_cesa_ablkcipher_std_process(ablkreq, status);
+ if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
+ return mv_cesa_ablkcipher_std_process(ablkreq, status);
+ ret = mv_cesa_dma_process(basereq, status);
if (ret)
return ret;
- memcpy_fromio(ablkreq->info,
- engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
- crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));
+ ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
+ memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize);
return 0;
}
@@ -157,8 +160,8 @@ static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
- if (creq->req.base.type == CESA_DMA_REQ)
- mv_cesa_dma_step(&creq->req.dma);
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+ mv_cesa_dma_step(&creq->base);
else
mv_cesa_ablkcipher_std_step(ablkreq);
}
@@ -167,22 +170,19 @@ static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_tdma_req *dreq = &creq->req.dma;
+ struct mv_cesa_req *basereq = &creq->base;
- mv_cesa_dma_prepare(dreq, dreq->base.engine);
+ mv_cesa_dma_prepare(basereq, basereq->engine);
}
static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
+ struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
sreq->size = 0;
sreq->offset = 0;
- mv_cesa_adjust_op(engine, &sreq->op);
- memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
}
static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
@@ -190,9 +190,9 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
- creq->req.base.engine = engine;
+ creq->base.engine = engine;
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ablkcipher_dma_prepare(ablkreq);
else
mv_cesa_ablkcipher_std_prepare(ablkreq);
@@ -206,11 +206,34 @@ mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
mv_cesa_ablkcipher_cleanup(ablkreq);
}
+static void
+mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
+{
+ struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct mv_cesa_engine *engine = creq->base.engine;
+ unsigned int ivsize;
+
+ atomic_sub(ablkreq->nbytes, &engine->load);
+ ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
+
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
+ struct mv_cesa_req *basereq;
+
+ basereq = &creq->base;
+ memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+ } else {
+ memcpy_fromio(ablkreq->info,
+ engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
+ ivsize);
+ }
+}
+
static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
.step = mv_cesa_ablkcipher_step,
.process = mv_cesa_ablkcipher_process,
- .prepare = mv_cesa_ablkcipher_prepare,
.cleanup = mv_cesa_ablkcipher_req_cleanup,
+ .complete = mv_cesa_ablkcipher_complete,
};
static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
@@ -295,15 +318,15 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- struct mv_cesa_tdma_req *dreq = &creq->req.dma;
+ struct mv_cesa_req *basereq = &creq->base;
struct mv_cesa_ablkcipher_dma_iter iter;
struct mv_cesa_tdma_chain chain;
bool skip_ctx = false;
int ret;
+ unsigned int ivsize;
- dreq->base.type = CESA_DMA_REQ;
- dreq->chain.first = NULL;
- dreq->chain.last = NULL;
+ basereq->chain.first = NULL;
+ basereq->chain.last = NULL;
if (req->src != req->dst) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
@@ -358,12 +381,21 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));
- dreq->chain = chain;
+ /* Add output data for IV */
+ ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+ ret = mv_cesa_dma_add_iv_op(&chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
+ ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
+
+ if (ret)
+ goto err_free_tdma;
+
+ basereq->chain = chain;
+ basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
return 0;
err_free_tdma:
- mv_cesa_dma_cleanup(dreq);
+ mv_cesa_dma_cleanup(basereq);
if (req->dst != req->src)
dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
DMA_FROM_DEVICE);
@@ -380,11 +412,13 @@ mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
const struct mv_cesa_op_ctx *op_templ)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
+ struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_req *basereq = &creq->base;
- sreq->base.type = CESA_STD_REQ;
sreq->op = *op_templ;
sreq->skip_ctx = false;
+ basereq->chain.first = NULL;
+ basereq->chain.last = NULL;
return 0;
}
@@ -414,7 +448,6 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
CESA_SA_DESC_CFG_OP_MSK);
- /* TODO: add a threshold for DMA usage */
if (cesa_dev->caps->has_tdma)
ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
else
@@ -423,28 +456,41 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
return ret;
}
-static int mv_cesa_des_op(struct ablkcipher_request *req,
- struct mv_cesa_op_ctx *tmpl)
+static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
{
- struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
int ret;
-
- mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
- CESA_SA_DESC_CFG_CRYPTM_MSK);
-
- memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
+ struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_engine *engine;
ret = mv_cesa_ablkcipher_req_init(req, tmpl);
if (ret)
return ret;
- ret = mv_cesa_queue_req(&req->base);
+ engine = mv_cesa_select_engine(req->nbytes);
+ mv_cesa_ablkcipher_prepare(&req->base, engine);
+
+ ret = mv_cesa_queue_req(&req->base, &creq->base);
+
if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ablkcipher_cleanup(req);
return ret;
}
+static int mv_cesa_des_op(struct ablkcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
+{
+ struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
+ CESA_SA_DESC_CFG_CRYPTM_MSK);
+
+ memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
+
+ return mv_cesa_ablkcipher_queue_req(req, tmpl);
+}
+
static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -547,22 +593,13 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- int ret;
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
CESA_SA_DESC_CFG_CRYPTM_MSK);
memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
- ret = mv_cesa_ablkcipher_req_init(req, tmpl);
- if (ret)
- return ret;
-
- ret = mv_cesa_queue_req(&req->base);
- if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ablkcipher_cleanup(req);
-
- return ret;
+ return mv_cesa_ablkcipher_queue_req(req, tmpl);
}
static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
@@ -673,7 +710,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- int ret, i;
+ int i;
u32 *key;
u32 cfg;
@@ -696,15 +733,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
CESA_SA_DESC_CFG_CRYPTM_MSK |
CESA_SA_DESC_CFG_AES_LEN_MSK);
- ret = mv_cesa_ablkcipher_req_init(req, tmpl);
- if (ret)
- return ret;
-
- ret = mv_cesa_queue_req(&req->base);
- if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ablkcipher_cleanup(req);
-
- return ret;
+ return mv_cesa_ablkcipher_queue_req(req, tmpl);
}
static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
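With the queueing logic factored into mv_cesa_ablkcipher_queue_req(), adding a cipher operation reduces to configuring the op template and delegating; a hedged sketch mirroring mv_cesa_des_op() above (the function and its key handling are illustrative):

static int example_cipher_op_sketch(struct ablkcipher_request *req,
				    struct mv_cesa_op_ctx *tmpl,
				    const u8 *key, unsigned int keylen)
{
	/* select the block cipher mode in the hardware op descriptor */
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);
	memcpy(tmpl->ctx.blkcipher.key, key, keylen);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}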
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 7a5058da9151..c35912b4fffb 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -103,14 +103,14 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
mv_cesa_ahash_dma_free_cache(&creq->req.dma);
- mv_cesa_dma_cleanup(&creq->req.dma.base);
+ mv_cesa_dma_cleanup(&creq->base);
}
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_cleanup(req);
}
@@ -118,7 +118,7 @@ static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_last_cleanup(req);
}
@@ -157,11 +157,23 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
+ struct mv_cesa_engine *engine = creq->base.engine;
struct mv_cesa_op_ctx *op;
unsigned int new_cache_ptr = 0;
u32 frag_mode;
size_t len;
+ unsigned int digsize;
+ int i;
+
+ mv_cesa_adjust_op(engine, &creq->op_tmpl);
+ memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
+
+ digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+ for (i = 0; i < digsize / 4; i++)
+ writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
+
if (creq->cache_ptr)
memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
@@ -237,6 +249,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
+ BUG_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -254,20 +268,17 @@ static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- struct mv_cesa_tdma_req *dreq = &creq->req.dma.base;
+ struct mv_cesa_req *basereq = &creq->base;
- mv_cesa_dma_prepare(dreq, dreq->base.engine);
+ mv_cesa_dma_prepare(basereq, basereq->engine);
}
static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
sreq->offset = 0;
- mv_cesa_adjust_op(engine, &creq->op_tmpl);
- memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
@@ -275,8 +286,8 @@ static void mv_cesa_ahash_step(struct crypto_async_request *req)
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
- if (creq->req.base.type == CESA_DMA_REQ)
- mv_cesa_dma_step(&creq->req.dma.base);
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+ mv_cesa_dma_step(&creq->base);
else
mv_cesa_ahash_std_step(ahashreq);
}
@@ -285,17 +296,20 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
- struct mv_cesa_engine *engine = creq->req.base.engine;
- unsigned int digsize;
- int ret, i;
- if (creq->req.base.type == CESA_DMA_REQ)
- ret = mv_cesa_dma_process(&creq->req.dma.base, status);
- else
- ret = mv_cesa_ahash_std_process(ahashreq, status);
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+ return mv_cesa_dma_process(&creq->base, status);
- if (ret == -EINPROGRESS)
- return ret;
+ return mv_cesa_ahash_std_process(ahashreq, status);
+}
+
+static void mv_cesa_ahash_complete(struct crypto_async_request *req)
+{
+ struct ahash_request *ahashreq = ahash_request_cast(req);
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
+ struct mv_cesa_engine *engine = creq->base.engine;
+ unsigned int digsize;
+ int i;
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
for (i = 0; i < digsize / 4; i++)
@@ -325,7 +339,7 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
}
}
- return ret;
+ atomic_sub(ahashreq->nbytes, &engine->load);
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
@@ -333,19 +347,13 @@ static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
- unsigned int digsize;
- int i;
- creq->req.base.engine = engine;
+ creq->base.engine = engine;
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_prepare(ahashreq);
else
mv_cesa_ahash_std_prepare(ahashreq);
-
- digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
- for (i = 0; i < digsize / 4; i++)
- writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
}
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
@@ -362,8 +370,8 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
.step = mv_cesa_ahash_step,
.process = mv_cesa_ahash_process,
- .prepare = mv_cesa_ahash_prepare,
.cleanup = mv_cesa_ahash_req_cleanup,
+ .complete = mv_cesa_ahash_complete,
};
static int mv_cesa_ahash_init(struct ahash_request *req,
@@ -553,15 +561,14 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
- struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
+ struct mv_cesa_req *basereq = &creq->base;
struct mv_cesa_ahash_dma_iter iter;
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
int ret;
- dreq->chain.first = NULL;
- dreq->chain.last = NULL;
+ basereq->chain.first = NULL;
+ basereq->chain.last = NULL;
if (creq->src_nents) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
@@ -572,14 +579,14 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
}
}
- mv_cesa_tdma_desc_iter_init(&dreq->chain);
+ mv_cesa_tdma_desc_iter_init(&basereq->chain);
mv_cesa_ahash_req_iter_init(&iter, req);
/*
* Add the cache (left-over data from a previous block) first.
* This will never overflow the SRAM size.
*/
- ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags);
+ ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags);
if (ret)
goto err_free_tdma;
@@ -590,7 +597,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
* data. We intentionally do not add the final op block.
*/
while (true) {
- ret = mv_cesa_dma_add_op_transfers(&dreq->chain,
+ ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
&iter.base,
&iter.src, flags);
if (ret)
@@ -601,7 +608,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (!mv_cesa_ahash_req_iter_next_op(&iter))
break;
- op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
+ op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
frag_len, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
@@ -619,10 +626,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
* operation, which depends whether this is the final request.
*/
if (creq->last_req)
- op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq,
+ op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
frag_len, flags);
else if (frag_len)
- op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
+ op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
frag_len, flags);
if (IS_ERR(op)) {
@@ -632,7 +639,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (op) {
/* Add dummy desc to wait for crypto operation end */
- ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags);
+ ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
if (ret)
goto err_free_tdma;
}
@@ -643,10 +650,13 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
else
creq->cache_ptr = 0;
+ basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
+ CESA_TDMA_BREAK_CHAIN);
+
return 0;
err_free_tdma:
- mv_cesa_dma_cleanup(dreq);
+ mv_cesa_dma_cleanup(basereq);
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
err:
@@ -660,11 +670,6 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
int ret;
- if (cesa_dev->caps->has_tdma)
- creq->req.base.type = CESA_DMA_REQ;
- else
- creq->req.base.type = CESA_STD_REQ;
-
creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
@@ -678,19 +683,19 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
if (*cached)
return 0;
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (cesa_dev->caps->has_tdma)
ret = mv_cesa_ahash_dma_req_init(req);
return ret;
}
-static int mv_cesa_ahash_update(struct ahash_request *req)
+static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ struct mv_cesa_engine *engine;
bool cached = false;
int ret;
- creq->len += req->nbytes;
ret = mv_cesa_ahash_req_init(req, &cached);
if (ret)
return ret;
@@ -698,61 +703,48 @@ static int mv_cesa_ahash_update(struct ahash_request *req)
if (cached)
return 0;
- ret = mv_cesa_queue_req(&req->base);
+ engine = mv_cesa_select_engine(req->nbytes);
+ mv_cesa_ahash_prepare(&req->base, engine);
+
+ ret = mv_cesa_queue_req(&req->base, &creq->base);
+
if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ahash_cleanup(req);
return ret;
}
+static int mv_cesa_ahash_update(struct ahash_request *req)
+{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+
+ creq->len += req->nbytes;
+
+ return mv_cesa_ahash_queue_req(req);
+}
+
static int mv_cesa_ahash_final(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
- bool cached = false;
- int ret;
mv_cesa_set_mac_op_total_len(tmpl, creq->len);
creq->last_req = true;
req->nbytes = 0;
- ret = mv_cesa_ahash_req_init(req, &cached);
- if (ret)
- return ret;
-
- if (cached)
- return 0;
-
- ret = mv_cesa_queue_req(&req->base);
- if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ahash_cleanup(req);
-
- return ret;
+ return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_finup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
- bool cached = false;
- int ret;
creq->len += req->nbytes;
mv_cesa_set_mac_op_total_len(tmpl, creq->len);
creq->last_req = true;
- ret = mv_cesa_ahash_req_init(req, &cached);
- if (ret)
- return ret;
-
- if (cached)
- return 0;
-
- ret = mv_cesa_queue_req(&req->base);
- if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ahash_cleanup(req);
-
- return ret;
+ return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
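The two terminator flags set at the end of mv_cesa_ahash_dma_req_init() above are what keep the new engine-level TDMA chain correct across back-to-back requests; a hedged summary in code form (flags from cesa.h, the helper is illustrative):

/*
 * CESA_TDMA_END_OF_REQ  - mv_cesa_tdma_process() completes one request
 *                         when it reaches a descriptor carrying this flag.
 * CESA_TDMA_BREAK_CHAIN - mv_cesa_tdma_chain() appends the next request
 *                         to the software list but does not link next_dma,
 *                         since the hash state in SRAM must be reloaded
 *                         before the engine may continue.
 */
static void mark_last_desc_sketch(struct mv_cesa_tdma_desc *last,
				  bool is_hash)
{
	last->flags |= CESA_TDMA_END_OF_REQ;
	if (is_hash)
		last->flags |= CESA_TDMA_BREAK_CHAIN;
}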
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 0ad8f1ecf175..86a065bcc187 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -37,9 +37,9 @@ bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
return true;
}
-void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq)
+void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
- struct mv_cesa_engine *engine = dreq->base.engine;
+ struct mv_cesa_engine *engine = dreq->engine;
writel_relaxed(0, engine->regs + CESA_SA_CFG);
@@ -53,19 +53,25 @@ void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq)
engine->regs + CESA_SA_CFG);
writel_relaxed(dreq->chain.first->cur_dma,
engine->regs + CESA_TDMA_NEXT_ADDR);
+ BUG_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
-void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq)
+void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
{
struct mv_cesa_tdma_desc *tdma;
for (tdma = dreq->chain.first; tdma;) {
struct mv_cesa_tdma_desc *old_tdma = tdma;
+ u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;
- if (tdma->flags & CESA_TDMA_OP)
+ if (type == CESA_TDMA_OP)
dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
le32_to_cpu(tdma->src));
+ else if (type == CESA_TDMA_IV)
+ dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
+ le32_to_cpu(tdma->dst));
tdma = tdma->next;
dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -76,7 +82,7 @@ void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq)
dreq->chain.last = NULL;
}
-void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
+void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
struct mv_cesa_engine *engine)
{
struct mv_cesa_tdma_desc *tdma;
@@ -88,11 +94,97 @@ void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);
- if (tdma->flags & CESA_TDMA_OP)
+ if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
mv_cesa_adjust_op(engine, tdma->op);
}
}
+void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
+ struct mv_cesa_req *dreq)
+{
+ if (engine->chain.first == NULL && engine->chain.last == NULL) {
+ engine->chain.first = dreq->chain.first;
+ engine->chain.last = dreq->chain.last;
+ } else {
+ struct mv_cesa_tdma_desc *last;
+
+ last = engine->chain.last;
+ last->next = dreq->chain.first;
+ engine->chain.last = dreq->chain.last;
+
+ if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
+ last->next_dma = dreq->chain.first->cur_dma;
+ }
+}
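The chaining rule added above is the heart of the new per-engine TDMA list: the software next pointer is always linked, but the hardware next_dma pointer is linked only when the previous request did not set CESA_TDMA_BREAK_CHAIN, so the engine stops at request boundaries that need CPU intervention. A minimal sketch of that rule, assuming the driver's internal types from cesa.h (the wrapper name is hypothetical, not driver code):

static void sketch_chain_append(struct mv_cesa_tdma_chain *engine_chain,
				struct mv_cesa_tdma_chain *req_chain)
{
	struct mv_cesa_tdma_desc *last = engine_chain->last;

	if (!engine_chain->first) {
		/* Engine chain is empty: adopt the request's chain. */
		engine_chain->first = req_chain->first;
		engine_chain->last = req_chain->last;
		return;
	}

	/* The CPU-visible link is always made... */
	last->next = req_chain->first;
	engine_chain->last = req_chain->last;

	/* ...but the DMA engine only follows it when no break was asked. */
	if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
		last->next_dma = req_chain->first->cur_dma;
}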
+
+int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
+{
+ struct crypto_async_request *req = NULL;
+ struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
+ dma_addr_t tdma_cur;
+ int res = 0;
+
+ tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
+
+ for (tdma = engine->chain.first; tdma; tdma = next) {
+ spin_lock_bh(&engine->lock);
+ next = tdma->next;
+ spin_unlock_bh(&engine->lock);
+
+ if (tdma->flags & CESA_TDMA_END_OF_REQ) {
+ struct crypto_async_request *backlog = NULL;
+ struct mv_cesa_ctx *ctx;
+ u32 current_status;
+
+ spin_lock_bh(&engine->lock);
+ /*
+ * If req is NULL, we are processing the request that is
+ * already loaded in engine->req.
+ */
+ if (!req)
+ req = engine->req;
+ else
+ req = mv_cesa_dequeue_req_locked(engine,
+ &backlog);
+
+ /* Re-chaining to the next request */
+ engine->chain.first = tdma->next;
+ tdma->next = NULL;
+
+ /* If this is the last request, clear the chain */
+ if (engine->chain.first == NULL)
+ engine->chain.last = NULL;
+ spin_unlock_bh(&engine->lock);
+
+ ctx = crypto_tfm_ctx(req->tfm);
+ current_status = (tdma->cur_dma == tdma_cur) ?
+ status : CESA_SA_INT_ACC0_IDMA_DONE;
+ res = ctx->ops->process(req, current_status);
+ ctx->ops->complete(req);
+
+ if (res == 0)
+ mv_cesa_engine_enqueue_complete_request(engine,
+ req);
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+ }
+
+ if (res || tdma->cur_dma == tdma_cur)
+ break;
+ }
+
+ /* Save the last request in error to engine->req, so that the core
+ * knows which request was faulty */
+ if (res) {
+ spin_lock_bh(&engine->lock);
+ engine->req = req;
+ spin_unlock_bh(&engine->lock);
+ }
+
+ return res;
+}
+
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
@@ -117,6 +209,32 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
return new_tdma;
}
+int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+ u32 size, u32 flags, gfp_t gfp_flags)
+{
+ struct mv_cesa_tdma_desc *tdma;
+ u8 *iv;
+ dma_addr_t dma_handle;
+
+ tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
+ if (IS_ERR(tdma))
+ return PTR_ERR(tdma);
+
+ iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
+ if (!iv)
+ return -ENOMEM;
+
+ tdma->byte_cnt = cpu_to_le32(size | BIT(31));
+ tdma->src = src;
+ tdma->dst = cpu_to_le32(dma_handle);
+ tdma->data = iv;
+
+ flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
+ tdma->flags = flags | CESA_TDMA_IV;
+ return 0;
+}
+
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
const struct mv_cesa_op_ctx *op_templ,
bool skip_ctx,
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 59ed54e464a9..625ee50fd78b 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -11,7 +11,6 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -25,6 +24,7 @@
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
@@ -84,7 +84,7 @@ struct dcp_async_ctx {
unsigned int hot:1;
/* Crypto-specific context */
- struct crypto_ablkcipher *fallback;
+ struct crypto_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
};
@@ -374,20 +374,22 @@ static int dcp_chan_thread_aes(void *data)
static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
- ablkcipher_request_set_tfm(req, ctx->fallback);
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
if (enc)
- ret = crypto_ablkcipher_encrypt(req);
+ ret = crypto_skcipher_encrypt(subreq);
else
- ret = crypto_ablkcipher_decrypt(req);
+ ret = crypto_skcipher_decrypt(subreq);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ skcipher_request_zero(subreq);
return ret;
}
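The hunk above is an instance of the skcipher fallback idiom this series rolls out across drivers: build a sub-request on the stack, run it on the fallback transform, then wipe it so no key material lingers. A self-contained sketch of the idiom, with a hypothetical wrapper name (the crypto API calls themselves are real):

#include <crypto/skcipher.h>

static int sketch_fallback_crypt(struct crypto_skcipher *fallback,
				 struct scatterlist *src,
				 struct scatterlist *dst,
				 unsigned int nbytes, void *iv,
				 u32 req_flags, bool enc)
{
	SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
	int ret;

	skcipher_request_set_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, req_flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);

	ret = enc ? crypto_skcipher_encrypt(subreq) :
		    crypto_skcipher_decrypt(subreq);

	/* Wipe anything the sub-request left on the stack. */
	skcipher_request_zero(subreq);
	return ret;
}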
@@ -453,28 +455,22 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
- /* Check if the key size is supported by kernel at all. */
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
- tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
-
/*
* If the requested AES key size is not supported by the hardware,
* but is supported by in-kernel software implementation, we use
* software fallback.
*/
- actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- actx->fallback->base.crt_flags |=
- tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;
+ crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(actx->fallback,
+ tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
+ ret = crypto_skcipher_setkey(actx->fallback, key, len);
if (!ret)
return 0;
tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |=
- actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;
+ tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
+ CRYPTO_TFM_RES_MASK;
return ret;
}
@@ -484,9 +480,9 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
const char *name = crypto_tfm_alg_name(tfm);
const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- struct crypto_ablkcipher *blk;
+ struct crypto_skcipher *blk;
- blk = crypto_alloc_ablkcipher(name, 0, flags);
+ blk = crypto_alloc_skcipher(name, 0, flags);
if (IS_ERR(blk))
return PTR_ERR(blk);
@@ -499,8 +495,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- crypto_free_ablkcipher(actx->fallback);
- actx->fallback = NULL;
+ crypto_free_skcipher(actx->fallback);
}
/*
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 0794f1cc0018..42f0f229f7f7 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -392,7 +392,7 @@ static void nx_of_update_msc(struct device *dev,
((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
i < msc->triplets;
i++) {
- if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) {
+ if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
dev_err(dev, "unknown function code/mode "
"combo: %d/%d (ignored)\n", msc->fc,
msc->mode);
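The one-character change above fixes a classic off-by-one: NX_MAX_FC and NX_MAX_MODE are table sizes, so the last valid index is size - 1 and the guard must use >=. A standalone illustration (all names are made up):

#include <linux/errno.h>

#define SKETCH_MAX 4				/* table size, not last index */
static const int sketch_table[SKETCH_MAX] = { 10, 20, 30, 40 };

static int sketch_lookup(unsigned int idx)
{
	if (idx >= SKETCH_MAX)	/* '>' here would let idx == 4 through */
		return -EINVAL;
	return sketch_table[idx];
}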
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ce174d3b842c..4ab53a604312 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -528,8 +528,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
omap_aes_dma_stop(dd);
- dmaengine_terminate_all(dd->dma_lch_in);
- dmaengine_terminate_all(dd->dma_lch_out);
return 0;
}
@@ -580,10 +578,12 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
sg_init_table(&dd->in_sgl, 1);
sg_set_buf(&dd->in_sgl, buf_in, total);
dd->in_sg = &dd->in_sgl;
+ dd->in_sg_len = 1;
sg_init_table(&dd->out_sgl, 1);
sg_set_buf(&dd->out_sgl, buf_out, total);
dd->out_sg = &dd->out_sgl;
+ dd->out_sg_len = 1;
return 0;
}
@@ -604,7 +604,6 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
crypto_ablkcipher_reqtfm(req));
struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
struct omap_aes_reqctx *rctx;
- int len;
if (!dd)
return -ENODEV;
@@ -616,6 +615,14 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
dd->in_sg = req->src;
dd->out_sg = req->dst;
+ dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
+ if (dd->in_sg_len < 0)
+ return dd->in_sg_len;
+
+ dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
+ if (dd->out_sg_len < 0)
+ return dd->out_sg_len;
+
if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
omap_aes_check_aligned(dd->out_sg, dd->total)) {
if (omap_aes_copy_sgs(dd))
@@ -625,11 +632,6 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
dd->sgs_copied = 0;
}
- len = ALIGN(dd->total, AES_BLOCK_SIZE);
- dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
- dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
- BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
-
rctx = ablkcipher_request_ctx(req);
ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
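Replacing scatterwalk_bytes_sglen() plus BUG_ON() with sg_nents_for_len(), as the two hunks above do, turns a malformed scatterlist from a kernel crash into an ordinary error return. A sketch of the pattern with a hypothetical wrapper (sg_nents_for_len() is the real kernel API):

#include <linux/scatterlist.h>

static int sketch_sg_count(struct scatterlist *sg, unsigned int total,
			   int *nents)
{
	int n = sg_nents_for_len(sg, total);

	if (n < 0)		/* -EINVAL: list too short for 'total' */
		return n;

	*nents = n;
	return 0;
}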
@@ -1185,17 +1187,19 @@ static int omap_aes_probe(struct platform_device *pdev)
spin_unlock(&list_lock);
for (i = 0; i < dd->pdata->algs_info_size; i++) {
- for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
- algp = &dd->pdata->algs_info[i].algs_list[j];
+ if (!dd->pdata->algs_info[i].registered) {
+ for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+ algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->cra_name);
- INIT_LIST_HEAD(&algp->cra_list);
+ pr_debug("reg alg: %s\n", algp->cra_name);
+ INIT_LIST_HEAD(&algp->cra_list);
- err = crypto_register_alg(algp);
- if (err)
- goto err_algs;
+ err = crypto_register_alg(algp);
+ if (err)
+ goto err_algs;
- dd->pdata->algs_info[i].registered++;
+ dd->pdata->algs_info[i].registered++;
+ }
}
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 3eedb03111ba..5691434ffb2d 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -560,10 +560,12 @@ static int omap_des_copy_sgs(struct omap_des_dev *dd)
sg_init_table(&dd->in_sgl, 1);
sg_set_buf(&dd->in_sgl, buf_in, dd->total);
dd->in_sg = &dd->in_sgl;
+ dd->in_sg_len = 1;
sg_init_table(&dd->out_sgl, 1);
sg_set_buf(&dd->out_sgl, buf_out, dd->total);
dd->out_sg = &dd->out_sgl;
+ dd->out_sg_len = 1;
return 0;
}
@@ -595,6 +597,14 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
dd->in_sg = req->src;
dd->out_sg = req->dst;
+ dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
+ if (dd->in_sg_len < 0)
+ return dd->in_sg_len;
+
+ dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
+ if (dd->out_sg_len < 0)
+ return dd->out_sg_len;
+
if (omap_des_copy_needed(dd->in_sg) ||
omap_des_copy_needed(dd->out_sg)) {
if (omap_des_copy_sgs(dd))
@@ -604,10 +614,6 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
dd->sgs_copied = 0;
}
- dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
- dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
- BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
-
rctx = ablkcipher_request_ctx(req);
ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 63464e86f2b1..7fe4eef12fe2 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -100,6 +100,8 @@
#define DEFAULT_TIMEOUT_INTERVAL HZ
+#define DEFAULT_AUTOSUSPEND_DELAY 1000
+
/* mostly device flags */
#define FLAGS_BUSY 0
#define FLAGS_FINAL 1
@@ -173,7 +175,7 @@ struct omap_sham_ctx {
struct omap_sham_hmac_ctx base[0];
};
-#define OMAP_SHAM_QUEUE_LENGTH 1
+#define OMAP_SHAM_QUEUE_LENGTH 10
struct omap_sham_algs_info {
struct ahash_alg *algs_list;
@@ -813,7 +815,6 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- dmaengine_terminate_all(dd->dma_lch);
if (ctx->flags & BIT(FLAGS_SG)) {
dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
@@ -999,7 +1000,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
- pm_runtime_put(dd->dev);
+ pm_runtime_mark_last_busy(dd->dev);
+ pm_runtime_put_autosuspend(dd->dev);
if (req->base.complete)
req->base.complete(&req->base, err);
@@ -1093,7 +1095,7 @@ static int omap_sham_update(struct ahash_request *req)
ctx->offset = 0;
if (ctx->flags & BIT(FLAGS_FINUP)) {
- if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
+ if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 240) {
/*
* OMAP HW accel works only with buffers >= 9;
* smaller ones will switch to bypass in final()
@@ -1149,9 +1151,13 @@ static int omap_sham_final(struct ahash_request *req)
if (ctx->flags & BIT(FLAGS_ERROR))
return 0; /* uncompleted hash is not needed */
- /* OMAP HW accel works only with buffers >= 9 */
- /* HMAC is always >= 9 because ipad == block size */
- if ((ctx->digcnt + ctx->bufcnt) < 9)
+ /*
+ * OMAP HW accel works only with buffers >= 9.
+ * HMAC is always >= 9 because ipad == block size.
+ * If the buffer size is less than 240, we use fallback SW encoding,
+ * as using DMA + HW in this case doesn't provide any benefit.
+ */
+ if ((ctx->digcnt + ctx->bufcnt) < 240)
return omap_sham_final_shash(req);
else if (ctx->bufcnt)
return omap_sham_enqueue(req, OP_FINAL);
@@ -1328,7 +1334,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "omap-sha1",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
@@ -1351,7 +1357,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.halg.base = {
.cra_name = "md5",
.cra_driver_name = "omap-md5",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
@@ -1375,7 +1381,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "omap-hmac-sha1",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
@@ -1400,7 +1406,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.halg.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "omap-hmac-md5",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
@@ -1428,7 +1434,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.halg.base = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1450,7 +1456,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1473,7 +1479,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.halg.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1497,7 +1503,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1523,7 +1529,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.halg.base = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1545,7 +1551,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.halg.base = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1568,7 +1574,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.halg.base = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1592,7 +1598,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.halg.base = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1946,6 +1952,9 @@ static int omap_sham_probe(struct platform_device *pdev)
dd->flags |= dd->pdata->flags;
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+
pm_runtime_enable(dev);
pm_runtime_irq_safe(dev);
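Together, the probe and completion hunks above switch omap-sham to runtime-PM autosuspend: the device powers down only after it has been idle for DEFAULT_AUTOSUSPEND_DELAY milliseconds, instead of bouncing on every request. A sketch of the two halves of the idiom (function names hypothetical, the PM calls real):

#include <linux/pm_runtime.h>

static void sketch_pm_probe(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);	/* ms */
	pm_runtime_enable(dev);
}

/* On request completion, instead of a bare pm_runtime_put(): */
static void sketch_pm_request_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* suspend after the delay */
}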
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 3b1c7ecf078f..47576098831f 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -171,7 +171,7 @@ struct spacc_ablk_ctx {
* The fallback cipher. If the operation can't be done in hardware,
* fallback to a software version.
*/
- struct crypto_ablkcipher *sw_cipher;
+ struct crypto_skcipher *sw_cipher;
};
/* AEAD cipher context. */
@@ -789,33 +789,35 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* request for any other size (192 bits) then we need to do a software
* fallback.
*/
- if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
- ctx->sw_cipher) {
+ if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
+ if (!ctx->sw_cipher)
+ return -EINVAL;
+
/*
* Set the fallback transform to use the same request flags as
* the hardware transform.
*/
- ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- ctx->sw_cipher->base.crt_flags |=
- cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;
+ crypto_skcipher_clear_flags(ctx->sw_cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->sw_cipher,
+ cipher->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
+
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |=
+ crypto_skcipher_get_flags(ctx->sw_cipher) &
+ CRYPTO_TFM_RES_MASK;
- err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
if (err)
goto sw_setkey_failed;
- } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
- !ctx->sw_cipher)
- err = -EINVAL;
+ }
memcpy(ctx->key, key, len);
ctx->key_len = len;
sw_setkey_failed:
- if (err && ctx->sw_cipher) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
- }
-
return err;
}
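The setkey path above also follows a fixed idiom when deferring to a software cipher: copy the request flags onto the fallback before setkey, then copy the result flags back so the caller sees the fallback's verdict. A sketch with a hypothetical wrapper (the flag accessors are the real crypto API):

#include <crypto/skcipher.h>

static int sketch_fallback_setkey(struct crypto_tfm *tfm,
				  struct crypto_skcipher *fallback,
				  const u8 *key, unsigned int len)
{
	int err;

	crypto_skcipher_clear_flags(fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(fallback,
				  tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	err = crypto_skcipher_setkey(fallback, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(fallback) &
			  CRYPTO_TFM_RES_MASK;
	return err;
}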
@@ -910,20 +912,21 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
struct crypto_tfm *old_tfm =
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
int err;
- if (!ctx->sw_cipher)
- return -EINVAL;
-
/*
* Change the request to use the software fallback transform, and once
* the ciphering has completed, put the old transform back into the
* request.
*/
- ablkcipher_request_set_tfm(req, ctx->sw_cipher);
- err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
- crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));
+ skcipher_request_set_tfm(subreq, ctx->sw_cipher);
+ skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
return err;
}
@@ -1015,12 +1018,13 @@ static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ctx->sw_cipher = crypto_alloc_skcipher(
+ alg->cra_name, 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
alg->cra_name);
- ctx->sw_cipher = NULL;
+ return PTR_ERR(ctx->sw_cipher);
}
}
ctx->generic.key_offs = spacc_alg->key_offs;
@@ -1035,9 +1039,7 @@ static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->sw_cipher)
- crypto_free_ablkcipher(ctx->sw_cipher);
- ctx->sw_cipher = NULL;
+ crypto_free_skcipher(ctx->sw_cipher);
}
static int spacc_ablk_encrypt(struct ablkcipher_request *req)
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 85b44e577684..ce3cae40f949 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -4,12 +4,13 @@ config CRYPTO_DEV_QAT
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
select CRYPTO_AKCIPHER
+ select CRYPTO_DH
select CRYPTO_HMAC
+ select CRYPTO_RSA
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
select FW_LOADER
- select ASN1
config CRYPTO_DEV_QAT_DH895xCC
tristate "Support for Intel(R) DH895xCC"
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index c5bd5a9abc4d..6bc68bc00d76 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -229,6 +229,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+ hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index 879e04cae714..618cec360b39 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -239,6 +239,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+ hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 6d74b91f2152..92fb6ffdc062 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -1,11 +1,3 @@
-$(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
- $(obj)/qat_rsapubkey-asn1.h
-$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
- $(obj)/qat_rsaprivkey-asn1.h
-
-clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
-clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
-
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_isr.o \
@@ -19,8 +11,6 @@ intel_qat-objs := adf_cfg.o \
adf_hw_arbiter.o \
qat_crypto.o \
qat_algs.o \
- qat_rsapubkey-asn1.o \
- qat_rsaprivkey-asn1.o \
qat_asym_algs.o \
qat_uclo.o \
qat_hal.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5a07208ce778..e8822536530b 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -176,6 +176,7 @@ struct adf_hw_device_data {
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
+ void (*reset_device)(struct adf_accel_dev *accel_dev);
const char *fw_name;
const char *fw_mmp_name;
uint32_t fuses;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index b40d9c8dad96..2839fccdd84b 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -82,18 +82,12 @@ struct adf_reset_dev_data {
struct work_struct reset_work;
};
-void adf_dev_restore(struct adf_accel_dev *accel_dev)
+void adf_reset_sbr(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
struct pci_dev *parent = pdev->bus->self;
uint16_t bridge_ctl = 0;
- if (accel_dev->is_vf)
- return;
-
- dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
- accel_dev->accel_id);
-
if (!parent)
parent = pdev;
@@ -101,6 +95,8 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
dev_info(&GET_DEV(accel_dev),
"Transaction still in progress. Proceeding\n");
+ dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
+
pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
@@ -108,8 +104,40 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
msleep(100);
- pci_restore_state(pdev);
- pci_save_state(pdev);
+}
+EXPORT_SYMBOL_GPL(adf_reset_sbr);
+
+void adf_reset_flr(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ u16 control = 0;
+ int pos = 0;
+
+ dev_info(&GET_DEV(accel_dev), "Function level reset\n");
+ pos = pci_pcie_cap(pdev);
+ if (!pos) {
+ dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+ return;
+ }
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &control);
+ control |= PCI_EXP_DEVCTL_BCR_FLR;
+ pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, control);
+ msleep(100);
+}
+EXPORT_SYMBOL_GPL(adf_reset_flr);
+
+void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ if (hw_device->reset_device) {
+ dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
+ accel_dev->accel_id);
+ hw_device->reset_device(accel_dev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ }
}
static void adf_device_reset_worker(struct work_struct *work)
@@ -243,7 +271,8 @@ EXPORT_SYMBOL_GPL(adf_disable_aer);
int adf_init_aer(void)
{
- device_reset_wq = create_workqueue("qat_device_reset_wq");
+ device_reset_wq = alloc_workqueue("qat_device_reset_wq",
+ WQ_MEM_RECLAIM, 0);
return !device_reset_wq ? -EFAULT : 0;
}
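All three QAT workqueue conversions in this series follow the same recipe: create_workqueue() is deprecated in favour of alloc_workqueue(), and WQ_MEM_RECLAIM reserves a rescuer thread so the reset/response work can still make progress under memory pressure. Minimal sketch (queue name hypothetical):

#include <linux/workqueue.h>

static struct workqueue_struct *sketch_wq;

static int sketch_init(void)
{
	/* max_active = 0 picks the default concurrency limit. */
	sketch_wq = alloc_workqueue("sketch_wq", WQ_MEM_RECLAIM, 0);
	return sketch_wq ? 0 : -ENOMEM;
}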
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 75faa39bc8d0..980e07475012 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -141,6 +141,8 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
+void adf_reset_sbr(struct adf_accel_dev *accel_dev);
+void adf_reset_flr(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 4a526e2f1d7f..9320ae1d005b 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(adf_sriov_configure);
int __init adf_init_pf_wq(void)
{
/* Workqueue for PF2VF responses */
- pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+ pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
return !pf2vf_resp_wq ? -ENOMEM : 0;
}
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index aa689cabedb4..bf99e11a3403 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
int __init adf_init_vf_wq(void)
{
- adf_vf_stop_wq = create_workqueue("adf_vf_stop_wq");
+ adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
return !adf_vf_stop_wq ? -EFAULT : 0;
}
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1e8852a8a057..769148dbaeb3 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -947,13 +947,13 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
return 0;
out_free_all:
- memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
- dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ dma_free_coherent(dev, sizeof(*ctx->dec_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
out_free_enc:
- memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
- dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ dma_free_coherent(dev, sizeof(*ctx->enc_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
return -ENOMEM;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 05f49d4f94b2..0d35dca2e925 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -49,11 +49,12 @@
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
-#include "qat_rsapubkey-asn1.h"
-#include "qat_rsaprivkey-asn1.h"
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
@@ -75,6 +76,14 @@ struct qat_rsa_input_params {
dma_addr_t d;
dma_addr_t n;
} dec;
+ struct {
+ dma_addr_t c;
+ dma_addr_t p;
+ dma_addr_t q;
+ dma_addr_t dp;
+ dma_addr_t dq;
+ dma_addr_t qinv;
+ } dec_crt;
u64 in_tab[8];
};
} __packed __aligned(64);
@@ -95,71 +104,480 @@ struct qat_rsa_ctx {
char *n;
char *e;
char *d;
+ char *p;
+ char *q;
+ char *dp;
+ char *dq;
+ char *qinv;
dma_addr_t dma_n;
dma_addr_t dma_e;
dma_addr_t dma_d;
+ dma_addr_t dma_p;
+ dma_addr_t dma_q;
+ dma_addr_t dma_dp;
+ dma_addr_t dma_dq;
+ dma_addr_t dma_qinv;
unsigned int key_sz;
+ bool crt_mode;
+ struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_dh_input_params {
+ union {
+ struct {
+ dma_addr_t b;
+ dma_addr_t xa;
+ dma_addr_t p;
+ } in;
+ struct {
+ dma_addr_t xa;
+ dma_addr_t p;
+ } in_g2;
+ u64 in_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_dh_output_params {
+ union {
+ dma_addr_t r;
+ u64 out_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_dh_ctx {
+ char *g;
+ char *xa;
+ char *p;
+ dma_addr_t dma_g;
+ dma_addr_t dma_xa;
+ dma_addr_t dma_p;
+ unsigned int p_size;
+ bool g2;
struct qat_crypto_instance *inst;
} __packed __aligned(64);
-struct qat_rsa_request {
- struct qat_rsa_input_params in;
- struct qat_rsa_output_params out;
+struct qat_asym_request {
+ union {
+ struct qat_rsa_input_params rsa;
+ struct qat_dh_input_params dh;
+ } in;
+ union {
+ struct qat_rsa_output_params rsa;
+ struct qat_dh_output_params dh;
+ } out;
dma_addr_t phy_in;
dma_addr_t phy_out;
char *src_align;
char *dst_align;
struct icp_qat_fw_pke_request req;
- struct qat_rsa_ctx *ctx;
+ union {
+ struct qat_rsa_ctx *rsa;
+ struct qat_dh_ctx *dh;
+ } ctx;
+ union {
+ struct akcipher_request *rsa;
+ struct kpp_request *dh;
+ } areq;
int err;
+ void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);
-static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
- struct akcipher_request *areq = (void *)(__force long)resp->opaque;
- struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
- struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
+ struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+ struct kpp_request *areq = req->areq.dh;
+ struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
resp->pke_resp_hdr.comn_resp_flags);
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
- if (req->src_align)
- dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
- req->in.enc.m);
- else
- dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
- DMA_TO_DEVICE);
+ if (areq->src) {
+ if (req->src_align)
+ dma_free_coherent(dev, req->ctx.dh->p_size,
+ req->src_align, req->in.dh.in.b);
+ else
+ dma_unmap_single(dev, req->in.dh.in.b,
+ req->ctx.dh->p_size, DMA_TO_DEVICE);
+ }
- areq->dst_len = req->ctx->key_sz;
+ areq->dst_len = req->ctx.dh->p_size;
if (req->dst_align) {
- char *ptr = req->dst_align;
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
- while (!(*ptr) && areq->dst_len) {
- areq->dst_len--;
- ptr++;
- }
+ dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
+ req->out.dh.r);
+ } else {
+ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+ DMA_FROM_DEVICE);
+ }
- if (areq->dst_len != req->ctx->key_sz)
- memmove(req->dst_align, ptr, areq->dst_len);
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
- scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
- areq->dst_len, 1);
+ kpp_request_complete(areq, err);
+}
+
+#define PKE_DH_1536 0x390c1a49
+#define PKE_DH_G2_1536 0x2e0b1a3e
+#define PKE_DH_2048 0x4d0c1a60
+#define PKE_DH_G2_2048 0x3e0b1a55
+#define PKE_DH_3072 0x510c1a77
+#define PKE_DH_G2_3072 0x3a0b1a6c
+#define PKE_DH_4096 0x690c1a8e
+#define PKE_DH_G2_4096 0x4a0b1a83
+
+static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 1536:
+ return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
+ case 2048:
+ return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
+ case 3072:
+ return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
+ case 4096:
+ return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
+ default:
+ return 0;
+ }
+}
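As a worked example of the mapping above (illustrative only): p_size is in bytes, so the shift converts to bits before the switch.

static void sketch_fn_id_examples(void)
{
	unsigned long id;

	id = qat_dh_fn_id(256, false);	/* 256 << 3 = 2048 -> PKE_DH_2048 */
	id = qat_dh_fn_id(256, true);	/* generator 2     -> PKE_DH_G2_2048 */
	id = qat_dh_fn_id(100, false);	/* 800 bits: unsupported, returns 0 */
	(void)id;
}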
+
+static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
+{
+ return kpp_tfm_ctx(tfm);
+}
+
+static int qat_dh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(kpp_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ int ret, ctr = 0;
+ int n_input_params = 0;
+
+ if (unlikely(!ctx->xa))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->p_size) {
+ req->dst_len = ctx->p_size;
+ return -EOVERFLOW;
+ }
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+ msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
+ !req->src && ctx->g2);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->cb = qat_dh_cb;
+ qat_req->ctx.dh = ctx;
+ qat_req->areq.dh = req;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
- dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
- req->out.enc.c);
+ /*
+ * If no source is provided, use g as the base.
+ */
+ if (req->src) {
+ qat_req->in.dh.in.xa = ctx->dma_xa;
+ qat_req->in.dh.in.p = ctx->dma_p;
+ n_input_params = 3;
} else {
- char *ptr = sg_virt(areq->dst);
+ if (ctx->g2) {
+ qat_req->in.dh.in_g2.xa = ctx->dma_xa;
+ qat_req->in.dh.in_g2.p = ctx->dma_p;
+ n_input_params = 2;
+ } else {
+ qat_req->in.dh.in.b = ctx->dma_g;
+ qat_req->in.dh.in.xa = ctx->dma_xa;
+ qat_req->in.dh.in.p = ctx->dma_p;
+ n_input_params = 3;
+ }
+ }
- while (!(*ptr) && areq->dst_len) {
- areq->dst_len--;
- ptr++;
+ ret = -ENOMEM;
+ if (req->src) {
+ /*
+ * src can be of any size within the valid range, but the HW
+ * expects it to be the same size as the modulus p, so if the
+ * sizes differ we need to allocate a new buffer and copy the
+ * src data into it. Otherwise we just need to map the
+ * user-provided buffer, making sure it is contiguous.
+ */
+ if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
+ qat_req->src_align = NULL;
+ qat_req->in.dh.in.b = dma_map_single(dev,
+ sg_virt(req->src),
+ req->src_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev,
+ qat_req->in.dh.in.b)))
+ return ret;
+
+ } else {
+ int shift = ctx->p_size - req->src_len;
+
+ qat_req->src_align = dma_zalloc_coherent(dev,
+ ctx->p_size,
+ &qat_req->in.dh.in.b,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift,
+ req->src, 0, req->src_len, 0);
}
+ }
+ /*
+ * dst can be of any size within the valid range, but the HW expects
+ * it to be the same size as the modulus, so if the sizes differ we
+ * need to allocate a new buffer and copy the result out of it
+ * afterwards. Otherwise we just need to map the user-provided
+ * buffer, making sure it is contiguous.
+ */
+ if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
+ qat_req->dst_align = NULL;
+ qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
+ req->dst_len,
+ DMA_FROM_DEVICE);
- if (sg_virt(areq->dst) != ptr && areq->dst_len)
- memmove(sg_virt(areq->dst), ptr, areq->dst_len);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+ goto unmap_src;
+
+ } else {
+ qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
+ &qat_req->out.dh.r,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
+ }
- dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+ qat_req->in.dh.in_tab[n_input_params] = 0;
+ qat_req->out.dh.out_tab[1] = 0;
+ /* Mapping in.in.b or in.in_g2.xa is the same */
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
+ sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+ goto unmap_dst;
+
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap_in_params;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ msg->input_param_count = n_input_params;
+ msg->output_param_count = 1;
+
+ do {
+ ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ } while (ret == -EBUSY && ctr++ < 100);
+
+ if (!ret)
+ return -EINPROGRESS;
+
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+unmap_dst:
+ if (qat_req->dst_align)
+ dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
+ qat_req->out.dh.r);
+ else
+ if (!dma_mapping_error(dev, qat_req->out.dh.r))
+ dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+ DMA_FROM_DEVICE);
+unmap_src:
+ if (req->src) {
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
+ qat_req->in.dh.in.b);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+ dma_unmap_single(dev, qat_req->in.dh.in.b,
+ ctx->p_size,
+ DMA_TO_DEVICE);
+ }
+ return ret;
+}
+
+static int qat_dh_check_params_length(unsigned int p_len)
+{
+ switch (p_len) {
+ case 1536:
+ case 2048:
+ case 3072:
+ case 4096:
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+
+ if (unlikely(!params->p || !params->g))
+ return -EINVAL;
+
+ if (qat_dh_check_params_length(params->p_size << 3))
+ return -EINVAL;
+
+ ctx->p_size = params->p_size;
+ ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+ if (!ctx->p)
+ return -ENOMEM;
+ memcpy(ctx->p, params->p, ctx->p_size);
+
+ /* If g equals 2 don't copy it */
+ if (params->g_size == 1 && *(char *)params->g == 0x02) {
+ ctx->g2 = true;
+ return 0;
+ }
+
+ ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+ if (!ctx->g) {
+ dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+ return -ENOMEM;
+ }
+ memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
+ params->g_size);
+
+ return 0;
+}
+
+static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
+{
+ if (ctx->g) {
+ dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
+ ctx->g = NULL;
+ }
+ if (ctx->xa) {
+ dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
+ ctx->xa = NULL;
+ }
+ if (ctx->p) {
+ dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+ }
+ ctx->p_size = 0;
+ ctx->g2 = false;
+}
+
+static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf,
+ unsigned int len)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ struct dh params;
+ int ret;
+
+ if (crypto_dh_decode_key(buf, len, &params) < 0)
+ return -EINVAL;
+
+ /* Free old secret if any */
+ qat_dh_clear_ctx(dev, ctx);
+
+ ret = qat_dh_set_params(ctx, &params);
+ if (ret < 0)
+ return ret;
+
+ ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+ GFP_KERNEL);
+ if (!ctx->xa) {
+ qat_dh_clear_ctx(dev, ctx);
+ return -ENOMEM;
+ }
+ memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
+ params.key_size);
+
+ return 0;
+}
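For context, qat_dh_set_secret() is reached through the generic kpp API: a caller packs struct dh with crypto_dh_encode_key() and hands the blob to crypto_kpp_set_secret(). A hedged usage sketch for the kpp API of this era (error paths trimmed, function name hypothetical):

#include <crypto/kpp.h>
#include <crypto/dh.h>
#include <linux/slab.h>

static int sketch_dh_setup(struct dh *params)
{
	struct crypto_kpp *tfm = crypto_alloc_kpp("dh", 0, 0);
	unsigned int len;
	char *buf;
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	len = crypto_dh_key_len(params);
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf) {
		crypto_free_kpp(tfm);
		return -ENOMEM;
	}

	err = crypto_dh_encode_key(buf, len, params);
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, len);

	kzfree(buf);		/* the blob holds the private key */
	crypto_free_kpp(tfm);
	return err;
}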
+
+static int qat_dh_max_size(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->p ? ctx->p_size : -EINVAL;
+}
+
+static int qat_dh_init_tfm(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(get_current_node());
+
+ if (!inst)
+ return -EINVAL;
+
+ ctx->p_size = 0;
+ ctx->g2 = false;
+ ctx->inst = inst;
+ return 0;
+}
+
+static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+ qat_dh_clear_ctx(dev, ctx);
+ qat_crypto_put_instance(ctx->inst);
+}
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+ struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+ struct akcipher_request *areq = req->areq.rsa;
+ struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
+ int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ resp->pke_resp_hdr.comn_resp_flags);
+
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+ if (req->src_align)
+ dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
+ req->in.rsa.enc.m);
+ else
+ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+ DMA_TO_DEVICE);
+
+ areq->dst_len = req->ctx.rsa->key_sz;
+ if (req->dst_align) {
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
+
+ dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
+ req->out.rsa.enc.c);
+ } else {
+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
DMA_FROM_DEVICE);
}
@@ -175,8 +593,9 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
void qat_alg_asym_callback(void *_resp)
{
struct icp_qat_fw_pke_resp *resp = _resp;
+ struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
- qat_rsa_cb(resp);
+ areq->cb(resp);
}
#define PKE_RSA_EP_512 0x1c161b21
@@ -237,13 +656,42 @@ static unsigned long qat_rsa_dec_fn_id(unsigned int len)
};
}
+#define PKE_RSA_DP2_512 0x1c131b57
+#define PKE_RSA_DP2_1024 0x26131c2d
+#define PKE_RSA_DP2_1536 0x45111d12
+#define PKE_RSA_DP2_2048 0x59121dfa
+#define PKE_RSA_DP2_3072 0x81121ed9
+#define PKE_RSA_DP2_4096 0xb1111fb2
+
+static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_DP2_512;
+ case 1024:
+ return PKE_RSA_DP2_1024;
+ case 1536:
+ return PKE_RSA_DP2_1536;
+ case 2048:
+ return PKE_RSA_DP2_2048;
+ case 3072:
+ return PKE_RSA_DP2_3072;
+ case 4096:
+ return PKE_RSA_DP2_4096;
+ default:
+ return 0;
+ }
+}
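For reference, the DP2 function IDs above select the standard RSA-CRT recombination (standard mathematics, not taken from the driver):

m_1 = c^{d_P} \bmod p, \qquad m_2 = c^{d_Q} \bmod q
h = q_{\mathrm{inv}} \cdot (m_1 - m_2) \bmod p
m = m_2 + h \cdot q

where d_P = d \bmod (p-1), d_Q = d \bmod (q-1) and q_{\mathrm{inv}} = q^{-1} \bmod p — exactly the five half-size values (p, q, dp, dq, qinv) carried by the dec_crt input struct added earlier in this patch.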
+
static int qat_rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- struct qat_rsa_request *qat_req =
+ struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
int ret, ctr = 0;
@@ -262,14 +710,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
if (unlikely(!msg->pke_hdr.cd_pars.func_id))
return -EINVAL;
- qat_req->ctx = ctx;
+ qat_req->cb = qat_rsa_cb;
+ qat_req->ctx.rsa = ctx;
+ qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
- qat_req->in.enc.e = ctx->dma_e;
- qat_req->in.enc.n = ctx->dma_n;
+ qat_req->in.rsa.enc.e = ctx->dma_e;
+ qat_req->in.rsa.enc.n = ctx->dma_n;
ret = -ENOMEM;
/*
@@ -281,16 +731,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
+ qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
req->src_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
return ret;
} else {
int shift = ctx->key_sz - req->src_len;
qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.enc.m,
+ &qat_req->in.rsa.enc.m,
GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -300,30 +750,30 @@ static int qat_rsa_enc(struct akcipher_request *req)
}
if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
qat_req->dst_align = NULL;
- qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
+ qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
+ req->dst_len,
+ DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
goto unmap_src;
} else {
qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.enc.c,
+ &qat_req->out.rsa.enc.c,
GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
}
- qat_req->in.in_tab[3] = 0;
- qat_req->out.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
+ qat_req->in.rsa.in_tab[3] = 0;
+ qat_req->out.rsa.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -331,7 +781,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)req;
+ msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
msg->input_param_count = 3;
msg->output_param_count = 1;
do {
@@ -353,19 +803,19 @@ unmap_in_params:
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.enc.c);
+ qat_req->out.rsa.enc.c);
else
- if (!dma_mapping_error(dev, qat_req->out.enc.c))
- dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
- DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+ dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+ ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
if (qat_req->src_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.enc.m);
+ qat_req->in.rsa.enc.m);
else
- if (!dma_mapping_error(dev, qat_req->in.enc.m))
- dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
- DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+ dma_unmap_single(dev, qat_req->in.rsa.enc.m,
+ ctx->key_sz, DMA_TO_DEVICE);
return ret;
}
@@ -375,7 +825,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- struct qat_rsa_request *qat_req =
+ struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
int ret, ctr = 0;
@@ -390,18 +840,30 @@ static int qat_rsa_dec(struct akcipher_request *req)
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
- msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
+ msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
+ qat_rsa_dec_fn_id_crt(ctx->key_sz) :
+ qat_rsa_dec_fn_id(ctx->key_sz);
if (unlikely(!msg->pke_hdr.cd_pars.func_id))
return -EINVAL;
- qat_req->ctx = ctx;
+ qat_req->cb = qat_rsa_cb;
+ qat_req->ctx.rsa = ctx;
+ qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
- qat_req->in.dec.d = ctx->dma_d;
- qat_req->in.dec.n = ctx->dma_n;
+ if (ctx->crt_mode) {
+ qat_req->in.rsa.dec_crt.p = ctx->dma_p;
+ qat_req->in.rsa.dec_crt.q = ctx->dma_q;
+ qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
+ qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
+ qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
+ } else {
+ qat_req->in.rsa.dec.d = ctx->dma_d;
+ qat_req->in.rsa.dec.n = ctx->dma_n;
+ }
ret = -ENOMEM;
/*
@@ -413,16 +875,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
+ qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
req->dst_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
return ret;
} else {
int shift = ctx->key_sz - req->src_len;
qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.dec.c,
+ &qat_req->in.rsa.dec.c,
GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -432,31 +894,34 @@ static int qat_rsa_dec(struct akcipher_request *req)
}
if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
qat_req->dst_align = NULL;
- qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
+ qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
req->dst_len,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
goto unmap_src;
} else {
qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.dec.m,
+ &qat_req->out.rsa.dec.m,
GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
}
- qat_req->in.in_tab[3] = 0;
- qat_req->out.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
+ if (ctx->crt_mode)
+ qat_req->in.rsa.in_tab[6] = 0;
+ else
+ qat_req->in.rsa.in_tab[3] = 0;
+ qat_req->out.rsa.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -464,8 +929,12 @@ static int qat_rsa_dec(struct akcipher_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)req;
- msg->input_param_count = 3;
+ msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ if (ctx->crt_mode)
+ msg->input_param_count = 6;
+ else
+ msg->input_param_count = 3;
+
msg->output_param_count = 1;
do {
ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
@@ -486,26 +955,24 @@ unmap_in_params:
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.dec.m);
+ qat_req->out.rsa.dec.m);
else
- if (!dma_mapping_error(dev, qat_req->out.dec.m))
- dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
- DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+ dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+ ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
if (qat_req->src_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.dec.c);
+ qat_req->in.rsa.dec.c);
else
- if (!dma_mapping_error(dev, qat_req->in.dec.c))
- dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
- DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+ dma_unmap_single(dev, qat_req->in.rsa.dec.c,
+ ctx->key_sz, DMA_TO_DEVICE);
return ret;
}
-int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
+int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
- struct qat_rsa_ctx *ctx = context;
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
@@ -518,11 +985,6 @@ int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
ctx->key_sz = vlen;
ret = -EINVAL;
- /* In FIPS mode only allow key size 2K & 3K */
- if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
- pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
- goto err;
- }
/* invalid key size provided */
if (!qat_rsa_enc_fn_id(ctx->key_sz))
goto err;
@@ -540,10 +1002,8 @@ err:
return ret;
}
-int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
+int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
- struct qat_rsa_ctx *ctx = context;
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
@@ -559,18 +1019,15 @@ int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
}
ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
- if (!ctx->e) {
- ctx->e = NULL;
+ if (!ctx->e)
return -ENOMEM;
- }
+
memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
return 0;
}
-int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
+int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
- struct qat_rsa_ctx *ctx = context;
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
@@ -585,12 +1042,6 @@ int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
goto err;
- /* In FIPS mode only allow key size 2K & 3K */
- if (fips_enabled && (vlen != 256 && vlen != 384)) {
- pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
- goto err;
- }
-
ret = -ENOMEM;
ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
if (!ctx->d)
@@ -603,12 +1054,106 @@ err:
return ret;
}
-static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
- unsigned int keylen, bool private)
+static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
- struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct device *dev = &GET_DEV(ctx->inst->accel_dev);
- int ret;
+ while (!**ptr && *len) {
+ (*ptr)++;
+ (*len)--;
+ }
+}
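
The helper walks past big-endian leading zero bytes so the remaining value can be right-aligned into a fixed-width buffer, as the setkey paths below do. A self-contained restatement of the same logic (illustrative values only):

/* Sketch: strip leading zeros, then right-align into a fixed buffer. */
#include <stdio.h>
#include <string.h>

static void drop_leading_zeros(const unsigned char **p, unsigned int *len)
{
	while (*len && !**p) {
		(*p)++;
		(*len)--;
	}
}

int main(void)
{
	const unsigned char val[] = { 0x00, 0x00, 0x12, 0x34 };
	const unsigned char *p = val;
	unsigned int len = sizeof(val);
	unsigned char buf[8] = { 0 };

	drop_leading_zeros(&p, &len);
	memcpy(buf + sizeof(buf) - len, p, len);	/* right-align */
	printf("len=%u last=%02x\n", len, buf[7]);	/* len=2 last=34 */
	return 0;
}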
+
+static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr;
+ unsigned int len;
+ unsigned int half_key_sz = ctx->key_sz / 2;
+
+ /* p */
+ ptr = rsa_key->p;
+ len = rsa_key->p_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto err;
+ ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+ if (!ctx->p)
+ goto err;
+ memcpy(ctx->p + (half_key_sz - len), ptr, len);
+
+ /* q */
+ ptr = rsa_key->q;
+ len = rsa_key->q_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_p;
+ ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+ if (!ctx->q)
+ goto free_p;
+ memcpy(ctx->q + (half_key_sz - len), ptr, len);
+
+ /* dp */
+ ptr = rsa_key->dp;
+ len = rsa_key->dp_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_q;
+ ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+ GFP_KERNEL);
+ if (!ctx->dp)
+ goto free_q;
+ memcpy(ctx->dp + (half_key_sz - len), ptr, len);
+
+ /* dq */
+ ptr = rsa_key->dq;
+ len = rsa_key->dq_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_dp;
+ ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+ GFP_KERNEL);
+ if (!ctx->dq)
+ goto free_dp;
+ memcpy(ctx->dq + (half_key_sz - len), ptr, len);
+
+ /* qinv */
+ ptr = rsa_key->qinv;
+ len = rsa_key->qinv_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_dq;
+ ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+ GFP_KERNEL);
+ if (!ctx->qinv)
+ goto free_dq;
+ memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
+
+ ctx->crt_mode = true;
+ return;
+
+free_dq:
+ memset(ctx->dq, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+ ctx->dq = NULL;
+free_dp:
+ memset(ctx->dp, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+ ctx->dp = NULL;
+free_q:
+ memset(ctx->q, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+ ctx->q = NULL;
+free_p:
+ memset(ctx->p, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+err:
+ ctx->crt_mode = false;
+}
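
For reference, the five extra private-key members satisfy dp = d mod (p-1), dq = d mod (q-1) and qinv = q^-1 mod p; the device recombines m = m2 + q * (qinv * (m1 - m2) mod p) from m1 = c^dp mod p and m2 = c^dq mod q. A toy check of that recombination with small primes (illustrative numbers, not driver code):

/* Sketch: RSA-CRT recombination with toy parameters (p=11, q=13, e=7).
 * Illustrative only; real keys are key_sz-byte integers, not ints.
 */
#include <stdio.h>

static unsigned int powmod(unsigned int b, unsigned int e, unsigned int m)
{
	unsigned int r = 1;

	b %= m;
	while (e) {
		if (e & 1)
			r = r * b % m;
		b = b * b % m;
		e >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int p = 11, q = 13, e = 7, d = 103;	/* d = e^-1 mod 120 */
	unsigned int dp = d % (p - 1), dq = d % (q - 1);
	unsigned int qinv = 6;				/* 13^-1 mod 11 */
	unsigned int m = 42, c = powmod(m, e, p * q);
	unsigned int m1 = powmod(c, dp, p);
	unsigned int m2 = powmod(c, dq, q);
	unsigned int h = qinv * ((m1 + p - m2 % p) % p) % p;

	printf("m = %u\n", m2 + h * q);			/* prints: m = 42 */
	return 0;
}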
+
+static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
+{
+ unsigned int half_key_sz = ctx->key_sz / 2;
/* Free the old key if any */
if (ctx->n)
@@ -619,19 +1164,68 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
memset(ctx->d, '\0', ctx->key_sz);
dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
}
+ if (ctx->p) {
+ memset(ctx->p, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+ }
+ if (ctx->q) {
+ memset(ctx->q, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+ }
+ if (ctx->dp) {
+ memset(ctx->dp, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+ }
+ if (ctx->dq) {
+ memset(ctx->dq, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+ }
+ if (ctx->qinv) {
+ memset(ctx->qinv, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
+ }
ctx->n = NULL;
ctx->e = NULL;
ctx->d = NULL;
+ ctx->p = NULL;
+ ctx->q = NULL;
+ ctx->dp = NULL;
+ ctx->dq = NULL;
+ ctx->qinv = NULL;
+ ctx->crt_mode = false;
+ ctx->key_sz = 0;
+}
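
The teardown clears each buffer before dma_free_coherent(). A variant sketch of one branch using memzero_explicit(), which the compiler cannot optimize away (same buffer/DMA-handle pairing assumed):

/* Sketch: zeroize-then-free for one coherent key buffer. */
#include <linux/dma-mapping.h>
#include <linux/string.h>	/* memzero_explicit() */

static void free_key_part(struct device *dev, void **buf, dma_addr_t dma,
			  size_t len)
{
	if (!*buf)
		return;
	memzero_explicit(*buf, len);	/* clear survives optimization */
	dma_free_coherent(dev, len, *buf, dma);
	*buf = NULL;
}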
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ struct rsa_key rsa_key;
+ int ret;
+
+ qat_rsa_clear_ctx(dev, ctx);
if (private)
- ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key,
- keylen);
+ ret = rsa_parse_priv_key(&rsa_key, key, keylen);
else
- ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key,
- keylen);
+ ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+ if (ret < 0)
+ goto free;
+
+ ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
if (ret < 0)
goto free;
+ ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
+ if (ret < 0)
+ goto free;
+ if (private) {
+ ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+ if (ret < 0)
+ goto free;
+ qat_rsa_setkey_crt(ctx, &rsa_key);
+ }
if (!ctx->n || !ctx->e) {
/* invalid key provided */
@@ -646,20 +1240,7 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
return 0;
free:
- if (ctx->d) {
- memset(ctx->d, '\0', ctx->key_sz);
- dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
- ctx->d = NULL;
- }
- if (ctx->e) {
- dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
- ctx->e = NULL;
- }
- if (ctx->n) {
- dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
- ctx->n = NULL;
- ctx->key_sz = 0;
- }
+ qat_rsa_clear_ctx(dev, ctx);
return ret;
}
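
With the ASN.1 decoders gone, callers still hand the driver a BER/DER key blob; rsa_parse_priv_key()/rsa_parse_pub_key() now do the unpacking internally. A sketch of the consumer side ('der'/'der_len' are assumed inputs):

/* Sketch: consumer side of the setkey path above. */
#include <linux/err.h>
#include <crypto/akcipher.h>

static int load_rsa_key(const void *der, unsigned int der_len)
{
	struct crypto_akcipher *tfm;
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_priv_key(tfm, der, der_len);
	crypto_free_akcipher(tfm);
	return ret;
}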
@@ -725,7 +1306,7 @@ static struct akcipher_alg rsa = {
.max_size = qat_rsa_max_size,
.init = qat_rsa_init_tfm,
.exit = qat_rsa_exit_tfm,
- .reqsize = sizeof(struct qat_rsa_request) + 64,
+ .reqsize = sizeof(struct qat_asym_request) + 64,
.base = {
.cra_name = "rsa",
.cra_driver_name = "qat-rsa",
@@ -735,6 +1316,23 @@ static struct akcipher_alg rsa = {
},
};
+static struct kpp_alg dh = {
+ .set_secret = qat_dh_set_secret,
+ .generate_public_key = qat_dh_compute_value,
+ .compute_shared_secret = qat_dh_compute_value,
+ .max_size = qat_dh_max_size,
+ .init = qat_dh_init_tfm,
+ .exit = qat_dh_exit_tfm,
+ .reqsize = sizeof(struct qat_asym_request) + 64,
+ .base = {
+ .cra_name = "dh",
+ .cra_driver_name = "qat-dh",
+ .cra_priority = 1000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct qat_dh_ctx),
+ },
+};
+
int qat_asym_algs_register(void)
{
int ret = 0;
@@ -743,7 +1341,11 @@ int qat_asym_algs_register(void)
if (++active_devs == 1) {
rsa.base.cra_flags = 0;
ret = crypto_register_akcipher(&rsa);
+ if (ret)
+ goto unlock;
+ ret = crypto_register_kpp(&dh);
}
+unlock:
mutex_unlock(&algs_lock);
return ret;
}
@@ -751,7 +1353,9 @@ int qat_asym_algs_register(void)
void qat_asym_algs_unregister(void)
{
mutex_lock(&algs_lock);
- if (--active_devs == 0)
+ if (--active_devs == 0) {
crypto_unregister_akcipher(&rsa);
+ crypto_unregister_kpp(&dh);
+ }
mutex_unlock(&algs_lock);
}
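
Registration is now two-step with an unwind label. A condensed sketch of the first-user-registers, last-user-unregisters pattern; register_first_alg()/register_second_alg() are hypothetical stand-ins for the akcipher/kpp calls:

/* Sketch: count-guarded two-step registration with unwind label. */
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_users;

int register_first_alg(void);	/* hypothetical stubs */
int register_second_alg(void);

int demo_algs_register(void)
{
	int ret = 0;

	mutex_lock(&demo_lock);
	if (++demo_users == 1) {
		ret = register_first_alg();
		if (ret)
			goto unlock;
		ret = register_second_alg();
	}
unlock:
	mutex_unlock(&demo_lock);
	return ret;
}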
diff --git a/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 b/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
deleted file mode 100644
index f0066adb79b8..000000000000
--- a/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
+++ /dev/null
@@ -1,11 +0,0 @@
-RsaPrivKey ::= SEQUENCE {
- version INTEGER,
- n INTEGER ({ qat_rsa_get_n }),
- e INTEGER ({ qat_rsa_get_e }),
- d INTEGER ({ qat_rsa_get_d }),
- prime1 INTEGER,
- prime2 INTEGER,
- exponent1 INTEGER,
- exponent2 INTEGER,
- coefficient INTEGER
-}
diff --git a/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1 b/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
deleted file mode 100644
index bd667b31a21a..000000000000
--- a/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
+++ /dev/null
@@ -1,4 +0,0 @@
-RsaPubKey ::= SEQUENCE {
- n INTEGER ({ qat_rsa_get_n }),
- e INTEGER ({ qat_rsa_get_e })
-}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 6e1d5e185526..1dfcab317bed 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -252,6 +252,7 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+ hw_data->reset_device = adf_reset_sbr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index dbcbbe242bd6..b04b42f48366 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -15,8 +15,8 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
#include <crypto/des.h>
+#include <crypto/internal/skcipher.h>
#include "cipher.h"
@@ -189,7 +189,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
fallback:
- ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
ctx->enc_keylen = keylen;
return ret;
@@ -212,10 +212,16 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- ablkcipher_request_set_tfm(req, ctx->fallback);
- ret = encrypt ? crypto_ablkcipher_encrypt(req) :
- crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
return ret;
}
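
This is the standard on-stack skcipher fallback pattern that the qce and sahara conversions in this patch both use. A minimal sketch ('fb' is an assumed fallback tfm allocated with CRYPTO_ALG_NEED_FALLBACK):

/* Sketch: route one request through a software fallback via an
 * on-stack subrequest, then wipe it.
 */
#include <crypto/skcipher.h>

static int do_fallback(struct crypto_skcipher *fb, struct scatterlist *src,
		       struct scatterlist *dst, unsigned int len, u8 *iv,
		       u32 flags, bool enc)
{
	int ret;

	SKCIPHER_REQUEST_ON_STACK(subreq, fb);

	skcipher_request_set_tfm(subreq, fb);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst, len, iv);
	ret = enc ? crypto_skcipher_encrypt(subreq) :
		    crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe key/IV state off the stack */
	return ret;
}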
@@ -239,10 +245,9 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
memset(ctx, 0, sizeof(*ctx));
tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
- ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
- CRYPTO_ALG_TYPE_ABLKCIPHER,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);
@@ -253,7 +258,7 @@ static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_ablkcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
}
struct qce_ablkcipher_def {
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 5c6a5f8633e5..2b0278bb6e92 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -22,7 +22,7 @@
struct qce_cipher_ctx {
u8 enc_key[QCE_MAX_KEY_SIZE];
unsigned int enc_keylen;
- struct crypto_ablkcipher *fallback;
+ struct crypto_skcipher *fallback;
};
/**
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 2b3a0cfe3331..dce1af0ce85c 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -155,43 +155,43 @@
* expansion of its usage.
*/
struct samsung_aes_variant {
- unsigned int aes_offset;
+ unsigned int aes_offset;
};
struct s5p_aes_reqctx {
- unsigned long mode;
+ unsigned long mode;
};
struct s5p_aes_ctx {
- struct s5p_aes_dev *dev;
+ struct s5p_aes_dev *dev;
- uint8_t aes_key[AES_MAX_KEY_SIZE];
- uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
- int keylen;
+ uint8_t aes_key[AES_MAX_KEY_SIZE];
+ uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
+ int keylen;
};
struct s5p_aes_dev {
- struct device *dev;
- struct clk *clk;
- void __iomem *ioaddr;
- void __iomem *aes_ioaddr;
- int irq_fc;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *ioaddr;
+ void __iomem *aes_ioaddr;
+ int irq_fc;
- struct ablkcipher_request *req;
- struct s5p_aes_ctx *ctx;
- struct scatterlist *sg_src;
- struct scatterlist *sg_dst;
+ struct ablkcipher_request *req;
+ struct s5p_aes_ctx *ctx;
+ struct scatterlist *sg_src;
+ struct scatterlist *sg_dst;
/* In case of unaligned access: */
- struct scatterlist *sg_src_cpy;
- struct scatterlist *sg_dst_cpy;
+ struct scatterlist *sg_src_cpy;
+ struct scatterlist *sg_dst_cpy;
- struct tasklet_struct tasklet;
- struct crypto_queue queue;
- bool busy;
- spinlock_t lock;
+ struct tasklet_struct tasklet;
+ struct crypto_queue queue;
+ bool busy;
+ spinlock_t lock;
- struct samsung_aes_variant *variant;
+ struct samsung_aes_variant *variant;
};
static struct s5p_aes_dev *s5p_dev;
@@ -421,11 +421,11 @@ static bool s5p_aes_rx(struct s5p_aes_dev *dev)
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
- struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
- uint32_t status;
- unsigned long flags;
- bool set_dma_tx = false;
- bool set_dma_rx = false;
+ struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
+ bool set_dma_tx = false;
+ bool set_dma_rx = false;
+ unsigned long flags;
+ uint32_t status;
spin_lock_irqsave(&dev->lock, flags);
@@ -538,10 +538,10 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
- struct ablkcipher_request *req = dev->req;
- uint32_t aes_control;
- int err;
- unsigned long flags;
+ struct ablkcipher_request *req = dev->req;
+ uint32_t aes_control;
+ unsigned long flags;
+ int err;
aes_control = SSS_AES_KEY_CHANGE_MODE;
if (mode & FLAGS_AES_DECRYPT)
@@ -653,10 +653,10 @@ exit:
static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
- struct s5p_aes_dev *dev = ctx->dev;
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
+ struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct s5p_aes_dev *dev = ctx->dev;
if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
@@ -671,7 +671,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
const uint8_t *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 &&
@@ -763,11 +763,11 @@ static struct crypto_alg algs[] = {
static int s5p_aes_probe(struct platform_device *pdev)
{
- int i, j, err = -ENODEV;
- struct s5p_aes_dev *pdata;
- struct device *dev = &pdev->dev;
- struct resource *res;
+ struct device *dev = &pdev->dev;
+ int i, j, err = -ENODEV;
struct samsung_aes_variant *variant;
+ struct s5p_aes_dev *pdata;
+ struct resource *res;
if (s5p_dev)
return -EEXIST;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index c3f3d89e4831..0c49956ee0ce 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -14,10 +14,9 @@
* Based on omap-aes.c and tegra-aes.c
*/
-#include <crypto/algapi.h>
#include <crypto/aes.h>
-#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
@@ -150,10 +149,7 @@ struct sahara_ctx {
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
- struct crypto_ablkcipher *fallback;
-
- /* SHA-specific context */
- struct crypto_shash *shash_fallback;
+ struct crypto_skcipher *fallback;
};
struct sahara_aes_reqctx {
@@ -620,25 +616,21 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
- if (keylen != AES_KEYSIZE_128 &&
- keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+ if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
return -EINVAL;
/*
* The requested key size is not supported by HW, do a fallback.
*/
- ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- ctx->fallback->base.crt_flags |=
- (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
- ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
- if (ret) {
- struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
+ ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
- tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm_aux->crt_flags |=
- (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
- }
+ tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
+ CRYPTO_TFM_RES_MASK;
return ret;
}
@@ -670,16 +662,20 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_encrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
return err;
}
@@ -688,16 +684,20 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
return err;
}
@@ -706,16 +706,20 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_encrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
return err;
}
@@ -724,16 +728,20 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
return err;
}
@@ -745,8 +753,9 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
const char *name = crypto_tfm_alg_name(tfm);
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->fallback = crypto_alloc_ablkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback = crypto_alloc_skcipher(name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("Error allocating fallback algo %s\n", name);
return PTR_ERR(ctx->fallback);
@@ -761,9 +770,7 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->fallback)
- crypto_free_ablkcipher(ctx->fallback);
- ctx->fallback = NULL;
+ crypto_free_skcipher(ctx->fallback);
}
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
@@ -1180,15 +1187,6 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->shash_fallback = crypto_alloc_shash(name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->shash_fallback)) {
- pr_err("Error allocating fallback algo %s\n", name);
- return PTR_ERR(ctx->shash_fallback);
- }
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct sahara_sha_reqctx) +
SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
@@ -1196,14 +1194,6 @@ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
-{
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_shash(ctx->shash_fallback);
- ctx->shash_fallback = NULL;
-}
-
static struct crypto_alg aes_algs[] = {
{
.cra_name = "ecb(aes)",
@@ -1272,7 +1262,6 @@ static struct ahash_alg sha_v3_algs[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = sahara_sha_cra_init,
- .cra_exit = sahara_sha_cra_exit,
}
},
};
@@ -1300,7 +1289,6 @@ static struct ahash_alg sha_v4_algs[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = sahara_sha_cra_init,
- .cra_exit = sahara_sha_cra_exit,
}
},
};
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b7ee8d30147d..0418a2f41dc0 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -91,10 +91,17 @@ static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
return be16_to_cpu(ptr->len);
}
-static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
+static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
+ bool is_sec1)
{
if (!is_sec1)
- ptr->j_extent = 0;
+ ptr->j_extent = val;
+}
+
+static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
+{
+ if (!is_sec1)
+ ptr->j_extent |= val;
}
/*
@@ -111,7 +118,7 @@ static void map_single_talitos_ptr(struct device *dev,
to_talitos_ptr_len(ptr, len, is_sec1);
to_talitos_ptr(ptr, dma_addr, is_sec1);
- to_talitos_ptr_extent_clear(ptr, is_sec1);
+ to_talitos_ptr_ext_set(ptr, 0, is_sec1);
}
/*
@@ -804,6 +811,11 @@ static void talitos_unregister_rng(struct device *dev)
* crypto alg
*/
#define TALITOS_CRA_PRIORITY 3000
+/*
+ * Defines a priority for doing AEAD with descriptor type
+ * HMAC_SNOOP_NO_AFEU (HSNA) instead of type IPSEC_ESP
+ */
+#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE 96
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
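
Both template variants register the same cra_name, and the crypto core resolves lookups by cra_priority, so the IPSEC_ESP entries (priority 3000) win over the HSNA entries (2999) wherever both are available; the lower-priority HSNA descriptors let hardware without IPSEC_ESP support still offer the algorithm. A minimal sketch of the resolution, assuming both are registered:

/* Sketch: lookup picks the highest cra_priority among matching algs. */
#include <crypto/aead.h>

static struct crypto_aead *pick_impl(void)
{
	return crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
}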
@@ -904,35 +916,59 @@ struct talitos_edesc {
static void talitos_sg_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct scatterlist *src,
- struct scatterlist *dst)
+ struct scatterlist *dst,
+ unsigned int len, unsigned int offset)
{
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
unsigned int src_nents = edesc->src_nents ? : 1;
unsigned int dst_nents = edesc->dst_nents ? : 1;
+ if (is_sec1 && dst && dst_nents > 1) {
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
+ len, DMA_FROM_DEVICE);
+ sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
+ offset);
+ }
if (src != dst) {
- dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ if (src_nents == 1 || !is_sec1)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- if (dst) {
+ if (dst && (dst_nents == 1 || !is_sec1))
dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
- }
- } else
+ } else if (src_nents == 1 || !is_sec1) {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
}
static void ipsec_esp_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct aead_request *areq)
{
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+
+ if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
+ DMA_FROM_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
- talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
+ areq->assoclen);
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
+
+ if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
+ unsigned int dst_nents = edesc->dst_nents ? : 1;
+
+ sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
+ areq->assoclen + areq->cryptlen - ivsize);
+ }
}
/*
@@ -942,6 +978,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
struct talitos_desc *desc, void *context,
int err)
{
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
struct aead_request *areq = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
unsigned int authsize = crypto_aead_authsize(authenc);
@@ -955,8 +993,11 @@ static void ipsec_esp_encrypt_done(struct device *dev,
/* copy the generated ICV to dst */
if (edesc->icv_ool) {
- icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
+ if (is_sec1)
+ icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
+ else
+ icvdata = &edesc->link_tbl[edesc->src_nents +
+ edesc->dst_nents + 2];
sg = sg_last(areq->dst, edesc->dst_nents);
memcpy((char *)sg_virt(sg) + sg->length - authsize,
icvdata, authsize);
@@ -977,6 +1018,8 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
struct talitos_edesc *edesc;
struct scatterlist *sg;
char *oicv, *icv;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
edesc = container_of(desc, struct talitos_edesc, desc);
@@ -988,7 +1031,12 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
icv = (char *)sg_virt(sg) + sg->length - authsize;
if (edesc->dma_len) {
- oicv = (char *)&edesc->link_tbl[edesc->src_nents +
+ if (is_sec1)
+ oicv = (char *)&edesc->dma_link_tbl +
+ req->assoclen + req->cryptlen;
+ else
+ oicv = (char *)
+ &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 2];
if (edesc->icv_ool)
icv = oicv + authsize;
@@ -1050,8 +1098,8 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
to_talitos_ptr(link_tbl_ptr + count,
sg_dma_address(sg) + offset, 0);
- link_tbl_ptr[count].len = cpu_to_be16(len);
- link_tbl_ptr[count].j_extent = 0;
+ to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
+ to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
offset = 0;
@@ -1062,17 +1110,43 @@ next:
/* tag end of link table */
if (count > 0)
- link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
+ to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
+ DESC_PTR_LNKTBL_RETURN, 0);
return count;
}
-static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
- int cryptlen,
- struct talitos_ptr *link_tbl_ptr)
+int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr,
+ int sg_count, unsigned int offset, int tbl_off)
{
- return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
- link_tbl_ptr);
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+
+ to_talitos_ptr_len(ptr, len, is_sec1);
+ to_talitos_ptr_ext_set(ptr, 0, is_sec1);
+
+ if (sg_count == 1) {
+ to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
+ return sg_count;
+ }
+ if (is_sec1) {
+ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
+ return sg_count;
+ }
+ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
+ &edesc->link_tbl[tbl_off]);
+ if (sg_count == 1) {
+ /* Only one segment now, so no link tbl needed */
+ copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
+ return sg_count;
+ }
+ to_talitos_ptr(ptr, edesc->dma_link_tbl +
+ tbl_off * sizeof(struct talitos_ptr), is_sec1);
+ to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
+
+ return sg_count;
}
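
talitos_sg_map() folds the driver's three data-placement cases (single DMA segment, SEC1 bounce buffer, link table with DESC_PTR_LNKTBL_JUMP) into one helper. A sketch of a call site matching the 'cipher in' use later in this patch; talitos_edesc and talitos_desc are the driver-internal types from this file, assumed in scope:

/* Sketch: one talitos_sg_map() call site (driver-internal types assumed).
 * A return value > 1 means a link table was written into edesc, so the
 * caller must dma_sync it before submitting the descriptor.
 */
static bool map_cipher_in(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_desc *desc, int sg_count)
{
	sg_count = talitos_sg_map(dev, src, len, edesc, &desc->ptr[3],
				  sg_count, 0, 0);
	return sg_count > 1;	/* true: sync link table before submit */
}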
/*
@@ -1093,42 +1167,52 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
int tbl_off = 0;
int sg_count, ret;
int sg_link_tbl_len;
+ bool sync_needed = false;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
/* hmac key */
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
DMA_TO_DEVICE);
- sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
- (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
- : DMA_TO_DEVICE);
- /* hmac data */
- desc->ptr[1].len = cpu_to_be16(areq->assoclen);
- if (sg_count > 1 &&
- (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
- areq->assoclen,
- &edesc->link_tbl[tbl_off])) > 1) {
- to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
- sizeof(struct talitos_ptr), 0);
- desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
+ sg_count = edesc->src_nents ?: 1;
+ if (is_sec1 && sg_count > 1)
+ sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
+ areq->assoclen + cryptlen);
+ else
+ sg_count = dma_map_sg(dev, areq->src, sg_count,
+ (areq->src == areq->dst) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
- dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- edesc->dma_len, DMA_BIDIRECTIONAL);
+ /* hmac data */
+ ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
+ &desc->ptr[1], sg_count, 0, tbl_off);
+ if (ret > 1) {
tbl_off += ret;
- } else {
- to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
- desc->ptr[1].j_extent = 0;
+ sync_needed = true;
}
/* cipher iv */
- to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
- desc->ptr[2].len = cpu_to_be16(ivsize);
- desc->ptr[2].j_extent = 0;
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
+ to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
+ to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
+ } else {
+ to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
+ to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
+ to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
+ }
/* cipher key */
- map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
- (char *)&ctx->key + ctx->authkeylen,
- DMA_TO_DEVICE);
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
+ (char *)&ctx->key + ctx->authkeylen,
+ DMA_TO_DEVICE);
+ else
+ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
+ (char *)&ctx->key + ctx->authkeylen,
+ DMA_TO_DEVICE);
/*
* cipher in
@@ -1136,78 +1220,82 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
* extent is bytes of HMAC postpended to ciphertext,
* typically 12 for ipsec
*/
- desc->ptr[4].len = cpu_to_be16(cryptlen);
- desc->ptr[4].j_extent = authsize;
+ to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
+ to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
sg_link_tbl_len = cryptlen;
- if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
- sg_link_tbl_len += authsize;
- if (sg_count == 1) {
- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
- areq->assoclen, 0);
- } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
- areq->assoclen, sg_link_tbl_len,
- &edesc->link_tbl[tbl_off])) >
- 1) {
- desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
- tbl_off *
- sizeof(struct talitos_ptr), 0);
- dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- edesc->dma_len,
- DMA_BIDIRECTIONAL);
- tbl_off += ret;
- } else {
- copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
+
+ if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ sg_link_tbl_len += authsize;
}
- /* cipher out */
- desc->ptr[5].len = cpu_to_be16(cryptlen);
- desc->ptr[5].j_extent = authsize;
+ sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
+ &desc->ptr[4], sg_count, areq->assoclen,
+ tbl_off);
- if (areq->src != areq->dst)
- sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE);
+ if (sg_count > 1) {
+ tbl_off += sg_count;
+ sync_needed = true;
+ }
- edesc->icv_ool = false;
+ /* cipher out */
+ if (areq->src != areq->dst) {
+ sg_count = edesc->dst_nents ? : 1;
+ if (!is_sec1 || sg_count == 1)
+ dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
+ }
- if (sg_count == 1) {
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
- areq->assoclen, 0);
- } else if ((sg_count =
- sg_to_link_tbl_offset(areq->dst, sg_count,
- areq->assoclen, cryptlen,
- &edesc->link_tbl[tbl_off])) > 1) {
- struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
-
- to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
- tbl_off * sizeof(struct talitos_ptr), 0);
-
- /* Add an entry to the link table for ICV data */
- tbl_ptr += sg_count - 1;
- tbl_ptr->j_extent = 0;
- tbl_ptr++;
- tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
- tbl_ptr->len = cpu_to_be16(authsize);
-
- /* icv data follows link tables */
- to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
- (edesc->src_nents + edesc->dst_nents +
- 2) * sizeof(struct talitos_ptr) +
- authsize, 0);
- desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
- dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
- edesc->dma_len, DMA_BIDIRECTIONAL);
+ sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
+ &desc->ptr[5], sg_count, areq->assoclen,
+ tbl_off);
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
+
+ if (sg_count > 1) {
edesc->icv_ool = true;
+ sync_needed = true;
+
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+ int offset = (edesc->src_nents + edesc->dst_nents + 2) *
+ sizeof(struct talitos_ptr) + authsize;
+
+ /* Add an entry to the link table for ICV data */
+ tbl_ptr += sg_count - 1;
+ to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
+ tbl_ptr++;
+ to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
+ is_sec1);
+ to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
+
+ /* icv data follows link tables */
+ to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
+ is_sec1);
+ }
} else {
- copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+ edesc->icv_ool = false;
+ }
+
+ /* ICV data */
+ if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
+ to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
+ to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
+ areq->assoclen + cryptlen, is_sec1);
}
/* iv out */
- map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
- DMA_FROM_DEVICE);
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
+ DMA_FROM_DEVICE);
+
+ if (sync_needed)
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
@@ -1233,7 +1321,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
bool encrypt)
{
struct talitos_edesc *edesc;
- int src_nents, dst_nents, alloc_len, dma_len;
+ int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
dma_addr_t iv_dma = 0;
gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -1251,8 +1339,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
if (!dst || dst == src) {
- src_nents = sg_nents_for_len(src,
- assoclen + cryptlen + authsize);
+ src_len = assoclen + cryptlen + authsize;
+ src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
err = ERR_PTR(-EINVAL);
@@ -1260,17 +1348,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
+ dst_len = 0;
} else { /* dst && dst != src*/
- src_nents = sg_nents_for_len(src, assoclen + cryptlen +
- (encrypt ? 0 : authsize));
+ src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
+ src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
err = ERR_PTR(-EINVAL);
goto error_sg;
}
src_nents = (src_nents == 1) ? 0 : src_nents;
- dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
- (encrypt ? authsize : 0));
+ dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
+ dst_nents = sg_nents_for_len(dst, dst_len);
if (dst_nents < 0) {
dev_err(dev, "Invalid number of dst SG.\n");
err = ERR_PTR(-EINVAL);
@@ -1287,8 +1376,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
alloc_len = sizeof(struct talitos_edesc);
if (src_nents || dst_nents) {
if (is_sec1)
- dma_len = (src_nents ? cryptlen : 0) +
- (dst_nents ? cryptlen : 0);
+ dma_len = (src_nents ? src_len : 0) +
+ (dst_nents ? dst_len : 0);
else
dma_len = (src_nents + dst_nents + 2) *
sizeof(struct talitos_ptr) + authsize * 2;
@@ -1412,40 +1501,13 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
- struct scatterlist *dst, unsigned int len,
- struct talitos_edesc *edesc)
-{
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
-
- if (is_sec1) {
- if (!edesc->src_nents) {
- dma_unmap_sg(dev, src, 1,
- dst != src ? DMA_TO_DEVICE
- : DMA_BIDIRECTIONAL);
- }
- if (dst && edesc->dst_nents) {
- dma_sync_single_for_device(dev,
- edesc->dma_link_tbl + len,
- len, DMA_FROM_DEVICE);
- sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
- edesc->buf + len, len);
- } else if (dst && dst != src) {
- dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
- }
- } else {
- talitos_sg_unmap(dev, edesc, src, dst);
- }
-}
-
static void common_nonsnoop_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct ablkcipher_request *areq)
{
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
- unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
@@ -1470,100 +1532,6 @@ static void ablkcipher_done(struct device *dev,
areq->base.complete(&areq->base, err);
}
-int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
- unsigned int len, struct talitos_edesc *edesc,
- enum dma_data_direction dir, struct talitos_ptr *ptr)
-{
- int sg_count;
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
-
- to_talitos_ptr_len(ptr, len, is_sec1);
-
- if (is_sec1) {
- sg_count = edesc->src_nents ? : 1;
-
- if (sg_count == 1) {
- dma_map_sg(dev, src, 1, dir);
- to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
- } else {
- sg_copy_to_buffer(src, sg_count, edesc->buf, len);
- to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
- dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- len, DMA_TO_DEVICE);
- }
- } else {
- to_talitos_ptr_extent_clear(ptr, is_sec1);
-
- sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
-
- if (sg_count == 1) {
- to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
- } else {
- sg_count = sg_to_link_tbl(src, sg_count, len,
- &edesc->link_tbl[0]);
- if (sg_count > 1) {
- to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
- ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
- dma_sync_single_for_device(dev,
- edesc->dma_link_tbl,
- edesc->dma_len,
- DMA_BIDIRECTIONAL);
- } else {
- /* Only one segment now, so no link tbl needed*/
- to_talitos_ptr(ptr, sg_dma_address(src),
- is_sec1);
- }
- }
- }
- return sg_count;
-}
-
-void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
- unsigned int len, struct talitos_edesc *edesc,
- enum dma_data_direction dir,
- struct talitos_ptr *ptr, int sg_count)
-{
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
-
- if (dir != DMA_NONE)
- sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
-
- to_talitos_ptr_len(ptr, len, is_sec1);
-
- if (is_sec1) {
- if (sg_count == 1) {
- if (dir != DMA_NONE)
- dma_map_sg(dev, dst, 1, dir);
- to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
- } else {
- to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
- dma_sync_single_for_device(dev,
- edesc->dma_link_tbl + len,
- len, DMA_FROM_DEVICE);
- }
- } else {
- to_talitos_ptr_extent_clear(ptr, is_sec1);
-
- if (sg_count == 1) {
- to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
- } else {
- struct talitos_ptr *link_tbl_ptr =
- &edesc->link_tbl[edesc->src_nents + 1];
-
- to_talitos_ptr(ptr, edesc->dma_link_tbl +
- (edesc->src_nents + 1) *
- sizeof(struct talitos_ptr), 0);
- ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
- sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
- dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- edesc->dma_len,
- DMA_BIDIRECTIONAL);
- }
- }
-}
-
static int common_nonsnoop(struct talitos_edesc *edesc,
struct ablkcipher_request *areq,
void (*callback) (struct device *dev,
@@ -1577,6 +1545,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
unsigned int cryptlen = areq->nbytes;
unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
int sg_count, ret;
+ bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1586,25 +1555,39 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* cipher iv */
to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
- to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
+ to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);
/* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
(char *)&ctx->key, DMA_TO_DEVICE);
+ sg_count = edesc->src_nents ?: 1;
+ if (is_sec1 && sg_count > 1)
+ sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
+ cryptlen);
+ else
+ sg_count = dma_map_sg(dev, areq->src, sg_count,
+ (areq->src == areq->dst) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/*
* cipher in
*/
- sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
- (areq->src == areq->dst) ?
- DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
- &desc->ptr[3]);
+ sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
+ &desc->ptr[3], sg_count, 0, 0);
+ if (sg_count > 1)
+ sync_needed = true;
/* cipher out */
- map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
- (areq->src == areq->dst) ? DMA_NONE
- : DMA_FROM_DEVICE,
- &desc->ptr[4], sg_count);
+ if (areq->src != areq->dst) {
+ sg_count = edesc->dst_nents ? : 1;
+ if (!is_sec1 || sg_count == 1)
+ dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
+ }
+
+ ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
+ sg_count, 0, (edesc->src_nents + 1));
+ if (ret > 1)
+ sync_needed = true;
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
@@ -1613,6 +1596,10 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* last DWORD empty */
desc->ptr[6] = zero_entry;
+ if (sync_needed)
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
+
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
common_nonsnoop_unmap(dev, edesc, areq);
@@ -1676,7 +1663,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
- unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
+ talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
/* When using hashctx-in, must unmap it. */
if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
@@ -1747,8 +1734,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
int ret;
+ bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ int sg_count;
/* first DWORD empty */
desc->ptr[0] = zero_entry;
@@ -1773,11 +1762,19 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
else
desc->ptr[2] = zero_entry;
+ sg_count = edesc->src_nents ?: 1;
+ if (is_sec1 && sg_count > 1)
+ sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
+ else
+ sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
+ DMA_TO_DEVICE);
/*
* data in
*/
- map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
- DMA_TO_DEVICE, &desc->ptr[3]);
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+ &desc->ptr[3], sg_count, 0, 0);
+ if (sg_count > 1)
+ sync_needed = true;
/* fifth DWORD empty */
desc->ptr[4] = zero_entry;
@@ -1798,6 +1795,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
+ if (sync_needed)
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
+
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
common_nonsnoop_hash_unmap(dev, edesc, areq);
@@ -2124,6 +2125,7 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
struct talitos_alg_template {
u32 type;
+ u32 priority;
union {
struct crypto_alg crypto;
struct ahash_alg hash;
@@ -2155,6 +2157,27 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA1_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
.cra_name = "authenc(hmac(sha1),"
@@ -2176,6 +2199,29 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA1_HMAC,
+ },
{ .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2196,6 +2242,27 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA224_HMAC,
},
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA224_HMAC,
+ },
{ .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2219,6 +2286,29 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA224_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA224_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
@@ -2239,6 +2329,27 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA256_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
.cra_name = "authenc(hmac(sha256),"
@@ -2261,6 +2372,29 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA256_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
@@ -2365,6 +2499,27 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_MD5_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
@@ -2385,6 +2540,28 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_MD5_HMAC,
+ },
/* ABLKCIPHER algorithms. */
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
@@ -2901,7 +3078,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
}
alg->cra_module = THIS_MODULE;
- alg->cra_priority = TALITOS_CRA_PRIORITY;
+ if (t_alg->algt.priority)
+ alg->cra_priority = t_alg->algt.priority;
+ else
+ alg->cra_priority = TALITOS_CRA_PRIORITY;
alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
index e5d362a6f680..b497ae3dde07 100644
--- a/drivers/crypto/ux500/cryp/Makefile
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -4,9 +4,9 @@
# * License terms: GNU General Public License (GPL) version 2 */
ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
-CFLAGS_cryp_core.o := -DDEBUG -O0
-CFLAGS_cryp.o := -DDEBUG -O0
-CFLAGS_cryp_irq.o := -DDEBUG -O0
+CFLAGS_cryp_core.o := -DDEBUG
+CFLAGS_cryp.o := -DDEBUG
+CFLAGS_cryp_irq.o := -DDEBUG
endif
obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
index b2f90d9bac72..784d9c0a8853 100644
--- a/drivers/crypto/ux500/hash/Makefile
+++ b/drivers/crypto/ux500/hash/Makefile
@@ -4,7 +4,7 @@
# License terms: GNU General Public License (GPL) version 2
#
ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
-CFLAGS_hash_core.o := -DDEBUG -O0
+CFLAGS_hash_core.o := -DDEBUG
endif
obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
diff --git a/drivers/crypto/vmx/.gitignore b/drivers/crypto/vmx/.gitignore
new file mode 100644
index 000000000000..af4a7ce4738d
--- /dev/null
+++ b/drivers/crypto/vmx/.gitignore
@@ -0,0 +1,2 @@
+aesp8-ppc.S
+ghashp8-ppc.S
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index d28ab96a2475..de6e241b0866 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
-vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o ghash.o
+vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
TARGET := linux-ppc64le
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
new file mode 100644
index 000000000000..cfb25413917c
--- /dev/null
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -0,0 +1,190 @@
+/*
+ * AES XTS routines supporting VMX In-core instructions on Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_xts_ctx {
+ struct crypto_blkcipher *fallback;
+ struct aes_key enc_key;
+ struct aes_key dec_key;
+ struct aes_key tweak_key;
+};
+
+static int p8_aes_xts_init(struct crypto_tfm *tfm)
+{
+ const char *alg;
+ struct crypto_blkcipher *fallback;
+ struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!(alg = crypto_tfm_alg_name(tfm))) {
+ printk(KERN_ERR "Failed to get algorithm name.\n");
+ return -ENOENT;
+ }
+
+ fallback =
+ crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ printk(KERN_ERR
+ "Failed to allocate transformation for '%s': %ld\n",
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+ printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+ crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+ crypto_blkcipher_set_flags(
+ fallback,
+ crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
+ ctx->fallback = fallback;
+
+ return 0;
+}
+
+static void p8_aes_xts_exit(struct crypto_tfm *tfm)
+{
+ struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ crypto_free_blkcipher(ctx->fallback);
+ ctx->fallback = NULL;
+ }
+}
+
+static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ int ret;
+ struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ret = xts_check_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
+ ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+ ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+ return ret;
+}
+
+static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes, int enc)
+{
+ int ret;
+ u8 tweak[AES_BLOCK_SIZE];
+ u8 *iv;
+ struct blkcipher_walk walk;
+ struct p8_aes_xts_ctx *ctx =
+ crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+ struct blkcipher_desc fallback_desc = {
+ .tfm = ctx->fallback,
+ .info = desc->info,
+ .flags = desc->flags
+ };
+
+ if (in_interrupt()) {
+ ret = enc ? crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) :
+ crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
+ } else {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+
+ iv = (u8 *)walk.iv;
+ ret = blkcipher_walk_virt(desc, &walk);
+ memset(tweak, 0, AES_BLOCK_SIZE);
+ aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+
+ while ((nbytes = walk.nbytes)) {
+ if (enc)
+ aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
+ else
+ aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ }
+ return ret;
+}
+
+static int p8_aes_xts_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ return p8_aes_xts_crypt(desc, dst, src, nbytes, 1);
+}
+
+static int p8_aes_xts_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ return p8_aes_xts_crypt(desc, dst, src, nbytes, 0);
+}
+
+struct crypto_alg p8_aes_xts_alg = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "p8_aes_xts",
+ .cra_module = THIS_MODULE,
+ .cra_priority = 2000,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_alignmask = 0,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
+ .cra_init = p8_aes_xts_init,
+ .cra_exit = p8_aes_xts_exit,
+ .cra_blkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = p8_aes_xts_setkey,
+ .encrypt = p8_aes_xts_encrypt,
+ .decrypt = p8_aes_xts_decrypt,
+ }
+};
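The new glue registers under cra_name "xts(aes)" at priority 2000 with a software fallback, so the crypto core will prefer it on POWER8. As a rough usage sketch, here is how a caller of that era could drive the transform through the blkcipher API; everything outside the standard crypto API (the function name, buffer handling, error paths) is illustrative only:

#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>

/* Hypothetical in-place XTS encryption of a linearly-mapped buffer. */
static int xts_demo_encrypt(const u8 *key, const u8 *iv,
                            u8 *buf, unsigned int len)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sg;
        int ret;

        /* The core picks the highest-priority "xts(aes)" provider;
         * with this patch applied that is p8_aes_xts on POWER8. */
        tfm = crypto_alloc_blkcipher("xts(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* XTS keys are double length: data key and tweak key halves. */
        ret = crypto_blkcipher_setkey(tfm, key, 2 * AES_KEYSIZE_128);
        if (ret)
                goto out;

        crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);
        desc.tfm = tfm;
        desc.flags = 0;

        sg_init_one(&sg, buf, len);
        ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
        crypto_free_blkcipher(tfm);
        return ret;
}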
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
index 4cd34ee54a94..01972e16a6c0 100644
--- a/drivers/crypto/vmx/aesp8-ppc.h
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -19,3 +19,7 @@ void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out,
size_t len, const struct aes_key *key,
const u8 *iv);
+void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len,
+ const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
+void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len,
+ const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
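Note the key2 parameter: key1 is the data key schedule and key2 the tweak key schedule. As the comment block in aesp8-ppc.pl below spells out, passing key2 == NULL selects a "tweak chaining" mode where iv already holds the encrypted tweak and only whole blocks are processed. That is exactly how the C glue above uses it, condensed here for reference:

        u8 tweak[AES_BLOCK_SIZE];

        /* Encrypt the IV under the tweak key once in C... */
        memset(tweak, 0, AES_BLOCK_SIZE);
        aes_p8_encrypt(iv, tweak, &ctx->tweak_key);

        /* ...then let the assembly chain the tweak itself (key2 == NULL);
         * in this mode only complete blocks are handled, so the partial
         * tail is masked off with AES_BLOCK_MASK. */
        aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                           nbytes & AES_BLOCK_MASK, &ctx->enc_key,
                           NULL, tweak);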
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 228053921b3f..0b4a293b8a1e 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1,4 +1,11 @@
-#!/usr/bin/env perl
+#! /usr/bin/env perl
+# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@@ -20,6 +27,19 @@
# instructions are interleaved. It's reckoned that eventual
# misalignment penalties at page boundaries are in average lower
# than additional overhead in pure AltiVec approach.
+#
+# May 2016
+#
+# Add XTS subroutine: a 9x improvement on little- and a 12x improvement
+# on big-endian systems was measured.
+#
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+# CBC en-/decrypt CTR XTS
+# POWER8[le] 3.96/0.72 0.74 1.1
+# POWER8[be] 3.75/0.65 0.66 1.0
$flavour = shift;
@@ -1875,6 +1895,1845 @@ Lctr32_enc8x_done:
___
}} }}}
+#########################################################################
+{{{ # XTS procedures #
+# int aes_p8_xts_[en|de]crypt(const char *inp, char *out, size_t len, #
+# const AES_KEY *key1, const AES_KEY *key2, #
+# [const] unsigned char iv[16]); #
+# If $key2 is NULL, then a "tweak chaining" mode is engaged, in which #
+# the input tweak value is assumed to be already encrypted, and the   #
+# last tweak value, suitable for a consecutive call on the same chunk #
+# of data, is written back to the original buffer. In addition, in    #
+# "tweak chaining" mode only complete input blocks are processed.     #
+
+my ($inp,$out,$len,$key1,$key2,$ivp,$rounds,$idx) = map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout) = map("v$_",(0..2));
+my ($output,$inptail,$inpperm,$leperm,$keyperm) = map("v$_",(3..7));
+my ($tweak,$seven,$eighty7,$tmp,$tweak1) = map("v$_",(8..12));
+my $taillen = $key2;
+
+ ($inp,$idx) = ($idx,$inp); # reassign
+
+$code.=<<___;
+.globl .${prefix}_xts_encrypt
+ mr $inp,r3 # reassign
+ li r3,-1
+ ${UCMP}i $len,16
+ bltlr-
+
+ lis r0,0xfff0
+ mfspr r12,256 # save vrsave
+ li r11,0
+ mtspr 256,r0
+
+ vspltisb $seven,0x07 # 0x070707..07
+ le?lvsl $leperm,r11,r11
+ le?vspltisb $tmp,0x0f
+ le?vxor $leperm,$leperm,$seven
+
+ li $idx,15
+ lvx $tweak,0,$ivp # load [unaligned] iv
+ lvsl $inpperm,0,$ivp
+ lvx $inptail,$idx,$ivp
+ le?vxor $inpperm,$inpperm,$tmp
+ vperm $tweak,$tweak,$inptail,$inpperm
+
+ neg r11,$inp
+ lvsr $inpperm,0,r11 # prepare for unaligned load
+ lvx $inout,0,$inp
+ addi $inp,$inp,15 # 15 is not a typo
+ le?vxor $inpperm,$inpperm,$tmp
+
+ ${UCMP}i $key2,0 # key2==NULL?
+ beq Lxts_enc_no_key2
+
+ ?lvsl $keyperm,0,$key2 # prepare for unaligned key
+ lwz $rounds,240($key2)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ lvx $rndkey0,0,$key2
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ mtctr $rounds
+
+Ltweak_xts_enc:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ bdnz Ltweak_xts_enc
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipherlast $tweak,$tweak,$rndkey0
+
+ li $ivp,0 # don't chain the tweak
+ b Lxts_enc
+
+Lxts_enc_no_key2:
+ li $idx,-16
+ and $len,$len,$idx # in "tweak chaining"
+ # mode only complete
+ # blocks are processed
+Lxts_enc:
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+
+ ?lvsl $keyperm,0,$key1 # prepare for unaligned key
+ lwz $rounds,240($key1)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ vslb $eighty7,$seven,$seven # 0x808080..80
+ vor $eighty7,$eighty7,$seven # 0x878787..87
+ vspltisb $tmp,1 # 0x010101..01
+ vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01
+
+ ${UCMP}i $len,96
+ bge _aesp8_xts_encrypt6x
+
+ andi. $taillen,$len,15
+ subic r0,$len,32
+ subi $taillen,$taillen,16
+ subfe r0,r0,r0
+ and r0,r0,$taillen
+ add $inp,$inp,r0
+
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ mtctr $rounds
+ b Loop_xts_enc
+
+.align 5
+Loop_xts_enc:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ bdnz Loop_xts_enc
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $rndkey0,$rndkey0,$tweak
+ vcipherlast $output,$inout,$rndkey0
+
+ le?vperm $tmp,$output,$output,$leperm
+ be?nop
+ le?stvx_u $tmp,0,$out
+ be?stvx_u $output,0,$out
+ addi $out,$out,16
+
+ subic. $len,$len,16
+ beq Lxts_enc_done
+
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+
+ subic r0,$len,32
+ subfe r0,r0,r0
+ and r0,r0,$taillen
+ add $inp,$inp,r0
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $output,$output,$rndkey0 # just in case $len<16
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+
+ mtctr $rounds
+ ${UCMP}i $len,16
+ bge Loop_xts_enc
+
+ vxor $output,$output,$tweak
+ lvsr $inpperm,0,$len # $inpperm is no longer needed
+ vxor $inptail,$inptail,$inptail # $inptail is no longer needed
+ vspltisb $tmp,-1
+ vperm $inptail,$inptail,$tmp,$inpperm
+ vsel $inout,$inout,$output,$inptail
+
+ subi r11,$out,17
+ subi $out,$out,16
+ mtctr $len
+ li $len,16
+Loop_xts_enc_steal:
+ lbzu r0,1(r11)
+ stb r0,16(r11)
+ bdnz Loop_xts_enc_steal
+
+ mtctr $rounds
+ b Loop_xts_enc # one more time...
+
+Lxts_enc_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_enc_ret
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_enc_ret:
+ mtspr 256,r12 # restore vrsave
+ li r3,0
+ blr
+ .long 0
+ .byte 0,12,0x04,0,0x80,6,6,0
+ .long 0
+.size .${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt
+
+.globl .${prefix}_xts_decrypt
+ mr $inp,r3 # reassign
+ li r3,-1
+ ${UCMP}i $len,16
+ bltlr-
+
+ lis r0,0xfff8
+ mfspr r12,256 # save vrsave
+ li r11,0
+ mtspr 256,r0
+
+ andi. r0,$len,15
+ neg r0,r0
+ andi. r0,r0,16
+ sub $len,$len,r0
+
+ vspltisb $seven,0x07 # 0x070707..07
+ le?lvsl $leperm,r11,r11
+ le?vspltisb $tmp,0x0f
+ le?vxor $leperm,$leperm,$seven
+
+ li $idx,15
+ lvx $tweak,0,$ivp # load [unaligned] iv
+ lvsl $inpperm,0,$ivp
+ lvx $inptail,$idx,$ivp
+ le?vxor $inpperm,$inpperm,$tmp
+ vperm $tweak,$tweak,$inptail,$inpperm
+
+ neg r11,$inp
+ lvsr $inpperm,0,r11 # prepare for unaligned load
+ lvx $inout,0,$inp
+ addi $inp,$inp,15 # 15 is not a typo
+ le?vxor $inpperm,$inpperm,$tmp
+
+ ${UCMP}i $key2,0 # key2==NULL?
+ beq Lxts_dec_no_key2
+
+ ?lvsl $keyperm,0,$key2 # prepare for unaligned key
+ lwz $rounds,240($key2)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ lvx $rndkey0,0,$key2
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ mtctr $rounds
+
+Ltweak_xts_dec:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ bdnz Ltweak_xts_dec
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipherlast $tweak,$tweak,$rndkey0
+
+ li $ivp,0 # don't chain the tweak
+ b Lxts_dec
+
+Lxts_dec_no_key2:
+ neg $idx,$len
+ andi. $idx,$idx,15
+ add $len,$len,$idx # in "tweak chaining"
+ # mode only complete
+ # blocks are processed
+Lxts_dec:
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+
+ ?lvsl $keyperm,0,$key1 # prepare for unaligned key
+ lwz $rounds,240($key1)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ vslb $eighty7,$seven,$seven # 0x808080..80
+ vor $eighty7,$eighty7,$seven # 0x878787..87
+ vspltisb $tmp,1 # 0x010101..01
+ vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01
+
+ ${UCMP}i $len,96
+ bge _aesp8_xts_decrypt6x
+
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ mtctr $rounds
+
+ ${UCMP}i $len,16
+ blt Ltail_xts_dec
+ be?b Loop_xts_dec
+
+.align 5
+Loop_xts_dec:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vncipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ bdnz Loop_xts_dec
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $rndkey0,$rndkey0,$tweak
+ vncipherlast $output,$inout,$rndkey0
+
+ le?vperm $tmp,$output,$output,$leperm
+ be?nop
+ le?stvx_u $tmp,0,$out
+ be?stvx_u $output,0,$out
+ addi $out,$out,16
+
+ subic. $len,$len,16
+ beq Lxts_dec_done
+
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+
+ mtctr $rounds
+ ${UCMP}i $len,16
+ bge Loop_xts_dec
+
+Ltail_xts_dec:
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak1,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak1,$tweak1,$tmp
+
+ subi $inp,$inp,16
+ add $inp,$inp,$len
+
+ vxor $inout,$inout,$tweak # :-(
+ vxor $inout,$inout,$tweak1 # :-)
+
+Loop_xts_dec_short:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vncipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ bdnz Loop_xts_dec_short
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $rndkey0,$rndkey0,$tweak1
+ vncipherlast $output,$inout,$rndkey0
+
+ le?vperm $tmp,$output,$output,$leperm
+ be?nop
+ le?stvx_u $tmp,0,$out
+ be?stvx_u $output,0,$out
+
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ #addi $inp,$inp,16
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+
+ lvsr $inpperm,0,$len # $inpperm is no longer needed
+ vxor $inptail,$inptail,$inptail # $inptail is no longer needed
+ vspltisb $tmp,-1
+ vperm $inptail,$inptail,$tmp,$inpperm
+ vsel $inout,$inout,$output,$inptail
+
+ vxor $rndkey0,$rndkey0,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+
+ subi r11,$out,1
+ mtctr $len
+ li $len,16
+Loop_xts_dec_steal:
+ lbzu r0,1(r11)
+ stb r0,16(r11)
+ bdnz Loop_xts_dec_steal
+
+ mtctr $rounds
+ b Loop_xts_dec # one more time...
+
+Lxts_dec_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_dec_ret
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_dec_ret:
+ mtspr 256,r12 # restore vrsave
+ li r3,0
+ blr
+ .long 0
+ .byte 0,12,0x04,0,0x80,6,6,0
+ .long 0
+.size .${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt
+___
+#########################################################################
+{{ # Optimized XTS procedures #
+my $key_=$key2;
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,3,26..31));
+ $x00=0 if ($flavour =~ /osx/);
+my ($in0, $in1, $in2, $in3, $in4, $in5 )=map("v$_",(0..5));
+my ($out0, $out1, $out2, $out3, $out4, $out5)=map("v$_",(7,12..16));
+my ($twk0, $twk1, $twk2, $twk3, $twk4, $twk5)=map("v$_",(17..22));
+my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
+ # v26-v31 last 6 round keys
+my ($keyperm)=($out0); # aliases with "caller", redundant assignment
+my $taillen=$x70;
+
+$code.=<<___;
+.align 5
+_aesp8_xts_encrypt6x:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ mflr r11
+ li r7,`$FRAME+8*16+15`
+ li r3,`$FRAME+8*16+31`
+ $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+ stvx v20,r7,$sp # ABI says so
+ addi r7,r7,32
+ stvx v21,r3,$sp
+ addi r3,r3,32
+ stvx v22,r7,$sp
+ addi r7,r7,32
+ stvx v23,r3,$sp
+ addi r3,r3,32
+ stvx v24,r7,$sp
+ addi r7,r7,32
+ stvx v25,r3,$sp
+ addi r3,r3,32
+ stvx v26,r7,$sp
+ addi r7,r7,32
+ stvx v27,r3,$sp
+ addi r3,r3,32
+ stvx v28,r7,$sp
+ addi r7,r7,32
+ stvx v29,r3,$sp
+ addi r3,r3,32
+ stvx v30,r7,$sp
+ stvx v31,r3,$sp
+ li r0,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ mtspr 256,r0
+
+ subi $rounds,$rounds,3 # -4 in total
+
+ lvx $rndkey0,$x00,$key1 # load key schedule
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ lvx v31,$x00,$key1
+ ?vperm $rndkey0,$rndkey0,v30,$keyperm
+ addi $key_,$sp,$FRAME+15
+ mtctr $rounds
+
+Load_xts_enc_key:
+ ?vperm v24,v30,v31,$keyperm
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ stvx v24,$x00,$key_ # off-load round[1]
+ ?vperm v25,v31,v30,$keyperm
+ lvx v31,$x00,$key1
+ stvx v25,$x10,$key_ # off-load round[2]
+ addi $key_,$key_,0x20
+ bdnz Load_xts_enc_key
+
+ lvx v26,$x10,$key1
+ ?vperm v24,v30,v31,$keyperm
+ lvx v27,$x20,$key1
+ stvx v24,$x00,$key_ # off-load round[3]
+ ?vperm v25,v31,v26,$keyperm
+ lvx v28,$x30,$key1
+ stvx v25,$x10,$key_ # off-load round[4]
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ ?vperm v26,v26,v27,$keyperm
+ lvx v29,$x40,$key1
+ ?vperm v27,v27,v28,$keyperm
+ lvx v30,$x50,$key1
+ ?vperm v28,v28,v29,$keyperm
+ lvx v31,$x60,$key1
+ ?vperm v29,v29,v30,$keyperm
+ lvx $twk5,$x70,$key1 # borrow $twk5
+ ?vperm v30,v30,v31,$keyperm
+ lvx v24,$x00,$key_ # pre-load round[1]
+ ?vperm v31,v31,$twk5,$keyperm
+ lvx v25,$x10,$key_ # pre-load round[2]
+
+ vperm $in0,$inout,$inptail,$inpperm
+ subi $inp,$inp,31 # undo "caller"
+ vxor $twk0,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $out0,$in0,$twk0
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in1,$x10,$inp
+ vxor $twk1,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in1,$in1,$in1,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out1,$in1,$twk1
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in2,$x20,$inp
+ andi. $taillen,$len,15
+ vxor $twk2,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in2,$in2,$in2,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out2,$in2,$twk2
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in3,$x30,$inp
+ sub $len,$len,$taillen
+ vxor $twk3,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in3,$in3,$in3,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out3,$in3,$twk3
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in4,$x40,$inp
+ subi $len,$len,0x60
+ vxor $twk4,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in4,$in4,$in4,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out4,$in4,$twk4
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ vxor $twk5,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in5,$in5,$in5,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out5,$in5,$twk5
+ vxor $tweak,$tweak,$tmp
+
+ vxor v31,v31,$rndkey0
+ mtctr $rounds
+ b Loop_xts_enc6x
+
+.align 5
+Loop_xts_enc6x:
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ vcipher $out5,$out5,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ vcipher $out5,$out5,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_enc6x
+
+ subic $len,$len,96 # $len-=96
+ vxor $in0,$twk0,v31 # xor with last round key
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk0,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vsldoi $tmp,$tmp,$tmp,15
+ vcipher $out4,$out4,v24
+ vcipher $out5,$out5,v24
+
+ subfe. r0,r0,r0 # borrow?-1:0
+ vand $tmp,$tmp,$eighty7
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vxor $tweak,$tweak,$tmp
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vxor $in1,$twk1,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk1,$tweak,$rndkey0
+ vcipher $out4,$out4,v25
+ vcipher $out5,$out5,v25
+
+ and r0,r0,$len
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vcipher $out0,$out0,v26
+ vcipher $out1,$out1,v26
+ vand $tmp,$tmp,$eighty7
+ vcipher $out2,$out2,v26
+ vcipher $out3,$out3,v26
+ vxor $tweak,$tweak,$tmp
+ vcipher $out4,$out4,v26
+ vcipher $out5,$out5,v26
+
+ add $inp,$inp,r0 # $inp is adjusted in such
+ # a way that at exit from
+ # the loop in0-in5 are loaded
+ # with the last "words"
+ vxor $in2,$twk2,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk2,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vcipher $out0,$out0,v27
+ vcipher $out1,$out1,v27
+ vsldoi $tmp,$tmp,$tmp,15
+ vcipher $out2,$out2,v27
+ vcipher $out3,$out3,v27
+ vand $tmp,$tmp,$eighty7
+ vcipher $out4,$out4,v27
+ vcipher $out5,$out5,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vxor $tweak,$tweak,$tmp
+ vcipher $out0,$out0,v28
+ vcipher $out1,$out1,v28
+ vxor $in3,$twk3,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk3,$tweak,$rndkey0
+ vcipher $out2,$out2,v28
+ vcipher $out3,$out3,v28
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vcipher $out4,$out4,v28
+ vcipher $out5,$out5,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vand $tmp,$tmp,$eighty7
+
+ vcipher $out0,$out0,v29
+ vcipher $out1,$out1,v29
+ vxor $tweak,$tweak,$tmp
+ vcipher $out2,$out2,v29
+ vcipher $out3,$out3,v29
+ vxor $in4,$twk4,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk4,$tweak,$rndkey0
+ vcipher $out4,$out4,v29
+ vcipher $out5,$out5,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+
+ vcipher $out0,$out0,v30
+ vcipher $out1,$out1,v30
+ vand $tmp,$tmp,$eighty7
+ vcipher $out2,$out2,v30
+ vcipher $out3,$out3,v30
+ vxor $tweak,$tweak,$tmp
+ vcipher $out4,$out4,v30
+ vcipher $out5,$out5,v30
+ vxor $in5,$twk5,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk5,$tweak,$rndkey0
+
+ vcipherlast $out0,$out0,$in0
+ lvx_u $in0,$x00,$inp # load next input block
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vcipherlast $out1,$out1,$in1
+ lvx_u $in1,$x10,$inp
+ vcipherlast $out2,$out2,$in2
+ le?vperm $in0,$in0,$in0,$leperm
+ lvx_u $in2,$x20,$inp
+ vand $tmp,$tmp,$eighty7
+ vcipherlast $out3,$out3,$in3
+ le?vperm $in1,$in1,$in1,$leperm
+ lvx_u $in3,$x30,$inp
+ vcipherlast $out4,$out4,$in4
+ le?vperm $in2,$in2,$in2,$leperm
+ lvx_u $in4,$x40,$inp
+ vxor $tweak,$tweak,$tmp
+ vcipherlast $tmp,$out5,$in5 # last block might be needed
+ # in stealing mode
+ le?vperm $in3,$in3,$in3,$leperm
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ le?vperm $in4,$in4,$in4,$leperm
+ le?vperm $in5,$in5,$in5,$leperm
+
+ le?vperm $out0,$out0,$out0,$leperm
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk0
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ vxor $out1,$in1,$twk1
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ vxor $out2,$in2,$twk2
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ vxor $out3,$in3,$twk3
+ le?vperm $out5,$tmp,$tmp,$leperm
+ stvx_u $out4,$x40,$out
+ vxor $out4,$in4,$twk4
+ le?stvx_u $out5,$x50,$out
+ be?stvx_u $tmp, $x50,$out
+ vxor $out5,$in5,$twk5
+ addi $out,$out,0x60
+
+ mtctr $rounds
+ beq Loop_xts_enc6x # did $len-=96 borrow?
+
+ addic. $len,$len,0x60
+ beq Lxts_enc6x_zero
+ cmpwi $len,0x20
+ blt Lxts_enc6x_one
+ nop
+ beq Lxts_enc6x_two
+ cmpwi $len,0x40
+ blt Lxts_enc6x_three
+ nop
+ beq Lxts_enc6x_four
+
+Lxts_enc6x_five:
+ vxor $out0,$in1,$twk0
+ vxor $out1,$in2,$twk1
+ vxor $out2,$in3,$twk2
+ vxor $out3,$in4,$twk3
+ vxor $out4,$in5,$twk4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk5 # unused tweak
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ vxor $tmp,$out4,$twk5 # last block prep for stealing
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ stvx_u $out4,$x40,$out
+ addi $out,$out,0x50
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_four:
+ vxor $out0,$in2,$twk0
+ vxor $out1,$in3,$twk1
+ vxor $out2,$in4,$twk2
+ vxor $out3,$in5,$twk3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk4 # unused tweak
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ vxor $tmp,$out3,$twk4 # last block prep for stealing
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ stvx_u $out3,$x30,$out
+ addi $out,$out,0x40
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_three:
+ vxor $out0,$in3,$twk0
+ vxor $out1,$in4,$twk1
+ vxor $out2,$in5,$twk2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk3 # unused tweak
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $tmp,$out2,$twk3 # last block prep for stealing
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ stvx_u $out2,$x20,$out
+ addi $out,$out,0x30
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_two:
+ vxor $out0,$in4,$twk0
+ vxor $out1,$in5,$twk1
+ vxor $out2,$out2,$out2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk2 # unused tweak
+ vxor $tmp,$out1,$twk2 # last block prep for stealing
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ stvx_u $out1,$x10,$out
+ addi $out,$out,0x20
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_one:
+ vxor $out0,$in5,$twk0
+ nop
+Loop_xts_enc1x:
+ vcipher $out0,$out0,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_enc1x
+
+ add $inp,$inp,$taillen
+ cmpwi $taillen,0
+ vcipher $out0,$out0,v24
+
+ subi $inp,$inp,16
+ vcipher $out0,$out0,v25
+
+ lvsr $inpperm,0,$taillen
+ vcipher $out0,$out0,v26
+
+ lvx_u $in0,0,$inp
+ vcipher $out0,$out0,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vcipher $out0,$out0,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vcipher $out0,$out0,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $twk0,$twk0,v31
+
+ le?vperm $in0,$in0,$in0,$leperm
+ vcipher $out0,$out0,v30
+
+ vperm $in0,$in0,$in0,$inpperm
+ vcipherlast $out0,$out0,$twk0
+
+ vmr $twk0,$twk1 # unused tweak
+ vxor $tmp,$out0,$twk1 # last block prep for stealing
+ le?vperm $out0,$out0,$out0,$leperm
+ stvx_u $out0,$x00,$out # store output
+ addi $out,$out,0x10
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_zero:
+ cmpwi $taillen,0
+ beq Lxts_enc6x_done
+
+ add $inp,$inp,$taillen
+ subi $inp,$inp,16
+ lvx_u $in0,0,$inp
+ lvsr $inpperm,0,$taillen # $in5 is no more
+ le?vperm $in0,$in0,$in0,$leperm
+ vperm $in0,$in0,$in0,$inpperm
+ vxor $tmp,$tmp,$twk0
+Lxts_enc6x_steal:
+ vxor $in0,$in0,$twk0
+ vxor $out0,$out0,$out0
+ vspltisb $out1,-1
+ vperm $out0,$out0,$out1,$inpperm
+ vsel $out0,$in0,$tmp,$out0 # $tmp is last block, remember?
+
+ subi r30,$out,17
+ subi $out,$out,16
+ mtctr $taillen
+Loop_xts_enc6x_steal:
+ lbzu r0,1(r30)
+ stb r0,16(r30)
+ bdnz Loop_xts_enc6x_steal
+
+ li $taillen,0
+ mtctr $rounds
+ b Loop_xts_enc1x # one more time...
+
+.align 4
+Lxts_enc6x_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_enc6x_ret
+
+ vxor $tweak,$twk0,$rndkey0
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_enc6x_ret:
+ mtlr r11
+ li r10,`$FRAME+15`
+ li r11,`$FRAME+31`
+ stvx $seven,r10,$sp # wipe copies of round keys
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,0x04,1,0x80,6,6,0
+ .long 0
+
+.align 5
+_aesp8_xts_enc5x:
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz _aesp8_xts_enc5x
+
+ add $inp,$inp,$taillen
+ cmpwi $taillen,0
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+
+ subi $inp,$inp,16
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ vxor $twk0,$twk0,v31
+
+ vcipher $out0,$out0,v26
+ lvsr $inpperm,r0,$taillen # $in5 is no more
+ vcipher $out1,$out1,v26
+ vcipher $out2,$out2,v26
+ vcipher $out3,$out3,v26
+ vcipher $out4,$out4,v26
+ vxor $in1,$twk1,v31
+
+ vcipher $out0,$out0,v27
+ lvx_u $in0,0,$inp
+ vcipher $out1,$out1,v27
+ vcipher $out2,$out2,v27
+ vcipher $out3,$out3,v27
+ vcipher $out4,$out4,v27
+ vxor $in2,$twk2,v31
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vcipher $out0,$out0,v28
+ vcipher $out1,$out1,v28
+ vcipher $out2,$out2,v28
+ vcipher $out3,$out3,v28
+ vcipher $out4,$out4,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vxor $in3,$twk3,v31
+
+ vcipher $out0,$out0,v29
+ le?vperm $in0,$in0,$in0,$leperm
+ vcipher $out1,$out1,v29
+ vcipher $out2,$out2,v29
+ vcipher $out3,$out3,v29
+ vcipher $out4,$out4,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $in4,$twk4,v31
+
+ vcipher $out0,$out0,v30
+ vperm $in0,$in0,$in0,$inpperm
+ vcipher $out1,$out1,v30
+ vcipher $out2,$out2,v30
+ vcipher $out3,$out3,v30
+ vcipher $out4,$out4,v30
+
+ vcipherlast $out0,$out0,$twk0
+ vcipherlast $out1,$out1,$in1
+ vcipherlast $out2,$out2,$in2
+ vcipherlast $out3,$out3,$in3
+ vcipherlast $out4,$out4,$in4
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+
+.align 5
+_aesp8_xts_decrypt6x:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ mflr r11
+ li r7,`$FRAME+8*16+15`
+ li r3,`$FRAME+8*16+31`
+ $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+ stvx v20,r7,$sp # ABI says so
+ addi r7,r7,32
+ stvx v21,r3,$sp
+ addi r3,r3,32
+ stvx v22,r7,$sp
+ addi r7,r7,32
+ stvx v23,r3,$sp
+ addi r3,r3,32
+ stvx v24,r7,$sp
+ addi r7,r7,32
+ stvx v25,r3,$sp
+ addi r3,r3,32
+ stvx v26,r7,$sp
+ addi r7,r7,32
+ stvx v27,r3,$sp
+ addi r3,r3,32
+ stvx v28,r7,$sp
+ addi r7,r7,32
+ stvx v29,r3,$sp
+ addi r3,r3,32
+ stvx v30,r7,$sp
+ stvx v31,r3,$sp
+ li r0,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ mtspr 256,r0
+
+ subi $rounds,$rounds,3 # -4 in total
+
+ lvx $rndkey0,$x00,$key1 # load key schedule
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ lvx v31,$x00,$key1
+ ?vperm $rndkey0,$rndkey0,v30,$keyperm
+ addi $key_,$sp,$FRAME+15
+ mtctr $rounds
+
+Load_xts_dec_key:
+ ?vperm v24,v30,v31,$keyperm
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ stvx v24,$x00,$key_ # off-load round[1]
+ ?vperm v25,v31,v30,$keyperm
+ lvx v31,$x00,$key1
+ stvx v25,$x10,$key_ # off-load round[2]
+ addi $key_,$key_,0x20
+ bdnz Load_xts_dec_key
+
+ lvx v26,$x10,$key1
+ ?vperm v24,v30,v31,$keyperm
+ lvx v27,$x20,$key1
+ stvx v24,$x00,$key_ # off-load round[3]
+ ?vperm v25,v31,v26,$keyperm
+ lvx v28,$x30,$key1
+ stvx v25,$x10,$key_ # off-load round[4]
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ ?vperm v26,v26,v27,$keyperm
+ lvx v29,$x40,$key1
+ ?vperm v27,v27,v28,$keyperm
+ lvx v30,$x50,$key1
+ ?vperm v28,v28,v29,$keyperm
+ lvx v31,$x60,$key1
+ ?vperm v29,v29,v30,$keyperm
+ lvx $twk5,$x70,$key1 # borrow $twk5
+ ?vperm v30,v30,v31,$keyperm
+ lvx v24,$x00,$key_ # pre-load round[1]
+ ?vperm v31,v31,$twk5,$keyperm
+ lvx v25,$x10,$key_ # pre-load round[2]
+
+ vperm $in0,$inout,$inptail,$inpperm
+ subi $inp,$inp,31 # undo "caller"
+ vxor $twk0,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $out0,$in0,$twk0
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in1,$x10,$inp
+ vxor $twk1,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in1,$in1,$in1,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out1,$in1,$twk1
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in2,$x20,$inp
+ andi. $taillen,$len,15
+ vxor $twk2,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in2,$in2,$in2,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out2,$in2,$twk2
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in3,$x30,$inp
+ sub $len,$len,$taillen
+ vxor $twk3,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in3,$in3,$in3,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out3,$in3,$twk3
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in4,$x40,$inp
+ subi $len,$len,0x60
+ vxor $twk4,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in4,$in4,$in4,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out4,$in4,$twk4
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ vxor $twk5,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in5,$in5,$in5,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out5,$in5,$twk5
+ vxor $tweak,$tweak,$tmp
+
+ vxor v31,v31,$rndkey0
+ mtctr $rounds
+ b Loop_xts_dec6x
+
+.align 5
+Loop_xts_dec6x:
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_dec6x
+
+ subic $len,$len,96 # $len-=96
+ vxor $in0,$twk0,v31 # xor with last round key
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk0,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vsldoi $tmp,$tmp,$tmp,15
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+
+ subfe. r0,r0,r0 # borrow?-1:0
+ vand $tmp,$tmp,$eighty7
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vxor $tweak,$tweak,$tmp
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vxor $in1,$twk1,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk1,$tweak,$rndkey0
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+
+ and r0,r0,$len
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vncipher $out0,$out0,v26
+ vncipher $out1,$out1,v26
+ vand $tmp,$tmp,$eighty7
+ vncipher $out2,$out2,v26
+ vncipher $out3,$out3,v26
+ vxor $tweak,$tweak,$tmp
+ vncipher $out4,$out4,v26
+ vncipher $out5,$out5,v26
+
+ add $inp,$inp,r0 # $inp is adjusted in such
+ # a way that at exit from
+ # the loop in0-in5 are loaded
+ # with the last "words"
+ vxor $in2,$twk2,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk2,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vncipher $out0,$out0,v27
+ vncipher $out1,$out1,v27
+ vsldoi $tmp,$tmp,$tmp,15
+ vncipher $out2,$out2,v27
+ vncipher $out3,$out3,v27
+ vand $tmp,$tmp,$eighty7
+ vncipher $out4,$out4,v27
+ vncipher $out5,$out5,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vxor $tweak,$tweak,$tmp
+ vncipher $out0,$out0,v28
+ vncipher $out1,$out1,v28
+ vxor $in3,$twk3,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk3,$tweak,$rndkey0
+ vncipher $out2,$out2,v28
+ vncipher $out3,$out3,v28
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vncipher $out4,$out4,v28
+ vncipher $out5,$out5,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vand $tmp,$tmp,$eighty7
+
+ vncipher $out0,$out0,v29
+ vncipher $out1,$out1,v29
+ vxor $tweak,$tweak,$tmp
+ vncipher $out2,$out2,v29
+ vncipher $out3,$out3,v29
+ vxor $in4,$twk4,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk4,$tweak,$rndkey0
+ vncipher $out4,$out4,v29
+ vncipher $out5,$out5,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+
+ vncipher $out0,$out0,v30
+ vncipher $out1,$out1,v30
+ vand $tmp,$tmp,$eighty7
+ vncipher $out2,$out2,v30
+ vncipher $out3,$out3,v30
+ vxor $tweak,$tweak,$tmp
+ vncipher $out4,$out4,v30
+ vncipher $out5,$out5,v30
+ vxor $in5,$twk5,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk5,$tweak,$rndkey0
+
+ vncipherlast $out0,$out0,$in0
+ lvx_u $in0,$x00,$inp # load next input block
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vncipherlast $out1,$out1,$in1
+ lvx_u $in1,$x10,$inp
+ vncipherlast $out2,$out2,$in2
+ le?vperm $in0,$in0,$in0,$leperm
+ lvx_u $in2,$x20,$inp
+ vand $tmp,$tmp,$eighty7
+ vncipherlast $out3,$out3,$in3
+ le?vperm $in1,$in1,$in1,$leperm
+ lvx_u $in3,$x30,$inp
+ vncipherlast $out4,$out4,$in4
+ le?vperm $in2,$in2,$in2,$leperm
+ lvx_u $in4,$x40,$inp
+ vxor $tweak,$tweak,$tmp
+ vncipherlast $out5,$out5,$in5
+ le?vperm $in3,$in3,$in3,$leperm
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ le?vperm $in4,$in4,$in4,$leperm
+ le?vperm $in5,$in5,$in5,$leperm
+
+ le?vperm $out0,$out0,$out0,$leperm
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk0
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ vxor $out1,$in1,$twk1
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ vxor $out2,$in2,$twk2
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ vxor $out3,$in3,$twk3
+ le?vperm $out5,$out5,$out5,$leperm
+ stvx_u $out4,$x40,$out
+ vxor $out4,$in4,$twk4
+ stvx_u $out5,$x50,$out
+ vxor $out5,$in5,$twk5
+ addi $out,$out,0x60
+
+ mtctr $rounds
+ beq Loop_xts_dec6x # did $len-=96 borrow?
+
+ addic. $len,$len,0x60
+ beq Lxts_dec6x_zero
+ cmpwi $len,0x20
+ blt Lxts_dec6x_one
+ nop
+ beq Lxts_dec6x_two
+ cmpwi $len,0x40
+ blt Lxts_dec6x_three
+ nop
+ beq Lxts_dec6x_four
+
+Lxts_dec6x_five:
+ vxor $out0,$in1,$twk0
+ vxor $out1,$in2,$twk1
+ vxor $out2,$in3,$twk2
+ vxor $out3,$in4,$twk3
+ vxor $out4,$in5,$twk4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk5 # unused tweak
+ vxor $twk1,$tweak,$rndkey0
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk1
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ stvx_u $out4,$x40,$out
+ addi $out,$out,0x50
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_four:
+ vxor $out0,$in2,$twk0
+ vxor $out1,$in3,$twk1
+ vxor $out2,$in4,$twk2
+ vxor $out3,$in5,$twk3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk4 # unused tweak
+ vmr $twk1,$twk5
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk5
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ stvx_u $out3,$x30,$out
+ addi $out,$out,0x40
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_three:
+ vxor $out0,$in3,$twk0
+ vxor $out1,$in4,$twk1
+ vxor $out2,$in5,$twk2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk3 # unused tweak
+ vmr $twk1,$twk4
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk4
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ stvx_u $out2,$x20,$out
+ addi $out,$out,0x30
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_two:
+ vxor $out0,$in4,$twk0
+ vxor $out1,$in5,$twk1
+ vxor $out2,$out2,$out2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk2 # unused tweak
+ vmr $twk1,$twk3
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk3
+ stvx_u $out1,$x10,$out
+ addi $out,$out,0x20
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_one:
+ vxor $out0,$in5,$twk0
+ nop
+Loop_xts_dec1x:
+ vncipher $out0,$out0,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_dec1x
+
+ subi r0,$taillen,1
+ vncipher $out0,$out0,v24
+
+ andi. r0,r0,16
+ cmpwi $taillen,0
+ vncipher $out0,$out0,v25
+
+ sub $inp,$inp,r0
+ vncipher $out0,$out0,v26
+
+ lvx_u $in0,0,$inp
+ vncipher $out0,$out0,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vncipher $out0,$out0,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $twk0,$twk0,v31
+
+ le?vperm $in0,$in0,$in0,$leperm
+ vncipher $out0,$out0,v30
+
+ mtctr $rounds
+ vncipherlast $out0,$out0,$twk0
+
+ vmr $twk0,$twk1 # unused tweak
+ vmr $twk1,$twk2
+ le?vperm $out0,$out0,$out0,$leperm
+ stvx_u $out0,$x00,$out # store output
+ addi $out,$out,0x10
+ vxor $out0,$in0,$twk2
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_zero:
+ cmpwi $taillen,0
+ beq Lxts_dec6x_done
+
+ lvx_u $in0,0,$inp
+ le?vperm $in0,$in0,$in0,$leperm
+ vxor $out0,$in0,$twk1
+Lxts_dec6x_steal:
+ vncipher $out0,$out0,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Lxts_dec6x_steal
+
+ add $inp,$inp,$taillen
+ vncipher $out0,$out0,v24
+
+ cmpwi $taillen,0
+ vncipher $out0,$out0,v25
+
+ lvx_u $in0,0,$inp
+ vncipher $out0,$out0,v26
+
+ lvsr $inpperm,0,$taillen # $in5 is no more
+ vncipher $out0,$out0,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vncipher $out0,$out0,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $twk1,$twk1,v31
+
+ le?vperm $in0,$in0,$in0,$leperm
+ vncipher $out0,$out0,v30
+
+ vperm $in0,$in0,$in0,$inpperm
+ vncipherlast $tmp,$out0,$twk1
+
+ le?vperm $out0,$tmp,$tmp,$leperm
+ le?stvx_u $out0,0,$out
+ be?stvx_u $tmp,0,$out
+
+ vxor $out0,$out0,$out0
+ vspltisb $out1,-1
+ vperm $out0,$out0,$out1,$inpperm
+ vsel $out0,$in0,$tmp,$out0
+ vxor $out0,$out0,$twk0
+
+ subi r30,$out,1
+ mtctr $taillen
+Loop_xts_dec6x_steal:
+ lbzu r0,1(r30)
+ stb r0,16(r30)
+ bdnz Loop_xts_dec6x_steal
+
+ li $taillen,0
+ mtctr $rounds
+ b Loop_xts_dec1x # one more time...
+
+.align 4
+Lxts_dec6x_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_dec6x_ret
+
+ vxor $tweak,$twk0,$rndkey0
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_dec6x_ret:
+ mtlr r11
+ li r10,`$FRAME+15`
+ li r11,`$FRAME+31`
+ stvx $seven,r10,$sp # wipe copies of round keys
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,0x04,1,0x80,6,6,0
+ .long 0
+
+.align 5
+_aesp8_xts_dec5x:
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz _aesp8_xts_dec5x
+
+ subi r0,$taillen,1
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+
+ andi. r0,r0,16
+ cmpwi $taillen,0
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vxor $twk0,$twk0,v31
+
+ sub $inp,$inp,r0
+ vncipher $out0,$out0,v26
+ vncipher $out1,$out1,v26
+ vncipher $out2,$out2,v26
+ vncipher $out3,$out3,v26
+ vncipher $out4,$out4,v26
+ vxor $in1,$twk1,v31
+
+ vncipher $out0,$out0,v27
+ lvx_u $in0,0,$inp
+ vncipher $out1,$out1,v27
+ vncipher $out2,$out2,v27
+ vncipher $out3,$out3,v27
+ vncipher $out4,$out4,v27
+ vxor $in2,$twk2,v31
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ vncipher $out1,$out1,v28
+ vncipher $out2,$out2,v28
+ vncipher $out3,$out3,v28
+ vncipher $out4,$out4,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vxor $in3,$twk3,v31
+
+ vncipher $out0,$out0,v29
+ le?vperm $in0,$in0,$in0,$leperm
+ vncipher $out1,$out1,v29
+ vncipher $out2,$out2,v29
+ vncipher $out3,$out3,v29
+ vncipher $out4,$out4,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $in4,$twk4,v31
+
+ vncipher $out0,$out0,v30
+ vncipher $out1,$out1,v30
+ vncipher $out2,$out2,v30
+ vncipher $out3,$out3,v30
+ vncipher $out4,$out4,v30
+
+ vncipherlast $out0,$out0,$twk0
+ vncipherlast $out1,$out1,$in1
+ vncipherlast $out2,$out2,$in2
+ vncipherlast $out3,$out3,$in3
+ vncipherlast $out4,$out4,$in4
+ mtctr $rounds
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+___
+}} }}}
+
my $consts=1;
foreach(split("\n",$code)) {
s/\`([^\`]*)\`/eval($1)/geo;
@@ -1898,7 +3757,7 @@ foreach(split("\n",$code)) {
if ($flavour =~ /le$/o) {
SWITCH: for($conv) {
/\?inv/ && do { @bytes=map($_^0xf,@bytes); last; };
- /\?rev/ && do { @bytes=reverse(@bytes); last; };
+ /\?rev/ && do { @bytes=reverse(@bytes); last; };
}
}
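A reading aid for the assembly above: the recurring five-instruction sequence commented "next tweak value" (vsrab, vaddubm, vsldoi, vand, vxor with the $eighty7 constant 0x870101..01) computes the XTS multiply-by-alpha in GF(2^128). In plain C, with the little-endian byte order of IEEE P1619, the same per-block step is:

#include <stdint.h>

/* Multiply the 16-byte tweak by x modulo x^128 + x^7 + x^2 + x + 1. */
static void xts_mul_alpha(uint8_t t[16])
{
        uint8_t carry = 0;
        int i;

        for (i = 0; i < 16; i++) {
                uint8_t msb = t[i] >> 7;

                t[i] = (uint8_t)((t[i] << 1) | carry);
                carry = msb;
        }
        if (carry)              /* bit shifted out of the top byte */
                t[0] ^= 0x87;   /* the 0x87 lane of $eighty7 */
}

The vector version performs all 16 byte-doublings at once with vaddubm, then reinjects the inter-byte carries (the 0x01 lanes of $eighty7) and the 0x87 reduction in a single vxor.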
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index e163d5770438..f688c32fbcc7 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -31,10 +31,12 @@ extern struct shash_alg p8_ghash_alg;
extern struct crypto_alg p8_aes_alg;
extern struct crypto_alg p8_aes_cbc_alg;
extern struct crypto_alg p8_aes_ctr_alg;
+extern struct crypto_alg p8_aes_xts_alg;
static struct crypto_alg *algs[] = {
&p8_aes_alg,
&p8_aes_cbc_alg,
&p8_aes_ctr_alg,
+ &p8_aes_xts_alg,
NULL,
};
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 78dac0e9da11..a5be56ec57f2 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -75,7 +75,7 @@ config DEVFREQ_GOV_PASSIVE
comment "DEVFREQ Drivers"
config ARM_EXYNOS_BUS_DEVFREQ
- bool "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+ tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
depends on ARCH_EXYNOS
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select DEVFREQ_GOV_PASSIVE
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 39b048eda2ce..9aea2c7ecbe6 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/of.h>
@@ -481,13 +481,3 @@ static int __init devfreq_event_init(void)
return 0;
}
subsys_initcall(devfreq_event_init);
-
-static void __exit devfreq_event_exit(void)
-{
- class_destroy(devfreq_event_class);
-}
-module_exit(devfreq_event_exit);
-
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_DESCRIPTION("DEVFREQ-Event class support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index e92418facc92..478006b7764a 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -15,7 +15,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
@@ -707,10 +707,12 @@ struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
if (devfreq->dev.parent
&& devfreq->dev.parent->of_node == node) {
mutex_unlock(&devfreq_list_lock);
+ of_node_put(node);
return devfreq;
}
}
mutex_unlock(&devfreq_list_lock);
+ of_node_put(node);
return ERR_PTR(-EPROBE_DEFER);
}
@@ -1199,13 +1201,6 @@ static int __init devfreq_init(void)
}
subsys_initcall(devfreq_init);
-static void __exit devfreq_exit(void)
-{
- class_destroy(devfreq_class);
- destroy_workqueue(devfreq_wq);
-}
-module_exit(devfreq_exit);
-
/*
* The followings are helper functions for devfreq user device drivers with
* OPP framework.
@@ -1471,7 +1466,3 @@ void devm_devfreq_unregister_notifier(struct device *dev,
devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
-
-MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_DESCRIPTION("devfreq class support");
-MODULE_LICENSE("GPL");
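Both exit paths of devfreq_get_devfreq_by_phandle() now drop the node reference, which is the general rule for OF phandle lookups: of_parse_phandle() returns a device_node with its refcount elevated, and every return path must balance it. A minimal sketch of the rule (error handling illustrative):

        struct device_node *node;

        node = of_parse_phandle(dev->of_node, "devfreq", 0);
        if (!node)
                return -ENODEV;         /* no phandle present */

        /* ... use node while the reference is held ... */

        of_node_put(node);              /* balance before returning */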
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index 1e8b4f469f38..eb6f74a2b6b9 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -14,7 +14,7 @@ menuconfig PM_DEVFREQ_EVENT
if PM_DEVFREQ_EVENT
config DEVFREQ_EVENT_EXYNOS_NOCP
- bool "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+ tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
depends on ARCH_EXYNOS
select PM_OPP
help
@@ -22,7 +22,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
(Network on Chip) Probe counters to measure the bandwidth of AXI bus.
config DEVFREQ_EVENT_EXYNOS_PPMU
- bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+ tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
depends on ARCH_EXYNOS
select PM_OPP
help
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index f312485f1451..845bf25fb9fb 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -482,7 +482,8 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
if (!info->edev) {
dev_err(&pdev->dev,
"failed to allocate memory devfreq-event devices\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
edev = info->edev;
platform_set_drvdata(pdev, info);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 2363d0a189b7..29866f7e6d7e 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -383,7 +383,7 @@ err_clk:
static int exynos_bus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
+ struct device_node *np = dev->of_node, *node;
struct devfreq_dev_profile *profile;
struct devfreq_simple_ondemand_data *ondemand_data;
struct devfreq_passive_data *passive_data;
@@ -407,7 +407,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
/* Parse the device-tree to get the resource information */
ret = exynos_bus_parse_of(np, bus);
if (ret < 0)
- goto err;
+ return ret;
profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
if (!profile) {
@@ -415,10 +415,13 @@ static int exynos_bus_probe(struct platform_device *pdev)
goto err;
}
- if (of_parse_phandle(dev->of_node, "devfreq", 0))
+ node = of_parse_phandle(dev->of_node, "devfreq", 0);
+ if (node) {
+ of_node_put(node);
goto passive;
- else
+ } else {
ret = exynos_bus_parent_parse_of(np, bus);
+ }
if (ret < 0)
goto err;
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index f8c5cd53307c..c5f21efd6090 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -126,28 +126,33 @@ static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
hsu_dma_start_channel(hsuc);
}
-static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
-{
- unsigned long flags;
- u32 sr;
-
- spin_lock_irqsave(&hsuc->vchan.lock, flags);
- sr = hsu_chan_readl(hsuc, HSU_CH_SR);
- spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
-
- return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
-}
-
-irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+/*
+ * hsu_dma_get_status() - get DMA channel status
+ * @chip: HSUART DMA chip
+ * @nr: DMA channel number
+ * @status: pointer for DMA Channel Status Register value
+ *
+ * Description:
+ * The function reads and clears the DMA Channel Status Register, checks
+ * if it was a timeout interrupt and returns a corresponding value.
+ *
+ * Caller should provide a valid pointer for the DMA Channel Status
+ * Register value that will be returned in @status.
+ *
+ * Return:
+ * 1 for DMA timeout status, 0 for other DMA status, or a negative error
+ * code for invalid parameters or no interrupt pending.
+ */
+int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
+ u32 *status)
{
struct hsu_dma_chan *hsuc;
- struct hsu_dma_desc *desc;
unsigned long flags;
u32 sr;
/* Sanity check */
if (nr >= chip->hsu->nr_channels)
- return IRQ_NONE;
+ return -EINVAL;
hsuc = &chip->hsu->chan[nr];
@@ -155,22 +160,65 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
* No matter what situation, need read clear the IRQ status
* There is a bug, see Errata 5, HSD 2900918
*/
- sr = hsu_dma_chan_get_sr(hsuc);
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+ /* Check if any interrupt is pending */
+ sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
if (!sr)
- return IRQ_NONE;
+ return -EIO;
/* Timeout IRQ, need wait some time, see Errata 2 */
if (sr & HSU_CH_SR_DESCTO_ANY)
udelay(2);
+ /*
+ * At this point, at least one of Descriptor Time Out, Channel Error
+ * or Descriptor Done bits must be set. Clear the Descriptor Time Out
+ * bits and if sr is still non-zero, it must be channel error or
+ * descriptor done which are higher priority than timeout and handled
+ * in hsu_dma_do_irq(). Else, it must be a timeout.
+ */
sr &= ~HSU_CH_SR_DESCTO_ANY;
- if (!sr)
- return IRQ_HANDLED;
+
+ *status = sr;
+
+ return sr ? 0 : 1;
+}
+EXPORT_SYMBOL_GPL(hsu_dma_get_status);
+
+/*
+ * hsu_dma_do_irq() - DMA interrupt handler
+ * @chip: HSUART DMA chip
+ * @nr: DMA channel number
+ * @status: Channel Status Register value
+ *
+ * Description:
+ * This function handles Channel Error and Descriptor Done interrupts.
+ * This function should be called after determining that the DMA interrupt
+ * is not a normal timeout interrupt, i.e. hsu_dma_get_status() returned 0.
+ *
+ * Return:
+ * IRQ_NONE for invalid channel number, IRQ_HANDLED otherwise.
+ */
+irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
+ u32 status)
+{
+ struct hsu_dma_chan *hsuc;
+ struct hsu_dma_desc *desc;
+ unsigned long flags;
+
+ /* Sanity check */
+ if (nr >= chip->hsu->nr_channels)
+ return IRQ_NONE;
+
+ hsuc = &chip->hsu->chan[nr];
spin_lock_irqsave(&hsuc->vchan.lock, flags);
desc = hsuc->desc;
if (desc) {
- if (sr & HSU_CH_SR_CHE) {
+ if (status & HSU_CH_SR_CHE) {
desc->status = DMA_ERROR;
} else if (desc->active < desc->nents) {
hsu_dma_start_channel(hsuc);
@@ -184,7 +232,7 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
return IRQ_HANDLED;
}
-EXPORT_SYMBOL_GPL(hsu_dma_irq);
+EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
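A hedged sketch of how a UART driver's interrupt handler consumes the split API above (the intended in-tree consumer is the 8250 UART DMA code; the handler and channel number below are illustrative).

#include <linux/dma/hsu.h>
#include <linux/interrupt.h>

static irqreturn_t example_uart_dma_isr(int irq, void *dev_id)
{
	struct hsu_dma_chip *chip = dev_id;
	u32 status;
	int ret;

	ret = hsu_dma_get_status(chip, 0, &status);
	if (ret < 0)
		return IRQ_NONE;	/* invalid channel or nothing pending */
	if (ret > 0)
		return IRQ_HANDLED;	/* timeout: the UART drains its FIFO itself */

	/* channel error or descriptor done */
	return hsu_dma_do_irq(chip, 0, status);
}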
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index e2db76bd56d8..9916058531d9 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -27,13 +27,20 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
struct hsu_dma_chip *chip = dev;
u32 dmaisr;
+ u32 status;
unsigned short i;
irqreturn_t ret = IRQ_NONE;
+ int err;
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
for (i = 0; i < chip->hsu->nr_channels; i++) {
- if (dmaisr & 0x1)
- ret |= hsu_dma_irq(chip, i);
+ if (dmaisr & 0x1) {
+ err = hsu_dma_get_status(chip, i, &status);
+ if (err > 0)
+ ret |= IRQ_HANDLED;
+ else if (err == 0)
+ ret |= hsu_dma_do_irq(chip, i, status);
+ }
dmaisr >>= 1;
}
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 6744d88bdea8..4fb2eb7c800d 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -2378,22 +2378,19 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
* @num_mc: pointer to the memory controllers count, to be incremented in case
* of success.
* @table: model specific table
- * @allow_dups: allow for multiple devices to exist with the same device id
- * (as implemented, this isn't expected to work correctly in the
- * multi-socket case).
- * @multi_bus: don't assume devices on different buses belong to different
- * memory controllers.
*
* returns 0 in case of success or error code
*/
-static int sbridge_get_all_devices_full(u8 *num_mc,
- const struct pci_id_table *table,
- int allow_dups,
- int multi_bus)
+static int sbridge_get_all_devices(u8 *num_mc,
+ const struct pci_id_table *table)
{
int i, rc;
struct pci_dev *pdev = NULL;
+ int allow_dups = 0;
+ int multi_bus = 0;
+ if (table->type == KNIGHTS_LANDING)
+ allow_dups = multi_bus = 1;
while (table && table->descr) {
for (i = 0; i < table->n_devs; i++) {
if (!allow_dups || i == 0 ||
@@ -2420,11 +2417,6 @@ static int sbridge_get_all_devices_full(u8 *num_mc,
return 0;
}
-#define sbridge_get_all_devices(num_mc, table) \
- sbridge_get_all_devices_full(num_mc, table, 0, 0)
-#define sbridge_get_all_devices_knl(num_mc, table) \
- sbridge_get_all_devices_full(num_mc, table, 1, 1)
-
static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
struct sbridge_dev *sbridge_dev)
{
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 2a0e4f45d5b2..972c813c375b 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -2,7 +2,8 @@
# Makefile for external connector class (extcon) devices
#
-obj-$(CONFIG_EXTCON) += extcon.o
+obj-$(CONFIG_EXTCON) += extcon-core.o
+extcon-core-objs += extcon.o devres.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
new file mode 100644
index 000000000000..e686acd1c459
--- /dev/null
+++ b/drivers/extcon/devres.c
@@ -0,0 +1,216 @@
+/*
+ * drivers/extcon/devres.c - EXTCON device's resource management
+ *
+ * Copyright (C) 2016 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon.h>
+
+static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
+{
+ struct extcon_dev **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+static void devm_extcon_dev_release(struct device *dev, void *res)
+{
+ extcon_dev_free(*(struct extcon_dev **)res);
+}
+
+static void devm_extcon_dev_unreg(struct device *dev, void *res)
+{
+ extcon_dev_unregister(*(struct extcon_dev **)res);
+}
+
+struct extcon_dev_notifier_devres {
+ struct extcon_dev *edev;
+ unsigned int id;
+ struct notifier_block *nb;
+};
+
+static void devm_extcon_dev_notifier_unreg(struct device *dev, void *res)
+{
+ struct extcon_dev_notifier_devres *this = res;
+
+ extcon_unregister_notifier(this->edev, this->id, this->nb);
+}
+
+/**
+ * devm_extcon_dev_allocate - Allocate managed extcon device
+ * @dev: device owning the extcon device being created
+ * @supported_cable: Array of supported cable ids ending with EXTCON_NONE.
+ * If supported_cable is NULL, cable name related APIs
+ * are disabled.
+ *
+ * This function automatically manages the memory of the extcon device through
+ * device resource management, simplifying the freeing of that memory.
+ *
+ * Returns a pointer to the allocated extcon_dev on success,
+ * or ERR_PTR(err) on failure.
+ */
+struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const unsigned int *supported_cable)
+{
+ struct extcon_dev **ptr, *edev;
+
+ ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ edev = extcon_dev_allocate(supported_cable);
+ if (IS_ERR(edev)) {
+ devres_free(ptr);
+ return edev;
+ }
+
+ edev->dev.parent = dev;
+
+ *ptr = edev;
+ devres_add(dev, ptr);
+
+ return edev;
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
+
+/**
+ * devm_extcon_dev_free() - Resource-managed extcon_dev_free()
+ * @dev: device the extcon belongs to
+ * @edev: the extcon device to free
+ *
+ * Free the memory that is allocated with devm_extcon_dev_allocate()
+ * function.
+ */
+void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
+{
+ WARN_ON(devres_release(dev, devm_extcon_dev_release,
+ devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
+
+/**
+ * devm_extcon_dev_register() - Resource-managed extcon_dev_register()
+ * @dev: device to allocate extcon device
+ * @edev: the new extcon device to register
+ *
+ * Managed extcon_dev_register() function. If an extcon device is registered
+ * with this function, it is automatically unregistered on driver detach.
+ * Internally this function calls extcon_dev_register(); refer to that
+ * function for more information.
+ *
+ * If an extcon device registered with this function needs to be unregistered
+ * separately, devm_extcon_dev_unregister() should be used.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
+{
+ struct extcon_dev **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = extcon_dev_register(edev);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = edev;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
+
+/**
+ * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
+ * @dev: device the extcon belongs to
+ * @edev: the extcon device to unregister
+ *
+ * Unregister extcon device that is registered with devm_extcon_dev_register()
+ * function.
+ */
+void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
+{
+ WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
+ devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
+
+/**
+ * devm_extcon_register_notifier() - Resource-managed extcon_register_notifier()
+ * @dev: device owning the notifier registration
+ * @edev: the extcon device that has the external connector.
+ * @id: the unique id of each external connector in extcon enumeration.
+ * @nb: a notifier block to be registered.
+ *
+ * This function automatically manages the notifier of the extcon device
+ * using device resource management, simplifying the unregistration of the
+ * notifier.
+ *
+ * Note that the second parameter given to the callback of nb (val) is
+ * "old_state", not the current state. The current state can be retrieved
+ * by looking at the third parameter (edev pointer)'s state value.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev,
+ unsigned int id, struct notifier_block *nb)
+{
+ struct extcon_dev_notifier_devres *ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_extcon_dev_notifier_unreg, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = extcon_register_notifier(edev, id, nb);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ ptr->edev = edev;
+ ptr->id = id;
+ ptr->nb = nb;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL(devm_extcon_register_notifier);
+
+/**
+ * devm_extcon_unregister_notifier() - Resource-managed extcon_unregister_notifier()
+ * @dev: device owning the notifier registration
+ * @edev: the extcon device that has the external connector.
+ * @id: the unique id of each external connector in extcon enumeration.
+ * @nb: a notifier block to be unregistered.
+ */
+void devm_extcon_unregister_notifier(struct device *dev,
+ struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb)
+{
+ WARN_ON(devres_release(dev, devm_extcon_dev_notifier_unreg,
+ devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL(devm_extcon_unregister_notifier);
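Sketch (assumed, not part of the patch) of a typical consumer of the managed helpers above; the cable table and probe function are illustrative.

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>

static const unsigned int example_cables[] = {
	EXTCON_USB,
	EXTCON_NONE,
};

static int example_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	edev = devm_extcon_dev_allocate(&pdev->dev, example_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* both the memory and the registration are undone automatically
	 * when the driver detaches */
	return devm_extcon_dev_register(&pdev->dev, edev);
}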
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 7fc0ae1912f8..44e48aa78a84 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -38,6 +38,7 @@
* @chan: iio channel being queried.
*/
struct adc_jack_data {
+ struct device *dev;
struct extcon_dev *edev;
const unsigned int **cable_names;
@@ -49,6 +50,7 @@ struct adc_jack_data {
struct delayed_work handler;
struct iio_channel *chan;
+ bool wakeup_source;
};
static void adc_jack_handler(struct work_struct *work)
@@ -105,6 +107,7 @@ static int adc_jack_probe(struct platform_device *pdev)
return -EINVAL;
}
+ data->dev = &pdev->dev;
data->edev = devm_extcon_dev_allocate(&pdev->dev, pdata->cable_names);
if (IS_ERR(data->edev)) {
dev_err(&pdev->dev, "failed to allocate extcon device\n");
@@ -128,6 +131,7 @@ static int adc_jack_probe(struct platform_device *pdev)
return PTR_ERR(data->chan);
data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
+ data->wakeup_source = pdata->wakeup_source;
INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);
@@ -151,6 +155,9 @@ static int adc_jack_probe(struct platform_device *pdev)
return err;
}
+ if (data->wakeup_source)
+ device_init_wakeup(&pdev->dev, 1);
+
return 0;
}
@@ -165,11 +172,38 @@ static int adc_jack_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int adc_jack_suspend(struct device *dev)
+{
+ struct adc_jack_data *data = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&data->handler);
+ if (device_may_wakeup(data->dev))
+ enable_irq_wake(data->irq);
+
+ return 0;
+}
+
+static int adc_jack_resume(struct device *dev)
+{
+ struct adc_jack_data *data = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(data->dev))
+ disable_irq_wake(data->irq);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(adc_jack_pm_ops,
+ adc_jack_suspend, adc_jack_resume);
+
static struct platform_driver adc_jack_driver = {
.probe = adc_jack_probe,
.remove = adc_jack_remove,
.driver = {
.name = "adc-jack",
+ .pm = &adc_jack_pm_ops,
},
};
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 2b2fecffb1ad..2512660dc4b9 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -24,8 +24,10 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/acpi.h>
#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
@@ -91,7 +93,7 @@ static int usb_extcon_probe(struct platform_device *pdev)
struct usb_extcon_info *info;
int ret;
- if (!np)
+ if (!np && !ACPI_HANDLE(dev))
return -EINVAL;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -141,7 +143,8 @@ static int usb_extcon_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, info->id_irq);
/* Perform initial detection */
usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -155,6 +158,9 @@ static int usb_extcon_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&info->wq_detcable);
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+
return 0;
}
@@ -164,12 +170,6 @@ static int usb_extcon_suspend(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
- if (device_may_wakeup(dev)) {
- ret = enable_irq_wake(info->id_irq);
- if (ret)
- return ret;
- }
-
/*
* We don't want to process any IRQs after this point
* as GPIOs used behind I2C subsystem might not be
@@ -185,13 +185,10 @@ static int usb_extcon_resume(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
- if (device_may_wakeup(dev)) {
- ret = disable_irq_wake(info->id_irq);
- if (ret)
- return ret;
- }
-
enable_irq(info->id_irq);
+ if (!device_may_wakeup(dev))
+ queue_delayed_work(system_power_efficient_wq,
+ &info->wq_detcable, 0);
return ret;
}
@@ -206,6 +203,12 @@ static const struct of_device_id usb_extcon_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, usb_extcon_dt_match);
+static const struct platform_device_id usb_extcon_platform_ids[] = {
+ { .name = "extcon-usb-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, usb_extcon_platform_ids);
+
static struct platform_driver usb_extcon_driver = {
.probe = usb_extcon_probe,
.remove = usb_extcon_remove,
@@ -214,6 +217,7 @@ static struct platform_driver usb_extcon_driver = {
.pm = &usb_extcon_pm_ops,
.of_match_table = usb_extcon_dt_match,
},
+ .id_table = usb_extcon_platform_ids,
};
module_platform_driver(usb_extcon_driver);
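A hedged sketch of the pattern the hunks above switch to: hand the wake IRQ to the PM core once at probe time, rather than toggling enable_irq_wake() in every suspend/resume pair. Function names are illustrative; only the dev_pm_*_wake_irq() and device_init_wakeup() calls come from the patch.

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int example_wakeup_setup(struct platform_device *pdev, int irq)
{
	device_init_wakeup(&pdev->dev, true);
	/* the PM core now arms/disarms the IRQ across suspend/resume */
	return dev_pm_set_wake_irq(&pdev->dev, irq);
}

static void example_wakeup_teardown(struct platform_device *pdev)
{
	dev_pm_clear_wake_irq(&pdev->dev);
	device_init_wakeup(&pdev->dev, false);
}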
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 21a123cadf78..8682efc0f57b 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -77,6 +77,26 @@ static const char *extcon_name[] = {
NULL,
};
+/**
+ * struct extcon_cable - Internal data for each cable of an extcon device.
+ * @edev: The extcon device
+ * @cable_index: Index of this cable in the edev
+ * @attr_g: Attribute group for the cable
+ * @attr_name: "name" sysfs entry
+ * @attr_state: "state" sysfs entry
+ * @attrs: Array pointing to attr_name and attr_state for attr_g
+ */
+struct extcon_cable {
+ struct extcon_dev *edev;
+ int cable_index;
+
+ struct attribute_group attr_g;
+ struct device_attribute attr_name;
+ struct device_attribute attr_state;
+
+ struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
+};
+
static struct class *extcon_class;
#if defined(CONFIG_ANDROID)
static struct class_compat *switch_class;
@@ -127,38 +147,6 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
return -EINVAL;
}
-static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
-{
- int id = -EINVAL;
- int i = 0;
-
- /* Find the id of extcon cable */
- while (extcon_name[i]) {
- if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
- id = i;
- break;
- }
- i++;
- }
-
- return id;
-}
-
-static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
-{
- int id;
-
- if (edev->max_supported == 0)
- return -EINVAL;
-
- /* Find the the number of extcon cable */
- id = find_cable_id_by_name(edev, name);
- if (id < 0)
- return id;
-
- return find_cable_index_by_id(edev, id);
-}
-
static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
{
if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
@@ -374,25 +362,6 @@ int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
/**
- * extcon_get_cable_state() - Get the status of a specific cable.
- * @edev: the extcon device that has the cable.
- * @cable_name: cable name.
- *
- * Note that this is slower than extcon_get_cable_state_.
- */
-int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
-{
- int id;
-
- id = find_cable_id_by_name(edev, cable_name);
- if (id < 0)
- return id;
-
- return extcon_get_cable_state_(edev, id);
-}
-EXPORT_SYMBOL_GPL(extcon_get_cable_state);
-
-/**
* extcon_set_cable_state_() - Set the status of a specific cable.
* @edev: the extcon device that has the cable.
* @id: the unique id of each external connector
@@ -422,28 +391,6 @@ int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
/**
- * extcon_set_cable_state() - Set the status of a specific cable.
- * @edev: the extcon device that has the cable.
- * @cable_name: cable name.
- * @cable_state: the new cable status. The default semantics is
- * true: attached / false: detached.
- *
- * Note that this is slower than extcon_set_cable_state_.
- */
-int extcon_set_cable_state(struct extcon_dev *edev,
- const char *cable_name, bool cable_state)
-{
- int id;
-
- id = find_cable_id_by_name(edev, cable_name);
- if (id < 0)
- return id;
-
- return extcon_set_cable_state_(edev, id, cable_state);
-}
-EXPORT_SYMBOL_GPL(extcon_set_cable_state);
-
-/**
* extcon_get_extcon_dev() - Get the extcon device instance from the name
* @extcon_name: The extcon name provided with extcon_dev_register()
*/
@@ -467,105 +414,6 @@ out:
EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
/**
- * extcon_register_interest() - Register a notifier for a state change of a
- * specific cable, not an entier set of cables of a
- * extcon device.
- * @obj: an empty extcon_specific_cable_nb object to be returned.
- * @extcon_name: the name of extcon device.
- * if NULL, extcon_register_interest will register
- * every cable with the target cable_name given.
- * @cable_name: the target cable name.
- * @nb: the notifier block to get notified.
- *
- * Provide an empty extcon_specific_cable_nb. extcon_register_interest() sets
- * the struct for you.
- *
- * extcon_register_interest is a helper function for those who want to get
- * notification for a single specific cable's status change. If a user wants
- * to get notification for any changes of all cables of a extcon device,
- * he/she should use the general extcon_register_notifier().
- *
- * Note that the second parameter given to the callback of nb (val) is
- * "old_state", not the current state. The current state can be retrieved
- * by looking at the third pameter (edev pointer)'s state value.
- */
-int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
-{
- unsigned long flags;
- int ret;
-
- if (!obj || !cable_name || !nb)
- return -EINVAL;
-
- if (extcon_name) {
- obj->edev = extcon_get_extcon_dev(extcon_name);
- if (!obj->edev)
- return -ENODEV;
-
- obj->cable_index = find_cable_index_by_name(obj->edev,
- cable_name);
- if (obj->cable_index < 0)
- return obj->cable_index;
-
- obj->user_nb = nb;
-
- spin_lock_irqsave(&obj->edev->lock, flags);
- ret = raw_notifier_chain_register(
- &obj->edev->nh[obj->cable_index],
- obj->user_nb);
- spin_unlock_irqrestore(&obj->edev->lock, flags);
- } else {
- struct class_dev_iter iter;
- struct extcon_dev *extd;
- struct device *dev;
-
- if (!extcon_class)
- return -ENODEV;
- class_dev_iter_init(&iter, extcon_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- extd = dev_get_drvdata(dev);
-
- if (find_cable_index_by_name(extd, cable_name) < 0)
- continue;
-
- class_dev_iter_exit(&iter);
- return extcon_register_interest(obj, extd->name,
- cable_name, nb);
- }
-
- ret = -ENODEV;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(extcon_register_interest);
-
-/**
- * extcon_unregister_interest() - Unregister the notifier registered by
- * extcon_register_interest().
- * @obj: the extcon_specific_cable_nb object returned by
- * extcon_register_interest().
- */
-int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
-{
- unsigned long flags;
- int ret;
-
- if (!obj)
- return -EINVAL;
-
- spin_lock_irqsave(&obj->edev->lock, flags);
- ret = raw_notifier_chain_unregister(
- &obj->edev->nh[obj->cable_index], obj->user_nb);
- spin_unlock_irqrestore(&obj->edev->lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(extcon_unregister_interest);
-
-/**
* extcon_register_notifier() - Register a notifiee to get notified by
* any attach status changes from the extcon.
* @edev: the extcon device that has the external connecotr.
@@ -582,14 +430,35 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
unsigned long flags;
int ret, idx;
- if (!edev || !nb)
+ if (!nb)
return -EINVAL;
- idx = find_cable_index_by_id(edev, id);
+ if (edev) {
+ idx = find_cable_index_by_id(edev, id);
+ if (idx < 0)
+ return idx;
- spin_lock_irqsave(&edev->lock, flags);
- ret = raw_notifier_chain_register(&edev->nh[idx], nb);
- spin_unlock_irqrestore(&edev->lock, flags);
+ spin_lock_irqsave(&edev->lock, flags);
+ ret = raw_notifier_chain_register(&edev->nh[idx], nb);
+ spin_unlock_irqrestore(&edev->lock, flags);
+ } else {
+ struct extcon_dev *extd;
+
+ mutex_lock(&extcon_dev_list_lock);
+ list_for_each_entry(extd, &extcon_dev_list, entry) {
+ idx = find_cable_index_by_id(extd, id);
+ if (idx >= 0)
+ break;
+ }
+ mutex_unlock(&extcon_dev_list_lock);
+
+ if (idx >= 0) {
+ edev = extd;
+ return extcon_register_notifier(extd, id, nb);
+ } else {
+ ret = -ENODEV;
+ }
+ }
return ret;
}
@@ -611,6 +480,8 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
return -EINVAL;
idx = find_cable_index_by_id(edev, id);
+ if (idx < 0)
+ return idx;
spin_lock_irqsave(&edev->lock, flags);
ret = raw_notifier_chain_unregister(&edev->nh[idx], nb);
@@ -693,66 +564,6 @@ void extcon_dev_free(struct extcon_dev *edev)
}
EXPORT_SYMBOL_GPL(extcon_dev_free);
-static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
-{
- struct extcon_dev **r = res;
-
- if (WARN_ON(!r || !*r))
- return 0;
-
- return *r == data;
-}
-
-static void devm_extcon_dev_release(struct device *dev, void *res)
-{
- extcon_dev_free(*(struct extcon_dev **)res);
-}
-
-/**
- * devm_extcon_dev_allocate - Allocate managed extcon device
- * @dev: device owning the extcon device being created
- * @supported_cable: Array of supported extcon ending with EXTCON_NONE.
- * If supported_cable is NULL, cable name related APIs
- * are disabled.
- *
- * This function manages automatically the memory of extcon device using device
- * resource management and simplify the control of freeing the memory of extcon
- * device.
- *
- * Returns the pointer memory of allocated extcon_dev if success
- * or ERR_PTR(err) if fail
- */
-struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *supported_cable)
-{
- struct extcon_dev **ptr, *edev;
-
- ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- edev = extcon_dev_allocate(supported_cable);
- if (IS_ERR(edev)) {
- devres_free(ptr);
- return edev;
- }
-
- edev->dev.parent = dev;
-
- *ptr = edev;
- devres_add(dev, ptr);
-
- return edev;
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
-
-void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
-{
- WARN_ON(devres_release(dev, devm_extcon_dev_release,
- devm_extcon_dev_match, edev));
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
-
/**
* extcon_dev_register() - Register a new extcon device
* @edev : the new extcon device (should be allocated before calling)
@@ -1018,63 +829,6 @@ void extcon_dev_unregister(struct extcon_dev *edev)
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
-static void devm_extcon_dev_unreg(struct device *dev, void *res)
-{
- extcon_dev_unregister(*(struct extcon_dev **)res);
-}
-
-/**
- * devm_extcon_dev_register() - Resource-managed extcon_dev_register()
- * @dev: device to allocate extcon device
- * @edev: the new extcon device to register
- *
- * Managed extcon_dev_register() function. If extcon device is attached with
- * this function, that extcon device is automatically unregistered on driver
- * detach. Internally this function calls extcon_dev_register() function.
- * To get more information, refer that function.
- *
- * If extcon device is registered with this function and the device needs to be
- * unregistered separately, devm_extcon_dev_unregister() should be used.
- *
- * Returns 0 if success or negaive error number if failure.
- */
-int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
-{
- struct extcon_dev **ptr;
- int ret;
-
- ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = extcon_dev_register(edev);
- if (ret) {
- devres_free(ptr);
- return ret;
- }
-
- *ptr = edev;
- devres_add(dev, ptr);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
-
-/**
- * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
- * @dev: device the extcon belongs to
- * @edev: the extcon device to unregister
- *
- * Unregister extcon device that is registered with devm_extcon_dev_register()
- * function.
- */
-void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
-{
- WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
- devm_extcon_dev_match, edev));
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
-
#ifdef CONFIG_OF
/*
* extcon_get_edev_by_phandle - Get the extcon device from devicetree
@@ -1107,10 +861,12 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
list_for_each_entry(edev, &extcon_dev_list, entry) {
if (edev->dev.parent && edev->dev.parent->of_node == node) {
mutex_unlock(&extcon_dev_list_lock);
+ of_node_put(node);
return edev;
}
}
mutex_unlock(&extcon_dev_list_lock);
+ of_node_put(node);
return ERR_PTR(-EPROBE_DEFER);
}
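A hedged sketch of the NULL-edev path added to extcon_register_notifier() above: passing a NULL device registers against the first extcon device that supports the given connector id. The callback body is illustrative.

#include <linux/extcon.h>
#include <linux/notifier.h>

static int example_usb_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	/* event/ptr semantics follow the extcon notifier documentation */
	return NOTIFY_OK;
}

static struct notifier_block example_usb_nb = {
	.notifier_call = example_usb_event,
};

static int __init example_init(void)
{
	return extcon_register_notifier(NULL, EXTCON_USB, &example_usb_nb);
}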
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index eac76a79a880..30a24d09ea6c 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -34,6 +34,7 @@ struct pstore_read_data {
int *count;
struct timespec *timespec;
bool *compressed;
+ ssize_t *ecc_notice_size;
char **buf;
};
@@ -69,6 +70,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
*cb_data->compressed = true;
else
*cb_data->compressed = false;
+ *cb_data->ecc_notice_size = 0;
} else if (sscanf(name, "dump-type%u-%u-%d-%lu",
cb_data->type, &part, &cnt, &time) == 4) {
*cb_data->id = generic_id(time, part, cnt);
@@ -76,6 +78,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
cb_data->timespec->tv_sec = time;
cb_data->timespec->tv_nsec = 0;
*cb_data->compressed = false;
+ *cb_data->ecc_notice_size = 0;
} else if (sscanf(name, "dump-type%u-%u-%lu",
cb_data->type, &part, &time) == 3) {
/*
@@ -88,6 +91,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
cb_data->timespec->tv_sec = time;
cb_data->timespec->tv_nsec = 0;
*cb_data->compressed = false;
+ *cb_data->ecc_notice_size = 0;
} else
return 0;
@@ -210,6 +214,7 @@ static int efi_pstore_sysfs_entry_iter(void *data, struct efivar_entry **pos)
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *timespec,
char **buf, bool *compressed,
+ ssize_t *ecc_notice_size,
struct pstore_info *psi)
{
struct pstore_read_data data;
@@ -220,6 +225,7 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
data.count = count;
data.timespec = timespec;
data.compressed = compressed;
+ data.ecc_notice_size = ecc_notice_size;
data.buf = buf;
*data.buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
@@ -393,6 +399,13 @@ static __init int efivars_pstore_init(void)
static __exit void efivars_pstore_exit(void)
{
+ if (!efi_pstore_info.bufsize)
+ return;
+
+ pstore_unregister(&efi_pstore_info);
+ kfree(efi_pstore_info.buf);
+ efi_pstore_info.buf = NULL;
+ efi_pstore_info.bufsize = 0;
}
module_init(efivars_pstore_init);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 05509f3aaee8..8730fd475bf3 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -24,6 +24,9 @@
#include <linux/of_fdt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/ucs2_string.h>
#include <asm/early_ioremap.h>
@@ -195,6 +198,96 @@ static void generic_ops_unregister(void)
efivars_unregister(&generic_efivars);
}
+#if IS_ENABLED(CONFIG_ACPI)
+#define EFIVAR_SSDT_NAME_MAX 16
+static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
+static int __init efivar_ssdt_setup(char *str)
+{
+ if (strlen(str) < sizeof(efivar_ssdt))
+ memcpy(efivar_ssdt, str, strlen(str));
+ else
+ pr_warn("efivar_ssdt: name too long: %s\n", str);
+ return 0;
+}
+__setup("efivar_ssdt=", efivar_ssdt_setup);
+
+static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ struct efivar_entry *entry;
+ struct list_head *list = data;
+ char utf8_name[EFIVAR_SSDT_NAME_MAX];
+ int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
+
+ ucs2_as_utf8(utf8_name, name, limit - 1);
+ if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
+ return 0;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return 0;
+
+ memcpy(entry->var.VariableName, name, name_size);
+ memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
+
+ efivar_entry_add(entry, list);
+
+ return 0;
+}
+
+static __init int efivar_ssdt_load(void)
+{
+ LIST_HEAD(entries);
+ struct efivar_entry *entry, *aux;
+ unsigned long size;
+ void *data;
+ int ret;
+
+ ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
+
+ list_for_each_entry_safe(entry, aux, &entries, list) {
+ pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
+ &entry->var.VendorGuid);
+
+ list_del(&entry->list);
+
+ ret = efivar_entry_size(entry, &size);
+ if (ret) {
+ pr_err("failed to get var size\n");
+ goto free_entry;
+ }
+
+ data = kmalloc(size, GFP_KERNEL);
+ if (!data)
+ goto free_entry;
+
+ ret = efivar_entry_get(entry, NULL, &size, data);
+ if (ret) {
+ pr_err("failed to get var data\n");
+ goto free_data;
+ }
+
+ ret = acpi_load_table(data);
+ if (ret) {
+ pr_err("failed to load table: %d\n", ret);
+ goto free_data;
+ }
+
+ goto free_entry;
+
+free_data:
+ kfree(data);
+
+free_entry:
+ kfree(entry);
+ }
+
+ return ret;
+}
+#else
+static inline int efivar_ssdt_load(void) { return 0; }
+#endif
+
/*
* We register the efi subsystem with the firmware subsystem and the
* efivars subsystem with the efi subsystem, if the system was booted with
@@ -218,6 +311,9 @@ static int __init efisubsys_init(void)
if (error)
goto err_put;
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efivar_ssdt_load();
+
error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
if (error) {
pr_err("efi: Sysfs attribute export failed with error %d.\n",
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 8dd0c7085e59..503bbe2a9d49 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -37,13 +37,13 @@ static int efibc_set_variable(const char *name, const char *value)
size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
if (size > sizeof(entry->var.Data)) {
- pr_err("value is too large");
+ pr_err("value is too large (%zu bytes) for '%s' EFI variable\n", size, name);
return -EINVAL;
}
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
- pr_err("failed to allocate efivar entry");
+ pr_err("failed to allocate efivar entry for '%s' EFI variable\n", name);
return -ENOMEM;
}
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 23bef6bb73ee..41958774cde3 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -22,7 +22,16 @@
#include <linux/stringify.h>
#include <asm/efi.h>
-static void efi_call_virt_check_flags(unsigned long flags, const char *call)
+/*
+ * Wrap around the new efi_call_virt_generic() macros so that the
+ * code doesn't get too cluttered:
+ */
+#define efi_call_virt(f, args...) \
+ efi_call_virt_pointer(efi.systab->runtime, f, args)
+#define __efi_call_virt(f, args...) \
+ __efi_call_virt_pointer(efi.systab->runtime, f, args)
+
+void efi_call_virt_check_flags(unsigned long flags, const char *call)
{
unsigned long cur_flags, mismatch;
@@ -39,48 +48,6 @@ static void efi_call_virt_check_flags(unsigned long flags, const char *call)
}
/*
- * Arch code can implement the following three template macros, avoiding
- * reptition for the void/non-void return cases of {__,}efi_call_virt:
- *
- * * arch_efi_call_virt_setup
- *
- * Sets up the environment for the call (e.g. switching page tables,
- * allowing kernel-mode use of floating point, if required).
- *
- * * arch_efi_call_virt
- *
- * Performs the call. The last expression in the macro must be the call
- * itself, allowing the logic to be shared by the void and non-void
- * cases.
- *
- * * arch_efi_call_virt_teardown
- *
- * Restores the usual kernel environment once the call has returned.
- */
-
-#define efi_call_virt(f, args...) \
-({ \
- efi_status_t __s; \
- unsigned long flags; \
- arch_efi_call_virt_setup(); \
- local_save_flags(flags); \
- __s = arch_efi_call_virt(f, args); \
- efi_call_virt_check_flags(flags, __stringify(f)); \
- arch_efi_call_virt_teardown(); \
- __s; \
-})
-
-#define __efi_call_virt(f, args...) \
-({ \
- unsigned long flags; \
- arch_efi_call_virt_setup(); \
- local_save_flags(flags); \
- arch_efi_call_virt(f, args); \
- efi_call_virt_check_flags(flags, __stringify(f)); \
- arch_efi_call_virt_teardown(); \
-})
-
-/*
* According to section 7.1 of the UEFI spec, Runtime Services are not fully
* reentrant, and there are particular combinations of calls that need to be
* serialized. (source: UEFI Specification v2.4A)
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 03e04582791c..8263429e21b8 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) "psci: " fmt
+#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/cpuidle.h>
#include <linux/errno.h>
@@ -256,13 +257,6 @@ static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
u32 *psci_states;
struct device_node *state_node;
- /*
- * If the PSCI cpu_suspend function hook has not been initialized
- * idle states must not be enabled, so bail out
- */
- if (!psci_ops.cpu_suspend)
- return -EOPNOTSUPP;
-
/* Count idle states */
while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
count))) {
@@ -310,11 +304,69 @@ free_mem:
return ret;
}
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+
+static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+ int i, count;
+ u32 *psci_states;
+ struct acpi_lpi_state *lpi;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (unlikely(!pr || !pr->flags.has_lpi))
+ return -EINVAL;
+
+ count = pr->power.count - 1;
+ if (count <= 0)
+ return -ENODEV;
+
+ psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+ if (!psci_states)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ u32 state;
+
+ lpi = &pr->power.lpi_states[i + 1];
+ /*
+ * Only bits[31:0] represent a PSCI power_state while
+ * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
+ */
+ state = lpi->address;
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ kfree(psci_states);
+ return -EINVAL;
+ }
+ psci_states[i] = state;
+ }
+ /* Idle states parsed correctly, initialize per-cpu pointer */
+ per_cpu(psci_power_state, cpu) = psci_states;
+ return 0;
+}
+#else
+static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+ return -EINVAL;
+}
+#endif
+
int psci_cpu_init_idle(unsigned int cpu)
{
struct device_node *cpu_node;
int ret;
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized
+ * idle states must not be enabled, so bail out
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ if (!acpi_disabled)
+ return psci_acpi_cpu_init_idle(cpu);
+
cpu_node = of_get_cpu_node(cpu, NULL);
if (!cpu_node)
return -ENODEV;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index cebcb405812e..d7860614f87f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -49,7 +49,7 @@ config GPIO_DEVRES
config OF_GPIO
def_bool y
- depends on OF || COMPILE_TEST
+ depends on OF
config GPIO_ACPI
def_bool y
@@ -402,9 +402,12 @@ config GPIO_TB10X
select OF_GPIO
config GPIO_TEGRA
- bool
- default y
+ bool "NVIDIA Tegra GPIO support"
+ default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
+ depends on OF
+ help
+ Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
config GPIO_TS4800
tristate "TS-4800 DIO blocks and compatibles"
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index e85e7539cf5d..eb43ae4835c1 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
return gpio % 8;
}
-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
+static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
{
- struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned short offset, bit;
u8 reg_val;
@@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
return reg_val;
}
-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
+static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
int val)
{
- struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned short offset, bit;
u8 reg_val;
@@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
- sch_gpio_reg_set(gc, gpio_num, GIO, 1);
+ sch_gpio_reg_set(sch, gpio_num, GIO, 1);
spin_unlock(&sch->lock);
return 0;
}
static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
{
- return sch_gpio_reg_get(gc, gpio_num, GLV);
+ struct sch_gpio *sch = gpiochip_get_data(gc);
+ return sch_gpio_reg_get(sch, gpio_num, GLV);
}
static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
@@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
- sch_gpio_reg_set(gc, gpio_num, GLV, val);
+ sch_gpio_reg_set(sch, gpio_num, GLV, val);
spin_unlock(&sch->lock);
}
@@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
struct sch_gpio *sch = gpiochip_get_data(gc);
spin_lock(&sch->lock);
- sch_gpio_reg_set(gc, gpio_num, GIO, 0);
+ sch_gpio_reg_set(sch, gpio_num, GIO, 0);
spin_unlock(&sch->lock);
/*
@@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
* GPIO7 is configured by the CMC as SLPIOVR
* Enable GPIO[9:8] core powered gpios explicitly
*/
- sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
- sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
+ sch_gpio_reg_set(sch, 8, GEN, 1);
+ sch_gpio_reg_set(sch, 9, GEN, 1);
/*
* SUS_GPIO[2:0] enabled by default
* Enable SUS_GPIO3 resume powered gpio explicitly
*/
- sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
+ sch_gpio_reg_set(sch, 13, GEN, 1);
break;
case PCI_DEVICE_ID_INTEL_ITC_LPC:
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 3a5c7011ad3b..8b830996fe02 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
if (!desc && gpio_is_valid(gpio))
return -EPROBE_DEFER;
+ err = gpiod_request(desc, label);
+ if (err)
+ return err;
+
if (flags & GPIOF_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
@@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
if (flags & GPIOF_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- err = gpiod_request(desc, label);
- if (err)
- return err;
-
if (flags & GPIOF_DIR_IN)
err = gpiod_direction_input(desc);
else
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 570771ed19e6..be74bd370f1f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1352,14 +1352,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
spin_lock_irqsave(&gpio_lock, flags);
}
done:
- if (status < 0) {
- /* Clear flags that might have been set by the caller before
- * requesting the GPIO.
- */
- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
- clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
- clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
- }
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
@@ -2587,28 +2579,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
}
EXPORT_SYMBOL_GPL(gpiod_get_optional);
-/**
- * gpiod_parse_flags - helper function to parse GPIO lookup flags
- * @desc: gpio to be setup
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
- *
- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
- */
-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
-{
- if (lflags & GPIO_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lflags & GPIO_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (lflags & GPIO_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-}
/**
* gpiod_configure_flags - helper function to configure a given GPIO
* @desc: gpio whose value will be assigned
* @con_id: function within the GPIO consumer
+ * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
+ * of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -2616,10 +2593,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
* occurred while trying to acquire the GPIO.
*/
static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
- enum gpiod_flags dflags)
+ unsigned long lflags, enum gpiod_flags dflags)
{
int status;
+ if (lflags & GPIO_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ if (lflags & GPIO_OPEN_DRAIN)
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ if (lflags & GPIO_OPEN_SOURCE)
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
pr_debug("no flags found for %s\n", con_id);
@@ -2686,13 +2670,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}
- gpiod_parse_flags(desc, lookupflags);
-
status = gpiod_request(desc, con_id);
if (status < 0)
return ERR_PTR(status);
- status = gpiod_configure_flags(desc, con_id, flags);
+ status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (status < 0) {
dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
@@ -2748,6 +2730,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
if (IS_ERR(desc))
return desc;
+ ret = gpiod_request(desc, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
if (active_low)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -2758,10 +2744,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
}
- ret = gpiod_request(desc, NULL);
- if (ret)
- return ERR_PTR(ret);
-
return desc;
}
EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -2814,8 +2796,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
chip = gpiod_to_chip(desc);
hwnum = gpio_chip_hwgpio(desc);
- gpiod_parse_flags(desc, lflags);
-
local_desc = gpiochip_request_own_desc(chip, hwnum, name);
if (IS_ERR(local_desc)) {
status = PTR_ERR(local_desc);
@@ -2824,7 +2804,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
return status;
}
- status = gpiod_configure_flags(desc, name, dflags);
+ status = gpiod_configure_flags(desc, name, lflags, dflags);
if (status < 0) {
pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
name, chip->label, hwnum, status);
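A hedged sketch of the ordering the gpiolib hunks above converge on: request the descriptor first, then apply the lookup flags, so a failed request can no longer leave stale flag bits behind; this is also why the clear-on-failure block in __gpiod_request() could be dropped. The helper below mirrors the internal logic (gpiod_request() and FLAG_ACTIVE_LOW live in gpiolib's private scope) and is illustrative only.

static int example_acquire(struct gpio_desc *desc, unsigned long lflags,
			   const char *label)
{
	int status = gpiod_request(desc, label);	/* 1: own the line */

	if (status < 0)
		return status;				/* flags untouched */

	if (lflags & GPIO_ACTIVE_LOW)			/* 2: then configure */
		set_bit(FLAG_ACTIVE_LOW, &desc->flags);

	return 0;
}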
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 252edba16e36..892d60fb225b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -421,29 +421,6 @@ static int acp_suspend(void *handle)
static int acp_resume(void *handle)
{
- int i, ret;
- struct acp_pm_domain *apd;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- /* return early if no ACP */
- if (!adev->acp.acp_genpd)
- return 0;
-
- /* SMU block will power on ACP irrespective of ACP runtime status.
- * Power off explicitly based on genpd ACP runtime status so that ACP
- * hw and ACP-genpd status are in sync.
- * 'suspend_power_off' represents "Power status before system suspend"
- */
- if (adev->acp.acp_genpd->gpd.suspend_power_off == true) {
- apd = container_of(&adev->acp.acp_genpd->gpd,
- struct acp_pm_domain, gpd);
-
- for (i = 4; i >= 0 ; i--) {
- ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
- if (ret)
- pr_err("ACP tile %d tile suspend failed\n", i);
- }
- }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index 13cdb01e9b45..bc56c8a181e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -156,3 +156,18 @@ u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev, u8 slave_addr, u8 line_number, u8 offset, u8 data)
+{
+ PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+
+ args.ucRegIndex = offset;
+ args.lpI2CDataOut = data;
+ args.ucFlag = 1;
+ args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+ args.ucTransBytes = 1;
+ args.ucSlaveAddr = slave_addr;
+ args.ucLineNumber = line_number;
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
index d6128d9de56e..251aaf41f65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
@@ -27,5 +27,7 @@
int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num);
u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap);
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev,
+ u8 slave_addr, u8 line_number, u8 offset, u8 data);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b2ebd4fef6cf..c2ef94511f70 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -28,6 +28,7 @@
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
#include "clearstate_vi.h"
#include "gmc/gmc_8_2_d.h"
@@ -284,6 +285,7 @@ static const u32 golden_settings_polaris11_a11[] =
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 polaris11_golden_common_all[] =
@@ -314,6 +316,7 @@ static const u32 golden_settings_polaris10_a11[] =
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 polaris10_golden_common_all[] =
@@ -696,6 +699,10 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
polaris10_golden_common_all,
(const u32)ARRAY_SIZE(polaris10_golden_common_all));
WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
+ if (adev->pdev->revision == 0xc7) {
+ amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
+ amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
+ }
break;
case CHIP_CARRIZO:
amdgpu_program_register_sequence(adev,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index ec2a7ada346a..91e25f942d90 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -98,7 +98,6 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-#define CEILING_UCHAR(double) ((double-(uint8_t)(double)) > 0 ? (uint8_t)(double+1) : (uint8_t)(double))
static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
@@ -733,7 +732,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
table->Smio[level] |=
data->mvdd_voltage_table.entries[level].smio_low;
}
- table->SmioMask2 = data->vddci_voltage_table.mask_low;
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
}
@@ -1807,27 +1806,25 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
ro = efuse * (max -min)/255 + min;
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset
- * there is a little difference in calculating
- * volt_with_cks with windows */
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
for (i = 0; i < sclk_table->count; i++) {
data->smc_state_table.Sclk_CKS_masterEn0_7 |=
sclk_table->entries[i].cks_enable << i;
if (hwmgr->chip_id == CHIP_POLARIS10) {
- volt_without_cks = (uint32_t)((2753594000 + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
+ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
(2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
- volt_with_cks = (uint32_t)((279720200 + sclk_table->entries[i].clk * 3232 - (ro - 65) * 100000000) / \
- (252248000 - sclk_table->entries[i].clk/100 * 115764));
+ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
} else {
- volt_without_cks = (uint32_t)((2416794800 + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
- (2625416 - (sclk_table->entries[i].clk/100) * 12586807/10000));
- volt_with_cks = (uint32_t)((2999656000 + sclk_table->entries[i].clk * 392803/100 - (ro - 44) * 1000000) / \
- (3422454 - sclk_table->entries[i].clk/100 * 18886376/10000));
+ volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
+ (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+ volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+ (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
}
if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)CEILING_UCHAR((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625);
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
}
@@ -2685,7 +2682,7 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint16_t vv_id;
- uint16_t vddc = 0;
+ uint32_t vddc = 0;
uint16_t i, j;
uint32_t sclk = 0;
struct phm_ppt_v1_information *table_info =
@@ -2716,8 +2713,9 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
continue);
- /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
- PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
+ /* need to make sure vddc is less than 2V, or else it could burn the ASIC.
+ * real voltage level is in units of 0.01 mV */
+ PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
"Invalid VDDC value", result = -EINVAL;);
/* the voltage should not be zero nor equal to leakage ID */
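The removed CEILING_UCHAR() macro leaned on float-to-integer truncation; the replacement above is plain integer ceiling division. A sketch of the identity it uses, written with the kernel's stock DIV_ROUND_UP() for comparison (the wrapper name is illustrative):

#include <linux/kernel.h>

static inline u8 example_volt_offset(u32 scaled)
{
	/* (scaled + 624) / 625 == DIV_ROUND_UP(scaled, 625),
	 * assuming scaled + 624 does not overflow u32 */
	return (u8)DIV_ROUND_UP(scaled, 625);
}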
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index bf4e18fd3872..90b35c5c10a4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1256,7 +1256,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
}
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
+ uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
{
int result;
@@ -1274,7 +1274,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
if (0 != result)
return result;
- *voltage = get_voltage_info_param_space.usVoltageLevel;
+ *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index 248c5db5f380..1e35a9625baf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -305,7 +305,7 @@ extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t
extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
uint8_t level);
extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+ uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 233eb7f36c1d..5d0f655bf160 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -1302,7 +1302,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
table->Smio[count] |=
data->mvdd_voltage_table.entries[count].smio_low;
}
- table->SmioMask2 = data->vddci_voltage_table.mask_low;
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 671fdb4d615a..dccc859f638c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -302,7 +302,7 @@ static int init_dpm_2_parameters(
(((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
if (0 != powerplay_table->usPPMTableOffset) {
- if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
+ if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnablePlatformPowerManagement);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f313b4d8344f..85c4debf47e0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -512,6 +512,10 @@ void intel_detect_pch(struct drm_device *dev)
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
+ } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_KBP;
+ DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+ WARN_ON(!IS_KABYLAKE(dev));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7c334e902266..bc3f2e6842e7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -990,6 +990,7 @@ enum intel_pch {
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
+ PCH_KBP, /* Kabypoint PCH */
PCH_NOP,
};
@@ -2600,6 +2601,15 @@ struct drm_i915_cmd_table {
#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define KBL_REVID_A0 0x0
+#define KBL_REVID_B0 0x1
+#define KBL_REVID_C0 0x2
+#define KBL_REVID_D0 0x3
+#define KBL_REVID_E0 0x4
+
+#define IS_KBL_REVID(p, since, until) \
+ (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+
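IS_KBL_REVID follows the existing IS_SKL_REVID/IS_BXT_REVID pattern: a workaround can be bounded to a stepping window, with REVID_FOREVER as the open upper bound. Typical use, mirroring the callers added later in this series:

    /* apply a workaround only on early KBL steppings (A0..B0) */
    if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_B0))
            I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                       GEN8_SDEUNIT_CLOCK_GATE_DISABLE);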
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2708,11 +2718,13 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 425e721aac58..66571466e9a8 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index b7ce963fb8f8..44004e3f09e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
return -ENODEV;
/* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
- if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+ * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+ */
+ if (start < 4096 && (IS_GEN8(dev_priv) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2f6fd33c07ba..aab47f7bb61b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2471,7 +2471,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_SPT(dev_priv))
+ if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
spt_irq_handler(dev, iir);
else
cpt_irq_handler(dev, iir);
@@ -4661,7 +4661,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_BROXTON(dev))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev))
+ else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b407411e31ba..3fcf7dd5b6ca 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -220,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+#define GEN8_CONFIG0 _MMIO(0xD00)
+#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
+
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
@@ -1669,6 +1672,9 @@ enum skl_disp_power_wells {
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
+
#if 0
#define PRB0_TAIL _MMIO(0x2030)
#define PRB0_HEAD _MMIO(0x2034)
@@ -1804,6 +1810,10 @@ enum skl_disp_power_wells {
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
+/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
+#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
+#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+
/* WaClearTdlStateAckDirtyBits */
#define GEN8_STATE_ACK _MMIO(0x20F0)
#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
@@ -2200,6 +2210,8 @@ enum skl_disp_power_wells {
#define ILK_DPFC_STATUS _MMIO(0x43210)
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
+#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
+#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
@@ -6031,6 +6043,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PAR1_1 _MMIO(0x42080)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
+#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
@@ -6039,6 +6052,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define DISP_ARB_CTL _MMIO(0x45000)
+#define DISP_FBC_MEMORY_WAKE (1<<31)
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
@@ -6052,6 +6066,9 @@ enum skl_disp_power_wells {
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define MASK_WAKEMEM (1<<13)
+
#define SKL_DFSM _MMIO(0x51000)
#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
@@ -6069,6 +6086,7 @@ enum skl_disp_power_wells {
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
/* GEN7 chicken */
@@ -6076,6 +6094,7 @@ enum skl_disp_power_wells {
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
#define HIZ_CHICKEN _MMIO(0x7018)
@@ -6921,6 +6940,7 @@ enum skl_disp_power_wells {
#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
#define GEN6_UCGCTL1 _MMIO(0x9400)
+# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -6937,6 +6957,7 @@ enum skl_disp_power_wells {
#define GEN7_UCGCTL4 _MMIO(0x940c)
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index a34c23eceba0..2b3b428d9cd2 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,16 +41,22 @@
* be moved to FW_FAILED.
*/
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
-MODULE_FIRMWARE(I915_CSR_SKL);
-MODULE_FIRMWARE(I915_CSR_BXT);
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
-#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
char substepping;
};
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
static const struct stepping_info kbl_stepping_info[] = {
- {'H', '0'}, {'I', '0'}
+ {'A', '0'}, {'B', '0'}, {'C', '0'},
+ {'D', '0'}, {'E', '0'}, {'F', '0'},
+ {'G', '0'}, {'H', '0'}, {'I', '0'},
};
static const struct stepping_info skl_stepping_info[] = {
@@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_KABYLAKE(dev_priv)) {
+ required_min_version = KBL_CSR_VERSION_REQUIRED;
+ } else if (IS_SKYLAKE(dev_priv)) {
required_min_version = SKL_CSR_VERSION_REQUIRED;
} else if (IS_BROXTON(dev_priv)) {
required_min_version = BXT_CSR_VERSION_REQUIRED;
@@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv))
+ csr->fw_path = I915_CSR_KBL;
+ else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 04452cf3eae8..3074c56a643d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11997,6 +11997,12 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
ret = intel_color_check(crtc, crtc_state);
if (ret)
return ret;
+
+ /*
+ * Changing color management on Intel hardware is
+ * handled as part of planes update.
+ */
+ crtc_state->planes_changed = true;
}
ret = 0;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 40745e38d438..891107f92d9f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4645,7 +4645,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp->detect_done = false;
- if (intel_connector->detect_edid)
+ if (is_edp(intel_dp) || intel_connector->detect_edid)
return connector_status_connected;
else
return connector_status_disconnected;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 42eac37de047..7f2d8415ed8b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1103,15 +1103,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t *const batch,
uint32_t index)
{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
- * WaDisableLSQCROPERFforOCL:skl
+ * WaDisableLSQCROPERFforOCL:skl,kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1273,6 +1275,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
{
int ret;
struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1286,6 +1289,22 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
return ret;
index = ret;
+ /* WaClearSlmSpaceAtContextSwitch:kbl */
+ /* Actual scratch location is at 128 bytes offset */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
+ uint32_t scratch_addr
+ = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+ wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+ wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ wa_ctx_emit(batch, index, scratch_addr);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ }
/* Pad to end of cacheline */
while (index % CACHELINE_DWORDS)
wa_ctx_emit(batch, index, MI_NOOP);
@@ -1687,9 +1706,10 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *engine = ringbuf->engine;
u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
- bool vf_flush_wa = false;
+ bool vf_flush_wa = false, dc_flush_wa = false;
u32 flags = 0;
int ret;
+ int len;
flags |= PIPE_CONTROL_CS_STALL;
@@ -1716,9 +1736,21 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
*/
if (IS_GEN9(engine->dev))
vf_flush_wa = true;
+
+ /* WaForGAMHang:kbl */
+ if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
+ dc_flush_wa = true;
}
- ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
+ len = 6;
+
+ if (vf_flush_wa)
+ len += 6;
+
+ if (dc_flush_wa)
+ len += 12;
+
+ ret = intel_ring_begin(request, len);
if (ret)
return ret;
@@ -1731,12 +1763,31 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
intel_logical_ring_emit(ringbuf, 0);
}
+ if (dc_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf, flags);
intel_logical_ring_emit(ringbuf, scratch_addr);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
+
+ if (dc_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_advance(ringbuf);
return 0;
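The intel_ring_begin() length now adds up per workaround: the main flush is one 6-dword PIPE_CONTROL, the gen9 VF flush prepends another 6 dwords, and the kbl DC-flush workaround brackets the main flush with two more 6-dword PIPE_CONTROLs. As a check of the emitted sequence:

    /* worst case on kbl: 6 + 6 + 12 = 24 dwords
     *   PIPE_CONTROL(6)  -- vf_flush_wa: empty VF-cache flush
     *   PIPE_CONTROL(6)  -- dc_flush_wa: PIPE_CONTROL_DC_FLUSH_ENABLE
     *   PIPE_CONTROL(6)  -- main flush with 'flags'
     *   PIPE_CONTROL(6)  -- dc_flush_wa: PIPE_CONTROL_CS_STALL
     */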
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 99e26034ae8d..16e209d326b6 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -1038,5 +1038,16 @@ intel_opregion_get_panel_type(struct drm_device *dev)
return -ENODEV;
}
+ /*
+ * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
+ * low vswing for eDP, whereas the VBT panel type (2) gives us normal
+ * vswing instead. Low vswing results in some display flickers, so
+ * let's simply ignore the OpRegion panel type on SKL for now.
+ */
+ if (IS_SKYLAKE(dev)) {
+ DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+ return -ENODEV;
+ }
+
return ret - 1;
}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 8357d571553a..aba94099886b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1731,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
- } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+ } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv)) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a7ef45da0a9e..2863b92c9da6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -54,10 +54,38 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+
+ I915_WRITE(GEN8_CONFIG0,
+ I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
+
+ /* WaEnableChickenDCPR:skl,bxt,kbl */
+ I915_WRITE(GEN8_CHICKEN_DCPR_1,
+ I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+
+ /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
+ /* WaFbcWakeMemOn:skl,bxt,kbl */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS |
+ DISP_FBC_MEMORY_WAKE);
+
+ /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_DISABLE_DUMMY0);
+}
+
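The new gen9_init_clock_gating() collects the workarounds shared by skl, bxt and kbl, and each write uses the same read-modify-write shape so unrelated register fields survive. The pattern, as a sketch (helper name illustrative):

    /* OR workaround bits into a register without disturbing other fields */
    static void rmw_set(struct drm_i915_private *dev_priv, i915_reg_t reg, u32 bits)
    {
            I915_WRITE(reg, I915_READ(reg) | bits);
    }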
static void bxt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ gen9_init_clock_gating(dev);
+
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -6698,6 +6726,38 @@ static void lpt_suspend_hw(struct drm_device *dev)
}
}
+static void kabylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /* WaDisableSDEUnitClockGating:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableGamClockGating:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaFbcNukeOnHostModify:kbl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void skylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /* WaFbcNukeOnHostModify:skl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
static void broadwell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7163,9 +7223,9 @@ static void nop_init_clock_gating(struct drm_device *dev)
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_SKYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 04402bb9d26b..68c5af079ef8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -913,24 +913,26 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t tmp;
int ret;
- /* WaEnableLbsSlaRetryTimerDecrement:skl */
+ /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
- /* WaDisableKillLogic:bxt,skl */
+ /* WaDisableKillLogic:bxt,skl,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
- /* WaClearFlowControlGpgpuContextSave:skl,bxt */
- /* WaDisablePartialInstShootdown:skl,bxt */
+ /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics:skl,bxt */
+ /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
@@ -952,18 +954,18 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
*/
}
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
- /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
- /* Wa4x4STCOptimizationDisable:skl,bxt */
- /* WaDisablePartialResolveInVc:skl,bxt */
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+ /* WaDisablePartialResolveInVc:skl,bxt,kbl */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
- /* WaCcsTlbPrefetchDisable:skl,bxt */
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
@@ -973,31 +975,57 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
- /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
- tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
- IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
- tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
- WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+ /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+ * both tied to WaForceContextSaveRestoreNonCoherent
+ * in some hsds for skl. We keep the tie for all gen9. The
+ * documentation is a bit hazy and so we want to get common behaviour,
+ * even though there is no clear evidence we would need both on kbl/bxt.
+ * This area has been a source of system hangs so we play it safe
+ * and mimic the skl regardless of what bspec says.
+ *
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
- /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
- if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ /* WaForceEnableNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+
+ /* WaDisableHDCInvalidation:skl,bxt,kbl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
+
+ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
- /* WaDisableSTUnitPowerOptimization:skl,bxt */
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
- /* WaOCLCoherentLineFlush:skl,bxt */
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+ ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+ if (ret)
+ return ret;
+
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
if (ret)
return ret;
- /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
@@ -1092,22 +1120,6 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- /* This is tied to WaForceContextSaveRestoreNonCoherent */
- if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
- /*
- *Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
-
- /* WaDisableHDCInvalidation:skl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
- }
-
/* WaBarrierPerformanceFixDisable:skl */
if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
@@ -1120,6 +1132,9 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ /* WaDisableGafsUnitClkGating:skl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
/* WaDisableLSQCROPERFforOCL:skl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
@@ -1174,6 +1189,63 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
return ret;
}
+ /* WaInsertDummyPushConstPs:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaEnableGapsTsvCreditFix:kbl */
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+ /* WaDisableDynamicCreditSharing:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT(GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+ /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE);
+
+ /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+ * involving this register should also be added to WA batch as required.
+ */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_RO_PERF_DIS);
+
+ /* WaInsertDummyPushConstPs:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableGafsUnitClkGating:kbl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:kbl */
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+
return 0;
}
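Note the two write flavors used above: I915_WRITE does a read-modify-write of a normal register, while WA_SET_BIT and WA_SET_BIT_MASKED target masked registers, where the upper 16 bits select which lower bits a single write affects, so no read is needed. The usual encoding, assuming i915's convention for _MASKED_BIT_ENABLE:

    /* masked register write: high half is the write-enable mask */
    #define MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))   /* set bit(s) a */
    #define MASKED_BIT_DISABLE(a)  ((a) << 16)           /* clear bit(s) a */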
@@ -1199,6 +1271,9 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
if (IS_BROXTON(dev))
return bxt_init_workarounds(engine);
+ if (IS_KABYLAKE(dev_priv))
+ return kbl_init_workarounds(engine);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 22706c0a54b5..49bd5da194e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -40,7 +40,8 @@ static int
gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
{
struct nvkm_device *device = outp->base.disp->engine.subdev.device;
- nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
+ const u32 soff = gf119_sor_soff(outp);
+ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 4182a21f5923..41cacecbea9a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -65,6 +65,14 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
sun4i_tcon_disable(drv->tcon);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
}
static void sun4i_crtc_enable(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 257d2b4f3645..937394cbc241 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -92,7 +92,7 @@ static struct drm_driver sun4i_drv_driver = {
/* Frame Buffer Operations */
/* VBlank Operations */
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = sun4i_drv_enable_vblank,
.disable_vblank = sun4i_drv_disable_vblank,
};
@@ -310,6 +310,7 @@ static int sun4i_drv_probe(struct platform_device *pdev)
count += sun4i_drv_add_endpoints(&pdev->dev, &match,
pipeline);
+ of_node_put(pipeline);
DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
count, i);
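The added of_node_put() balances the reference taken when each pipeline node is looked up, so the loop releases the node once its endpoints are queued instead of leaking one reference per probed pipeline. Roughly (assuming the lookup is an of_parse_phandle(), which returns a node with an elevated refcount):

    pipeline = of_parse_phandle(np, "allwinner,pipelines", i); /* takes a reference */
    count += sun4i_drv_add_endpoints(&pdev->dev, &match, pipeline);
    of_node_put(pipeline);                                     /* drop it when done */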
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 39386f50af87..a71cf98c655f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1034,9 +1034,9 @@ out_unlock:
return ret;
}
-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags)
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
int i;
@@ -1068,6 +1068,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
return false;
}
+EXPORT_SYMBOL(ttm_bo_mem_compat);
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
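ttm_bo_mem_compat() is exported so vmwgfx (below) can handle buffers that are already pinned: a pinned buffer must not be moved, so instead of revalidating, the driver only asks whether the current placement is already compatible and fails otherwise. The shape of the callers that follow:

    if (buf->pin_count > 0)
            /* already pinned: current placement must match, or -EINVAL */
            ret = ttm_bo_mem_compat(placement, &bo->mem, &new_flags) ? 0 : -EINVAL;
    else
            ret = ttm_bo_validate(bo, placement, interruptible, false);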
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 9b078a493996..0cd889015dc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
+ uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
@@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(placement, &bo->mem,
+ &new_flags) ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
+
if (!ret)
vmw_bo_pin_reserved(buf, true);
@@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
{
struct ttm_buffer_object *bo = &buf->base;
int ret;
+ uint32_t new_flags;
ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
if (unlikely(ret != 0))
@@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
+ if (buf->pin_count > 0) {
+ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+ &new_flags) ? 0 : -EINVAL;
+ goto out_unreserve;
+ }
+
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
false);
if (likely(ret == 0) || ret == -ERESTARTSYS)
@@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
+ uint32_t new_flags;
place = vmw_vram_placement.placement[0];
place.lpfn = bo->num_pages;
@@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
- bo->mem.start > 0)
+ bo->mem.start > 0 &&
+ buf->pin_count == 0)
(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
- ret = ttm_bo_validate(bo, &placement, interruptible, false);
+ if (buf->pin_count > 0)
+ ret = ttm_bo_mem_compat(&placement, &bo->mem,
+ &new_flags) ? 0 : -EINVAL;
+ else
+ ret = ttm_bo_validate(bo, &placement, interruptible, false);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9fcd8200d485..8d528fcf6e96 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -233,6 +233,7 @@ static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
+static int vmw_assume_16bpp;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
@@ -249,6 +250,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
+module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
static void vmw_print_capabilities(uint32_t capabilities)
@@ -660,6 +663,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+ dev_priv->assume_16bpp = !!vmw_assume_16bpp;
+
dev_priv->enable_fb = enable_fbdev;
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -706,6 +711,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+ /*
+ * Workaround for low memory 2D VMs to compensate for the
+ * allocation taken by fbdev
+ */
+ if (!(dev_priv->capabilities & SVGA_CAP_3D))
+ mem_size *= 2;
+
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
vmw_read(dev_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 1980e2a28265..89fb19443a3f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -386,6 +386,7 @@ struct vmw_private {
spinlock_t hw_lock;
spinlock_t cap_lock;
bool has_dx;
+ bool assume_16bpp;
/*
* VGA registers.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 679a4cb98ee3..d2d93959b119 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
par->set_fb = &vfb->base;
- if (!par->bo_ptr) {
- /*
- * Pin before mapping. Since we don't know in what placement
- * to pin, call into KMS to do it for us.
- */
- ret = vfb->pin(vfb);
- if (ret) {
- DRM_ERROR("Could not pin the fbdev framebuffer.\n");
- return ret;
- }
-
- ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
- par->vmw_bo->base.num_pages, &par->map);
- if (ret) {
- vfb->unpin(vfb);
- DRM_ERROR("Could not map the fbdev framebuffer.\n");
- return ret;
- }
-
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
- }
-
return 0;
}
@@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info)
if (ret)
goto out_unlock;
+ if (!par->bo_ptr) {
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
+
+ /*
+ * Pin before mapping. Since we don't know in what placement
+ * to pin, call into KMS to do it for us.
+ */
+ ret = vfb->pin(vfb);
+ if (ret) {
+ DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+ par->vmw_bo->base.num_pages, &par->map);
+ if (ret) {
+ vfb->unpin(vfb);
+ DRM_ERROR("Could not map the fbdev framebuffer.\n");
+ goto out_unlock;
+ }
+
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+ }
+
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 55231cce73a0..e29da45a2847 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1553,14 +1553,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
int i;
- u32 assumed_bpp = 2;
+ u32 assumed_bpp = 4;
- /*
- * If using screen objects, then assume 32-bpp because that's what the
- * SVGA device is assuming
- */
- if (dev_priv->active_display_unit == vmw_du_screen_object)
- assumed_bpp = 4;
+ if (dev_priv->assume_16bpp)
+ assumed_bpp = 2;
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(max_width, dev_priv->stdu_max_width);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index f0374f9b56ca..e57a0bad7a62 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -300,6 +300,9 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
break;
}
+ if (retries == RETRIES)
+ return -EINVAL;
+
*msg_len = reply_len;
*msg = reply;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 9ca818fb034c..41932a7c4f79 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -399,8 +399,10 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
WARN_ON_ONCE(!stdu->defined);
- if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
- new_fb->height == mode->vdisplay)
+ new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+
+ if (new_vfbs && new_vfbs->surface->base_size.width == mode->hdisplay &&
+ new_vfbs->surface->base_size.height == mode->vdisplay)
new_content_type = SAME_AS_DISPLAY;
else if (vfb->dmabuf)
new_content_type = SEPARATE_DMA;
@@ -444,7 +446,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
content_srf.mip_levels[0] = 1;
content_srf.multisample_count = 0;
} else {
- new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
content_srf = *new_vfbs->surface;
}
@@ -464,7 +465,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
return ret;
}
} else if (new_content_type == SAME_AS_DISPLAY) {
- new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
new_display_srf = vmw_surface_reference(new_vfbs->surface);
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index ff940075bb90..eaf2f916d48c 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -486,6 +486,18 @@ config SENSORS_FSCHMD
This driver can also be built as a module. If so, the module
will be called fschmd.
+config SENSORS_FTSTEUTATES
+ tristate "Fujitsu Technology Solutions sensor chip Teutates"
+ depends on I2C && WATCHDOG
+ select WATCHDOG_CORE
+ help
+ If you say yes here you get support for the Fujitsu Technology
+ Solutions (FTS) sensor chip "Teutates" including support for
+ the integrated watchdog.
+
+ This driver can also be built as a module. If so, the module
+ will be called ftsteutates.
+
config SENSORS_GL518SM
tristate "Genesys Logic GL518SM"
depends on I2C
@@ -645,8 +657,8 @@ config SENSORS_JC42
temperature sensors, which are used on many DDR3 memory modules for
mobile devices and servers. Support will include, but not be limited
to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
- MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98, STTS424(E),
- STTS2002, STTS3000, TSE2002, TSE2004, TS3000, and TS3001.
+ MCP9808, MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98,
+ STTS424(E), STTS2002, STTS3000, TSE2002, TSE2004, TS3000, and TS3001.
This driver can also be built as a module. If so, the module
will be called jc42.
@@ -958,6 +970,7 @@ config SENSORS_LM75
tristate "National Semiconductor LM75 and compatibles"
depends on I2C
depends on THERMAL || !THERMAL_OF
+ select REGMAP_I2C
help
If you say yes here you get support for one common type of
temperature sensor chip, with models including:
@@ -1265,6 +1278,17 @@ config SENSORS_SHT21
This driver can also be built as a module. If so, the module
will be called sht21.
+config SENSORS_SHT3x
+ tristate "Sensiron humidity and temperature sensors. SHT3x and compat."
+ depends on I2C
+ select CRC8
+ help
+ If you say yes here you get support for the Sensirion SHT30 and SHT31
+ humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called sht3x.
+
config SENSORS_SHTC1
tristate "Sensiron humidity and temperature sensors. SHTC1 and compat."
depends on I2C
@@ -1514,6 +1538,17 @@ config SENSORS_INA2XX
This driver can also be built as a module. If so, the module
will be called ina2xx.
+config SENSORS_INA3221
+ tristate "Texas Instruments INA3221 Triple Power Monitor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the TI INA3221 Triple Power
+ Monitor.
+
+ This driver can also be built as a module. If so, the module
+ will be called ina3221.
+
config SENSORS_TC74
tristate "Microchip TC74"
depends on I2C
@@ -1538,6 +1573,7 @@ config SENSORS_TMP102
tristate "Texas Instruments TMP102"
depends on I2C
depends on THERMAL || !THERMAL_OF
+ select REGMAP_I2C
help
If you say yes here you get support for Texas Instruments TMP102
sensor chips.
@@ -1561,7 +1597,7 @@ config SENSORS_TMP401
depends on I2C
help
If you say yes here you get support for Texas Instruments TMP401,
- TMP411, TMP431, TMP432 and TMP435 temperature sensor chips.
+ TMP411, TMP431, TMP432, TMP435, and TMP461 temperature sensor chips.
This driver can also be built as a module. If so, the module
will be called tmp401.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 2ef5b7c4c54f..fe87d2895a97 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
obj-$(CONFIG_SENSORS_FAM15H_POWER) += fam15h_power.o
obj-$(CONFIG_SENSORS_FSCHMD) += fschmd.o
+obj-$(CONFIG_SENSORS_FTSTEUTATES) += ftsteutates.o
obj-$(CONFIG_SENSORS_G760A) += g760a.o
obj-$(CONFIG_SENSORS_G762) += g762.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
@@ -77,6 +78,7 @@ obj-$(CONFIG_SENSORS_IBMPOWERNV)+= ibmpowernv.o
obj-$(CONFIG_SENSORS_IIO_HWMON) += iio_hwmon.o
obj-$(CONFIG_SENSORS_INA209) += ina209.o
obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
+obj-$(CONFIG_SENSORS_INA3221) += ina3221.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
@@ -138,6 +140,7 @@ obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o
obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SHT21) += sht21.o
+obj-$(CONFIG_SENSORS_SHT3x) += sht3x.o
obj-$(CONFIG_SENSORS_SHTC1) += shtc1.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
obj-$(CONFIG_SENSORS_SMM665) += smm665.o
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 202c1fbb3407..8ea35932fbaa 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -37,7 +37,6 @@ enum ad7314_variant {
struct ad7314_data {
struct spi_device *spi_dev;
- struct device *hwmon_dev;
u16 rx ____cacheline_aligned;
};
@@ -88,62 +87,30 @@ static ssize_t ad7314_show_temperature(struct device *dev,
}
}
-static ssize_t ad7314_show_name(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
-}
-
-static DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
ad7314_show_temperature, NULL, 0);
-static struct attribute *ad7314_attributes[] = {
- &dev_attr_name.attr,
+static struct attribute *ad7314_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL,
};
-static const struct attribute_group ad7314_group = {
- .attrs = ad7314_attributes,
-};
+ATTRIBUTE_GROUPS(ad7314);
static int ad7314_probe(struct spi_device *spi_dev)
{
- int ret;
struct ad7314_data *chip;
+ struct device *hwmon_dev;
chip = devm_kzalloc(&spi_dev->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
- spi_set_drvdata(spi_dev, chip);
-
- ret = sysfs_create_group(&spi_dev->dev.kobj, &ad7314_group);
- if (ret < 0)
- return ret;
-
- chip->hwmon_dev = hwmon_device_register(&spi_dev->dev);
- if (IS_ERR(chip->hwmon_dev)) {
- ret = PTR_ERR(chip->hwmon_dev);
- goto error_remove_group;
- }
chip->spi_dev = spi_dev;
-
- return 0;
-error_remove_group:
- sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group);
- return ret;
-}
-
-static int ad7314_remove(struct spi_device *spi_dev)
-{
- struct ad7314_data *chip = spi_get_drvdata(spi_dev);
-
- hwmon_device_unregister(chip->hwmon_dev);
- sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(&spi_dev->dev,
+ spi_dev->modalias,
+ chip, ad7314_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct spi_device_id ad7314_id[] = {
@@ -159,7 +126,6 @@ static struct spi_driver ad7314_driver = {
.name = "ad7314",
},
.probe = ad7314_probe,
- .remove = ad7314_remove,
.id_table = ad7314_id,
};
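This conversion (and the ads7871 one that follows) swaps the manual sysfs_create_group() + hwmon_device_register() pairing, and its remove() teardown, for devm_hwmon_device_register_with_groups(), which ties the attribute groups and the hwmon device lifetime to the parent device. The probe then reduces to a sketch like this (names illustrative):

    static int example_probe(struct spi_device *spi)
    {
            struct example_data *data;
            struct device *hwmon_dev;

            data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
            if (!data)
                    return -ENOMEM;
            data->spi = spi;

            /* devm-managed: no remove() callback, no error unwinding */
            hwmon_dev = devm_hwmon_device_register_with_groups(&spi->dev,
                            spi->modalias, data, example_groups);
            return PTR_ERR_OR_ZERO(hwmon_dev);
    }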
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 4fd9e4de1972..59bd7b9e1772 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -66,14 +66,12 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/delay.h>
#define DEVICE_NAME "ads7871"
struct ads7871_data {
- struct device *hwmon_dev;
- struct mutex update_lock;
+ struct spi_device *spi;
};
static int ads7871_read_reg8(struct spi_device *spi, int reg)
@@ -101,7 +99,8 @@ static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val)
static ssize_t show_voltage(struct device *dev,
struct device_attribute *da, char *buf)
{
- struct spi_device *spi = to_spi_device(dev);
+ struct ads7871_data *pdata = dev_get_drvdata(dev);
+ struct spi_device *spi = pdata->spi;
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int ret, val, i = 0;
uint8_t channel, mux_cnv;
@@ -139,12 +138,6 @@ static ssize_t show_voltage(struct device *dev,
}
}
-static ssize_t ads7871_show_name(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
-}
-
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
@@ -154,9 +147,7 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
-static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL);
-
-static struct attribute *ads7871_attributes[] = {
+static struct attribute *ads7871_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
@@ -165,21 +156,18 @@ static struct attribute *ads7871_attributes[] = {
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in7_input.dev_attr.attr,
- &dev_attr_name.attr,
NULL
};
-static const struct attribute_group ads7871_group = {
- .attrs = ads7871_attributes,
-};
+ATTRIBUTE_GROUPS(ads7871);
static int ads7871_probe(struct spi_device *spi)
{
- int ret, err;
+ struct device *dev = &spi->dev;
+ int ret;
uint8_t val;
struct ads7871_data *pdata;
-
- dev_dbg(&spi->dev, "probe\n");
+ struct device *hwmon_dev;
/* Configure the SPI bus */
spi->mode = (SPI_MODE_0);
@@ -193,7 +181,7 @@ static int ads7871_probe(struct spi_device *spi)
ads7871_write_reg8(spi, REG_OSC_CONTROL, val);
ret = ads7871_read_reg8(spi, REG_OSC_CONTROL);
- dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
+ dev_dbg(dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
/*
* because there is no other error checking on an SPI bus
* we need to make sure we really have a chip
@@ -201,46 +189,23 @@ static int ads7871_probe(struct spi_device *spi)
if (val != ret)
return -ENODEV;
- pdata = devm_kzalloc(&spi->dev, sizeof(struct ads7871_data),
- GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(struct ads7871_data), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
- if (err < 0)
- return err;
-
- spi_set_drvdata(spi, pdata);
+ pdata->spi = spi;
- pdata->hwmon_dev = hwmon_device_register(&spi->dev);
- if (IS_ERR(pdata->hwmon_dev)) {
- err = PTR_ERR(pdata->hwmon_dev);
- goto error_remove;
- }
-
- return 0;
-
-error_remove:
- sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
- return err;
-}
-
-static int ads7871_remove(struct spi_device *spi)
-{
- struct ads7871_data *pdata = spi_get_drvdata(spi);
-
- hwmon_device_unregister(pdata->hwmon_dev);
- sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, spi->modalias,
+ pdata,
+ ads7871_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static struct spi_driver ads7871_driver = {
.driver = {
.name = DEVICE_NAME,
},
-
.probe = ads7871_probe,
- .remove = ads7871_remove,
};
module_spi_driver(ads7871_driver);
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 827c03703128..a7f886961830 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -30,6 +30,7 @@
#define ADT7411_REG_CFG1 0x18
#define ADT7411_CFG1_START_MONITOR (1 << 0)
+#define ADT7411_CFG1_RESERVED_BIT3 (1 << 3)
#define ADT7411_REG_CFG2 0x19
#define ADT7411_CFG2_DISABLE_AVG (1 << 5)
@@ -296,8 +297,10 @@ static int adt7411_probe(struct i2c_client *client,
mutex_init(&data->device_lock);
mutex_init(&data->update_lock);
+ /* According to the datasheet, we must only write 1 to bit 3 */
ret = adt7411_modify_bit(client, ADT7411_REG_CFG1,
- ADT7411_CFG1_START_MONITOR, 1);
+ ADT7411_CFG1_RESERVED_BIT3
+ | ADT7411_CFG1_START_MONITOR, 1);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 2ac87d553e22..acf9c0361d9f 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -81,6 +81,7 @@ static bool disallow_fan_type_call;
#define I8K_HWMON_HAVE_TEMP4 (1 << 3)
#define I8K_HWMON_HAVE_FAN1 (1 << 4)
#define I8K_HWMON_HAVE_FAN2 (1 << 5)
+#define I8K_HWMON_HAVE_FAN3 (1 << 6)
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
@@ -139,6 +140,14 @@ static int i8k_smm(struct smm_regs *regs)
int eax = regs->eax;
cpumask_var_t old_mask;
+#ifdef DEBUG
+ int ebx = regs->ebx;
+ unsigned long duration;
+ ktime_t calltime, delta, rettime;
+
+ calltime = ktime_get();
+#endif
+
/* SMM requires CPU 0 */
if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
return -ENOMEM;
@@ -210,6 +219,15 @@ static int i8k_smm(struct smm_regs *regs)
out:
set_cpus_allowed_ptr(current, old_mask);
free_cpumask_var(old_mask);
+
+#ifdef DEBUG
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = ktime_to_ns(delta) >> 10;
+ pr_debug("smm(0x%.4x 0x%.4x) = 0x%.4x (took %7lu usecs)\n", eax, ebx,
+ (rc ? 0xffff : regs->eax & 0xffff), duration);
+#endif
+
return rc;
}
@@ -252,7 +270,7 @@ static int _i8k_get_fan_type(int fan)
static int i8k_get_fan_type(int fan)
{
/* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
- static int types[2] = { INT_MIN, INT_MIN };
+ static int types[3] = { INT_MIN, INT_MIN, INT_MIN };
if (types[fan] == INT_MIN)
types[fan] = _i8k_get_fan_type(fan);
@@ -719,6 +737,12 @@ static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL,
1);
static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
i8k_hwmon_set_pwm, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
+ 2);
+static SENSOR_DEVICE_ATTR(fan3_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL,
+ 2);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
+ i8k_hwmon_set_pwm, 2);
static struct attribute *i8k_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr, /* 0 */
@@ -735,6 +759,9 @@ static struct attribute *i8k_attrs[] = {
&sensor_dev_attr_fan2_input.dev_attr.attr, /* 11 */
&sensor_dev_attr_fan2_label.dev_attr.attr, /* 12 */
&sensor_dev_attr_pwm2.dev_attr.attr, /* 13 */
+ &sensor_dev_attr_fan3_input.dev_attr.attr, /* 14 */
+ &sensor_dev_attr_fan3_label.dev_attr.attr, /* 15 */
+ &sensor_dev_attr_pwm3.dev_attr.attr, /* 16 */
NULL
};
@@ -742,7 +769,7 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
if (disallow_fan_type_call &&
- (index == 9 || index == 12))
+ (index == 9 || index == 12 || index == 15))
return 0;
if (index >= 0 && index <= 1 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
@@ -762,6 +789,9 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
if (index >= 11 && index <= 13 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
return 0;
+ if (index >= 14 && index <= 16 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN3))
+ return 0;
return attr->mode;
}
@@ -807,6 +837,13 @@ static int __init i8k_init_hwmon(void)
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
+ /* Third fan attributes, if fan status or type is OK */
+ err = i8k_get_fan_status(2);
+ if (err < 0)
+ err = i8k_get_fan_type(2);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN3;
+
i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "dell_smm",
NULL, i8k_groups);
if (IS_ERR(i8k_hwmon_dev)) {
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index ada90716448d..f37fe2011640 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -464,7 +464,7 @@ static int emc6w201_detect(struct i2c_client *client,
if (verstep < 0 || (verstep & 0xF0) != 0xB0)
return -ENODEV;
if ((verstep & 0x0F) > 2) {
- dev_dbg(&client->dev, "Unknwown EMC6W201 stepping %d\n",
+ dev_dbg(&client->dev, "Unknown EMC6W201 stepping %d\n",
verstep & 0x0F);
return -ENODEV;
}
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
new file mode 100644
index 000000000000..2b2ff67026be
--- /dev/null
+++ b/drivers/hwmon/ftsteutates.c
@@ -0,0 +1,819 @@
+/*
+ * Support for the FTS System Monitoring Chip "Teutates"
+ *
+ * Copyright (C) 2016 Fujitsu Technology Solutions GmbH,
+ * Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+#define FTS_DEVICE_ID_REG 0x0000
+#define FTS_DEVICE_REVISION_REG 0x0001
+#define FTS_DEVICE_STATUS_REG 0x0004
+#define FTS_SATELLITE_STATUS_REG 0x0005
+#define FTS_EVENT_STATUS_REG 0x0006
+#define FTS_GLOBAL_CONTROL_REG 0x0007
+
+#define FTS_SENSOR_EVENT_REG 0x0010
+
+#define FTS_FAN_EVENT_REG 0x0014
+#define FTS_FAN_PRESENT_REG 0x0015
+
+#define FTS_POWER_ON_TIME_COUNTER_A 0x007A
+#define FTS_POWER_ON_TIME_COUNTER_B 0x007B
+#define FTS_POWER_ON_TIME_COUNTER_C 0x007C
+
+#define FTS_PAGE_SELECT_REG 0x007F
+
+#define FTS_WATCHDOG_TIME_PRESET 0x000B
+#define FTS_WATCHDOG_CONTROL 0x5081
+
+#define FTS_NO_FAN_SENSORS 0x08
+#define FTS_NO_TEMP_SENSORS 0x10
+#define FTS_NO_VOLT_SENSORS 0x04
+
+static const struct i2c_device_id fts_id[] = {
+ { "ftsteutates", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, fts_id);
+
+enum WATCHDOG_RESOLUTION {
+ seconds = 1,
+ minutes = 60
+};
+
+struct fts_data {
+ struct i2c_client *client;
+ /* update sensor data lock */
+ struct mutex update_lock;
+ /* read/write register lock */
+ struct mutex access_lock;
+ unsigned long last_updated; /* in jiffies */
+ struct watchdog_device wdd;
+ enum WATCHDOG_RESOLUTION resolution;
+ bool valid; /* false until following fields are valid */
+
+ u8 volt[FTS_NO_VOLT_SENSORS];
+
+ u8 temp_input[FTS_NO_TEMP_SENSORS];
+ u8 temp_alarm;
+
+ u8 fan_present;
+ u8 fan_input[FTS_NO_FAN_SENSORS]; /* in rps */
+ u8 fan_source[FTS_NO_FAN_SENSORS];
+ u8 fan_alarm;
+};
+
+#define FTS_REG_FAN_INPUT(idx) ((idx) + 0x20)
+#define FTS_REG_FAN_SOURCE(idx) ((idx) + 0x30)
+#define FTS_REG_FAN_CONTROL(idx) (((idx) << 16) + 0x4881)
+
+#define FTS_REG_TEMP_INPUT(idx) ((idx) + 0x40)
+#define FTS_REG_TEMP_CONTROL(idx) (((idx) << 16) + 0x0681)
+
+#define FTS_REG_VOLT(idx) ((idx) + 0x18)
+
+/*****************************************************************************/
+/* I2C Helper functions */
+/*****************************************************************************/
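+/*
+ * Register addresses are 16 bits wide: the high byte selects a device
+ * page (written to FTS_PAGE_SELECT_REG first) and the low byte is the
+ * register offset within that page.
+ */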
+static int fts_read_byte(struct i2c_client *client, unsigned short reg)
+{
+ int ret;
+ unsigned char page = reg >> 8;
+ struct fts_data *data = dev_get_drvdata(&client->dev);
+
+ mutex_lock(&data->access_lock);
+
+ dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page);
+ ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page);
+ if (ret < 0)
+ goto error;
+
+ reg &= 0xFF;
+ ret = i2c_smbus_read_byte_data(client, reg);
+ dev_dbg(&client->dev, "read - reg: 0x%.02x: val: 0x%.02x\n", reg, ret);
+
+error:
+ mutex_unlock(&data->access_lock);
+ return ret;
+}
+
+static int fts_write_byte(struct i2c_client *client, unsigned short reg,
+ unsigned char value)
+{
+ int ret;
+ unsigned char page = reg >> 8;
+ struct fts_data *data = dev_get_drvdata(&client->dev);
+
+ mutex_lock(&data->access_lock);
+
+ dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page);
+ ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page);
+ if (ret < 0)
+ goto error;
+
+ reg &= 0xFF;
+ dev_dbg(&client->dev,
+ "write - reg: 0x%.02x: val: 0x%.02x\n", reg, value);
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+
+error:
+ mutex_unlock(&data->access_lock);
+ return ret;
+}
+
+/*****************************************************************************/
+/* Data Updater Helper function */
+/*****************************************************************************/
+static int fts_update_device(struct fts_data *data)
+{
+ int i;
+ int err = 0;
+
+ mutex_lock(&data->update_lock);
+ if (!time_after(jiffies, data->last_updated + 2 * HZ) && data->valid)
+ goto exit;
+
+ err = fts_read_byte(data->client, FTS_DEVICE_STATUS_REG);
+ if (err < 0)
+ goto exit;
+
+ data->valid = !!(err & 0x02); /* bit 1 set means "data ready" */
+ if (unlikely(!data->valid)) {
+ err = -EAGAIN;
+ goto exit;
+ }
+
+ err = fts_read_byte(data->client, FTS_FAN_PRESENT_REG);
+ if (err < 0)
+ goto exit;
+ data->fan_present = err;
+
+ err = fts_read_byte(data->client, FTS_FAN_EVENT_REG);
+ if (err < 0)
+ goto exit;
+ data->fan_alarm = err;
+
+ for (i = 0; i < FTS_NO_FAN_SENSORS; i++) {
+ if (data->fan_present & BIT(i)) {
+ err = fts_read_byte(data->client, FTS_REG_FAN_INPUT(i));
+ if (err < 0)
+ goto exit;
+ data->fan_input[i] = err;
+
+ err = fts_read_byte(data->client,
+ FTS_REG_FAN_SOURCE(i));
+ if (err < 0)
+ goto exit;
+ data->fan_source[i] = err;
+ } else {
+ data->fan_input[i] = 0;
+ data->fan_source[i] = 0;
+ }
+ }
+
+ err = fts_read_byte(data->client, FTS_SENSOR_EVENT_REG);
+ if (err < 0)
+ goto exit;
+ data->temp_alarm = err;
+
+ for (i = 0; i < FTS_NO_TEMP_SENSORS; i++) {
+ err = fts_read_byte(data->client, FTS_REG_TEMP_INPUT(i));
+ if (err < 0)
+ goto exit;
+ data->temp_input[i] = err;
+ }
+
+ for (i = 0; i < FTS_NO_VOLT_SENSORS; i++) {
+ err = fts_read_byte(data->client, FTS_REG_VOLT(i));
+ if (err < 0)
+ goto exit;
+ data->volt[i] = err;
+ }
+ data->last_updated = jiffies;
+ err = 0;
+exit:
+ mutex_unlock(&data->update_lock);
+ return err;
+}
+
+/*****************************************************************************/
+/* Watchdog functions */
+/*****************************************************************************/
+static int fts_wd_set_resolution(struct fts_data *data,
+ enum WATCHDOG_RESOLUTION resolution)
+{
+ int ret;
+
+ if (data->resolution == resolution)
+ return 0;
+
+ ret = fts_read_byte(data->client, FTS_WATCHDOG_CONTROL);
+ if (ret < 0)
+ return ret;
+
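+ /*
+ * Bit 1 of the watchdog control register selects the resolution:
+ * 1 = seconds, 0 = minutes.
+ */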
+ if ((resolution == seconds && ret & BIT(1)) ||
+ (resolution == minutes && (ret & BIT(1)) == 0)) {
+ data->resolution = resolution;
+ return 0;
+ }
+
+ if (resolution == seconds)
+ ret |= BIT(1);
+ else
+ ret &= ~BIT(1);
+
+ ret = fts_write_byte(data->client, FTS_WATCHDOG_CONTROL, ret);
+ if (ret < 0)
+ return ret;
+
+ data->resolution = resolution;
+ return ret;
+}
+
+static int fts_wd_set_timeout(struct watchdog_device *wdd, unsigned int timeout)
+{
+ struct fts_data *data;
+ enum WATCHDOG_RESOLUTION resolution = seconds;
+ int ret;
+
+ data = watchdog_get_drvdata(wdd);
+ /*
+ * Switch the watchdog resolution to minutes if the timeout does not
+ * fit into a byte.
+ */
+ if (timeout > 0xFF) {
+ timeout = DIV_ROUND_UP(timeout, 60) * 60;
+ resolution = minutes;
+ }
+
+ ret = fts_wd_set_resolution(data, resolution);
+ if (ret < 0)
+ return ret;
+
+ wdd->timeout = timeout;
+ return 0;
+}
+
+static int fts_wd_start(struct watchdog_device *wdd)
+{
+ struct fts_data *data = watchdog_get_drvdata(wdd);
+
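+ /* The preset register counts in units of the configured resolution */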
+ return fts_write_byte(data->client, FTS_WATCHDOG_TIME_PRESET,
+ wdd->timeout / (u8)data->resolution);
+}
+
+static int fts_wd_stop(struct watchdog_device *wdd)
+{
+ struct fts_data *data;
+
+ data = watchdog_get_drvdata(wdd);
+ return fts_write_byte(data->client, FTS_WATCHDOG_TIME_PRESET, 0);
+}
+
+static const struct watchdog_info fts_wd_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "FTS Teutates Hardware Watchdog",
+};
+
+static const struct watchdog_ops fts_wd_ops = {
+ .owner = THIS_MODULE,
+ .start = fts_wd_start,
+ .stop = fts_wd_stop,
+ .set_timeout = fts_wd_set_timeout,
+};
+
+static int fts_watchdog_init(struct fts_data *data)
+{
+ int timeout, ret;
+
+ watchdog_set_drvdata(&data->wdd, data);
+
+ timeout = fts_read_byte(data->client, FTS_WATCHDOG_TIME_PRESET);
+ if (timeout < 0)
+ return timeout;
+
+ /* watchdog not running, set timeout to a default of 60 sec. */
+ if (timeout == 0) {
+ ret = fts_wd_set_resolution(data, seconds);
+ if (ret < 0)
+ return ret;
+ data->wdd.timeout = 60;
+ } else {
+ ret = fts_read_byte(data->client, FTS_WATCHDOG_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ data->resolution = ret & BIT(1) ? seconds : minutes;
+ data->wdd.timeout = timeout * (u8)data->resolution;
+ set_bit(WDOG_HW_RUNNING, &data->wdd.status);
+ }
+
+ /* Register our watchdog part */
+ data->wdd.info = &fts_wd_info;
+ data->wdd.ops = &fts_wd_ops;
+ data->wdd.parent = &data->client->dev;
+ data->wdd.min_timeout = 1;
+
+ /* max timeout 255 minutes. */
+ data->wdd.max_hw_heartbeat_ms = 0xFF * 60 * MSEC_PER_SEC;
+
+ return watchdog_register_device(&data->wdd);
+}
+
+/*****************************************************************************/
+/* SysFS handler functions */
+/*****************************************************************************/
+static ssize_t show_in_value(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%u\n", data->volt[index]);
+}
+
+static ssize_t show_temp_value(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%u\n", data->temp_input[index]);
+}
+
+static ssize_t show_temp_fault(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ /* A raw temperature of 0x00 indicates a sensor error */
+ return sprintf(buf, "%d\n", data->temp_input[index] == 0);
+}
+
+static ssize_t show_temp_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%u\n", !!(data->temp_alarm & BIT(index)));
+}
+
+static ssize_t
+clear_temp_alarm(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ unsigned long val;
+ long ret;
+
+ ret = fts_update_device(data);
+ if (ret < 0)
+ return ret;
+
+ if (kstrtoul(buf, 10, &val) || val != 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ ret = fts_read_byte(data->client, FTS_REG_TEMP_CONTROL(index));
+ if (ret < 0)
+ goto error;
+
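+ /* Setting bit 0 of the sensor control register acknowledges the alarm */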
+ ret = fts_write_byte(data->client, FTS_REG_TEMP_CONTROL(index),
+ ret | 0x1);
+ if (ret < 0)
+ goto error;
+
+ data->valid = false;
+ ret = count;
+error:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static ssize_t show_fan_value(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%u\n", data->fan_input[index]);
+}
+
+static ssize_t show_fan_source(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%u\n", data->fan_source[index]);
+}
+
+static ssize_t show_fan_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%d\n", !!(data->fan_alarm & BIT(index)));
+}
+
+static ssize_t
+clear_fan_alarm(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ unsigned long val;
+ long ret;
+
+ ret = fts_update_device(data);
+ if (ret < 0)
+ return ret;
+
+ if (kstrtoul(buf, 10, &val) || val != 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ ret = fts_read_byte(data->client, FTS_REG_FAN_CONTROL(index));
+ if (ret < 0)
+ goto error;
+
+ ret = fts_write_byte(data->client, FTS_REG_FAN_CONTROL(index),
+ ret | 0x1);
+ if (ret < 0)
+ goto error;
+
+ data->valid = false;
+ ret = count;
+error:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+/*****************************************************************************/
+/* SysFS structs */
+/*****************************************************************************/
+
+/* Temperature sensors */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_value, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_value, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_value, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_value, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_temp_value, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_temp_value, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp9_input, S_IRUGO, show_temp_value, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp10_input, S_IRUGO, show_temp_value, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp11_input, S_IRUGO, show_temp_value, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp12_input, S_IRUGO, show_temp_value, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp13_input, S_IRUGO, show_temp_value, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp14_input, S_IRUGO, show_temp_value, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp15_input, S_IRUGO, show_temp_value, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp16_input, S_IRUGO, show_temp_value, NULL, 15);
+
+static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_temp_fault, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_temp_fault, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_fault, S_IRUGO, show_temp_fault, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_fault, S_IRUGO, show_temp_fault, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp9_fault, S_IRUGO, show_temp_fault, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp10_fault, S_IRUGO, show_temp_fault, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp11_fault, S_IRUGO, show_temp_fault, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp12_fault, S_IRUGO, show_temp_fault, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp13_fault, S_IRUGO, show_temp_fault, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp14_fault, S_IRUGO, show_temp_fault, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp15_fault, S_IRUGO, show_temp_fault, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp16_fault, S_IRUGO, show_temp_fault, NULL, 15);
+
+static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 0);
+static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 1);
+static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 2);
+static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 3);
+static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 4);
+static SENSOR_DEVICE_ATTR(temp6_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 5);
+static SENSOR_DEVICE_ATTR(temp7_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 6);
+static SENSOR_DEVICE_ATTR(temp8_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 7);
+static SENSOR_DEVICE_ATTR(temp9_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 8);
+static SENSOR_DEVICE_ATTR(temp10_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 9);
+static SENSOR_DEVICE_ATTR(temp11_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 10);
+static SENSOR_DEVICE_ATTR(temp12_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 11);
+static SENSOR_DEVICE_ATTR(temp13_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 12);
+static SENSOR_DEVICE_ATTR(temp14_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 13);
+static SENSOR_DEVICE_ATTR(temp15_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 14);
+static SENSOR_DEVICE_ATTR(temp16_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 15);
+
+static struct attribute *fts_temp_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp6_input.dev_attr.attr,
+ &sensor_dev_attr_temp7_input.dev_attr.attr,
+ &sensor_dev_attr_temp8_input.dev_attr.attr,
+ &sensor_dev_attr_temp9_input.dev_attr.attr,
+ &sensor_dev_attr_temp10_input.dev_attr.attr,
+ &sensor_dev_attr_temp11_input.dev_attr.attr,
+ &sensor_dev_attr_temp12_input.dev_attr.attr,
+ &sensor_dev_attr_temp13_input.dev_attr.attr,
+ &sensor_dev_attr_temp14_input.dev_attr.attr,
+ &sensor_dev_attr_temp15_input.dev_attr.attr,
+ &sensor_dev_attr_temp16_input.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
+ &sensor_dev_attr_temp5_fault.dev_attr.attr,
+ &sensor_dev_attr_temp6_fault.dev_attr.attr,
+ &sensor_dev_attr_temp7_fault.dev_attr.attr,
+ &sensor_dev_attr_temp8_fault.dev_attr.attr,
+ &sensor_dev_attr_temp9_fault.dev_attr.attr,
+ &sensor_dev_attr_temp10_fault.dev_attr.attr,
+ &sensor_dev_attr_temp11_fault.dev_attr.attr,
+ &sensor_dev_attr_temp12_fault.dev_attr.attr,
+ &sensor_dev_attr_temp13_fault.dev_attr.attr,
+ &sensor_dev_attr_temp14_fault.dev_attr.attr,
+ &sensor_dev_attr_temp15_fault.dev_attr.attr,
+ &sensor_dev_attr_temp16_fault.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp6_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp7_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp8_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp9_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp10_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp11_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp12_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp13_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp14_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp15_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp16_alarm.dev_attr.attr,
+ NULL
+};
+
+/* Fans */
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_value, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_value, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_value, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_value, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan_value, NULL, 4);
+static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan_value, NULL, 5);
+static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan_value, NULL, 6);
+static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan_value, NULL, 7);
+
+static SENSOR_DEVICE_ATTR(fan1_source, S_IRUGO, show_fan_source, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_source, S_IRUGO, show_fan_source, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_source, S_IRUGO, show_fan_source, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_source, S_IRUGO, show_fan_source, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan5_source, S_IRUGO, show_fan_source, NULL, 4);
+static SENSOR_DEVICE_ATTR(fan6_source, S_IRUGO, show_fan_source, NULL, 5);
+static SENSOR_DEVICE_ATTR(fan7_source, S_IRUGO, show_fan_source, NULL, 6);
+static SENSOR_DEVICE_ATTR(fan8_source, S_IRUGO, show_fan_source, NULL, 7);
+
+static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 0);
+static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 1);
+static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 2);
+static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 3);
+static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 4);
+static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 5);
+static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 6);
+static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 7);
+
+static struct attribute *fts_fan_attrs[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ &sensor_dev_attr_fan4_input.dev_attr.attr,
+ &sensor_dev_attr_fan5_input.dev_attr.attr,
+ &sensor_dev_attr_fan6_input.dev_attr.attr,
+ &sensor_dev_attr_fan7_input.dev_attr.attr,
+ &sensor_dev_attr_fan8_input.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_source.dev_attr.attr,
+ &sensor_dev_attr_fan2_source.dev_attr.attr,
+ &sensor_dev_attr_fan3_source.dev_attr.attr,
+ &sensor_dev_attr_fan4_source.dev_attr.attr,
+ &sensor_dev_attr_fan5_source.dev_attr.attr,
+ &sensor_dev_attr_fan6_source.dev_attr.attr,
+ &sensor_dev_attr_fan7_source.dev_attr.attr,
+ &sensor_dev_attr_fan8_source.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan2_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan3_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan4_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan5_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan6_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan7_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan8_alarm.dev_attr.attr,
+ NULL
+};
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in_value, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in_value, NULL, 1);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in_value, NULL, 2);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in_value, NULL, 3);
+static struct attribute *fts_voltage_attrs[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group fts_voltage_attr_group = {
+ .attrs = fts_voltage_attrs
+};
+
+static const struct attribute_group fts_temp_attr_group = {
+ .attrs = fts_temp_attrs
+};
+
+static const struct attribute_group fts_fan_attr_group = {
+ .attrs = fts_fan_attrs
+};
+
+static const struct attribute_group *fts_attr_groups[] = {
+ &fts_voltage_attr_group,
+ &fts_temp_attr_group,
+ &fts_fan_attr_group,
+ NULL
+};
+
+/*****************************************************************************/
+/* Module initialization / remove functions */
+/*****************************************************************************/
+static int fts_remove(struct i2c_client *client)
+{
+ struct fts_data *data = dev_get_drvdata(&client->dev);
+
+ watchdog_unregister_device(&data->wdd);
+ return 0;
+}
+
+static int fts_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ u8 revision;
+ struct fts_data *data;
+ int err;
+ s8 deviceid;
+ struct device *hwmon_dev;
+
+ if (client->addr != 0x73)
+ return -ENODEV;
+
+ /* Baseboard Management Controller check */
+ deviceid = i2c_smbus_read_byte_data(client, FTS_DEVICE_ID_REG);
+ if (deviceid > 0 && (deviceid & 0xF0) == 0x10) {
+ switch (deviceid & 0x0F) {
+ case 0x01:
+ break;
+ default:
+ dev_dbg(&client->dev,
+ "No Baseboard Management Controller\n");
+ return -ENODEV;
+ }
+ } else {
+ dev_dbg(&client->dev, "No fujitsu board\n");
+ return -ENODEV;
+ }
+
+ data = devm_kzalloc(&client->dev, sizeof(struct fts_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->update_lock);
+ mutex_init(&data->access_lock);
+ data->client = client;
+ dev_set_drvdata(&client->dev, data);
+
+ err = i2c_smbus_read_byte_data(client, FTS_DEVICE_REVISION_REG);
+ if (err < 0)
+ return err;
+ revision = err;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ "ftsteutates",
+ data,
+ fts_attr_groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ err = fts_watchdog_init(data);
+ if (err)
+ return err;
+
+ dev_info(&client->dev, "Detected FTS Teutates chip, revision: %d.%d\n",
+ (revision & 0xF0) >> 4, revision & 0x0F);
+ return 0;
+}
+
+/*****************************************************************************/
+/* Module Details */
+/*****************************************************************************/
+static struct i2c_driver fts_driver = {
+ .driver = {
+ .name = "ftsteutates",
+ },
+ .id_table = fts_id,
+ .probe = fts_probe,
+ .remove = fts_remove,
+};
+
+module_i2c_driver(fts_driver);
+
+MODULE_AUTHOR("Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>");
+MODULE_DESCRIPTION("FTS Teutates driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
new file mode 100644
index 000000000000..e6b49500c52a
--- /dev/null
+++ b/drivers/hwmon/ina3221.c
@@ -0,0 +1,445 @@
+/*
+ * INA3221 Triple Current/Voltage Monitor
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define INA3221_DRIVER_NAME "ina3221"
+
+#define INA3221_CONFIG 0x00
+#define INA3221_SHUNT1 0x01
+#define INA3221_BUS1 0x02
+#define INA3221_SHUNT2 0x03
+#define INA3221_BUS2 0x04
+#define INA3221_SHUNT3 0x05
+#define INA3221_BUS3 0x06
+#define INA3221_CRIT1 0x07
+#define INA3221_WARN1 0x08
+#define INA3221_CRIT2 0x09
+#define INA3221_WARN2 0x0a
+#define INA3221_CRIT3 0x0b
+#define INA3221_WARN3 0x0c
+#define INA3221_MASK_ENABLE 0x0f
+
+#define INA3221_CONFIG_MODE_SHUNT BIT(1)
+#define INA3221_CONFIG_MODE_BUS BIT(2)
+#define INA3221_CONFIG_MODE_CONTINUOUS BIT(3)
+
+#define INA3221_RSHUNT_DEFAULT 10000
+
+enum ina3221_fields {
+ /* Configuration */
+ F_RST,
+
+ /* Alert Flags */
+ F_WF3, F_WF2, F_WF1,
+ F_CF3, F_CF2, F_CF1,
+
+ /* sentinel */
+ F_MAX_FIELDS
+};
+
+static const struct reg_field ina3221_reg_fields[] = {
+ [F_RST] = REG_FIELD(INA3221_CONFIG, 15, 15),
+
+ [F_WF3] = REG_FIELD(INA3221_MASK_ENABLE, 3, 3),
+ [F_WF2] = REG_FIELD(INA3221_MASK_ENABLE, 4, 4),
+ [F_WF1] = REG_FIELD(INA3221_MASK_ENABLE, 5, 5),
+ [F_CF3] = REG_FIELD(INA3221_MASK_ENABLE, 7, 7),
+ [F_CF2] = REG_FIELD(INA3221_MASK_ENABLE, 8, 8),
+ [F_CF1] = REG_FIELD(INA3221_MASK_ENABLE, 9, 9),
+};
+
+enum ina3221_channels {
+ INA3221_CHANNEL1,
+ INA3221_CHANNEL2,
+ INA3221_CHANNEL3,
+ INA3221_NUM_CHANNELS
+};
+
+static const unsigned int register_channel[] = {
+ [INA3221_SHUNT1] = INA3221_CHANNEL1,
+ [INA3221_SHUNT2] = INA3221_CHANNEL2,
+ [INA3221_SHUNT3] = INA3221_CHANNEL3,
+ [INA3221_CRIT1] = INA3221_CHANNEL1,
+ [INA3221_CRIT2] = INA3221_CHANNEL2,
+ [INA3221_CRIT3] = INA3221_CHANNEL3,
+ [INA3221_WARN1] = INA3221_CHANNEL1,
+ [INA3221_WARN2] = INA3221_CHANNEL2,
+ [INA3221_WARN3] = INA3221_CHANNEL3,
+};
+
+/**
+ * struct ina3221_data - device specific information
+ * @regmap: Register map of the device
+ * @fields: Register fields of the device
+ * @shunt_resistors: Array of resistor values per channel
+ */
+struct ina3221_data {
+ struct regmap *regmap;
+ struct regmap_field *fields[F_MAX_FIELDS];
+ int shunt_resistors[INA3221_NUM_CHANNELS];
+};
+
+static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
+ int *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(ina->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
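+ /* Measurements are 13-bit signed values stored in register bits [15:3] */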
+ *val = sign_extend32(regval >> 3, 12);
+
+ return 0;
+}
+
+static ssize_t ina3221_show_bus_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int reg = sd_attr->index;
+ int val, voltage_mv, ret;
+
+ ret = ina3221_read_value(ina, reg, &val);
+ if (ret)
+ return ret;
+
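+ /* Bus voltage is reported in multiples of 8 mV per LSB */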
+ voltage_mv = val * 8;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", voltage_mv);
+}
+
+static ssize_t ina3221_show_shunt_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int reg = sd_attr->index;
+ int val, voltage_uv, ret;
+
+ ret = ina3221_read_value(ina, reg, &val);
+ if (ret)
+ return ret;
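+ /* Shunt voltage is reported in multiples of 40 uV per LSB */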
+ voltage_uv = val * 40;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", voltage_uv);
+}
+
+static ssize_t ina3221_show_current(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int reg = sd_attr->index;
+ unsigned int channel = register_channel[reg];
+ int resistance_uo = ina->shunt_resistors[channel];
+ int val, current_ma, voltage_nv, ret;
+
+ ret = ina3221_read_value(ina, reg, &val);
+ if (ret)
+ return ret;
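+ /* 40 uV/LSB expressed in nV; nV divided by shunt resistance in uOhm gives mA */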
+ voltage_nv = val * 40000;
+
+ current_ma = DIV_ROUND_CLOSEST(voltage_nv, resistance_uo);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", current_ma);
+}
+
+static ssize_t ina3221_set_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int reg = sd_attr->index;
+ unsigned int channel = register_channel[reg];
+ int resistance_uo = ina->shunt_resistors[channel];
+ int val, current_ma, voltage_uv, ret;
+
+ ret = kstrtoint(buf, 0, &current_ma);
+ if (ret)
+ return ret;
+
+ /* clamp current */
+ current_ma = clamp_val(current_ma,
+ INT_MIN / resistance_uo,
+ INT_MAX / resistance_uo);
+
+ voltage_uv = DIV_ROUND_CLOSEST(current_ma * resistance_uo, 1000);
+
+ /* clamp voltage */
+ voltage_uv = clamp_val(voltage_uv, -163800, 163800);
+
+ /* 40 uV/LSB, value stored in bits [15:3]: reg = uV / 40 * 8 = uV / 5 */
+ val = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
+
+ ret = regmap_write(ina->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ina3221_show_shunt(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int channel = sd_attr->index;
+ unsigned int resistance_uo;
+
+ resistance_uo = ina->shunt_resistors[channel];
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", resistance_uo);
+}
+
+static ssize_t ina3221_set_shunt(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int channel = sd_attr->index;
+ int val;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val = clamp_val(val, 1, INT_MAX);
+
+ ina->shunt_resistors[channel] = val;
+
+ return count;
+}
+
+static ssize_t ina3221_show_alert(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int field = sd_attr->index;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_field_read(ina->fields[field], &regval);
+ if (ret)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", regval);
+}
+
+/* bus voltage */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO,
+ ina3221_show_bus_voltage, NULL, INA3221_BUS1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO,
+ ina3221_show_bus_voltage, NULL, INA3221_BUS2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO,
+ ina3221_show_bus_voltage, NULL, INA3221_BUS3);
+
+/* calculated current */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO,
+ ina3221_show_current, NULL, INA3221_SHUNT1);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO,
+ ina3221_show_current, NULL, INA3221_SHUNT2);
+static SENSOR_DEVICE_ATTR(curr3_input, S_IRUGO,
+ ina3221_show_current, NULL, INA3221_SHUNT3);
+
+/* shunt resistance */
+static SENSOR_DEVICE_ATTR(shunt1_resistor, S_IRUGO | S_IWUSR,
+ ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL1);
+static SENSOR_DEVICE_ATTR(shunt2_resistor, S_IRUGO | S_IWUSR,
+ ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL2);
+static SENSOR_DEVICE_ATTR(shunt3_resistor, S_IRUGO | S_IWUSR,
+ ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL3);
+
+/* critical current */
+static SENSOR_DEVICE_ATTR(curr1_crit, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_CRIT1);
+static SENSOR_DEVICE_ATTR(curr2_crit, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_CRIT2);
+static SENSOR_DEVICE_ATTR(curr3_crit, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_CRIT3);
+
+/* critical current alert */
+static SENSOR_DEVICE_ATTR(curr1_crit_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_CF1);
+static SENSOR_DEVICE_ATTR(curr2_crit_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_CF2);
+static SENSOR_DEVICE_ATTR(curr3_crit_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_CF3);
+
+/* warning current */
+static SENSOR_DEVICE_ATTR(curr1_max, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_WARN1);
+static SENSOR_DEVICE_ATTR(curr2_max, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_WARN2);
+static SENSOR_DEVICE_ATTR(curr3_max, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_WARN3);
+
+/* warning current alert */
+static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_WF1);
+static SENSOR_DEVICE_ATTR(curr2_max_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_WF2);
+static SENSOR_DEVICE_ATTR(curr3_max_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_WF3);
+
+/* shunt voltage */
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO,
+ ina3221_show_shunt_voltage, NULL, INA3221_SHUNT1);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO,
+ ina3221_show_shunt_voltage, NULL, INA3221_SHUNT2);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO,
+ ina3221_show_shunt_voltage, NULL, INA3221_SHUNT3);
+
+static struct attribute *ina3221_attrs[] = {
+ /* channel 1 */
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_shunt1_resistor.dev_attr.attr,
+ &sensor_dev_attr_curr1_crit.dev_attr.attr,
+ &sensor_dev_attr_curr1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_curr1_max.dev_attr.attr,
+ &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+
+ /* channel 2 */
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_curr2_input.dev_attr.attr,
+ &sensor_dev_attr_shunt2_resistor.dev_attr.attr,
+ &sensor_dev_attr_curr2_crit.dev_attr.attr,
+ &sensor_dev_attr_curr2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_curr2_max.dev_attr.attr,
+ &sensor_dev_attr_curr2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+
+ /* channel 3 */
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_curr3_input.dev_attr.attr,
+ &sensor_dev_attr_shunt3_resistor.dev_attr.attr,
+ &sensor_dev_attr_curr3_crit.dev_attr.attr,
+ &sensor_dev_attr_curr3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_curr3_max.dev_attr.attr,
+ &sensor_dev_attr_curr3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+
+ NULL,
+};
+ATTRIBUTE_GROUPS(ina3221);
+
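+/*
+ * Measurement and status registers change autonomously, so mark them
+ * volatile and let reads bypass the regmap cache.
+ */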
+static const struct regmap_range ina3221_yes_ranges[] = {
+ regmap_reg_range(INA3221_SHUNT1, INA3221_BUS3),
+ regmap_reg_range(INA3221_MASK_ENABLE, INA3221_MASK_ENABLE),
+};
+
+static const struct regmap_access_table ina3221_volatile_table = {
+ .yes_ranges = ina3221_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ina3221_yes_ranges),
+};
+
+static const struct regmap_config ina3221_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &ina3221_volatile_table,
+};
+
+static int ina3221_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ina3221_data *ina;
+ struct device *hwmon_dev;
+ int i, ret;
+
+ ina = devm_kzalloc(dev, sizeof(*ina), GFP_KERNEL);
+ if (!ina)
+ return -ENOMEM;
+
+ ina->regmap = devm_regmap_init_i2c(client, &ina3221_regmap_config);
+ if (IS_ERR(ina->regmap)) {
+ dev_err(dev, "Unable to allocate register map\n");
+ return PTR_ERR(ina->regmap);
+ }
+
+ for (i = 0; i < F_MAX_FIELDS; i++) {
+ ina->fields[i] = devm_regmap_field_alloc(dev,
+ ina->regmap,
+ ina3221_reg_fields[i]);
+ if (IS_ERR(ina->fields[i])) {
+ dev_err(dev, "Unable to allocate regmap fields\n");
+ return PTR_ERR(ina->fields[i]);
+ }
+ }
+
+ for (i = 0; i < INA3221_NUM_CHANNELS; i++)
+ ina->shunt_resistors[i] = INA3221_RSHUNT_DEFAULT;
+
+ ret = regmap_field_write(ina->fields[F_RST], true);
+ if (ret) {
+ dev_err(dev, "Unable to reset device\n");
+ return ret;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+ client->name,
+ ina, ina3221_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "Unable to register hwmon device\n");
+ return PTR_ERR(hwmon_dev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ina3221_of_match_table[] = {
+ { .compatible = "ti,ina3221", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ina3221_of_match_table);
+
+static const struct i2c_device_id ina3221_ids[] = {
+ { "ina3221", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ina3221_ids);
+
+static struct i2c_driver ina3221_i2c_driver = {
+ .probe = ina3221_probe,
+ .driver = {
+ .name = INA3221_DRIVER_NAME,
+ .of_match_table = ina3221_of_match_table,
+ },
+ .id_table = ina3221_ids,
+};
+module_i2c_driver(ina3221_i2c_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments INA3221 HWMon Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 9887d3224a86..9d5f85f3384f 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -31,6 +31,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/of.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = {
@@ -104,6 +105,9 @@ static const unsigned short normal_i2c[] = {
#define MCP9804_DEVID 0x0200
#define MCP9804_DEVID_MASK 0xfffc
+#define MCP9808_DEVID 0x0400
+#define MCP9808_DEVID_MASK 0xfffc
+
#define MCP98242_DEVID 0x2000
#define MCP98242_DEVID_MASK 0xfffc
@@ -160,6 +164,7 @@ static struct jc42_chips jc42_chips[] = {
{ IDT_MANID, TS3001_DEVID, TS3001_DEVID_MASK },
{ MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK },
{ MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
+ { MCP_MANID, MCP9808_DEVID, MCP9808_DEVID_MASK },
{ MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
{ MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
{ MCP_MANID, MCP98244_DEVID, MCP98244_DEVID_MASK },
@@ -537,11 +542,20 @@ static const struct i2c_device_id jc42_id[] = {
};
MODULE_DEVICE_TABLE(i2c, jc42_id);
+#ifdef CONFIG_OF
+static const struct of_device_id jc42_of_ids[] = {
+ { .compatible = "jedec,jc-42.4-temp", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, jc42_of_ids);
+#endif
+
static struct i2c_driver jc42_driver = {
- .class = I2C_CLASS_SPD,
+ .class = I2C_CLASS_SPD | I2C_CLASS_HWMON,
.driver = {
.name = "jc42",
.pm = JC42_DEV_PM_OPS,
+ .of_match_table = of_match_ptr(jc42_of_ids),
},
.probe = jc42_probe,
.remove = jc42_remove,
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index df9b3447f2a8..0621ee1b3c98 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -29,23 +29,13 @@
struct jz4740_hwmon {
void __iomem *base;
-
int irq;
-
const struct mfd_cell *cell;
- struct device *hwmon;
-
+ struct platform_device *pdev;
struct completion read_completion;
-
struct mutex lock;
};
-static ssize_t jz4740_hwmon_show_name(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
-{
- return sprintf(buf, "jz4740\n");
-}
-
static irqreturn_t jz4740_hwmon_irq(int irq, void *data)
{
struct jz4740_hwmon *hwmon = data;
@@ -58,6 +48,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
+ struct platform_device *pdev = hwmon->pdev;
struct completion *completion = &hwmon->read_completion;
long t;
unsigned long val;
@@ -68,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
reinit_completion(completion);
enable_irq(hwmon->irq);
- hwmon->cell->enable(to_platform_device(dev));
+ hwmon->cell->enable(pdev);
t = wait_for_completion_interruptible_timeout(completion, HZ);
@@ -80,7 +71,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
ret = t ? t : -ETIMEDOUT;
}
- hwmon->cell->disable(to_platform_device(dev));
+ hwmon->cell->disable(pdev);
disable_irq(hwmon->irq);
mutex_unlock(&hwmon->lock);
@@ -88,26 +79,24 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
return ret;
}
-static DEVICE_ATTR(name, S_IRUGO, jz4740_hwmon_show_name, NULL);
static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL);
-static struct attribute *jz4740_hwmon_attributes[] = {
- &dev_attr_name.attr,
+static struct attribute *jz4740_attrs[] = {
&dev_attr_in0_input.attr,
NULL
};
-static const struct attribute_group jz4740_hwmon_attr_group = {
- .attrs = jz4740_hwmon_attributes,
-};
+ATTRIBUTE_GROUPS(jz4740);
static int jz4740_hwmon_probe(struct platform_device *pdev)
{
int ret;
+ struct device *dev = &pdev->dev;
struct jz4740_hwmon *hwmon;
+ struct device *hwmon_dev;
struct resource *mem;
- hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
+ hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
@@ -125,12 +114,11 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
if (IS_ERR(hwmon->base))
return PTR_ERR(hwmon->base);
+ hwmon->pdev = pdev;
init_completion(&hwmon->read_completion);
mutex_init(&hwmon->lock);
- platform_set_drvdata(pdev, hwmon);
-
- ret = devm_request_irq(&pdev->dev, hwmon->irq, jz4740_hwmon_irq, 0,
+ ret = devm_request_irq(dev, hwmon->irq, jz4740_hwmon_irq, 0,
pdev->name, hwmon);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
@@ -138,38 +126,13 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
}
disable_irq(hwmon->irq);
- ret = sysfs_create_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create sysfs group: %d\n", ret);
- return ret;
- }
-
- hwmon->hwmon = hwmon_device_register(&pdev->dev);
- if (IS_ERR(hwmon->hwmon)) {
- ret = PTR_ERR(hwmon->hwmon);
- goto err_remove_file;
- }
-
- return 0;
-
-err_remove_file:
- sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
- return ret;
-}
-
-static int jz4740_hwmon_remove(struct platform_device *pdev)
-{
- struct jz4740_hwmon *hwmon = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(hwmon->hwmon);
- sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "jz4740", hwmon,
+ jz4740_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static struct platform_driver jz4740_hwmon_driver = {
.probe = jz4740_hwmon_probe,
- .remove = jz4740_hwmon_remove,
.driver = {
.name = "jz4740-hwmon",
},
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 69166ab3151d..547a9c87c68c 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -26,8 +26,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/regmap.h>
#include <linux/thermal.h>
#include "lm75.h"
@@ -66,35 +66,21 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
/* The LM75 registers */
+#define LM75_REG_TEMP 0x00
#define LM75_REG_CONF 0x01
-static const u8 LM75_REG_TEMP[3] = {
- 0x00, /* input */
- 0x03, /* max */
- 0x02, /* hyst */
-};
+#define LM75_REG_HYST 0x02
+#define LM75_REG_MAX 0x03
/* Each client has this additional data */
struct lm75_data {
struct i2c_client *client;
- struct device *hwmon_dev;
- struct mutex update_lock;
+ struct regmap *regmap;
u8 orig_conf;
u8 resolution; /* In bits, between 9 and 12 */
u8 resolution_limits;
- char valid; /* !=0 if registers are valid */
- unsigned long last_updated; /* In jiffies */
- unsigned long sample_time; /* In jiffies */
- s16 temp[3]; /* Register values,
- 0 = input
- 1 = max
- 2 = hyst */
+ unsigned int sample_time; /* In ms */
};
-static int lm75_read_value(struct i2c_client *client, u8 reg);
-static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value);
-static struct lm75_data *lm75_update_device(struct device *dev);
-
-
/*-----------------------------------------------------------------------*/
static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
@@ -106,12 +92,15 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
static int lm75_read_temp(void *dev, int *temp)
{
- struct lm75_data *data = lm75_update_device(dev);
+ struct lm75_data *data = dev_get_drvdata(dev);
+ unsigned int _temp;
+ int err;
- if (IS_ERR(data))
- return PTR_ERR(data);
+ err = regmap_read(data->regmap, LM75_REG_TEMP, &_temp);
+ if (err < 0)
+ return err;
- *temp = lm75_reg_to_mc(data->temp[0], data->resolution);
+ *temp = lm75_reg_to_mc(_temp, data->resolution);
return 0;
}
@@ -120,13 +109,15 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct lm75_data *data = lm75_update_device(dev);
+ struct lm75_data *data = dev_get_drvdata(dev);
+ unsigned int temp = 0;
+ int err;
- if (IS_ERR(data))
- return PTR_ERR(data);
+ err = regmap_read(data->regmap, attr->index, &temp);
+ if (err < 0)
+ return err;
- return sprintf(buf, "%ld\n", lm75_reg_to_mc(data->temp[attr->index],
- data->resolution));
+ return sprintf(buf, "%ld\n", lm75_reg_to_mc(temp, data->resolution));
}
static ssize_t set_temp(struct device *dev, struct device_attribute *da,
@@ -134,8 +125,6 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm75_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int nr = attr->index;
long temp;
int error;
u8 resolution;
@@ -153,25 +142,36 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
else
resolution = data->resolution;
- mutex_lock(&data->update_lock);
temp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
- data->temp[nr] = DIV_ROUND_CLOSEST(temp << (resolution - 8),
- 1000) << (16 - resolution);
- lm75_write_value(client, LM75_REG_TEMP[nr], data->temp[nr]);
- mutex_unlock(&data->update_lock);
+ temp = DIV_ROUND_CLOSEST(temp << (resolution - 8),
+ 1000) << (16 - resolution);
+ error = regmap_write(data->regmap, attr->index, temp);
+ if (error < 0)
+ return error;
+
return count;
}
+static ssize_t show_update_interval(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct lm75_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", data->sample_time);
+}
+
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
- show_temp, set_temp, 1);
+ show_temp, set_temp, LM75_REG_MAX);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
- show_temp, set_temp, 2);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+ show_temp, set_temp, LM75_REG_HYST);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, LM75_REG_TEMP);
+static DEVICE_ATTR(update_interval, S_IRUGO, show_update_interval, NULL);
static struct attribute *lm75_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &dev_attr_update_interval.attr,
NULL
};
@@ -185,10 +185,40 @@ static const struct thermal_zone_of_device_ops lm75_of_thermal_ops = {
/* device probe and removal */
+static bool lm75_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != LM75_REG_TEMP;
+}
+
+static bool lm75_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg == LM75_REG_TEMP;
+}
+
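+/*
+ * The 16-bit temperature registers are big-endian and handled through
+ * the regmap; the 8-bit configuration register is still accessed
+ * directly via i2c_smbus byte transfers (see probe/suspend/resume).
+ */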
+static const struct regmap_config lm75_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = LM75_REG_MAX,
+ .writeable_reg = lm75_is_writeable_reg,
+ .volatile_reg = lm75_is_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
+};
+
+static void lm75_remove(void *data)
+{
+ struct lm75_data *lm75 = data;
+ struct i2c_client *client = lm75->client;
+
+ i2c_smbus_write_byte_data(client, LM75_REG_CONF, lm75->orig_conf);
+}
+
static int
lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
+ struct device *hwmon_dev;
struct lm75_data *data;
int status;
u8 set_mask, clr_mask;
@@ -204,8 +234,10 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENOMEM;
data->client = client;
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
+
+ data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
/* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
* Then tweak to be more precise when appropriate.
@@ -217,7 +249,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
case adt75:
clr_mask |= 1 << 5; /* not one-shot mode */
data->resolution = 12;
- data->sample_time = HZ / 8;
+ data->sample_time = MSEC_PER_SEC / 8;
break;
case ds1775:
case ds75:
@@ -225,35 +257,35 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
clr_mask |= 3 << 5;
set_mask |= 2 << 5; /* 11-bit mode */
data->resolution = 11;
- data->sample_time = HZ;
+ data->sample_time = MSEC_PER_SEC;
break;
case ds7505:
set_mask |= 3 << 5; /* 12-bit mode */
data->resolution = 12;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
case g751:
case lm75:
case lm75a:
data->resolution = 9;
- data->sample_time = HZ / 2;
+ data->sample_time = MSEC_PER_SEC / 2;
break;
case lm75b:
data->resolution = 11;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
case max6625:
data->resolution = 9;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
case max6626:
data->resolution = 12;
data->resolution_limits = 9;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
case tcn75:
data->resolution = 9;
- data->sample_time = HZ / 8;
+ data->sample_time = MSEC_PER_SEC / 8;
break;
case mcp980x:
data->resolution_limits = 9;
@@ -262,14 +294,14 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
case tmp101:
set_mask |= 3 << 5; /* 12-bit mode */
data->resolution = 12;
- data->sample_time = HZ;
+ data->sample_time = MSEC_PER_SEC;
clr_mask |= 1 << 7; /* not one-shot mode */
break;
case tmp112:
set_mask |= 3 << 5; /* 12-bit mode */
clr_mask |= 1 << 7; /* not one-shot mode */
data->resolution = 12;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
case tmp105:
case tmp175:
@@ -278,17 +310,17 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
set_mask |= 3 << 5; /* 12-bit mode */
clr_mask |= 1 << 7; /* not one-shot mode */
data->resolution = 12;
- data->sample_time = HZ / 2;
+ data->sample_time = MSEC_PER_SEC / 2;
break;
case tmp75c:
clr_mask |= 1 << 5; /* not one-shot mode */
data->resolution = 12;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
}
/* configure as specified */
- status = lm75_read_value(client, LM75_REG_CONF);
+ status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(dev, "Can't read config? %d\n", status);
return status;
@@ -297,30 +329,23 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
new = status & ~clr_mask;
new |= set_mask;
if (status != new)
- lm75_write_value(client, LM75_REG_CONF, new);
- dev_dbg(dev, "Config %02x\n", new);
+ i2c_smbus_write_byte_data(client, LM75_REG_CONF, new);
- data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
- data, lm75_groups);
- if (IS_ERR(data->hwmon_dev))
- return PTR_ERR(data->hwmon_dev);
+ status = devm_add_action_or_reset(dev, lm75_remove, data);
+ if (status)
+ return status;
- devm_thermal_zone_of_sensor_register(data->hwmon_dev, 0,
- data->hwmon_dev,
- &lm75_of_thermal_ops);
+ dev_dbg(dev, "Config %02x\n", new);
- dev_info(dev, "%s: sensor '%s'\n",
- dev_name(data->hwmon_dev), client->name);
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, lm75_groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- return 0;
-}
+ devm_thermal_zone_of_sensor_register(hwmon_dev, 0,
+ hwmon_dev,
+ &lm75_of_thermal_ops);
-static int lm75_remove(struct i2c_client *client)
-{
- struct lm75_data *data = i2c_get_clientdata(client);
+ dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), client->name);
- hwmon_device_unregister(data->hwmon_dev);
- lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
return 0;
}
@@ -449,13 +474,13 @@ static int lm75_suspend(struct device *dev)
{
int status;
struct i2c_client *client = to_i2c_client(dev);
- status = lm75_read_value(client, LM75_REG_CONF);
+ status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
return status;
}
status = status | LM75_SHUTDOWN;
- lm75_write_value(client, LM75_REG_CONF, status);
+ i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
return 0;
}
@@ -463,13 +488,13 @@ static int lm75_resume(struct device *dev)
{
int status;
struct i2c_client *client = to_i2c_client(dev);
- status = lm75_read_value(client, LM75_REG_CONF);
+ status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
return status;
}
status = status & ~LM75_SHUTDOWN;
- lm75_write_value(client, LM75_REG_CONF, status);
+ i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
return 0;
}
@@ -489,73 +514,11 @@ static struct i2c_driver lm75_driver = {
.pm = LM75_DEV_PM_OPS,
},
.probe = lm75_probe,
- .remove = lm75_remove,
.id_table = lm75_ids,
.detect = lm75_detect,
.address_list = normal_i2c,
};
-/*-----------------------------------------------------------------------*/
-
-/* register access */
-
-/*
- * All registers are word-sized, except for the configuration register.
- * LM75 uses a high-byte first convention, which is exactly opposite to
- * the SMBus standard.
- */
-static int lm75_read_value(struct i2c_client *client, u8 reg)
-{
- if (reg == LM75_REG_CONF)
- return i2c_smbus_read_byte_data(client, reg);
- else
- return i2c_smbus_read_word_swapped(client, reg);
-}
-
-static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)
-{
- if (reg == LM75_REG_CONF)
- return i2c_smbus_write_byte_data(client, reg, value);
- else
- return i2c_smbus_write_word_swapped(client, reg, value);
-}
-
-static struct lm75_data *lm75_update_device(struct device *dev)
-{
- struct lm75_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- struct lm75_data *ret = data;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + data->sample_time)
- || !data->valid) {
- int i;
- dev_dbg(&client->dev, "Starting lm75 update\n");
-
- for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
- int status;
-
- status = lm75_read_value(client, LM75_REG_TEMP[i]);
- if (unlikely(status < 0)) {
- dev_dbg(dev,
- "LM75: Failed to read value: reg %d, error %d\n",
- LM75_REG_TEMP[i], status);
- ret = ERR_PTR(status);
- data->valid = 0;
- goto abort;
- }
- data->temp[i] = status;
- }
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
-abort:
- mutex_unlock(&data->update_lock);
- return ret;
-}
-
module_i2c_driver(lm75_driver);
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index e30a5939dc0d..f51e758ba529 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -171,7 +171,6 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define SA56004_REG_R_LOCAL_TEMPL 0x22
-#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */
#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
/* TMP451 registers */
@@ -366,11 +365,9 @@ enum lm90_temp11_reg_index {
struct lm90_data {
struct i2c_client *client;
- struct device *hwmon_dev;
const struct attribute_group *groups[6];
struct mutex update_lock;
- struct regulator *regulator;
- char valid; /* zero until following fields are valid */
+ bool valid; /* true if register values are valid */
unsigned long last_updated; /* in jiffies */
int kind;
u32 flags;
@@ -412,7 +409,7 @@ static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value)
* because we don't want the address pointer to change between the write
* byte and the read byte transactions.
*/
-static int lm90_read_reg(struct i2c_client *client, u8 reg, u8 *value)
+static int lm90_read_reg(struct i2c_client *client, u8 reg)
{
int err;
@@ -423,20 +420,12 @@ static int lm90_read_reg(struct i2c_client *client, u8 reg, u8 *value)
} else
err = i2c_smbus_read_byte_data(client, reg);
- if (err < 0) {
- dev_warn(&client->dev, "Register %#02x read failed (%d)\n",
- reg, err);
- return err;
- }
- *value = err;
-
- return 0;
+ return err;
}
-static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
+static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl)
{
- int err;
- u8 oldh, newh, l;
+ int oldh, newh, l;
/*
* There is a trick here. We have to read two registers to have the
@@ -451,18 +440,21 @@ static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
* we have to read the low byte again, and now we believe we have a
* correct reading.
*/
- if ((err = lm90_read_reg(client, regh, &oldh))
- || (err = lm90_read_reg(client, regl, &l))
- || (err = lm90_read_reg(client, regh, &newh)))
- return err;
+ oldh = lm90_read_reg(client, regh);
+ if (oldh < 0)
+ return oldh;
+ l = lm90_read_reg(client, regl);
+ if (l < 0)
+ return l;
+ newh = lm90_read_reg(client, regh);
+ if (newh < 0)
+ return newh;
if (oldh != newh) {
- err = lm90_read_reg(client, regl, &l);
- if (err)
- return err;
+ l = lm90_read_reg(client, regl);
+ if (l < 0)
+ return l;
}
- *value = (newh << 8) | l;
-
- return 0;
+ return (newh << 8) | l;
}
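+
+/*
+ * For example: if the temperature crosses 0x00ff -> 0x0100 between the
+ * two high-byte reads, we get oldh = 0x00, l = 0xff, newh = 0x01; the
+ * oldh != newh mismatch flags the race, so the low byte is read again.
+ */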
/*
@@ -473,20 +465,23 @@ static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
* various registers have different meanings as a result of selecting a
* non-default remote channel.
*/
-static inline void lm90_select_remote_channel(struct i2c_client *client,
- struct lm90_data *data,
- int channel)
+static inline int lm90_select_remote_channel(struct i2c_client *client,
+ struct lm90_data *data,
+ int channel)
{
- u8 config;
+ int config;
if (data->kind == max6696) {
- lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
+ config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+ if (config < 0)
+ return config;
config &= ~0x08;
if (channel)
config |= 0x08;
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
config);
}
+ return 0;
}
/*
@@ -513,118 +508,204 @@ static void lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
data->update_interval = DIV_ROUND_CLOSEST(update_interval, 64);
}
+static int lm90_update_limits(struct device *dev)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int val;
+
+ val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
+ if (val < 0)
+ return val;
+ data->temp8[LOCAL_CRIT] = val;
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+ if (val < 0)
+ return val;
+ data->temp8[REMOTE_CRIT] = val;
+
+ val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
+ if (val < 0)
+ return val;
+ data->temp_hyst = val;
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE_LOW] = val << 8;
+
+ if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE_LOW] |= val;
+ }
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE_HIGH] = val << 8;
+
+ if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE_HIGH] |= val;
+ }
+
+ if (data->flags & LM90_HAVE_OFFSET) {
+ val = lm90_read16(client, LM90_REG_R_REMOTE_OFFSH,
+ LM90_REG_R_REMOTE_OFFSL);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE_OFFSET] = val;
+ }
+
+ if (data->flags & LM90_HAVE_EMERGENCY) {
+ val = lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG);
+ if (val < 0)
+ return val;
+ data->temp8[LOCAL_EMERG] = val;
+
+ val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG);
+ if (val < 0)
+ return val;
+ data->temp8[REMOTE_EMERG] = val;
+ }
+
+ if (data->kind == max6696) {
+ val = lm90_select_remote_channel(client, data, 1);
+ if (val < 0)
+ return val;
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+ if (val < 0)
+ return val;
+ data->temp8[REMOTE2_CRIT] = val;
+
+ val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG);
+ if (val < 0)
+ return val;
+ data->temp8[REMOTE2_EMERG] = val;
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE2_LOW] = val << 8;
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE2_HIGH] = val << 8;
+
+ lm90_select_remote_channel(client, data, 0);
+ }
+
+ return 0;
+}
+
static struct lm90_data *lm90_update_device(struct device *dev)
{
struct lm90_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
unsigned long next_update;
+ int val = 0;
mutex_lock(&data->update_lock);
+ if (!data->valid) {
+ val = lm90_update_limits(dev);
+ if (val < 0)
+ goto error;
+ }
+
next_update = data->last_updated +
msecs_to_jiffies(data->update_interval);
if (time_after(jiffies, next_update) || !data->valid) {
- u8 h, l;
- u8 alarms;
-
dev_dbg(&client->dev, "Updating lm90 data.\n");
- lm90_read_reg(client, LM90_REG_R_LOCAL_LOW,
- &data->temp8[LOCAL_LOW]);
- lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH,
- &data->temp8[LOCAL_HIGH]);
- lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT,
- &data->temp8[LOCAL_CRIT]);
- lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
- &data->temp8[REMOTE_CRIT]);
- lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
+
+ data->valid = false;
+
+ val = lm90_read_reg(client, LM90_REG_R_LOCAL_LOW);
+ if (val < 0)
+ goto error;
+ data->temp8[LOCAL_LOW] = val;
+
+ val = lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH);
+ if (val < 0)
+ goto error;
+ data->temp8[LOCAL_HIGH] = val;
if (data->reg_local_ext) {
- lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
- data->reg_local_ext,
- &data->temp11[LOCAL_TEMP]);
+ val = lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
+ data->reg_local_ext);
+ if (val < 0)
+ goto error;
+ data->temp11[LOCAL_TEMP] = val;
} else {
- if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
- &h) == 0)
- data->temp11[LOCAL_TEMP] = h << 8;
- }
- lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL,
- &data->temp11[REMOTE_TEMP]);
-
- if (lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h) == 0) {
- data->temp11[REMOTE_LOW] = h << 8;
- if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
- && lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL,
- &l) == 0)
- data->temp11[REMOTE_LOW] |= l;
- }
- if (lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h) == 0) {
- data->temp11[REMOTE_HIGH] = h << 8;
- if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
- && lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL,
- &l) == 0)
- data->temp11[REMOTE_HIGH] |= l;
+ val = lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP);
+ if (val < 0)
+ goto error;
+ data->temp11[LOCAL_TEMP] = val << 8;
}
+ val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
+ LM90_REG_R_REMOTE_TEMPL);
+ if (val < 0)
+ goto error;
+ data->temp11[REMOTE_TEMP] = val;
- if (data->flags & LM90_HAVE_OFFSET) {
- if (lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSH,
- &h) == 0
- && lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSL,
- &l) == 0)
- data->temp11[REMOTE_OFFSET] = (h << 8) | l;
- }
- if (data->flags & LM90_HAVE_EMERGENCY) {
- lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG,
- &data->temp8[LOCAL_EMERG]);
- lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
- &data->temp8[REMOTE_EMERG]);
- }
- lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
- data->alarms = alarms; /* save as 16 bit value */
+ val = lm90_read_reg(client, LM90_REG_R_STATUS);
+ if (val < 0)
+ goto error;
+ data->alarms = val; /* lower 8 bits of alarms */
if (data->kind == max6696) {
- lm90_select_remote_channel(client, data, 1);
- lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
- &data->temp8[REMOTE2_CRIT]);
- lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
- &data->temp8[REMOTE2_EMERG]);
- lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL,
- &data->temp11[REMOTE2_TEMP]);
- if (!lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h))
- data->temp11[REMOTE2_LOW] = h << 8;
- if (!lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h))
- data->temp11[REMOTE2_HIGH] = h << 8;
+ val = lm90_select_remote_channel(client, data, 1);
+ if (val < 0)
+ goto error;
+
+ val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
+ LM90_REG_R_REMOTE_TEMPL);
+ if (val < 0)
+ goto error;
+ data->temp11[REMOTE2_TEMP] = val;
+
lm90_select_remote_channel(client, data, 0);
- if (!lm90_read_reg(client, MAX6696_REG_R_STATUS2,
- &alarms))
- data->alarms |= alarms << 8;
+ val = lm90_read_reg(client, MAX6696_REG_R_STATUS2);
+ if (val < 0)
+ goto error;
+ data->alarms |= val << 8;
}
/*
* Re-enable ALERT# output if it was originally enabled and
* relevant alarms are all clear
*/
- if ((data->config_orig & 0x80) == 0
- && (data->alarms & data->alert_alarms) == 0) {
- u8 config;
+ if (!(data->config_orig & 0x80) &&
+ !(data->alarms & data->alert_alarms)) {
+ val = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+ if (val < 0)
+ goto error;
- lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
- if (config & 0x80) {
+ if (val & 0x80) {
dev_dbg(&client->dev, "Re-enabling ALERT#\n");
i2c_smbus_write_byte_data(client,
LM90_REG_W_CONFIG1,
- config & ~0x80);
+ val & ~0x80);
}
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
+error:
mutex_unlock(&data->update_lock);
+ if (val < 0)
+ return ERR_PTR(val);
+
return data;
}
@@ -709,16 +790,14 @@ static inline int temp_from_u8_adt7461(struct lm90_data *data, u8 val)
{
if (data->flags & LM90_FLAG_ADT7461_EXT)
return (val - 64) * 1000;
- else
- return temp_from_s8(val);
+ return temp_from_s8(val);
}
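+
+/*
+ * In extended (ADT7461) mode the register is offset binary with a 64
+ * degree offset: for example, val = 89 decodes to (89 - 64) * 1000 =
+ * 25000, i.e. 25 degrees C.
+ */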
static inline int temp_from_u16_adt7461(struct lm90_data *data, u16 val)
{
if (data->flags & LM90_FLAG_ADT7461_EXT)
return (val - 0x4000) / 64 * 250;
- else
- return temp_from_s16(val);
+ return temp_from_s16(val);
}
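+
+/*
+ * Same offset in the 16-bit form: val = 0x5900 (22784) decodes to
+ * (22784 - 0x4000) / 64 * 250 = 25000, i.e. 25 degrees C.
+ */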
static u8 temp_to_u8_adt7461(struct lm90_data *data, long val)
@@ -729,13 +808,12 @@ static u8 temp_to_u8_adt7461(struct lm90_data *data, long val)
if (val >= 191000)
return 0xFF;
return (val + 500 + 64000) / 1000;
- } else {
- if (val <= 0)
- return 0;
- if (val >= 127000)
- return 127;
- return (val + 500) / 1000;
}
+ if (val <= 0)
+ return 0;
+ if (val >= 127000)
+ return 127;
+ return (val + 500) / 1000;
}
static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
@@ -746,13 +824,12 @@ static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
if (val >= 191750)
return 0xFFC0;
return (val + 64000 + 125) / 250 * 64;
- } else {
- if (val <= 0)
- return 0;
- if (val >= 127750)
- return 0x7FC0;
- return (val + 125) / 250 * 64;
}
+ if (val <= 0)
+ return 0;
+ if (val >= 127750)
+ return 0x7FC0;
+ return (val + 125) / 250 * 64;
}
/*
@@ -766,6 +843,9 @@ static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
struct lm90_data *data = lm90_update_device(dev);
int temp;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
if (data->kind == adt7461 || data->kind == tmp451)
temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
else if (data->kind == max6646)
@@ -832,6 +912,9 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
struct lm90_data *data = lm90_update_device(dev);
int temp;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
if (data->kind == adt7461 || data->kind == tmp451)
temp = temp_from_u16_adt7461(data, data->temp11[attr->index]);
else if (data->kind == max6646)
@@ -907,6 +990,9 @@ static ssize_t show_temphyst(struct device *dev,
struct lm90_data *data = lm90_update_device(dev);
int temp;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
if (data->kind == adt7461 || data->kind == tmp451)
temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
else if (data->kind == max6646)
@@ -953,6 +1039,10 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct lm90_data *data = lm90_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->alarms);
}
@@ -963,6 +1053,9 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute
struct lm90_data *data = lm90_update_device(dev);
int bitnr = attr->index;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}
@@ -1404,8 +1497,11 @@ static int lm90_detect(struct i2c_client *client,
return 0;
}
-static void lm90_restore_conf(struct i2c_client *client, struct lm90_data *data)
+static void lm90_restore_conf(void *_data)
{
+ struct lm90_data *data = _data;
+ struct i2c_client *client = data->client;
+
/* Restore initial configuration */
i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE,
data->convrate_orig);
@@ -1413,24 +1509,22 @@ static void lm90_restore_conf(struct i2c_client *client, struct lm90_data *data)
data->config_orig);
}
-static void lm90_init_client(struct i2c_client *client, struct lm90_data *data)
+static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
{
- u8 config, convrate;
+ int config, convrate;
- if (lm90_read_reg(client, LM90_REG_R_CONVRATE, &convrate) < 0) {
- dev_warn(&client->dev, "Failed to read convrate register!\n");
- convrate = LM90_DEF_CONVRATE_RVAL;
- }
+ convrate = lm90_read_reg(client, LM90_REG_R_CONVRATE);
+ if (convrate < 0)
+ return convrate;
data->convrate_orig = convrate;
/*
* Start the conversions.
*/
lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
- if (lm90_read_reg(client, LM90_REG_R_CONFIG1, &config) < 0) {
- dev_warn(&client->dev, "Initialization failed!\n");
- return;
- }
+ config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+ if (config < 0)
+ return config;
data->config_orig = config;
/* Check Temperature Range Select */
@@ -1456,17 +1550,26 @@ static void lm90_init_client(struct i2c_client *client, struct lm90_data *data)
config &= 0xBF; /* run */
if (config != data->config_orig) /* Only write if changed */
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
+
+ devm_add_action(&client->dev, lm90_restore_conf, data);
+
+ return 0;
}
static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
{
struct lm90_data *data = i2c_get_clientdata(client);
- u8 st, st2 = 0;
+ int st, st2 = 0;
- lm90_read_reg(client, LM90_REG_R_STATUS, &st);
+ st = lm90_read_reg(client, LM90_REG_R_STATUS);
+ if (st < 0)
+ return false;
- if (data->kind == max6696)
- lm90_read_reg(client, MAX6696_REG_R_STATUS2, &st2);
+ if (data->kind == max6696) {
+ st2 = lm90_read_reg(client, MAX6696_REG_R_STATUS2);
+ if (st2 < 0)
+ return false;
+ }
*status = st | (st2 << 8);
@@ -1506,6 +1609,16 @@ static irqreturn_t lm90_irq_thread(int irq, void *dev_id)
return IRQ_NONE;
}
+static void lm90_remove_pec(void *dev)
+{
+ device_remove_file(dev, &dev_attr_pec);
+}
+
+static void lm90_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
static int lm90_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1513,6 +1626,7 @@ static int lm90_probe(struct i2c_client *client,
struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
struct lm90_data *data;
struct regulator *regulator;
+ struct device *hwmon_dev;
int groups = 0;
int err;
@@ -1526,6 +1640,8 @@ static int lm90_probe(struct i2c_client *client,
return err;
}
+ devm_add_action(dev, lm90_regulator_disable, regulator);
+
data = devm_kzalloc(dev, sizeof(struct lm90_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1534,8 +1650,6 @@ static int lm90_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- data->regulator = regulator;
-
/* Set the device type */
data->kind = id->driver_data;
if (data->kind == adm1032) {
@@ -1557,7 +1671,11 @@ static int lm90_probe(struct i2c_client *client,
data->max_convrate = lm90_params[data->kind].max_convrate;
/* Initialize the LM90 chip */
- lm90_init_client(client, data);
+ err = lm90_init_client(client, data);
+ if (err < 0) {
+ dev_err(dev, "Failed to initialize device\n");
+ return err;
+ }
/* Register sysfs hooks */
data->groups[groups++] = &lm90_group;
@@ -1577,15 +1695,14 @@ static int lm90_probe(struct i2c_client *client,
if (client->flags & I2C_CLIENT_PEC) {
err = device_create_file(dev, &dev_attr_pec);
if (err)
- goto exit_restore;
+ return err;
+ devm_add_action(dev, lm90_remove_pec, dev);
}
- data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
- data, data->groups);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_pec;
- }
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
if (client->irq) {
dev_dbg(dev, "IRQ: %d\n", client->irq);
@@ -1595,33 +1712,11 @@ static int lm90_probe(struct i2c_client *client,
"lm90", client);
if (err < 0) {
dev_err(dev, "cannot request IRQ %d\n", client->irq);
- goto exit_unregister;
+ return err;
}
}
return 0;
-
-exit_unregister:
- hwmon_device_unregister(data->hwmon_dev);
-exit_remove_pec:
- device_remove_file(dev, &dev_attr_pec);
-exit_restore:
- lm90_restore_conf(client, data);
- regulator_disable(data->regulator);
-
- return err;
-}
-
-static int lm90_remove(struct i2c_client *client)
-{
- struct lm90_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- device_remove_file(&client->dev, &dev_attr_pec);
- lm90_restore_conf(client, data);
- regulator_disable(data->regulator);
-
- return 0;
}
static void lm90_alert(struct i2c_client *client, unsigned int flag)
@@ -1636,13 +1731,16 @@ static void lm90_alert(struct i2c_client *client, unsigned int flag)
*/
struct lm90_data *data = i2c_get_clientdata(client);
- if ((data->flags & LM90_HAVE_BROKEN_ALERT)
- && (alarms & data->alert_alarms)) {
- u8 config;
+ if ((data->flags & LM90_HAVE_BROKEN_ALERT) &&
+ (alarms & data->alert_alarms)) {
+ int config;
+
dev_dbg(&client->dev, "Disabling ALERT#\n");
- lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
- config | 0x80);
+ config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+ if (config >= 0)
+ i2c_smbus_write_byte_data(client,
+ LM90_REG_W_CONFIG1,
+ config | 0x80);
}
} else {
dev_info(&client->dev, "Everything OK\n");
@@ -1655,7 +1753,6 @@ static struct i2c_driver lm90_driver = {
.name = "lm90",
},
.probe = lm90_probe,
- .remove = lm90_remove,
.alert = lm90_alert,
.id_table = lm90_id,
.detect = lm90_detect,
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
new file mode 100644
index 000000000000..b73a48832732
--- /dev/null
+++ b/drivers/hwmon/sht3x.c
@@ -0,0 +1,775 @@
+/* Sensirion SHT3x-DIS humidity and temperature sensor driver.
+ * The SHT3x comes in many different versions; this driver is for the
+ * I2C version only.
+ *
+ * Copyright (C) 2016 Sensirion AG, Switzerland
+ * Author: David Frey <david.frey@sensirion.com>
+ * Author: Pascal Sachs <pascal.sachs@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/platform_data/sht3x.h>
+
+/* commands (high precision mode) */
+static const unsigned char sht3x_cmd_measure_blocking_hpm[] = { 0x2c, 0x06 };
+static const unsigned char sht3x_cmd_measure_nonblocking_hpm[] = { 0x24, 0x00 };
+
+/* commands (low power mode) */
+static const unsigned char sht3x_cmd_measure_blocking_lpm[] = { 0x2c, 0x10 };
+static const unsigned char sht3x_cmd_measure_nonblocking_lpm[] = { 0x24, 0x16 };
+
+/* commands for periodic mode */
+static const unsigned char sht3x_cmd_measure_periodic_mode[] = { 0xe0, 0x00 };
+static const unsigned char sht3x_cmd_break[] = { 0x30, 0x93 };
+
+/* commands for heater control */
+static const unsigned char sht3x_cmd_heater_on[] = { 0x30, 0x6d };
+static const unsigned char sht3x_cmd_heater_off[] = { 0x30, 0x66 };
+
+/* other commands */
+static const unsigned char sht3x_cmd_read_status_reg[] = { 0xf3, 0x2d };
+static const unsigned char sht3x_cmd_clear_status_reg[] = { 0x30, 0x41 };
+
+/* delays for non-blocking i2c commands, both in us */
+#define SHT3X_NONBLOCKING_WAIT_TIME_HPM 15000
+#define SHT3X_NONBLOCKING_WAIT_TIME_LPM 4000
+
+#define SHT3X_WORD_LEN 2
+#define SHT3X_CMD_LENGTH 2
+#define SHT3X_CRC8_LEN 1
+#define SHT3X_RESPONSE_LENGTH 6
+#define SHT3X_CRC8_POLYNOMIAL 0x31
+#define SHT3X_CRC8_INIT 0xFF
+#define SHT3X_MIN_TEMPERATURE -45000
+#define SHT3X_MAX_TEMPERATURE 130000
+#define SHT3X_MIN_HUMIDITY 0
+#define SHT3X_MAX_HUMIDITY 100000
+
+enum sht3x_chips {
+ sht3x,
+ sts3x,
+};
+
+enum sht3x_limits {
+ limit_max = 0,
+ limit_max_hyst,
+ limit_min,
+ limit_min_hyst,
+};
+
+DECLARE_CRC8_TABLE(sht3x_crc8_table);
+
+/* periodic measure commands (high precision mode) */
+static const char periodic_measure_commands_hpm[][SHT3X_CMD_LENGTH] = {
+ /* 0.5 measurements per second */
+ {0x20, 0x32},
+ /* 1 measurement per second */
+ {0x21, 0x30},
+ /* 2 measurements per second */
+ {0x22, 0x36},
+ /* 4 measurements per second */
+ {0x23, 0x34},
+ /* 10 measurements per second */
+ {0x27, 0x37},
+};
+
+/* periodic measure commands (low power mode) */
+static const char periodic_measure_commands_lpm[][SHT3X_CMD_LENGTH] = {
+ /* 0.5 measurements per second */
+ {0x20, 0x2f},
+ /* 1 measurement per second */
+ {0x21, 0x2d},
+ /* 2 measurements per second */
+ {0x22, 0x2b},
+ /* 4 measurements per second */
+ {0x23, 0x29},
+ /* 10 measurements per second */
+ {0x27, 0x2a},
+};
+
+struct sht3x_limit_commands {
+ const char read_command[SHT3X_CMD_LENGTH];
+ const char write_command[SHT3X_CMD_LENGTH];
+};
+
+static const struct sht3x_limit_commands limit_commands[] = {
+ /* temp1_max, humidity1_max */
+ [limit_max] = { {0xe1, 0x1f}, {0x61, 0x1d} },
+ /* temp1_max_hyst, humidity1_max_hyst */
+ [limit_max_hyst] = { {0xe1, 0x14}, {0x61, 0x16} },
+ /* temp1_min, humidity1_min */
+ [limit_min] = { {0xe1, 0x02}, {0x61, 0x00} },
+ /* temp1_min_hyst, humidity1_min_hyst */
+ [limit_min_hyst] = { {0xe1, 0x09}, {0x61, 0x0B} },
+};
+
+#define SHT3X_NUM_LIMIT_CMD ARRAY_SIZE(limit_commands)
+
+static const u16 mode_to_update_interval[] = {
+ 0,
+ 2000,
+ 1000,
+ 500,
+ 250,
+ 100,
+};
+
+struct sht3x_data {
+ struct i2c_client *client;
+ struct mutex i2c_lock; /* lock for sending i2c commands */
+ struct mutex data_lock; /* lock for updating driver data */
+
+ u8 mode;
+ const unsigned char *command;
+ u32 wait_time; /* in us */
+ unsigned long last_update; /* last update in periodic mode */
+
+ struct sht3x_platform_data setup;
+
+ /*
+ * cached values for temperature and humidity and limits
+ * the limits arrays have the following order:
+ * max, max_hyst, min, min_hyst
+ */
+ int temperature;
+ int temperature_limits[SHT3X_NUM_LIMIT_CMD];
+ u32 humidity;
+ u32 humidity_limits[SHT3X_NUM_LIMIT_CMD];
+};
+
+static u8 get_mode_from_update_interval(u16 value)
+{
+ size_t index;
+ u8 number_of_modes = ARRAY_SIZE(mode_to_update_interval);
+
+ if (value == 0)
+ return 0;
+
+ /* find next faster update interval */
+ for (index = 1; index < number_of_modes; index++) {
+ if (mode_to_update_interval[index] <= value)
+ return index;
+ }
+
+ return number_of_modes - 1;
+}
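+
+/*
+ * For example, a requested interval of 300 ms maps to mode 4 (250 ms),
+ * the next faster supported interval; any non-zero value below 100 ms
+ * maps to the fastest mode, 5 (100 ms).
+ */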
+
+static int sht3x_read_from_command(struct i2c_client *client,
+ struct sht3x_data *data,
+ const char *command,
+ char *buf, int length, u32 wait_time)
+{
+ int ret;
+
+ mutex_lock(&data->i2c_lock);
+ ret = i2c_master_send(client, command, SHT3X_CMD_LENGTH);
+
+ if (ret != SHT3X_CMD_LENGTH) {
+ ret = ret < 0 ? ret : -EIO;
+ goto out;
+ }
+
+ if (wait_time)
+ usleep_range(wait_time, wait_time + 1000);
+
+ ret = i2c_master_recv(client, buf, length);
+ if (ret != length) {
+ ret = ret < 0 ? ret : -EIO;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ mutex_unlock(&data->i2c_lock);
+ return ret;
+}
+
+static int sht3x_extract_temperature(u16 raw)
+{
+ /*
+ * From datasheet:
+ * T = -45 + 175 * ST / 2^16
+ * Adapted for integer fixed point (3 digit) arithmetic.
+ */
+ return ((21875 * (int)raw) >> 13) - 45000;
+}
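+
+/*
+ * Sanity check of the scaling (21875 / 8192 == 175000 / 65536):
+ * raw = 26214 (0x6666) gives ((21875 * 26214) >> 13) - 45000 = 24998,
+ * i.e. about 24.999 degrees C, matching -45 + 175 * 26214 / 65536.
+ */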
+
+static u32 sht3x_extract_humidity(u16 raw)
+{
+ /*
+ * From datasheet:
+ * RH = 100 * SRH / 2^16
+ * Adapted for integer fixed point (3 digit) arithmetic.
+ */
+ return (12500 * (u32)raw) >> 13;
+}
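+
+/*
+ * Likewise for humidity (12500 / 8192 == 100000 / 65536): raw = 32768
+ * (0x8000) gives (12500 * 32768) >> 13 = 50000, i.e. 50.000 %RH.
+ */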
+
+static struct sht3x_data *sht3x_update_client(struct device *dev)
+{
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ u16 interval_ms = mode_to_update_interval[data->mode];
+ unsigned long interval_jiffies = msecs_to_jiffies(interval_ms);
+ unsigned char buf[SHT3X_RESPONSE_LENGTH];
+ u16 val;
+ int ret = 0;
+
+ mutex_lock(&data->data_lock);
+ /*
+ * Only update cached readings once per update interval in periodic
+ * mode. In single shot mode the sensor measures values on demand, so
+ * every time the sysfs interface is called, a measurement is triggered.
+ * In periodic mode however, the measurement process is handled
+ * internally by the sensor and reading out sensor values only makes
+ * sense if a new reading is available.
+ */
+ if (time_after(jiffies, data->last_update + interval_jiffies)) {
+ ret = sht3x_read_from_command(client, data, data->command, buf,
+ sizeof(buf), data->wait_time);
+ if (ret)
+ goto out;
+
+ val = be16_to_cpup((__be16 *)buf);
+ data->temperature = sht3x_extract_temperature(val);
+ val = be16_to_cpup((__be16 *)(buf + 3));
+ data->humidity = sht3x_extract_humidity(val);
+ data->last_update = jiffies;
+ }
+
+out:
+ mutex_unlock(&data->data_lock);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return data;
+}
+
+/* sysfs attributes */
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sht3x_data *data = sht3x_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temperature);
+}
+
+static ssize_t humidity1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sht3x_data *data = sht3x_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%u\n", data->humidity);
+}
+
+/*
+ * limits_update must only be called from probe or with data_lock held
+ */
+static int limits_update(struct sht3x_data *data)
+{
+ int ret;
+ u8 index;
+ int temperature;
+ u32 humidity;
+ u16 raw;
+ char buffer[SHT3X_RESPONSE_LENGTH];
+ const struct sht3x_limit_commands *commands;
+ struct i2c_client *client = data->client;
+
+ for (index = 0; index < SHT3X_NUM_LIMIT_CMD; index++) {
+ commands = &limit_commands[index];
+ ret = sht3x_read_from_command(client, data,
+ commands->read_command, buffer,
+ SHT3X_RESPONSE_LENGTH, 0);
+
+ if (ret)
+ return ret;
+
+ raw = be16_to_cpup((__be16 *)buffer);
+ temperature = sht3x_extract_temperature((raw & 0x01ff) << 7);
+ humidity = sht3x_extract_humidity(raw & 0xfe00);
+ data->temperature_limits[index] = temperature;
+ data->humidity_limits[index] = humidity;
+ }
+
+ return ret;
+}
+
+static ssize_t temp1_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ u8 index = to_sensor_dev_attr(attr)->index;
+ int temperature_limit = data->temperature_limits[index];
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", temperature_limit);
+}
+
+static ssize_t humidity1_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ u8 index = to_sensor_dev_attr(attr)->index;
+ u32 humidity_limit = data->humidity_limits[index];
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", humidity_limit);
+}
+
+/*
+ * limit_store must only be called with data_lock held
+ */
+static ssize_t limit_store(struct device *dev,
+ size_t count,
+ u8 index,
+ int temperature,
+ u32 humidity)
+{
+ char buffer[SHT3X_CMD_LENGTH + SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+ char *position = buffer;
+ int ret;
+ u16 raw;
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ const struct sht3x_limit_commands *commands;
+
+ commands = &limit_commands[index];
+
+ memcpy(position, commands->write_command, SHT3X_CMD_LENGTH);
+ position += SHT3X_CMD_LENGTH;
+ /*
+ * ST = (T + 45) / 175 * 2^16
+ * SRH = RH / 100 * 2^16
+ * adapted for fixed point arithmetic and packed the same as
+ * in limits_update()
+ */
+ raw = ((u32)(temperature + 45000) * 24543) >> (16 + 7);
+ raw |= ((humidity * 42950) >> 16) & 0xfe00;
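+
+ /*
+ * For example, temperature = 25000 packs to
+ * ((25000 + 45000) * 24543) >> 23 = 204 (the top 9 bits of
+ * ST = 26214), and humidity = 50000 packs to 0x8000 (SRH = 32768).
+ */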
+
+ *((__be16 *)position) = cpu_to_be16(raw);
+ position += SHT3X_WORD_LEN;
+ *position = crc8(sht3x_crc8_table,
+ position - SHT3X_WORD_LEN,
+ SHT3X_WORD_LEN,
+ SHT3X_CRC8_INIT);
+
+ mutex_lock(&data->i2c_lock);
+ ret = i2c_master_send(client, buffer, sizeof(buffer));
+ mutex_unlock(&data->i2c_lock);
+
+ if (ret != sizeof(buffer))
+ return ret < 0 ? ret : -EIO;
+
+ data->temperature_limits[index] = temperature;
+ data->humidity_limits[index] = humidity;
+ return count;
+}
+
+static ssize_t temp1_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int temperature;
+ int ret;
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ u8 index = to_sensor_dev_attr(attr)->index;
+
+ ret = kstrtoint(buf, 0, &temperature);
+ if (ret)
+ return ret;
+
+ temperature = clamp_val(temperature, SHT3X_MIN_TEMPERATURE,
+ SHT3X_MAX_TEMPERATURE);
+ mutex_lock(&data->data_lock);
+ ret = limit_store(dev, count, index, temperature,
+ data->humidity_limits[index]);
+ mutex_unlock(&data->data_lock);
+
+ return ret;
+}
+
+static ssize_t humidity1_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ u32 humidity;
+ int ret;
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ u8 index = to_sensor_dev_attr(attr)->index;
+
+ ret = kstrtou32(buf, 0, &humidity);
+ if (ret)
+ return ret;
+
+ humidity = clamp_val(humidity, SHT3X_MIN_HUMIDITY, SHT3X_MAX_HUMIDITY);
+ mutex_lock(&data->data_lock);
+ ret = limit_store(dev, count, index, data->temperature_limits[index],
+ humidity);
+ mutex_unlock(&data->data_lock);
+
+ return ret;
+}
+
+static void sht3x_select_command(struct sht3x_data *data)
+{
+ /*
+ * In blocking mode (clock stretching mode) the I2C bus
+ * is blocked for other traffic, thus the call to i2c_master_recv()
+ * will wait until the data is ready. For non-blocking mode, we
+ * have to wait ourselves.
+ */
+ if (data->mode > 0) {
+ data->command = sht3x_cmd_measure_periodic_mode;
+ data->wait_time = 0;
+ } else if (data->setup.blocking_io) {
+ data->command = data->setup.high_precision ?
+ sht3x_cmd_measure_blocking_hpm :
+ sht3x_cmd_measure_blocking_lpm;
+ data->wait_time = 0;
+ } else {
+ if (data->setup.high_precision) {
+ data->command = sht3x_cmd_measure_nonblocking_hpm;
+ data->wait_time = SHT3X_NONBLOCKING_WAIT_TIME_HPM;
+ } else {
+ data->command = sht3x_cmd_measure_nonblocking_lpm;
+ data->wait_time = SHT3X_NONBLOCKING_WAIT_TIME_LPM;
+ }
+ }
+}
+
+static int status_register_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer, int length)
+{
+ int ret;
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ ret = sht3x_read_from_command(client, data, sht3x_cmd_read_status_reg,
+ buffer, length, 0);
+
+ return ret;
+}
+
+static ssize_t temp1_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+ int ret;
+
+ ret = status_register_read(dev, attr, buffer,
+ SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x04));
+}
+
+static ssize_t humidity1_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+ int ret;
+
+ ret = status_register_read(dev, attr, buffer,
+ SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x08));
+}
+
+static ssize_t heater_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+ int ret;
+
+ ret = status_register_read(dev, attr, buffer,
+ SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x20));
+}
+
+static ssize_t heater_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret;
+ bool status;
+
+ ret = kstrtobool(buf, &status);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->i2c_lock);
+
+ if (status)
+ ret = i2c_master_send(client, (char *)&sht3x_cmd_heater_on,
+ SHT3X_CMD_LENGTH);
+ else
+ ret = i2c_master_send(client, (char *)&sht3x_cmd_heater_off,
+ SHT3X_CMD_LENGTH);
+
+ mutex_unlock(&data->i2c_lock);
+
+ if (ret != SHT3X_CMD_LENGTH)
+ return ret < 0 ? ret : -EIO;
+
+ return count;
+}
+
+static ssize_t update_interval_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sht3x_data *data = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ mode_to_update_interval[data->mode]);
+}
+
+static ssize_t update_interval_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ u16 update_interval;
+ u8 mode;
+ int ret;
+ const char *command;
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ ret = kstrtou16(buf, 0, &update_interval);
+ if (ret)
+ return ret;
+
+ mode = get_mode_from_update_interval(update_interval);
+
+ mutex_lock(&data->data_lock);
+ /* mode did not change */
+ if (mode == data->mode) {
+ mutex_unlock(&data->data_lock);
+ return count;
+ }
+
+ mutex_lock(&data->i2c_lock);
+ /*
+ * Abort periodic measure mode.
+ * To do any changes to the configuration while in periodic mode, we
+ * have to send a break command to the sensor, which then falls back
+ * to single shot (mode = 0).
+ */
+ if (data->mode > 0) {
+ ret = i2c_master_send(client, sht3x_cmd_break,
+ SHT3X_CMD_LENGTH);
+ if (ret != SHT3X_CMD_LENGTH)
+ goto out;
+ data->mode = 0;
+ }
+
+ if (mode > 0) {
+ if (data->setup.high_precision)
+ command = periodic_measure_commands_hpm[mode - 1];
+ else
+ command = periodic_measure_commands_lpm[mode - 1];
+
+ /* select mode */
+ ret = i2c_master_send(client, command, SHT3X_CMD_LENGTH);
+ if (ret != SHT3X_CMD_LENGTH)
+ goto out;
+ }
+
+ /* select mode and command */
+ data->mode = mode;
+ sht3x_select_command(data);
+
+out:
+ mutex_unlock(&data->i2c_lock);
+ mutex_unlock(&data->data_lock);
+ if (ret != SHT3X_CMD_LENGTH)
+ return ret < 0 ? ret : -EIO;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, temp1_input_show, NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, humidity1_input_show,
+ NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
+ temp1_limit_show, temp1_limit_store,
+ limit_max);
+static SENSOR_DEVICE_ATTR(humidity1_max, S_IRUGO | S_IWUSR,
+ humidity1_limit_show, humidity1_limit_store,
+ limit_max);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
+ temp1_limit_show, temp1_limit_store,
+ limit_max_hyst);
+static SENSOR_DEVICE_ATTR(humidity1_max_hyst, S_IRUGO | S_IWUSR,
+ humidity1_limit_show, humidity1_limit_store,
+ limit_max_hyst);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR,
+ temp1_limit_show, temp1_limit_store,
+ limit_min);
+static SENSOR_DEVICE_ATTR(humidity1_min, S_IRUGO | S_IWUSR,
+ humidity1_limit_show, humidity1_limit_store,
+ limit_min);
+static SENSOR_DEVICE_ATTR(temp1_min_hyst, S_IRUGO | S_IWUSR,
+ temp1_limit_show, temp1_limit_store,
+ limit_min_hyst);
+static SENSOR_DEVICE_ATTR(humidity1_min_hyst, S_IRUGO | S_IWUSR,
+ humidity1_limit_show, humidity1_limit_store,
+ limit_min_hyst);
+static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, temp1_alarm_show, NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_alarm, S_IRUGO, humidity1_alarm_show,
+ NULL, 0);
+static SENSOR_DEVICE_ATTR(heater_enable, S_IRUGO | S_IWUSR,
+ heater_enable_show, heater_enable_store, 0);
+static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
+ update_interval_show, update_interval_store, 0);
+
+static struct attribute *sht3x_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_humidity1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_humidity1_max.dev_attr.attr,
+ &sensor_dev_attr_humidity1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_hyst.dev_attr.attr,
+ &sensor_dev_attr_humidity1_min.dev_attr.attr,
+ &sensor_dev_attr_humidity1_min_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_alarm.dev_attr.attr,
+ &sensor_dev_attr_humidity1_alarm.dev_attr.attr,
+ &sensor_dev_attr_heater_enable.dev_attr.attr,
+ &sensor_dev_attr_update_interval.dev_attr.attr,
+ NULL
+};
+
+static struct attribute *sts3x_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(sht3x);
+ATTRIBUTE_GROUPS(sts3x);
+
+static int sht3x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct sht3x_data *data;
+ struct device *hwmon_dev;
+ struct i2c_adapter *adap = client->adapter;
+ struct device *dev = &client->dev;
+ const struct attribute_group **attribute_groups;
+
+ /*
+ * we require full i2c support since the sht3x uses multi-byte reads and
+ * writes as well as multi-byte commands, which are not supported by
+ * the SMBus protocol
+ */
+ if (!i2c_check_functionality(adap, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = i2c_master_send(client, sht3x_cmd_clear_status_reg,
+ SHT3X_CMD_LENGTH);
+ if (ret != SHT3X_CMD_LENGTH)
+ return ret < 0 ? ret : -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->setup.blocking_io = false;
+ data->setup.high_precision = true;
+ data->mode = 0;
+ data->last_update = 0;
+ data->client = client;
+ crc8_populate_msb(sht3x_crc8_table, SHT3X_CRC8_POLYNOMIAL);
+
+ if (dev->platform_data)
+ data->setup = *(struct sht3x_platform_data *)dev->platform_data;
+
+ sht3x_select_command(data);
+
+ mutex_init(&data->i2c_lock);
+ mutex_init(&data->data_lock);
+
+ ret = limits_update(data);
+ if (ret)
+ return ret;
+
+ if (id->driver_data == sts3x)
+ attribute_groups = sts3x_groups;
+ else
+ attribute_groups = sht3x_groups;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+ client->name,
+ data,
+ attribute_groups);
+
+ if (IS_ERR(hwmon_dev))
+ dev_dbg(dev, "unable to register hwmon device\n");
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+/* device ID table */
+static const struct i2c_device_id sht3x_ids[] = {
+ {"sht3x", sht3x},
+ {"sts3x", sts3x},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, sht3x_ids);
+
+static struct i2c_driver sht3x_i2c_driver = {
+ .driver.name = "sht3x",
+ .probe = sht3x_probe,
+ .id_table = sht3x_ids,
+};
+
+module_i2c_driver(sht3x_i2c_driver);
+
+MODULE_AUTHOR("David Frey <david.frey@sensirion.com>");
+MODULE_AUTHOR("Pascal Sachs <pascal.sachs@sensirion.com>");
+MODULE_DESCRIPTION("Sensirion SHT3x humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index f1e96fd7f445..a942a2574a4d 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -11,12 +11,9 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -27,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/jiffies.h>
+#include <linux/regmap.h>
#include <linux/thermal.h>
#include <linux/of.h>
@@ -50,14 +48,23 @@
#define TMP102_TLOW_REG 0x02
#define TMP102_THIGH_REG 0x03
+#define TMP102_CONFREG_MASK (TMP102_CONF_SD | TMP102_CONF_TM | \
+ TMP102_CONF_POL | TMP102_CONF_F0 | \
+ TMP102_CONF_F1 | TMP102_CONF_OS | \
+ TMP102_CONF_EM | TMP102_CONF_AL | \
+ TMP102_CONF_CR0 | TMP102_CONF_CR1)
+
+#define TMP102_CONFIG_CLEAR (TMP102_CONF_SD | TMP102_CONF_OS | \
+ TMP102_CONF_CR0)
+#define TMP102_CONFIG_SET (TMP102_CONF_TM | TMP102_CONF_EM | \
+ TMP102_CONF_CR1)
+
+#define CONVERSION_TIME_MS 35 /* in milliseconds */
+
struct tmp102 {
- struct i2c_client *client;
- struct device *hwmon_dev;
- struct mutex lock;
+ struct regmap *regmap;
u16 config_orig;
- unsigned long last_update;
- int temp[3];
- bool first_time;
+ unsigned long ready_time;
};
/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
@@ -72,44 +79,22 @@ static inline u16 tmp102_mC_to_reg(int val)
return (val * 128) / 1000;
}
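+
+/*
+ * For example, 25000 (millidegrees C) maps to (25000 * 128) / 1000 =
+ * 3200 = 0x0c80, i.e. 25 degrees C in the left-adjusted format
+ * (1 degree C per 128 register counts).
+ */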
-static const u8 tmp102_reg[] = {
- TMP102_TEMP_REG,
- TMP102_TLOW_REG,
- TMP102_THIGH_REG,
-};
-
-static struct tmp102 *tmp102_update_device(struct device *dev)
-{
- struct tmp102 *tmp102 = dev_get_drvdata(dev);
- struct i2c_client *client = tmp102->client;
-
- mutex_lock(&tmp102->lock);
- if (time_after(jiffies, tmp102->last_update + HZ / 3)) {
- int i;
- for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) {
- int status = i2c_smbus_read_word_swapped(client,
- tmp102_reg[i]);
- if (status > -1)
- tmp102->temp[i] = tmp102_reg_to_mC(status);
- }
- tmp102->last_update = jiffies;
- tmp102->first_time = false;
- }
- mutex_unlock(&tmp102->lock);
- return tmp102;
-}
-
static int tmp102_read_temp(void *dev, int *temp)
{
- struct tmp102 *tmp102 = tmp102_update_device(dev);
+ struct tmp102 *tmp102 = dev_get_drvdata(dev);
+ unsigned int reg;
+ int ret;
- /* Is it too early even to return a conversion? */
- if (tmp102->first_time) {
+ if (time_before(jiffies, tmp102->ready_time)) {
dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
return -EAGAIN;
}
- *temp = tmp102->temp[0];
+ ret = regmap_read(tmp102->regmap, TMP102_TEMP_REG, &reg);
+ if (ret < 0)
+ return ret;
+
+ *temp = tmp102_reg_to_mC(reg);
return 0;
}
@@ -119,13 +104,20 @@ static ssize_t tmp102_show_temp(struct device *dev,
char *buf)
{
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
- struct tmp102 *tmp102 = tmp102_update_device(dev);
+ struct tmp102 *tmp102 = dev_get_drvdata(dev);
+ int regaddr = sda->index;
+ unsigned int reg;
+ int err;
- /* Is it too early even to return a read? */
- if (tmp102->first_time)
+ if (regaddr == TMP102_TEMP_REG &&
+ time_before(jiffies, tmp102->ready_time))
return -EAGAIN;
- return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
+ err = regmap_read(tmp102->regmap, regaddr, &reg);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%d\n", tmp102_reg_to_mC(reg));
}
static ssize_t tmp102_set_temp(struct device *dev,
@@ -134,29 +126,26 @@ static ssize_t tmp102_set_temp(struct device *dev,
{
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct tmp102 *tmp102 = dev_get_drvdata(dev);
- struct i2c_client *client = tmp102->client;
+ int reg = sda->index;
long val;
- int status;
+ int err;
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
val = clamp_val(val, -256000, 255000);
- mutex_lock(&tmp102->lock);
- tmp102->temp[sda->index] = val;
- status = i2c_smbus_write_word_swapped(client, tmp102_reg[sda->index],
- tmp102_mC_to_reg(val));
- mutex_unlock(&tmp102->lock);
- return status ? : count;
+ err = regmap_write(tmp102->regmap, reg, tmp102_mC_to_reg(val));
+ return err ? : count;
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL , 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL,
+ TMP102_TEMP_REG);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp,
- tmp102_set_temp, 1);
+ tmp102_set_temp, TMP102_TLOW_REG);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp,
- tmp102_set_temp, 2);
+ tmp102_set_temp, TMP102_THIGH_REG);
static struct attribute *tmp102_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -166,20 +155,46 @@ static struct attribute *tmp102_attrs[] = {
};
ATTRIBUTE_GROUPS(tmp102);
-#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
-#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
-
static const struct thermal_zone_of_device_ops tmp102_of_thermal_ops = {
.get_temp = tmp102_read_temp,
};
+static void tmp102_restore_config(void *data)
+{
+ struct tmp102 *tmp102 = data;
+
+ regmap_write(tmp102->regmap, TMP102_CONF_REG, tmp102->config_orig);
+}
+
+static bool tmp102_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != TMP102_TEMP_REG;
+}
+
+static bool tmp102_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg == TMP102_TEMP_REG;
+}
+
+static const struct regmap_config tmp102_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP102_THIGH_REG,
+ .writeable_reg = tmp102_is_writeable_reg,
+ .volatile_reg = tmp102_is_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
+};
+
static int tmp102_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct tmp102 *tmp102;
- int status;
+ unsigned int regval;
+ int err;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -193,101 +208,82 @@ static int tmp102_probe(struct i2c_client *client,
return -ENOMEM;
i2c_set_clientdata(client, tmp102);
- tmp102->client = client;
- status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
- if (status < 0) {
+ tmp102->regmap = devm_regmap_init_i2c(client, &tmp102_regmap_config);
+ if (IS_ERR(tmp102->regmap))
+ return PTR_ERR(tmp102->regmap);
+
+ err = regmap_read(tmp102->regmap, TMP102_CONF_REG, &regval);
+ if (err < 0) {
dev_err(dev, "error reading config register\n");
- return status;
+ return err;
}
- tmp102->config_orig = status;
- status = i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
- TMP102_CONFIG);
- if (status < 0) {
- dev_err(dev, "error writing config register\n");
- goto fail_restore_config;
+
+ if ((regval & ~TMP102_CONFREG_MASK) !=
+ (TMP102_CONF_R0 | TMP102_CONF_R1)) {
+ dev_err(dev, "unexpected config register value\n");
+ return -ENODEV;
}
- status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
- if (status < 0) {
- dev_err(dev, "error reading config register\n");
- goto fail_restore_config;
+
+ tmp102->config_orig = regval;
+
+ devm_add_action(dev, tmp102_restore_config, tmp102);
+
+ regval &= ~TMP102_CONFIG_CLEAR;
+ regval |= TMP102_CONFIG_SET;
+
+ err = regmap_write(tmp102->regmap, TMP102_CONF_REG, regval);
+ if (err < 0) {
+ dev_err(dev, "error writing config register\n");
+ return err;
}
- status &= ~TMP102_CONFIG_RD_ONLY;
- if (status != TMP102_CONFIG) {
- dev_err(dev, "config settings did not stick\n");
- status = -ENODEV;
- goto fail_restore_config;
+
+ tmp102->ready_time = jiffies;
+ if (tmp102->config_orig & TMP102_CONF_SD) {
+ /*
+ * Mark that we are not ready with data until the first
+ * conversion is complete
+ */
+ tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
}
- tmp102->last_update = jiffies;
- /* Mark that we are not ready with data until conversion is complete */
- tmp102->first_time = true;
- mutex_init(&tmp102->lock);
- hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
- tmp102, tmp102_groups);
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ tmp102,
+ tmp102_groups);
if (IS_ERR(hwmon_dev)) {
dev_dbg(dev, "unable to register hwmon device\n");
- status = PTR_ERR(hwmon_dev);
- goto fail_restore_config;
+ return PTR_ERR(hwmon_dev);
}
- tmp102->hwmon_dev = hwmon_dev;
devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
&tmp102_of_thermal_ops);
dev_info(dev, "initialized\n");
return 0;
-
-fail_restore_config:
- i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
- tmp102->config_orig);
- return status;
-}
-
-static int tmp102_remove(struct i2c_client *client)
-{
- struct tmp102 *tmp102 = i2c_get_clientdata(client);
-
- hwmon_device_unregister(tmp102->hwmon_dev);
-
- /* Stop monitoring if device was stopped originally */
- if (tmp102->config_orig & TMP102_CONF_SD) {
- int config;
-
- config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
- if (config >= 0)
- i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
- config | TMP102_CONF_SD);
- }
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
static int tmp102_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- int config;
-
- config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
- if (config < 0)
- return config;
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
- config |= TMP102_CONF_SD;
- return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
+ return regmap_update_bits(tmp102->regmap, TMP102_CONF_REG,
+ TMP102_CONF_SD, TMP102_CONF_SD);
}
static int tmp102_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- int config;
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+ int err;
+
+ err = regmap_update_bits(tmp102->regmap, TMP102_CONF_REG,
+ TMP102_CONF_SD, 0);
- config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
- if (config < 0)
- return config;
+ tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS);
- config &= ~TMP102_CONF_SD;
- return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
+ return err;
}
#endif /* CONFIG_PM */
@@ -303,7 +299,6 @@ static struct i2c_driver tmp102_driver = {
.driver.name = DRIVER_NAME,
.driver.pm = &tmp102_dev_pm_ops,
.probe = tmp102_probe,
- .remove = tmp102_remove,
.id_table = tmp102_id,
};
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ccf4cffe0ee1..eeeed2c7d081 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -47,7 +47,7 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
0x4e, 0x4f, I2C_CLIENT_END };
-enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
+enum chips { tmp401, tmp411, tmp431, tmp432, tmp435, tmp461 };
/*
* The TMP401 registers, note some registers have different addresses for
@@ -62,31 +62,34 @@ enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
#define TMP401_MANUFACTURER_ID_REG 0xFE
#define TMP401_DEVICE_ID_REG 0xFF
-static const u8 TMP401_TEMP_MSB_READ[6][2] = {
+static const u8 TMP401_TEMP_MSB_READ[7][2] = {
{ 0x00, 0x01 }, /* temp */
{ 0x06, 0x08 }, /* low limit */
{ 0x05, 0x07 }, /* high limit */
{ 0x20, 0x19 }, /* therm (crit) limit */
{ 0x30, 0x34 }, /* lowest */
{ 0x32, 0x36 }, /* highest */
+ { 0, 0x11 }, /* offset */
};
-static const u8 TMP401_TEMP_MSB_WRITE[6][2] = {
+static const u8 TMP401_TEMP_MSB_WRITE[7][2] = {
{ 0, 0 }, /* temp (unused) */
{ 0x0C, 0x0E }, /* low limit */
{ 0x0B, 0x0D }, /* high limit */
{ 0x20, 0x19 }, /* therm (crit) limit */
{ 0x30, 0x34 }, /* lowest */
{ 0x32, 0x36 }, /* highest */
+ { 0, 0x11 }, /* offset */
};
-static const u8 TMP401_TEMP_LSB[6][2] = {
+static const u8 TMP401_TEMP_LSB[7][2] = {
{ 0x15, 0x10 }, /* temp */
{ 0x17, 0x14 }, /* low limit */
{ 0x16, 0x13 }, /* high limit */
{ 0, 0 }, /* therm (crit) limit (unused) */
{ 0x31, 0x35 }, /* lowest */
{ 0x33, 0x37 }, /* highest */
+ { 0, 0x12 }, /* offset */
};
static const u8 TMP432_TEMP_MSB_READ[4][3] = {
@@ -149,6 +152,7 @@ static const struct i2c_device_id tmp401_id[] = {
{ "tmp431", tmp431 },
{ "tmp432", tmp432 },
{ "tmp435", tmp435 },
+ { "tmp461", tmp461 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp401_id);
@@ -170,7 +174,7 @@ struct tmp401_data {
/* register values */
u8 status[4];
u8 config;
- u16 temp[6][3];
+ u16 temp[7][3];
u8 temp_crit_hyst;
};
@@ -613,6 +617,22 @@ static const struct attribute_group tmp432_group = {
};
/*
+ * Additional features of the TMP461 chip.
+ * The TMP461 temperature offset for the remote channel.
+ */
+static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IWUSR | S_IRUGO, show_temp,
+ store_temp, 6, 1);
+
+static struct attribute *tmp461_attributes[] = {
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tmp461_group = {
+ .attrs = tmp461_attributes,
+};
+
+/*
* Begin non sysfs callback code (aka Real code)
*/
@@ -714,7 +734,7 @@ static int tmp401_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
static const char * const names[] = {
- "TMP401", "TMP411", "TMP431", "TMP432", "TMP435"
+ "TMP401", "TMP411", "TMP431", "TMP432", "TMP435", "TMP461"
};
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -745,6 +765,9 @@ static int tmp401_probe(struct i2c_client *client,
if (data->kind == tmp432)
data->groups[groups++] = &tmp432_group;
+ if (data->kind == tmp461)
+ data->groups[groups++] = &tmp461_group;
+
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, data->groups);
if (IS_ERR(hwmon_dev))
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 1be543e8e42f..6f0a51a2c6ec 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -23,6 +23,7 @@
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include "intel_th.h"
@@ -67,23 +68,33 @@ static int intel_th_probe(struct device *dev)
hubdrv = to_intel_th_driver(hub->dev.driver);
+ pm_runtime_set_active(dev);
+ pm_runtime_no_callbacks(dev);
+ pm_runtime_enable(dev);
+
ret = thdrv->probe(to_intel_th_device(dev));
if (ret)
- return ret;
+ goto out_pm;
if (thdrv->attr_group) {
ret = sysfs_create_group(&thdev->dev.kobj, thdrv->attr_group);
- if (ret) {
- thdrv->remove(thdev);
-
- return ret;
- }
+ if (ret)
+ goto out;
}
if (thdev->type == INTEL_TH_OUTPUT &&
!intel_th_output_assigned(thdev))
+ /* does not talk to hardware */
ret = hubdrv->assign(hub, thdev);
+out:
+ if (ret)
+ thdrv->remove(thdev);
+
+out_pm:
+ if (ret)
+ pm_runtime_disable(dev);
+
return ret;
}
@@ -103,6 +114,8 @@ static int intel_th_remove(struct device *dev)
if (thdrv->attr_group)
sysfs_remove_group(&thdev->dev.kobj, thdrv->attr_group);
+ pm_runtime_get_sync(dev);
+
thdrv->remove(thdev);
if (intel_th_output_assigned(thdev)) {
@@ -110,9 +123,14 @@ static int intel_th_remove(struct device *dev)
to_intel_th_driver(dev->parent->driver);
if (hub->dev.driver)
+ /* does not talk to hardware */
hubdrv->unassign(hub, thdev);
}
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
return 0;
}
@@ -185,6 +203,7 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
{
struct intel_th_driver *thdrv =
to_intel_th_driver_or_null(thdev->dev.driver);
+ int ret = 0;
if (!thdrv)
return -ENODEV;
@@ -192,12 +211,17 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
if (!try_module_get(thdrv->driver.owner))
return -ENODEV;
+ pm_runtime_get_sync(&thdev->dev);
+
if (thdrv->activate)
- return thdrv->activate(thdev);
+ ret = thdrv->activate(thdev);
+ else
+ intel_th_trace_enable(thdev);
- intel_th_trace_enable(thdev);
+ if (ret)
+ pm_runtime_put(&thdev->dev);
- return 0;
+ return ret;
}
static void intel_th_output_deactivate(struct intel_th_device *thdev)
@@ -213,6 +237,7 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev)
else
intel_th_trace_disable(thdev);
+ pm_runtime_put(&thdev->dev);
module_put(thdrv->driver.owner);
}
@@ -465,6 +490,38 @@ static struct intel_th_subdevice {
},
};
+#ifdef CONFIG_MODULES
+static void __intel_th_request_hub_module(struct work_struct *work)
+{
+ struct intel_th *th = container_of(work, struct intel_th,
+ request_module_work);
+
+ request_module("intel_th_%s", th->hub->name);
+}
+
+static int intel_th_request_hub_module(struct intel_th *th)
+{
+ INIT_WORK(&th->request_module_work, __intel_th_request_hub_module);
+ schedule_work(&th->request_module_work);
+
+ return 0;
+}
+
+static void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+ flush_work(&th->request_module_work);
+}
+#else
+static inline int intel_th_request_hub_module(struct intel_th *th)
+{
+ return -EINVAL;
+}
+
+static inline void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+}
+#endif /* CONFIG_MODULES */
+
static int intel_th_populate(struct intel_th *th, struct resource *devres,
unsigned int ndevres, int irq)
{
@@ -535,7 +592,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
/* need switch driver to be loaded to enumerate the rest */
if (subdev->type == INTEL_TH_SWITCH && !req) {
th->hub = thdev;
- err = request_module("intel_th_%s", subdev->name);
+ err = intel_th_request_hub_module(th);
if (!err)
req++;
}
@@ -628,6 +685,10 @@ intel_th_alloc(struct device *dev, struct resource *devres,
dev_set_drvdata(dev, th);
+ pm_runtime_no_callbacks(dev);
+ pm_runtime_put(dev);
+ pm_runtime_allow(dev);
+
err = intel_th_populate(th, devres, ndevres, irq);
if (err)
goto err_chrdev;
@@ -635,6 +696,8 @@ intel_th_alloc(struct device *dev, struct resource *devres,
return th;
err_chrdev:
+ pm_runtime_forbid(dev);
+
__unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
"intel_th/output");
@@ -652,12 +715,16 @@ void intel_th_free(struct intel_th *th)
{
int i;
+ intel_th_request_hub_module_flush(th);
for (i = 0; i < TH_SUBDEVICE_MAX; i++)
if (th->thdev[i] != th->hub)
intel_th_device_remove(th->thdev[i]);
intel_th_device_remove(th->hub);
+ pm_runtime_get_sync(th->dev);
+ pm_runtime_forbid(th->dev);
+
__unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
"intel_th/output");
@@ -682,6 +749,7 @@ int intel_th_trace_enable(struct intel_th_device *thdev)
if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
return -EINVAL;
+ pm_runtime_get_sync(&thdev->dev);
hubdrv->enable(hub, &thdev->output);
return 0;
@@ -702,6 +770,7 @@ int intel_th_trace_disable(struct intel_th_device *thdev)
return -EINVAL;
hubdrv->disable(hub, &thdev->output);
+ pm_runtime_put(&thdev->dev);
return 0;
}
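The reworked probe above keeps runtime PM balanced on every exit: the references and enables taken before a failing step are undone under the out/out_pm labels. A minimal sketch of that unwind shape (do_hardware_setup() is a hypothetical stand-in for the real probe work):

#include <linux/pm_runtime.h>

static int do_hardware_setup(struct device *dev)
{
	return 0;	/* stand-in for the driver's actual probe work */
}

static int example_probe(struct device *dev)
{
	int ret;

	pm_runtime_set_active(dev);	/* hardware starts out powered */
	pm_runtime_enable(dev);

	ret = do_hardware_setup(dev);
	if (ret)
		pm_runtime_disable(dev);	/* unwind on failure */

	return ret;
}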
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 9beea0b54231..33e09369a491 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -22,6 +22,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/pm_runtime.h>
#include "intel_th.h"
#include "gth.h"
@@ -190,6 +191,11 @@ static ssize_t master_attr_store(struct device *dev,
if (old_port >= 0) {
gth->master[ma->master] = -1;
clear_bit(ma->master, gth->output[old_port].master);
+
+ /*
+ * if the port is active, program this setting;
+ * an active port implies that runtime PM is on
+ */
if (gth->output[old_port].output->active)
gth_master_set(gth, ma->master, -1);
}
@@ -204,7 +210,7 @@ static ssize_t master_attr_store(struct device *dev,
set_bit(ma->master, gth->output[port].master);
- /* if the port is active, program this setting */
+ /* if the port is active, program this setting, see above */
if (gth->output[port].output->active)
gth_master_set(gth, ma->master, port);
}
@@ -326,11 +332,15 @@ static ssize_t output_attr_show(struct device *dev,
struct gth_device *gth = oa->gth;
size_t count;
+ pm_runtime_get_sync(dev);
+
spin_lock(&gth->gth_lock);
count = snprintf(buf, PAGE_SIZE, "%x\n",
gth_output_parm_get(gth, oa->port, oa->parm));
spin_unlock(&gth->gth_lock);
+ pm_runtime_put(dev);
+
return count;
}
@@ -346,10 +356,14 @@ static ssize_t output_attr_store(struct device *dev,
if (kstrtouint(buf, 16, &config) < 0)
return -EINVAL;
+ pm_runtime_get_sync(dev);
+
spin_lock(&gth->gth_lock);
gth_output_parm_set(gth, oa->port, oa->parm, config);
spin_unlock(&gth->gth_lock);
+ pm_runtime_put(dev);
+
return count;
}
@@ -451,7 +465,7 @@ static int intel_th_output_attributes(struct gth_device *gth)
}
/**
- * intel_th_gth_disable() - enable tracing to an output device
+ * intel_th_gth_disable() - disable tracing to an output device
* @thdev: GTH device
* @output: output device's descriptor
*
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 0df22e30673d..4c195786bf1f 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -114,6 +114,9 @@ intel_th_output_assigned(struct intel_th_device *thdev)
* @unassign: deassociate an output type device from an output port
* @enable: enable tracing for a given output device
* @disable: disable tracing for a given output device
+ * @irq: interrupt callback
+ * @activate: enable tracing on the output's side
+ * @deactivate: disable tracing on the output's side
* @fops: file operations for device nodes
* @attr_group: attributes provided by the driver
*
@@ -205,6 +208,9 @@ struct intel_th {
int id;
int major;
+#ifdef CONFIG_MODULES
+ struct work_struct request_module_work;
+#endif /* CONFIG_MODULES */
#ifdef CONFIG_INTEL_TH_DEBUG
struct dentry *dbg;
#endif
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 5e25c7eb31d3..0bba3842336e 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -80,6 +80,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1a8e),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Kaby Lake PCH-H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index ff31108b066f..51f81d64ca37 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -15,6 +15,7 @@
* as defined in MIPI STPv2 specification.
*/
+#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -482,14 +483,40 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
return -EFAULT;
}
+ pm_runtime_get_sync(&stm->dev);
+
count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
kbuf, count);
+ pm_runtime_mark_last_busy(&stm->dev);
+ pm_runtime_put_autosuspend(&stm->dev);
kfree(kbuf);
return count;
}
+static void stm_mmap_open(struct vm_area_struct *vma)
+{
+ struct stm_file *stmf = vma->vm_file->private_data;
+ struct stm_device *stm = stmf->stm;
+
+ pm_runtime_get(&stm->dev);
+}
+
+static void stm_mmap_close(struct vm_area_struct *vma)
+{
+ struct stm_file *stmf = vma->vm_file->private_data;
+ struct stm_device *stm = stmf->stm;
+
+ pm_runtime_mark_last_busy(&stm->dev);
+ pm_runtime_put_autosuspend(&stm->dev);
+}
+
+static const struct vm_operations_struct stm_mmap_vmops = {
+ .open = stm_mmap_open,
+ .close = stm_mmap_close,
+};
+
static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
{
struct stm_file *stmf = file->private_data;
@@ -514,8 +541,11 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
if (!phys)
return -EINVAL;
+ pm_runtime_get_sync(&stm->dev);
+
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &stm_mmap_vmops;
vm_iomap_memory(vma, phys, size);
return 0;
@@ -701,6 +731,17 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
if (err)
goto err_device;
+ /*
+ * Use delayed autosuspend to avoid bouncing back and forth
+ * on recurring character device writes, with an initial
+ * autosuspend delay of 2 seconds.
+ */
+ pm_runtime_no_callbacks(&stm->dev);
+ pm_runtime_use_autosuspend(&stm->dev);
+ pm_runtime_set_autosuspend_delay(&stm->dev, 2000);
+ pm_runtime_set_suspended(&stm->dev);
+ pm_runtime_enable(&stm->dev);
+
return 0;
err_device:
@@ -724,6 +765,9 @@ void stm_unregister_device(struct stm_data *stm_data)
struct stm_source_device *src, *iter;
int i, ret;
+ pm_runtime_dont_use_autosuspend(&stm->dev);
+ pm_runtime_disable(&stm->dev);
+
mutex_lock(&stm->link_mutex);
list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
ret = __stm_source_link_drop(src, stm);
@@ -878,6 +922,8 @@ static int __stm_source_link_drop(struct stm_source_device *src,
stm_output_free(link, &src->output);
list_del_init(&src->link_entry);
+ pm_runtime_mark_last_busy(&link->dev);
+ pm_runtime_put_autosuspend(&link->dev);
/* matches stm_find_device() from stm_source_link_store() */
stm_put_device(link);
rcu_assign_pointer(src->link, NULL);
@@ -971,8 +1017,11 @@ static ssize_t stm_source_link_store(struct device *dev,
if (!link)
return -EINVAL;
+ pm_runtime_get(&link->dev);
+
err = stm_source_link_add(src, link);
if (err) {
+ pm_runtime_put_autosuspend(&link->dev);
/* matches the stm_find_device() above */
stm_put_device(link);
}
@@ -1033,6 +1082,9 @@ int stm_source_register_device(struct device *parent,
if (err)
goto err;
+ pm_runtime_no_callbacks(&src->dev);
+ pm_runtime_forbid(&src->dev);
+
err = device_add(&src->dev);
if (err)
goto err;
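stm_char_write() above follows the standard autosuspend idiom: take a synchronous reference before touching the hardware, then mark the device busy and drop the reference so the 2-second idle timer set at registration decides when to suspend. A condensed sketch of the idiom:

#include <linux/pm_runtime.h>

static void example_io(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* resume if suspended */

	/* ... hand the data to the hardware here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the idle timer */
	pm_runtime_put_autosuspend(dev);	/* suspend after the delay */
}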
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index cc6439ab3f71..041050edd809 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1268,6 +1268,8 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
}
}
+ idx = 0;
+
do {
if (msgs[idx].len == 0) {
ret = -EINVAL;
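The added idx = 0 matters because an earlier loop in the same function evidently leaves idx at its end value, so the do/while pass would otherwise start beyond the first message. A standalone illustration of that stale-index pattern (names illustrative):

#include <stdio.h>

int main(void)
{
	int idx;

	for (idx = 0; idx < 3; idx++)
		;		/* first pass leaves idx == 3 */

	idx = 0;		/* the fix: restart the second pass at 0 */
	do {
		printf("processing msg %d\n", idx);
	} while (++idx < 3);

	return 0;
}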
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 445398c314a3..b126dbaa47e3 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -912,7 +912,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
ret = tegra_i2c_init(i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize i2c controller");
- goto unprepare_div_clk;
+ goto disable_div_clk;
}
ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index e33022e2d459..6e5fac6a5262 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -56,9 +56,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
* The board info passed can safely be __initdata, but be careful of embedded
* pointers (for platform_data, functions, etc) since that won't be copied.
*/
-int __init
-i2c_register_board_info(int busnum,
- struct i2c_board_info const *info, unsigned len)
+int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
{
int status;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index af11b658984d..74e5aeaf84f9 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -107,12 +107,11 @@ struct acpi_i2c_lookup {
acpi_handle device_handle;
};
-static int acpi_i2c_find_address(struct acpi_resource *ares, void *data)
+static int acpi_i2c_fill_info(struct acpi_resource *ares, void *data)
{
struct acpi_i2c_lookup *lookup = data;
struct i2c_board_info *info = lookup->info;
struct acpi_resource_i2c_serialbus *sb;
- acpi_handle adapter_handle;
acpi_status status;
if (info->addr || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
@@ -122,80 +121,102 @@ static int acpi_i2c_find_address(struct acpi_resource *ares, void *data)
if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C)
return 1;
- /*
- * Extract the ResourceSource and make sure that the handle matches
- * with the I2C adapter handle.
- */
status = acpi_get_handle(lookup->device_handle,
sb->resource_source.string_ptr,
- &adapter_handle);
- if (ACPI_SUCCESS(status) && adapter_handle == lookup->adapter_handle) {
- info->addr = sb->slave_address;
- if (sb->access_mode == ACPI_I2C_10BIT_MODE)
- info->flags |= I2C_CLIENT_TEN;
- }
+ &lookup->adapter_handle);
+ if (!ACPI_SUCCESS(status))
+ return 1;
+
+ info->addr = sb->slave_address;
+ if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+ info->flags |= I2C_CLIENT_TEN;
return 1;
}
-static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
+static int acpi_i2c_get_info(struct acpi_device *adev,
+ struct i2c_board_info *info,
+ acpi_handle *adapter_handle)
{
- struct i2c_adapter *adapter = data;
struct list_head resource_list;
- struct acpi_i2c_lookup lookup;
struct resource_entry *entry;
- struct i2c_board_info info;
- struct acpi_device *adev;
+ struct acpi_i2c_lookup lookup;
int ret;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
- return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
+ return -EINVAL;
- memset(&info, 0, sizeof(info));
- info.fwnode = acpi_fwnode_handle(adev);
+ memset(info, 0, sizeof(*info));
+ info->fwnode = acpi_fwnode_handle(adev);
memset(&lookup, 0, sizeof(lookup));
- lookup.adapter_handle = ACPI_HANDLE(&adapter->dev);
- lookup.device_handle = handle;
- lookup.info = &info;
+ lookup.device_handle = acpi_device_handle(adev);
+ lookup.info = info;
- /*
- * Look up for I2cSerialBus resource with ResourceSource that
- * matches with this adapter.
- */
+ /* Look for the I2cSerialBus resource */
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_i2c_find_address, &lookup);
+ acpi_i2c_fill_info, &lookup);
acpi_dev_free_resource_list(&resource_list);
- if (ret < 0 || !info.addr)
- return AE_OK;
+ if (ret < 0 || !info->addr)
+ return -EINVAL;
+
+ *adapter_handle = lookup.adapter_handle;
/* Then fill IRQ number if any */
ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (ret < 0)
- return AE_OK;
+ return -EINVAL;
resource_list_for_each_entry(entry, &resource_list) {
if (resource_type(entry->res) == IORESOURCE_IRQ) {
- info.irq = entry->res->start;
+ info->irq = entry->res->start;
break;
}
}
acpi_dev_free_resource_list(&resource_list);
+ strlcpy(info->type, dev_name(&adev->dev), sizeof(info->type));
+
+ return 0;
+}
+
+static void acpi_i2c_register_device(struct i2c_adapter *adapter,
+ struct acpi_device *adev,
+ struct i2c_board_info *info)
+{
adev->power.flags.ignore_parent = true;
- strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
- if (!i2c_new_device(adapter, &info)) {
+ acpi_device_set_enumerated(adev);
+
+ if (!i2c_new_device(adapter, info)) {
adev->power.flags.ignore_parent = false;
dev_err(&adapter->dev,
"failed to add I2C device %s from ACPI\n",
dev_name(&adev->dev));
}
+}
+
+static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_adapter *adapter = data;
+ struct acpi_device *adev;
+ acpi_handle adapter_handle;
+ struct i2c_board_info info;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (acpi_i2c_get_info(adev, &info, &adapter_handle))
+ return AE_OK;
+
+ if (adapter_handle != ACPI_HANDLE(&adapter->dev))
+ return AE_OK;
+
+ acpi_i2c_register_device(adapter, adev, &info);
return AE_OK;
}
@@ -225,8 +246,80 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap)
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
+static int acpi_i2c_match_adapter(struct device *dev, void *data)
+{
+ struct i2c_adapter *adapter = i2c_verify_adapter(dev);
+
+ if (!adapter)
+ return 0;
+
+ return ACPI_HANDLE(dev) == (acpi_handle)data;
+}
+
+static int acpi_i2c_match_device(struct device *dev, void *data)
+{
+ return ACPI_COMPANION(dev) == data;
+}
+
+static struct i2c_adapter *acpi_i2c_find_adapter_by_handle(acpi_handle handle)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, handle,
+ acpi_i2c_match_adapter);
+ return dev ? i2c_verify_adapter(dev) : NULL;
+}
+
+static struct i2c_client *acpi_i2c_find_client_by_adev(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, adev, acpi_i2c_match_device);
+ return dev ? i2c_verify_client(dev) : NULL;
+}
+
+static int acpi_i2c_notify(struct notifier_block *nb, unsigned long value,
+ void *arg)
+{
+ struct acpi_device *adev = arg;
+ struct i2c_board_info info;
+ acpi_handle adapter_handle;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+
+ switch (value) {
+ case ACPI_RECONFIG_DEVICE_ADD:
+ if (acpi_i2c_get_info(adev, &info, &adapter_handle))
+ break;
+
+ adapter = acpi_i2c_find_adapter_by_handle(adapter_handle);
+ if (!adapter)
+ break;
+
+ acpi_i2c_register_device(adapter, adev, &info);
+ break;
+ case ACPI_RECONFIG_DEVICE_REMOVE:
+ if (!acpi_device_enumerated(adev))
+ break;
+
+ client = acpi_i2c_find_client_by_adev(adev);
+ if (!client)
+ break;
+
+ i2c_unregister_device(client);
+ put_device(&client->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block i2c_acpi_notifier = {
+ .notifier_call = acpi_i2c_notify,
+};
#else /* CONFIG_ACPI */
static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
+extern struct notifier_block i2c_acpi_notifier;
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_I2C_OPREGION
@@ -1089,6 +1182,8 @@ void i2c_unregister_device(struct i2c_client *client)
{
if (client->dev.of_node)
of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+ if (ACPI_COMPANION(&client->dev))
+ acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
device_unregister(&client->dev);
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -2117,6 +2212,8 @@ static int __init i2c_init(void)
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_register(&i2c_of_notifier));
+ if (IS_ENABLED(CONFIG_ACPI))
+ WARN_ON(acpi_reconfig_notifier_register(&i2c_acpi_notifier));
return 0;
@@ -2132,6 +2229,8 @@ bus_err:
static void __exit i2c_exit(void)
{
+ if (IS_ENABLED(CONFIG_ACPI))
+ WARN_ON(acpi_reconfig_notifier_unregister(&i2c_acpi_notifier));
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier));
i2c_del_driver(&dummy_driver);
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 26e7c5187a58..c6a90b4a9c62 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -145,7 +145,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
mux->data.idle_in_use = true;
/* map address from "reg" if exists */
- if (of_address_to_resource(np, 0, &res)) {
+ if (of_address_to_resource(np, 0, &res) == 0) {
mux->data.reg_size = resource_size(&res);
mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(mux->data.reg))
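of_address_to_resource() returns 0 on success, so the original test only ran the mapping code when the lookup had failed; comparing against 0 restores the intended sense. A tiny standalone model of the 0-on-success convention (fake_lookup() is illustrative):

#include <stdio.h>

static int fake_lookup(int *out)
{
	*out = 42;
	return 0;	/* 0 means success, like of_address_to_resource() */
}

int main(void)
{
	int v;

	if (fake_lookup(&v) == 0)	/* act only on success */
		printf("mapped resource: %d\n", v);

	return 0;
}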
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index ef907fd5ba98..bf9a2ad296ed 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1770,7 +1770,6 @@ static int ide_cd_probe(ide_drive_t *drive)
drive->driver_data = info;
g->minors = 1;
- g->driverfs_dev = &drive->gendev;
g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
if (ide_cdrom_setup(drive)) {
put_device(&info->dev);
@@ -1780,7 +1779,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive, &sense);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- add_disk(g);
+ device_add_disk(&drive->gendev, g);
return 0;
out_free_disk:
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 474173eb31bb..5887a7a09e37 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -459,9 +459,6 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
layer. the packet must be complete, as we do not
touch it at all. */
- if (cgc->data_direction == CGC_DATA_WRITE)
- flags |= REQ_WRITE;
-
if (cgc->sense)
memset(cgc->sense, 0, sizeof(struct request_sense));
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 05dbcce70b0e..e378ef70ed63 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -431,7 +431,7 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
ide_drive_t *drive = q->queuedata;
struct ide_cmd *cmd;
- if (!(rq->cmd_flags & REQ_FLUSH))
+ if (req_op(rq) != REQ_OP_FLUSH)
return BLKPREP_OK;
if (rq->special) {
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 2fb5350c5410..f079d8d1d856 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -206,7 +206,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
memcpy(rq->cmd, pc->c, 12);
pc->rq = rq;
- if (rq->cmd_flags & REQ_WRITE)
+ if (cmd == WRITE)
pc->flags |= PC_FLAG_WRITING;
pc->flags |= PC_FLAG_DMA_OK;
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 838996a0039e..e823394ed543 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -412,12 +412,11 @@ static int ide_gd_probe(ide_drive_t *drive)
set_capacity(g, ide_gd_capacity(drive));
g->minors = IDE_DISK_MINORS;
- g->driverfs_dev = &drive->gendev;
g->flags |= GENHD_FL_EXT_DEVT;
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
- add_disk(g);
+ device_add_disk(&drive->gendev, g);
return 0;
out_free_disk:
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index c96649292b55..9b2ef248788d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -46,8 +46,6 @@
* to avoid complications with the lapic timer workaround.
* Have not seen issues with suspend, but may need same workaround here.
*
- * There is currently no kernel-based automatic probing/loading mechanism
- * if the driver is built as a module.
*/
/* un-comment DEBUG to enable pr_debug() statements */
@@ -60,8 +58,9 @@
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/msr.h>
@@ -827,6 +826,35 @@ static struct cpuidle_state bxt_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state dnv_cstates[] = {
+ {
+ .name = "C1-DNV",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C1E-DNV",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-DNV",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 50,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
+
/**
* intel_idle
* @dev: cpuidle_device
@@ -1016,45 +1044,50 @@ static const struct idle_cpu idle_cpu_bxt = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_dnv = {
+ .state_table = dnv_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
- ICPU(0x1a, idle_cpu_nehalem),
- ICPU(0x1e, idle_cpu_nehalem),
- ICPU(0x1f, idle_cpu_nehalem),
- ICPU(0x25, idle_cpu_nehalem),
- ICPU(0x2c, idle_cpu_nehalem),
- ICPU(0x2e, idle_cpu_nehalem),
- ICPU(0x1c, idle_cpu_atom),
- ICPU(0x26, idle_cpu_lincroft),
- ICPU(0x2f, idle_cpu_nehalem),
- ICPU(0x2a, idle_cpu_snb),
- ICPU(0x2d, idle_cpu_snb),
- ICPU(0x36, idle_cpu_atom),
- ICPU(0x37, idle_cpu_byt),
- ICPU(0x4c, idle_cpu_cht),
- ICPU(0x3a, idle_cpu_ivb),
- ICPU(0x3e, idle_cpu_ivt),
- ICPU(0x3c, idle_cpu_hsw),
- ICPU(0x3f, idle_cpu_hsw),
- ICPU(0x45, idle_cpu_hsw),
- ICPU(0x46, idle_cpu_hsw),
- ICPU(0x4d, idle_cpu_avn),
- ICPU(0x3d, idle_cpu_bdw),
- ICPU(0x47, idle_cpu_bdw),
- ICPU(0x4f, idle_cpu_bdw),
- ICPU(0x56, idle_cpu_bdw),
- ICPU(0x4e, idle_cpu_skl),
- ICPU(0x5e, idle_cpu_skl),
- ICPU(0x8e, idle_cpu_skl),
- ICPU(0x9e, idle_cpu_skl),
- ICPU(0x55, idle_cpu_skx),
- ICPU(0x57, idle_cpu_knl),
- ICPU(0x5c, idle_cpu_bxt),
+ ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_NEHALEM, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_WESTMERE2, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom),
+ ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft),
+ ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb),
+ ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
+ ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
+ ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
+ ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
+ ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
+ ICPU(INTEL_FAM6_HASWELL_CORE, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn),
+ ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_BROADWELL_XEON_D, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_SKYLAKE_MOBILE, idle_cpu_skl),
+ ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, idle_cpu_skl),
+ ICPU(INTEL_FAM6_KABYLAKE_MOBILE, idle_cpu_skl),
+ ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl),
+ ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx),
+ ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
+ ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
{}
};
-MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
/*
* intel_idle_probe()
@@ -1154,7 +1187,10 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
{
unsigned long long ns;
- ns = irtl_ns_units[(irtl >> 10) & 0x3];
+ if (!irtl)
+ return 0;
+
+ ns = irtl_ns_units[(irtl >> 10) & 0x7];
return div64_u64((irtl & 0x3FF) * ns, 1000);
}
@@ -1167,43 +1203,39 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
static void bxt_idle_state_table_update(void)
{
unsigned long long msr;
+ unsigned int usec;
rdmsrl(MSR_PKGC6_IRTL, msr);
- if (msr) {
- unsigned int usec = irtl_2_usec(msr);
-
+ usec = irtl_2_usec(msr);
+ if (usec) {
bxt_cstates[2].exit_latency = usec;
bxt_cstates[2].target_residency = usec;
}
rdmsrl(MSR_PKGC7_IRTL, msr);
- if (msr) {
- unsigned int usec = irtl_2_usec(msr);
-
+ usec = irtl_2_usec(msr);
+ if (usec) {
bxt_cstates[3].exit_latency = usec;
bxt_cstates[3].target_residency = usec;
}
rdmsrl(MSR_PKGC8_IRTL, msr);
- if (msr) {
- unsigned int usec = irtl_2_usec(msr);
-
+ usec = irtl_2_usec(msr);
+ if (usec) {
bxt_cstates[4].exit_latency = usec;
bxt_cstates[4].target_residency = usec;
}
rdmsrl(MSR_PKGC9_IRTL, msr);
- if (msr) {
- unsigned int usec = irtl_2_usec(msr);
-
+ usec = irtl_2_usec(msr);
+ if (usec) {
bxt_cstates[5].exit_latency = usec;
bxt_cstates[5].target_residency = usec;
}
rdmsrl(MSR_PKGC10_IRTL, msr);
- if (msr) {
- unsigned int usec = irtl_2_usec(msr);
-
+ usec = irtl_2_usec(msr);
+ if (usec) {
bxt_cstates[6].exit_latency = usec;
bxt_cstates[6].target_residency = usec;
}
@@ -1261,13 +1293,13 @@ static void intel_idle_state_table_update(void)
{
switch (boot_cpu_data.x86_model) {
- case 0x3e: /* IVT */
+ case INTEL_FAM6_IVYBRIDGE_X:
ivt_idle_state_table_update();
break;
- case 0x5c: /* BXT */
+ case INTEL_FAM6_ATOM_GOLDMONT:
bxt_idle_state_table_update();
break;
- case 0x5e: /* SKL-H */
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
sklh_idle_state_table_update();
break;
}
@@ -1415,34 +1447,12 @@ static int __init intel_idle_init(void)
return 0;
}
+device_initcall(intel_idle_init);
-static void __exit intel_idle_exit(void)
-{
- struct cpuidle_device *dev;
- int i;
-
- cpu_notifier_register_begin();
-
- if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
- on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
- __unregister_cpu_notifier(&cpu_hotplug_notifier);
-
- for_each_possible_cpu(i) {
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
- cpuidle_unregister_device(dev);
- }
-
- cpu_notifier_register_done();
-
- cpuidle_unregister_driver(&intel_idle_driver);
- free_percpu(intel_idle_cpuidle_devices);
-}
-
-module_init(intel_idle_init);
-module_exit(intel_idle_exit);
-
+/*
+ * We are not really modular, but we used to support that. Meaning we also
+ * support "intel_idle.max_cstate=..." at boot and also a read-only export of
+ * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
+ * is the easiest way (currently) to continue doing that.
+ */
module_param(max_cstate, int, 0444);
-
-MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
-MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 505e921f0b19..6743b18194fb 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -46,6 +46,14 @@ config IIO_CONSUMERS_PER_TRIGGER
This value controls the maximum number of consumers that a
given trigger may handle. Default is 2.
+config IIO_SW_DEVICE
+ tristate "Enable software IIO device support"
+ select IIO_CONFIGFS
+ help
+ Provides IIO core support for software devices. A software
+ device can be created via configfs or directly by a driver
+ using the API provided.
+
config IIO_SW_TRIGGER
tristate "Enable software triggers support"
select IIO_CONFIGFS
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 20f649073462..87e4c4369e2f 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -8,6 +8,7 @@ industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
obj-$(CONFIG_IIO_CONFIGFS) += industrialio-configfs.o
+obj-$(CONFIG_IIO_SW_DEVICE) += industrialio-sw-device.o
obj-$(CONFIG_IIO_SW_TRIGGER) += industrialio-sw-trigger.o
obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index e4a758cd7d35..89d78208de3f 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -17,6 +17,16 @@ config BMA180
To compile this driver as a module, choose M here: the
module will be called bma180.
+config BMA220
+ tristate "Bosch BMA220 3-Axis Accelerometer Driver"
+ depends on SPI
+ help
+ Say yes here to add support for the Bosch BMA220 triaxial
+ acceleration sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bma220_spi.
+
config BMC150_ACCEL
tristate "Bosch BMC150 Accelerometer Driver"
select IIO_BUFFER
@@ -136,13 +146,23 @@ config MMA7455_SPI
To compile this driver as a module, choose M here: the module
will be called mma7455_spi.
+config MMA7660
+ tristate "Freescale MMA7660FC 3-Axis Accelerometer Driver"
+ depends on I2C
+ help
+ Say yes here to get support for the Freescale MMA7660FC 3-Axis
+ accelerometer.
+
+ Choosing M will build the driver as a module. If so, the module
+ will be called mma7660.
+
config MMA8452
- tristate "Freescale MMA8452Q and similar Accelerometers Driver"
+ tristate "Freescale / NXP MMA8452Q and similar Accelerometers Driver"
depends on I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for the following Freescale 3-axis
+ Say yes here to build support for the following Freescale / NXP 3-axis
accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC,
FXLS8471Q.
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 71b6794de885..6cedbecca2ee 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -4,6 +4,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_BMA180) += bma180.o
+obj-$(CONFIG_BMA220) += bma220_spi.o
obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
@@ -15,6 +16,8 @@ obj-$(CONFIG_MMA7455) += mma7455_core.o
obj-$(CONFIG_MMA7455_I2C) += mma7455_i2c.o
obj-$(CONFIG_MMA7455_SPI) += mma7455_spi.o
+obj-$(CONFIG_MMA7660) += mma7660.o
+
obj-$(CONFIG_MMA8452) += mma8452.o
obj-$(CONFIG_MMA9551_CORE) += mma9551_core.o
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index f04b88406995..e3f88ba5faf3 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -654,7 +654,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bma180_data *data = iio_priv(indio_dev);
- int64_t time_ns = iio_get_time_ns();
+ s64 time_ns = iio_get_time_ns(indio_dev);
int bit, ret, i = 0;
mutex_lock(&data->mutex);
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
new file mode 100644
index 000000000000..1098d10df8e8
--- /dev/null
+++ b/drivers/iio/accel/bma220_spi.c
@@ -0,0 +1,338 @@
+/**
+ * BMA220 Digital triaxial acceleration sensor driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define BMA220_REG_ID 0x00
+#define BMA220_REG_ACCEL_X 0x02
+#define BMA220_REG_ACCEL_Y 0x03
+#define BMA220_REG_ACCEL_Z 0x04
+#define BMA220_REG_RANGE 0x11
+#define BMA220_REG_SUSPEND 0x18
+
+#define BMA220_CHIP_ID 0xDD
+#define BMA220_READ_MASK 0x80
+#define BMA220_RANGE_MASK 0x03
+#define BMA220_DATA_SHIFT 2
+#define BMA220_SUSPEND_SLEEP 0xFF
+#define BMA220_SUSPEND_WAKE 0x00
+
+#define BMA220_DEVICE_NAME "bma220"
+#define BMA220_SCALE_AVAILABLE "0.623 1.248 2.491 4.983"
+
+#define BMA220_ACCEL_CHANNEL(index, reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 6, \
+ .storagebits = 8, \
+ .shift = BMA220_DATA_SHIFT, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+enum bma220_axis {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z,
+};
+
+static IIO_CONST_ATTR(in_accel_scale_available, BMA220_SCALE_AVAILABLE);
+
+static struct attribute *bma220_attributes[] = {
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bma220_attribute_group = {
+ .attrs = bma220_attributes,
+};
+
+static const int bma220_scale_table[][4] = {
+ {0, 623000}, {1, 248000}, {2, 491000}, {4, 983000}
+};
+
+struct bma220_data {
+ struct spi_device *spi_device;
+ struct mutex lock;
+ s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 8x8 timestamp */
+ u8 tx_buf[2] ____cacheline_aligned;
+};
+
+static const struct iio_chan_spec bma220_channels[] = {
+ BMA220_ACCEL_CHANNEL(0, BMA220_REG_ACCEL_X, X),
+ BMA220_ACCEL_CHANNEL(1, BMA220_REG_ACCEL_Y, Y),
+ BMA220_ACCEL_CHANNEL(2, BMA220_REG_ACCEL_Z, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static inline int bma220_read_reg(struct spi_device *spi, u8 reg)
+{
+ return spi_w8r8(spi, reg | BMA220_READ_MASK);
+}
+
+static const unsigned long bma220_accel_scan_masks[] = {
+ BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
+ 0
+};
+
+static irqreturn_t bma220_trigger_handler(int irq, void *p)
+{
+ int ret;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bma220_data *data = iio_priv(indio_dev);
+ struct spi_device *spi = data->spi_device;
+
+ mutex_lock(&data->lock);
+ data->tx_buf[0] = BMA220_REG_ACCEL_X | BMA220_READ_MASK;
+ ret = spi_write_then_read(spi, data->tx_buf, 1, data->buffer,
+ ARRAY_SIZE(bma220_channels) - 1);
+ if (ret < 0)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ pf->timestamp);
+err:
+ mutex_unlock(&data->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int bma220_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ u8 range_idx;
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = bma220_read_reg(data->spi_device, chan->address);
+ if (ret < 0)
+ return -EINVAL;
+ *val = sign_extend32(ret >> BMA220_DATA_SHIFT, 5);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = bma220_read_reg(data->spi_device, BMA220_REG_RANGE);
+ if (ret < 0)
+ return ret;
+ range_idx = ret & BMA220_RANGE_MASK;
+ *val = bma220_scale_table[range_idx][0];
+ *val2 = bma220_scale_table[range_idx][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
+static int bma220_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int i;
+ int ret;
+ int index = -1;
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ for (i = 0; i < ARRAY_SIZE(bma220_scale_table); i++)
+ if (val == bma220_scale_table[i][0] &&
+ val2 == bma220_scale_table[i][1]) {
+ index = i;
+ break;
+ }
+ if (index < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ data->tx_buf[0] = BMA220_REG_RANGE;
+ data->tx_buf[1] = index;
+ ret = spi_write(data->spi_device, data->tx_buf,
+ sizeof(data->tx_buf));
+ if (ret < 0)
+ dev_err(&data->spi_device->dev,
+ "failed to set measurement range\n");
+ mutex_unlock(&data->lock);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info bma220_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = bma220_read_raw,
+ .write_raw = bma220_write_raw,
+ .attrs = &bma220_attribute_group,
+};
+
+static int bma220_init(struct spi_device *spi)
+{
+ int ret;
+
+ ret = bma220_read_reg(spi, BMA220_REG_ID);
+ if (ret != BMA220_CHIP_ID)
+ return -ENODEV;
+
+ /* Make sure the chip is powered on */
+ ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
+ if (ret < 0)
+ return ret;
+ else if (ret == BMA220_SUSPEND_WAKE)
+ return bma220_read_reg(spi, BMA220_REG_SUSPEND);
+
+ return 0;
+}
+
+static int bma220_deinit(struct spi_device *spi)
+{
+ int ret;
+
+ /* Make sure the chip is powered off */
+ ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
+ if (ret < 0)
+ return ret;
+ else if (ret == BMA220_SUSPEND_SLEEP)
+ return bma220_read_reg(spi, BMA220_REG_SUSPEND);
+
+ return 0;
+}
+
+static int bma220_probe(struct spi_device *spi)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct bma220_data *data;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&spi->dev, "iio allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->spi_device = spi;
+ spi_set_drvdata(spi, indio_dev);
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &bma220_info;
+ indio_dev->name = BMA220_DEVICE_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = bma220_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bma220_channels);
+ indio_dev->available_scan_masks = bma220_accel_scan_masks;
+
+ ret = bma220_init(data->spi_device);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ bma220_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&spi->dev, "iio triggered buffer setup failed\n");
+ goto err_suspend;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&spi->dev, "iio_device_register failed\n");
+ iio_triggered_buffer_cleanup(indio_dev);
+ goto err_suspend;
+ }
+
+ return 0;
+
+err_suspend:
+ return bma220_deinit(spi);
+}
+
+static int bma220_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return bma220_deinit(spi);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bma220_suspend(struct device *dev)
+{
+ struct bma220_data *data =
+ iio_priv(spi_get_drvdata(to_spi_device(dev)));
+
+ /* The chip can be suspended/woken up by a simple register read. */
+ return bma220_read_reg(data->spi_device, BMA220_REG_SUSPEND);
+}
+
+static int bma220_resume(struct device *dev)
+{
+ struct bma220_data *data =
+ iio_priv(spi_get_drvdata(to_spi_device(dev)));
+
+ return bma220_read_reg(data->spi_device, BMA220_REG_SUSPEND);
+}
+
+static SIMPLE_DEV_PM_OPS(bma220_pm_ops, bma220_suspend, bma220_resume);
+
+#define BMA220_PM_OPS (&bma220_pm_ops)
+#else
+#define BMA220_PM_OPS NULL
+#endif
+
+static const struct spi_device_id bma220_spi_id[] = {
+ {"bma220", 0},
+ {}
+};
+
+static const struct acpi_device_id bma220_acpi_id[] = {
+ {"BMA0220", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(spi, bma220_spi_id);
+
+static struct spi_driver bma220_driver = {
+ .driver = {
+ .name = "bma220_spi",
+ .pm = BMA220_PM_OPS,
+ .acpi_match_table = ACPI_PTR(bma220_acpi_id),
+ },
+ .probe = bma220_probe,
+ .remove = bma220_remove,
+ .id_table = bma220_spi_id,
+};
+
+module_spi_driver(bma220_driver);
+
+MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
+MODULE_DESCRIPTION("BMA220 acceleration sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 197e693e7e7b..bf17aae66145 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -901,7 +901,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
*/
if (!irq) {
data->old_timestamp = data->timestamp;
- data->timestamp = iio_get_time_ns();
+ data->timestamp = iio_get_time_ns(indio_dev);
}
/*
@@ -1303,7 +1303,7 @@ static irqreturn_t bmc150_accel_irq_handler(int irq, void *private)
int i;
data->old_timestamp = data->timestamp;
- data->timestamp = iio_get_time_ns();
+ data->timestamp = iio_get_time_ns(indio_dev);
for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
if (data->triggers[i].enabled) {
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index bfe219a8bea2..765a72362dc6 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1129,7 +1129,7 @@ static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private)
struct iio_dev *indio_dev = private;
struct kxcjk1013_data *data = iio_priv(indio_dev);
- data->timestamp = iio_get_time_ns();
+ data->timestamp = iio_get_time_ns(indio_dev);
if (data->dready_trigger_on)
iio_trigger_poll(data->dready_trig);
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index c902f54c23f5..6551085bedd7 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -97,7 +97,8 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p)
if (ret)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
new file mode 100644
index 000000000000..0acdee516973
--- /dev/null
+++ b/drivers/iio/accel/mma7660.c
@@ -0,0 +1,277 @@
+/**
+ * Freescale MMA7660FC 3-Axis Accelerometer
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for Freescale MMA7660FC; 7-bit I2C address: 0x4c.
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define MMA7660_DRIVER_NAME "mma7660"
+
+#define MMA7660_REG_XOUT 0x00
+#define MMA7660_REG_YOUT 0x01
+#define MMA7660_REG_ZOUT 0x02
+#define MMA7660_REG_OUT_BIT_ALERT BIT(6)
+
+#define MMA7660_REG_MODE 0x07
+#define MMA7660_REG_MODE_BIT_MODE BIT(0)
+#define MMA7660_REG_MODE_BIT_TON BIT(2)
+
+#define MMA7660_I2C_READ_RETRIES 5
+
+/*
+ * The accelerometer has one measurement range:
+ *
+ * -1.5g - +1.5g (6-bit, signed)
+ *
+ * scale = (1.5 + 1.5) * 9.81 / (2^6 - 1) = 0.467142857
+ */
+
+#define MMA7660_SCALE_AVAIL "0.467142857"
+
+static const int mma7660_nscale = 467142857;
+
+#define MMA7660_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec mma7660_channels[] = {
+ MMA7660_CHANNEL(MMA7660_REG_XOUT, X),
+ MMA7660_CHANNEL(MMA7660_REG_YOUT, Y),
+ MMA7660_CHANNEL(MMA7660_REG_ZOUT, Z),
+};
+
+enum mma7660_mode {
+ MMA7660_MODE_STANDBY,
+ MMA7660_MODE_ACTIVE
+};
+
+struct mma7660_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ enum mma7660_mode mode;
+};
+
+static IIO_CONST_ATTR(in_accel_scale_available, MMA7660_SCALE_AVAIL);
+
+static struct attribute *mma7660_attributes[] = {
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mma7660_attribute_group = {
+ .attrs = mma7660_attributes
+};
+
+static int mma7660_set_mode(struct mma7660_data *data,
+ enum mma7660_mode mode)
+{
+ int ret;
+ struct i2c_client *client = data->client;
+
+ if (mode == data->mode)
+ return 0;
+
+ ret = i2c_smbus_read_byte_data(client, MMA7660_REG_MODE);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read sensor mode\n");
+ return ret;
+ }
+
+ if (mode == MMA7660_MODE_ACTIVE) {
+ ret &= ~MMA7660_REG_MODE_BIT_TON;
+ ret |= MMA7660_REG_MODE_BIT_MODE;
+ } else {
+ ret &= ~MMA7660_REG_MODE_BIT_TON;
+ ret &= ~MMA7660_REG_MODE_BIT_MODE;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, MMA7660_REG_MODE, ret);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to change sensor mode\n");
+ return ret;
+ }
+
+ data->mode = mode;
+
+ return ret;
+}
+
+static int mma7660_read_accel(struct mma7660_data *data, u8 address)
+{
+ int ret, retries = MMA7660_I2C_READ_RETRIES;
+ struct i2c_client *client = data->client;
+
+ /*
+ * Read data. If the Alert bit is set, the register was read at
+ * the same time as the device was attempting to update the content.
+ * The solution is to read the register again. Do this only
+ * MMA7660_I2C_READ_RETRIES times to avoid spending too much time
+ * in the kernel.
+ */
+ do {
+ ret = i2c_smbus_read_byte_data(client, address);
+ if (ret < 0) {
+ dev_err(&client->dev, "register read failed\n");
+ return ret;
+ }
+ } while (retries-- > 0 && ret & MMA7660_REG_OUT_BIT_ALERT);
+
+ if (ret & MMA7660_REG_OUT_BIT_ALERT) {
+ dev_err(&client->dev, "all register read retries failed\n");
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int mma7660_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mma7660_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+ ret = mma7660_read_accel(data, chan->address);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 5);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = mma7660_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info mma7660_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = mma7660_read_raw,
+ .attrs = &mma7660_attribute_group,
+};
+
+static int mma7660_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct mma7660_data *data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+ mutex_init(&data->lock);
+ data->mode = MMA7660_MODE_STANDBY;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &mma7660_info;
+ indio_dev->name = MMA7660_DRIVER_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mma7660_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mma7660_channels);
+
+ ret = mma7660_set_mode(data, MMA7660_MODE_ACTIVE);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ mma7660_set_mode(data, MMA7660_MODE_STANDBY);
+ }
+
+ return ret;
+}
+
+static int mma7660_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mma7660_suspend(struct device *dev)
+{
+ struct mma7660_data *data;
+
+ data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return mma7660_set_mode(data, MMA7660_MODE_STANDBY);
+}
+
+static int mma7660_resume(struct device *dev)
+{
+ struct mma7660_data *data;
+
+ data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return mma7660_set_mode(data, MMA7660_MODE_ACTIVE);
+}
+
+static SIMPLE_DEV_PM_OPS(mma7660_pm_ops, mma7660_suspend, mma7660_resume);
+
+#define MMA7660_PM_OPS (&mma7660_pm_ops)
+#else
+#define MMA7660_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id mma7660_i2c_id[] = {
+ {"mma7660", 0},
+ {}
+};
+
+static const struct acpi_device_id mma7660_acpi_id[] = {
+ {"MMA7660", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, mma7660_acpi_id);
+
+static struct i2c_driver mma7660_driver = {
+ .driver = {
+ .name = "mma7660",
+ .pm = MMA7660_PM_OPS,
+ .acpi_match_table = ACPI_PTR(mma7660_acpi_id),
+ },
+ .probe = mma7660_probe,
+ .remove = mma7660_remove,
+ .id_table = mma7660_i2c_id,
+};
+
+module_i2c_driver(mma7660_driver);
+
+MODULE_AUTHOR("Constantin Musca <constantin.musca@intel.com>");
+MODULE_DESCRIPTION("Freescale MMA7660FC 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index e225d3c53bd5..d41e1b588e68 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1,22 +1,22 @@
/*
- * mma8452.c - Support for following Freescale 3-axis accelerometers:
+ * mma8452.c - Support for following Freescale / NXP 3-axis accelerometers:
*
- * MMA8451Q (14 bit)
- * MMA8452Q (12 bit)
- * MMA8453Q (10 bit)
- * MMA8652FC (12 bit)
- * MMA8653FC (10 bit)
- * FXLS8471Q (14 bit)
+ * device name digital output 7-bit I2C slave address (pin selectable)
+ * ---------------------------------------------------------------------
+ * MMA8451Q 14 bit 0x1c / 0x1d
+ * MMA8452Q 12 bit 0x1c / 0x1d
+ * MMA8453Q 10 bit 0x1c / 0x1d
+ * MMA8652FC 12 bit 0x1d
+ * MMA8653FC 10 bit 0x1d
+ * FXLS8471Q 14 bit 0x1e / 0x1d / 0x1c / 0x1f
*
- * Copyright 2015 Martin Kepplinger <martin.kepplinger@theobroma-systems.com>
+ * Copyright 2015 Martin Kepplinger <martink@posteo.de>
* Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
*
* This file is subject to the terms and conditions of version 2 of
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
- * 7-bit I2C slave address 0x1c/0x1d (pin selectable)
- *
* TODO: orientation events
*/
@@ -76,6 +76,8 @@
#define MMA8452_CTRL_DR_DEFAULT 0x4 /* 50 Hz sample frequency */
#define MMA8452_CTRL_REG2 0x2b
#define MMA8452_CTRL_REG2_RST BIT(6)
+#define MMA8452_CTRL_REG2_MODS_SHIFT 3
+#define MMA8452_CTRL_REG2_MODS_MASK 0x1b
#define MMA8452_CTRL_REG4 0x2d
#define MMA8452_CTRL_REG5 0x2e
#define MMA8452_OFF_X 0x2f
@@ -106,7 +108,7 @@ struct mma8452_data {
};
/**
- * struct mma_chip_info - chip specific data for Freescale's accelerometers
+ * struct mma_chip_info - chip specific data
* @chip_id: WHO_AM_I register's value
* @channels: struct iio_chan_spec matching the device's
* capabilities
@@ -257,20 +259,17 @@ static const int mma8452_samp_freq[8][2] = {
{6, 250000}, {1, 560000}
};
-/* Datasheet table 35 (step time vs sample frequency) */
-static const int mma8452_transient_time_step_us[8] = {
- 1250,
- 2500,
- 5000,
- 10000,
- 20000,
- 20000,
- 20000,
- 20000
+/* Datasheet table: step time "Relationship with the ODR" (sample frequency) */
+static const int mma8452_transient_time_step_us[4][8] = {
+ { 1250, 2500, 5000, 10000, 20000, 20000, 20000, 20000 }, /* normal */
+ { 1250, 2500, 5000, 10000, 20000, 80000, 80000, 80000 }, /* low power low noise */
+ { 1250, 2500, 2500, 2500, 2500, 2500, 2500, 2500 }, /* high res */
+ { 1250, 2500, 5000, 10000, 20000, 80000, 160000, 160000 } /* low power */
};
-/* Datasheet table 18 (normal mode) */
-static const int mma8452_hp_filter_cutoff[8][4][2] = {
+/* Datasheet table "High-Pass Filter Cutoff Options" */
+static const int mma8452_hp_filter_cutoff[4][8][4][2] = {
+ { /* normal */
{ {16, 0}, {8, 0}, {4, 0}, {2, 0} }, /* 800 Hz sample */
{ {16, 0}, {8, 0}, {4, 0}, {2, 0} }, /* 400 Hz sample */
{ {8, 0}, {4, 0}, {2, 0}, {1, 0} }, /* 200 Hz sample */
@@ -279,8 +278,61 @@ static const int mma8452_hp_filter_cutoff[8][4][2] = {
{ {2, 0}, {1, 0}, {0, 500000}, {0, 250000} }, /* 12.5 Hz sample */
{ {2, 0}, {1, 0}, {0, 500000}, {0, 250000} }, /* 6.25 Hz sample */
{ {2, 0}, {1, 0}, {0, 500000}, {0, 250000} } /* 1.56 Hz sample */
+ },
+ { /* low noise low power */
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {8, 0}, {4, 0}, {2, 0}, {1, 0} },
+ { {4, 0}, {2, 0}, {1, 0}, {0, 500000} },
+ { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} },
+ { {0, 500000}, {0, 250000}, {0, 125000}, {0, 63000} },
+ { {0, 500000}, {0, 250000}, {0, 125000}, {0, 63000} },
+ { {0, 500000}, {0, 250000}, {0, 125000}, {0, 63000} }
+ },
+ { /* high resolution */
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} }
+ },
+ { /* low power */
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {8, 0}, {4, 0}, {2, 0}, {1, 0} },
+ { {4, 0}, {2, 0}, {1, 0}, {0, 500000} },
+ { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} },
+ { {1, 0}, {0, 500000}, {0, 250000}, {0, 125000} },
+ { {0, 250000}, {0, 125000}, {0, 63000}, {0, 31000} },
+ { {0, 250000}, {0, 125000}, {0, 63000}, {0, 31000} },
+ { {0, 250000}, {0, 125000}, {0, 63000}, {0, 31000} }
+ }
};
+/* Datasheet table "MODS Oversampling modes averaging values at each ODR" */
+static const u16 mma8452_os_ratio[4][8] = {
+ /* 800 Hz, 400 Hz, ... , 1.56 Hz */
+ { 2, 4, 4, 4, 4, 16, 32, 128 }, /* normal */
+ { 2, 4, 4, 4, 4, 4, 8, 32 }, /* low power low noise */
+ { 2, 4, 8, 16, 32, 128, 256, 1024 }, /* high resolution */
+ { 2, 2, 2, 2, 2, 2, 4, 16 } /* low power */
+};
+
+static int mma8452_get_power_mode(struct mma8452_data *data)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(data->client,
+ MMA8452_CTRL_REG2);
+ if (reg < 0)
+ return reg;
+
+ return ((reg & MMA8452_CTRL_REG2_MODS_MASK) >>
+ MMA8452_CTRL_REG2_MODS_SHIFT);
+}
+
static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -306,10 +358,39 @@ static ssize_t mma8452_show_hp_cutoff_avail(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct mma8452_data *data = iio_priv(indio_dev);
+ int i, j;
+
+ i = mma8452_get_odr_index(data);
+ j = mma8452_get_power_mode(data);
+ if (j < 0)
+ return j;
+
+ return mma8452_show_int_plus_micros(buf, mma8452_hp_filter_cutoff[j][i],
+ ARRAY_SIZE(mma8452_hp_filter_cutoff[0][0]));
+}
+
+static ssize_t mma8452_show_os_ratio_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct mma8452_data *data = iio_priv(indio_dev);
int i = mma8452_get_odr_index(data);
+ int j;
+ u16 val = 0;
+ size_t len = 0;
- return mma8452_show_int_plus_micros(buf, mma8452_hp_filter_cutoff[i],
- ARRAY_SIZE(mma8452_hp_filter_cutoff[0]));
+ for (j = 0; j < ARRAY_SIZE(mma8452_os_ratio); j++) {
+ if (val == mma8452_os_ratio[j][i])
+ continue;
+
+ val = mma8452_os_ratio[j][i];
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", val);
+ }
+ buf[len - 1] = '\n';
+
+ return len;
}
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mma8452_show_samp_freq_avail);
@@ -317,6 +398,8 @@ static IIO_DEVICE_ATTR(in_accel_scale_available, S_IRUGO,
mma8452_show_scale_avail, NULL, 0);
static IIO_DEVICE_ATTR(in_accel_filter_high_pass_3db_frequency_available,
S_IRUGO, mma8452_show_hp_cutoff_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_accel_oversampling_ratio_available, S_IRUGO,
+ mma8452_show_os_ratio_avail, NULL, 0);
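
[Editor's note: the MODS power mode and the ODR jointly index mma8452_os_ratio[mode][odr]; e.g. in high-resolution mode at 6.25 Hz the part averages 256 samples. A minimal userspace sketch for the new attribute follows — the sysfs path and device index are assumptions, only the attribute name comes from the patch above.]

	#include <stdio.h>

	int main(void)
	{
		char buf[128];
		/* assumed path; "iio:device0" depends on probe order */
		FILE *f = fopen("/sys/bus/iio/devices/iio:device0/"
				"in_accel_oversampling_ratio_available", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("available ratios: %s", buf);
		fclose(f);
		return 0;
	}
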
static int mma8452_get_samp_freq_index(struct mma8452_data *data,
int val, int val2)
@@ -335,24 +418,33 @@ static int mma8452_get_scale_index(struct mma8452_data *data, int val, int val2)
static int mma8452_get_hp_filter_index(struct mma8452_data *data,
int val, int val2)
{
- int i = mma8452_get_odr_index(data);
+ int i, j;
+
+ i = mma8452_get_odr_index(data);
+ j = mma8452_get_power_mode(data);
+ if (j < 0)
+ return j;
- return mma8452_get_int_plus_micros_index(mma8452_hp_filter_cutoff[i],
- ARRAY_SIZE(mma8452_hp_filter_cutoff[0]), val, val2);
+ return mma8452_get_int_plus_micros_index(mma8452_hp_filter_cutoff[j][i],
+ ARRAY_SIZE(mma8452_hp_filter_cutoff[0][0]), val, val2);
}
static int mma8452_read_hp_filter(struct mma8452_data *data, int *hz, int *uHz)
{
- int i, ret;
+ int j, i, ret;
ret = i2c_smbus_read_byte_data(data->client, MMA8452_HP_FILTER_CUTOFF);
if (ret < 0)
return ret;
i = mma8452_get_odr_index(data);
+ j = mma8452_get_power_mode(data);
+ if (j < 0)
+ return j;
+
ret &= MMA8452_HP_FILTER_CUTOFF_SEL_MASK;
- *hz = mma8452_hp_filter_cutoff[i][ret][0];
- *uHz = mma8452_hp_filter_cutoff[i][ret][1];
+ *hz = mma8452_hp_filter_cutoff[j][i][ret][0];
+ *uHz = mma8452_hp_filter_cutoff[j][i][ret][1];
return 0;
}
@@ -414,6 +506,15 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
}
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = mma8452_get_power_mode(data);
+ if (ret < 0)
+ return ret;
+
+ i = mma8452_get_odr_index(data);
+
+ *val = mma8452_os_ratio[ret][i];
+ return IIO_VAL_INT;
}
return -EINVAL;
@@ -480,6 +581,21 @@ fail:
return ret;
}
+static int mma8452_set_power_mode(struct mma8452_data *data, u8 mode)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(data->client,
+ MMA8452_CTRL_REG2);
+ if (reg < 0)
+ return reg;
+
+ reg &= ~MMA8452_CTRL_REG2_MODS_MASK;
+ reg |= mode << MMA8452_CTRL_REG2_MODS_SHIFT;
+
+ return mma8452_change_config(data, MMA8452_CTRL_REG2, reg);
+}
+
/* returns >0 if in freefall mode, 0 if not or <0 if an error occurred */
static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
{
@@ -518,11 +634,7 @@ static int mma8452_set_freefall_mode(struct mma8452_data *data, bool state)
val |= MMA8452_FF_MT_CFG_OAE;
}
- val = mma8452_change_config(data, chip->ev_cfg, val);
- if (val)
- return val;
-
- return 0;
+ return mma8452_change_config(data, chip->ev_cfg, val);
}
static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
@@ -597,6 +709,14 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
return mma8452_change_config(data, MMA8452_DATA_CFG,
data->data_cfg);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = mma8452_get_odr_index(data);
+
+ for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
+ if (mma8452_os_ratio[i][ret] == val)
+ return mma8452_set_power_mode(data, i);
+ }
+
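+		/* no ratio matched at the current ODR: fall through to -EINVAL */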
default:
return -EINVAL;
}
@@ -610,7 +730,7 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct mma8452_data *data = iio_priv(indio_dev);
- int ret, us;
+ int ret, us, power_mode;
switch (info) {
case IIO_EV_INFO_VALUE:
@@ -629,7 +749,11 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- us = ret * mma8452_transient_time_step_us[
+ power_mode = mma8452_get_power_mode(data);
+ if (power_mode < 0)
+ return power_mode;
+
+ us = ret * mma8452_transient_time_step_us[power_mode][
mma8452_get_odr_index(data)];
*val = us / USEC_PER_SEC;
*val2 = us % USEC_PER_SEC;
@@ -677,8 +801,12 @@ static int mma8452_write_thresh(struct iio_dev *indio_dev,
val);
case IIO_EV_INFO_PERIOD:
+ ret = mma8452_get_power_mode(data);
+ if (ret < 0)
+ return ret;
+
steps = (val * USEC_PER_SEC + val2) /
- mma8452_transient_time_step_us[
+ mma8452_transient_time_step_us[ret][
mma8452_get_odr_index(data)];
if (steps < 0 || steps > 0xff)
@@ -785,7 +913,7 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
{
struct mma8452_data *data = iio_priv(indio_dev);
- s64 ts = iio_get_time_ns();
+ s64 ts = iio_get_time_ns(indio_dev);
int src;
src = i2c_smbus_read_byte_data(data->client, data->chip_info->ev_src);
@@ -865,7 +993,7 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -978,7 +1106,8 @@ static struct attribute_group mma8452_event_attribute_group = {
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.scan_index = idx, \
.scan_type = { \
.sign = 's', \
@@ -998,7 +1127,8 @@ static struct attribute_group mma8452_event_attribute_group = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
- BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.scan_index = idx, \
.scan_type = { \
.sign = 's', \
@@ -1171,6 +1301,7 @@ static struct attribute *mma8452_attributes[] = {
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_in_accel_scale_available.dev_attr.attr,
&iio_dev_attr_in_accel_filter_high_pass_3db_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_oversampling_ratio_available.dev_attr.attr,
NULL
};
@@ -1444,8 +1575,8 @@ static int mma8452_probe(struct i2c_client *client,
goto buffer_cleanup;
ret = mma8452_set_freefall_mode(data, false);
- if (ret)
- return ret;
+ if (ret < 0)
+ goto buffer_cleanup;
return 0;
@@ -1558,5 +1689,5 @@ static struct i2c_driver mma8452_driver = {
module_i2c_driver(mma8452_driver);
MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
-MODULE_DESCRIPTION("Freescale MMA8452 accelerometer driver");
+MODULE_DESCRIPTION("Freescale / NXP MMA8452 accelerometer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index d899a4d4307f..bf2704435629 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -391,7 +391,7 @@ static irqreturn_t mma9551_event_handler(int irq, void *private)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_INCLI, 0, (mma_axis + 1),
IIO_EV_TYPE_ROC, IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
out:
mutex_unlock(&data->mutex);
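
[Editor's note: the iio_get_time_ns() hunks here and below track an IIO core change from the same cycle that gave the helper an iio_dev argument, so timestamps are taken with the clock the user selected for that device instead of unconditionally with CLOCK_REALTIME. A minimal sketch of the updated call pattern; example_event_code is a placeholder:]

	static void example_push(struct iio_dev *indio_dev, u64 example_event_code)
	{
		/* timestamp now honours the device's selected timestamp clock */
		iio_push_event(indio_dev, example_event_code,
			       iio_get_time_ns(indio_dev));
	}
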
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index bb05f3efddca..36bf19733be0 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1001,7 +1001,7 @@ static irqreturn_t mma9553_irq_handler(int irq, void *private)
struct iio_dev *indio_dev = private;
struct mma9553_data *data = iio_priv(indio_dev);
- data->timestamp = iio_get_time_ns();
+ data->timestamp = iio_get_time_ns(indio_dev);
/*
* Since we only configure the interrupt pin when an
* event is enabled, we are sure we have at least
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 57f83a67948c..f8dfdb690563 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -29,6 +29,7 @@
#define LSM330_ACCEL_DEV_NAME "lsm330_accel"
#define LSM303AGR_ACCEL_DEV_NAME "lsm303agr_accel"
#define LIS2DH12_ACCEL_DEV_NAME "lis2dh12_accel"
+#define LIS3L02DQ_ACCEL_DEV_NAME "lis3l02dq"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 4d95bfc4786c..da3fb069ec5c 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -215,6 +215,22 @@
#define ST_ACCEL_6_IHL_IRQ_MASK 0x80
#define ST_ACCEL_6_MULTIREAD_BIT true
+/* CUSTOM VALUES FOR SENSOR 7 */
+#define ST_ACCEL_7_ODR_ADDR 0x20
+#define ST_ACCEL_7_ODR_MASK 0x30
+#define ST_ACCEL_7_ODR_AVL_280HZ_VAL 0x00
+#define ST_ACCEL_7_ODR_AVL_560HZ_VAL 0x01
+#define ST_ACCEL_7_ODR_AVL_1120HZ_VAL 0x02
+#define ST_ACCEL_7_ODR_AVL_4480HZ_VAL 0x03
+#define ST_ACCEL_7_PW_ADDR 0x20
+#define ST_ACCEL_7_PW_MASK 0xc0
+#define ST_ACCEL_7_FS_AVL_2_GAIN IIO_G_TO_M_S_2(488)
+#define ST_ACCEL_7_BDU_ADDR 0x21
+#define ST_ACCEL_7_BDU_MASK 0x40
+#define ST_ACCEL_7_DRDY_IRQ_ADDR 0x21
+#define ST_ACCEL_7_DRDY_IRQ_INT1_MASK 0x04
+#define ST_ACCEL_7_MULTIREAD_BIT false
+
static const struct iio_chan_spec st_accel_8bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -662,6 +678,54 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT,
.bootime = 2,
},
+ {
+ /* No WAI register present */
+ .sensors_supported = {
+ [0] = LIS3L02DQ_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_7_ODR_ADDR,
+ .mask = ST_ACCEL_7_ODR_MASK,
+ .odr_avl = {
+ { 280, ST_ACCEL_7_ODR_AVL_280HZ_VAL, },
+ { 560, ST_ACCEL_7_ODR_AVL_560HZ_VAL, },
+ { 1120, ST_ACCEL_7_ODR_AVL_1120HZ_VAL, },
+ { 4480, ST_ACCEL_7_ODR_AVL_4480HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_7_PW_ADDR,
+ .mask = ST_ACCEL_7_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .gain = ST_ACCEL_7_FS_AVL_2_GAIN,
+ },
+ },
+ },
+ /*
+ * The part has a BDU bit, but if it is set the data is never
+ * updated, so don't set it.
+ */
+ .bdu = {
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_7_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_ACCEL_7_DRDY_IRQ_INT1_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
+ .multi_read_bit = ST_ACCEL_7_MULTIREAD_BIT,
+ .bootime = 2,
+ },
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -758,13 +822,15 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
indio_dev->info = &accel_info;
mutex_init(&adata->tb.buf_lock);
- st_sensors_power_enable(indio_dev);
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_accel_sensors_settings),
st_accel_sensors_settings);
if (err < 0)
- return err;
+ goto st_accel_power_off;
adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS;
adata->multiread_bit = adata->sensor_settings->multi_read_bit;
@@ -781,11 +847,11 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
if (err < 0)
- return err;
+ goto st_accel_power_off;
err = st_accel_allocate_ring(indio_dev);
if (err < 0)
- return err;
+ goto st_accel_power_off;
if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -808,6 +874,8 @@ st_accel_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_accel_probe_trigger_error:
st_accel_deallocate_ring(indio_dev);
+st_accel_power_off:
+ st_sensors_power_disable(indio_dev);
return err;
}
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 7333ee9fb11b..e9d427a5df7c 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -80,6 +80,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,h3lis331dl-accel",
.data = H3LIS331DL_DRIVER_NAME,
},
+ {
+ .compatible = "st,lis3l02dq",
+ .data = LIS3L02DQ_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -130,6 +134,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LSM330_ACCEL_DEV_NAME },
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
+ { LIS3L02DQ_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index fcd5847a3fd3..efd43941d45d 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -59,6 +59,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LSM330_ACCEL_DEV_NAME },
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
+ { LIS3L02DQ_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 25378c5882e2..1de31bdd4ce4 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -153,6 +153,18 @@ config AXP288_ADC
To compile this driver as a module, choose M here: the module will be
called axp288_adc.
+config BCM_IPROC_ADC
+ tristate "Broadcom IPROC ADC driver"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on MFD_SYSCON
+ default ARCH_BCM_CYGNUS
+ help
+ Say Y here if you want to add support for the Broadcom iProc
+ static ADC controller.
+
+ The iProc ADC controller has 8 channels; the driver allows the
+ user to read voltage values from them.
+
config BERLIN2_ADC
tristate "Marvell Berlin2 ADC driver"
depends on ARCH_BERLIN
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 38638d46f972..0ba0d500eedb 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_AD799X) += ad799x.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o
obj-$(CONFIG_AXP288_ADC) += axp288_adc.o
+obj-$(CONFIG_BCM_IPROC_ADC) += bcm_iproc_adc.o
obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 2123f0ac2e2a..c0f6a98fd9bd 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -154,12 +154,11 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
-
- ret = ad7266_read_single(st, val, chan->address);
+ ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
+ ret = ad7266_read_single(st, val, chan->address);
+ iio_device_release_direct_mode(indio_dev);
*val = (*val >> 2) & 0xfff;
if (chan->scan_type.sign == 's')
@@ -441,6 +440,7 @@ static int ad7266_probe(struct spi_device *spi)
st->spi = spi;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &ad7266_info;
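
[Editor's note: this and several ad7xxx conversions below replace the open-coded "lock mlock, check iio_buffer_enabled()" pattern with the core helpers, which fail with -EBUSY while the buffer is enabled. A minimal sketch of the pattern, with do_single_read() as a hypothetical stand-in for the driver-specific transfer:]

	static int example_read_raw_locked(struct iio_dev *indio_dev, int *val)
	{
		int ret;

		/* returns -EBUSY if buffered capture is currently running */
		ret = iio_device_claim_direct_mode(indio_dev);
		if (ret)
			return ret;
		ret = do_single_read(indio_dev, val); /* hypothetical helper */
		iio_device_release_direct_mode(indio_dev);
		return ret;
	}
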
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index c0eabf156702..1d90b02732bb 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -115,7 +115,7 @@ static irqreturn_t ad7291_event_handler(int irq, void *private)
u16 t_status, v_status;
u16 command;
int i;
- s64 timestamp = iio_get_time_ns();
+ s64 timestamp = iio_get_time_ns(indio_dev);
if (ad7291_i2c_read(chip, AD7291_T_ALERT_STATUS, &t_status))
return IRQ_HANDLED;
@@ -505,6 +505,7 @@ static int ad7291_probe(struct i2c_client *client,
indio_dev->num_channels = ARRAY_SIZE(ad7291_channels);
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->info = &ad7291_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index 62bb8f7ce4a0..10ec8fce395f 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -163,7 +163,7 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -315,6 +315,7 @@ static int ad7298_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ad7298_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7298_channels);
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index be85c2a0ad97..b7ecf9aab90f 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -70,7 +70,7 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, st->data,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -106,12 +106,11 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad7476_scan_direct(st);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = ad7476_scan_direct(st);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -228,6 +227,7 @@ static int ad7476_probe(struct spi_device *spi)
/* Establish that the iio_dev is a child of the spi device */
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channel;
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index cf172d58cd44..1817ebf5ad84 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -272,30 +272,22 @@ static ssize_t ad7791_write_frequency(struct device *dev,
struct ad7791_state *st = iio_priv(indio_dev);
int i, ret;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
- mutex_unlock(&indio_dev->mlock);
-
- ret = -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++) {
- if (sysfs_streq(ad7791_sample_freq_avail[i], buf)) {
-
- mutex_lock(&indio_dev->mlock);
- st->filter &= ~AD7791_FILTER_RATE_MASK;
- st->filter |= i;
- ad_sd_write_reg(&st->sd, AD7791_REG_FILTER,
- sizeof(st->filter), st->filter);
- mutex_unlock(&indio_dev->mlock);
- ret = 0;
+ for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++)
+ if (sysfs_streq(ad7791_sample_freq_avail[i], buf))
break;
- }
- }
+ if (i == ARRAY_SIZE(ad7791_sample_freq_avail))
+ return -EINVAL;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ st->filter &= ~AD7791_FILTER_RATE_MASK;
+ st->filter |= i;
+ ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter),
+ st->filter);
+ iio_device_release_direct_mode(indio_dev);
- return ret ? ret : len;
+ return len;
}
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
@@ -383,6 +375,7 @@ static int ad7791_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->info->channels;
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 7b07bb651671..847789bae821 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -369,13 +369,6 @@ static ssize_t ad7793_write_frequency(struct device *dev,
long lval;
int i, ret;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
- mutex_unlock(&indio_dev->mlock);
-
ret = kstrtol(buf, 10, &lval);
if (ret)
return ret;
@@ -383,20 +376,21 @@ static ssize_t ad7793_write_frequency(struct device *dev,
if (lval == 0)
return -EINVAL;
- ret = -EINVAL;
-
for (i = 0; i < 16; i++)
- if (lval == st->chip_info->sample_freq_avail[i]) {
- mutex_lock(&indio_dev->mlock);
- st->mode &= ~AD7793_MODE_RATE(-1);
- st->mode |= AD7793_MODE_RATE(i);
- ad_sd_write_reg(&st->sd, AD7793_REG_MODE,
- sizeof(st->mode), st->mode);
- mutex_unlock(&indio_dev->mlock);
- ret = 0;
- }
+ if (lval == st->chip_info->sample_freq_avail[i])
+ break;
+ if (i == 16)
+ return -EINVAL;
- return ret ? ret : len;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ st->mode &= ~AD7793_MODE_RATE(-1);
+ st->mode |= AD7793_MODE_RATE(i);
+ ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
+ iio_device_release_direct_mode(indio_dev);
+
+ return len;
}
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
@@ -790,6 +784,7 @@ static int ad7793_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channels;
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 2d3c397e66ad..7a483bfbd70c 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -122,7 +122,7 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, st->data,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -156,12 +156,11 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad7887_scan_direct(st, chan->address);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = ad7887_scan_direct(st, chan->address);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -265,6 +264,7 @@ static int ad7887_probe(struct spi_device *spi)
/* Establish that the iio_dev is a child of the spi device */
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad7887_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 45e29ccd824f..77a675e11ebb 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -181,7 +181,7 @@ static irqreturn_t ad7923_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -233,12 +233,11 @@ static int ad7923_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad7923_scan_direct(st, chan->address);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = ad7923_scan_direct(st, chan->address);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -289,6 +288,7 @@ static int ad7923_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = info->channels;
indio_dev->num_channels = info->num_channels;
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index a3f5254f4e51..b6163764489c 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -212,7 +212,7 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
goto out;
iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -282,12 +282,11 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad799x_scan_direct(st, chan->scan_index);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = ad799x_scan_direct(st, chan->scan_index);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -395,11 +394,9 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev,
struct ad799x_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- ret = -EBUSY;
- goto done;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
if (state)
st->config |= BIT(chan->scan_index) << AD799X_CHANNEL_SHIFT;
@@ -412,10 +409,7 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev,
st->config &= ~AD7998_ALERT_EN;
ret = ad799x_write_config(st, st->config);
-
-done:
- mutex_unlock(&indio_dev->mlock);
-
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -508,7 +502,7 @@ static irqreturn_t ad799x_event_handler(int irq, void *private)
(i >> 1),
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
done:
@@ -812,6 +806,7 @@ static int ad799x_probe(struct i2c_client *client,
st->client = client;
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->name = id->name;
indio_dev->info = st->chip_config->info;
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
new file mode 100644
index 000000000000..21d38c8af21e
--- /dev/null
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -0,0 +1,644 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <linux/iio/iio.h>
+
+/* The below registers are common to the IPROC ADC and touchscreen IP */
+#define IPROC_REGCTL1 0x00
+#define IPROC_REGCTL2 0x04
+#define IPROC_INTERRUPT_THRES 0x08
+#define IPROC_INTERRUPT_MASK 0x0c
+#define IPROC_INTERRUPT_STATUS 0x10
+#define IPROC_ANALOG_CONTROL 0x1c
+#define IPROC_CONTROLLER_STATUS 0x14
+#define IPROC_AUX_DATA 0x20
+#define IPROC_SOFT_BYPASS_CONTROL 0x38
+#define IPROC_SOFT_BYPASS_DATA 0x3C
+
+/* IPROC ADC Channel register offsets */
+#define IPROC_ADC_CHANNEL_REGCTL1 0x800
+#define IPROC_ADC_CHANNEL_REGCTL2 0x804
+#define IPROC_ADC_CHANNEL_STATUS 0x808
+#define IPROC_ADC_CHANNEL_INTERRUPT_STATUS 0x80c
+#define IPROC_ADC_CHANNEL_INTERRUPT_MASK 0x810
+#define IPROC_ADC_CHANNEL_DATA 0x814
+#define IPROC_ADC_CHANNEL_OFFSET 0x20
+
+/* Bit definitions for IPROC_REGCTL2 */
+#define IPROC_ADC_AUXIN_SCAN_ENA BIT(0)
+#define IPROC_ADC_PWR_LDO BIT(5)
+#define IPROC_ADC_PWR_ADC BIT(4)
+#define IPROC_ADC_PWR_BG BIT(3)
+#define IPROC_ADC_CONTROLLER_EN BIT(17)
+
+/* Bit definitions for IPROC_INTERRUPT_MASK and IPROC_INTERRUPT_STATUS */
+#define IPROC_ADC_AUXDATA_RDY_INTR BIT(3)
+#define IPROC_ADC_INTR 9
+#define IPROC_ADC_INTR_MASK (0xFF << IPROC_ADC_INTR)
+
+/* Bit definitions for IPROC_ANALOG_CONTROL */
+#define IPROC_ADC_CHANNEL_SEL 11
+#define IPROC_ADC_CHANNEL_SEL_MASK (0x7 << IPROC_ADC_CHANNEL_SEL)
+
+/* Bit definitions for IPROC_ADC_CHANNEL_REGCTL1 */
+#define IPROC_ADC_CHANNEL_ROUNDS 0x2
+#define IPROC_ADC_CHANNEL_ROUNDS_MASK (0x3F << IPROC_ADC_CHANNEL_ROUNDS)
+#define IPROC_ADC_CHANNEL_MODE 0x1
+#define IPROC_ADC_CHANNEL_MODE_MASK (0x1 << IPROC_ADC_CHANNEL_MODE)
+#define IPROC_ADC_CHANNEL_MODE_TDM 0x1
+#define IPROC_ADC_CHANNEL_MODE_SNAPSHOT 0x0
+#define IPROC_ADC_CHANNEL_ENABLE 0x0
+#define IPROC_ADC_CHANNEL_ENABLE_MASK 0x1
+
+/* Bit definitions for IPROC_ADC_CHANNEL_REGCTL2 */
+#define IPROC_ADC_CHANNEL_WATERMARK 0x0
+#define IPROC_ADC_CHANNEL_WATERMARK_MASK \
+ (0x3F << IPROC_ADC_CHANNEL_WATERMARK)
+
+#define IPROC_ADC_WATER_MARK_LEVEL 0x1
+
+/* Bit definitions for IPROC_ADC_CHANNEL_STATUS */
+#define IPROC_ADC_CHANNEL_DATA_LOST 0x0
+#define IPROC_ADC_CHANNEL_DATA_LOST_MASK \
+ (0x1 << IPROC_ADC_CHANNEL_DATA_LOST)
+#define IPROC_ADC_CHANNEL_VALID_ENTRIES 0x1
+#define IPROC_ADC_CHANNEL_VALID_ENTRIES_MASK \
+ (0xFF << IPROC_ADC_CHANNEL_VALID_ENTRIES)
+#define IPROC_ADC_CHANNEL_TOTAL_ENTRIES 0x9
+#define IPROC_ADC_CHANNEL_TOTAL_ENTRIES_MASK \
+ (0xFF << IPROC_ADC_CHANNEL_TOTAL_ENTRIES)
+
+/* Bit definitions for IPROC_ADC_CHANNEL_INTERRUPT_MASK */
+#define IPROC_ADC_CHANNEL_WTRMRK_INTR 0x0
+#define IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK \
+ (0x1 << IPROC_ADC_CHANNEL_WTRMRK_INTR)
+#define IPROC_ADC_CHANNEL_FULL_INTR 0x1
+#define IPROC_ADC_CHANNEL_FULL_INTR_MASK \
+ (0x1 << IPROC_ADC_CHANNEL_FULL_INTR)
+#define IPROC_ADC_CHANNEL_EMPTY_INTR 0x2
+#define IPROC_ADC_CHANNEL_EMPTY_INTR_MASK \
+ (0x1 << IPROC_ADC_CHANNEL_EMPTY_INTR)
+
+#define IPROC_ADC_WATER_MARK_INTR_ENABLE 0x1
+
+/* Number of times to retry setting the interrupt mask register */
+#define IPROC_ADC_INTMASK_RETRY_ATTEMPTS 10
+
+#define IPROC_ADC_READ_TIMEOUT (HZ*2)
+
+#define iproc_adc_dbg_reg(dev, priv, reg) \
+do { \
+ u32 val; \
+ regmap_read(priv->regmap, reg, &val); \
+ dev_dbg(dev, "%20s= 0x%08x\n", #reg, val); \
+} while (0)
+
+struct iproc_adc_priv {
+ struct regmap *regmap;
+ struct clk *adc_clk;
+ struct mutex mutex;
+ int irqno;
+ int chan_val;
+ int chan_id;
+ struct completion completion;
+};
+
+static void iproc_adc_reg_dump(struct iio_dev *indio_dev)
+{
+ struct device *dev = &indio_dev->dev;
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_REGCTL1);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_REGCTL2);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_THRES);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_MASK);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_STATUS);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_CONTROLLER_STATUS);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_ANALOG_CONTROL);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_AUX_DATA);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_CONTROL);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
+}
+
+static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
+{
+ u32 channel_intr_status;
+ u32 intr_status;
+ u32 intr_mask;
+ struct iio_dev *indio_dev = data;
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+ /*
+ * This interrupt is shared with the touchscreen driver.
+ * Make sure this interrupt is intended for us.
+ * Handle only ADC channel specific interrupts.
+ */
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &intr_mask);
+ intr_status = intr_status & intr_mask;
+ channel_intr_status = (intr_status & IPROC_ADC_INTR_MASK) >>
+ IPROC_ADC_INTR;
+ if (channel_intr_status)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
+{
+ irqreturn_t retval = IRQ_NONE;
+ struct iproc_adc_priv *adc_priv;
+ struct iio_dev *indio_dev = data;
+ unsigned int valid_entries;
+ u32 intr_status;
+ u32 intr_channels;
+ u32 channel_status;
+ u32 ch_intr_status;
+
+ adc_priv = iio_priv(indio_dev);
+
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
+ dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n",
+ intr_status);
+
+ intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
+ if (intr_channels) {
+ regmap_read(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_INTERRUPT_STATUS +
+ IPROC_ADC_CHANNEL_OFFSET * adc_priv->chan_id,
+ &ch_intr_status);
+
+ if (ch_intr_status & IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK) {
+ regmap_read(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_STATUS +
+ IPROC_ADC_CHANNEL_OFFSET *
+ adc_priv->chan_id,
+ &channel_status);
+
+ valid_entries = ((channel_status &
+ IPROC_ADC_CHANNEL_VALID_ENTRIES_MASK) >>
+ IPROC_ADC_CHANNEL_VALID_ENTRIES);
+ if (valid_entries >= 1) {
+ regmap_read(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_DATA +
+ IPROC_ADC_CHANNEL_OFFSET *
+ adc_priv->chan_id,
+ &adc_priv->chan_val);
+ complete(&adc_priv->completion);
+ } else {
+ dev_err(&indio_dev->dev,
+ "No data rcvd on channel %d\n",
+ adc_priv->chan_id);
+ }
+ regmap_write(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_INTERRUPT_MASK +
+ IPROC_ADC_CHANNEL_OFFSET *
+ adc_priv->chan_id,
+ (ch_intr_status &
+ ~(IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK)));
+ }
+ regmap_write(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_INTERRUPT_STATUS +
+ IPROC_ADC_CHANNEL_OFFSET * adc_priv->chan_id,
+ ch_intr_status);
+ regmap_write(adc_priv->regmap, IPROC_INTERRUPT_STATUS,
+ intr_channels);
+ retval = IRQ_HANDLED;
+ }
+
+ return retval;
+}
+
+static int iproc_adc_do_read(struct iio_dev *indio_dev,
+ int channel,
+ u16 *p_adc_data)
+{
+ int read_len = 0;
+ u32 val;
+ u32 mask;
+ u32 val_check;
+ int failed_cnt = 0;
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+ mutex_lock(&adc_priv->mutex);
+
+ /*
+ * After a read is complete the ADC interrupts will be disabled so
+ * we can assume this section of code is safe from interrupts.
+ */
+ adc_priv->chan_val = -1;
+ adc_priv->chan_id = channel;
+
+ reinit_completion(&adc_priv->completion);
+ /* Clear any pending interrupt */
+ regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_STATUS,
+ IPROC_ADC_INTR_MASK | IPROC_ADC_AUXDATA_RDY_INTR,
+ ((0x0 << channel) << IPROC_ADC_INTR) |
+ IPROC_ADC_AUXDATA_RDY_INTR);
+
+ /* Configure channel for snapshot mode and enable */
+ val = (BIT(IPROC_ADC_CHANNEL_ROUNDS) |
+ (IPROC_ADC_CHANNEL_MODE_SNAPSHOT << IPROC_ADC_CHANNEL_MODE) |
+ (0x1 << IPROC_ADC_CHANNEL_ENABLE));
+
+ mask = IPROC_ADC_CHANNEL_ROUNDS_MASK | IPROC_ADC_CHANNEL_MODE_MASK |
+ IPROC_ADC_CHANNEL_ENABLE_MASK;
+ regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_REGCTL1 +
+ IPROC_ADC_CHANNEL_OFFSET * channel),
+ mask, val);
+
+ /* Set the Watermark for a channel */
+ regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_REGCTL2 +
+ IPROC_ADC_CHANNEL_OFFSET * channel),
+ IPROC_ADC_CHANNEL_WATERMARK_MASK,
+ 0x1);
+
+ /* Enable water mark interrupt */
+ regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_INTERRUPT_MASK +
+ IPROC_ADC_CHANNEL_OFFSET *
+ channel),
+ IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK,
+ IPROC_ADC_WATER_MARK_INTR_ENABLE);
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val);
+
+ /* Enable ADC interrupt for a channel */
+ val |= (BIT(channel) << IPROC_ADC_INTR);
+ regmap_write(adc_priv->regmap, IPROC_INTERRUPT_MASK, val);
+
+ /*
+ * There seems to be a very rare issue where writing to this register
+ * does not take effect. To work around the issue we will try multiple
+ * writes. In total we will spend about 10*10 = 100 us attempting this.
+ * Testing has shown that this may loop a few times, but we have never
+ * hit the full count.
+ */
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
+ while (val_check != val) {
+ failed_cnt++;
+
+ if (failed_cnt > IPROC_ADC_INTMASK_RETRY_ATTEMPTS)
+ break;
+
+ udelay(10);
+ regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_MASK,
+ IPROC_ADC_INTR_MASK,
+ ((0x1 << channel) <<
+ IPROC_ADC_INTR));
+
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
+ }
+
+ if (failed_cnt) {
+ dev_dbg(&indio_dev->dev,
+ "IntMask failed (%d times)", failed_cnt);
+ if (failed_cnt > IPROC_ADC_INTMASK_RETRY_ATTEMPTS) {
+ dev_err(&indio_dev->dev,
+ "IntMask set failed. Read will likely fail.");
+ read_len = -EIO;
+ goto adc_err;
+ }
+ }
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
+
+ if (wait_for_completion_timeout(&adc_priv->completion,
+ IPROC_ADC_READ_TIMEOUT) > 0) {
+
+ /* Only the lower 16 bits are relevant */
+ *p_adc_data = adc_priv->chan_val & 0xFFFF;
+ read_len = sizeof(*p_adc_data);
+
+ } else {
+ /*
+ * We never got the interrupt; something went wrong.
+ * The interrupt may still be coming and we do not want
+ * it now, so disable the ADC interrupt and clear the
+ * status to put it back into its normal state.
+ */
+ read_len = -ETIMEDOUT;
+ goto adc_err;
+ }
+ mutex_unlock(&adc_priv->mutex);
+
+ return read_len;
+
+adc_err:
+ regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_MASK,
+ IPROC_ADC_INTR_MASK,
+ ((0x0 << channel) << IPROC_ADC_INTR));
+
+ regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_STATUS,
+ IPROC_ADC_INTR_MASK,
+ ((0x0 << channel) << IPROC_ADC_INTR));
+
+ dev_err(&indio_dev->dev, "Timed out waiting for ADC data!\n");
+ iproc_adc_reg_dump(indio_dev);
+ mutex_unlock(&adc_priv->mutex);
+
+ return read_len;
+}
+
+static int iproc_adc_enable(struct iio_dev *indio_dev)
+{
+ u32 val;
+ u32 channel_id;
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+ int ret;
+
+ /* Set i_amux = 3'b000, i.e. select channel 0 */
+ ret = regmap_update_bits(adc_priv->regmap, IPROC_ANALOG_CONTROL,
+ IPROC_ADC_CHANNEL_SEL_MASK, 0);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write IPROC_ANALOG_CONTROL %d\n", ret);
+ return ret;
+ }
+ adc_priv->chan_val = -1;
+
+ /*
+ * PWR up LDO, ADC, and Band Gap (0 to enable)
+ * Also enable ADC controller (set high)
+ */
+ ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to read IPROC_REGCTL2 %d\n", ret);
+ return ret;
+ }
+
+ val &= ~(IPROC_ADC_PWR_LDO | IPROC_ADC_PWR_ADC | IPROC_ADC_PWR_BG);
+
+ ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write IPROC_REGCTL2 %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to read IPROC_REGCTL2 %d\n", ret);
+ return ret;
+ }
+
+ val |= IPROC_ADC_CONTROLLER_EN;
+ ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write IPROC_REGCTL2 %d\n", ret);
+ return ret;
+ }
+
+ for (channel_id = 0; channel_id < indio_dev->num_channels;
+ channel_id++) {
+ ret = regmap_write(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_INTERRUPT_MASK +
+ IPROC_ADC_CHANNEL_OFFSET * channel_id, 0);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write ADC_CHANNEL_INTERRUPT_MASK %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regmap_write(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_INTERRUPT_STATUS +
+ IPROC_ADC_CHANNEL_OFFSET * channel_id, 0);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write ADC_CHANNEL_INTERRUPT_STATUS %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void iproc_adc_disable(struct iio_dev *indio_dev)
+{
+ u32 val;
+ int ret;
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+ ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to read IPROC_REGCTL2 %d\n", ret);
+ return;
+ }
+
+ val &= ~IPROC_ADC_CONTROLLER_EN;
+ ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write IPROC_REGCTL2 %d\n", ret);
+ return;
+ }
+}
+
+static int iproc_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ u16 adc_data;
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = iproc_adc_do_read(indio_dev, chan->channel, &adc_data);
+ if (err < 0)
+ return err;
+ *val = adc_data;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = 1800;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
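+
+/*
+ * Editor's note: the SCALE case above reports 1800/2^10 via
+ * IIO_VAL_FRACTIONAL_LOG2, i.e. a 1.8 V range over a 10-bit result
+ * (~1.758 mV per LSB). A minimal sketch of the conversion userspace
+ * effectively performs, assuming those values:
+ *
+ *	static long long example_raw_to_uv(unsigned int raw)
+ *	{
+ *		// uV = raw * 1800 mV * 1000 / 2^10
+ *		return ((long long)raw * 1800 * 1000) >> 10;
+ *	}
+ */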
+
+static const struct iio_info iproc_adc_iio_info = {
+ .read_raw = &iproc_adc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define IPROC_ADC_CHANNEL(_index, _id) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = _id, \
+}
+
+static const struct iio_chan_spec iproc_adc_iio_channels[] = {
+ IPROC_ADC_CHANNEL(0, "adc0"),
+ IPROC_ADC_CHANNEL(1, "adc1"),
+ IPROC_ADC_CHANNEL(2, "adc2"),
+ IPROC_ADC_CHANNEL(3, "adc3"),
+ IPROC_ADC_CHANNEL(4, "adc4"),
+ IPROC_ADC_CHANNEL(5, "adc5"),
+ IPROC_ADC_CHANNEL(6, "adc6"),
+ IPROC_ADC_CHANNEL(7, "adc7"),
+};
+
+static int iproc_adc_probe(struct platform_device *pdev)
+{
+ struct iproc_adc_priv *adc_priv;
+ struct iio_dev *indio_dev = NULL;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(*adc_priv));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ adc_priv = iio_priv(indio_dev);
+ platform_set_drvdata(pdev, indio_dev);
+
+ mutex_init(&adc_priv->mutex);
+
+ init_completion(&adc_priv->completion);
+
+ adc_priv->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "adc-syscon");
+ if (IS_ERR(adc_priv->regmap)) {
+ dev_err(&pdev->dev, "failed to get handle for tsc syscon\n");
+ ret = PTR_ERR(adc_priv->regmap);
+ return ret;
+ }
+
+ adc_priv->adc_clk = devm_clk_get(&pdev->dev, "tsc_clk");
+ if (IS_ERR(adc_priv->adc_clk)) {
+ dev_err(&pdev->dev,
+ "failed getting clock tsc_clk\n");
+ ret = PTR_ERR(adc_priv->adc_clk);
+ return ret;
+ }
+
+ adc_priv->irqno = platform_get_irq(pdev, 0);
+ if (adc_priv->irqno <= 0) {
+ dev_err(&pdev->dev, "platform_get_irq failed\n");
+ ret = -ENODEV;
+ return ret;
+ }
+
+ ret = regmap_update_bits(adc_priv->regmap, IPROC_REGCTL2,
+ IPROC_ADC_AUXIN_SCAN_ENA, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to write IPROC_REGCTL2 %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
+ iproc_adc_interrupt_thread,
+ iproc_adc_interrupt_handler,
+ IRQF_SHARED, "iproc-adc", indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq error %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(adc_priv->adc_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "clk_prepare_enable failed %d\n", ret);
+ return ret;
+ }
+
+ ret = iproc_adc_enable(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable adc %d\n", ret);
+ goto err_adc_enable;
+ }
+
+ indio_dev->name = "iproc-static-adc";
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &iproc_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = iproc_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iproc_adc_iio_channels);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "iio_device_register failed:err %d\n", ret);
+ goto err_clk;
+ }
+
+ return 0;
+
+err_clk:
+ iproc_adc_disable(indio_dev);
+err_adc_enable:
+ clk_disable_unprepare(adc_priv->adc_clk);
+
+ return ret;
+}
+
+static int iproc_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iproc_adc_disable(indio_dev);
+ clk_disable_unprepare(adc_priv->adc_clk);
+
+ return 0;
+}
+
+static const struct of_device_id iproc_adc_of_match[] = {
+ {.compatible = "brcm,iproc-static-adc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, iproc_adc_of_match);
+
+static struct platform_driver iproc_adc_driver = {
+ .probe = iproc_adc_probe,
+ .remove = iproc_adc_remove,
+ .driver = {
+ .name = "iproc-static-adc",
+ .of_match_table = of_match_ptr(iproc_adc_of_match),
+ },
+};
+module_platform_driver(iproc_adc_driver);
+
+MODULE_DESCRIPTION("Broadcom iProc ADC controller driver");
+MODULE_AUTHOR("Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index 8254f529b2a9..91636c0ba5b5 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -186,7 +186,7 @@ done:
if (!sample_invalid)
iio_push_to_buffers_with_timestamp(indio_dev, data,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index c73c6c62a6ac..678e8c7ea763 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -400,7 +400,7 @@ static void hi8435_iio_push_event(struct iio_dev *idev, unsigned int val)
iio_push_event(idev,
IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
IIO_EV_TYPE_THRESH, dir),
- iio_get_time_ns());
+ iio_get_time_ns(idev));
}
}
@@ -455,6 +455,7 @@ static int hi8435_probe(struct spi_device *spi)
mutex_init(&priv->lock);
idev->dev.parent = &spi->dev;
+ idev->dev.of_node = spi->dev.of_node;
idev->name = spi_get_device_id(spi)->name;
idev->modes = INDIO_DIRECT_MODE;
idev->info = &hi8435_info;
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 502f2fbe8aef..955f3fdaf519 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -465,7 +465,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
s64 time_a, time_b;
unsigned int alert;
- time_a = iio_get_time_ns();
+ time_a = iio_get_time_ns(indio_dev);
/*
* Because the timer thread and the chip conversion clock
@@ -504,7 +504,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
data[i++] = val;
}
- time_b = iio_get_time_ns();
+ time_b = iio_get_time_ns(indio_dev);
iio_push_to_buffers_with_timestamp(indio_dev,
(unsigned int *)data, time_a);
@@ -554,7 +554,7 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
chip->allow_async_readout);
- chip->prev_ns = iio_get_time_ns();
+ chip->prev_ns = iio_get_time_ns(indio_dev);
chip->task = kthread_run(ina2xx_capture_thread, (void *)indio_dev,
"%s:%d-%uus", indio_dev->name, indio_dev->id,
@@ -691,6 +691,7 @@ static int ina2xx_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->channels = ina2xx_channels;
indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels);
indio_dev->name = id->name;
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 41d495c6035e..712fbd2b1f16 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -426,6 +426,7 @@ static int max1027_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->info = &max1027_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->info->channels;
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 998dc3caad4c..841a13c9b6ea 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -25,6 +25,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -788,7 +790,7 @@ static irqreturn_t max1363_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct max1363_state *st = iio_priv(indio_dev);
- s64 timestamp = iio_get_time_ns();
+ s64 timestamp = iio_get_time_ns(indio_dev);
unsigned long mask, loc;
u8 rx;
u8 tx[2] = { st->setupbyte,
@@ -1506,7 +1508,8 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
if (b_sent < 0)
goto done_free;
- iio_push_to_buffers_with_timestamp(indio_dev, rxbuf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, rxbuf,
+ iio_get_time_ns(indio_dev));
done_free:
kfree(rxbuf);
@@ -1516,6 +1519,56 @@ done:
return IRQ_HANDLED;
}
+#ifdef CONFIG_OF
+
+#define MAX1363_COMPATIBLE(of_compatible, cfg) { \
+ .compatible = of_compatible, \
+ .data = &max1363_chip_info_tbl[cfg], \
+}
+
+static const struct of_device_id max1363_of_match[] = {
+ MAX1363_COMPATIBLE("maxim,max1361", max1361),
+ MAX1363_COMPATIBLE("maxim,max1362", max1362),
+ MAX1363_COMPATIBLE("maxim,max1363", max1363),
+ MAX1363_COMPATIBLE("maxim,max1364", max1364),
+ MAX1363_COMPATIBLE("maxim,max1036", max1036),
+ MAX1363_COMPATIBLE("maxim,max1037", max1037),
+ MAX1363_COMPATIBLE("maxim,max1038", max1038),
+ MAX1363_COMPATIBLE("maxim,max1039", max1039),
+ MAX1363_COMPATIBLE("maxim,max1136", max1136),
+ MAX1363_COMPATIBLE("maxim,max1137", max1137),
+ MAX1363_COMPATIBLE("maxim,max1138", max1138),
+ MAX1363_COMPATIBLE("maxim,max1139", max1139),
+ MAX1363_COMPATIBLE("maxim,max1236", max1236),
+ MAX1363_COMPATIBLE("maxim,max1237", max1237),
+ MAX1363_COMPATIBLE("maxim,max1238", max1238),
+ MAX1363_COMPATIBLE("maxim,max1239", max1239),
+ MAX1363_COMPATIBLE("maxim,max11600", max11600),
+ MAX1363_COMPATIBLE("maxim,max11601", max11601),
+ MAX1363_COMPATIBLE("maxim,max11602", max11602),
+ MAX1363_COMPATIBLE("maxim,max11603", max11603),
+ MAX1363_COMPATIBLE("maxim,max11604", max11604),
+ MAX1363_COMPATIBLE("maxim,max11605", max11605),
+ MAX1363_COMPATIBLE("maxim,max11606", max11606),
+ MAX1363_COMPATIBLE("maxim,max11607", max11607),
+ MAX1363_COMPATIBLE("maxim,max11608", max11608),
+ MAX1363_COMPATIBLE("maxim,max11609", max11609),
+ MAX1363_COMPATIBLE("maxim,max11610", max11610),
+ MAX1363_COMPATIBLE("maxim,max11611", max11611),
+ MAX1363_COMPATIBLE("maxim,max11612", max11612),
+ MAX1363_COMPATIBLE("maxim,max11613", max11613),
+ MAX1363_COMPATIBLE("maxim,max11614", max11614),
+ MAX1363_COMPATIBLE("maxim,max11615", max11615),
+ MAX1363_COMPATIBLE("maxim,max11616", max11616),
+ MAX1363_COMPATIBLE("maxim,max11617", max11617),
+ MAX1363_COMPATIBLE("maxim,max11644", max11644),
+ MAX1363_COMPATIBLE("maxim,max11645", max11645),
+ MAX1363_COMPATIBLE("maxim,max11646", max11646),
+ MAX1363_COMPATIBLE("maxim,max11647", max11647),
+ { /* sentinel */ }
+};
+#endif
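
[Editor's note: the probe hunk below prefers OF match data when the device was instantiated from the device tree and falls back to the legacy i2c_device_id table otherwise. A minimal sketch of that selection, with example_chip_info and example_chip_info_tbl as hypothetical stand-ins for the driver's own types:]

	static const struct example_chip_info *
	example_get_chip_info(struct i2c_client *client,
			      const struct i2c_device_id *id)
	{
		/* non-NULL only when matched via the OF table above */
		const void *data = of_device_get_match_data(&client->dev);

		return data ? data : &example_chip_info_tbl[id->driver_data];
	}
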
+
static int max1363_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1523,6 +1576,7 @@ static int max1363_probe(struct i2c_client *client,
struct max1363_state *st;
struct iio_dev *indio_dev;
struct regulator *vref;
+ const struct of_device_id *match;
indio_dev = devm_iio_device_alloc(&client->dev,
sizeof(struct max1363_state));
@@ -1549,7 +1603,12 @@ static int max1363_probe(struct i2c_client *client,
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
- st->chip_info = &max1363_chip_info_tbl[id->driver_data];
+ match = of_match_device(of_match_ptr(max1363_of_match),
+ &client->dev);
+ if (match)
+ st->chip_info = of_device_get_match_data(&client->dev);
+ else
+ st->chip_info = &max1363_chip_info_tbl[id->driver_data];
st->client = client;
st->vref_uv = st->chip_info->int_vref_mv * 1000;
@@ -1587,6 +1646,7 @@ static int max1363_probe(struct i2c_client *client,
/* Establish that the iio_dev is a child of the i2c device */
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->name = id->name;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
@@ -1692,6 +1752,7 @@ MODULE_DEVICE_TABLE(i2c, max1363_id);
static struct i2c_driver max1363_driver = {
.driver = {
.name = "max1363",
+ .of_match_table = of_match_ptr(max1363_of_match),
},
.probe = max1363_probe,
.remove = max1363_remove,
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index a850ca7d1eda..634717ae12f3 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -308,6 +308,7 @@ static int mcp320x_probe(struct spi_device *spi)
adc->spi = spi;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index d1172dc1e8e2..254135e07792 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -352,6 +352,7 @@ static int mcp3422_probe(struct i2c_client *client,
mutex_init(&adc->lock);
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->name = dev_name(&client->dev);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp3422_info;
diff --git a/drivers/iio/adc/mxs-lradc.c b/drivers/iio/adc/mxs-lradc.c
index ad26da1edbee..b84d37c80a94 100644
--- a/drivers/iio/adc/mxs-lradc.c
+++ b/drivers/iio/adc/mxs-lradc.c
@@ -373,13 +373,6 @@ static u32 mxs_lradc_plate_mask(struct mxs_lradc *lradc)
return LRADC_CTRL0_MX28_PLATE_MASK;
}
-static u32 mxs_lradc_irq_en_mask(struct mxs_lradc *lradc)
-{
- if (lradc->soc == IMX23_LRADC)
- return LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK;
- return LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK;
-}
-
static u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc)
{
if (lradc->soc == IMX23_LRADC)
@@ -1120,18 +1113,16 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
{
struct input_dev *input;
struct device *dev = lradc->dev;
- int ret;
if (!lradc->use_touchscreen)
return 0;
- input = input_allocate_device();
+ input = devm_input_allocate_device(dev);
if (!input)
return -ENOMEM;
input->name = DRIVER_NAME;
input->id.bustype = BUS_HOST;
- input->dev.parent = dev;
input->open = mxs_lradc_ts_open;
input->close = mxs_lradc_ts_close;
@@ -1146,20 +1137,8 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
lradc->ts_input = input;
input_set_drvdata(input, lradc);
- ret = input_register_device(input);
- if (ret)
- input_free_device(lradc->ts_input);
-
- return ret;
-}
-
-static void mxs_lradc_ts_unregister(struct mxs_lradc *lradc)
-{
- if (!lradc->use_touchscreen)
- return;
- mxs_lradc_disable_ts(lradc);
- input_unregister_device(lradc->ts_input);
+ return input_register_device(input);
}
/*
@@ -1510,7 +1489,9 @@ static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
{
int i;
- mxs_lradc_reg_clear(lradc, mxs_lradc_irq_en_mask(lradc), LRADC_CTRL1);
+ mxs_lradc_reg_clear(lradc,
+ lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
+ LRADC_CTRL1);
for (i = 0; i < LRADC_MAX_DELAY_CHANS; i++)
mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(i));
@@ -1721,13 +1702,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
ret = iio_device_register(iio);
if (ret) {
dev_err(dev, "Failed to register IIO device\n");
- goto err_ts;
+ return ret;
}
return 0;
-err_ts:
- mxs_lradc_ts_unregister(lradc);
err_ts_register:
mxs_lradc_hw_stop(lradc);
err_dev:
@@ -1745,7 +1724,6 @@ static int mxs_lradc_remove(struct platform_device *pdev)
struct mxs_lradc *lradc = iio_priv(iio);
iio_device_unregister(iio);
- mxs_lradc_ts_unregister(lradc);
mxs_lradc_hw_stop(lradc);
mxs_lradc_trigger_remove(iio);
iio_triggered_buffer_cleanup(iio);
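
[Editor's note: the mxs-lradc hunks above switch the touchscreen input device to devm_input_allocate_device(), which unregisters and frees the device automatically on driver detach; that is why mxs_lradc_ts_unregister() and the error-path input_free_device() can be dropped. A minimal sketch of the resulting registration; the names are placeholders:]

	static int example_ts_register(struct device *dev)
	{
		struct input_dev *input = devm_input_allocate_device(dev);

		if (!input)
			return -ENOMEM;
		input->name = "example-ts";
		input->id.bustype = BUS_HOST;
		/* devm-managed: no matching input_unregister_device() needed */
		return input_register_device(input);
	}
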
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index e525aa6475c4..db9b829ccf0d 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -79,10 +79,29 @@ static const struct iio_chan_spec nau7802_chan_array[] = {
static const u16 nau7802_sample_freq_avail[] = {10, 20, 40, 80,
10, 10, 10, 320};
+static ssize_t nau7802_show_scales(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nau7802_state *st = iio_priv(dev_to_iio_dev(dev));
+ int i, len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%09d ",
+ st->scale_avail[i]);
+
+ buf[len-1] = '\n';
+
+ return len;
+}
+
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 40 80 320");
+static IIO_DEVICE_ATTR(in_voltage_scale_available, S_IRUGO, nau7802_show_scales,
+ NULL, 0);
+
static struct attribute *nau7802_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
NULL
};
@@ -414,6 +433,7 @@ static int nau7802_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->name = dev_name(&client->dev);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &nau7802_info;
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 9fd032d9f402..319172cf7da8 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/acpi.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -138,7 +139,8 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p)
if (ret < 0)
goto out;
buf[0] = ret;
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
@@ -149,12 +151,24 @@ static int adc081c_probe(struct i2c_client *client,
{
struct iio_dev *iio;
struct adc081c *adc;
- struct adcxx1c_model *model = &adcxx1c_models[id->driver_data];
+ struct adcxx1c_model *model;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
+ if (ACPI_COMPANION(&client->dev)) {
+ const struct acpi_device_id *ad_id;
+
+ ad_id = acpi_match_device(client->dev.driver->acpi_match_table,
+ &client->dev);
+ if (!ad_id)
+ return -ENODEV;
+ model = &adcxx1c_models[ad_id->driver_data];
+ } else {
+ model = &adcxx1c_models[id->driver_data];
+ }
+
iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!iio)
return -ENOMEM;
@@ -172,6 +186,7 @@ static int adc081c_probe(struct i2c_client *client,
return err;
iio->dev.parent = &client->dev;
+ iio->dev.of_node = client->dev.of_node;
iio->name = dev_name(&client->dev);
iio->modes = INDIO_DIRECT_MODE;
iio->info = &adc081c_info;
@@ -231,10 +246,21 @@ static const struct of_device_id adc081c_of_match[] = {
MODULE_DEVICE_TABLE(of, adc081c_of_match);
#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id adc081c_acpi_match[] = {
+ { "ADC081C", ADC081C },
+ { "ADC101C", ADC101C },
+ { "ADC121C", ADC121C },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match);
+#endif
+
static struct i2c_driver adc081c_driver = {
.driver = {
.name = "adc081c",
.of_match_table = of_match_ptr(adc081c_of_match),
+ .acpi_match_table = ACPI_PTR(adc081c_acpi_match),
},
.probe = adc081c_probe,
.remove = adc081c_remove,
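
Under ACPI enumeration the i2c_device_id argument is NULL, so the chip variant has to be recovered from the driver's ACPI match table instead of the I2C id table. A condensed sketch of the dispatch (helper name hypothetical):

    /* Sketch: resolve the chip variant from ACPI or the I2C id table */
    static int example_get_variant(struct i2c_client *client,
    			       const struct i2c_device_id *id)
    {
    	if (ACPI_COMPANION(&client->dev)) {
    		const struct acpi_device_id *aid;

    		aid = acpi_match_device(client->dev.driver->acpi_match_table,
    					&client->dev);
    		if (!aid)
    			return -ENODEV;
    		return aid->driver_data;
    	}

    	return id->driver_data;
    }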
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index 0afeac0c9bad..f4ba23effe9a 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -194,6 +194,7 @@ static int adc0832_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->info = &adc0832_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index bc58867d6e8d..89dfbd31be5c 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -150,6 +150,7 @@ static int adc128_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &adc128_info;
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 73cbf0b54e54..1ef398770a1f 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -55,6 +55,11 @@
#define ADS1015_DEFAULT_DATA_RATE 4
#define ADS1015_DEFAULT_CHAN 0
+enum {
+ ADS1015,
+ ADS1115,
+};
+
enum ads1015_channels {
ADS1015_AIN0_AIN1 = 0,
ADS1015_AIN0_AIN3,
@@ -71,6 +76,10 @@ static const unsigned int ads1015_data_rate[] = {
128, 250, 490, 920, 1600, 2400, 3300, 3300
};
+static const unsigned int ads1115_data_rate[] = {
+ 8, 16, 32, 64, 128, 250, 475, 860
+};
+
static const struct {
int scale;
int uscale;
@@ -101,6 +110,7 @@ static const struct {
.shift = 4, \
.endianness = IIO_CPU, \
}, \
+ .datasheet_name = "AIN"#_chan, \
}
#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) { \
@@ -121,6 +131,45 @@ static const struct {
.shift = 4, \
.endianness = IIO_CPU, \
}, \
+ .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
+}
+
+#define ADS1115_V_CHAN(_chan, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = _addr, \
+ .channel = _chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _addr, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+ .datasheet_name = "AIN"#_chan, \
+}
+
+#define ADS1115_V_DIFF_CHAN(_chan, _chan2, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .differential = 1, \
+ .indexed = 1, \
+ .address = _addr, \
+ .channel = _chan, \
+ .channel2 = _chan2, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _addr, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+ .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
}
struct ads1015_data {
@@ -131,6 +180,8 @@ struct ads1015_data {
*/
struct mutex lock;
struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+
+ unsigned int *data_rate;
};
static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
@@ -157,6 +208,18 @@ static const struct iio_chan_spec ads1015_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
};
+static const struct iio_chan_spec ads1115_channels[] = {
+ ADS1115_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
+ ADS1115_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
+ ADS1115_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
+ ADS1115_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
+ ADS1115_V_CHAN(0, ADS1015_AIN0),
+ ADS1115_V_CHAN(1, ADS1015_AIN1),
+ ADS1115_V_CHAN(2, ADS1015_AIN2),
+ ADS1115_V_CHAN(3, ADS1015_AIN3),
+ IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
+};
+
static int ads1015_set_power_state(struct ads1015_data *data, bool on)
{
int ret;
@@ -196,7 +259,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
return ret;
if (change) {
- conv_time = DIV_ROUND_UP(USEC_PER_SEC, ads1015_data_rate[dr]);
+ conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
usleep_range(conv_time, conv_time + 1);
}
@@ -225,7 +288,8 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
buf[0] = res;
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -263,7 +327,7 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
int i, ret, rindex = -1;
for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++)
- if (ads1015_data_rate[i] == rate) {
+ if (data->data_rate[i] == rate) {
rindex = i;
break;
}
@@ -291,7 +355,9 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
mutex_lock(&data->lock);
switch (mask) {
- case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_RAW: {
+ int shift = chan->scan_type.shift;
+
if (iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
break;
@@ -307,8 +373,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
break;
}
- /* 12 bit res, D0 is bit 4 in conversion register */
- *val = sign_extend32(*val >> 4, 11);
+ *val = sign_extend32(*val >> shift, 15 - shift);
ret = ads1015_set_power_state(data, false);
if (ret < 0)
@@ -316,6 +381,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
+ }
case IIO_CHAN_INFO_SCALE:
idx = data->channel_data[chan->address].pga;
*val = ads1015_scale[idx].scale;
@@ -324,7 +390,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_SAMP_FREQ:
idx = data->channel_data[chan->address].data_rate;
- *val = ads1015_data_rate[idx];
+ *val = data->data_rate[idx];
ret = IIO_VAL_INT;
break;
default:
@@ -380,12 +446,15 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
};
static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125");
-static IIO_CONST_ATTR(sampling_frequency_available,
- "128 250 490 920 1600 2400 3300");
+
+static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available,
+ sampling_frequency_available, "128 250 490 920 1600 2400 3300");
+static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available,
+ sampling_frequency_available, "8 16 32 64 128 250 475 860");
static struct attribute *ads1015_attributes[] = {
&iio_const_attr_scale_available.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -393,11 +462,28 @@ static const struct attribute_group ads1015_attribute_group = {
.attrs = ads1015_attributes,
};
-static const struct iio_info ads1015_info = {
+static struct attribute *ads1115_attributes[] = {
+ &iio_const_attr_scale_available.dev_attr.attr,
+ &iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ads1115_attribute_group = {
+ .attrs = ads1115_attributes,
+};
+
+static struct iio_info ads1015_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = ads1015_read_raw,
+ .write_raw = ads1015_write_raw,
+ .attrs = &ads1015_attribute_group,
+};
+
+static struct iio_info ads1115_info = {
.driver_module = THIS_MODULE,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
- .attrs = &ads1015_attribute_group,
+ .attrs = &ads1115_attribute_group,
};
#ifdef CONFIG_OF
@@ -500,12 +586,25 @@ static int ads1015_probe(struct i2c_client *client,
mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
- indio_dev->info = &ads1015_info;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->name = ADS1015_DRV_NAME;
- indio_dev->channels = ads1015_channels;
- indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
+ switch (id->driver_data) {
+ case ADS1015:
+ indio_dev->channels = ads1015_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
+ indio_dev->info = &ads1015_info;
+ data->data_rate = (unsigned int *) &ads1015_data_rate;
+ break;
+ case ADS1115:
+ indio_dev->channels = ads1115_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ads1115_channels);
+ indio_dev->info = &ads1115_info;
+ data->data_rate = (unsigned int *) &ads1115_data_rate;
+ break;
+ }
+
/* we need to keep this ABI the same as used by hwmon ADS1015 driver */
ads1015_get_channels_config(client);
@@ -590,7 +689,8 @@ static const struct dev_pm_ops ads1015_pm_ops = {
};
static const struct i2c_device_id ads1015_id[] = {
- {"ads1015", 0},
+ {"ads1015", ADS1015},
+ {"ads1115", ADS1115},
{}
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
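
The generalized conversion works because sign_extend32() takes the index of the sign bit: ADS1015 channels keep shift = 4 (12-bit data in bits 15:4), so the call reduces to the old sign_extend32(val >> 4, 11), while ADS1115 channels use shift = 0 and extend from bit 15. A worked example under those assumptions:

    /* Sketch: sign extension for 12-bit (shift = 4) vs 16-bit (shift = 0) */
    u16 raw = 0x8010;			/* register value, MSB set */
    int v12 = sign_extend32(raw >> 4, 11);	/* 0x801  -> -2047  */
    int v16 = sign_extend32(raw, 15);	/* 0x8010 -> -32752 */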
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 03e907028cb6..c400439900af 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -421,6 +421,7 @@ static int ads8688_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index c1e05532d437..8a368756881b 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -326,8 +326,7 @@ static int tiadc_channel_init(struct iio_dev *indio_dev, int channels)
int i;
indio_dev->num_channels = channels;
- chan_array = kcalloc(channels,
- sizeof(struct iio_chan_spec), GFP_KERNEL);
+ chan_array = kcalloc(channels, sizeof(*chan_array), GFP_KERNEL);
if (chan_array == NULL)
return -ENOMEM;
@@ -467,8 +466,7 @@ static int tiadc_probe(struct platform_device *pdev)
return -EINVAL;
}
- indio_dev = devm_iio_device_alloc(&pdev->dev,
- sizeof(struct tiadc_device));
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
if (indio_dev == NULL) {
dev_err(&pdev->dev, "failed to allocate iio device\n");
return -ENOMEM;
@@ -531,8 +529,7 @@ static int tiadc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tiadc_suspend(struct device *dev)
+static int __maybe_unused tiadc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -550,7 +547,7 @@ static int tiadc_suspend(struct device *dev)
return 0;
}
-static int tiadc_resume(struct device *dev)
+static int __maybe_unused tiadc_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -567,14 +564,7 @@ static int tiadc_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops tiadc_pm_ops = {
- .suspend = tiadc_suspend,
- .resume = tiadc_resume,
-};
-#define TIADC_PM_OPS (&tiadc_pm_ops)
-#else
-#define TIADC_PM_OPS NULL
-#endif
+static SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume);
static const struct of_device_id ti_adc_dt_ids[] = {
{ .compatible = "ti,am3359-adc", },
@@ -585,7 +575,7 @@ MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);
static struct platform_driver tiadc_driver = {
.driver = {
.name = "TI-am335x-adc",
- .pm = TIADC_PM_OPS,
+ .pm = &tiadc_pm_ops,
.of_match_table = ti_adc_dt_ids,
},
.probe = tiadc_probe,
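
SIMPLE_DEV_PM_OPS() defines the dev_pm_ops unconditionally (the callbacks are only wired up when CONFIG_PM_SLEEP is set), so the #ifdef scaffolding disappears; __maybe_unused keeps the compiler quiet about the callbacks in !CONFIG_PM_SLEEP builds while still type-checking them. A minimal sketch of the idiom with hypothetical names:

    static int __maybe_unused example_suspend(struct device *dev)
    {
    	/* quiesce the hardware */
    	return 0;
    }

    static int __maybe_unused example_resume(struct device *dev)
    {
    	/* restore hardware state */
    	return 0;
    }

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
    /* hook up with: .driver.pm = &example_pm_ops */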
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 653bf1379d2e..228a003adeed 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -594,7 +594,8 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
if (iio_buffer_enabled(indio_dev)) {
info->buffer[0] = info->value;
iio_push_to_buffers_with_timestamp(indio_dev,
- info->buffer, iio_get_time_ns());
+ info->buffer,
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
} else
complete(&info->completion);
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
index edcf3aabd70d..6d5c2a6f4e6e 100644
--- a/drivers/iio/adc/xilinx-xadc-events.c
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -46,7 +46,7 @@ static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event)
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
} else {
/*
* For other channels we don't know whether it is an upper or
@@ -56,7 +56,7 @@ static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event)
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
}
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 212cbedc7abb..dd99d273bae9 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -305,7 +305,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
queue->fileio.active_block = NULL;
spin_lock_irq(&queue->list_lock);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
block = queue->fileio.blocks[i];
/* If we can't re-use it free it */
@@ -323,7 +323,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
INIT_LIST_HEAD(&queue->incoming);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
if (queue->fileio.blocks[i]) {
block = queue->fileio.blocks[i];
if (block->state == IIO_BLOCK_STATE_DEAD) {
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index f73290f84c90..4bcc025e8c8a 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -5,15 +5,17 @@
menu "Chemical Sensors"
config ATLAS_PH_SENSOR
- tristate "Atlas Scientific OEM pH-SM sensor"
+ tristate "Atlas Scientific OEM SM sensors"
depends on I2C
select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select IRQ_WORK
help
- Say Y here to build I2C interface support for the Atlas
- Scientific OEM pH-SM sensor.
+ Say Y here to build I2C interface support for the following
+ Atlas Scientific OEM SM sensors:
+ * pH SM sensor
+ * EC SM sensor
To compile this driver as module, choose M here: the
module will be called atlas-ph-sensor.
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index 62b37cd8fb56..ae038a59d256 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -24,6 +24,7 @@
#include <linux/irq_work.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -43,29 +44,50 @@
#define ATLAS_REG_PWR_CONTROL 0x06
-#define ATLAS_REG_CALIB_STATUS 0x0d
-#define ATLAS_REG_CALIB_STATUS_MASK 0x07
-#define ATLAS_REG_CALIB_STATUS_LOW BIT(0)
-#define ATLAS_REG_CALIB_STATUS_MID BIT(1)
-#define ATLAS_REG_CALIB_STATUS_HIGH BIT(2)
+#define ATLAS_REG_PH_CALIB_STATUS 0x0d
+#define ATLAS_REG_PH_CALIB_STATUS_MASK 0x07
+#define ATLAS_REG_PH_CALIB_STATUS_LOW BIT(0)
+#define ATLAS_REG_PH_CALIB_STATUS_MID BIT(1)
+#define ATLAS_REG_PH_CALIB_STATUS_HIGH BIT(2)
-#define ATLAS_REG_TEMP_DATA 0x0e
+#define ATLAS_REG_EC_CALIB_STATUS 0x0f
+#define ATLAS_REG_EC_CALIB_STATUS_MASK 0x0f
+#define ATLAS_REG_EC_CALIB_STATUS_DRY BIT(0)
+#define ATLAS_REG_EC_CALIB_STATUS_SINGLE BIT(1)
+#define ATLAS_REG_EC_CALIB_STATUS_LOW BIT(2)
+#define ATLAS_REG_EC_CALIB_STATUS_HIGH BIT(3)
+
+#define ATLAS_REG_PH_TEMP_DATA 0x0e
#define ATLAS_REG_PH_DATA 0x16
+#define ATLAS_REG_EC_PROBE 0x08
+#define ATLAS_REG_EC_TEMP_DATA 0x10
+#define ATLAS_REG_EC_DATA 0x18
+#define ATLAS_REG_TDS_DATA 0x1c
+#define ATLAS_REG_PSS_DATA 0x20
+
#define ATLAS_PH_INT_TIME_IN_US 450000
+#define ATLAS_EC_INT_TIME_IN_US 650000
+
+enum {
+ ATLAS_PH_SM,
+ ATLAS_EC_SM,
+};
struct atlas_data {
struct i2c_client *client;
struct iio_trigger *trig;
+ struct atlas_device *chip;
struct regmap *regmap;
struct irq_work work;
- __be32 buffer[4]; /* 32-bit pH data + 32-bit pad + 64-bit timestamp */
+ __be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
};
static const struct regmap_range atlas_volatile_ranges[] = {
regmap_reg_range(ATLAS_REG_INT_CONTROL, ATLAS_REG_INT_CONTROL),
regmap_reg_range(ATLAS_REG_PH_DATA, ATLAS_REG_PH_DATA + 4),
+ regmap_reg_range(ATLAS_REG_EC_DATA, ATLAS_REG_PSS_DATA + 4),
};
static const struct regmap_access_table atlas_volatile_table = {
@@ -80,13 +102,14 @@ static const struct regmap_config atlas_regmap_config = {
.val_bits = 8,
.volatile_table = &atlas_volatile_table,
- .max_register = ATLAS_REG_PH_DATA + 4,
+ .max_register = ATLAS_REG_PSS_DATA + 4,
.cache_type = REGCACHE_RBTREE,
};
-static const struct iio_chan_spec atlas_channels[] = {
+static const struct iio_chan_spec atlas_ph_channels[] = {
{
.type = IIO_PH,
+ .address = ATLAS_REG_PH_DATA,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 0,
@@ -100,7 +123,7 @@ static const struct iio_chan_spec atlas_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(1),
{
.type = IIO_TEMP,
- .address = ATLAS_REG_TEMP_DATA,
+ .address = ATLAS_REG_PH_TEMP_DATA,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
.output = 1,
@@ -108,6 +131,142 @@ static const struct iio_chan_spec atlas_channels[] = {
},
};
+#define ATLAS_EC_CHANNEL(_idx, _addr) \
+ {\
+ .type = IIO_CONCENTRATION, \
+ .indexed = 1, \
+ .channel = _idx, \
+ .address = _addr, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = _idx + 1, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec atlas_ec_channels[] = {
+ {
+ .type = IIO_ELECTRICALCONDUCTIVITY,
+ .address = ATLAS_REG_EC_DATA,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ ATLAS_EC_CHANNEL(0, ATLAS_REG_TDS_DATA),
+ ATLAS_EC_CHANNEL(1, ATLAS_REG_PSS_DATA),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+ {
+ .type = IIO_TEMP,
+ .address = ATLAS_REG_EC_TEMP_DATA,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .output = 1,
+ .scan_index = -1
+ },
+};
+
+static int atlas_check_ph_calibration(struct atlas_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, ATLAS_REG_PH_CALIB_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & ATLAS_REG_PH_CALIB_STATUS_MASK)) {
+ dev_warn(dev, "device has not been calibrated\n");
+ return 0;
+ }
+
+ if (!(val & ATLAS_REG_PH_CALIB_STATUS_LOW))
+ dev_warn(dev, "device missing low point calibration\n");
+
+ if (!(val & ATLAS_REG_PH_CALIB_STATUS_MID))
+ dev_warn(dev, "device missing mid point calibration\n");
+
+ if (!(val & ATLAS_REG_PH_CALIB_STATUS_HIGH))
+ dev_warn(dev, "device missing high point calibration\n");
+
+ return 0;
+}
+
+static int atlas_check_ec_calibration(struct atlas_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ unsigned int val;
+ __be16 rval;
+
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
+ if (ret)
+ return ret;
+
+ val = be16_to_cpu(rval);
+ dev_info(dev, "probe set to K = %d.%.2d\n", val / 100, val % 100);
+
+ ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & ATLAS_REG_EC_CALIB_STATUS_MASK)) {
+ dev_warn(dev, "device has not been calibrated\n");
+ return 0;
+ }
+
+ if (!(val & ATLAS_REG_EC_CALIB_STATUS_DRY))
+ dev_warn(dev, "device missing dry point calibration\n");
+
+ if (val & ATLAS_REG_EC_CALIB_STATUS_SINGLE) {
+ dev_warn(dev, "device using single point calibration\n");
+ } else {
+ if (!(val & ATLAS_REG_EC_CALIB_STATUS_LOW))
+ dev_warn(dev, "device missing low point calibration\n");
+
+ if (!(val & ATLAS_REG_EC_CALIB_STATUS_HIGH))
+ dev_warn(dev, "device missing high point calibration\n");
+ }
+
+ return 0;
+}
+
+struct atlas_device {
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ int data_reg;
+
+ int (*calibration)(struct atlas_data *data);
+ int delay;
+};
+
+static struct atlas_device atlas_devices[] = {
+ [ATLAS_PH_SM] = {
+ .channels = atlas_ph_channels,
+ .num_channels = 3,
+ .data_reg = ATLAS_REG_PH_DATA,
+ .calibration = &atlas_check_ph_calibration,
+ .delay = ATLAS_PH_INT_TIME_IN_US,
+ },
+ [ATLAS_EC_SM] = {
+ .channels = atlas_ec_channels,
+ .num_channels = 5,
+ .data_reg = ATLAS_REG_EC_DATA,
+ .calibration = &atlas_check_ec_calibration,
+ .delay = ATLAS_EC_INT_TIME_IN_US,
+ },
+
+};
+
static int atlas_set_powermode(struct atlas_data *data, int on)
{
return regmap_write(data->regmap, ATLAS_REG_PWR_CONTROL, on);
@@ -178,12 +337,13 @@ static irqreturn_t atlas_trigger_handler(int irq, void *private)
struct atlas_data *data = iio_priv(indio_dev);
int ret;
- ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
- (u8 *) &data->buffer, sizeof(data->buffer[0]));
+ ret = regmap_bulk_read(data->regmap, data->chip->data_reg,
+ (u8 *) &data->buffer,
+ sizeof(__be32) * (data->chip->num_channels - 2));
if (!ret)
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
@@ -200,7 +360,7 @@ static irqreturn_t atlas_interrupt_handler(int irq, void *private)
return IRQ_HANDLED;
}
-static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val)
+static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
{
struct device *dev = &data->client->dev;
int suspended = pm_runtime_suspended(dev);
@@ -213,11 +373,9 @@ static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val)
}
if (suspended)
- usleep_range(ATLAS_PH_INT_TIME_IN_US,
- ATLAS_PH_INT_TIME_IN_US + 100000);
+ usleep_range(data->chip->delay, data->chip->delay + 100000);
- ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
- (u8 *) val, sizeof(*val));
+ ret = regmap_bulk_read(data->regmap, reg, (u8 *) val, sizeof(*val));
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -242,12 +400,15 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
(u8 *) &reg, sizeof(reg));
break;
case IIO_PH:
+ case IIO_CONCENTRATION:
+ case IIO_ELECTRICALCONDUCTIVITY:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
ret = -EBUSY;
else
- ret = atlas_read_ph_measurement(data, &reg);
+ ret = atlas_read_measurement(data,
+ chan->address, &reg);
mutex_unlock(&indio_dev->mlock);
break;
@@ -271,6 +432,14 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
*val = 1; /* 0.001 */
*val2 = 1000;
break;
+ case IIO_ELECTRICALCONDUCTIVITY:
+ *val = 1; /* 0.00001 */
+ *val2 = 100000;
+ break;
+ case IIO_CONCENTRATION:
+ *val = 0; /* 0.000001 */
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_NANO;
default:
return -EINVAL;
}
@@ -303,37 +472,26 @@ static const struct iio_info atlas_info = {
.write_raw = atlas_write_raw,
};
-static int atlas_check_calibration(struct atlas_data *data)
-{
- struct device *dev = &data->client->dev;
- int ret;
- unsigned int val;
-
- ret = regmap_read(data->regmap, ATLAS_REG_CALIB_STATUS, &val);
- if (ret)
- return ret;
-
- if (!(val & ATLAS_REG_CALIB_STATUS_MASK)) {
- dev_warn(dev, "device has not been calibrated\n");
- return 0;
- }
-
- if (!(val & ATLAS_REG_CALIB_STATUS_LOW))
- dev_warn(dev, "device missing low point calibration\n");
-
- if (!(val & ATLAS_REG_CALIB_STATUS_MID))
- dev_warn(dev, "device missing mid point calibration\n");
-
- if (!(val & ATLAS_REG_CALIB_STATUS_HIGH))
- dev_warn(dev, "device missing high point calibration\n");
+static const struct i2c_device_id atlas_id[] = {
+ { "atlas-ph-sm", ATLAS_PH_SM},
+ { "atlas-ec-sm", ATLAS_EC_SM},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, atlas_id);
- return 0;
+static const struct of_device_id atlas_dt_ids[] = {
+ { .compatible = "atlas,ph-sm", .data = (void *)ATLAS_PH_SM, },
+ { .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
+ { }
};
+MODULE_DEVICE_TABLE(of, atlas_dt_ids);
static int atlas_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct atlas_data *data;
+ struct atlas_device *chip;
+ const struct of_device_id *of_id;
struct iio_trigger *trig;
struct iio_dev *indio_dev;
int ret;
@@ -342,10 +500,16 @@ static int atlas_probe(struct i2c_client *client,
if (!indio_dev)
return -ENOMEM;
+ of_id = of_match_device(atlas_dt_ids, &client->dev);
+ if (!of_id)
+ chip = &atlas_devices[id->driver_data];
+ else
+ chip = &atlas_devices[(unsigned long)of_id->data];
+
indio_dev->info = &atlas_info;
indio_dev->name = ATLAS_DRV_NAME;
- indio_dev->channels = atlas_channels;
- indio_dev->num_channels = ARRAY_SIZE(atlas_channels);
+ indio_dev->channels = chip->channels;
+ indio_dev->num_channels = chip->num_channels;
indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE;
indio_dev->dev.parent = &client->dev;
@@ -358,6 +522,7 @@ static int atlas_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
data->client = client;
data->trig = trig;
+ data->chip = chip;
trig->dev.parent = indio_dev->dev.parent;
trig->ops = &atlas_interrupt_trigger_ops;
iio_trigger_set_drvdata(trig, indio_dev);
@@ -379,7 +544,7 @@ static int atlas_probe(struct i2c_client *client,
return -EINVAL;
}
- ret = atlas_check_calibration(data);
+ ret = chip->calibration(data);
if (ret)
return ret;
@@ -480,18 +645,6 @@ static const struct dev_pm_ops atlas_pm_ops = {
atlas_runtime_resume, NULL)
};
-static const struct i2c_device_id atlas_id[] = {
- { "atlas-ph-sm", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, atlas_id);
-
-static const struct of_device_id atlas_dt_ids[] = {
- { .compatible = "atlas,ph-sm" },
- { }
-};
-MODULE_DEVICE_TABLE(of, atlas_dt_ids);
-
static struct i2c_driver atlas_driver = {
.driver = {
.name = ATLAS_DRV_NAME,
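
The variant is resolved once at probe time: the OF match entry carries an index into atlas_devices[] in its .data field, and the I2C id table serves as the fallback for non-DT instantiation. A reduced sketch of the lookup as used above:

    /* Sketch: map OF or I2C match data to a per-chip descriptor */
    const struct of_device_id *of_id =
    	of_match_device(atlas_dt_ids, &client->dev);
    struct atlas_device *chip;

    if (of_id)
    	chip = &atlas_devices[(unsigned long)of_id->data];
    else
    	chip = &atlas_devices[id->driver_data];
    /* chip->channels, chip->data_reg and chip->delay drive the rest */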
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index f1693dbebb8a..d06e728cea37 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -22,34 +22,32 @@
#include <linux/iio/common/st_sensors.h>
-int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
+static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
{
- int i, len;
- int total = 0;
+ int i;
struct st_sensor_data *sdata = iio_priv(indio_dev);
unsigned int num_data_channels = sdata->num_data_channels;
- for (i = 0; i < num_data_channels; i++) {
- unsigned int bytes_to_read;
-
- if (test_bit(i, indio_dev->active_scan_mask)) {
- bytes_to_read = indio_dev->channels[i].scan_type.storagebits >> 3;
- len = sdata->tf->read_multiple_byte(&sdata->tb,
- sdata->dev, indio_dev->channels[i].address,
- bytes_to_read,
- buf + total, sdata->multiread_bit);
-
- if (len < bytes_to_read)
- return -EIO;
-
- /* Advance the buffer pointer */
- total += len;
- }
+ for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
+ const struct iio_chan_spec *channel = &indio_dev->channels[i];
+ unsigned int bytes_to_read = channel->scan_type.realbits >> 3;
+ unsigned int storage_bytes =
+ channel->scan_type.storagebits >> 3;
+
+ buf = PTR_ALIGN(buf, storage_bytes);
+ if (sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ channel->address,
+ bytes_to_read, buf,
+ sdata->multiread_bit) <
+ bytes_to_read)
+ return -EIO;
+
+ /* Advance the buffer pointer */
+ buf += storage_bytes;
}
- return total;
+ return 0;
}
-EXPORT_SYMBOL(st_sensors_get_buffer_element);
irqreturn_t st_sensors_trigger_handler(int irq, void *p)
{
@@ -59,11 +57,16 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
struct st_sensor_data *sdata = iio_priv(indio_dev);
s64 timestamp;
- /* If we do timetamping here, do it before reading the values */
+ /*
+ * If we do timestamping here, do it before reading the values, because
+ * once we've read the values, new interrupts can occur (when using
+ * the hardware trigger) and the hw_timestamp may get updated.
+ * By storing it in a local variable first, we are safe.
+ */
if (sdata->hw_irq_trigger)
timestamp = sdata->hw_timestamp;
else
- timestamp = iio_get_time_ns();
+ timestamp = iio_get_time_ns(indio_dev);
len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
if (len < 0)
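
for_each_set_bit() walks only the channels enabled in the scan mask, and PTR_ALIGN() keeps every sample naturally aligned to its storage size, which starts to matter once realbits and storagebits differ. A minimal sketch of the loop shape (read_channel() is a hypothetical stand-in for the transfer-function call):

    /* Sketch: scan-mask-driven buffer fill with per-channel alignment */
    for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
    	const struct iio_chan_spec *ch = &indio_dev->channels[i];
    	unsigned int read_bytes = ch->scan_type.realbits >> 3;
    	unsigned int storage_bytes = ch->scan_type.storagebits >> 3;

    	buf = PTR_ALIGN(buf, storage_bytes);
    	if (read_channel(ch->address, buf, read_bytes) < read_bytes)
    		return -EIO;
    	buf += storage_bytes;
    }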
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 9e59c90f6a8d..2d5282e05482 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -228,7 +228,7 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
}
EXPORT_SYMBOL(st_sensors_set_axis_enable);
-void st_sensors_power_enable(struct iio_dev *indio_dev)
+int st_sensors_power_enable(struct iio_dev *indio_dev)
{
struct st_sensor_data *pdata = iio_priv(indio_dev);
int err;
@@ -237,18 +237,37 @@ void st_sensors_power_enable(struct iio_dev *indio_dev)
pdata->vdd = devm_regulator_get_optional(indio_dev->dev.parent, "vdd");
if (!IS_ERR(pdata->vdd)) {
err = regulator_enable(pdata->vdd);
- if (err != 0)
+ if (err != 0) {
dev_warn(&indio_dev->dev,
"Failed to enable specified Vdd supply\n");
+ return err;
+ }
+ } else {
+ err = PTR_ERR(pdata->vdd);
+ if (err != -ENODEV)
+ return err;
}
pdata->vdd_io = devm_regulator_get_optional(indio_dev->dev.parent, "vddio");
if (!IS_ERR(pdata->vdd_io)) {
err = regulator_enable(pdata->vdd_io);
- if (err != 0)
+ if (err != 0) {
dev_warn(&indio_dev->dev,
"Failed to enable specified Vdd_IO supply\n");
+ goto st_sensors_disable_vdd;
+ }
+ } else {
+ err = PTR_ERR(pdata->vdd_io);
+ if (err != -ENODEV)
+ goto st_sensors_disable_vdd;
}
+
+ return 0;
+
+st_sensors_disable_vdd:
+ if (!IS_ERR_OR_NULL(pdata->vdd))
+ regulator_disable(pdata->vdd);
+ return err;
}
EXPORT_SYMBOL(st_sensors_power_enable);
@@ -256,10 +275,10 @@ void st_sensors_power_disable(struct iio_dev *indio_dev)
{
struct st_sensor_data *pdata = iio_priv(indio_dev);
- if (!IS_ERR(pdata->vdd))
+ if (!IS_ERR_OR_NULL(pdata->vdd))
regulator_disable(pdata->vdd);
- if (!IS_ERR(pdata->vdd_io))
+ if (!IS_ERR_OR_NULL(pdata->vdd_io))
regulator_disable(pdata->vdd_io);
}
EXPORT_SYMBOL(st_sensors_power_disable);
@@ -471,7 +490,7 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
int err;
u8 *outdata;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- unsigned int byte_for_channel = ch->scan_type.storagebits >> 3;
+ unsigned int byte_for_channel = ch->scan_type.realbits >> 3;
outdata = kmalloc(byte_for_channel, GFP_KERNEL);
if (!outdata)
@@ -531,7 +550,7 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
int num_sensors_list,
const struct st_sensor_settings *sensor_settings)
{
- int i, n, err;
+ int i, n, err = 0;
u8 wai;
struct st_sensor_data *sdata = iio_priv(indio_dev);
@@ -551,17 +570,21 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
return -ENODEV;
}
- err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- sensor_settings[i].wai_addr, &wai);
- if (err < 0) {
- dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n");
- return err;
- }
+ if (sensor_settings[i].wai_addr) {
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ sensor_settings[i].wai_addr, &wai);
+ if (err < 0) {
+ dev_err(&indio_dev->dev,
+ "failed to read Who-Am-I register.\n");
+ return err;
+ }
- if (sensor_settings[i].wai != wai) {
- dev_err(&indio_dev->dev, "%s: WhoAmI mismatch (0x%x).\n",
- indio_dev->name, wai);
- return -EINVAL;
+ if (sensor_settings[i].wai != wai) {
+ dev_err(&indio_dev->dev,
+ "%s: WhoAmI mismatch (0x%x).\n",
+ indio_dev->name, wai);
+ return -EINVAL;
+ }
}
sdata->sensor_settings =
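
devm_regulator_get_optional() returns -ENODEV when no supply is described, and that is the only error the driver may tolerate; everything else, including -EPROBE_DEFER, now propagates, and a failure on the second supply unwinds the first. A condensed sketch of the pattern for one supply:

    /* Sketch: optional regulator handling, tolerating only -ENODEV */
    reg = devm_regulator_get_optional(dev, "vdd");
    if (IS_ERR(reg)) {
    	err = PTR_ERR(reg);
    	if (err != -ENODEV)
    		return err;	/* real error, e.g. -EPROBE_DEFER */
    } else {
    	err = regulator_enable(reg);
    	if (err)
    		return err;
    }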
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index 98cfee296d46..b43aa36031f8 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -48,8 +48,8 @@ static int st_sensors_i2c_read_multiple_byte(
if (multiread_bit)
reg_addr |= ST_SENSORS_I2C_MULTIREAD;
- return i2c_smbus_read_i2c_block_data(to_i2c_client(dev),
- reg_addr, len, data);
+ return i2c_smbus_read_i2c_block_data_or_emulated(to_i2c_client(dev),
+ reg_addr, len, data);
}
static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb,
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 296e4ff19ae8..e66f12ee8a55 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -18,6 +18,50 @@
#include "st_sensors_core.h"
/**
+ * st_sensors_new_samples_available() - check if more samples came in
+ * returns:
+ * 0 - no new samples available
+ * 1 - new samples available
+ * negative - error or unknown
+ */
+static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
+ struct st_sensor_data *sdata)
+{
+ u8 status;
+ int ret;
+
+ /* Without a status register we have no way to tell */
+ if (!sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ return -EINVAL;
+
+ /* No scan mask, no interrupt */
+ if (!indio_dev->active_scan_mask)
+ return 0;
+
+ ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+ &status);
+ if (ret < 0) {
+ dev_err(sdata->dev,
+ "error checking samples available\n");
+ return ret;
+ }
+ /*
+ * the lower bits of .active_scan_mask[0] are directly mapped
+ * to the channels on the sensor: either bit 0 for
+ * one-dimensional sensors, or e.g. x,y,z for accelerometers,
+ * gyroscopes or magnetometers. No sensor uses more than 3
+ * channels, so cut the other status bits here.
+ */
+ status &= 0x07;
+
+ if (status & (u8)indio_dev->active_scan_mask[0])
+ return 1;
+
+ return 0;
+}
+
+/**
* st_sensors_irq_handler() - top half of the IRQ-based triggers
* @irq: irq number
* @p: private handler data
@@ -29,7 +73,7 @@ irqreturn_t st_sensors_irq_handler(int irq, void *p)
struct st_sensor_data *sdata = iio_priv(indio_dev);
/* Get the time stamp as close in time as possible */
- sdata->hw_timestamp = iio_get_time_ns();
+ sdata->hw_timestamp = iio_get_time_ns(indio_dev);
return IRQ_WAKE_THREAD;
}
@@ -43,44 +87,43 @@ irqreturn_t st_sensors_irq_thread(int irq, void *p)
struct iio_trigger *trig = p;
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct st_sensor_data *sdata = iio_priv(indio_dev);
- int ret;
/*
* If this trigger is backed by a hardware interrupt and we have a
- * status register, check if this IRQ came from us
+ * status register, check if this IRQ came from us. Notice that
+ * we will also process the IRQ if st_sensors_new_samples_available()
+ * returns negative: if we can't check the status, then poll
+ * unconditionally.
*/
- if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
- u8 status;
-
- ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- sdata->sensor_settings->drdy_irq.addr_stat_drdy,
- &status);
- if (ret < 0) {
- dev_err(sdata->dev, "could not read channel status\n");
- goto out_poll;
- }
- /*
- * the lower bits of .active_scan_mask[0] is directly mapped
- * to the channels on the sensor: either bit 0 for
- * one-dimensional sensors, or e.g. x,y,z for accelerometers,
- * gyroscopes or magnetometers. No sensor use more than 3
- * channels, so cut the other status bits here.
- */
- status &= 0x07;
+ if (sdata->hw_irq_trigger &&
+ st_sensors_new_samples_available(indio_dev, sdata)) {
+ iio_trigger_poll_chained(p);
+ } else {
+ dev_dbg(sdata->dev, "spurious IRQ\n");
+ return IRQ_NONE;
+ }
- /*
- * If this was not caused by any channels on this sensor,
- * return IRQ_NONE
- */
- if (!indio_dev->active_scan_mask)
- return IRQ_NONE;
- if (!(status & (u8)indio_dev->active_scan_mask[0]))
- return IRQ_NONE;
+ /*
+ * If we have proper level IRQs the handler will be re-entered if
+ * the line is still active, so return here and come back in through
+ * the top half if need be.
+ */
+ if (!sdata->edge_irq)
+ return IRQ_HANDLED;
+
+ /*
+ * If we are using edge IRQs, new samples may have arrived while we
+ * were processing this one, and they would be missed unless we pick
+ * them up here, so poll
+ * again. If the sensor delivery frequency is very high, this thread
+ * turns into a polled loop handler.
+ */
+ while (sdata->hw_irq_trigger &&
+ st_sensors_new_samples_available(indio_dev, sdata)) {
+ dev_dbg(sdata->dev, "more samples came in during polling\n");
+ sdata->hw_timestamp = iio_get_time_ns(indio_dev);
+ iio_trigger_poll_chained(p);
}
-out_poll:
- /* It's our IRQ: proceed to handle the register polling */
- iio_trigger_poll_chained(p);
return IRQ_HANDLED;
}
@@ -107,13 +150,18 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
* If the IRQ is triggered on falling edge, we need to mark the
* interrupt as active low, if the hardware supports this.
*/
- if (irq_trig == IRQF_TRIGGER_FALLING) {
+ switch (irq_trig) {
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
dev_err(&indio_dev->dev,
- "falling edge specified for IRQ but hardware "
- "only support rising edge, will request "
- "rising edge\n");
- irq_trig = IRQF_TRIGGER_RISING;
+ "falling/low specified for IRQ "
+ "but hardware only support rising/high: "
+ "will request rising/high\n");
+ if (irq_trig == IRQF_TRIGGER_FALLING)
+ irq_trig = IRQF_TRIGGER_RISING;
+ if (irq_trig == IRQF_TRIGGER_LOW)
+ irq_trig = IRQF_TRIGGER_HIGH;
} else {
/* Set up INT active low i.e. falling edge */
err = st_sensors_write_data_with_mask(indio_dev,
@@ -122,20 +170,39 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
if (err < 0)
goto iio_trigger_free;
dev_info(&indio_dev->dev,
- "interrupts on the falling edge\n");
+ "interrupts on the falling edge or "
+ "active low level\n");
}
- } else if (irq_trig == IRQF_TRIGGER_RISING) {
+ break;
+ case IRQF_TRIGGER_RISING:
dev_info(&indio_dev->dev,
"interrupts on the rising edge\n");
-
- } else {
+ break;
+ case IRQF_TRIGGER_HIGH:
+ dev_info(&indio_dev->dev,
+ "interrupts active high level\n");
+ break;
+ default:
+ /* This is the most preferred mode, if possible */
dev_err(&indio_dev->dev,
- "unsupported IRQ trigger specified (%lx), only "
- "rising and falling edges supported, enforce "
+ "unsupported IRQ trigger specified (%lx), enforce "
"rising edge\n", irq_trig);
irq_trig = IRQF_TRIGGER_RISING;
}
+ /* Tell the interrupt handler that we're dealing with edges */
+ if (irq_trig == IRQF_TRIGGER_FALLING ||
+ irq_trig == IRQF_TRIGGER_RISING)
+ sdata->edge_irq = true;
+ else
+ /*
+ * If we're not using edges (i.e. level interrupts) we
+ * just mask off the IRQ, handle one interrupt, then
+ * if the line is still low, we return to the
+ * interrupt handler top half again and start over.
+ */
+ irq_trig |= IRQF_ONESHOT;
+
/*
* If the interrupt pin is Open Drain, by definition this
* means that the interrupt line may be shared with other
@@ -148,9 +215,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.addr_stat_drdy)
irq_trig |= IRQF_SHARED;
- /* Let's create an interrupt thread masking the hard IRQ here */
- irq_trig |= IRQF_ONESHOT;
-
err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
st_sensors_irq_handler,
st_sensors_irq_thread,
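
With edge-triggered interrupts a new data-ready edge can fire while the IRQ thread is still running, and that edge would otherwise be lost; re-checking the status register in a loop turns missed edges into extra polls. A minimal sketch of the resulting thread shape (status_check() is a hypothetical stand-in for st_sensors_new_samples_available()):

    /* Sketch: edge IRQs - keep polling while samples keep arriving */
    iio_trigger_poll_chained(trig);		/* handle the edge we got */

    if (!edge_irq)
    	return IRQ_HANDLED;	/* level IRQs re-enter the top half */

    while (status_check() > 0) {	/* more samples landed meanwhile */
    	timestamp = iio_get_time_ns(indio_dev);
    	iio_trigger_poll_chained(trig);
    }
    return IRQ_HANDLED;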
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index f7c71da42f15..ca814479fadf 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -248,11 +248,12 @@ config MCP4922
config STX104
tristate "Apex Embedded Systems STX104 DAC driver"
depends on X86 && ISA_BUS_API
+ select GPIOLIB
help
- Say yes here to build support for the 2-channel DAC on the Apex
- Embedded Systems STX104 integrated analog PC/104 card. The base port
- addresses for the devices may be configured via the "base" module
- parameter array.
+ Say yes here to build support for the 2-channel DAC and GPIO on the
+ Apex Embedded Systems STX104 integrated analog PC/104 card. The base
+ port addresses for the devices may be configured via the base array
+ module parameter.
config VF610_DAC
tristate "Vybrid vf610 DAC driver"
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 968712be967f..559061ab1982 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -242,7 +242,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
if (events & AD5421_FAULT_UNDER_CURRENT) {
@@ -251,7 +251,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
if (events & AD5421_FAULT_TEMP_OVER_140) {
@@ -260,7 +260,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
0,
IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
old_fault = fault;
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 4e4c20d6d8b5..788b3d6fd1cc 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -223,7 +223,7 @@ static irqreturn_t ad5504_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns((struct iio_dev *)private));
return IRQ_HANDLED;
}
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index bfb350a85a16..0fde593ec0d9 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/platform_data/ad5755.h>
@@ -109,6 +110,51 @@ enum ad5755_type {
ID_AD5737,
};
+#ifdef CONFIG_OF
+static const int ad5755_dcdc_freq_table[][2] = {
+ { 250000, AD5755_DC_DC_FREQ_250kHZ },
+ { 410000, AD5755_DC_DC_FREQ_410kHZ },
+ { 650000, AD5755_DC_DC_FREQ_650kHZ }
+};
+
+static const int ad5755_dcdc_maxv_table[][2] = {
+ { 23000000, AD5755_DC_DC_MAXV_23V },
+ { 24500000, AD5755_DC_DC_MAXV_24V5 },
+ { 27000000, AD5755_DC_DC_MAXV_27V },
+ { 29500000, AD5755_DC_DC_MAXV_29V5 },
+};
+
+static const int ad5755_slew_rate_table[][2] = {
+ { 64000, AD5755_SLEW_RATE_64k },
+ { 32000, AD5755_SLEW_RATE_32k },
+ { 16000, AD5755_SLEW_RATE_16k },
+ { 8000, AD5755_SLEW_RATE_8k },
+ { 4000, AD5755_SLEW_RATE_4k },
+ { 2000, AD5755_SLEW_RATE_2k },
+ { 1000, AD5755_SLEW_RATE_1k },
+ { 500, AD5755_SLEW_RATE_500 },
+ { 250, AD5755_SLEW_RATE_250 },
+ { 125, AD5755_SLEW_RATE_125 },
+ { 64, AD5755_SLEW_RATE_64 },
+ { 32, AD5755_SLEW_RATE_32 },
+ { 16, AD5755_SLEW_RATE_16 },
+ { 8, AD5755_SLEW_RATE_8 },
+ { 4, AD5755_SLEW_RATE_4 },
+ { 0, AD5755_SLEW_RATE_0_5 },
+};
+
+static const int ad5755_slew_step_table[][2] = {
+ { 256, AD5755_SLEW_STEP_SIZE_256 },
+ { 128, AD5755_SLEW_STEP_SIZE_128 },
+ { 64, AD5755_SLEW_STEP_SIZE_64 },
+ { 32, AD5755_SLEW_STEP_SIZE_32 },
+ { 16, AD5755_SLEW_STEP_SIZE_16 },
+ { 4, AD5755_SLEW_STEP_SIZE_4 },
+ { 2, AD5755_SLEW_STEP_SIZE_2 },
+ { 1, AD5755_SLEW_STEP_SIZE_1 },
+};
+#endif
+
static int ad5755_write_unlocked(struct iio_dev *indio_dev,
unsigned int reg, unsigned int val)
{
@@ -556,6 +602,129 @@ static const struct ad5755_platform_data ad5755_default_pdata = {
},
};
+#ifdef CONFIG_OF
+static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *pp;
+ struct ad5755_platform_data *pdata;
+ unsigned int tmp;
+ unsigned int tmparray[3];
+ int devnr, i;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->ext_dc_dc_compenstation_resistor =
+ of_property_read_bool(np, "adi,ext-dc-dc-compenstation-resistor");
+
+ if (!of_property_read_u32(np, "adi,dc-dc-phase", &tmp))
+ pdata->dc_dc_phase = tmp;
+ else
+ pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE;
+
+ pdata->dc_dc_freq = AD5755_DC_DC_FREQ_410kHZ;
+ if (!of_property_read_u32(np, "adi,dc-dc-freq-hz", &tmp)) {
+ for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_freq_table); i++) {
+ if (tmp == ad5755_dcdc_freq_table[i][0]) {
+ pdata->dc_dc_freq = ad5755_dcdc_freq_table[i][1];
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(ad5755_dcdc_freq_table)) {
+ dev_err(dev,
+ "adi,dc-dc-freq out of range selecting 410kHz");
+ }
+ }
+
+ pdata->dc_dc_maxv = AD5755_DC_DC_MAXV_23V;
+ if (!of_property_read_u32(np, "adi,dc-dc-max-microvolt", &tmp)) {
+ for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_maxv_table); i++) {
+ if (tmp == ad5755_dcdc_maxv_table[i][0]) {
+ pdata->dc_dc_maxv = ad5755_dcdc_maxv_table[i][1];
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(ad5755_dcdc_maxv_table)) {
+ dev_err(dev,
+ "adi,dc-dc-maxv out of range selecting 23V");
+ }
+ }
+
+ devnr = 0;
+ for_each_child_of_node(np, pp) {
+ if (devnr >= AD5755_NUM_CHANNELS) {
+ dev_err(dev,
+ "There are too many channels defined in DT\n");
+ goto error_out;
+ }
+
+ if (!of_property_read_u32(pp, "adi,mode", &tmp))
+ pdata->dac[devnr].mode = tmp;
+ else
+ pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA;
+
+ pdata->dac[devnr].ext_current_sense_resistor =
+ of_property_read_bool(pp, "adi,ext-current-sense-resistor");
+
+ pdata->dac[devnr].enable_voltage_overrange =
+ of_property_read_bool(pp, "adi,enable-voltage-overrange");
+
+ if (!of_property_read_u32_array(pp, "adi,slew", tmparray, 3)) {
+ pdata->dac[devnr].slew.enable = tmparray[0];
+
+ pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k;
+ for (i = 0; i < ARRAY_SIZE(ad5755_slew_rate_table); i++) {
+ if (tmparray[1] == ad5755_slew_rate_table[i][0]) {
+ pdata->dac[devnr].slew.rate =
+ ad5755_slew_rate_table[i][1];
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(ad5755_slew_rate_table)) {
+ dev_err(dev,
+ "channel %d slew rate out of range selecting 64kHz",
+ devnr);
+ }
+
+ pdata->dac[devnr].slew.step_size = AD5755_SLEW_STEP_SIZE_1;
+ for (i = 0; i < ARRAY_SIZE(ad5755_slew_step_table); i++) {
+ if (tmparray[2] == ad5755_slew_step_table[i][0]) {
+ pdata->dac[devnr].slew.step_size =
+ ad5755_slew_step_table[i][1];
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(ad5755_slew_step_table)) {
+ dev_err(dev,
+ "channel %d slew step size out of range selecting 1 LSB",
+ devnr);
+ }
+ } else {
+ pdata->dac[devnr].slew.enable = false;
+ pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k;
+ pdata->dac[devnr].slew.step_size =
+ AD5755_SLEW_STEP_SIZE_1;
+ }
+ devnr++;
+ }
+
+ return pdata;
+
+ error_out:
+ devm_kfree(dev, pdata);
+ return NULL;
+}
+#else
+static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int ad5755_probe(struct spi_device *spi)
{
enum ad5755_type type = spi_get_device_id(spi)->driver_data;
@@ -583,8 +752,15 @@ static int ad5755_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = AD5755_NUM_CHANNELS;
- if (!pdata)
+ if (spi->dev.of_node)
+ pdata = ad5755_parse_dt(&spi->dev);
+ else
+ pdata = spi->dev.platform_data;
+
+ if (!pdata) {
+ dev_warn(&spi->dev, "no platform data? using default\n");
pdata = &ad5755_default_pdata;
+ }
ret = ad5755_init_channels(indio_dev, pdata);
if (ret)
@@ -607,6 +783,16 @@ static const struct spi_device_id ad5755_id[] = {
};
MODULE_DEVICE_TABLE(spi, ad5755_id);
+static const struct of_device_id ad5755_of_match[] = {
+ { .compatible = "adi,ad5755" },
+ { .compatible = "adi,ad5755-1" },
+ { .compatible = "adi,ad5757" },
+ { .compatible = "adi,ad5735" },
+ { .compatible = "adi,ad5737" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad5755_of_match);
+
static struct spi_driver ad5755_driver = {
.driver = {
.name = "ad5755",
diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/dac/stx104.c
index 27941220872f..792a97164cb2 100644
--- a/drivers/iio/dac/stx104.c
+++ b/drivers/iio/dac/stx104.c
@@ -14,6 +14,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/gpio/driver.h>
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/io.h>
@@ -21,6 +22,7 @@
#include <linux/isa.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
#define STX104_NUM_CHAN 2
@@ -49,6 +51,20 @@ struct stx104_iio {
unsigned base;
};
+/**
+ * struct stx104_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @base: base port address of the GPIO device
+ * @out_state: output bits state
+ */
+struct stx104_gpio {
+ struct gpio_chip chip;
+ spinlock_t lock;
+ unsigned int base;
+ unsigned int out_state;
+};
+
static int stx104_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long mask)
{
@@ -88,15 +104,81 @@ static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
STX104_CHAN(1)
};
+static int stx104_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ if (offset < 4)
+ return 1;
+
+ return 0;
+}
+
+static int stx104_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ if (offset >= 4)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stx104_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ if (offset < 4)
+ return -EINVAL;
+
+ chip->set(chip, offset, value);
+ return 0;
+}
+
+static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+
+ if (offset >= 4)
+ return -EINVAL;
+
+ return !!(inb(stx104gpio->base) & BIT(offset));
+}
+
+static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+ const unsigned int mask = BIT(offset) >> 4;
+ unsigned long flags;
+
+ if (offset < 4)
+ return;
+
+ spin_lock_irqsave(&stx104gpio->lock, flags);
+
+ if (value)
+ stx104gpio->out_state |= mask;
+ else
+ stx104gpio->out_state &= ~mask;
+
+ outb(stx104gpio->out_state, stx104gpio->base);
+
+ spin_unlock_irqrestore(&stx104gpio->lock, flags);
+}
+
static int stx104_probe(struct device *dev, unsigned int id)
{
struct iio_dev *indio_dev;
struct stx104_iio *priv;
+ struct stx104_gpio *stx104gpio;
+ int err;
indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
if (!indio_dev)
return -ENOMEM;
+ stx104gpio = devm_kzalloc(dev, sizeof(*stx104gpio), GFP_KERNEL);
+ if (!stx104gpio)
+ return -ENOMEM;
+
if (!devm_request_region(dev, base[id], STX104_EXTENT,
dev_name(dev))) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -117,14 +199,53 @@ static int stx104_probe(struct device *dev, unsigned int id)
outw(0, base[id] + 4);
outw(0, base[id] + 6);
- return devm_iio_device_register(dev, indio_dev);
+ err = devm_iio_device_register(dev, indio_dev);
+ if (err) {
+ dev_err(dev, "IIO device registering failed (%d)\n", err);
+ return err;
+ }
+
+ stx104gpio->chip.label = dev_name(dev);
+ stx104gpio->chip.parent = dev;
+ stx104gpio->chip.owner = THIS_MODULE;
+ stx104gpio->chip.base = -1;
+ stx104gpio->chip.ngpio = 8;
+ stx104gpio->chip.get_direction = stx104_gpio_get_direction;
+ stx104gpio->chip.direction_input = stx104_gpio_direction_input;
+ stx104gpio->chip.direction_output = stx104_gpio_direction_output;
+ stx104gpio->chip.get = stx104_gpio_get;
+ stx104gpio->chip.set = stx104_gpio_set;
+ stx104gpio->base = base[id] + 3;
+ stx104gpio->out_state = 0x0;
+
+ spin_lock_init(&stx104gpio->lock);
+
+ dev_set_drvdata(dev, stx104gpio);
+
+ err = gpiochip_add_data(&stx104gpio->chip, stx104gpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int stx104_remove(struct device *dev, unsigned int id)
+{
+ struct stx104_gpio *const stx104gpio = dev_get_drvdata(dev);
+
+ gpiochip_remove(&stx104gpio->chip);
+
+ return 0;
}
static struct isa_driver stx104_driver = {
.probe = stx104_probe,
.driver = {
.name = "stx104"
- }
+ },
+ .remove = stx104_remove
};
module_isa_driver(stx104_driver, num_stx104);
diff --git a/drivers/iio/dummy/Kconfig b/drivers/iio/dummy/Kconfig
index 71805ced1aae..aa5824d96a43 100644
--- a/drivers/iio/dummy/Kconfig
+++ b/drivers/iio/dummy/Kconfig
@@ -10,6 +10,7 @@ config IIO_DUMMY_EVGEN
config IIO_SIMPLE_DUMMY
tristate "An example driver with no hardware requirements"
+ depends on IIO_SW_DEVICE
help
Driver intended mainly as documentation for how to write
a driver. May also be useful for testing userspace code
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index 43fe4ba7d0dc..ad3410e528b6 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -17,26 +17,18 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/sw_device.h>
#include "iio_simple_dummy.h"
-/*
- * A few elements needed to fake a bus for this driver
- * Note instances parameter controls how many of these
- * dummy devices are registered.
- */
-static unsigned instances = 1;
-module_param(instances, uint, 0);
-
-/* Pointer array used to fake bus elements */
-static struct iio_dev **iio_dummy_devs;
-
-/* Fake a name for the part number, usually obtained from the id table */
-static const char *iio_dummy_part_number = "iio_dummy_part_no";
+static struct config_item_type iio_dummy_type = {
+ .ct_owner = THIS_MODULE,
+};
/**
* struct iio_dummy_accel_calibscale - realworld to register mapping
@@ -572,12 +564,18 @@ static int iio_dummy_init_device(struct iio_dev *indio_dev)
* const struct i2c_device_id *id)
* SPI: iio_dummy_probe(struct spi_device *spi)
*/
-static int iio_dummy_probe(int index)
+static struct iio_sw_device *iio_dummy_probe(const char *name)
{
int ret;
struct iio_dev *indio_dev;
struct iio_dummy_state *st;
+ struct iio_sw_device *swd;
+ swd = kzalloc(sizeof(*swd), GFP_KERNEL);
+ if (!swd) {
+ ret = -ENOMEM;
+ goto error_kzalloc;
+ }
/*
* Allocate an IIO device.
*
@@ -608,7 +606,7 @@ static int iio_dummy_probe(int index)
* i2c_set_clientdata(client, indio_dev);
* spi_set_drvdata(spi, indio_dev);
*/
- iio_dummy_devs[index] = indio_dev;
+ swd->device = indio_dev;
/*
* Set the device name.
@@ -619,7 +617,7 @@ static int iio_dummy_probe(int index)
* indio_dev->name = id->name;
* indio_dev->name = spi_get_device_id(spi)->name;
*/
- indio_dev->name = iio_dummy_part_number;
+ indio_dev->name = kstrdup(name, GFP_KERNEL);
/* Provide description of available channels */
indio_dev->channels = iio_dummy_channels;
@@ -646,7 +644,9 @@ static int iio_dummy_probe(int index)
if (ret < 0)
goto error_unconfigure_buffer;
- return 0;
+ iio_swd_group_init_type_name(swd, name, &iio_dummy_type);
+
+ return swd;
error_unconfigure_buffer:
iio_simple_dummy_unconfigure_buffer(indio_dev);
error_unregister_events:
@@ -654,16 +654,18 @@ error_unregister_events:
error_free_device:
iio_device_free(indio_dev);
error_ret:
- return ret;
+ kfree(swd);
+error_kzalloc:
+ return ERR_PTR(ret);
}
/**
* iio_dummy_remove() - device instance removal function
- * @index: device index.
+ * @swd: pointer to software IIO device abstraction
*
* Parameters follow those of iio_dummy_probe for buses.
*/
-static void iio_dummy_remove(int index)
+static int iio_dummy_remove(struct iio_sw_device *swd)
{
/*
* Get a pointer to the device instance iio_dev structure
@@ -671,7 +673,7 @@ static void iio_dummy_remove(int index)
* struct iio_dev *indio_dev = i2c_get_clientdata(client);
* struct iio_dev *indio_dev = spi_get_drvdata(spi);
*/
- struct iio_dev *indio_dev = iio_dummy_devs[index];
+ struct iio_dev *indio_dev = swd->device;
/* Unregister the device */
iio_device_unregister(indio_dev);
@@ -684,11 +686,13 @@ static void iio_dummy_remove(int index)
iio_simple_dummy_events_unregister(indio_dev);
/* Free all structures */
+ kfree(indio_dev->name);
iio_device_free(indio_dev);
-}
+ return 0;
+}
/**
- * iio_dummy_init() - device driver registration
+ * module_iio_sw_device_driver() - device driver registration
*
* Varies depending on bus type of the device. As there is no device
* here, call probe directly. For information on device registration
@@ -697,50 +701,18 @@ static void iio_dummy_remove(int index)
* spi:
* Documentation/spi/spi-summary
*/
-static __init int iio_dummy_init(void)
-{
- int i, ret;
-
- if (instances > 10) {
- instances = 1;
- return -EINVAL;
- }
-
- /* Fake a bus */
- iio_dummy_devs = kcalloc(instances, sizeof(*iio_dummy_devs),
- GFP_KERNEL);
- /* Here we have no actual device so call probe */
- for (i = 0; i < instances; i++) {
- ret = iio_dummy_probe(i);
- if (ret < 0)
- goto error_remove_devs;
- }
- return 0;
-
-error_remove_devs:
- while (i--)
- iio_dummy_remove(i);
-
- kfree(iio_dummy_devs);
- return ret;
-}
-module_init(iio_dummy_init);
+static const struct iio_sw_device_ops iio_dummy_device_ops = {
+ .probe = iio_dummy_probe,
+ .remove = iio_dummy_remove,
+};
-/**
- * iio_dummy_exit() - device driver removal
- *
- * Varies depending on bus type of the device.
- * As there is no device here, call remove directly.
- */
-static __exit void iio_dummy_exit(void)
-{
- int i;
+static struct iio_sw_device_type iio_dummy_device = {
+ .name = "dummy",
+ .owner = THIS_MODULE,
+ .ops = &iio_dummy_device_ops,
+};
- for (i = 0; i < instances; i++)
- iio_dummy_remove(i);
- kfree(iio_dummy_devs);
-}
-module_exit(iio_dummy_exit);
+module_iio_sw_device_driver(iio_dummy_device);
MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("IIO dummy driver");
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index cf44a6f79431..b383892a5193 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -85,7 +85,8 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
}
}
- iio_push_to_buffers_with_timestamp(indio_dev, data, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_get_time_ns(indio_dev));
kfree(data);
diff --git a/drivers/iio/dummy/iio_simple_dummy_events.c b/drivers/iio/dummy/iio_simple_dummy_events.c
index 6eb600ff7056..ed63ffd849f8 100644
--- a/drivers/iio/dummy/iio_simple_dummy_events.c
+++ b/drivers/iio/dummy/iio_simple_dummy_events.c
@@ -158,7 +158,7 @@ static irqreturn_t iio_simple_dummy_get_timestamp(int irq, void *private)
struct iio_dev *indio_dev = private;
struct iio_dummy_state *st = iio_priv(indio_dev);
- st->event_timestamp = iio_get_time_ns();
+ st->event_timestamp = iio_get_time_ns(indio_dev);
return IRQ_WAKE_THREAD;
}
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 7ccc044063f6..f7fcfa886f72 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -50,6 +50,7 @@
#define BMG160_REG_PMU_BW 0x10
#define BMG160_NO_FILTER 0
#define BMG160_DEF_BW 100
+#define BMG160_REG_PMU_BW_RES BIT(7)
#define BMG160_REG_INT_MAP_0 0x17
#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
@@ -100,7 +101,6 @@ struct bmg160_data {
struct iio_trigger *motion_trig;
struct mutex mutex;
s16 buffer[8];
- u8 bw_bits;
u32 dps_range;
int ev_enable_state;
int slope_thres;
@@ -117,13 +117,16 @@ enum bmg160_axis {
};
static const struct {
- int val;
+ int odr;
+ int filter;
int bw_bits;
-} bmg160_samp_freq_table[] = { {100, 0x07},
- {200, 0x06},
- {400, 0x03},
- {1000, 0x02},
- {2000, 0x01} };
+} bmg160_samp_freq_table[] = { {100, 32, 0x07},
+ {200, 64, 0x06},
+ {100, 12, 0x05},
+ {200, 23, 0x04},
+ {400, 47, 0x03},
+ {1000, 116, 0x02},
+ {2000, 230, 0x01} };
static const struct {
int scale;
@@ -153,7 +156,7 @@ static int bmg160_convert_freq_to_bit(int val)
int i;
for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
- if (bmg160_samp_freq_table[i].val == val)
+ if (bmg160_samp_freq_table[i].odr == val)
return bmg160_samp_freq_table[i].bw_bits;
}
@@ -176,7 +179,53 @@ static int bmg160_set_bw(struct bmg160_data *data, int val)
return ret;
}
- data->bw_bits = bw_bits;
+ return 0;
+}
+
+static int bmg160_get_filter(struct bmg160_data *data, int *val)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ int i;
+ unsigned int bw_bits;
+
+ ret = regmap_read(data->regmap, BMG160_REG_PMU_BW, &bw_bits);
+ if (ret < 0) {
+ dev_err(dev, "Error reading reg_pmu_bw\n");
+ return ret;
+ }
+
+ /* Ignore the readonly reserved bit. */
+ bw_bits &= ~BMG160_REG_PMU_BW_RES;
+
+ for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
+ if (bmg160_samp_freq_table[i].bw_bits == bw_bits)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(bmg160_samp_freq_table))
+ return -EINVAL;
+
+ *val = bmg160_samp_freq_table[i].filter;
+
+ return IIO_VAL_INT;
+}
+
+static int bmg160_set_filter(struct bmg160_data *data, int val)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
+ if (bmg160_samp_freq_table[i].filter == val)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(bmg160_samp_freq_table))
+ return -EINVAL;
+
+ ret = regmap_write(data->regmap, BMG160_REG_PMU_BW,
+ bmg160_samp_freq_table[i].bw_bits);
+ if (ret < 0) {
+ dev_err(dev, "Error writing reg_pmu_bw\n");
+ return ret;
+ }
return 0;
}
@@ -386,11 +435,23 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
static int bmg160_get_bw(struct bmg160_data *data, int *val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int i;
+ unsigned int bw_bits;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMG160_REG_PMU_BW, &bw_bits);
+ if (ret < 0) {
+ dev_err(dev, "Error reading reg_pmu_bw\n");
+ return ret;
+ }
+
+ /* Ignore the readonly reserved bit. */
+ bw_bits &= ~BMG160_REG_PMU_BW_RES;
for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
- if (bmg160_samp_freq_table[i].bw_bits == data->bw_bits) {
- *val = bmg160_samp_freq_table[i].val;
+ if (bmg160_samp_freq_table[i].bw_bits == bw_bits) {
+ *val = bmg160_samp_freq_table[i].odr;
return IIO_VAL_INT;
}
}
@@ -507,6 +568,8 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
} else
return -EINVAL;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return bmg160_get_filter(data, val);
case IIO_CHAN_INFO_SCALE:
*val = 0;
switch (chan->type) {
@@ -571,6 +634,26 @@ static int bmg160_write_raw(struct iio_dev *indio_dev,
ret = bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (val2)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ ret = bmg160_set_power_state(data, true);
+ if (ret < 0) {
+ bmg160_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ ret = bmg160_set_filter(data, val);
+ if (ret < 0) {
+ bmg160_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ ret = bmg160_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ return ret;
case IIO_CHAN_INFO_SCALE:
if (val)
return -EINVAL;
@@ -728,7 +811,8 @@ static const struct iio_event_spec bmg160_event = {
.channel2 = IIO_MOD_##_axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
.scan_index = AXIS_##_axis, \
.scan_type = { \
.sign = 's', \
@@ -885,25 +969,25 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
if (val & BMG160_ANY_MOTION_BIT_X)
iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_ROC,
- dir),
- iio_get_time_ns());
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_ROC,
+ dir),
+ iio_get_time_ns(indio_dev));
if (val & BMG160_ANY_MOTION_BIT_Y)
iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_ROC,
- dir),
- iio_get_time_ns());
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_ROC,
+ dir),
+ iio_get_time_ns(indio_dev));
if (val & BMG160_ANY_MOTION_BIT_Z)
iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_ROC,
- dir),
- iio_get_time_ns());
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_ROC,
+ dir),
+ iio_get_time_ns(indio_dev));
ack_intr_status:
if (!data->dready_trigger_on) {
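The bw_bits code now encodes both the output data rate and the low-pass
filter bandwidth, so the cached bw_bits field is dropped and either value is
derived from the shared table and the live register. A standalone sketch of
one direction of that lookup, using the table values above (the helper name
is illustrative, not the driver's API):

#include <stdio.h>

static const struct { int odr; int filter; int bw_bits; } tbl[] = {
	{ 100, 32, 0x07 }, { 200, 64, 0x06 }, { 100, 12, 0x05 },
	{ 200, 23, 0x04 }, { 400, 47, 0x03 }, { 1000, 116, 0x02 },
	{ 2000, 230, 0x01 },
};

/* Map a 3dB filter frequency to register bits; -1 when unsupported. */
static int filter_to_bw_bits(int filter)
{
	for (unsigned i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (tbl[i].filter == filter)
			return tbl[i].bw_bits;
	return -1;
}

int main(void)
{
	printf("116 Hz filter -> bw_bits 0x%02x\n", filter_to_bw_bits(116));
	return 0;
}

bmg160_get_filter() and bmg160_set_filter() above perform the same walk, in
both directions, against the PMU_BW register contents.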
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index a8012955a1f6..aea034d8fe0f 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -426,13 +426,15 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
indio_dev->info = &gyro_info;
mutex_init(&gdata->tb.buf_lock);
- st_sensors_power_enable(indio_dev);
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_gyro_sensors_settings),
st_gyro_sensors_settings);
if (err < 0)
- return err;
+ goto st_gyro_power_off;
gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS;
gdata->multiread_bit = gdata->sensor_settings->multi_read_bit;
@@ -446,11 +448,11 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev,
(struct st_sensors_platform_data *)&gyro_pdata);
if (err < 0)
- return err;
+ goto st_gyro_power_off;
err = st_gyro_allocate_ring(indio_dev);
if (err < 0)
- return err;
+ goto st_gyro_power_off;
if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -473,6 +475,8 @@ st_gyro_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_gyro_probe_trigger_error:
st_gyro_deallocate_ring(indio_dev);
+st_gyro_power_off:
+ st_sensors_power_disable(indio_dev);
return err;
}
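st_sensors_power_enable() is now checked, and every later failure jumps to
the new st_gyro_power_off label so the sensor is not left powered on a
failed probe. A compilable toy model of that unwind pattern, with
hypothetical example_* names standing in for the ST sensors helpers:

#include <stdio.h>

struct example_dev { int powered; };

static int example_power_enable(struct example_dev *d) { d->powered = 1; return 0; }
static void example_power_disable(struct example_dev *d) { d->powered = 0; }
static int example_allocate_ring(struct example_dev *d) { (void)d; return -12; /* simulate -ENOMEM */ }

static int example_probe(struct example_dev *dev)
{
	int err;

	err = example_power_enable(dev);
	if (err)
		return err;		/* nothing to unwind yet */

	err = example_allocate_ring(dev);
	if (err)
		goto power_off;		/* undo earlier steps in reverse order */

	return 0;

power_off:
	example_power_disable(dev);
	return err;
}

int main(void)
{
	struct example_dev d = { 0 };
	int err = example_probe(&d);

	printf("probe returned %d, powered=%d\n", err, d.powered);
	return 0;
}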
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 88e43f87b926..9a081465c42f 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -1,7 +1,7 @@
/*
* AFE4403 Heart Rate Monitors and Low-Cost Pulse Oximeters
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -39,127 +39,90 @@
#define AFE4403_TIAGAIN 0x20
#define AFE4403_TIA_AMB_GAIN 0x21
-/* AFE4403 GAIN register fields */
-#define AFE4403_TIAGAIN_RES_MASK GENMASK(2, 0)
-#define AFE4403_TIAGAIN_RES_SHIFT 0
-#define AFE4403_TIAGAIN_CAP_MASK GENMASK(7, 3)
-#define AFE4403_TIAGAIN_CAP_SHIFT 3
-
-/* AFE4403 LEDCNTRL register fields */
-#define AFE440X_LEDCNTRL_LED1_MASK GENMASK(15, 8)
-#define AFE440X_LEDCNTRL_LED1_SHIFT 8
-#define AFE440X_LEDCNTRL_LED2_MASK GENMASK(7, 0)
-#define AFE440X_LEDCNTRL_LED2_SHIFT 0
-#define AFE440X_LEDCNTRL_LED_RANGE_MASK GENMASK(17, 16)
-#define AFE440X_LEDCNTRL_LED_RANGE_SHIFT 16
-
-/* AFE4403 CONTROL2 register fields */
-#define AFE440X_CONTROL2_PWR_DWN_TX BIT(2)
-#define AFE440X_CONTROL2_EN_SLOW_DIAG BIT(8)
-#define AFE440X_CONTROL2_DIAG_OUT_TRI BIT(10)
-#define AFE440X_CONTROL2_TX_BRDG_MOD BIT(11)
-#define AFE440X_CONTROL2_TX_REF_MASK GENMASK(18, 17)
-#define AFE440X_CONTROL2_TX_REF_SHIFT 17
-
-/* AFE4404 NULL fields */
-#define NULL_MASK 0
-#define NULL_SHIFT 0
-
-/* AFE4403 LEDCNTRL values */
-#define AFE440X_LEDCNTRL_RANGE_TX_HALF 0x1
-#define AFE440X_LEDCNTRL_RANGE_TX_FULL 0x2
-#define AFE440X_LEDCNTRL_RANGE_TX_OFF 0x3
-
-/* AFE4403 CONTROL2 values */
-#define AFE440X_CONTROL2_TX_REF_025 0x0
-#define AFE440X_CONTROL2_TX_REF_050 0x1
-#define AFE440X_CONTROL2_TX_REF_100 0x2
-#define AFE440X_CONTROL2_TX_REF_075 0x3
-
-/* AFE4403 CONTROL3 values */
-#define AFE440X_CONTROL3_CLK_DIV_2 0x0
-#define AFE440X_CONTROL3_CLK_DIV_4 0x2
-#define AFE440X_CONTROL3_CLK_DIV_6 0x3
-#define AFE440X_CONTROL3_CLK_DIV_8 0x4
-#define AFE440X_CONTROL3_CLK_DIV_12 0x5
-#define AFE440X_CONTROL3_CLK_DIV_1 0x7
-
-/* AFE4403 TIAGAIN_CAP values */
-#define AFE4403_TIAGAIN_CAP_5_P 0x0
-#define AFE4403_TIAGAIN_CAP_10_P 0x1
-#define AFE4403_TIAGAIN_CAP_20_P 0x2
-#define AFE4403_TIAGAIN_CAP_30_P 0x3
-#define AFE4403_TIAGAIN_CAP_55_P 0x8
-#define AFE4403_TIAGAIN_CAP_155_P 0x10
-
-/* AFE4403 TIAGAIN_RES values */
-#define AFE4403_TIAGAIN_RES_500_K 0x0
-#define AFE4403_TIAGAIN_RES_250_K 0x1
-#define AFE4403_TIAGAIN_RES_100_K 0x2
-#define AFE4403_TIAGAIN_RES_50_K 0x3
-#define AFE4403_TIAGAIN_RES_25_K 0x4
-#define AFE4403_TIAGAIN_RES_10_K 0x5
-#define AFE4403_TIAGAIN_RES_1_M 0x6
-#define AFE4403_TIAGAIN_RES_NONE 0x7
+enum afe4403_fields {
+ /* Gains */
+ F_RF_LED1, F_CF_LED1,
+ F_RF_LED, F_CF_LED,
+
+ /* LED Current */
+ F_ILED1, F_ILED2,
+
+ /* sentinel */
+ F_MAX_FIELDS
+};
+
+static const struct reg_field afe4403_reg_fields[] = {
+ /* Gains */
+ [F_RF_LED1] = REG_FIELD(AFE4403_TIAGAIN, 0, 2),
+ [F_CF_LED1] = REG_FIELD(AFE4403_TIAGAIN, 3, 7),
+ [F_RF_LED] = REG_FIELD(AFE4403_TIA_AMB_GAIN, 0, 2),
+ [F_CF_LED] = REG_FIELD(AFE4403_TIA_AMB_GAIN, 3, 7),
+ /* LED Current */
+ [F_ILED1] = REG_FIELD(AFE440X_LEDCNTRL, 0, 7),
+ [F_ILED2] = REG_FIELD(AFE440X_LEDCNTRL, 8, 15),
+};
/**
- * struct afe4403_data
- * @dev - Device structure
- * @spi - SPI device handle
- * @regmap - Register map of the device
- * @regulator - Pointer to the regulator for the IC
- * @trig - IIO trigger for this device
- * @irq - ADC_RDY line interrupt number
+ * struct afe4403_data - AFE4403 device instance data
+ * @dev: Device structure
+ * @spi: SPI device handle
+ * @regmap: Register map of the device
+ * @fields: Register fields of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
*/
struct afe4403_data {
struct device *dev;
struct spi_device *spi;
struct regmap *regmap;
+ struct regmap_field *fields[F_MAX_FIELDS];
struct regulator *regulator;
struct iio_trigger *trig;
int irq;
};
enum afe4403_chan_id {
+ LED2 = 1,
+ ALED2,
LED1,
ALED1,
- LED2,
- ALED2,
- LED1_ALED1,
LED2_ALED2,
- ILED1,
- ILED2,
+ LED1_ALED1,
};
-static const struct afe440x_reg_info afe4403_reg_info[] = {
- [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, 0, NULL),
- [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, 0, NULL),
- [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, 0, NULL),
- [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
- [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
- [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
- [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED1),
- [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED2),
+static const unsigned int afe4403_channel_values[] = {
+ [LED2] = AFE440X_LED2VAL,
+ [ALED2] = AFE440X_ALED2VAL,
+ [LED1] = AFE440X_LED1VAL,
+ [ALED1] = AFE440X_ALED1VAL,
+ [LED2_ALED2] = AFE440X_LED2_ALED2VAL,
+ [LED1_ALED1] = AFE440X_LED1_ALED1VAL,
+};
+
+static const unsigned int afe4403_channel_leds[] = {
+ [LED2] = F_ILED2,
+ [LED1] = F_ILED1,
};
static const struct iio_chan_spec afe4403_channels[] = {
/* ADC values */
- AFE440X_INTENSITY_CHAN(LED1, "led1", 0),
- AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", 0),
- AFE440X_INTENSITY_CHAN(LED2, "led2", 0),
- AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", 0),
- AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
- AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED2, 0),
+ AFE440X_INTENSITY_CHAN(ALED2, 0),
+ AFE440X_INTENSITY_CHAN(LED1, 0),
+ AFE440X_INTENSITY_CHAN(ALED1, 0),
+ AFE440X_INTENSITY_CHAN(LED2_ALED2, 0),
+ AFE440X_INTENSITY_CHAN(LED1_ALED1, 0),
/* LED current */
- AFE440X_CURRENT_CHAN(ILED1, "led1"),
- AFE440X_CURRENT_CHAN(ILED2, "led2"),
+ AFE440X_CURRENT_CHAN(LED2),
+ AFE440X_CURRENT_CHAN(LED1),
};
static const struct afe440x_val_table afe4403_res_table[] = {
{ 500000 }, { 250000 }, { 100000 }, { 50000 },
{ 25000 }, { 10000 }, { 1000000 }, { 0 },
};
-AFE440X_TABLE_ATTR(tia_resistance_available, afe4403_res_table);
+AFE440X_TABLE_ATTR(in_intensity_resistance_available, afe4403_res_table);
static const struct afe440x_val_table afe4403_cap_table[] = {
{ 0, 5000 }, { 0, 10000 }, { 0, 20000 }, { 0, 25000 },
@@ -171,7 +134,7 @@ static const struct afe440x_val_table afe4403_cap_table[] = {
{ 0, 205000 }, { 0, 210000 }, { 0, 220000 }, { 0, 225000 },
{ 0, 230000 }, { 0, 235000 }, { 0, 245000 }, { 0, 250000 },
};
-AFE440X_TABLE_ATTR(tia_capacitance_available, afe4403_cap_table);
+AFE440X_TABLE_ATTR(in_intensity_capacitance_available, afe4403_cap_table);
static ssize_t afe440x_show_register(struct device *dev,
struct device_attribute *attr,
@@ -180,38 +143,21 @@ static ssize_t afe440x_show_register(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct afe4403_data *afe = iio_priv(indio_dev);
struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
- unsigned int reg_val, type;
+ unsigned int reg_val;
int vals[2];
- int ret, val_len;
+ int ret;
- ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+ ret = regmap_field_read(afe->fields[afe440x_attr->field], &reg_val);
if (ret)
return ret;
- reg_val &= afe440x_attr->mask;
- reg_val >>= afe440x_attr->shift;
-
- switch (afe440x_attr->type) {
- case SIMPLE:
- type = IIO_VAL_INT;
- val_len = 1;
- vals[0] = reg_val;
- break;
- case RESISTANCE:
- case CAPACITANCE:
- type = IIO_VAL_INT_PLUS_MICRO;
- val_len = 2;
- if (reg_val < afe440x_attr->table_size) {
- vals[0] = afe440x_attr->val_table[reg_val].integer;
- vals[1] = afe440x_attr->val_table[reg_val].fract;
- break;
- }
- return -EINVAL;
- default:
+ if (reg_val >= afe440x_attr->table_size)
return -EINVAL;
- }
- return iio_format_value(buf, type, val_len, vals);
+ vals[0] = afe440x_attr->val_table[reg_val].integer;
+ vals[1] = afe440x_attr->val_table[reg_val].fract;
+
+ return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals);
}
static ssize_t afe440x_store_register(struct device *dev,
@@ -227,48 +173,43 @@ static ssize_t afe440x_store_register(struct device *dev,
if (ret)
return ret;
- switch (afe440x_attr->type) {
- case SIMPLE:
- val = integer;
- break;
- case RESISTANCE:
- case CAPACITANCE:
- for (val = 0; val < afe440x_attr->table_size; val++)
- if (afe440x_attr->val_table[val].integer == integer &&
- afe440x_attr->val_table[val].fract == fract)
- break;
- if (val == afe440x_attr->table_size)
- return -EINVAL;
- break;
- default:
+ for (val = 0; val < afe440x_attr->table_size; val++)
+ if (afe440x_attr->val_table[val].integer == integer &&
+ afe440x_attr->val_table[val].fract == fract)
+ break;
+ if (val == afe440x_attr->table_size)
return -EINVAL;
- }
- ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
- afe440x_attr->mask,
- (val << afe440x_attr->shift));
+ ret = regmap_field_write(afe->fields[afe440x_attr->field], val);
if (ret)
return ret;
return count;
}
-static AFE440X_ATTR(tia_separate_en, AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+static AFE440X_ATTR(in_intensity1_resistance, F_RF_LED, afe4403_res_table);
+static AFE440X_ATTR(in_intensity1_capacitance, F_CF_LED, afe4403_cap_table);
+
+static AFE440X_ATTR(in_intensity2_resistance, F_RF_LED, afe4403_res_table);
+static AFE440X_ATTR(in_intensity2_capacitance, F_CF_LED, afe4403_cap_table);
-static AFE440X_ATTR(tia_resistance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
-static AFE440X_ATTR(tia_capacitance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_CAP, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+static AFE440X_ATTR(in_intensity3_resistance, F_RF_LED1, afe4403_res_table);
+static AFE440X_ATTR(in_intensity3_capacitance, F_CF_LED1, afe4403_cap_table);
-static AFE440X_ATTR(tia_resistance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
-static AFE440X_ATTR(tia_capacitance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+static AFE440X_ATTR(in_intensity4_resistance, F_RF_LED1, afe4403_res_table);
+static AFE440X_ATTR(in_intensity4_capacitance, F_CF_LED1, afe4403_cap_table);
static struct attribute *afe440x_attributes[] = {
- &afe440x_attr_tia_separate_en.dev_attr.attr,
- &afe440x_attr_tia_resistance1.dev_attr.attr,
- &afe440x_attr_tia_capacitance1.dev_attr.attr,
- &afe440x_attr_tia_resistance2.dev_attr.attr,
- &afe440x_attr_tia_capacitance2.dev_attr.attr,
- &dev_attr_tia_resistance_available.attr,
- &dev_attr_tia_capacitance_available.attr,
+ &dev_attr_in_intensity_resistance_available.attr,
+ &dev_attr_in_intensity_capacitance_available.attr,
+ &afe440x_attr_in_intensity1_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity1_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity2_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity2_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity3_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity3_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity4_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity4_capacitance.dev_attr.attr,
NULL
};
@@ -309,35 +250,26 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct afe4403_data *afe = iio_priv(indio_dev);
- const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+ unsigned int reg = afe4403_channel_values[chan->address];
+ unsigned int field = afe4403_channel_leds[chan->address];
int ret;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = afe4403_read(afe, reg_info.reg, val);
- if (ret)
- return ret;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_OFFSET:
- ret = regmap_read(afe->regmap, reg_info.offreg,
- val);
+ ret = afe4403_read(afe, reg, val);
if (ret)
return ret;
- *val &= reg_info.mask;
- *val >>= reg_info.shift;
return IIO_VAL_INT;
}
break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = regmap_read(afe->regmap, reg_info.reg, val);
+ ret = regmap_field_read(afe->fields[field], val);
if (ret)
return ret;
- *val &= reg_info.mask;
- *val >>= reg_info.shift;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -357,25 +289,13 @@ static int afe4403_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct afe4403_data *afe = iio_priv(indio_dev);
- const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+ unsigned int field = afe4403_channel_leds[chan->address];
switch (chan->type) {
- case IIO_INTENSITY:
- switch (mask) {
- case IIO_CHAN_INFO_OFFSET:
- return regmap_update_bits(afe->regmap,
- reg_info.offreg,
- reg_info.mask,
- (val << reg_info.shift));
- }
- break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- return regmap_update_bits(afe->regmap,
- reg_info.reg,
- reg_info.mask,
- (val << reg_info.shift));
+ return regmap_field_write(afe->fields[field], val);
}
break;
default:
@@ -410,7 +330,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = spi_write_then_read(afe->spi,
- &afe4403_reg_info[bit].reg, 1,
+ &afe4403_channel_values[bit], 1,
rx, 3);
if (ret)
goto err;
@@ -472,12 +392,8 @@ static const struct iio_trigger_ops afe4403_trigger_ops = {
static const struct reg_sequence afe4403_reg_sequences[] = {
AFE4403_TIMING_PAIRS,
- { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN | 0x000007},
- { AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES_1_M },
- { AFE440X_LEDCNTRL, (0x14 << AFE440X_LEDCNTRL_LED1_SHIFT) |
- (0x14 << AFE440X_LEDCNTRL_LED2_SHIFT) },
- { AFE440X_CONTROL2, AFE440X_CONTROL2_TX_REF_050 <<
- AFE440X_CONTROL2_TX_REF_SHIFT },
+ { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN },
+ { AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN },
};
static const struct regmap_range afe4403_yes_ranges[] = {
@@ -498,13 +414,11 @@ static const struct regmap_config afe4403_regmap_config = {
.volatile_table = &afe4403_volatile_table,
};
-#ifdef CONFIG_OF
static const struct of_device_id afe4403_of_match[] = {
{ .compatible = "ti,afe4403", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, afe4403_of_match);
-#endif
static int __maybe_unused afe4403_suspend(struct device *dev)
{
@@ -553,7 +467,7 @@ static int afe4403_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct afe4403_data *afe;
- int ret;
+ int i, ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*afe));
if (!indio_dev)
@@ -572,6 +486,15 @@ static int afe4403_probe(struct spi_device *spi)
return PTR_ERR(afe->regmap);
}
+ for (i = 0; i < F_MAX_FIELDS; i++) {
+ afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap,
+ afe4403_reg_fields[i]);
+ if (IS_ERR(afe->fields[i])) {
+ dev_err(afe->dev, "Unable to allocate regmap fields\n");
+ return PTR_ERR(afe->fields[i]);
+ }
+ }
+
afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
if (IS_ERR(afe->regulator)) {
dev_err(afe->dev, "Unable to get regulator\n");
@@ -694,7 +617,7 @@ MODULE_DEVICE_TABLE(spi, afe4403_ids);
static struct spi_driver afe4403_spi_driver = {
.driver = {
.name = AFE4403_DRIVER_NAME,
- .of_match_table = of_match_ptr(afe4403_of_match),
+ .of_match_table = afe4403_of_match,
.pm = &afe4403_pm_ops,
},
.probe = afe4403_probe,
@@ -704,5 +627,5 @@ static struct spi_driver afe4403_spi_driver = {
module_spi_driver(afe4403_spi_driver);
MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
-MODULE_DESCRIPTION("TI AFE4403 Heart Rate and Pulse Oximeter");
+MODULE_DESCRIPTION("TI AFE4403 Heart Rate Monitor and Pulse Oximeter AFE");
MODULE_LICENSE("GPL v2");
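The driver now describes each register field once, as a REG_FIELD(reg, lsb,
msb) triple, and lets regmap_field_read()/regmap_field_write() do the
masking and shifting that the deleted *_MASK/*_SHIFT macros hand-rolled. A
standalone model of what such a field amounts to (this mirrors the concept
only, not the regmap API):

#include <stdio.h>

struct reg_field_model { unsigned reg, lsb, msb; };

static unsigned field_mask(struct reg_field_model f)
{
	return ((1u << (f.msb - f.lsb + 1)) - 1) << f.lsb;
}

static unsigned field_read(unsigned regval, struct reg_field_model f)
{
	return (regval & field_mask(f)) >> f.lsb;
}

static unsigned field_write(unsigned regval, struct reg_field_model f, unsigned val)
{
	return (regval & ~field_mask(f)) | ((val << f.lsb) & field_mask(f));
}

int main(void)
{
	/* F_CF_LED1 above: bits 7..3 of the TIAGAIN register (0x20). */
	struct reg_field_model cf_led1 = { 0x20, 3, 7 };
	unsigned reg = field_write(0, cf_led1, 0x10);

	printf("reg=0x%02x, field reads back 0x%02x\n",
	       reg, field_read(reg, cf_led1));
	return 0;
}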
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 5096a4643784..45266404f7e3 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -1,7 +1,7 @@
/*
* AFE4404 Heart Rate Monitors and Low-Cost Pulse Oximeters
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -48,118 +48,102 @@
#define AFE4404_AVG_LED2_ALED2VAL 0x3f
#define AFE4404_AVG_LED1_ALED1VAL 0x40
-/* AFE4404 GAIN register fields */
-#define AFE4404_TIA_GAIN_RES_MASK GENMASK(2, 0)
-#define AFE4404_TIA_GAIN_RES_SHIFT 0
-#define AFE4404_TIA_GAIN_CAP_MASK GENMASK(5, 3)
-#define AFE4404_TIA_GAIN_CAP_SHIFT 3
+/* AFE4404 CONTROL2 register fields */
+#define AFE440X_CONTROL2_OSC_ENABLE BIT(9)
-/* AFE4404 LEDCNTRL register fields */
-#define AFE4404_LEDCNTRL_ILED1_MASK GENMASK(5, 0)
-#define AFE4404_LEDCNTRL_ILED1_SHIFT 0
-#define AFE4404_LEDCNTRL_ILED2_MASK GENMASK(11, 6)
-#define AFE4404_LEDCNTRL_ILED2_SHIFT 6
-#define AFE4404_LEDCNTRL_ILED3_MASK GENMASK(17, 12)
-#define AFE4404_LEDCNTRL_ILED3_SHIFT 12
+enum afe4404_fields {
+ /* Gains */
+ F_TIA_GAIN_SEP, F_TIA_CF_SEP,
+ F_TIA_GAIN, F_TIA_CF,
-/* AFE4404 CONTROL2 register fields */
-#define AFE440X_CONTROL2_ILED_2X_MASK BIT(17)
-#define AFE440X_CONTROL2_ILED_2X_SHIFT 17
-
-/* AFE4404 CONTROL3 register fields */
-#define AFE440X_CONTROL3_OSC_ENABLE BIT(9)
-
-/* AFE4404 OFFDAC register current fields */
-#define AFE4404_OFFDAC_CURR_LED1_MASK GENMASK(9, 5)
-#define AFE4404_OFFDAC_CURR_LED1_SHIFT 5
-#define AFE4404_OFFDAC_CURR_LED2_MASK GENMASK(19, 15)
-#define AFE4404_OFFDAC_CURR_LED2_SHIFT 15
-#define AFE4404_OFFDAC_CURR_LED3_MASK GENMASK(4, 0)
-#define AFE4404_OFFDAC_CURR_LED3_SHIFT 0
-#define AFE4404_OFFDAC_CURR_ALED1_MASK GENMASK(14, 10)
-#define AFE4404_OFFDAC_CURR_ALED1_SHIFT 10
-#define AFE4404_OFFDAC_CURR_ALED2_MASK GENMASK(4, 0)
-#define AFE4404_OFFDAC_CURR_ALED2_SHIFT 0
-
-/* AFE4404 NULL fields */
-#define NULL_MASK 0
-#define NULL_SHIFT 0
-
-/* AFE4404 TIA_GAIN_CAP values */
-#define AFE4404_TIA_GAIN_CAP_5_P 0x0
-#define AFE4404_TIA_GAIN_CAP_2_5_P 0x1
-#define AFE4404_TIA_GAIN_CAP_10_P 0x2
-#define AFE4404_TIA_GAIN_CAP_7_5_P 0x3
-#define AFE4404_TIA_GAIN_CAP_20_P 0x4
-#define AFE4404_TIA_GAIN_CAP_17_5_P 0x5
-#define AFE4404_TIA_GAIN_CAP_25_P 0x6
-#define AFE4404_TIA_GAIN_CAP_22_5_P 0x7
-
-/* AFE4404 TIA_GAIN_RES values */
-#define AFE4404_TIA_GAIN_RES_500_K 0x0
-#define AFE4404_TIA_GAIN_RES_250_K 0x1
-#define AFE4404_TIA_GAIN_RES_100_K 0x2
-#define AFE4404_TIA_GAIN_RES_50_K 0x3
-#define AFE4404_TIA_GAIN_RES_25_K 0x4
-#define AFE4404_TIA_GAIN_RES_10_K 0x5
-#define AFE4404_TIA_GAIN_RES_1_M 0x6
-#define AFE4404_TIA_GAIN_RES_2_M 0x7
+ /* LED Current */
+ F_ILED1, F_ILED2, F_ILED3,
+
+ /* Offset DAC */
+ F_OFFDAC_AMB2, F_OFFDAC_LED1, F_OFFDAC_AMB1, F_OFFDAC_LED2,
+
+ /* sentinel */
+ F_MAX_FIELDS
+};
+
+static const struct reg_field afe4404_reg_fields[] = {
+ /* Gains */
+ [F_TIA_GAIN_SEP] = REG_FIELD(AFE4404_TIA_GAIN_SEP, 0, 2),
+ [F_TIA_CF_SEP] = REG_FIELD(AFE4404_TIA_GAIN_SEP, 3, 5),
+ [F_TIA_GAIN] = REG_FIELD(AFE4404_TIA_GAIN, 0, 2),
+ [F_TIA_CF] = REG_FIELD(AFE4404_TIA_GAIN, 3, 5),
+ /* LED Current */
+ [F_ILED1] = REG_FIELD(AFE440X_LEDCNTRL, 0, 5),
+ [F_ILED2] = REG_FIELD(AFE440X_LEDCNTRL, 6, 11),
+ [F_ILED3] = REG_FIELD(AFE440X_LEDCNTRL, 12, 17),
+ /* Offset DAC */
+ [F_OFFDAC_AMB2] = REG_FIELD(AFE4404_OFFDAC, 0, 4),
+ [F_OFFDAC_LED1] = REG_FIELD(AFE4404_OFFDAC, 5, 9),
+ [F_OFFDAC_AMB1] = REG_FIELD(AFE4404_OFFDAC, 10, 14),
+ [F_OFFDAC_LED2] = REG_FIELD(AFE4404_OFFDAC, 15, 19),
+};
/**
- * struct afe4404_data
- * @dev - Device structure
- * @regmap - Register map of the device
- * @regulator - Pointer to the regulator for the IC
- * @trig - IIO trigger for this device
- * @irq - ADC_RDY line interrupt number
+ * struct afe4404_data - AFE4404 device instance data
+ * @dev: Device structure
+ * @regmap: Register map of the device
+ * @fields: Register fields of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
*/
struct afe4404_data {
struct device *dev;
struct regmap *regmap;
+ struct regmap_field *fields[F_MAX_FIELDS];
struct regulator *regulator;
struct iio_trigger *trig;
int irq;
};
enum afe4404_chan_id {
+ LED2 = 1,
+ ALED2,
LED1,
ALED1,
- LED2,
- ALED2,
- LED3,
- LED1_ALED1,
LED2_ALED2,
- ILED1,
- ILED2,
- ILED3,
+ LED1_ALED1,
+};
+
+static const unsigned int afe4404_channel_values[] = {
+ [LED2] = AFE440X_LED2VAL,
+ [ALED2] = AFE440X_ALED2VAL,
+ [LED1] = AFE440X_LED1VAL,
+ [ALED1] = AFE440X_ALED1VAL,
+ [LED2_ALED2] = AFE440X_LED2_ALED2VAL,
+ [LED1_ALED1] = AFE440X_LED1_ALED1VAL,
};
-static const struct afe440x_reg_info afe4404_reg_info[] = {
- [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED1),
- [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED1),
- [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED2),
- [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED2),
- [LED3] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
- [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
- [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
- [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED1),
- [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED2),
- [ILED3] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED3),
+static const unsigned int afe4404_channel_leds[] = {
+ [LED2] = F_ILED2,
+ [ALED2] = F_ILED3,
+ [LED1] = F_ILED1,
+};
+
+static const unsigned int afe4404_channel_offdacs[] = {
+ [LED2] = F_OFFDAC_LED2,
+ [ALED2] = F_OFFDAC_AMB2,
+ [LED1] = F_OFFDAC_LED1,
+ [ALED1] = F_OFFDAC_AMB1,
};
static const struct iio_chan_spec afe4404_channels[] = {
/* ADC values */
- AFE440X_INTENSITY_CHAN(LED1, "led1", BIT(IIO_CHAN_INFO_OFFSET)),
- AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
- AFE440X_INTENSITY_CHAN(LED2, "led2", BIT(IIO_CHAN_INFO_OFFSET)),
- AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
- AFE440X_INTENSITY_CHAN(LED3, "led3", BIT(IIO_CHAN_INFO_OFFSET)),
- AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
- AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED2, BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(ALED2, BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(LED1, BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(ALED1, BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(LED2_ALED2, 0),
+ AFE440X_INTENSITY_CHAN(LED1_ALED1, 0),
/* LED current */
- AFE440X_CURRENT_CHAN(ILED1, "led1"),
- AFE440X_CURRENT_CHAN(ILED2, "led2"),
- AFE440X_CURRENT_CHAN(ILED3, "led3"),
+ AFE440X_CURRENT_CHAN(LED2),
+ AFE440X_CURRENT_CHAN(ALED2),
+ AFE440X_CURRENT_CHAN(LED1),
};
static const struct afe440x_val_table afe4404_res_table[] = {
@@ -172,7 +156,7 @@ static const struct afe440x_val_table afe4404_res_table[] = {
{ .integer = 1000000, .fract = 0 },
{ .integer = 2000000, .fract = 0 },
};
-AFE440X_TABLE_ATTR(tia_resistance_available, afe4404_res_table);
+AFE440X_TABLE_ATTR(in_intensity_resistance_available, afe4404_res_table);
static const struct afe440x_val_table afe4404_cap_table[] = {
{ .integer = 0, .fract = 5000 },
@@ -184,7 +168,7 @@ static const struct afe440x_val_table afe4404_cap_table[] = {
{ .integer = 0, .fract = 25000 },
{ .integer = 0, .fract = 22500 },
};
-AFE440X_TABLE_ATTR(tia_capacitance_available, afe4404_cap_table);
+AFE440X_TABLE_ATTR(in_intensity_capacitance_available, afe4404_cap_table);
static ssize_t afe440x_show_register(struct device *dev,
struct device_attribute *attr,
@@ -193,38 +177,21 @@ static ssize_t afe440x_show_register(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct afe4404_data *afe = iio_priv(indio_dev);
struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
- unsigned int reg_val, type;
+ unsigned int reg_val;
int vals[2];
- int ret, val_len;
+ int ret;
- ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+ ret = regmap_field_read(afe->fields[afe440x_attr->field], &reg_val);
if (ret)
return ret;
- reg_val &= afe440x_attr->mask;
- reg_val >>= afe440x_attr->shift;
-
- switch (afe440x_attr->type) {
- case SIMPLE:
- type = IIO_VAL_INT;
- val_len = 1;
- vals[0] = reg_val;
- break;
- case RESISTANCE:
- case CAPACITANCE:
- type = IIO_VAL_INT_PLUS_MICRO;
- val_len = 2;
- if (reg_val < afe440x_attr->table_size) {
- vals[0] = afe440x_attr->val_table[reg_val].integer;
- vals[1] = afe440x_attr->val_table[reg_val].fract;
- break;
- }
- return -EINVAL;
- default:
+ if (reg_val >= afe440x_attr->table_size)
return -EINVAL;
- }
- return iio_format_value(buf, type, val_len, vals);
+ vals[0] = afe440x_attr->val_table[reg_val].integer;
+ vals[1] = afe440x_attr->val_table[reg_val].fract;
+
+ return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals);
}
static ssize_t afe440x_store_register(struct device *dev,
@@ -240,48 +207,43 @@ static ssize_t afe440x_store_register(struct device *dev,
if (ret)
return ret;
- switch (afe440x_attr->type) {
- case SIMPLE:
- val = integer;
- break;
- case RESISTANCE:
- case CAPACITANCE:
- for (val = 0; val < afe440x_attr->table_size; val++)
- if (afe440x_attr->val_table[val].integer == integer &&
- afe440x_attr->val_table[val].fract == fract)
- break;
- if (val == afe440x_attr->table_size)
- return -EINVAL;
- break;
- default:
+ for (val = 0; val < afe440x_attr->table_size; val++)
+ if (afe440x_attr->val_table[val].integer == integer &&
+ afe440x_attr->val_table[val].fract == fract)
+ break;
+ if (val == afe440x_attr->table_size)
return -EINVAL;
- }
- ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
- afe440x_attr->mask,
- (val << afe440x_attr->shift));
+ ret = regmap_field_write(afe->fields[afe440x_attr->field], val);
if (ret)
return ret;
return count;
}
-static AFE440X_ATTR(tia_separate_en, AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+static AFE440X_ATTR(in_intensity1_resistance, F_TIA_GAIN_SEP, afe4404_res_table);
+static AFE440X_ATTR(in_intensity1_capacitance, F_TIA_CF_SEP, afe4404_cap_table);
+
+static AFE440X_ATTR(in_intensity2_resistance, F_TIA_GAIN_SEP, afe4404_res_table);
+static AFE440X_ATTR(in_intensity2_capacitance, F_TIA_CF_SEP, afe4404_cap_table);
-static AFE440X_ATTR(tia_resistance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
-static AFE440X_ATTR(tia_capacitance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+static AFE440X_ATTR(in_intensity3_resistance, F_TIA_GAIN, afe4404_res_table);
+static AFE440X_ATTR(in_intensity3_capacitance, F_TIA_CF, afe4404_cap_table);
-static AFE440X_ATTR(tia_resistance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
-static AFE440X_ATTR(tia_capacitance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+static AFE440X_ATTR(in_intensity4_resistance, F_TIA_GAIN, afe4404_res_table);
+static AFE440X_ATTR(in_intensity4_capacitance, F_TIA_CF, afe4404_cap_table);
static struct attribute *afe440x_attributes[] = {
- &afe440x_attr_tia_separate_en.dev_attr.attr,
- &afe440x_attr_tia_resistance1.dev_attr.attr,
- &afe440x_attr_tia_capacitance1.dev_attr.attr,
- &afe440x_attr_tia_resistance2.dev_attr.attr,
- &afe440x_attr_tia_capacitance2.dev_attr.attr,
- &dev_attr_tia_resistance_available.attr,
- &dev_attr_tia_capacitance_available.attr,
+ &dev_attr_in_intensity_resistance_available.attr,
+ &dev_attr_in_intensity_capacitance_available.attr,
+ &afe440x_attr_in_intensity1_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity1_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity2_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity2_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity3_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity3_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity4_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity4_capacitance.dev_attr.attr,
NULL
};
@@ -294,35 +256,32 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct afe4404_data *afe = iio_priv(indio_dev);
- const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+ unsigned int value_reg = afe4404_channel_values[chan->address];
+ unsigned int led_field = afe4404_channel_leds[chan->address];
+ unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
int ret;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = regmap_read(afe->regmap, reg_info.reg, val);
+ ret = regmap_read(afe->regmap, value_reg, val);
if (ret)
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
- ret = regmap_read(afe->regmap, reg_info.offreg,
- val);
+ ret = regmap_field_read(afe->fields[offdac_field], val);
if (ret)
return ret;
- *val &= reg_info.mask;
- *val >>= reg_info.shift;
return IIO_VAL_INT;
}
break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = regmap_read(afe->regmap, reg_info.reg, val);
+ ret = regmap_field_read(afe->fields[led_field], val);
if (ret)
return ret;
- *val &= reg_info.mask;
- *val >>= reg_info.shift;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -342,25 +301,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct afe4404_data *afe = iio_priv(indio_dev);
- const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+ unsigned int led_field = afe4404_channel_leds[chan->address];
+ unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_OFFSET:
- return regmap_update_bits(afe->regmap,
- reg_info.offreg,
- reg_info.mask,
- (val << reg_info.shift));
+ return regmap_field_write(afe->fields[offdac_field], val);
}
break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- return regmap_update_bits(afe->regmap,
- reg_info.reg,
- reg_info.mask,
- (val << reg_info.shift));
+ return regmap_field_write(afe->fields[led_field], val);
}
break;
default:
@@ -387,7 +341,7 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private)
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
- ret = regmap_read(afe->regmap, afe4404_reg_info[bit].reg,
+ ret = regmap_read(afe->regmap, afe4404_channel_values[bit],
&buffer[i++]);
if (ret)
goto err;
@@ -443,11 +397,8 @@ static const struct iio_trigger_ops afe4404_trigger_ops = {
static const struct reg_sequence afe4404_reg_sequences[] = {
AFE4404_TIMING_PAIRS,
{ AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN },
- { AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES_50_K },
- { AFE440X_LEDCNTRL, (0xf << AFE4404_LEDCNTRL_ILED1_SHIFT) |
- (0x3 << AFE4404_LEDCNTRL_ILED2_SHIFT) |
- (0x3 << AFE4404_LEDCNTRL_ILED3_SHIFT) },
- { AFE440X_CONTROL2, AFE440X_CONTROL3_OSC_ENABLE },
+ { AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN },
+ { AFE440X_CONTROL2, AFE440X_CONTROL2_OSC_ENABLE },
};
static const struct regmap_range afe4404_yes_ranges[] = {
@@ -469,13 +420,11 @@ static const struct regmap_config afe4404_regmap_config = {
.volatile_table = &afe4404_volatile_table,
};
-#ifdef CONFIG_OF
static const struct of_device_id afe4404_of_match[] = {
{ .compatible = "ti,afe4404", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, afe4404_of_match);
-#endif
static int __maybe_unused afe4404_suspend(struct device *dev)
{
@@ -525,7 +474,7 @@ static int afe4404_probe(struct i2c_client *client,
{
struct iio_dev *indio_dev;
struct afe4404_data *afe;
- int ret;
+ int i, ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*afe));
if (!indio_dev)
@@ -543,6 +492,15 @@ static int afe4404_probe(struct i2c_client *client,
return PTR_ERR(afe->regmap);
}
+ for (i = 0; i < F_MAX_FIELDS; i++) {
+ afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap,
+ afe4404_reg_fields[i]);
+ if (IS_ERR(afe->fields[i])) {
+ dev_err(afe->dev, "Unable to allocate regmap fields\n");
+ return PTR_ERR(afe->fields[i]);
+ }
+ }
+
afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
if (IS_ERR(afe->regulator)) {
dev_err(afe->dev, "Unable to get regulator\n");
@@ -665,7 +623,7 @@ MODULE_DEVICE_TABLE(i2c, afe4404_ids);
static struct i2c_driver afe4404_i2c_driver = {
.driver = {
.name = AFE4404_DRIVER_NAME,
- .of_match_table = of_match_ptr(afe4404_of_match),
+ .of_match_table = afe4404_of_match,
.pm = &afe4404_pm_ops,
},
.probe = afe4404_probe,
@@ -675,5 +633,5 @@ static struct i2c_driver afe4404_i2c_driver = {
module_i2c_driver(afe4404_i2c_driver);
MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
-MODULE_DESCRIPTION("TI AFE4404 Heart Rate and Pulse Oximeter");
+MODULE_DESCRIPTION("TI AFE4404 Heart Rate Monitor and Pulse Oximeter AFE");
MODULE_LICENSE("GPL v2");
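Both AFE drivers move from a per-channel reg_info struct to parallel lookup
tables indexed by chan->address: one for ADC value registers, one for
LED-current fields, and, on the AFE4404, one for offset-DAC fields. A toy
model of that dispatch style (register numbers are placeholders, not
AFE440X addresses):

#include <stdio.h>

enum { LED2 = 1, ALED2, LED1, ALED1 };

static const unsigned value_reg[] = {
	[LED2] = 0x2a, [ALED2] = 0x2b, [LED1] = 0x2c, [ALED1] = 0x2d,
};
static const unsigned led_field[] = {
	[LED2] = 2, [LED1] = 1,	/* only LED channels have a current field */
};

int main(void)
{
	unsigned address = LED1;	/* what chan->address carries */

	printf("ADC value reg 0x%02x, LED current field %u\n",
	       value_reg[address], led_field[address]);
	return 0;
}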
diff --git a/drivers/iio/health/afe440x.h b/drivers/iio/health/afe440x.h
index c671ab78a23a..1a0f247043ca 100644
--- a/drivers/iio/health/afe440x.h
+++ b/drivers/iio/health/afe440x.h
@@ -71,8 +71,7 @@
#define AFE440X_CONTROL1_TIMEREN BIT(8)
/* TIAGAIN register fields */
-#define AFE440X_TIAGAIN_ENSEPGAIN_MASK BIT(15)
-#define AFE440X_TIAGAIN_ENSEPGAIN_SHIFT 15
+#define AFE440X_TIAGAIN_ENSEPGAIN BIT(15)
/* CONTROL2 register fields */
#define AFE440X_CONTROL2_PDN_AFE BIT(0)
@@ -89,22 +88,7 @@
#define AFE440X_CONTROL0_WRITE 0x0
#define AFE440X_CONTROL0_READ 0x1
-struct afe440x_reg_info {
- unsigned int reg;
- unsigned int offreg;
- unsigned int shift;
- unsigned int mask;
-};
-
-#define AFE440X_REG_INFO(_reg, _offreg, _sm) \
- { \
- .reg = _reg, \
- .offreg = _offreg, \
- .shift = _sm ## _SHIFT, \
- .mask = _sm ## _MASK, \
- }
-
-#define AFE440X_INTENSITY_CHAN(_index, _name, _mask) \
+#define AFE440X_INTENSITY_CHAN(_index, _mask) \
{ \
.type = IIO_INTENSITY, \
.channel = _index, \
@@ -116,29 +100,23 @@ struct afe440x_reg_info {
.storagebits = 32, \
.endianness = IIO_CPU, \
}, \
- .extend_name = _name, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
_mask, \
+ .indexed = true, \
}
-#define AFE440X_CURRENT_CHAN(_index, _name) \
+#define AFE440X_CURRENT_CHAN(_index) \
{ \
.type = IIO_CURRENT, \
.channel = _index, \
.address = _index, \
- .scan_index = _index, \
- .extend_name = _name, \
+ .scan_index = -1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
+ .indexed = true, \
.output = true, \
}
-enum afe440x_reg_type {
- SIMPLE,
- RESISTANCE,
- CAPACITANCE,
-};
-
struct afe440x_val_table {
int integer;
int fract;
@@ -164,10 +142,7 @@ static DEVICE_ATTR_RO(_name)
struct afe440x_attr {
struct device_attribute dev_attr;
- unsigned int reg;
- unsigned int shift;
- unsigned int mask;
- enum afe440x_reg_type type;
+ unsigned int field;
const struct afe440x_val_table *val_table;
unsigned int table_size;
};
@@ -175,17 +150,14 @@ struct afe440x_attr {
#define to_afe440x_attr(_dev_attr) \
container_of(_dev_attr, struct afe440x_attr, dev_attr)
-#define AFE440X_ATTR(_name, _reg, _field, _type, _table, _size) \
+#define AFE440X_ATTR(_name, _field, _table) \
struct afe440x_attr afe440x_attr_##_name = { \
.dev_attr = __ATTR(_name, (S_IRUGO | S_IWUSR), \
afe440x_show_register, \
afe440x_store_register), \
- .reg = _reg, \
- .shift = _field ## _SHIFT, \
- .mask = _field ## _MASK, \
- .type = _type, \
+ .field = _field, \
.val_table = _table, \
- .table_size = _size, \
+ .table_size = ARRAY_SIZE(_table), \
}
#endif /* _AFE440X_H */
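The slimmed-down struct afe440x_attr still relies on the same trick as
before: a generic struct device_attribute is embedded in a wrapper carrying
the field index and value table, and the show/store callbacks recover the
wrapper with container_of() via to_afe440x_attr(). The pattern in
miniature, re-implemented for userspace purely for illustration:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dev_attr_model { const char *name; };

struct wrapped_attr {
	struct dev_attr_model dev_attr;	/* what the callback receives */
	unsigned field;			/* the extra lookup data */
};

int main(void)
{
	struct wrapped_attr a = { { "in_intensity1_resistance" }, 3 };
	struct dev_attr_model *p = &a.dev_attr;
	struct wrapped_attr *w = container_of(p, struct wrapped_attr, dev_attr);

	printf("%s -> field %u\n", w->dev_attr.name, w->field);
	return 0;
}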
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 11535911a5c6..3e200f69e886 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -276,6 +276,7 @@ static const struct i2c_device_id am2315_i2c_id[] = {
{"am2315", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, am2315_i2c_id);
static const struct acpi_device_id am2315_acpi_id[] = {
{"AOS2315", 0},
diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c
index 11cbc38b450f..0fbbd8c40894 100644
--- a/drivers/iio/humidity/htu21.c
+++ b/drivers/iio/humidity/htu21.c
@@ -236,6 +236,7 @@ static const struct i2c_device_id htu21_id[] = {
{"ms8607-humidity", MS8607},
{}
};
+MODULE_DEVICE_TABLE(i2c, htu21_id);
static struct i2c_driver htu21_driver = {
.probe = htu21_probe,
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 359883525ab7..4c45488e3a7f 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -79,4 +79,7 @@ void iio_device_unregister_eventset(struct iio_dev *indio_dev);
void iio_device_wakeup_eventset(struct iio_dev *indio_dev);
int iio_event_getfd(struct iio_dev *indio_dev);
+struct iio_event_interface;
+bool iio_event_enabled(const struct iio_event_interface *ev_int);
+
#endif
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index b8a290ec984e..e0251b8c1a52 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -20,6 +20,7 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
#include "bmi160.h"
@@ -410,7 +411,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
buf[j++] = sample;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
@@ -466,10 +468,36 @@ static int bmi160_write_raw(struct iio_dev *indio_dev,
return 0;
}
+static
+IIO_CONST_ATTR(in_accel_sampling_frequency_available,
+ "0.78125 1.5625 3.125 6.25 12.5 25 50 100 200 400 800 1600");
+static
+IIO_CONST_ATTR(in_anglvel_sampling_frequency_available,
+ "25 50 100 200 400 800 1600 3200");
+static
+IIO_CONST_ATTR(in_accel_scale_available,
+ "0.000598 0.001197 0.002394 0.004788");
+static
+IIO_CONST_ATTR(in_anglvel_scale_available,
+ "0.001065 0.000532 0.000266 0.000133 0.000066");
+
+static struct attribute *bmi160_attrs[] = {
+ &iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_anglvel_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ &iio_const_attr_in_anglvel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bmi160_attrs_group = {
+ .attrs = bmi160_attrs,
+};
+
static const struct iio_info bmi160_info = {
.driver_module = THIS_MODULE,
.read_raw = bmi160_read_raw,
.write_raw = bmi160_write_raw,
+ .attrs = &bmi160_attrs_group,
};
static const char *bmi160_match_acpi_device(struct device *dev)
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index f756feecfa4c..5483b2ea754d 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -13,8 +13,8 @@ config INV_MPU6050_I2C
select INV_MPU6050_IIO
select REGMAP_I2C
help
- This driver supports the Invensense MPU6050/6500/9150 motion tracking
- devices over I2C.
+ This driver supports the Invensense MPU6050/6500/9150 and ICM20608
+ motion tracking devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -24,7 +24,7 @@ config INV_MPU6050_SPI
select INV_MPU6050_IIO
select REGMAP_SPI
help
- This driver supports the Invensense MPU6000/6500/9150 motion tracking
- devices over SPI.
+ This driver supports the Invensense MPU6050/6500/9150 and ICM20608
+ motion tracking devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index ee40dae5ab58..b9fcbf18aa99 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -113,6 +113,12 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6050,
.config = &chip_config_6050,
},
+ {
+ .whoami = INV_ICM20608_WHOAMI_VALUE,
+ .name = "ICM20608",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6050,
+ },
};
int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index e1fd7fa53e3b..19580d1db597 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -170,6 +170,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"mpu6050", INV_MPU6050},
{"mpu6500", INV_MPU6500},
{"mpu9150", INV_MPU9150},
+ {"icm20608", INV_ICM20608},
{}
};
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 3bf8544ccc9f..f0e8c5dd9fae 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -70,6 +70,7 @@ enum inv_devices {
INV_MPU6500,
INV_MPU6000,
INV_MPU9150,
+ INV_ICM20608,
INV_NUM_PARTS
};
@@ -225,6 +226,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_WHOAMI_VALUE 0x68
#define INV_MPU6500_WHOAMI_VALUE 0x70
#define INV_MPU9150_WHOAMI_VALUE 0x68
+#define INV_ICM20608_WHOAMI_VALUE 0xAF
/* scan element definition */
enum inv_mpu6050_scan {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index d0700628ee6d..3a9f3eac91ab 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -107,7 +107,7 @@ irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
struct inv_mpu6050_state *st = iio_priv(indio_dev);
s64 timestamp;
- timestamp = iio_get_time_ns();
+ timestamp = iio_get_time_ns(indio_dev);
kfifo_in_spinlocked(&st->timestamps, &timestamp, 1,
&st->time_stamp_lock);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 190a4a51c830..6e6476dfa188 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -82,6 +82,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"mpu6000", INV_MPU6000},
{"mpu6500", INV_MPU6500},
{"mpu9150", INV_MPU9150},
+ {"icm20608", INV_ICM20608},
{}
};
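Adding the ICM20608 is mostly table work: a new enum value, an hw_info[]
entry keyed by it, and the 0xAF WHOAMI code. A standalone sketch of the
resulting ID check (the real probe logic may differ in detail; this only
shows the declared-type-versus-WHOAMI comparison):

#include <stdio.h>

enum { INV_MPU6050, INV_MPU6500, INV_MPU9150, INV_ICM20608 };

static const struct { unsigned whoami; const char *name; } hw_info[] = {
	[INV_MPU6050]  = { 0x68, "MPU6050" },
	[INV_MPU6500]  = { 0x70, "MPU6500" },
	[INV_MPU9150]  = { 0x68, "MPU9150" },
	[INV_ICM20608] = { 0xAF, "ICM20608" },
};

/* Declared type comes from the i2c/spi id table; WHOAMI must agree. */
static int check_chip(int chip_type, unsigned whoami_read)
{
	return hw_info[chip_type].whoami == whoami_read ? 0 : -1;
}

int main(void)
{
	printf("icm20608 id check: %s\n",
	       check_chip(INV_ICM20608, 0xAF) ? "mismatch" : "ok");
	return 0;
}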
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index e6319a9346b2..f914d5d140e4 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -80,6 +80,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_RESISTANCE] = "resistance",
[IIO_PH] = "ph",
[IIO_UVINDEX] = "uvindex",
+ [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
};
static const char * const iio_modifier_names[] = {
@@ -177,6 +178,86 @@ ssize_t iio_read_const_attr(struct device *dev,
}
EXPORT_SYMBOL(iio_read_const_attr);
+static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
+{
+ int ret;
+ const struct iio_event_interface *ev_int = indio_dev->event_interface;
+
+ ret = mutex_lock_interruptible(&indio_dev->mlock);
+ if (ret)
+ return ret;
+ if ((ev_int && iio_event_enabled(ev_int)) ||
+ iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+ indio_dev->clock_id = clock_id;
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
+/**
+ * iio_get_time_ns() - utility function to get a time stamp for events etc
+ * @indio_dev: device
+ */
+s64 iio_get_time_ns(const struct iio_dev *indio_dev)
+{
+ struct timespec tp;
+
+ switch (iio_device_get_clock(indio_dev)) {
+ case CLOCK_REALTIME:
+ ktime_get_real_ts(&tp);
+ break;
+ case CLOCK_MONOTONIC:
+ ktime_get_ts(&tp);
+ break;
+ case CLOCK_MONOTONIC_RAW:
+ getrawmonotonic(&tp);
+ break;
+ case CLOCK_REALTIME_COARSE:
+ tp = current_kernel_time();
+ break;
+ case CLOCK_MONOTONIC_COARSE:
+ tp = get_monotonic_coarse();
+ break;
+ case CLOCK_BOOTTIME:
+ get_monotonic_boottime(&tp);
+ break;
+ case CLOCK_TAI:
+ timekeeping_clocktai(&tp);
+ break;
+ default:
+ BUG();
+ }
+
+ return timespec_to_ns(&tp);
+}
+EXPORT_SYMBOL(iio_get_time_ns);
+
+/**
+ * iio_get_time_res() - utility function to get the timestamp clock resolution
+ * in nanoseconds.
+ * @indio_dev: device
+ */
+unsigned int iio_get_time_res(const struct iio_dev *indio_dev)
+{
+ switch (iio_device_get_clock(indio_dev)) {
+ case CLOCK_REALTIME:
+ case CLOCK_MONOTONIC:
+ case CLOCK_MONOTONIC_RAW:
+ case CLOCK_BOOTTIME:
+ case CLOCK_TAI:
+ return hrtimer_resolution;
+ case CLOCK_REALTIME_COARSE:
+ case CLOCK_MONOTONIC_COARSE:
+ return LOW_RES_NSEC;
+ default:
+ BUG();
+ }
+}
+EXPORT_SYMBOL(iio_get_time_res);
+
static int __init iio_init(void)
{
int ret;
@@ -989,11 +1070,91 @@ static ssize_t iio_show_dev_name(struct device *dev,
static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
+static ssize_t iio_show_timestamp_clock(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ const clockid_t clk = iio_device_get_clock(indio_dev);
+ const char *name;
+ ssize_t sz;
+
+ switch (clk) {
+ case CLOCK_REALTIME:
+ name = "realtime\n";
+ sz = sizeof("realtime\n");
+ break;
+ case CLOCK_MONOTONIC:
+ name = "monotonic\n";
+ sz = sizeof("monotonic\n");
+ break;
+ case CLOCK_MONOTONIC_RAW:
+ name = "monotonic_raw\n";
+ sz = sizeof("monotonic_raw\n");
+ break;
+ case CLOCK_REALTIME_COARSE:
+ name = "realtime_coarse\n";
+ sz = sizeof("realtime_coarse\n");
+ break;
+ case CLOCK_MONOTONIC_COARSE:
+ name = "monotonic_coarse\n";
+ sz = sizeof("monotonic_coarse\n");
+ break;
+ case CLOCK_BOOTTIME:
+ name = "boottime\n";
+ sz = sizeof("boottime\n");
+ break;
+ case CLOCK_TAI:
+ name = "tai\n";
+ sz = sizeof("tai\n");
+ break;
+ default:
+ BUG();
+ }
+
+ memcpy(buf, name, sz);
+ return sz;
+}
+
+static ssize_t iio_store_timestamp_clock(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ clockid_t clk;
+ int ret;
+
+ if (sysfs_streq(buf, "realtime"))
+ clk = CLOCK_REALTIME;
+ else if (sysfs_streq(buf, "monotonic"))
+ clk = CLOCK_MONOTONIC;
+ else if (sysfs_streq(buf, "monotonic_raw"))
+ clk = CLOCK_MONOTONIC_RAW;
+ else if (sysfs_streq(buf, "realtime_coarse"))
+ clk = CLOCK_REALTIME_COARSE;
+ else if (sysfs_streq(buf, "monotonic_coarse"))
+ clk = CLOCK_MONOTONIC_COARSE;
+ else if (sysfs_streq(buf, "boottime"))
+ clk = CLOCK_BOOTTIME;
+ else if (sysfs_streq(buf, "tai"))
+ clk = CLOCK_TAI;
+ else
+ return -EINVAL;
+
+ ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR,
+ iio_show_timestamp_clock, iio_store_timestamp_clock);
+
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
struct iio_dev_attr *p;
- struct attribute **attr;
+ struct attribute **attr, *clk = NULL;
/* First count elements in any existing group */
if (indio_dev->info->attrs) {
@@ -1008,16 +1169,25 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
*/
if (indio_dev->channels)
for (i = 0; i < indio_dev->num_channels; i++) {
- ret = iio_device_add_channel_sysfs(indio_dev,
- &indio_dev
- ->channels[i]);
+ const struct iio_chan_spec *chan =
+ &indio_dev->channels[i];
+
+ if (chan->type == IIO_TIMESTAMP)
+ clk = &dev_attr_current_timestamp_clock.attr;
+
+ ret = iio_device_add_channel_sysfs(indio_dev, chan);
if (ret < 0)
goto error_clear_attrs;
attrcount += ret;
}
+ if (indio_dev->event_interface)
+ clk = &dev_attr_current_timestamp_clock.attr;
+
if (indio_dev->name)
attrcount++;
+ if (clk)
+ attrcount++;
indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
sizeof(indio_dev->chan_attr_group.attrs[0]),
@@ -1038,6 +1208,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
if (indio_dev->name)
indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
+ if (clk)
+ indio_dev->chan_attr_group.attrs[attrn++] = clk;
indio_dev->groups[indio_dev->groupcounter++] =
&indio_dev->chan_attr_group;
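From userspace the new attribute is an ordinary sysfs file; a small sketch (iio:device0 is just an example path, pick the right deviceN for your sensor):

	#include <fcntl.h>
	#include <unistd.h>

	/* Switch iio:device0 to CLOCK_MONOTONIC timestamps. */
	int set_monotonic_clock(void)
	{
		int fd = open("/sys/bus/iio/devices/iio:device0/current_timestamp_clock",
			      O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, "monotonic", 9) != 9) {
			close(fd);
			return -1;
		}
		return close(fd);
	}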
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index cae332b1d7ea..0ebfc923a997 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -44,6 +44,11 @@ struct iio_event_interface {
struct mutex read_lock;
};
+bool iio_event_enabled(const struct iio_event_interface *ev_int)
+{
+ return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+}
+
/**
* iio_push_event() - try to add event to the list for userspace reading
* @indio_dev: IIO device structure
@@ -60,7 +65,7 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
int copied;
/* Does anyone care? */
- if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ if (iio_event_enabled(ev_int)) {
ev.id = ev_code;
ev.timestamp = timestamp;
@@ -180,8 +185,14 @@ int iio_event_getfd(struct iio_dev *indio_dev)
if (ev_int == NULL)
return -ENODEV;
- if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags))
- return -EBUSY;
+ fd = mutex_lock_interruptible(&indio_dev->mlock);
+ if (fd)
+ return fd;
+
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ fd = -EBUSY;
+ goto unlock;
+ }
iio_device_get(indio_dev);
@@ -194,6 +205,8 @@ int iio_event_getfd(struct iio_dev *indio_dev)
kfifo_reset_out(&ev_int->det_events);
}
+unlock:
+ mutex_unlock(&indio_dev->mlock);
return fd;
}
diff --git a/drivers/iio/industrialio-sw-device.c b/drivers/iio/industrialio-sw-device.c
new file mode 100644
index 000000000000..81b49cfca452
--- /dev/null
+++ b/drivers/iio/industrialio-sw-device.c
@@ -0,0 +1,182 @@
+/*
+ * The Industrial I/O core, software IIO devices functions
+ *
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <linux/iio/sw_device.h>
+#include <linux/iio/configfs.h>
+#include <linux/configfs.h>
+
+static struct config_group *iio_devices_group;
+static struct config_item_type iio_device_type_group_type;
+
+static struct config_item_type iio_devices_group_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static LIST_HEAD(iio_device_types_list);
+static DEFINE_MUTEX(iio_device_types_lock);
+
+static
+struct iio_sw_device_type *__iio_find_sw_device_type(const char *name,
+ unsigned len)
+{
+ struct iio_sw_device_type *d = NULL, *iter;
+
+ list_for_each_entry(iter, &iio_device_types_list, list)
+ if (!strcmp(iter->name, name)) {
+ d = iter;
+ break;
+ }
+
+ return d;
+}
+
+int iio_register_sw_device_type(struct iio_sw_device_type *d)
+{
+ struct iio_sw_device_type *iter;
+ int ret = 0;
+
+ mutex_lock(&iio_device_types_lock);
+ iter = __iio_find_sw_device_type(d->name, strlen(d->name));
+ if (iter)
+ ret = -EBUSY;
+ else
+ list_add_tail(&d->list, &iio_device_types_list);
+ mutex_unlock(&iio_device_types_lock);
+
+ if (ret)
+ return ret;
+
+ d->group = configfs_register_default_group(iio_devices_group, d->name,
+ &iio_device_type_group_type);
+ if (IS_ERR(d->group))
+ ret = PTR_ERR(d->group);
+
+ return ret;
+}
+EXPORT_SYMBOL(iio_register_sw_device_type);
+
+void iio_unregister_sw_device_type(struct iio_sw_device_type *dt)
+{
+ struct iio_sw_device_type *iter;
+
+ mutex_lock(&iio_device_types_lock);
+ iter = __iio_find_sw_device_type(dt->name, strlen(dt->name));
+ if (iter)
+ list_del(&dt->list);
+ mutex_unlock(&iio_device_types_lock);
+
+ configfs_unregister_default_group(dt->group);
+}
+EXPORT_SYMBOL(iio_unregister_sw_device_type);
+
+static
+struct iio_sw_device_type *iio_get_sw_device_type(const char *name)
+{
+ struct iio_sw_device_type *dt;
+
+ mutex_lock(&iio_device_types_lock);
+ dt = __iio_find_sw_device_type(name, strlen(name));
+ if (dt && !try_module_get(dt->owner))
+ dt = NULL;
+ mutex_unlock(&iio_device_types_lock);
+
+ return dt;
+}
+
+struct iio_sw_device *iio_sw_device_create(const char *type, const char *name)
+{
+ struct iio_sw_device *d;
+ struct iio_sw_device_type *dt;
+
+ dt = iio_get_sw_device_type(type);
+ if (!dt) {
+ pr_err("Invalid device type: %s\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+ d = dt->ops->probe(name);
+ if (IS_ERR(d))
+ goto out_module_put;
+
+ d->device_type = dt;
+
+ return d;
+out_module_put:
+ module_put(dt->owner);
+ return d;
+}
+EXPORT_SYMBOL(iio_sw_device_create);
+
+void iio_sw_device_destroy(struct iio_sw_device *d)
+{
+ struct iio_sw_device_type *dt = d->device_type;
+
+ dt->ops->remove(d);
+ module_put(dt->owner);
+}
+EXPORT_SYMBOL(iio_sw_device_destroy);
+
+static struct config_group *device_make_group(struct config_group *group,
+ const char *name)
+{
+ struct iio_sw_device *d;
+
+ d = iio_sw_device_create(group->cg_item.ci_name, name);
+ if (IS_ERR(d))
+ return ERR_CAST(d);
+
+ config_item_set_name(&d->group.cg_item, "%s", name);
+
+ return &d->group;
+}
+
+static void device_drop_group(struct config_group *group,
+ struct config_item *item)
+{
+ struct iio_sw_device *d = to_iio_sw_device(item);
+
+ iio_sw_device_destroy(d);
+ config_item_put(item);
+}
+
+static struct configfs_group_operations device_ops = {
+ .make_group = &device_make_group,
+ .drop_item = &device_drop_group,
+};
+
+static struct config_item_type iio_device_type_group_type = {
+ .ct_group_ops = &device_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static int __init iio_sw_device_init(void)
+{
+ iio_devices_group =
+ configfs_register_default_group(&iio_configfs_subsys.su_group,
+ "devices",
+ &iio_devices_group_type);
+ return PTR_ERR_OR_ZERO(iio_devices_group);
+}
+module_init(iio_sw_device_init);
+
+static void __exit iio_sw_device_exit(void)
+{
+ configfs_unregister_default_group(iio_devices_group);
+}
+module_exit(iio_sw_device_exit);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("Industrial I/O software devices support");
+MODULE_LICENSE("GPL v2");
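A hedged sketch (not part of this patch; "dummy" and both callbacks are placeholders) of a module that would plug a software device type into this configfs interface:

	#include <linux/iio/sw_device.h>

	static struct iio_sw_device *dummy_probe(const char *name)
	{
		/* allocate and register an iio_dev, return its iio_sw_device */
		return ERR_PTR(-ENOMEM);	/* placeholder body */
	}

	static int dummy_remove(struct iio_sw_device *swd)
	{
		/* unregister and free whatever probe() created */
		return 0;
	}

	static const struct iio_sw_device_ops dummy_ops = {
		.probe = dummy_probe,
		.remove = dummy_remove,
	};

	static struct iio_sw_device_type dummy_type = {
		.name = "dummy",
		.owner = THIS_MODULE,
		.ops = &dummy_ops,
	};

	/* Call iio_register_sw_device_type(&dummy_type) from module init and
	 * iio_unregister_sw_device_type(&dummy_type) on exit; instances are
	 * then created via mkdir under /sys/kernel/config/iio/devices/dummy/.
	 */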
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 0c52dfe64977..7ad82fdd3e5b 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -64,10 +64,16 @@ static struct attribute *iio_trig_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(iio_trig_dev);
+static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
+
int iio_trigger_register(struct iio_trigger *trig_info)
{
int ret;
+ /* trig_info->ops is required so the core can reach ops->owner */
+ if (!trig_info->ops)
+ return -EINVAL;
+
trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
if (trig_info->id < 0)
return trig_info->id;
@@ -82,11 +88,19 @@ int iio_trigger_register(struct iio_trigger *trig_info)
/* Add to list of available triggers held by the IIO core */
mutex_lock(&iio_trigger_list_lock);
+ if (__iio_trigger_find_by_name(trig_info->name)) {
+ pr_err("Duplicate trigger name '%s'\n", trig_info->name);
+ ret = -EEXIST;
+ goto error_device_del;
+ }
list_add_tail(&trig_info->list, &iio_trigger_list);
mutex_unlock(&iio_trigger_list_lock);
return 0;
+error_device_del:
+ mutex_unlock(&iio_trigger_list_lock);
+ device_del(&trig_info->dev);
error_unregister_id:
ida_simple_remove(&iio_trigger_ida, trig_info->id);
return ret;
@@ -105,6 +119,18 @@ void iio_trigger_unregister(struct iio_trigger *trig_info)
}
EXPORT_SYMBOL(iio_trigger_unregister);
+/* Search for trigger by name, assuming iio_trigger_list_lock held */
+static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
+{
+ struct iio_trigger *iter;
+
+ list_for_each_entry(iter, &iio_trigger_list, list)
+ if (!strcmp(iter->name, name))
+ return iter;
+
+ return NULL;
+}
+
static struct iio_trigger *iio_trigger_find_by_name(const char *name,
size_t len)
{
@@ -164,8 +190,7 @@ EXPORT_SYMBOL(iio_trigger_poll_chained);
void iio_trigger_notify_done(struct iio_trigger *trig)
{
- if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
- trig->ops->try_reenable)
+ if (atomic_dec_and_test(&trig->use_count) && trig->ops->try_reenable)
if (trig->ops->try_reenable(trig))
/* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig);
@@ -224,7 +249,7 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
goto out_put_irq;
/* Enable trigger in driver */
- if (trig->ops && trig->ops->set_trigger_state && notinuse) {
+ if (trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
if (ret < 0)
goto out_free_irq;
@@ -249,7 +274,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
= (bitmap_weight(trig->pool,
CONFIG_IIO_CONSUMERS_PER_TRIGGER)
== 1);
- if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
+ if (trig->ops->set_trigger_state && no_other_users) {
ret = trig->ops->set_trigger_state(trig, false);
if (ret)
return ret;
@@ -264,7 +289,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
struct iio_poll_func *pf = p;
- pf->timestamp = iio_get_time_ns();
+ pf->timestamp = iio_get_time_ns(pf->indio_dev);
return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
@@ -371,7 +396,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
return ret;
}
- if (trig && trig->ops && trig->ops->validate_device) {
+ if (trig && trig->ops->validate_device) {
ret = trig->ops->validate_device(trig, indio_dev);
if (ret)
return ret;
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 53201d99a16c..f0b47c501f4e 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -118,7 +118,7 @@ static void acpi_als_notify(struct acpi_device *device, u32 event)
struct iio_dev *indio_dev = acpi_driver_data(device);
struct acpi_als *als = iio_priv(indio_dev);
s32 *buffer = als->evt_buffer;
- s64 time_ns = iio_get_time_ns();
+ s64 time_ns = iio_get_time_ns(indio_dev);
s32 val;
int ret;
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 09ad5f1ce539..0113fc843a81 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -118,7 +118,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adjd_s311_data *data = iio_priv(indio_dev);
- s64 time_ns = iio_get_time_ns();
+ s64 time_ns = iio_get_time_ns(indio_dev);
int i, j = 0;
int ret = adjd_s311_req_data(indio_dev);
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index e1b9fa5a7e91..649b26f67813 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -396,7 +396,7 @@ static irqreturn_t apds9300_interrupt_handler(int irq, void *private)
IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(dev_info));
apds9300_clear_intr(data);
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 651d57b8abbf..a4304edc3e0f 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -807,7 +807,7 @@ static irqreturn_t apds9960_interrupt_handler(int irq, void *private)
IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
regmap_write(data->regmap, APDS9960_REG_CICLEAR, 1);
}
@@ -816,7 +816,7 @@ static irqreturn_t apds9960_interrupt_handler(int irq, void *private)
IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
regmap_write(data->regmap, APDS9960_REG_PICLEAR, 1);
}
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index c8d7b5ea7e78..9d66e89c57ef 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -268,7 +268,7 @@ static irqreturn_t cm36651_irq_handler(int irq, void *data)
CM36651_CMD_READ_RAW_PROXIMITY,
IIO_EV_TYPE_THRESH, ev_dir);
- iio_push_event(indio_dev, ev_code, iio_get_time_ns());
+ iio_push_event(indio_dev, ev_code, iio_get_time_ns(indio_dev));
return IRQ_HANDLED;
}
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 6d41086f7c64..6ada9149f142 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -851,7 +851,7 @@ static irqreturn_t gp2ap020a00f_prox_sensing_handler(int irq, void *data)
GP2AP020A00F_SCAN_MODE_PROXIMITY,
IIO_EV_TYPE_ROC,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
} else {
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(
@@ -859,7 +859,7 @@ static irqreturn_t gp2ap020a00f_prox_sensing_handler(int irq, void *data)
GP2AP020A00F_SCAN_MODE_PROXIMITY,
IIO_EV_TYPE_ROC,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
}
@@ -925,7 +925,7 @@ static irqreturn_t gp2ap020a00f_thresh_event_handler(int irq, void *data)
IIO_MOD_LIGHT_CLEAR,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
if (test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &priv->flags)) {
@@ -939,7 +939,7 @@ static irqreturn_t gp2ap020a00f_thresh_event_handler(int irq, void *data)
IIO_MOD_LIGHT_CLEAR,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
}
@@ -1287,22 +1287,14 @@ static int gp2ap020a00f_read_raw(struct iio_dev *indio_dev,
struct gp2ap020a00f_data *data = iio_priv(indio_dev);
int err = -EINVAL;
- mutex_lock(&data->lock);
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev)) {
- err = -EBUSY;
- goto error_unlock;
- }
+ if (mask == IIO_CHAN_INFO_RAW) {
+ err = iio_device_claim_direct_mode(indio_dev);
+ if (err)
+ return err;
err = gp2ap020a00f_read_channel(data, chan, val);
- break;
+ iio_device_release_direct_mode(indio_dev);
}
-
-error_unlock:
- mutex_unlock(&data->lock);
-
return err < 0 ? err : IIO_VAL_INT;
}
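The conversions above all follow the same shape: the core helper replaces an open-coded iio_buffer_enabled() test plus a driver-private mutex, and additionally holds the device's mlock so the buffer cannot be enabled mid-read. A hedged template (read_one_sample() is a stand-in for the driver-specific access):

	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;		/* -EBUSY while the buffer is enabled */
	ret = read_one_sample(data, chan, val);
	iio_device_release_direct_mode(indio_dev);
	if (ret < 0)
		return ret;
	return IIO_VAL_INT;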
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index e2945a20e5f6..1d2c0c8a1d4f 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -44,13 +44,15 @@
#define ISL29125_MODE_B 0x3
#define ISL29125_MODE_RGB 0x5
+#define ISL29125_SENSING_RANGE_0 5722 /* 375 lux full range */
+#define ISL29125_SENSING_RANGE_1 152590 /* 10k lux full range */
+
#define ISL29125_MODE_RANGE BIT(3)
#define ISL29125_STATUS_CONV BIT(1)
struct isl29125_data {
struct i2c_client *client;
- struct mutex lock;
u8 conf1;
u16 buffer[8]; /* 3x 16-bit, padding, 8 bytes timestamp */
};
@@ -128,11 +130,11 @@ static int isl29125_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
- mutex_lock(&data->lock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = isl29125_read_data(data, chan->scan_index);
- mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = ret;
@@ -140,9 +142,9 @@ static int isl29125_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
*val = 0;
if (data->conf1 & ISL29125_MODE_RANGE)
- *val2 = 152590; /* 10k lux full range */
+ *val2 = ISL29125_SENSING_RANGE_1; /* 10k lux full range */
else
- *val2 = 5722; /* 375 lux full range */
+ *val2 = ISL29125_SENSING_RANGE_0; /* 375 lux full range */
return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
@@ -158,9 +160,9 @@ static int isl29125_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
if (val != 0)
return -EINVAL;
- if (val2 == 152590)
+ if (val2 == ISL29125_SENSING_RANGE_1)
data->conf1 |= ISL29125_MODE_RANGE;
- else if (val2 == 5722)
+ else if (val2 == ISL29125_SENSING_RANGE_0)
data->conf1 &= ~ISL29125_MODE_RANGE;
else
return -EINVAL;
@@ -189,7 +191,7 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -259,7 +261,6 @@ static int isl29125_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &isl29125_info;
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index 99a62816c3b4..e8a8931b4f50 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -325,9 +325,6 @@ static int jsa1212_probe(struct i2c_client *client,
struct regmap *regmap;
int ret;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -EOPNOTSUPP;
-
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index e56937c40a18..f409c2047c05 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -267,7 +267,7 @@ static irqreturn_t lm3533_als_isr(int irq, void *dev_id)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
out:
return IRQ_HANDLED;
}
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 6bf89d8f3741..3afc53a3d0b6 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1256,7 +1256,8 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
buf[j++] = psdata & LTR501_PS_DATA_MASK;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -1282,14 +1283,14 @@ static irqreturn_t ltr501_interrupt_handler(int irq, void *private)
IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
if (status & LTR501_STATUS_PS_INTR)
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
return IRQ_HANDLED;
}
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index f17cb2ea18f5..6511b20a2a29 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -511,7 +511,8 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index b776c8ed4387..78c9b3a6453a 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -713,13 +713,13 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(iio));
if (ret & OPT3001_CONFIGURATION_FL)
iio_push_event(iio,
IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(iio));
} else if (ret & OPT3001_CONFIGURATION_CRF) {
ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_RESULT);
if (ret < 0) {
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 9e847f8f4f0c..45cf8b0a4363 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -528,7 +528,7 @@ static irqreturn_t stk3310_irq_handler(int irq, void *private)
struct iio_dev *indio_dev = private;
struct stk3310_data *data = iio_priv(indio_dev);
- data->timestamp = iio_get_time_ns();
+ data->timestamp = iio_get_time_ns(indio_dev);
return IRQ_WAKE_THREAD;
}
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index f90f8c5919fe..a795afb7667b 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -53,7 +53,6 @@
struct tcs3414_data {
struct i2c_client *client;
- struct mutex lock;
u8 control;
u8 gain;
u8 timing;
@@ -134,16 +133,16 @@ static int tcs3414_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
- mutex_lock(&data->lock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = tcs3414_req_data(data);
if (ret < 0) {
- mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
ret = i2c_smbus_read_word_data(data->client, chan->address);
- mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = ret;
@@ -217,7 +216,7 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -288,7 +287,6 @@ static int tcs3414_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &tcs3414_info;
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 1b530bf04c89..3aa71e34ae28 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -52,7 +52,6 @@
struct tcs3472_data {
struct i2c_client *client;
- struct mutex lock;
u8 enable;
u8 control;
u8 atime;
@@ -117,17 +116,16 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
-
- mutex_lock(&data->lock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = tcs3472_req_data(data);
if (ret < 0) {
- mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
ret = i2c_smbus_read_word_data(data->client, chan->address);
- mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = ret;
@@ -204,7 +202,7 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -263,7 +261,6 @@ static int tcs3472_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &tcs3472_info;
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 57b108c30e98..04598ae993d4 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -630,7 +630,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(dev_info));
/* clear the interrupt and push the event */
i2c_smbus_write_byte(chip->client, TSL2563_CMD | TSL2563_CLEARINT);
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 45bc2f742f46..20c40f780964 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -833,7 +833,7 @@ static irqreturn_t us5182d_irq_thread_handler(int irq, void *private)
dir = ret & US5182D_CFG0_PROX ? IIO_EV_DIR_RISING : IIO_EV_DIR_FALLING;
ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, IIO_EV_TYPE_THRESH, dir);
- iio_push_event(indio_dev, ev, iio_get_time_ns());
+ iio_push_event(indio_dev, ev, iio_get_time_ns(indio_dev));
ret = i2c_smbus_write_byte_data(data->client, US5182D_REG_CFG0,
ret & ~US5182D_CFG0_PX_IRQ);
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 84e6559ccc65..1f842abcb4a4 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -44,6 +44,7 @@ config BMC150_MAGN_I2C
This driver is only implementing magnetometer part, which has
its own address and register map.
+ This driver also supports the I2C variants of the Bosch BMC156
+ and BMM150 chips.
To compile this driver as a module, choose M here: the module will be
called bmc150_magn_i2c.
@@ -60,6 +61,7 @@ config BMC150_MAGN_SPI
This driver is only implementing magnetometer part, which has
its own address and register map.
+ This driver also supports the SPI variants of the Bosch BMC156
+ and BMM150 chips.
To compile this driver as a module, choose M here: the module will be
called bmc150_magn_spi.
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 609a2c401b5d..af8606cc7812 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -33,6 +33,7 @@
#include <linux/of_gpio.h>
#include <linux/acpi.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -379,37 +380,40 @@ struct ak8975_data {
u8 cntl_cache;
struct iio_mount_matrix orientation;
struct regulator *vdd;
+ struct regulator *vid;
};
/* Enable attached power regulator if any. */
-static int ak8975_power_on(struct i2c_client *client)
+static int ak8975_power_on(const struct ak8975_data *data)
{
- const struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct ak8975_data *data = iio_priv(indio_dev);
int ret;
- data->vdd = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR_OR_NULL(data->vdd)) {
- ret = PTR_ERR(data->vdd);
- if (ret == -ENODEV)
- ret = 0;
- } else {
- ret = regulator_enable(data->vdd);
+ ret = regulator_enable(data->vdd);
+ if (ret) {
+ dev_warn(&data->client->dev,
+ "Failed to enable specified Vdd supply\n");
+ return ret;
}
-
- if (ret)
- dev_err(&client->dev, "failed to enable Vdd supply: %d\n", ret);
- return ret;
+ ret = regulator_enable(data->vid);
+ if (ret) {
+ dev_warn(&data->client->dev,
+ "Failed to enable specified Vid supply\n");
+ return ret;
+ }
+ /*
+ * According to the datasheet the power supply rise time is 200us
+ * and the minimum wait time before mode setting is 100us, in
+ * total 300us. Add some margin and say minimum 500us here.
+ */
+ usleep_range(500, 1000);
+ return 0;
}
/* Disable attached power regulator if any. */
-static void ak8975_power_off(const struct i2c_client *client)
+static void ak8975_power_off(const struct ak8975_data *data)
{
- const struct iio_dev *indio_dev = i2c_get_clientdata(client);
- const struct ak8975_data *data = iio_priv(indio_dev);
-
- if (!IS_ERR_OR_NULL(data->vdd))
- regulator_disable(data->vdd);
+ regulator_disable(data->vid);
+ regulator_disable(data->vdd);
}
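A side note on the design: the vdd/vid pair could also be handled through the bulk regulator API, which keeps the enable and disable paths to one call each if more rails are ever added; a hedged sketch, not what this patch does:

	static struct regulator_bulk_data ak8975_supplies[] = {
		{ .supply = "vdd" },
		{ .supply = "vid" },
	};

	/* in probe: */
	ret = devm_regulator_bulk_get(&client->dev,
				      ARRAY_SIZE(ak8975_supplies),
				      ak8975_supplies);
	/* in the power on/off paths: */
	ret = regulator_bulk_enable(ARRAY_SIZE(ak8975_supplies),
				    ak8975_supplies);
	regulator_bulk_disable(ARRAY_SIZE(ak8975_supplies), ak8975_supplies);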
/*
@@ -430,8 +434,8 @@ static int ak8975_who_i_am(struct i2c_client *client,
* AK8975 | DEVICE_ID | NA
* AK8963 | DEVICE_ID | NA
*/
- ret = i2c_smbus_read_i2c_block_data(client, AK09912_REG_WIA1,
- 2, wia_val);
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(
+ client, AK09912_REG_WIA1, 2, wia_val);
if (ret < 0) {
dev_err(&client->dev, "Error reading WIA\n");
return ret;
@@ -543,9 +547,9 @@ static int ak8975_setup(struct i2c_client *client)
}
/* Get asa data and store in the device data. */
- ret = i2c_smbus_read_i2c_block_data(client,
- data->def->ctrl_regs[ASA_BASE],
- 3, data->asa);
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(
+ client, data->def->ctrl_regs[ASA_BASE],
+ 3, data->asa);
if (ret < 0) {
dev_err(&client->dev, "Not able to read asa data\n");
return ret;
@@ -686,22 +690,31 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
struct ak8975_data *data = iio_priv(indio_dev);
const struct i2c_client *client = data->client;
const struct ak_def *def = data->def;
+ u16 buff;
int ret;
+ pm_runtime_get_sync(&data->client->dev);
+
mutex_lock(&data->lock);
ret = ak8975_start_read_axis(data, client);
if (ret)
goto exit;
- ret = i2c_smbus_read_word_data(client, def->data_regs[index]);
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(
+ client, def->data_regs[index],
+ sizeof(buff), (u8 *)&buff);
if (ret < 0)
goto exit;
mutex_unlock(&data->lock);
- /* Clamp to valid range. */
- *val = clamp_t(s16, ret, -def->range, def->range);
+ pm_runtime_mark_last_busy(&data->client->dev);
+ pm_runtime_put_autosuspend(&data->client->dev);
+
+ /* Swap bytes and convert to valid range. */
+ buff = le16_to_cpu(buff);
+ *val = clamp_t(s16, buff, -def->range, def->range);
return IIO_VAL_INT;
exit:
@@ -825,7 +838,8 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range);
buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range);
- iio_push_to_buffers_with_timestamp(indio_dev, buff, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buff,
+ iio_get_time_ns(indio_dev));
return;
unlock:
@@ -919,7 +933,15 @@ static int ak8975_probe(struct i2c_client *client,
data->def = &ak_def_array[chipset];
- err = ak8975_power_on(client);
+ /* Fetch the regulators */
+ data->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd))
+ return PTR_ERR(data->vdd);
+ data->vid = devm_regulator_get(&client->dev, "vid");
+ if (IS_ERR(data->vid))
+ return PTR_ERR(data->vid);
+
+ err = ak8975_power_on(data);
if (err)
return err;
@@ -959,26 +981,93 @@ static int ak8975_probe(struct i2c_client *client,
goto cleanup_buffer;
}
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ /*
+ * The device comes online in 500us, so add two orders of magnitude
+ * of delay before autosuspending: 50 ms.
+ */
+ pm_runtime_set_autosuspend_delay(&client->dev, 50);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put(&client->dev);
+
return 0;
cleanup_buffer:
iio_triggered_buffer_cleanup(indio_dev);
power_off:
- ak8975_power_off(client);
+ ak8975_power_off(data);
return err;
}
static int ak8975_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ pm_runtime_get_sync(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+ pm_runtime_disable(&client->dev);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
- ak8975_power_off(client);
+ ak8975_set_mode(data, POWER_DOWN);
+ ak8975_power_off(data);
return 0;
}
+#ifdef CONFIG_PM
+static int ak8975_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* Set the device in power down if it wasn't already */
+ ret = ak8975_set_mode(data, POWER_DOWN);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in setting power-down mode\n");
+ return ret;
+ }
+ /* Next cut the regulators */
+ ak8975_power_off(data);
+
+ return 0;
+}
+
+static int ak8975_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* Bring the regulators back up */
+ ak8975_power_on(data);
+ /*
+ * We come up in powered down mode, the reading routines will
+ * put us in the mode to read values later.
+ */
+ ret = ak8975_set_mode(data, POWER_DOWN);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in setting power-down mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops ak8975_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(ak8975_runtime_suspend,
+ ak8975_runtime_resume, NULL)
+};
+
static const struct i2c_device_id ak8975_id[] = {
{"ak8975", AK8975},
{"ak8963", AK8963},
@@ -1006,6 +1095,7 @@ MODULE_DEVICE_TABLE(of, ak8975_of_match);
static struct i2c_driver ak8975_driver = {
.driver = {
.name = "ak8975",
+ .pm = &ak8975_dev_pm_ops,
.of_match_table = of_match_ptr(ak8975_of_match),
.acpi_match_table = ACPI_PTR(ak_acpi_match),
},
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index eddc7f0d0096..ee05722587aa 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -2,6 +2,7 @@
* 3-axis magnetometer driver supporting following I2C Bosch-Sensortec chips:
* - BMC150
* - BMC156
+ * - BMM150
*
* Copyright (c) 2016, Intel Corporation.
*
@@ -49,6 +50,7 @@ static int bmc150_magn_i2c_remove(struct i2c_client *client)
static const struct acpi_device_id bmc150_magn_acpi_match[] = {
{"BMC150B", 0},
{"BMC156B", 0},
+ {"BMM150B", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
@@ -56,6 +58,7 @@ MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
static const struct i2c_device_id bmc150_magn_i2c_id[] = {
{"bmc150_magn", 0},
{"bmc156_magn", 0},
+ {"bmm150_magn", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, bmc150_magn_i2c_id);
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
index c4c738a07695..7d4152d4d01e 100644
--- a/drivers/iio/magnetometer/bmc150_magn_spi.c
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -2,6 +2,7 @@
* 3-axis magnetometer driver support following SPI Bosch-Sensortec chips:
* - BMC150
* - BMC156
+ * - BMM150
*
* Copyright (c) 2016, Intel Corporation.
*
@@ -41,6 +42,7 @@ static int bmc150_magn_spi_remove(struct spi_device *spi)
static const struct spi_device_id bmc150_magn_spi_id[] = {
{"bmc150_magn", 0},
{"bmc156_magn", 0},
+ {"bmm150_magn", 0},
{}
};
MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id);
@@ -48,6 +50,7 @@ MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id);
static const struct acpi_device_id bmc150_magn_acpi_match[] = {
{"BMC150B", 0},
{"BMC156B", 0},
+ {"BMM150B", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index 77882b466e0f..ba3e2a374ee5 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -451,7 +451,7 @@ static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 261d517428e4..f2be4a049056 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -261,7 +261,7 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 8250fc322c56..3e1f06b2224c 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -589,13 +589,15 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
indio_dev->info = &magn_info;
mutex_init(&mdata->tb.buf_lock);
- st_sensors_power_enable(indio_dev);
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_magn_sensors_settings),
st_magn_sensors_settings);
if (err < 0)
- return err;
+ goto st_magn_power_off;
mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS;
mdata->multiread_bit = mdata->sensor_settings->multi_read_bit;
@@ -608,11 +610,11 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev, NULL);
if (err < 0)
- return err;
+ goto st_magn_power_off;
err = st_magn_allocate_ring(indio_dev);
if (err < 0)
- return err;
+ goto st_magn_power_off;
if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -635,6 +637,8 @@ st_magn_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_magn_probe_trigger_error:
st_magn_deallocate_ring(indio_dev);
+st_magn_power_off:
+ st_sensors_power_disable(indio_dev);
return err;
}
diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig
index 6acb23810bb4..2e9da1cf3297 100644
--- a/drivers/iio/potentiometer/Kconfig
+++ b/drivers/iio/potentiometer/Kconfig
@@ -10,11 +10,22 @@ config DS1803
depends on I2C
help
Say yes here to build support for the Maxim Integrated DS1803
- digital potentiomenter chip.
+ digital potentiometer chip.
To compile this driver as a module, choose M here: the
module will be called ds1803.
+config MAX5487
+ tristate "Maxim MAX5487/MAX5488/MAX5489 Digital Potentiometer driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Maxim
+ MAX5487, MAX5488, MAX5489 digital potentiometer
+ chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max5487.
+
config MCP4131
tristate "Microchip MCP413X/414X/415X/416X/423X/424X/425X/426X Digital Potentiometer driver"
depends on SPI
@@ -28,7 +39,7 @@ config MCP4131
MCP4241, MCP4242,
MCP4251, MCP4252,
MCP4261, MCP4262,
- digital potentiomenter chips.
+ digital potentiometer chips.
To compile this driver as a module, choose M here: the
module will be called mcp4131.
@@ -38,9 +49,11 @@ config MCP4531
depends on I2C
help
Say yes here to build support for the Microchip
- MCP4531, MCP4532, MCP4551, MCP4552,
- MCP4631, MCP4632, MCP4651, MCP4652
- digital potentiomenter chips.
+ MCP4531, MCP4532, MCP4541, MCP4542,
+ MCP4551, MCP4552, MCP4561, MCP4562,
+ MCP4631, MCP4632, MCP4641, MCP4642,
+ MCP4651, MCP4652, MCP4661, MCP4662
+ digital potentiometer chips.
To compile this driver as a module, choose M here: the
module will be called mcp4531.
diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile
index 6007faa2fb02..8adb58f38c0b 100644
--- a/drivers/iio/potentiometer/Makefile
+++ b/drivers/iio/potentiometer/Makefile
@@ -4,6 +4,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_DS1803) += ds1803.o
+obj-$(CONFIG_MAX5487) += max5487.o
obj-$(CONFIG_MCP4131) += mcp4131.o
obj-$(CONFIG_MCP4531) += mcp4531.o
obj-$(CONFIG_TPL0102) += tpl0102.o
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
new file mode 100644
index 000000000000..6c50939a2e83
--- /dev/null
+++ b/drivers/iio/potentiometer/max5487.c
@@ -0,0 +1,161 @@
+/*
+ * max5487.c - Support for MAX5487, MAX5488, MAX5489 digital potentiometers
+ *
+ * Copyright (C) 2016 Cristina-Gabriela Moraru <cristina.moraru09@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+
+#include <linux/iio/sysfs.h>
+#include <linux/iio/iio.h>
+
+#define MAX5487_WRITE_WIPER_A (0x01 << 8)
+#define MAX5487_WRITE_WIPER_B (0x02 << 8)
+
+/* copy both wiper regs to NV regs */
+#define MAX5487_COPY_AB_TO_NV (0x23 << 8)
+/* copy both NV regs to wiper regs */
+#define MAX5487_COPY_NV_TO_AB (0x33 << 8)
+
+#define MAX5487_MAX_POS 255
+
+struct max5487_data {
+ struct spi_device *spi;
+ int kohms;
+};
+
+#define MAX5487_CHANNEL(ch, addr) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = ch, \
+ .address = addr, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec max5487_channels[] = {
+ MAX5487_CHANNEL(0, MAX5487_WRITE_WIPER_A),
+ MAX5487_CHANNEL(1, MAX5487_WRITE_WIPER_B),
+};
+
+static int max5487_write_cmd(struct spi_device *spi, u16 cmd)
+{
+ return spi_write(spi, (const void *)&cmd, sizeof(u16));
+}
+
+static int max5487_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max5487_data *data = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_SCALE)
+ return -EINVAL;
+
+ *val = 1000 * data->kohms;
+ *val2 = MAX5487_MAX_POS;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int max5487_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct max5487_data *data = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ if (val < 0 || val > MAX5487_MAX_POS)
+ return -EINVAL;
+
+ return max5487_write_cmd(data->spi, chan->address | val);
+}
+
+static const struct iio_info max5487_info = {
+ .read_raw = max5487_read_raw,
+ .write_raw = max5487_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int max5487_spi_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct max5487_data *data;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, indio_dev);
+ data = iio_priv(indio_dev);
+
+ data->spi = spi;
+ data->kohms = id->driver_data;
+
+ indio_dev->info = &max5487_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max5487_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max5487_channels);
+
+ /* restore both wiper regs from NV regs */
+ ret = max5487_write_cmd(data->spi, MAX5487_COPY_NV_TO_AB);
+ if (ret < 0)
+ return ret;
+
+ return iio_device_register(indio_dev);
+}
+
+static int max5487_spi_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev);
+
+ iio_device_unregister(indio_dev);
+
+ /* save both wiper regs to NV regs */
+ return max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV);
+}
+
+static const struct spi_device_id max5487_id[] = {
+ { "MAX5487", 10 },
+ { "MAX5488", 50 },
+ { "MAX5489", 100 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, max5487_id);
+
+static const struct acpi_device_id max5487_acpi_match[] = {
+ { "MAX5487", 10 },
+ { "MAX5488", 50 },
+ { "MAX5489", 100 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, max5487_acpi_match);
+
+static struct spi_driver max5487_driver = {
+ .driver = {
+ .name = "max5487",
+ .owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(max5487_acpi_match),
+ },
+ .id_table = max5487_id,
+ .probe = max5487_spi_probe,
+ .remove = max5487_spi_remove
+};
+module_spi_driver(max5487_driver);
+
+MODULE_AUTHOR("Cristina-Gabriela Moraru <cristina.moraru09@gmail.com>");
+MODULE_DESCRIPTION("max5487 SPI driver");
+MODULE_LICENSE("GPL v2");
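Each transfer is a single 16-bit word with the command in the high byte and the wiper data in the low byte, exactly as max5487_write_raw() composes it; a hedged helper built on the driver's own defines (the function name is hypothetical):

	/* Park wiper A at mid-scale using the driver's command framing. */
	static int max5487_wiper_a_midscale(struct spi_device *spi)
	{
		u16 cmd = MAX5487_WRITE_WIPER_A | (MAX5487_MAX_POS / 2);

		return spi_write(spi, (const void *)&cmd, sizeof(cmd));
	}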
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 3b72e1a595db..13b6ae2fcf7b 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -8,12 +8,20 @@
* DEVID #Wipers #Positions Resistor Opts (kOhm) i2c address
* mcp4531 1 129 5, 10, 50, 100 010111x
* mcp4532 1 129 5, 10, 50, 100 01011xx
+ * mcp4541 1 129 5, 10, 50, 100 010111x
+ * mcp4542 1 129 5, 10, 50, 100 01011xx
* mcp4551 1 257 5, 10, 50, 100 010111x
* mcp4552 1 257 5, 10, 50, 100 01011xx
+ * mcp4561 1 257 5, 10, 50, 100 010111x
+ * mcp4562 1 257 5, 10, 50, 100 01011xx
* mcp4631 2 129 5, 10, 50, 100 0101xxx
* mcp4632 2 129 5, 10, 50, 100 01011xx
+ * mcp4641 2 129 5, 10, 50, 100 0101xxx
+ * mcp4642 2 129 5, 10, 50, 100 01011xx
* mcp4651 2 257 5, 10, 50, 100 0101xxx
* mcp4652 2 257 5, 10, 50, 100 01011xx
+ * mcp4661 2 257 5, 10, 50, 100 0101xxx
+ * mcp4662 2 257 5, 10, 50, 100 01011xx
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -23,6 +31,8 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/iio/iio.h>
@@ -37,18 +47,34 @@ enum mcp4531_type {
MCP453x_103,
MCP453x_503,
MCP453x_104,
+ MCP454x_502,
+ MCP454x_103,
+ MCP454x_503,
+ MCP454x_104,
MCP455x_502,
MCP455x_103,
MCP455x_503,
MCP455x_104,
+ MCP456x_502,
+ MCP456x_103,
+ MCP456x_503,
+ MCP456x_104,
MCP463x_502,
MCP463x_103,
MCP463x_503,
MCP463x_104,
+ MCP464x_502,
+ MCP464x_103,
+ MCP464x_503,
+ MCP464x_104,
MCP465x_502,
MCP465x_103,
MCP465x_503,
MCP465x_104,
+ MCP466x_502,
+ MCP466x_103,
+ MCP466x_503,
+ MCP466x_104,
};
static const struct mcp4531_cfg mcp4531_cfg[] = {
@@ -56,18 +82,34 @@ static const struct mcp4531_cfg mcp4531_cfg[] = {
[MCP453x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
[MCP453x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
[MCP453x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
+ [MCP454x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
+ [MCP454x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
+ [MCP454x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
+ [MCP454x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
[MCP455x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
[MCP455x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
[MCP455x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
[MCP455x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
+ [MCP456x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
+ [MCP456x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
+ [MCP456x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
+ [MCP456x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
[MCP463x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
[MCP463x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
[MCP463x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
[MCP463x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
+ [MCP464x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
+ [MCP464x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
+ [MCP464x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
+ [MCP464x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
[MCP465x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
[MCP465x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
[MCP465x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
[MCP465x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+ [MCP466x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
+ [MCP466x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
+ [MCP466x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
+ [MCP466x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
};
#define MCP4531_WRITE (0 << 2)
@@ -148,12 +190,89 @@ static const struct iio_info mcp4531_info = {
.driver_module = THIS_MODULE,
};
+#ifdef CONFIG_OF
+
+#define MCP4531_COMPATIBLE(of_compatible, cfg) { \
+ .compatible = of_compatible, \
+ .data = &mcp4531_cfg[cfg], \
+}
+
+static const struct of_device_id mcp4531_of_match[] = {
+ MCP4531_COMPATIBLE("microchip,mcp4531-502", MCP453x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4531-103", MCP453x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4531-503", MCP453x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4531-104", MCP453x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4532-502", MCP453x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4532-103", MCP453x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4532-503", MCP453x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4532-104", MCP453x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4541-502", MCP454x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4541-103", MCP454x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4541-503", MCP454x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4541-104", MCP454x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4542-502", MCP454x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4542-103", MCP454x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4542-503", MCP454x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4542-104", MCP454x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4551-502", MCP455x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4551-103", MCP455x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4551-503", MCP455x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4551-104", MCP455x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4552-502", MCP455x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4552-103", MCP455x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4552-503", MCP455x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4552-104", MCP455x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4561-502", MCP456x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4561-103", MCP456x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4561-503", MCP456x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4561-104", MCP456x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4562-502", MCP456x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4562-103", MCP456x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4562-503", MCP456x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4562-104", MCP456x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4631-502", MCP463x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4631-103", MCP463x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4631-503", MCP463x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4631-104", MCP463x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4632-502", MCP463x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4632-103", MCP463x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4632-503", MCP463x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4632-104", MCP463x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4641-502", MCP464x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4641-103", MCP464x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4641-503", MCP464x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4641-104", MCP464x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4642-502", MCP464x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4642-103", MCP464x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4642-503", MCP464x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4642-104", MCP464x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4651-502", MCP465x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4651-103", MCP465x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4651-503", MCP465x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4651-104", MCP465x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4652-502", MCP465x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4652-103", MCP465x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4652-503", MCP465x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4652-104", MCP465x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4661-502", MCP466x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4661-103", MCP466x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4661-503", MCP466x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4661-104", MCP466x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4662-502", MCP466x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4662-103", MCP466x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4662-503", MCP466x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4662-104", MCP466x_104),
+ { /* sentinel */ }
+};
+#endif
+
static int mcp4531_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct mcp4531_data *data;
struct iio_dev *indio_dev;
+ const struct of_device_id *match;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -167,7 +286,12 @@ static int mcp4531_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->cfg = &mcp4531_cfg[id->driver_data];
+
+ match = of_match_device(of_match_ptr(mcp4531_of_match), dev);
+ if (match)
+ data->cfg = of_device_get_match_data(dev);
+ else
+ data->cfg = &mcp4531_cfg[id->driver_data];
indio_dev->dev.parent = dev;
indio_dev->info = &mcp4531_info;
@@ -187,6 +311,14 @@ static const struct i2c_device_id mcp4531_id[] = {
{ "mcp4532-103", MCP453x_103 },
{ "mcp4532-503", MCP453x_503 },
{ "mcp4532-104", MCP453x_104 },
+ { "mcp4541-502", MCP454x_502 },
+ { "mcp4541-103", MCP454x_103 },
+ { "mcp4541-503", MCP454x_503 },
+ { "mcp4541-104", MCP454x_104 },
+ { "mcp4542-502", MCP454x_502 },
+ { "mcp4542-103", MCP454x_103 },
+ { "mcp4542-503", MCP454x_503 },
+ { "mcp4542-104", MCP454x_104 },
{ "mcp4551-502", MCP455x_502 },
{ "mcp4551-103", MCP455x_103 },
{ "mcp4551-503", MCP455x_503 },
@@ -195,6 +327,14 @@ static const struct i2c_device_id mcp4531_id[] = {
{ "mcp4552-103", MCP455x_103 },
{ "mcp4552-503", MCP455x_503 },
{ "mcp4552-104", MCP455x_104 },
+ { "mcp4561-502", MCP456x_502 },
+ { "mcp4561-103", MCP456x_103 },
+ { "mcp4561-503", MCP456x_503 },
+ { "mcp4561-104", MCP456x_104 },
+ { "mcp4562-502", MCP456x_502 },
+ { "mcp4562-103", MCP456x_103 },
+ { "mcp4562-503", MCP456x_503 },
+ { "mcp4562-104", MCP456x_104 },
{ "mcp4631-502", MCP463x_502 },
{ "mcp4631-103", MCP463x_103 },
{ "mcp4631-503", MCP463x_503 },
@@ -203,6 +343,14 @@ static const struct i2c_device_id mcp4531_id[] = {
{ "mcp4632-103", MCP463x_103 },
{ "mcp4632-503", MCP463x_503 },
{ "mcp4632-104", MCP463x_104 },
+ { "mcp4641-502", MCP464x_502 },
+ { "mcp4641-103", MCP464x_103 },
+ { "mcp4641-503", MCP464x_503 },
+ { "mcp4641-104", MCP464x_104 },
+ { "mcp4642-502", MCP464x_502 },
+ { "mcp4642-103", MCP464x_103 },
+ { "mcp4642-503", MCP464x_503 },
+ { "mcp4642-104", MCP464x_104 },
{ "mcp4651-502", MCP465x_502 },
{ "mcp4651-103", MCP465x_103 },
{ "mcp4651-503", MCP465x_503 },
@@ -211,6 +359,14 @@ static const struct i2c_device_id mcp4531_id[] = {
{ "mcp4652-103", MCP465x_103 },
{ "mcp4652-503", MCP465x_503 },
{ "mcp4652-104", MCP465x_104 },
+ { "mcp4661-502", MCP466x_502 },
+ { "mcp4661-103", MCP466x_103 },
+ { "mcp4661-503", MCP466x_503 },
+ { "mcp4661-104", MCP466x_104 },
+ { "mcp4662-502", MCP466x_502 },
+ { "mcp4662-103", MCP466x_103 },
+ { "mcp4662-503", MCP466x_503 },
+ { "mcp4662-104", MCP466x_104 },
{}
};
MODULE_DEVICE_TABLE(i2c, mcp4531_id);
@@ -218,6 +374,7 @@ MODULE_DEVICE_TABLE(i2c, mcp4531_id);
static struct i2c_driver mcp4531_driver = {
.driver = {
.name = "mcp4531",
+ .of_match_table = of_match_ptr(mcp4531_of_match),
},
.probe = mcp4531_probe,
.id_table = mcp4531_id,
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
index 5c304d42d713..7b6b54531ea2 100644
--- a/drivers/iio/potentiometer/tpl0102.c
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -116,10 +116,6 @@ static int tpl0102_probe(struct i2c_client *client,
struct tpl0102_data *data;
struct iio_dev *indio_dev;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_WORD_DATA))
- return -EOPNOTSUPP;
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index cda9f128f3a4..d130cdc78f43 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -6,16 +6,33 @@
menu "Pressure sensors"
config BMP280
- tristate "Bosch Sensortec BMP180 and BMP280 pressure sensor driver"
- depends on I2C
+ tristate "Bosch Sensortec BMP180/BMP280 pressure sensor driver"
+ depends on (I2C || SPI_MASTER)
depends on !(BMP085_I2C=y || BMP085_I2C=m)
- select REGMAP_I2C
+ depends on !(BMP085_SPI=y || BMP085_SPI=m)
+ select REGMAP
+ select BMP280_I2C if (I2C)
+ select BMP280_SPI if (SPI_MASTER)
help
Say yes here to build support for Bosch Sensortec BMP180 and BMP280
- pressure and temperature sensors.
+ pressure and temperature sensors. Also supports the BME280 with
+ an additional humidity sensor channel.
- To compile this driver as a module, choose M here: the module
- will be called bmp280.
+ To compile this driver as a module, choose M here: the core module
+ will be called bmp280 and you will also get bmp280-i2c for I2C
+ and/or bmp280-spi for SPI support.
+
+config BMP280_I2C
+ tristate
+ depends on BMP280
+ depends on I2C
+ select REGMAP_I2C
+
+config BMP280_SPI
+ tristate
+ depends on BMP280
+ depends on SPI_MASTER
+ select REGMAP
config HID_SENSOR_PRESS
depends on HID_SENSOR_HUB
@@ -130,7 +147,7 @@ config IIO_ST_PRESS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics pressure
- sensors: LPS001WP, LPS25H, LPS331AP.
+ sensors: LPS001WP, LPS25H, LPS331AP, LPS22HB.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 17d6e7afa1ff..7f395bed5e88 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -4,6 +4,9 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_BMP280) += bmp280.o
+bmp280-objs := bmp280-core.o bmp280-regmap.o
+obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
+obj-$(CONFIG_BMP280_SPI) += bmp280-spi.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_HP03) += hp03.o
obj-$(CONFIG_MPL115) += mpl115.o
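The object split implies thin bus shims over a shared core; a hedged sketch of what the I2C glue could look like (bmp280_common_probe() and bmp280_regmap_config are assumed names, not shown in these hunks):

	static int bmp280_i2c_probe(struct i2c_client *client,
				    const struct i2c_device_id *id)
	{
		struct regmap *regmap;

		/* Assumed: the core exports a regmap config and common probe. */
		regmap = devm_regmap_init_i2c(client, &bmp280_regmap_config);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		return bmp280_common_probe(&client->dev, regmap,
					   id->driver_data, id->name,
					   client->irq);
	}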
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280-core.c
index 724452d61846..6943688e66df 100644
--- a/drivers/iio/pressure/bmp280.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1,5 +1,9 @@
/*
+ * Copyright (c) 2010 Christoph Mair <christoph.mair@gmail.com>
+ * Copyright (c) 2012 Bosch Sensortec GmbH
+ * Copyright (c) 2012 Unixphere AB
* Copyright (c) 2014 Intel Corporation
+ * Copyright (c) 2016 Linus Walleij <linus.walleij@linaro.org>
*
* Driver for Bosch Sensortec BMP180 and BMP280 digital pressure sensor.
*
@@ -10,99 +14,63 @@
* Datasheet:
* https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP180-DS000-121.pdf
* https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP280-DS001-12.pdf
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME280_DS001-11.pdf
*/
#define pr_fmt(fmt) "bmp280: " fmt
+#include <linux/device.h>
#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h> /* For irq_get_irq_data() */
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
-/* BMP280 specific registers */
-#define BMP280_REG_TEMP_XLSB 0xFC
-#define BMP280_REG_TEMP_LSB 0xFB
-#define BMP280_REG_TEMP_MSB 0xFA
-#define BMP280_REG_PRESS_XLSB 0xF9
-#define BMP280_REG_PRESS_LSB 0xF8
-#define BMP280_REG_PRESS_MSB 0xF7
-
-#define BMP280_REG_CONFIG 0xF5
-#define BMP280_REG_STATUS 0xF3
-
-#define BMP280_REG_COMP_TEMP_START 0x88
-#define BMP280_COMP_TEMP_REG_COUNT 6
-
-#define BMP280_REG_COMP_PRESS_START 0x8E
-#define BMP280_COMP_PRESS_REG_COUNT 18
-
-#define BMP280_FILTER_MASK (BIT(4) | BIT(3) | BIT(2))
-#define BMP280_FILTER_OFF 0
-#define BMP280_FILTER_2X BIT(2)
-#define BMP280_FILTER_4X BIT(3)
-#define BMP280_FILTER_8X (BIT(3) | BIT(2))
-#define BMP280_FILTER_16X BIT(4)
-
-#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
-#define BMP280_OSRS_TEMP_SKIP 0
-#define BMP280_OSRS_TEMP_X(osrs_t) ((osrs_t) << 5)
-#define BMP280_OSRS_TEMP_1X BMP280_OSRS_TEMP_X(1)
-#define BMP280_OSRS_TEMP_2X BMP280_OSRS_TEMP_X(2)
-#define BMP280_OSRS_TEMP_4X BMP280_OSRS_TEMP_X(3)
-#define BMP280_OSRS_TEMP_8X BMP280_OSRS_TEMP_X(4)
-#define BMP280_OSRS_TEMP_16X BMP280_OSRS_TEMP_X(5)
-
-#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
-#define BMP280_OSRS_PRESS_SKIP 0
-#define BMP280_OSRS_PRESS_X(osrs_p) ((osrs_p) << 2)
-#define BMP280_OSRS_PRESS_1X BMP280_OSRS_PRESS_X(1)
-#define BMP280_OSRS_PRESS_2X BMP280_OSRS_PRESS_X(2)
-#define BMP280_OSRS_PRESS_4X BMP280_OSRS_PRESS_X(3)
-#define BMP280_OSRS_PRESS_8X BMP280_OSRS_PRESS_X(4)
-#define BMP280_OSRS_PRESS_16X BMP280_OSRS_PRESS_X(5)
-
-#define BMP280_MODE_MASK (BIT(1) | BIT(0))
-#define BMP280_MODE_SLEEP 0
-#define BMP280_MODE_FORCED BIT(0)
-#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
-
-/* BMP180 specific registers */
-#define BMP180_REG_OUT_XLSB 0xF8
-#define BMP180_REG_OUT_LSB 0xF7
-#define BMP180_REG_OUT_MSB 0xF6
-
-#define BMP180_REG_CALIB_START 0xAA
-#define BMP180_REG_CALIB_COUNT 22
-
-#define BMP180_MEAS_SCO BIT(5)
-#define BMP180_MEAS_TEMP (0x0E | BMP180_MEAS_SCO)
-#define BMP180_MEAS_PRESS_X(oss) ((oss) << 6 | 0x14 | BMP180_MEAS_SCO)
-#define BMP180_MEAS_PRESS_1X BMP180_MEAS_PRESS_X(0)
-#define BMP180_MEAS_PRESS_2X BMP180_MEAS_PRESS_X(1)
-#define BMP180_MEAS_PRESS_4X BMP180_MEAS_PRESS_X(2)
-#define BMP180_MEAS_PRESS_8X BMP180_MEAS_PRESS_X(3)
-
-/* BMP180 and BMP280 common registers */
-#define BMP280_REG_CTRL_MEAS 0xF4
-#define BMP280_REG_RESET 0xE0
-#define BMP280_REG_ID 0xD0
-
-#define BMP180_CHIP_ID 0x55
-#define BMP280_CHIP_ID 0x58
-#define BMP280_SOFT_RESET_VAL 0xB6
+#include "bmp280.h"
+
+/*
+ * These enums are used for indexing into the array of calibration
+ * coefficients for BMP180.
+ */
+enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };
+
+struct bmp180_calib {
+ s16 AC1;
+ s16 AC2;
+ s16 AC3;
+ u16 AC4;
+ u16 AC5;
+ u16 AC6;
+ s16 B1;
+ s16 B2;
+ s16 MB;
+ s16 MC;
+ s16 MD;
+};
struct bmp280_data {
- struct i2c_client *client;
+ struct device *dev;
struct mutex lock;
struct regmap *regmap;
+ struct completion done;
+ bool use_eoc;
const struct bmp280_chip_info *chip_info;
+ struct bmp180_calib calib;
+ struct regulator *vddd;
+ struct regulator *vdda;
+ unsigned int start_up_time; /* in milliseconds */
/* log of base 2 of oversampling rate */
u8 oversampling_press;
u8 oversampling_temp;
+ u8 oversampling_humid;
/*
* Carryover value from temperature conversion, used in pressure
@@ -112,17 +80,19 @@ struct bmp280_data {
};
struct bmp280_chip_info {
- const struct regmap_config *regmap_config;
-
const int *oversampling_temp_avail;
int num_oversampling_temp_avail;
const int *oversampling_press_avail;
int num_oversampling_press_avail;
+ const int *oversampling_humid_avail;
+ int num_oversampling_humid_avail;
+
int (*chip_config)(struct bmp280_data *);
int (*read_temp)(struct bmp280_data *, int *);
int (*read_press)(struct bmp280_data *, int *, int *);
+ int (*read_humid)(struct bmp280_data *, int *, int *);
};
/*
@@ -143,45 +113,75 @@ static const struct iio_chan_spec bmp280_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
},
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
};
-static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case BMP280_REG_CONFIG:
- case BMP280_REG_CTRL_MEAS:
- case BMP280_REG_RESET:
- return true;
- default:
- return false;
- };
-}
+/*
+ * Returns humidity in percent, with a resolution of 1/1024 %RH. Output value
+ * of "47445" represents 47445/1024 = 46.333 %RH.
+ *
+ * Taken from BME280 datasheet, Section 4.2.3, "Compensation formula".
+ */
-static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+ s32 adc_humidity)
{
- switch (reg) {
- case BMP280_REG_TEMP_XLSB:
- case BMP280_REG_TEMP_LSB:
- case BMP280_REG_TEMP_MSB:
- case BMP280_REG_PRESS_XLSB:
- case BMP280_REG_PRESS_LSB:
- case BMP280_REG_PRESS_MSB:
- case BMP280_REG_STATUS:
- return true;
- default:
- return false;
+ struct device *dev = data->dev;
+ unsigned int H1, H3, tmp;
+ int H2, H4, H5, H6, ret, var;
+
+ ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &H1);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H1 comp value\n");
+ return ret;
}
-}
-static const struct regmap_config bmp280_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &tmp, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H2 comp value\n");
+ return ret;
+ }
+ H2 = sign_extend32(le16_to_cpu(tmp), 15);
+
+ ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &H3);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H3 comp value\n");
+ return ret;
+ }
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &tmp, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H4 comp value\n");
+ return ret;
+ }
+ H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) |
+ (be16_to_cpu(tmp) & 0xf), 11);
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &tmp, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H5 comp value\n");
+ return ret;
+ }
+ H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11);
+
+ ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H6 comp value\n");
+ return ret;
+ }
+ H6 = sign_extend32(tmp, 7);
- .max_register = BMP280_REG_TEMP_XLSB,
- .cache_type = REGCACHE_RBTREE,
+ var = ((s32)data->t_fine) - 76800;
+ var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) + 16384) >> 15)
+ * (((((((var * H6) >> 10) * (((var * H3) >> 11) + 32768)) >> 10)
+ + 2097152) * H2 + 8192) >> 14);
+ var -= ((((var >> 15) * (var >> 15)) >> 7) * H1) >> 4;
- .writeable_reg = bmp280_is_writeable_reg,
- .volatile_reg = bmp280_is_volatile_reg,
+ return var >> 12;
};
/*
@@ -201,7 +201,7 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data,
ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
buf, BMP280_COMP_TEMP_REG_COUNT);
if (ret < 0) {
- dev_err(&data->client->dev,
+ dev_err(data->dev,
"failed to read temperature calibration parameters\n");
return ret;
}
@@ -241,7 +241,7 @@ static u32 bmp280_compensate_press(struct bmp280_data *data,
ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
buf, BMP280_COMP_PRESS_REG_COUNT);
if (ret < 0) {
- dev_err(&data->client->dev,
+ dev_err(data->dev,
"failed to read pressure calibration parameters\n");
return ret;
}
@@ -276,7 +276,7 @@ static int bmp280_read_temp(struct bmp280_data *data,
ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
(u8 *) &tmp, 3);
if (ret < 0) {
- dev_err(&data->client->dev, "failed to read temperature\n");
+ dev_err(data->dev, "failed to read temperature\n");
return ret;
}
@@ -311,7 +311,7 @@ static int bmp280_read_press(struct bmp280_data *data,
ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
(u8 *) &tmp, 3);
if (ret < 0) {
- dev_err(&data->client->dev, "failed to read pressure\n");
+ dev_err(data->dev, "failed to read pressure\n");
return ret;
}
@@ -324,6 +324,34 @@ static int bmp280_read_press(struct bmp280_data *data,
return IIO_VAL_FRACTIONAL;
}
+static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
+{
+ int ret;
+ __be16 tmp = 0;
+ s32 adc_humidity;
+ u32 comp_humidity;
+
+ /* Read and compensate temperature so we get a reading of t_fine. */
+ ret = bmp280_read_temp(data, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
+ (u8 *) &tmp, 2);
+ if (ret < 0) {
+ dev_err(data->dev, "failed to read humidity\n");
+ return ret;
+ }
+
+ adc_humidity = be16_to_cpu(tmp);
+ comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
+
+ *val = comp_humidity;
+ *val2 = 1024;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
static int bmp280_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -331,11 +359,15 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
int ret;
struct bmp280_data *data = iio_priv(indio_dev);
+ pm_runtime_get_sync(data->dev);
mutex_lock(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ ret = data->chip_info->read_humid(data, val, val2);
+ break;
case IIO_PRESSURE:
ret = data->chip_info->read_press(data, val, val2);
break;
@@ -349,6 +381,10 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ *val = 1 << data->oversampling_humid;
+ ret = IIO_VAL_INT;
+ break;
case IIO_PRESSURE:
*val = 1 << data->oversampling_press;
ret = IIO_VAL_INT;
@@ -368,10 +404,29 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
}
mutex_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
return ret;
}
+static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
+ int val)
+{
+ int i;
+ const int *avail = data->chip_info->oversampling_humid_avail;
+ const int n = data->chip_info->num_oversampling_humid_avail;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i] == val) {
+ data->oversampling_humid = ilog2(val);
+
+ return data->chip_info->chip_config(data);
+ }
+ }
+ return -EINVAL;
+}
+
static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
int val)
{
@@ -415,8 +470,12 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ pm_runtime_get_sync(data->dev);
mutex_lock(&data->lock);
switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ ret = bmp280_write_oversampling_ratio_humid(data, val);
+ break;
case IIO_PRESSURE:
ret = bmp280_write_oversampling_ratio_press(data, val);
break;
@@ -428,6 +487,8 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
break;
}
mutex_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
break;
default:
return -EINVAL;
@@ -502,7 +563,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
BMP280_MODE_MASK,
osrs | BMP280_MODE_NORMAL);
if (ret < 0) {
- dev_err(&data->client->dev,
+ dev_err(data->dev,
"failed to write ctrl_meas register\n");
return ret;
}
@@ -511,7 +572,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
BMP280_FILTER_MASK,
BMP280_FILTER_4X);
if (ret < 0) {
- dev_err(&data->client->dev,
+ dev_err(data->dev,
"failed to write config register\n");
return ret;
}
@@ -522,8 +583,6 @@ static int bmp280_chip_config(struct bmp280_data *data)
static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
static const struct bmp280_chip_info bmp280_chip_info = {
- .regmap_config = &bmp280_regmap_config,
-
.oversampling_temp_avail = bmp280_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
@@ -535,39 +594,32 @@ static const struct bmp280_chip_info bmp280_chip_info = {
.read_press = bmp280_read_press,
};
-static bool bmp180_is_writeable_reg(struct device *dev, unsigned int reg)
+static int bme280_chip_config(struct bmp280_data *data)
{
- switch (reg) {
- case BMP280_REG_CTRL_MEAS:
- case BMP280_REG_RESET:
- return true;
- default:
- return false;
- };
-}
+ int ret = bmp280_chip_config(data);
+ u8 osrs = BMP280_OSRS_HUMIDITY_X(data->oversampling_humid + 1);
-static bool bmp180_is_volatile_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case BMP180_REG_OUT_XLSB:
- case BMP180_REG_OUT_LSB:
- case BMP180_REG_OUT_MSB:
- case BMP280_REG_CTRL_MEAS:
- return true;
- default:
- return false;
- }
+ if (ret < 0)
+ return ret;
+
+ return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
+ BMP280_OSRS_HUMIDITY_MASK, osrs);
}
-static const struct regmap_config bmp180_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
+static const struct bmp280_chip_info bme280_chip_info = {
+ .oversampling_temp_avail = bmp280_oversampling_avail,
+ .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+ .oversampling_press_avail = bmp280_oversampling_avail,
+ .num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
- .max_register = BMP180_REG_OUT_XLSB,
- .cache_type = REGCACHE_RBTREE,
+ .oversampling_humid_avail = bmp280_oversampling_avail,
+ .num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail),
- .writeable_reg = bmp180_is_writeable_reg,
- .volatile_reg = bmp180_is_volatile_reg,
+ .chip_config = bme280_chip_config,
+ .read_temp = bmp280_read_temp,
+ .read_press = bmp280_read_press,
+ .read_humid = bmp280_read_humid,
};
static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
@@ -577,16 +629,32 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
unsigned int delay_us;
unsigned int ctrl;
+ if (data->use_eoc)
+ init_completion(&data->done);
+
ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
if (ret)
return ret;
- if (ctrl_meas == BMP180_MEAS_TEMP)
- delay_us = 4500;
- else
- delay_us = conversion_time_max[data->oversampling_press];
-
- usleep_range(delay_us, delay_us + 1000);
+ if (data->use_eoc) {
+ /*
+ * If we have a completion interrupt, use it and wait up to
+ * 100 ms. The longest conversion time listed is 76.5 ms for
+ * advanced resolution mode.
+ */
+ ret = wait_for_completion_timeout(&data->done,
+ 1 + msecs_to_jiffies(100));
+ if (!ret)
+ dev_err(data->dev, "timeout waiting for completion\n");
+ } else {
+ if (ctrl_meas == BMP180_MEAS_TEMP)
+ delay_us = 4500;
+ else
+ delay_us =
+ conversion_time_max[data->oversampling_press];
+
+ usleep_range(delay_us, delay_us + 1000);
+ }
ret = regmap_read(data->regmap, BMP280_REG_CTRL_MEAS, &ctrl);
if (ret)
@@ -617,26 +685,6 @@ static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
return 0;
}
-/*
- * These enums are used for indexing into the array of calibration
- * coefficients for BMP180.
- */
-enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };
-
-struct bmp180_calib {
- s16 AC1;
- s16 AC2;
- s16 AC3;
- u16 AC4;
- u16 AC5;
- u16 AC6;
- s16 B1;
- s16 B2;
- s16 MB;
- s16 MC;
- s16 MD;
-};
-
static int bmp180_read_calib(struct bmp280_data *data,
struct bmp180_calib *calib)
{
@@ -656,6 +704,9 @@ static int bmp180_read_calib(struct bmp280_data *data,
return -EIO;
}
+ /* Toss the calibration data into the entropy pool */
+ add_device_randomness(buf, sizeof(buf));
+
calib->AC1 = be16_to_cpu(buf[AC1]);
calib->AC2 = be16_to_cpu(buf[AC2]);
calib->AC3 = be16_to_cpu(buf[AC3]);
@@ -679,19 +730,11 @@ static int bmp180_read_calib(struct bmp280_data *data,
*/
static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
{
- int ret;
s32 x1, x2;
- struct bmp180_calib calib;
+ struct bmp180_calib *calib = &data->calib;
- ret = bmp180_read_calib(data, &calib);
- if (ret < 0) {
- dev_err(&data->client->dev,
- "failed to read calibration coefficients\n");
- return ret;
- }
-
- x1 = ((adc_temp - calib.AC6) * calib.AC5) >> 15;
- x2 = (calib.MC << 11) / (x1 + calib.MD);
+ x1 = ((adc_temp - calib->AC6) * calib->AC5) >> 15;
+ x2 = (calib->MC << 11) / (x1 + calib->MD);
data->t_fine = x1 + x2;
return (data->t_fine + 8) >> 4;
@@ -746,29 +789,21 @@ static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
*/
static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
{
- int ret;
s32 x1, x2, x3, p;
s32 b3, b6;
u32 b4, b7;
s32 oss = data->oversampling_press;
- struct bmp180_calib calib;
-
- ret = bmp180_read_calib(data, &calib);
- if (ret < 0) {
- dev_err(&data->client->dev,
- "failed to read calibration coefficients\n");
- return ret;
- }
+ struct bmp180_calib *calib = &data->calib;
b6 = data->t_fine - 4000;
- x1 = (calib.B2 * (b6 * b6 >> 12)) >> 11;
- x2 = calib.AC2 * b6 >> 11;
+ x1 = (calib->B2 * (b6 * b6 >> 12)) >> 11;
+ x2 = calib->AC2 * b6 >> 11;
x3 = x1 + x2;
- b3 = ((((s32)calib.AC1 * 4 + x3) << oss) + 2) / 4;
- x1 = calib.AC3 * b6 >> 13;
- x2 = (calib.B1 * ((b6 * b6) >> 12)) >> 16;
+ b3 = ((((s32)calib->AC1 * 4 + x3) << oss) + 2) / 4;
+ x1 = calib->AC3 * b6 >> 13;
+ x2 = (calib->B1 * ((b6 * b6) >> 12)) >> 16;
x3 = (x1 + x2 + 2) >> 2;
- b4 = calib.AC4 * (u32)(x3 + 32768) >> 15;
+ b4 = calib->AC4 * (u32)(x3 + 32768) >> 15;
b7 = ((u32)adc_press - b3) * (50000 >> oss);
if (b7 < 0x80000000)
p = (b7 * 2) / b4;
@@ -815,8 +850,6 @@ static const int bmp180_oversampling_temp_avail[] = { 1 };
static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
static const struct bmp280_chip_info bmp180_chip_info = {
- .regmap_config = &bmp180_regmap_config,
-
.oversampling_temp_avail = bmp180_oversampling_temp_avail,
.num_oversampling_temp_avail =
ARRAY_SIZE(bmp180_oversampling_temp_avail),
@@ -830,92 +863,254 @@ static const struct bmp280_chip_info bmp180_chip_info = {
.read_press = bmp180_read_press,
};
-static int bmp280_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static irqreturn_t bmp085_eoc_irq(int irq, void *d)
+{
+ struct bmp280_data *data = d;
+
+ complete(&data->done);
+
+ return IRQ_HANDLED;
+}
+
+static int bmp085_fetch_eoc_irq(struct device *dev,
+ const char *name,
+ int irq,
+ struct bmp280_data *data)
+{
+ unsigned long irq_trig;
+ int ret;
+
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ if (irq_trig != IRQF_TRIGGER_RISING) {
+ dev_err(dev, "non-rising trigger given for EOC interrupt, "
+ "trying to enforce it\n");
+ irq_trig = IRQF_TRIGGER_RISING;
+ }
+ ret = devm_request_threaded_irq(dev,
+ irq,
+ bmp085_eoc_irq,
+ NULL,
+ irq_trig,
+ name,
+ data);
+ if (ret) {
+ /* Bail out without IRQ but keep the driver in place */
+ dev_err(dev, "unable to request EOC IRQ\n");
+ return 0;
+ }
+
+ data->use_eoc = true;
+ return 0;
+}
+
+int bmp280_common_probe(struct device *dev,
+ struct regmap *regmap,
+ unsigned int chip,
+ const char *name,
+ int irq)
{
int ret;
struct iio_dev *indio_dev;
struct bmp280_data *data;
unsigned int chip_id;
+ struct gpio_desc *gpiod;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
mutex_init(&data->lock);
- data->client = client;
+ data->dev = dev;
- indio_dev->dev.parent = &client->dev;
- indio_dev->name = id->name;
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
indio_dev->channels = bmp280_channels;
- indio_dev->num_channels = ARRAY_SIZE(bmp280_channels);
indio_dev->info = &bmp280_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- switch (id->driver_data) {
+ switch (chip) {
case BMP180_CHIP_ID:
+ indio_dev->num_channels = 2;
data->chip_info = &bmp180_chip_info;
data->oversampling_press = ilog2(8);
data->oversampling_temp = ilog2(1);
+ data->start_up_time = 10;
break;
case BMP280_CHIP_ID:
+ indio_dev->num_channels = 2;
data->chip_info = &bmp280_chip_info;
data->oversampling_press = ilog2(16);
data->oversampling_temp = ilog2(2);
+ data->start_up_time = 2;
+ break;
+ case BME280_CHIP_ID:
+ indio_dev->num_channels = 3;
+ data->chip_info = &bme280_chip_info;
+ data->oversampling_press = ilog2(16);
+ data->oversampling_humid = ilog2(16);
+ data->oversampling_temp = ilog2(2);
+ data->start_up_time = 2;
break;
default:
return -EINVAL;
}
- data->regmap = devm_regmap_init_i2c(client,
- data->chip_info->regmap_config);
- if (IS_ERR(data->regmap)) {
- dev_err(&client->dev, "failed to allocate register map\n");
- return PTR_ERR(data->regmap);
+ /* Bring up regulators */
+ data->vddd = devm_regulator_get(dev, "vddd");
+ if (IS_ERR(data->vddd)) {
+ dev_err(dev, "failed to get VDDD regulator\n");
+ return PTR_ERR(data->vddd);
+ }
+ ret = regulator_enable(data->vddd);
+ if (ret) {
+ dev_err(dev, "failed to enable VDDD regulator\n");
+ return ret;
+ }
+ data->vdda = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(data->vdda)) {
+ dev_err(dev, "failed to get VDDA regulator\n");
+ ret = PTR_ERR(data->vdda);
+ goto out_disable_vddd;
+ }
+ ret = regulator_enable(data->vdda);
+ if (ret) {
+ dev_err(dev, "failed to enable VDDA regulator\n");
+ goto out_disable_vddd;
+ }
+ /* Wait to make sure we started up properly */
+ mdelay(data->start_up_time);
+
+ /* Bring chip out of reset if there is an assigned GPIO line */
+ gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ /* Deassert the signal */
+ if (!IS_ERR(gpiod)) {
+ dev_info(dev, "release reset\n");
+ gpiod_set_value(gpiod, 0);
}
- ret = regmap_read(data->regmap, BMP280_REG_ID, &chip_id);
+ data->regmap = regmap;
+ ret = regmap_read(regmap, BMP280_REG_ID, &chip_id);
if (ret < 0)
- return ret;
- if (chip_id != id->driver_data) {
- dev_err(&client->dev, "bad chip id. expected %lx got %x\n",
- id->driver_data, chip_id);
- return -EINVAL;
+ goto out_disable_vdda;
+ if (chip_id != chip) {
+ dev_err(dev, "bad chip id: expected %x got %x\n",
+ chip, chip_id);
+ ret = -EINVAL;
+ goto out_disable_vdda;
}
ret = data->chip_info->chip_config(data);
if (ret < 0)
- return ret;
+ goto out_disable_vdda;
+
+ dev_set_drvdata(dev, indio_dev);
+
+ /*
+ * The BMP085 and BMP180 have their calibration in an E2PROM; read it
+ * out at probe time. It will not change.
+ */
+ if (chip_id == BMP180_CHIP_ID) {
+ ret = bmp180_read_calib(data, &data->calib);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "failed to read calibration coefficients\n");
+ goto out_disable_vdda;
+ }
+ }
+
+ /*
+ * Attempt to grab an optional EOC IRQ - only the BMP085 has this.
+ * However, as it happens, the BMP085 shares the chip ID of the
+ * BMP180, so we only look for the IRQ on that chip ID.
+ */
+ if (irq > 0 && (chip_id == BMP180_CHIP_ID)) {
+ ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
+ if (ret)
+ goto out_disable_vdda;
+ }
+
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Set autosuspend to two orders of magnitude larger than the
+ * start-up time.
+ */
+ pm_runtime_set_autosuspend_delay(dev, data->start_up_time * 100);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto out_runtime_pm_disable;
+
- return devm_iio_device_register(&client->dev, indio_dev);
+ return 0;
+
+out_runtime_pm_disable:
+ pm_runtime_get_sync(data->dev);
+ pm_runtime_put_noidle(data->dev);
+ pm_runtime_disable(data->dev);
+out_disable_vdda:
+ regulator_disable(data->vdda);
+out_disable_vddd:
+ regulator_disable(data->vddd);
+ return ret;
}
+EXPORT_SYMBOL(bmp280_common_probe);
-static const struct acpi_device_id bmp280_acpi_match[] = {
- {"BMP0280", BMP280_CHIP_ID },
- {"BMP0180", BMP180_CHIP_ID },
- {"BMP0085", BMP180_CHIP_ID },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, bmp280_acpi_match);
+int bmp280_common_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmp280_data *data = iio_priv(indio_dev);
-static const struct i2c_device_id bmp280_id[] = {
- {"bmp280", BMP280_CHIP_ID },
- {"bmp180", BMP180_CHIP_ID },
- {"bmp085", BMP180_CHIP_ID },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, bmp280_id);
+ iio_device_unregister(indio_dev);
+ pm_runtime_get_sync(data->dev);
+ pm_runtime_put_noidle(data->dev);
+ pm_runtime_disable(data->dev);
+ regulator_disable(data->vdda);
+ regulator_disable(data->vddd);
+ return 0;
+}
+EXPORT_SYMBOL(bmp280_common_remove);
-static struct i2c_driver bmp280_driver = {
- .driver = {
- .name = "bmp280",
- .acpi_match_table = ACPI_PTR(bmp280_acpi_match),
- },
- .probe = bmp280_probe,
- .id_table = bmp280_id,
+#ifdef CONFIG_PM
+static int bmp280_runtime_suspend(struct device *dev)
+{
+ struct bmp280_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_disable(data->vdda);
+ if (ret)
+ return ret;
+ return regulator_disable(data->vddd);
+}
+
+static int bmp280_runtime_resume(struct device *dev)
+{
+ struct bmp280_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_enable(data->vddd);
+ if (ret)
+ return ret;
+ ret = regulator_enable(data->vdda);
+ if (ret)
+ return ret;
+ msleep(data->start_up_time);
+ return data->chip_info->chip_config(data);
+}
+#endif /* CONFIG_PM */
+
+const struct dev_pm_ops bmp280_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(bmp280_runtime_suspend,
+ bmp280_runtime_resume, NULL)
};
-module_i2c_driver(bmp280_driver);
+EXPORT_SYMBOL(bmp280_dev_pm_ops);
MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
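
A quick sanity check of the Q22.10 humidity format handled by bmp280_read_humid() above, following the datasheet sample value quoted in the compensation comment; a standalone sketch, not driver code:

    #include <stdio.h>

    /*
     * The compensated humidity is value/1024 %RH (Q22.10 fixed point), and
     * bmp280_read_humid() reports it as IIO_VAL_FRACTIONAL with val2 = 1024.
     */
    int main(void)
    {
            unsigned int comp_humidity = 47445;    /* datasheet sample value */

            /* prints "47445/1024 = 46.333 %RH" */
            printf("%u/1024 = %.3f %%RH\n",
                   comp_humidity, comp_humidity / 1024.0);
            return 0;
    }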
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
new file mode 100644
index 000000000000..03742b15b72a
--- /dev/null
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -0,0 +1,91 @@
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "bmp280.h"
+
+static int bmp280_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const struct regmap_config *regmap_config;
+
+ switch (id->driver_data) {
+ case BMP180_CHIP_ID:
+ regmap_config = &bmp180_regmap_config;
+ break;
+ case BMP280_CHIP_ID:
+ case BME280_CHIP_ID:
+ regmap_config = &bmp280_regmap_config;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap = devm_regmap_init_i2c(client, regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ return bmp280_common_probe(&client->dev,
+ regmap,
+ id->driver_data,
+ id->name,
+ client->irq);
+}
+
+static int bmp280_i2c_remove(struct i2c_client *client)
+{
+ return bmp280_common_remove(&client->dev);
+}
+
+static const struct acpi_device_id bmp280_acpi_i2c_match[] = {
+ {"BMP0280", BMP280_CHIP_ID },
+ {"BMP0180", BMP180_CHIP_ID },
+ {"BMP0085", BMP180_CHIP_ID },
+ {"BME0280", BME280_CHIP_ID },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bmp280_acpi_i2c_match);
+
+#ifdef CONFIG_OF
+static const struct of_device_id bmp280_of_i2c_match[] = {
+ { .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID },
+ { .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID },
+ { .compatible = "bosch,bmp180", .data = (void *)BMP180_CHIP_ID },
+ { .compatible = "bosch,bmp085", .data = (void *)BMP180_CHIP_ID },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bmp280_of_i2c_match);
+#else
+#define bmp280_of_i2c_match NULL
+#endif
+
+static const struct i2c_device_id bmp280_i2c_id[] = {
+ {"bmp280", BMP280_CHIP_ID },
+ {"bmp180", BMP180_CHIP_ID },
+ {"bmp085", BMP180_CHIP_ID },
+ {"bme280", BME280_CHIP_ID },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, bmp280_i2c_id);
+
+static struct i2c_driver bmp280_i2c_driver = {
+ .driver = {
+ .name = "bmp280",
+ .acpi_match_table = ACPI_PTR(bmp280_acpi_i2c_match),
+ .of_match_table = of_match_ptr(bmp280_of_i2c_match),
+ .pm = &bmp280_dev_pm_ops,
+ },
+ .probe = bmp280_i2c_probe,
+ .remove = bmp280_i2c_remove,
+ .id_table = bmp280_i2c_id,
+};
+module_i2c_driver(bmp280_i2c_driver);
+
+MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
+MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
+MODULE_LICENSE("GPL v2");
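
Both bus front-ends hand the shared bmp280_dev_pm_ops to the driver core, and the core brackets each register access with the runtime-PM autosuspend pattern seen in bmp280_read_raw(). A condensed sketch of that pattern, assuming the suspend/resume callbacks only toggle the two supplies as in the core file:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int example_chip_access(struct device *dev)
    {
            int ret;

            pm_runtime_get_sync(dev);        /* runtime-resume: supplies on */

            ret = 0;                         /* ... talk to the chip ... */

            pm_runtime_mark_last_busy(dev);  /* restart the autosuspend timer */
            pm_runtime_put_autosuspend(dev); /* power off only after the delay */
            return ret;
    }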
diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
new file mode 100644
index 000000000000..6807113ec09f
--- /dev/null
+++ b/drivers/iio/pressure/bmp280-regmap.c
@@ -0,0 +1,84 @@
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bmp280.h"
+
+static bool bmp180_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+ default:
+ return false;
+ };
+}
+
+static bool bmp180_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP180_REG_OUT_XLSB:
+ case BMP180_REG_OUT_LSB:
+ case BMP180_REG_OUT_MSB:
+ case BMP280_REG_CTRL_MEAS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config bmp180_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP180_REG_OUT_XLSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp180_is_writeable_reg,
+ .volatile_reg = bmp180_is_volatile_reg,
+};
+EXPORT_SYMBOL(bmp180_regmap_config);
+
+static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_CONFIG:
+ case BMP280_REG_CTRL_HUMIDITY:
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+ default:
+ return false;
+ };
+}
+
+static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_HUMIDITY_LSB:
+ case BMP280_REG_HUMIDITY_MSB:
+ case BMP280_REG_TEMP_XLSB:
+ case BMP280_REG_TEMP_LSB:
+ case BMP280_REG_TEMP_MSB:
+ case BMP280_REG_PRESS_XLSB:
+ case BMP280_REG_PRESS_LSB:
+ case BMP280_REG_PRESS_MSB:
+ case BMP280_REG_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config bmp280_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP280_REG_HUMIDITY_LSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp280_is_writeable_reg,
+ .volatile_reg = bmp280_is_volatile_reg,
+};
+EXPORT_SYMBOL(bmp280_regmap_config);
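
With REGCACHE_RBTREE, only the registers flagged volatile above hit the bus on every read; anything else is answered from the cache once populated. A minimal sketch of the observable difference, assuming a regmap built from bmp280_regmap_config:

    #include <linux/regmap.h>

    #include "bmp280.h"

    static void example_cache_behaviour(struct regmap *map)
    {
            unsigned int val;

            /* STATUS is volatile: this always performs a bus transfer. */
            regmap_read(map, BMP280_REG_STATUS, &val);

            /* CONFIG is writeable but not volatile: after the first access
             * this read is served from the rbtree cache, no bus traffic. */
            regmap_read(map, BMP280_REG_CONFIG, &val);
    }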
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
new file mode 100644
index 000000000000..17bc95586f9e
--- /dev/null
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -0,0 +1,125 @@
+/*
+ * SPI interface for the BMP280 driver
+ *
+ * Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c
+ */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+
+#include "bmp280.h"
+
+static int bmp280_regmap_spi_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u8 buf[2];
+
+ memcpy(buf, data, 2);
+ /*
+ * The SPI register address (= full register address without bit 7) and
+ * the write command (bit7 = RW = '0')
+ */
+ buf[0] &= ~0x80;
+
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int bmp280_regmap_spi_read(void *context, const void *reg,
+ size_t reg_size, void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static struct regmap_bus bmp280_regmap_bus = {
+ .write = bmp280_regmap_spi_write,
+ .read = bmp280_regmap_spi_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static int bmp280_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+ const struct regmap_config *regmap_config;
+ int ret;
+
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return ret;
+ }
+
+ switch (id->driver_data) {
+ case BMP180_CHIP_ID:
+ regmap_config = &bmp180_regmap_config;
+ break;
+ case BMP280_CHIP_ID:
+ case BME280_CHIP_ID:
+ regmap_config = &bmp280_regmap_config;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap = devm_regmap_init(&spi->dev,
+ &bmp280_regmap_bus,
+ &spi->dev,
+ regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ return bmp280_common_probe(&spi->dev,
+ regmap,
+ id->driver_data,
+ id->name,
+ spi->irq);
+}
+
+static int bmp280_spi_remove(struct spi_device *spi)
+{
+ return bmp280_common_remove(&spi->dev);
+}
+
+static const struct of_device_id bmp280_of_spi_match[] = {
+ { .compatible = "bosch,bmp085", },
+ { .compatible = "bosch,bmp180", },
+ { .compatible = "bosch,bmp181", },
+ { .compatible = "bosch,bmp280", },
+ { .compatible = "bosch,bme280", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bmp280_of_spi_match);
+
+static const struct spi_device_id bmp280_spi_id[] = {
+ { "bmp180", BMP180_CHIP_ID },
+ { "bmp181", BMP180_CHIP_ID },
+ { "bmp280", BMP280_CHIP_ID },
+ { "bme280", BME280_CHIP_ID },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, bmp280_spi_id);
+
+static struct spi_driver bmp280_spi_driver = {
+ .driver = {
+ .name = "bmp280",
+ .of_match_table = bmp280_of_spi_match,
+ .pm = &bmp280_dev_pm_ops,
+ },
+ .id_table = bmp280_spi_id,
+ .probe = bmp280_spi_probe,
+ .remove = bmp280_spi_remove,
+};
+module_spi_driver(bmp280_spi_driver);
+
+MODULE_DESCRIPTION("BMP280 SPI bus driver");
+MODULE_LICENSE("GPL");
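
The write path above clears bit 7 because the Bosch SPI framing uses it as the R/W flag (1 = read, 0 = write), and every register in the map already sits at 0x80 or above, so read addresses carry the flag for free. A worked framing example, a sketch under that protocol assumption:

    #include <linux/types.h>

    static void example_spi_framing(void)
    {
            /* Write 0x57 to ctrl_meas (0xF4): bit 7 cleared on the wire. */
            u8 write_frame[2] = { 0xF4 & ~0x80, 0x57 }; /* { 0x74, 0x57 } */

            /* Read of the same register: 0xF4 sent as-is, bit 7 already set. */
            u8 read_addr = 0xF4;

            (void)write_frame;
            (void)read_addr;
    }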
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
new file mode 100644
index 000000000000..2c770e13be0e
--- /dev/null
+++ b/drivers/iio/pressure/bmp280.h
@@ -0,0 +1,112 @@
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* BMP280 specific registers */
+#define BMP280_REG_HUMIDITY_LSB 0xFE
+#define BMP280_REG_HUMIDITY_MSB 0xFD
+#define BMP280_REG_TEMP_XLSB 0xFC
+#define BMP280_REG_TEMP_LSB 0xFB
+#define BMP280_REG_TEMP_MSB 0xFA
+#define BMP280_REG_PRESS_XLSB 0xF9
+#define BMP280_REG_PRESS_LSB 0xF8
+#define BMP280_REG_PRESS_MSB 0xF7
+
+#define BMP280_REG_CONFIG 0xF5
+#define BMP280_REG_CTRL_MEAS 0xF4
+#define BMP280_REG_STATUS 0xF3
+#define BMP280_REG_CTRL_HUMIDITY 0xF2
+
+/* Due to the non-linear register mapping and varying data sizes, we can't do one bulk read */
+#define BMP280_REG_COMP_H1 0xA1
+#define BMP280_REG_COMP_H2 0xE1
+#define BMP280_REG_COMP_H3 0xE3
+#define BMP280_REG_COMP_H4 0xE4
+#define BMP280_REG_COMP_H5 0xE5
+#define BMP280_REG_COMP_H6 0xE7
+
+#define BMP280_REG_COMP_TEMP_START 0x88
+#define BMP280_COMP_TEMP_REG_COUNT 6
+
+#define BMP280_REG_COMP_PRESS_START 0x8E
+#define BMP280_COMP_PRESS_REG_COUNT 18
+
+#define BMP280_FILTER_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_FILTER_OFF 0
+#define BMP280_FILTER_2X BIT(2)
+#define BMP280_FILTER_4X BIT(3)
+#define BMP280_FILTER_8X (BIT(3) | BIT(2))
+#define BMP280_FILTER_16X BIT(4)
+
+#define BMP280_OSRS_HUMIDITY_MASK (BIT(2) | BIT(1) | BIT(0))
+#define BMP280_OSRS_HUMIDITY_X(osrs_h) ((osrs_h) << 0)
+#define BMP280_OSRS_HUMIDITY_SKIP 0
+#define BMP280_OSRS_HUMIDITY_1X BMP280_OSRS_HUMIDITY_X(1)
+#define BMP280_OSRS_HUMIDITY_2X BMP280_OSRS_HUMIDITY_X(2)
+#define BMP280_OSRS_HUMIDITY_4X BMP280_OSRS_HUMIDITY_X(3)
+#define BMP280_OSRS_HUMIDITY_8X BMP280_OSRS_HUMIDITY_X(4)
+#define BMP280_OSRS_HUMIDITY_16X BMP280_OSRS_HUMIDITY_X(5)
+
+#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_SKIP 0
+#define BMP280_OSRS_TEMP_X(osrs_t) ((osrs_t) << 5)
+#define BMP280_OSRS_TEMP_1X BMP280_OSRS_TEMP_X(1)
+#define BMP280_OSRS_TEMP_2X BMP280_OSRS_TEMP_X(2)
+#define BMP280_OSRS_TEMP_4X BMP280_OSRS_TEMP_X(3)
+#define BMP280_OSRS_TEMP_8X BMP280_OSRS_TEMP_X(4)
+#define BMP280_OSRS_TEMP_16X BMP280_OSRS_TEMP_X(5)
+
+#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_OSRS_PRESS_SKIP 0
+#define BMP280_OSRS_PRESS_X(osrs_p) ((osrs_p) << 2)
+#define BMP280_OSRS_PRESS_1X BMP280_OSRS_PRESS_X(1)
+#define BMP280_OSRS_PRESS_2X BMP280_OSRS_PRESS_X(2)
+#define BMP280_OSRS_PRESS_4X BMP280_OSRS_PRESS_X(3)
+#define BMP280_OSRS_PRESS_8X BMP280_OSRS_PRESS_X(4)
+#define BMP280_OSRS_PRESS_16X BMP280_OSRS_PRESS_X(5)
+
+#define BMP280_MODE_MASK (BIT(1) | BIT(0))
+#define BMP280_MODE_SLEEP 0
+#define BMP280_MODE_FORCED BIT(0)
+#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
+
+/* BMP180 specific registers */
+#define BMP180_REG_OUT_XLSB 0xF8
+#define BMP180_REG_OUT_LSB 0xF7
+#define BMP180_REG_OUT_MSB 0xF6
+
+#define BMP180_REG_CALIB_START 0xAA
+#define BMP180_REG_CALIB_COUNT 22
+
+#define BMP180_MEAS_SCO BIT(5)
+#define BMP180_MEAS_TEMP (0x0E | BMP180_MEAS_SCO)
+#define BMP180_MEAS_PRESS_X(oss) ((oss) << 6 | 0x14 | BMP180_MEAS_SCO)
+#define BMP180_MEAS_PRESS_1X BMP180_MEAS_PRESS_X(0)
+#define BMP180_MEAS_PRESS_2X BMP180_MEAS_PRESS_X(1)
+#define BMP180_MEAS_PRESS_4X BMP180_MEAS_PRESS_X(2)
+#define BMP180_MEAS_PRESS_8X BMP180_MEAS_PRESS_X(3)
+
+/* BMP180 and BMP280 common registers */
+#define BMP280_REG_CTRL_MEAS 0xF4
+#define BMP280_REG_RESET 0xE0
+#define BMP280_REG_ID 0xD0
+
+#define BMP180_CHIP_ID 0x55
+#define BMP280_CHIP_ID 0x58
+#define BME280_CHIP_ID 0x60
+#define BMP280_SOFT_RESET_VAL 0xB6
+
+/* Regmap configurations */
+extern const struct regmap_config bmp180_regmap_config;
+extern const struct regmap_config bmp280_regmap_config;
+
+/* Probe called from different transports */
+int bmp280_common_probe(struct device *dev,
+ struct regmap *regmap,
+ unsigned int chip,
+ const char *name,
+ int irq);
+int bmp280_common_remove(struct device *dev);
+
+/* PM ops */
+extern const struct dev_pm_ops bmp280_dev_pm_ops;
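
As a worked example of how the masks above compose: the core stores oversampling ratios as log2(ratio) and the register encodes log2(ratio) + 1, so the driver's defaults of 2x temperature and 16x pressure in normal mode produce the following ctrl_meas byte; a sketch:

    #include "bmp280.h"

    /*
     * BMP280_OSRS_TEMP_2X   = 2 << 5 = 0x40
     * BMP280_OSRS_PRESS_16X = 5 << 2 = 0x14
     * BMP280_MODE_NORMAL    =          0x03
     */
    static const u8 example_ctrl_meas = BMP280_OSRS_TEMP_2X |
                                        BMP280_OSRS_PRESS_16X |
                                        BMP280_MODE_NORMAL;  /* == 0x57 */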
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index 90f2b6e4a920..12f769e86355 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -401,6 +401,7 @@ static const struct i2c_device_id hp206c_id[] = {
{"hp206c"},
{}
};
+MODULE_DEVICE_TABLE(i2c, hp206c_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id hp206c_acpi_match[] = {
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 01b2e0b18878..6392d7b62841 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -171,7 +171,7 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
mutex_unlock(&data->lock);
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
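
The iio_get_time_ns(indio_dev) conversions in this and the following drivers exist because timestamps are now taken from a per-device clock selectable from user space, instead of always CLOCK_REALTIME. A plausible shape of the dispatch, a sketch rather than the exact core implementation:

    #include <linux/ktime.h>
    #include <linux/iio/iio.h>

    static s64 example_get_time_ns(const struct iio_dev *indio_dev)
    {
            /* The real helper covers more clock ids than shown here. */
            switch (iio_device_get_clock(indio_dev)) {
            case CLOCK_MONOTONIC:
                    return ktime_get_ns();
            case CLOCK_REALTIME:
            default:
                    return ktime_get_real_ns();
            }
    }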
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 76578b07bb6e..feb41f82c64a 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -224,7 +224,8 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p)
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index e68052c118e6..953ffbc0ef96 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -1,6 +1,6 @@
/*
- * ms5637.c - Support for Measurement-Specialties ms5637 and ms8607
- * pressure & temperature sensor
+ * ms5637.c - Support for Measurement-Specialties MS5637, MS5805,
+ * MS5837 and MS8607 pressure & temperature sensors
*
* Copyright (c) 2015 Measurement-Specialties
*
@@ -11,6 +11,10 @@
* Datasheet:
* http://www.meas-spec.com/downloads/MS5637-02BA03.pdf
* Datasheet:
+ * http://www.meas-spec.com/downloads/MS5805-02BA01.pdf
+ * Datasheet:
+ * http://www.meas-spec.com/downloads/MS5837-30BA.pdf
+ * Datasheet:
* http://www.meas-spec.com/downloads/MS8607-02BA01.pdf
*/
@@ -170,9 +174,12 @@ static int ms5637_probe(struct i2c_client *client,
static const struct i2c_device_id ms5637_id[] = {
{"ms5637", 0},
- {"ms8607-temppressure", 1},
+ {"ms5805", 0},
+ {"ms5837", 0},
+ {"ms8607-temppressure", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, ms5637_id);
static struct i2c_driver ms5637_driver = {
.probe = ms5637_probe,
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index f5f41490060b..903a21e46874 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -17,6 +17,7 @@
#define LPS001WP_PRESS_DEV_NAME "lps001wp"
#define LPS25H_PRESS_DEV_NAME "lps25h"
#define LPS331AP_PRESS_DEV_NAME "lps331ap"
+#define LPS22HB_PRESS_DEV_NAME "lps22hb"
/**
* struct st_sensors_platform_data - default press platform data
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 92a118c3c4ac..55df9a75eb3a 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -28,6 +28,72 @@
#include <linux/iio/common/st_sensors.h>
#include "st_pressure.h"
+/*
+ * About determining pressure scaling factors
+ * ------------------------------------------
+ *
+ * Datasheets specify typical pressure sensitivity so that pressure is computed
+ * according to the following equation:
+ *     pressure[mBar] = raw / sensitivity
+ * where:
+ *     raw          the 24-bit raw sampled pressure
+ *     sensitivity  a scaling factor specified by the datasheet in LSB/mBar
+ *
+ * The IIO ABI expects pressure to be expressed as kPascal, hence pressure
+ * should be computed according to:
+ *     pressure[kPascal] = pressure[mBar] / 10
+ *                       = raw / (sensitivity * 10)                       (1)
+ *
+ * Finally, st_press_read_raw() returns the pressure scaling factor as an
+ * IIO_VAL_INT_PLUS_NANO with a zero integral part and "gain" as decimal part.
+ * Therefore, from (1), "gain" becomes:
+ *     gain = 10^9 / (sensitivity * 10)
+ *          = 10^8 / sensitivity
+ *
+ * About determining temperature scaling factors and offsets
+ * ---------------------------------------------------------
+ *
+ * Datasheets specify typical temperature sensitivity and offset so that
+ * temperature is computed according to the following equation:
+ *     temp[Celsius] = offset[Celsius] + (raw / sensitivity)
+ * where:
+ *     raw          the 16-bit raw sampled temperature
+ *     offset       a constant specified by the datasheet in degrees Celsius
+ *                  (sometimes zero)
+ *     sensitivity  a scaling factor specified by the datasheet in LSB/Celsius
+ *
+ * The IIO ABI expects temperature to be expressed as milli degrees Celsius,
+ * such that user space should compute temperature according to:
+ *     temp[mCelsius] = temp[Celsius] * 10^3
+ *                    = (offset[Celsius] + (raw / sensitivity)) * 10^3
+ *                    = ((offset[Celsius] * sensitivity) + raw) *
+ *                      (10^3 / sensitivity)                              (2)
+ *
+ * The IIO ABI expects user space to apply offset and scaling factors to raw
+ * samples according to:
+ *     temp[mCelsius] = (OFFSET + raw) * SCALE
+ * where:
+ *     OFFSET       an arbitrary constant exposed by the device
+ *     SCALE        an arbitrary scaling factor exposed by the device
+ *
+ * Matching OFFSET and SCALE with members of (2) gives:
+ *     OFFSET = offset[Celsius] * sensitivity                             (3)
+ *     SCALE  = 10^3 / sensitivity                                        (4)
+ *
+ * st_press_read_raw() returns the temperature scaling factor as an
+ * IIO_VAL_FRACTIONAL with a 10^3 numerator and "gain2" as denominator.
+ * Therefore, from (4), "gain2" becomes:
+ *     gain2 = sensitivity
+ *
+ * When declared within a channel, i.e. for a non-zero specified offset,
+ * st_press_read_raw() will return the latter as an IIO_VAL_FRACTIONAL such
+ * that:
+ *     numerator   = OFFSET * 10^3
+ *     denominator = 10^3
+ * giving, from (3):
+ *     numerator = offset[Celsius] * 10^3 * sensitivity
+ *               = offset[mCelsius] * gain2
+ */
+
#define MCELSIUS_PER_CELSIUS 1000
/* Default pressure sensitivity */
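
Plugging the LPS22HB values used further below into (3) and (4), and assuming the datasheet-typical 4096 LSB/mBar pressure sensitivity behind ST_PRESS_KPASCAL_NANO_SCALE, gives concrete numbers; a worked sketch:

    /*
     * Pressure: gain = 10^8 / sensitivity = 10^8 / 4096 = 24414,
     * i.e. 0.000024414 kPa per LSB, reported as IIO_VAL_INT_PLUS_NANO.
     *
     * Temperature (LPS22HB): gain2 = sensitivity = 100 LSB/Celsius,
     * so SCALE = 10^3 / 100 = 10 mCelsius per LSB, reported as the
     * IIO_VAL_FRACTIONAL 1000/100. The LPS22HB channel declares no offset,
     * so no OFFSET attribute is exposed for it.
     */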
@@ -39,8 +105,6 @@
#define ST_PRESS_LSB_PER_CELSIUS 480UL
#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL
-#define ST_PRESS_NUMBER_DATA_CHANNELS 1
-
/* FULLSCALE */
#define ST_PRESS_FS_AVL_1100MB 1100
#define ST_PRESS_FS_AVL_1260MB 1260
@@ -48,7 +112,11 @@
#define ST_PRESS_1_OUT_XL_ADDR 0x28
#define ST_TEMP_1_OUT_L_ADDR 0x2b
-/* CUSTOM VALUES FOR LPS331AP SENSOR */
+/*
+ * CUSTOM VALUES FOR LPS331AP SENSOR
+ * See LPS331AP datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
+ */
#define ST_PRESS_LPS331AP_WAI_EXP 0xbb
#define ST_PRESS_LPS331AP_ODR_ADDR 0x20
#define ST_PRESS_LPS331AP_ODR_MASK 0x70
@@ -71,7 +139,9 @@
#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
-/* CUSTOM VALUES FOR LPS001WP SENSOR */
+/*
+ * CUSTOM VALUES FOR THE OBSOLETE LPS001WP SENSOR
+ */
/* LPS001WP pressure resolution */
#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
@@ -94,7 +164,11 @@
#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28
#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a
-/* CUSTOM VALUES FOR LPS25H SENSOR */
+/*
+ * CUSTOM VALUES FOR LPS25H SENSOR
+ * See LPS25H datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps25h.pdf
+ */
#define ST_PRESS_LPS25H_WAI_EXP 0xbd
#define ST_PRESS_LPS25H_ODR_ADDR 0x20
#define ST_PRESS_LPS25H_ODR_MASK 0x70
@@ -117,27 +191,54 @@
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
+/*
+ * CUSTOM VALUES FOR LPS22HB SENSOR
+ * See LPS22HB datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
+ */
+
+/* LPS22HB temperature sensitivity */
+#define ST_PRESS_LPS22HB_LSB_PER_CELSIUS 100UL
+
+#define ST_PRESS_LPS22HB_WAI_EXP 0xb1
+#define ST_PRESS_LPS22HB_ODR_ADDR 0x10
+#define ST_PRESS_LPS22HB_ODR_MASK 0x70
+#define ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL 0x01
+#define ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL 0x02
+#define ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL 0x03
+#define ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL 0x04
+#define ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL 0x05
+#define ST_PRESS_LPS22HB_PW_ADDR 0x10
+#define ST_PRESS_LPS22HB_PW_MASK 0x70
+#define ST_PRESS_LPS22HB_BDU_ADDR 0x10
+#define ST_PRESS_LPS22HB_BDU_MASK 0x02
+#define ST_PRESS_LPS22HB_DRDY_IRQ_ADDR 0x12
+#define ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK 0x04
+#define ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK 0x08
+#define ST_PRESS_LPS22HB_IHL_IRQ_ADDR 0x12
+#define ST_PRESS_LPS22HB_IHL_IRQ_MASK 0x80
+#define ST_PRESS_LPS22HB_OD_IRQ_ADDR 0x12
+#define ST_PRESS_LPS22HB_OD_IRQ_MASK 0x40
+#define ST_PRESS_LPS22HB_MULTIREAD_BIT true
+
static const struct iio_chan_spec st_press_1_channels[] = {
{
.type = IIO_PRESSURE,
- .channel2 = IIO_NO_MOD,
.address = ST_PRESS_1_OUT_XL_ADDR,
- .scan_index = ST_SENSORS_SCAN_X,
+ .scan_index = 0,
.scan_type = {
.sign = 'u',
.realbits = 24,
- .storagebits = 24,
+ .storagebits = 32,
.endianness = IIO_LE,
},
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
- .modified = 0,
},
{
.type = IIO_TEMP,
- .channel2 = IIO_NO_MOD,
.address = ST_TEMP_1_OUT_L_ADDR,
- .scan_index = -1,
+ .scan_index = 1,
.scan_type = {
.sign = 'u',
.realbits = 16,
@@ -148,17 +249,15 @@ static const struct iio_chan_spec st_press_1_channels[] = {
BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET),
- .modified = 0,
},
- IIO_CHAN_SOFT_TIMESTAMP(1)
+ IIO_CHAN_SOFT_TIMESTAMP(2)
};
static const struct iio_chan_spec st_press_lps001wp_channels[] = {
{
.type = IIO_PRESSURE,
- .channel2 = IIO_NO_MOD,
.address = ST_PRESS_LPS001WP_OUT_L_ADDR,
- .scan_index = ST_SENSORS_SCAN_X,
+ .scan_index = 0,
.scan_type = {
.sign = 'u',
.realbits = 16,
@@ -168,13 +267,11 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
- .modified = 0,
},
{
.type = IIO_TEMP,
- .channel2 = IIO_NO_MOD,
.address = ST_TEMP_LPS001WP_OUT_L_ADDR,
- .scan_index = -1,
+ .scan_index = 1,
.scan_type = {
.sign = 'u',
.realbits = 16,
@@ -184,9 +281,42 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
- .modified = 0,
},
- IIO_CHAN_SOFT_TIMESTAMP(1)
+ IIO_CHAN_SOFT_TIMESTAMP(2)
+};
+
+static const struct iio_chan_spec st_press_lps22hb_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .address = ST_PRESS_1_OUT_XL_ADDR,
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ },
+ {
+ .type = IIO_TEMP,
+ .address = ST_TEMP_1_OUT_L_ADDR,
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2)
};
static const struct st_sensor_settings st_press_sensors_settings[] = {
@@ -346,6 +476,59 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
.bootime = 2,
},
+ {
+ .wai = ST_PRESS_LPS22HB_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LPS22HB_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
+ .num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
+ .odr = {
+ .addr = ST_PRESS_LPS22HB_ODR_ADDR,
+ .mask = ST_PRESS_LPS22HB_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL, },
+ { 10, ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL, },
+ { 25, ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL, },
+ { 50, ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL, },
+ { 75, ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_PRESS_LPS22HB_PW_ADDR,
+ .mask = ST_PRESS_LPS22HB_PW_MASK,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .fs_avl = {
+ /*
+ * Pressure and temperature sensitivity values
+ * as defined in table 3 of LPS22HB datasheet.
+ */
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1260MB,
+ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+ .gain2 = ST_PRESS_LPS22HB_LSB_PER_CELSIUS,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_PRESS_LPS22HB_BDU_ADDR,
+ .mask = ST_PRESS_LPS22HB_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_PRESS_LPS22HB_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_PRESS_LPS22HB_IHL_IRQ_ADDR,
+ .mask_ihl = ST_PRESS_LPS22HB_IHL_IRQ_MASK,
+ .addr_od = ST_PRESS_LPS22HB_OD_IRQ_ADDR,
+ .mask_od = ST_PRESS_LPS22HB_OD_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
+ .multi_read_bit = ST_PRESS_LPS22HB_MULTIREAD_BIT,
+ },
};
static int st_press_write_raw(struct iio_dev *indio_dev,
@@ -462,23 +645,30 @@ int st_press_common_probe(struct iio_dev *indio_dev)
indio_dev->info = &press_info;
mutex_init(&press_data->tb.buf_lock);
- st_sensors_power_enable(indio_dev);
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_press_sensors_settings),
st_press_sensors_settings);
if (err < 0)
- return err;
-
- press_data->num_data_channels = ST_PRESS_NUMBER_DATA_CHANNELS;
+ goto st_press_power_off;
+
+ /*
+ * Skip the timestamping channel while declaring available channels to
+ * the common st_sensors layer. Look at st_sensors_get_buffer_element()
+ * to see how timestamps are explicitly pushed as the last element of
+ * the sample block.
+ */
+ press_data->num_data_channels = press_data->sensor_settings->num_ch - 1;
press_data->multiread_bit = press_data->sensor_settings->multi_read_bit;
indio_dev->channels = press_data->sensor_settings->ch;
indio_dev->num_channels = press_data->sensor_settings->num_ch;
- if (press_data->sensor_settings->fs.addr != 0)
- press_data->current_fullscale =
- (struct st_sensor_fullscale_avl *)
- &press_data->sensor_settings->fs.fs_avl[0];
+ press_data->current_fullscale =
+ (struct st_sensor_fullscale_avl *)
+ &press_data->sensor_settings->fs.fs_avl[0];
press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
@@ -490,11 +680,11 @@ int st_press_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
if (err < 0)
- return err;
+ goto st_press_power_off;
err = st_press_allocate_ring(indio_dev);
if (err < 0)
- return err;
+ goto st_press_power_off;
if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -517,6 +707,8 @@ st_press_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_press_probe_trigger_error:
st_press_deallocate_ring(indio_dev);
+st_press_power_off:
+ st_sensors_power_disable(indio_dev);
return err;
}
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 8fcf9766eaec..ed18701c68c9 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -32,6 +32,10 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps331ap-press",
.data = LPS331AP_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps22hb-press",
+ .data = LPS22HB_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 40c0692ff1de..550508025af1 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -50,6 +50,7 @@ static const struct spi_device_id st_press_id_table[] = {
{ LPS001WP_PRESS_DEV_NAME },
{ LPS25H_PRESS_DEV_NAME },
{ LPS331AP_PRESS_DEV_NAME },
+ { LPS22HB_PRESS_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_press_id_table);
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index e2f926cdcad2..2e3a70e1b245 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -231,10 +231,16 @@ static void as3935_event_work(struct work_struct *work)
{
struct as3935_state *st;
int val;
+ int ret;
st = container_of(work, struct as3935_state, work.work);
- as3935_read(st, AS3935_INT, &val);
+ ret = as3935_read(st, AS3935_INT, &val);
+ if (ret) {
+ dev_warn(&st->spi->dev, "read error\n");
+ return;
+ }
+
val &= AS3935_INT_MASK;
switch (val) {
@@ -242,7 +248,7 @@ static void as3935_event_work(struct work_struct *work)
iio_trigger_poll(st->trig);
break;
case AS3935_NOISE_INT:
- dev_warn(&st->spi->dev, "noise level is too high");
+ dev_warn(&st->spi->dev, "noise level is too high\n");
break;
}
}
@@ -346,7 +352,6 @@ static int as3935_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
st->spi = spi;
- st->tune_cap = 0;
spi_set_drvdata(spi, indio_dev);
mutex_init(&st->lock);
@@ -468,4 +473,3 @@ module_spi_driver(as3935_driver);
MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
MODULE_DESCRIPTION("AS3935 lightning sensor");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:as3935");
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 4f502386aa86..3141c3c161bb 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -203,22 +203,19 @@ static int lidar_read_raw(struct iio_dev *indio_dev,
struct lidar_data *data = iio_priv(indio_dev);
int ret = -EINVAL;
- mutex_lock(&indio_dev->mlock);
-
- if (iio_buffer_enabled(indio_dev) && mask == IIO_CHAN_INFO_RAW) {
- ret = -EBUSY;
- goto error_busy;
- }
-
switch (mask) {
case IIO_CHAN_INFO_RAW: {
u16 reg;
+ if (iio_device_claim_direct_mode(indio_dev))
+ return -EBUSY;
+
ret = lidar_get_measurement(data, &reg);
if (!ret) {
*val = reg;
ret = IIO_VAL_INT;
}
+ iio_device_release_direct_mode(indio_dev);
break;
}
case IIO_CHAN_INFO_SCALE:
@@ -228,9 +225,6 @@ static int lidar_read_raw(struct iio_dev *indio_dev,
break;
}
-error_busy:
- mutex_unlock(&indio_dev->mlock);
-
return ret;
}
@@ -244,7 +238,7 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
ret = lidar_get_measurement(data, data->buffer);
if (!ret) {
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
} else if (ret != -EINVAL) {
dev_err(&data->client->dev, "cannot read LIDAR measurement");
}
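
The lidar conversion above swaps an open-coded check under indio_dev->mlock for the claim/release helpers, which return -EBUSY instead of racing with buffered capture. The resulting pattern, in sketch form:

    #include <linux/iio/iio.h>

    static int example_read_raw_direct(struct iio_dev *indio_dev, int *val)
    {
            int ret;

            /* Fails with -EBUSY while buffered capture is running. */
            ret = iio_device_claim_direct_mode(indio_dev);
            if (ret)
                    return ret;

            *val = 0;                /* ... one-shot measurement here ... */
            ret = IIO_VAL_INT;

            iio_device_release_direct_mode(indio_dev);
            return ret;
    }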
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 66cd09a18786..1d74b3aafeed 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -492,7 +492,7 @@ static void sx9500_push_events(struct iio_dev *indio_dev)
dir = new_prox ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
IIO_EV_TYPE_THRESH, dir);
- iio_push_event(indio_dev, ev, iio_get_time_ns());
+ iio_push_event(indio_dev, ev, iio_get_time_ns(indio_dev));
data->prox_stat[chan] = new_prox;
}
}
@@ -669,7 +669,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
out:
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c
index ab6fe8f6f2d1..c0a19a000387 100644
--- a/drivers/iio/temperature/tsys02d.c
+++ b/drivers/iio/temperature/tsys02d.c
@@ -174,6 +174,7 @@ static const struct i2c_device_id tsys02d_id[] = {
{"tsys02d", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, tsys02d_id);
static struct i2c_driver tsys02d_driver = {
.probe = tsys02d_probe,
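Registering the id table with MODULE_DEVICE_TABLE(), as the tsys02d hunk does,
is what lets userspace autoload the module when a matching device appears;
without it the driver binds only if loaded by hand. Minimal sketch:

	#include <linux/i2c.h>
	#include <linux/module.h>

	static const struct i2c_device_id example_id[] = {
		{ "example-dev", 0 },
		{ }
	};
	/* Emits the modalias information modprobe/udev match against */
	MODULE_DEVICE_TABLE(i2c, example_id);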
diff --git a/drivers/iio/trigger/Kconfig b/drivers/iio/trigger/Kconfig
index 519e6772f6f5..809b2e7d58fa 100644
--- a/drivers/iio/trigger/Kconfig
+++ b/drivers/iio/trigger/Kconfig
@@ -24,6 +24,18 @@ config IIO_INTERRUPT_TRIGGER
To compile this driver as a module, choose M here: the
module will be called iio-trig-interrupt.
+config IIO_TIGHTLOOP_TRIGGER
+ tristate "A kthread based hammering loop trigger"
+ depends on IIO_SW_TRIGGER
+ help
+ An experimental trigger, used to allow sensors to be sampled as fast
+ as possible under the limitations of whatever else is going on.
+ Uses a tight loop in a kthread. Will only work with trigger
+ consumers that do all their work in the bottom half.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iio-trig-loop.
+
config IIO_SYSFS_TRIGGER
tristate "SYSFS trigger"
depends on SYSFS
diff --git a/drivers/iio/trigger/Makefile b/drivers/iio/trigger/Makefile
index fe06eb564367..aab4dc23303d 100644
--- a/drivers/iio/trigger/Makefile
+++ b/drivers/iio/trigger/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_IIO_HRTIMER_TRIGGER) += iio-trig-hrtimer.o
obj-$(CONFIG_IIO_INTERRUPT_TRIGGER) += iio-trig-interrupt.o
obj-$(CONFIG_IIO_SYSFS_TRIGGER) += iio-trig-sysfs.o
+obj-$(CONFIG_IIO_TIGHTLOOP_TRIGGER) += iio-trig-loop.o
diff --git a/drivers/iio/trigger/iio-trig-loop.c b/drivers/iio/trigger/iio-trig-loop.c
new file mode 100644
index 000000000000..dc6be28f96fe
--- /dev/null
+++ b/drivers/iio/trigger/iio-trig-loop.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2016 Jonathan Cameron <jic23@kernel.org>
+ *
+ * Licensed under the GPL-2.
+ *
+ * Based on a mashup of the hrtimer trigger and continuous sampling proposal of
+ * Gregor Boirie <gregor.boirie@parrot.com>
+ *
+ * Note this is still rather experimental and may eat babies.
+ *
+ * Todo
+ * * Protect against connection of devices that 'need' the top half
+ * handler.
+ * * Work out how to run top half handlers in this context if it is
+ * safe to do so (timestamp grabbing for example)
+ *
+ * Tested against a max1363. Used about 33% cpu for the thread and 20%
+ * for generic_buffer piping to /dev/null. Watermark set at 64 on a 128
+ * element kfifo buffer.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irq_work.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/sw_trigger.h>
+
+struct iio_loop_info {
+ struct iio_sw_trigger swt;
+ struct task_struct *task;
+};
+
+static struct config_item_type iio_loop_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static int iio_loop_thread(void *data)
+{
+ struct iio_trigger *trig = data;
+
+ set_freezable();
+
+ do {
+ iio_trigger_poll_chained(trig);
+ } while (likely(!kthread_freezable_should_stop(NULL)));
+
+ return 0;
+}
+
+static int iio_loop_trigger_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_loop_info *loop_trig = iio_trigger_get_drvdata(trig);
+
+ if (state) {
+ loop_trig->task = kthread_run(iio_loop_thread,
+ trig, trig->name);
+ if (unlikely(IS_ERR(loop_trig->task))) {
+ dev_err(&trig->dev,
+ "failed to create trigger loop thread\n");
+ return PTR_ERR(loop_trig->task);
+ }
+ } else {
+ kthread_stop(loop_trig->task);
+ }
+
+ return 0;
+}
+
+static const struct iio_trigger_ops iio_loop_trigger_ops = {
+ .set_trigger_state = iio_loop_trigger_set_state,
+ .owner = THIS_MODULE,
+};
+
+static struct iio_sw_trigger *iio_trig_loop_probe(const char *name)
+{
+ struct iio_loop_info *trig_info;
+ int ret;
+
+ trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
+ if (!trig_info)
+ return ERR_PTR(-ENOMEM);
+
+ trig_info->swt.trigger = iio_trigger_alloc("%s", name);
+ if (!trig_info->swt.trigger) {
+ ret = -ENOMEM;
+ goto err_free_trig_info;
+ }
+
+ iio_trigger_set_drvdata(trig_info->swt.trigger, trig_info);
+ trig_info->swt.trigger->ops = &iio_loop_trigger_ops;
+
+ ret = iio_trigger_register(trig_info->swt.trigger);
+ if (ret)
+ goto err_free_trigger;
+
+ iio_swt_group_init_type_name(&trig_info->swt, name, &iio_loop_type);
+
+ return &trig_info->swt;
+
+err_free_trigger:
+ iio_trigger_free(trig_info->swt.trigger);
+err_free_trig_info:
+ kfree(trig_info);
+
+ return ERR_PTR(ret);
+}
+
+static int iio_trig_loop_remove(struct iio_sw_trigger *swt)
+{
+ struct iio_loop_info *trig_info;
+
+ trig_info = iio_trigger_get_drvdata(swt->trigger);
+
+ iio_trigger_unregister(swt->trigger);
+ iio_trigger_free(swt->trigger);
+ kfree(trig_info);
+
+ return 0;
+}
+
+static const struct iio_sw_trigger_ops iio_trig_loop_ops = {
+ .probe = iio_trig_loop_probe,
+ .remove = iio_trig_loop_remove,
+};
+
+static struct iio_sw_trigger_type iio_trig_loop = {
+ .name = "loop",
+ .owner = THIS_MODULE,
+ .ops = &iio_trig_loop_ops,
+};
+
+module_iio_sw_trigger_driver(iio_trig_loop);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("Loop based trigger for the iio subsystem");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:iio-trig-loop");
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index a5793c8f1590..60df4f8e81be 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -530,6 +530,7 @@ static PORT_PMA_ATTR(port_xmit_data , 12, 32, 192);
static PORT_PMA_ATTR(port_rcv_data , 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets , 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets , 15, 32, 288);
+static PORT_PMA_ATTR(port_xmit_wait , 0, 32, 320);
/*
* Counters added by extended set
@@ -560,6 +561,7 @@ static struct attribute *pma_attrs[] = {
&port_pma_attr_port_rcv_data.attr.attr,
&port_pma_attr_port_xmit_packets.attr.attr,
&port_pma_attr_port_rcv_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
NULL
};
@@ -579,6 +581,7 @@ static struct attribute *pma_attrs_ext[] = {
&port_pma_attr_ext_port_xmit_data.attr.attr,
&port_pma_attr_ext_port_rcv_data.attr.attr,
&port_pma_attr_ext_port_xmit_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
&port_pma_attr_ext_port_rcv_packets.attr.attr,
&port_pma_attr_ext_unicast_rcv_packets.attr.attr,
&port_pma_attr_ext_unicast_xmit_packets.attr.attr,
@@ -604,6 +607,7 @@ static struct attribute *pma_attrs_noietf[] = {
&port_pma_attr_ext_port_rcv_data.attr.attr,
&port_pma_attr_ext_port_xmit_packets.attr.attr,
&port_pma_attr_ext_port_rcv_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
NULL
};
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index f5de85178055..dad4d0ebbdff 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -14113,8 +14113,14 @@ static int init_asic_data(struct hfi1_devdata *dd)
{
unsigned long flags;
struct hfi1_devdata *tmp, *peer = NULL;
+ struct hfi1_asic_data *asic_data;
int ret = 0;
+ /* pre-allocate the asic structure in case we are the first device */
+ asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
+ if (!asic_data)
+ return -ENOMEM;
+
spin_lock_irqsave(&hfi1_devs_lock, flags);
/* Find our peer device */
list_for_each_entry(tmp, &hfi1_dev_list, list) {
@@ -14126,18 +14132,14 @@ static int init_asic_data(struct hfi1_devdata *dd)
}
if (peer) {
+ /* use already allocated structure */
dd->asic_data = peer->asic_data;
+ kfree(asic_data);
} else {
- dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
- if (!dd->asic_data) {
- ret = -ENOMEM;
- goto done;
- }
+ dd->asic_data = asic_data;
mutex_init(&dd->asic_data->asic_resource_mutex);
}
dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
-
-done:
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
return ret;
}
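The init_asic_data() rework above is the classic allocate-before-lock shape:
kzalloc(GFP_KERNEL) may sleep, so it cannot run under the spinlock, and the
loser of the peer race simply frees its copy. A minimal sketch with
illustrative names:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	static struct shared_state { int dummy; } *shared;
	static DEFINE_SPINLOCK(shared_lock);

	static int attach_shared_state(void)
	{
		struct shared_state *tmp;
		unsigned long flags;

		tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); /* may sleep: no lock yet */
		if (!tmp)
			return -ENOMEM;

		spin_lock_irqsave(&shared_lock, flags);
		if (shared)
			kfree(tmp);	/* a peer installed one first */
		else
			shared = tmp;
		spin_unlock_irqrestore(&shared_lock, flags);
		return 0;
	}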
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 1e503ad0bebb..be91f6fa1c87 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -678,8 +678,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
bool has_grh = rcv_flags & HFI1_HAS_GRH;
- bool sc4_bit = has_sc4_bit(packet);
- u8 sc;
+ u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
u32 bth1;
int is_mcast;
struct ib_grh *grh = NULL;
@@ -697,10 +696,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
*/
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- u8 sl, sc5;
+ u8 sl;
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
sl = ibp->sc_to_sl[sc5];
process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
@@ -717,10 +714,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
u16 slid = be16_to_cpu(hdr->lrh[3]);
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
}
@@ -745,10 +738,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
if (qp->ibqp.qp_num > 1) {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u16 slid;
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
slid = be16_to_cpu(hdr->lrh[3]);
if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
@@ -790,10 +779,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
/* Received on QP0, and so by definition, this is an SMP */
struct opa_smp *smp = (struct opa_smp *)data;
u16 slid = be16_to_cpu(hdr->lrh[3]);
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
goto drop;
@@ -890,9 +875,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
}
wc.slid = be16_to_cpu(hdr->lrh[3]);
- sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc |= sc4_bit;
- wc.sl = ibp->sc_to_sl[sc];
+ wc.sl = ibp->sc_to_sl[sc5];
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index c963cad92f5a..6e9081380a27 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -600,8 +600,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
cqp_init_info.scratch_array = cqp->scratch_array;
status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
if (status) {
- i40iw_pr_err("cqp init status %d maj_err %d min_err %d\n",
- status, maj_err, min_err);
+ i40iw_pr_err("cqp init status %d\n", status);
goto exit;
}
status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 33959ed14563..283b64c942ee 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1474,6 +1474,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr
info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
info->pd_id = iwpd->sc_pd.pd_id;
info->total_len = iwmr->length;
+ info->remote_access = true;
cqp_info->cqp_cmd = OP_ALLOC_STAG;
cqp_info->post_sq = 1;
cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 3438e98c145a..a529a4535457 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1431,6 +1431,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
int ep_irq_in_idx;
int i, error;
+ if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+ return -ENODEV;
+
for (i = 0; xpad_device[i].idVendor; i++) {
if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
(le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
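The two added lines in the xpad hunk are a defensive-probe idiom: the endpoint
count comes from the (possibly hostile or broken) device's own descriptors, so
it must be validated before the driver indexes endpoint[0]/endpoint[1] later in
probe. The guard in isolation:

	#include <linux/usb.h>

	static int example_usb_probe(struct usb_interface *intf,
				     const struct usb_device_id *id)
	{
		/* Reject anything that doesn't match the expected layout */
		if (intf->cur_altsetting->desc.bNumEndpoints != 2)
			return -ENODEV;

		/* ... cur_altsetting->endpoint[0..1] is now safe to use ... */
		return 0;
	}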
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index b368b0515c5a..253df96be427 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -157,11 +157,11 @@ static int rmi_function_match(struct device *dev, struct device_driver *drv)
static void rmi_function_of_probe(struct rmi_function *fn)
{
char of_name[9];
+ struct device_node *node = fn->rmi_dev->xport->dev->of_node;
snprintf(of_name, sizeof(of_name), "rmi4-f%02x",
fn->fd.function_number);
- fn->dev.of_node = of_find_node_by_name(
- fn->rmi_dev->xport->dev->of_node, of_name);
+ fn->dev.of_node = of_get_child_by_name(node, of_name);
}
#else
static inline void rmi_function_of_probe(struct rmi_function *fn)
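The of_find_node_by_name() to of_get_child_by_name() switch matters because the
former walks the whole flattened tree starting after the node passed in (and
drops that node's reference), so it can return an unrelated node that happens
to share the name. The child-scoped lookup, sketched:

	#include <linux/of.h>

	static void example_bind_child(struct device_node *parent)
	{
		struct device_node *child;

		child = of_get_child_by_name(parent, "rmi4-f01");
		if (child) {
			/* ... use child ... */
			of_node_put(child);	/* the lookup took a reference */
		}
	}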
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index 8dd3fb5e1f94..88e91559c84e 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -66,7 +66,7 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
struct rmi_device *rmi_dev = fn->rmi_dev;
int ret;
int offset;
- u8 buf[14];
+ u8 buf[15];
int pitch_x = 0;
int pitch_y = 0;
int clip_x_low = 0;
@@ -86,9 +86,10 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
- if (item->reg_size > 14) {
- dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n",
- item->reg_size);
+ if (item->reg_size > sizeof(buf)) {
+ dev_err(&fn->dev,
+ "F12 control8 should be no bigger than %zd bytes, not: %ld\n",
+ sizeof(buf), item->reg_size);
return -ENODEV;
}
diff --git a/drivers/input/touchscreen/ts4800-ts.c b/drivers/input/touchscreen/ts4800-ts.c
index 3c3dd78303be..fed73eeb47b3 100644
--- a/drivers/input/touchscreen/ts4800-ts.c
+++ b/drivers/input/touchscreen/ts4800-ts.c
@@ -118,6 +118,13 @@ static int ts4800_parse_dt(struct platform_device *pdev,
return -ENODEV;
}
+ ts->regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
+ if (IS_ERR(ts->regmap)) {
+ dev_err(dev, "cannot get parent's regmap\n");
+ return PTR_ERR(ts->regmap);
+ }
+
error = of_property_read_u32_index(np, "syscon", 1, &reg);
if (error < 0) {
dev_err(dev, "no offset in syscon\n");
@@ -134,12 +141,6 @@ static int ts4800_parse_dt(struct platform_device *pdev,
ts->bit = BIT(bit);
- ts->regmap = syscon_node_to_regmap(syscon_np);
- if (IS_ERR(ts->regmap)) {
- dev_err(dev, "cannot get parent's regmap\n");
- return PTR_ERR(ts->regmap);
- }
-
return 0;
}
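The ts4800 reorder also plugs a node-reference leak: the syscon node is only
needed to obtain the regmap, so its reference can be dropped immediately after
that call, before any of the later error returns. A sketch of the leak-free
shape:

	#include <linux/mfd/syscon.h>
	#include <linux/of.h>
	#include <linux/regmap.h>

	static int example_get_syscon_regmap(struct device_node *np,
					     struct regmap **out)
	{
		struct device_node *syscon_np;
		struct regmap *map;

		syscon_np = of_parse_phandle(np, "syscon", 0);
		if (!syscon_np)
			return -ENODEV;

		map = syscon_node_to_regmap(syscon_np);
		of_node_put(syscon_np);	/* done with the node either way */
		if (IS_ERR(map))
			return PTR_ERR(map);

		*out = map;
		return 0;
	}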
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index 7295c198aa08..6fe55d598fac 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -22,6 +22,11 @@
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2004_input_id = {
+ .bustype = BUS_I2C,
+ .product = 2004,
+};
+
static int tsc2004_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+ return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
tsc2004_cmd);
}
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index b9f593dfd2ef..f2c5f0e47f77 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -24,6 +24,11 @@
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2005_input_id = {
+ .bustype = BUS_SPI,
+ .product = 2005,
+};
+
static int tsc2005_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi)
if (error)
return error;
- return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+ return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
devm_regmap_init_spi(spi, &tsc200x_regmap_config),
tsc2005_cmd);
}
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 15240c1ee850..dfa7f1c4f545 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input)
mutex_unlock(&ts->mutex);
}
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd))
{
@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
snprintf(ts->phys, sizeof(ts->phys),
"%s/input-ts", dev_name(dev));
- input_dev->name = "TSC200X touchscreen";
+ if (tsc_id->product == 2004) {
+ input_dev->name = "TSC200X touchscreen";
+ } else {
+ input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+ "TSC%04d touchscreen",
+ tsc_id->product);
+ if (!input_dev->name)
+ return -ENOMEM;
+ }
+
input_dev->phys = ts->phys;
- input_dev->id.bustype = bustype;
+ input_dev->id = *tsc_id;
input_dev->dev.parent = dev;
input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
index 7a482d102614..49a63a3c6840 100644
--- a/drivers/input/touchscreen/tsc200x-core.h
+++ b/drivers/input/touchscreen/tsc200x-core.h
@@ -70,7 +70,7 @@
extern const struct regmap_config tsc200x_regmap_config;
extern const struct dev_pm_ops tsc200x_pm_ops;
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd));
int tsc200x_remove(struct device *dev);
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 0c9191cf324d..b6fc4bde79de 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -155,6 +155,7 @@ static void parse_multi_touch(struct w8001 *w8001)
bool touch = data[0] & (1 << i);
input_mt_slot(dev, i);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
if (touch) {
x = (data[6 * i + 1] << 7) | data[6 * i + 2];
y = (data[6 * i + 3] << 7) | data[6 * i + 4];
@@ -522,6 +523,8 @@ static int w8001_setup_touch(struct w8001 *w8001, char *basename,
0, touch.x, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y,
0, touch.y, 0, 0);
+ input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
strlcat(basename, " 2FG", basename_sz);
if (w8001->max_pen_x && w8001->max_pen_y)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index d091defc3426..59741ead7e15 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1568,13 +1568,23 @@ static int __init amd_iommu_init_pci(void)
break;
}
+ /*
+ * Order is important here to make sure any unity map requirements are
+ * fulfilled. The unity mappings are created and written to the device
+ * table during the amd_iommu_init_api() call.
+ *
+ * After that we call init_device_table_dma() to make sure any
+ * uninitialized DTE will block DMA, and in the end we flush the caches
+ * of all IOMMUs to make sure the changes to the device table are
+ * active.
+ */
+ ret = amd_iommu_init_api();
+
init_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
- ret = amd_iommu_init_api();
-
if (!ret)
print_iommu_info();
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index cfe410eedaf0..323dac9900ba 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4602,13 +4602,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
for (i = 0; i < g_num_of_iommus; i++) {
struct intel_iommu *iommu = g_iommus[i];
struct dmar_domain *domain;
- u16 did;
+ int did;
if (!iommu)
continue;
for (did = 0; did < cap_ndoms(iommu->cap); did++) {
- domain = get_iommu_domain(iommu, did);
+ domain = get_iommu_domain(iommu, (u16)did);
if (!domain)
continue;
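The u16 to int change fixes an unsigned-wraparound loop: cap_ndoms() can
legitimately report 65536 domains, a bound a 16-bit counter can never reach.
The bug class in miniature (the narrowing cast then happens only at the call
site, as in the hunk):

	static void demo_wraparound(void)
	{
		unsigned short did;		/* the old 'u16 did' */
		unsigned int ndoms = 65536;	/* cap_ndoms() can return 2^16 */

		for (did = 0; did < ndoms; did++)
			;	/* never terminates: did wraps 65535 -> 0 */
	}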
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index fa33c50b0e5a..5495a5ba8039 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -8,6 +8,12 @@ config ARM_GIC
select IRQ_DOMAIN_HIERARCHY
select MULTI_IRQ_HANDLER
+config ARM_GIC_PM
+ bool
+ depends on PM
+ select ARM_GIC
+ select PM_CLK
+
config ARM_GIC_MAX_NR
int
default 2 if ARCH_REALVIEW
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 38853a187607..4c203b6b8163 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
@@ -69,3 +70,4 @@ obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
+obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index ead15be2d20a..b78a169c9c83 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -55,14 +55,14 @@ static void combiner_mask_irq(struct irq_data *data)
{
u32 mask = 1 << (data->hwirq % 32);
- __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
+ writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}
static void combiner_unmask_irq(struct irq_data *data)
{
u32 mask = 1 << (data->hwirq % 32);
- __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
+ writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}
static void combiner_handle_cascade_irq(struct irq_desc *desc)
@@ -75,7 +75,7 @@ static void combiner_handle_cascade_irq(struct irq_desc *desc)
chained_irq_enter(chip, desc);
spin_lock(&irq_controller_lock);
- status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
+ status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
spin_unlock(&irq_controller_lock);
status &= chip_data->irq_mask;
@@ -135,7 +135,7 @@ static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
combiner_data->parent_irq = irq;
/* Disable all interrupts */
- __raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
+ writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}
static int combiner_irq_domain_xlate(struct irq_domain *d,
@@ -218,7 +218,7 @@ static int combiner_suspend(void)
for (i = 0; i < max_nr; i++)
combiner_data[i].pm_save =
- __raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET);
+ readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);
return 0;
}
@@ -235,9 +235,9 @@ static void combiner_resume(void)
int i;
for (i = 0; i < max_nr; i++) {
- __raw_writel(combiner_data[i].irq_mask,
+ writel_relaxed(combiner_data[i].irq_mask,
combiner_data[i].base + COMBINER_ENABLE_CLEAR);
- __raw_writel(combiner_data[i].pm_save,
+ writel_relaxed(combiner_data[i].pm_save,
combiner_data[i].base + COMBINER_ENABLE_SET);
}
}
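Replacing __raw_writel()/__raw_readl() with the _relaxed accessors keeps the
cheap no-barrier semantics but adds the little-endian conversion the raw
variants lack, so the driver keeps working on big-endian kernels. As a sketch
(the register offset mirrors the driver's own definition):

	#include <linux/io.h>

	#define COMBINER_ENABLE_SET	0x0

	/*
	 *   __raw_writel()   - no barrier, no endian conversion
	 *   writel_relaxed() - no barrier, register image fixed little-endian
	 *   writel()         - little-endian plus the full I/O write barrier
	 */
	static void example_unmask(void __iomem *base, u32 mask)
	{
		writel_relaxed(mask, base + COMBINER_ENABLE_SET);
	}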
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index e7dc6cbda2a1..7c42b1d13faf 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -541,7 +541,7 @@ static void armada_370_xp_mpic_resume(void)
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
-struct syscore_ops armada_370_xp_mpic_syscore_ops = {
+static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
.suspend = armada_370_xp_mpic_suspend,
.resume = armada_370_xp_mpic_resume,
};
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
new file mode 100644
index 000000000000..d24451d5bf8a
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2015 - Ben Herrenschmidt, IBM Corp.
+ *
+ * Driver for Aspeed "new" VIC as found in SoC generation 3 and later
+ *
+ * Based on irq-vic.c:
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <asm/exception.h>
+#include <asm/irq.h>
+
+/* These definitions correspond to the "new mapping" of the
+ * register set that interleaves "high" and "low". The offsets
+ * below are for the "low" register; add 4 to get to the "high" one.
+ */
+#define AVIC_IRQ_STATUS 0x00
+#define AVIC_FIQ_STATUS 0x08
+#define AVIC_RAW_STATUS 0x10
+#define AVIC_INT_SELECT 0x18
+#define AVIC_INT_ENABLE 0x20
+#define AVIC_INT_ENABLE_CLR 0x28
+#define AVIC_INT_TRIGGER 0x30
+#define AVIC_INT_TRIGGER_CLR 0x38
+#define AVIC_INT_SENSE 0x40
+#define AVIC_INT_DUAL_EDGE 0x48
+#define AVIC_INT_EVENT 0x50
+#define AVIC_EDGE_CLR 0x58
+#define AVIC_EDGE_STATUS 0x60
+
+#define NUM_IRQS 64
+
+struct aspeed_vic {
+ void __iomem *base;
+ u32 edge_sources[2];
+ struct irq_domain *dom;
+};
+static struct aspeed_vic *system_avic;
+
+static void vic_init_hw(struct aspeed_vic *vic)
+{
+ u32 sense;
+
+ /* Disable all interrupts */
+ writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR);
+ writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR + 4);
+
+ /* Make sure no soft trigger is on */
+ writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR);
+ writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR + 4);
+
+ /* Set everything to be IRQ */
+ writel(0, vic->base + AVIC_INT_SELECT);
+ writel(0, vic->base + AVIC_INT_SELECT + 4);
+
+ /* Some interrupts have a programmable high/low level trigger
+ * (4 GPIO direct inputs); for now we assume this was configured
+ * by firmware. We read back which ones are edge-triggered.
+ */
+ sense = readl(vic->base + AVIC_INT_SENSE);
+ vic->edge_sources[0] = ~sense;
+ sense = readl(vic->base + AVIC_INT_SENSE + 4);
+ vic->edge_sources[1] = ~sense;
+
+ /* Clear edge detection latches */
+ writel(0xffffffff, vic->base + AVIC_EDGE_CLR);
+ writel(0xffffffff, vic->base + AVIC_EDGE_CLR + 4);
+}
+
+static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
+{
+ struct aspeed_vic *vic = system_avic;
+ u32 stat, irq;
+
+ for (;;) {
+ irq = 0;
+ stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS);
+ if (!stat) {
+ stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS + 4);
+ irq = 32;
+ }
+ if (stat == 0)
+ break;
+ irq += ffs(stat) - 1;
+ handle_domain_irq(vic->dom, irq, regs);
+ }
+}
+
+static void avic_ack_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ /* Clear edge latch for edge interrupts, nop for level */
+ if (vic->edge_sources[sidx] & sbit)
+ writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4);
+}
+
+static void avic_mask_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4);
+}
+
+static void avic_unmask_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ writel(sbit, vic->base + AVIC_INT_ENABLE + sidx * 4);
+}
+
+/* For level irq, faster than going through a nop "ack" and mask */
+static void avic_mask_ack_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ /* First mask */
+ writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4);
+
+ /* Then clear edge latch for edge interrupts */
+ if (vic->edge_sources[sidx] & sbit)
+ writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4);
+}
+
+static struct irq_chip avic_chip = {
+ .name = "AVIC",
+ .irq_ack = avic_ack_irq,
+ .irq_mask = avic_mask_irq,
+ .irq_unmask = avic_unmask_irq,
+ .irq_mask_ack = avic_mask_ack_irq,
+};
+
+static int avic_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct aspeed_vic *vic = d->host_data;
+ unsigned int sidx = hwirq >> 5;
+ unsigned int sbit = 1u << (hwirq & 0x1f);
+
+ /* Check if interrupt exists */
+ if (sidx > 1)
+ return -EPERM;
+
+ if (vic->edge_sources[sidx] & sbit)
+ irq_set_chip_and_handler(irq, &avic_chip, handle_edge_irq);
+ else
+ irq_set_chip_and_handler(irq, &avic_chip, handle_level_irq);
+ irq_set_chip_data(irq, vic);
+ irq_set_probe(irq);
+ return 0;
+}
+
+static struct irq_domain_ops avic_dom_ops = {
+ .map = avic_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init avic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ void __iomem *regs;
+ struct aspeed_vic *vic;
+
+ if (WARN(parent, "non-root Aspeed VIC not supported"))
+ return -EINVAL;
+ if (WARN(system_avic, "duplicate Aspeed VIC not supported"))
+ return -EINVAL;
+
+ regs = of_iomap(node, 0);
+ if (WARN_ON(!regs))
+ return -EIO;
+
+ vic = kzalloc(sizeof(struct aspeed_vic), GFP_KERNEL);
+ if (WARN_ON(!vic)) {
+ iounmap(regs);
+ return -ENOMEM;
+ }
+ vic->base = regs;
+
+ /* Initialize sources, all masked */
+ vic_init_hw(vic);
+
+ /* Ready to receive interrupts */
+ system_avic = vic;
+ set_handle_irq(avic_handle_irq);
+
+ /* Register our domain */
+ vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0,
+ &avic_dom_ops, vic);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(aspeed_new_vic, "aspeed,ast2400-vic", avic_of_init);
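The avic_handle_irq() dispatch above is the usual ffs()-based drain loop, shown
here for a single 32-bit bank (the driver walks a low and a high status word,
offset by 32 hwirqs):

	#include <linux/io.h>
	#include <linux/irqdomain.h>

	#define AVIC_IRQ_STATUS	0x00

	static void example_dispatch(void __iomem *base, struct irq_domain *dom,
				     struct pt_regs *regs)
	{
		u32 stat;

		while ((stat = readl_relaxed(base + AVIC_IRQ_STATUS)) != 0) {
			/* ffs() is 1-based: lowest pending bit -> hwirq */
			unsigned int hwirq = ffs(stat) - 1;

			handle_domain_irq(dom, hwirq, regs);
		}
	}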
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index bf9cc5f2e839..44d7c38dde47 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -52,7 +52,6 @@
#include <linux/irqdomain.h>
#include <asm/exception.h>
-#include <asm/mach/irq.h>
/* Put the bank and irq (32 bits) into the hwirq */
#define MAKE_HWIRQ(b, n) ((b << 5) | (n))
@@ -242,7 +241,7 @@ static void __exception_irq_entry bcm2835_handle_irq(
u32 hwirq;
while ((hwirq = get_next_armctrl_hwirq()) != ~0)
- handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
+ handle_domain_irq(intc.domain, hwirq, regs);
}
static void bcm2836_chained_handle_irq(struct irq_desc *desc)
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 72ff1d5c5de6..df1949c0aa23 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -180,7 +180,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
} else if (stat) {
u32 hwirq = ffs(stat) - 1;
- handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
+ handle_domain_irq(intc.domain, hwirq, regs);
}
}
@@ -224,8 +224,8 @@ static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
};
#ifdef CONFIG_ARM
-int __init bcm2836_smp_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
+static int __init bcm2836_smp_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
{
unsigned long secondary_startup_phys =
(unsigned long)virt_to_phys((void *)secondary_startup);
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 61b18ab33ad9..0ec92631e23c 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -215,7 +215,7 @@ static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
return 0;
}
-int __init bcm7120_l2_intc_probe(struct device_node *dn,
+static int __init bcm7120_l2_intc_probe(struct device_node *dn,
struct device_node *parent,
int (*iomap_regs_fn)(struct device_node *,
struct bcm7120_l2_intc_data *),
@@ -339,15 +339,15 @@ out_unmap:
return ret;
}
-int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
- struct device_node *parent)
+static int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
+ struct device_node *parent)
{
return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120,
"BCM7120 L2");
}
-int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
- struct device_node *parent)
+static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
+ struct device_node *parent)
{
return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380,
"BCM3380 L2");
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 65cd341f331a..1d4a5b46d9ae 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -112,8 +112,8 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
irq_gc_unlock(gc);
}
-int __init brcmstb_l2_intc_of_init(struct device_node *np,
- struct device_node *parent)
+static int __init brcmstb_l2_intc_of_init(struct device_node *np,
+ struct device_node *parent)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct brcmstb_l2_intc_data *data;
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 89e7423f0ebb..9ae71804b5dd 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -90,8 +90,8 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
return ret;
}
-void __init gic_dist_config(void __iomem *base, int gic_irqs,
- void (*sync_access)(void))
+void gic_dist_config(void __iomem *base, int gic_irqs,
+ void (*sync_access)(void))
{
unsigned int i;
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
new file mode 100644
index 000000000000..4cbffba3ff13
--- /dev/null
+++ b/drivers/irqchip/irq-gic-pm.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+struct gic_clk_data {
+ unsigned int num_clocks;
+ const char *const *clocks;
+};
+
+static int gic_runtime_resume(struct device *dev)
+{
+ struct gic_chip_data *gic = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_clk_resume(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * On the very first resume, the pointer to the driver data
+ * will be NULL and this is intentional, because we do not
+ * want to restore the GIC on the very first resume. So if
+ * the pointer is not valid just return.
+ */
+ if (!gic)
+ return 0;
+
+ gic_dist_restore(gic);
+ gic_cpu_restore(gic);
+
+ return 0;
+}
+
+static int gic_runtime_suspend(struct device *dev)
+{
+ struct gic_chip_data *gic = dev_get_drvdata(dev);
+
+ gic_dist_save(gic);
+ gic_cpu_save(gic);
+
+ return pm_clk_suspend(dev);
+}
+
+static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data)
+{
+ struct clk *clk;
+ unsigned int i;
+ int ret;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ ret = pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < data->num_clocks; i++) {
+ clk = of_clk_get_by_name(dev->of_node, data->clocks[i]);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock %s\n",
+ data->clocks[i]);
+ ret = PTR_ERR(clk);
+ goto error;
+ }
+
+ ret = pm_clk_add_clk(dev, clk);
+ if (ret) {
+ dev_err(dev, "failed to add clock at index %d\n", i);
+ clk_put(clk);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ pm_clk_destroy(dev);
+
+ return ret;
+}
+
+static int gic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct gic_clk_data *data;
+ struct gic_chip_data *gic;
+ int ret, irq;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "no device match found\n");
+ return -ENODEV;
+ }
+
+ irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!irq) {
+ dev_err(dev, "no parent interrupt found!\n");
+ return -EINVAL;
+ }
+
+ ret = gic_get_clocks(dev, data);
+ if (ret)
+ goto irq_dispose;
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto rpm_disable;
+
+ ret = gic_of_init_child(dev, &gic, irq);
+ if (ret)
+ goto rpm_put;
+
+ platform_set_drvdata(pdev, gic);
+
+ pm_runtime_put(dev);
+
+ dev_info(dev, "GIC IRQ controller registered\n");
+
+ return 0;
+
+rpm_put:
+ pm_runtime_put_sync(dev);
+rpm_disable:
+ pm_runtime_disable(dev);
+ pm_clk_destroy(dev);
+irq_dispose:
+ irq_dispose_mapping(irq);
+
+ return ret;
+}
+
+static const struct dev_pm_ops gic_pm_ops = {
+ SET_RUNTIME_PM_OPS(gic_runtime_suspend,
+ gic_runtime_resume, NULL)
+};
+
+static const char * const gic400_clocks[] = {
+ "clk",
+};
+
+static const struct gic_clk_data gic400_data = {
+ .num_clocks = ARRAY_SIZE(gic400_clocks),
+ .clocks = gic400_clocks,
+};
+
+static const struct of_device_id gic_match[] = {
+ { .compatible = "nvidia,tegra210-agic", .data = &gic400_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gic_match);
+
+static struct platform_driver gic_driver = {
+ .probe = gic_probe,
+ .driver = {
+ .name = "gic",
+ .of_match_table = gic_match,
+ .pm = &gic_pm_ops,
+ }
+};
+
+builtin_platform_driver(gic_driver);
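Two ordering details in this driver are easy to miss. First, probe issues
pm_runtime_get_sync() before gic_of_init_child() so the clocks are running when
GIC registers are first touched. Second, that very first resume happens before
platform_set_drvdata(), which is why gic_runtime_resume() treats a NULL drvdata
as "nothing saved yet". A hedged sketch of that resume shape
(restore_gic_state() is a hypothetical stand-in for
gic_dist_restore()/gic_cpu_restore()):

	#include <linux/pm_clock.h>

	void restore_gic_state(void *state);	/* hypothetical */

	static int example_runtime_resume(struct device *dev)
	{
		void *state = dev_get_drvdata(dev);
		int ret;

		ret = pm_clk_resume(dev);	/* clocks first, registers second */
		if (ret)
			return ret;

		if (!state)	/* first resume: nothing to restore yet */
			return 0;

		restore_gic_state(state);
		return 0;
	}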
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index ad0d2960b664..35eb7ac5d21f 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -24,6 +24,7 @@
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/irqchip/arm-gic.h>
/*
* MSI_TYPER:
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 5eb1f9e17a98..7ceaba81efb4 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -56,13 +56,14 @@ struct its_collection {
};
/*
- * The ITS_BASER structure - contains memory information and cached
- * value of BASER register configuration.
+ * The ITS_BASER structure - contains memory information, cached
+ * value of BASER register configuration and ITS page size.
*/
struct its_baser {
void *base;
u64 val;
u32 order;
+ u32 psz;
};
/*
@@ -824,180 +825,241 @@ static const char *its_base_type_string[] = {
[GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
};
-static void its_free_tables(struct its_node *its)
+static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
- int i;
+ u32 idx = baser - its->tables;
- for (i = 0; i < GITS_BASER_NR_REGS; i++) {
- if (its->tables[i].base) {
- free_pages((unsigned long)its->tables[i].base,
- its->tables[i].order);
- its->tables[i].base = NULL;
- }
- }
+ return readq_relaxed(its->base + GITS_BASER + (idx << 3));
}
-static int its_alloc_tables(const char *node_name, struct its_node *its)
+static void its_write_baser(struct its_node *its, struct its_baser *baser,
+ u64 val)
{
- int err;
- int i;
- int psz = SZ_64K;
- u64 shr = GITS_BASER_InnerShareable;
- u64 cache;
- u64 typer;
- u32 ids;
+ u32 idx = baser - its->tables;
- if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
- /*
- * erratum 22375: only alloc 8MB table size
- * erratum 24313: ignore memory access type
- */
- cache = 0;
- ids = 0x14; /* 20 bits, 8MB */
- } else {
- cache = GITS_BASER_WaWb;
- typer = readq_relaxed(its->base + GITS_TYPER);
- ids = GITS_TYPER_DEVBITS(typer);
+ writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
+ baser->val = its_read_baser(its, baser);
+}
+
+static int its_setup_baser(struct its_node *its, struct its_baser *baser,
+ u64 cache, u64 shr, u32 psz, u32 order,
+ bool indirect)
+{
+ u64 val = its_read_baser(its, baser);
+ u64 esz = GITS_BASER_ENTRY_SIZE(val);
+ u64 type = GITS_BASER_TYPE(val);
+ u32 alloc_pages;
+ void *base;
+ u64 tmp;
+
+retry_alloc_baser:
+ alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
+ if (alloc_pages > GITS_BASER_PAGES_MAX) {
+ pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
+ &its->phys_base, its_base_type_string[type],
+ alloc_pages, GITS_BASER_PAGES_MAX);
+ alloc_pages = GITS_BASER_PAGES_MAX;
+ order = get_order(GITS_BASER_PAGES_MAX * psz);
}
- its->device_ids = ids;
+ base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!base)
+ return -ENOMEM;
- for (i = 0; i < GITS_BASER_NR_REGS; i++) {
- u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
- u64 type = GITS_BASER_TYPE(val);
- u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
- int order = get_order(psz);
- int alloc_pages;
- u64 tmp;
- void *base;
+retry_baser:
+ val = (virt_to_phys(base) |
+ (type << GITS_BASER_TYPE_SHIFT) |
+ ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
+ ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
+ cache |
+ shr |
+ GITS_BASER_VALID);
+
+ val |= indirect ? GITS_BASER_INDIRECT : 0x0;
+
+ switch (psz) {
+ case SZ_4K:
+ val |= GITS_BASER_PAGE_SIZE_4K;
+ break;
+ case SZ_16K:
+ val |= GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_64K:
+ val |= GITS_BASER_PAGE_SIZE_64K;
+ break;
+ }
- if (type == GITS_BASER_TYPE_NONE)
- continue;
+ its_write_baser(its, baser, val);
+ tmp = baser->val;
+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
/*
- * Allocate as many entries as required to fit the
- * range of device IDs that the ITS can grok... The ID
- * space being incredibly sparse, this results in a
- * massive waste of memory.
- *
- * For other tables, only allocate a single page.
+ * Shareability didn't stick. Just use
+ * whatever the read reported, which is likely
+ * to be the only thing this redistributor
+ * supports. If that's zero, make it
+ * non-cacheable as well.
*/
- if (type == GITS_BASER_TYPE_DEVICE) {
- /*
- * 'order' was initialized earlier to the default page
- * granule of the the ITS. We can't have an allocation
- * smaller than that. If the requested allocation
- * is smaller, round up to the default page granule.
- */
- order = max(get_order((1UL << ids) * entry_size),
- order);
- if (order >= MAX_ORDER) {
- order = MAX_ORDER - 1;
- pr_warn("%s: Device Table too large, reduce its page order to %u\n",
- node_name, order);
- }
- }
-
-retry_alloc_baser:
- alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
- if (alloc_pages > GITS_BASER_PAGES_MAX) {
- alloc_pages = GITS_BASER_PAGES_MAX;
- order = get_order(GITS_BASER_PAGES_MAX * psz);
- pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n",
- node_name, order, alloc_pages);
+ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+ if (!shr) {
+ cache = GITS_BASER_nC;
+ __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
}
+ goto retry_baser;
+ }
- base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!base) {
- err = -ENOMEM;
- goto out_free;
- }
-
- its->tables[i].base = base;
- its->tables[i].order = order;
-
-retry_baser:
- val = (virt_to_phys(base) |
- (type << GITS_BASER_TYPE_SHIFT) |
- ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
- cache |
- shr |
- GITS_BASER_VALID);
+ if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
+ /*
+ * Page size didn't stick. Let's try a smaller
+ * size and retry. If we reach 4K, then
+ * something is horribly wrong...
+ */
+ free_pages((unsigned long)base, order);
+ baser->base = NULL;
switch (psz) {
- case SZ_4K:
- val |= GITS_BASER_PAGE_SIZE_4K;
- break;
case SZ_16K:
- val |= GITS_BASER_PAGE_SIZE_16K;
- break;
+ psz = SZ_4K;
+ goto retry_alloc_baser;
case SZ_64K:
- val |= GITS_BASER_PAGE_SIZE_64K;
- break;
+ psz = SZ_16K;
+ goto retry_alloc_baser;
}
+ }
+
+ if (val != tmp) {
+ pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
+ &its->phys_base, its_base_type_string[type],
+ (unsigned long) val, (unsigned long) tmp);
+ free_pages((unsigned long)base, order);
+ return -ENXIO;
+ }
- val |= alloc_pages - 1;
- its->tables[i].val = val;
+ baser->order = order;
+ baser->base = base;
+ baser->psz = psz;
+ tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
- writeq_relaxed(val, its->base + GITS_BASER + i * 8);
- tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
+ pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
+ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
+ its_base_type_string[type],
+ (unsigned long)virt_to_phys(base),
+ indirect ? "indirect" : "flat", (int)esz,
+ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
- if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+ return 0;
+}
+
+static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
+ u32 psz, u32 *order)
+{
+ u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
+ u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb;
+ u32 ids = its->device_ids;
+ u32 new_order = *order;
+ bool indirect = false;
+
+ /* No need to enable indirection if the memory requirement is < (psz * 2) bytes */
+ if ((esz << ids) > (psz * 2)) {
+ /*
+ * Find out whether hw supports a single or two-level table by
+ * table by reading bit at offset '62' after writing '1' to it.
+ */
+ its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
+ indirect = !!(baser->val & GITS_BASER_INDIRECT);
+
+ if (indirect) {
/*
- * Shareability didn't stick. Just use
- * whatever the read reported, which is likely
- * to be the only thing this redistributor
- * supports. If that's zero, make it
- * non-cacheable as well.
+ * Each lvl2 table is a single ITS page ('psz') and so
+ * covers psz/esz IDs. To size the lvl1 table, subtract
+ * the ID bits resolved by a lvl2 table from 'ids' (as
+ * reported by the ITS hardware) and multiply by the
+ * lvl1 table entry size.
*/
- shr = tmp & GITS_BASER_SHAREABILITY_MASK;
- if (!shr) {
- cache = GITS_BASER_nC;
- __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
- }
- goto retry_baser;
+ ids -= ilog2(psz / esz);
+ esz = GITS_LVL1_ENTRY_SIZE;
}
+ }
- if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
- /*
- * Page size didn't stick. Let's try a smaller
- * size and retry. If we reach 4K, then
- * something is horribly wrong...
- */
- free_pages((unsigned long)base, order);
- its->tables[i].base = NULL;
+ /*
+ * Allocate as many entries as required to fit the
+ * range of device IDs that the ITS can grok... The ID
+ * space being incredibly sparse, this results in a
+ * massive waste of memory if two-level device table
+ * feature is not supported by hardware.
+ */
+ new_order = max_t(u32, get_order(esz << ids), new_order);
+ if (new_order >= MAX_ORDER) {
+ new_order = MAX_ORDER - 1;
+ ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
+ pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
+ &its->phys_base, its->device_ids, ids);
+ }
- switch (psz) {
- case SZ_16K:
- psz = SZ_4K;
- goto retry_alloc_baser;
- case SZ_64K:
- psz = SZ_16K;
- goto retry_alloc_baser;
- }
- }
+ *order = new_order;
+
+ return indirect;
+}
- if (val != tmp) {
- pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
- node_name, i,
- (unsigned long) val, (unsigned long) tmp);
- err = -ENXIO;
- goto out_free;
+static void its_free_tables(struct its_node *its)
+{
+ int i;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (its->tables[i].base) {
+ free_pages((unsigned long)its->tables[i].base,
+ its->tables[i].order);
+ its->tables[i].base = NULL;
}
+ }
+}
+
+static int its_alloc_tables(struct its_node *its)
+{
+ u64 typer = readq_relaxed(its->base + GITS_TYPER);
+ u32 ids = GITS_TYPER_DEVBITS(typer);
+ u64 shr = GITS_BASER_InnerShareable;
+ u64 cache = GITS_BASER_WaWb;
+ u32 psz = SZ_64K;
+ int err, i;
- pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
- (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
- its_base_type_string[type],
- (unsigned long)virt_to_phys(base),
- psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
+ /*
+ * erratum 22375: only alloc 8MB table size
+ * erratum 24313: ignore memory access type
+ */
+ cache = GITS_BASER_nCnB;
+ ids = 0x14; /* 20 bits, 8MB */
}
- return 0;
+ its->device_ids = ids;
-out_free:
- its_free_tables(its);
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ struct its_baser *baser = its->tables + i;
+ u64 val = its_read_baser(its, baser);
+ u64 type = GITS_BASER_TYPE(val);
+ u32 order = get_order(psz);
+ bool indirect = false;
- return err;
+ if (type == GITS_BASER_TYPE_NONE)
+ continue;
+
+ if (type == GITS_BASER_TYPE_DEVICE)
+ indirect = its_parse_baser_device(its, baser, psz, &order);
+
+ err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
+ if (err < 0) {
+ its_free_tables(its);
+ return err;
+ }
+
+ /* Update settings which will be used for next BASERn */
+ psz = baser->psz;
+ cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
+ shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
+ }
+
+ return 0;
}
static int its_alloc_collections(struct its_node *its)
@@ -1185,10 +1247,57 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
return NULL;
}
+static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+{
+ struct its_baser *baser;
+ struct page *page;
+ u32 esz, idx;
+ __le64 *table;
+
+ baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+ /* Don't allow device id that exceeds ITS hardware limit */
+ if (!baser)
+ return (ilog2(dev_id) < its->device_ids);
+
+ /* Don't allow device id that exceeds single, flat table limit */
+ esz = GITS_BASER_ENTRY_SIZE(baser->val);
+ if (!(baser->val & GITS_BASER_INDIRECT))
+ return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
+
+ /* Compute 1st level table index & check if that exceeds table limit */
+ idx = dev_id >> ilog2(baser->psz / esz);
+ if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
+ return false;
+
+ table = baser->base;
+
+ /* Allocate memory for 2nd level table */
+ if (!table[idx]) {
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
+ if (!page)
+ return false;
+
+ /* Flush Lvl2 table to PoC if hw doesn't support coherency */
+ if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+ __flush_dcache_area(page_address(page), baser->psz);
+
+ table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+ /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+ if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+ __flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+ /* Ensure updated table contents are visible to ITS hardware */
+ dsb(sy);
+ }
+
+ return true;
+}
+
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nvecs)
{
- struct its_baser *baser;
struct its_device *dev;
unsigned long *lpi_map;
unsigned long flags;
@@ -1199,14 +1308,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nr_ites;
int sz;
- baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
-
- /* Don't allow 'dev_id' that exceeds single, flat table limit */
- if (baser) {
- if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
- GITS_BASER_ENTRY_SIZE(baser->val)))
- return NULL;
- } else if (ilog2(dev_id) >= its->device_ids)
+ if (!its_alloc_device_table(its, dev_id))
return NULL;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1569,7 +1671,7 @@ static int __init its_probe(struct device_node *node,
its_enable_quirks(its);
- err = its_alloc_tables(node->full_name, its);
+ err = its_alloc_tables(its);
if (err)
goto out_free_cmd;
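The payoff of the two-level device table is easiest to see with concrete
numbers; a hedged worked example (entry size, ID width and page size all vary
by implementation):

	/*
	 * Say esz = 8 bytes, ids = 20 bits, psz = SZ_64K.
	 *
	 * Flat table:  8 << 20 = 8 MB, allocated up front for an ID
	 *              space that is almost entirely sparse.
	 *
	 * Two-level:   one lvl2 page covers 64K / 8 = 8192 IDs,
	 *              i.e. ilog2(8192) = 13 of the 20 ID bits;
	 *              the lvl1 table then needs 2^(20 - 13) = 128
	 *              entries of GITS_LVL1_ENTRY_SIZE (8 bytes),
	 *              so 1 KB up front, with 64 KB lvl2 pages
	 *              allocated on demand in its_alloc_device_table().
	 */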
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index fbc4ae2afd29..1de07eb5839c 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -75,7 +75,7 @@ struct gic_chip_data {
void __iomem *raw_dist_base;
void __iomem *raw_cpu_base;
u32 percpu_offset;
-#ifdef CONFIG_CPU_PM
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
@@ -449,7 +449,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic)
}
-static void __init gic_dist_init(struct gic_chip_data *gic)
+static void gic_dist_init(struct gic_chip_data *gic)
{
unsigned int i;
u32 cpumask;
@@ -528,14 +528,14 @@ int gic_cpu_if_down(unsigned int gic_nr)
return 0;
}
-#ifdef CONFIG_CPU_PM
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
/*
* Saves the GIC distributor registers during suspend or idle. Must be called
* with interrupts disabled but before powering down the GIC. After calling
* this function, no interrupts will be delivered by the GIC, and another
* platform-specific wakeup source must be enabled.
*/
-static void gic_dist_save(struct gic_chip_data *gic)
+void gic_dist_save(struct gic_chip_data *gic)
{
unsigned int gic_irqs;
void __iomem *dist_base;
@@ -574,7 +574,7 @@ static void gic_dist_save(struct gic_chip_data *gic)
* handled normally, but any edge interrupts that occured will not be seen by
* the GIC and need to be handled by the platform-specific wakeup source.
*/
-static void gic_dist_restore(struct gic_chip_data *gic)
+void gic_dist_restore(struct gic_chip_data *gic)
{
unsigned int gic_irqs;
unsigned int i;
@@ -620,7 +620,7 @@ static void gic_dist_restore(struct gic_chip_data *gic)
writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}
-static void gic_cpu_save(struct gic_chip_data *gic)
+void gic_cpu_save(struct gic_chip_data *gic)
{
int i;
u32 *ptr;
@@ -650,7 +650,7 @@ static void gic_cpu_save(struct gic_chip_data *gic)
}
-static void gic_cpu_restore(struct gic_chip_data *gic)
+void gic_cpu_restore(struct gic_chip_data *gic)
{
int i;
u32 *ptr;
@@ -727,7 +727,7 @@ static struct notifier_block gic_notifier_block = {
.notifier_call = gic_notifier,
};
-static int __init gic_pm_init(struct gic_chip_data *gic)
+static int gic_pm_init(struct gic_chip_data *gic)
{
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32));
@@ -757,7 +757,7 @@ free_ppi_enable:
return -ENOMEM;
}
#else
-static int __init gic_pm_init(struct gic_chip_data *gic)
+static int gic_pm_init(struct gic_chip_data *gic)
{
return 0;
}
@@ -1032,32 +1032,31 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.unmap = gic_irq_domain_unmap,
};
-static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
- struct fwnode_handle *handle)
+static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
+ const char *name, bool use_eoimode1)
{
- irq_hw_number_t hwirq_base;
- int gic_irqs, irq_base, i, ret;
-
- if (WARN_ON(!gic || gic->domain))
- return -EINVAL;
-
/* Initialize irq_chip */
gic->chip = gic_chip;
+ gic->chip.name = name;
+ gic->chip.parent_device = dev;
- if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+ if (use_eoimode1) {
gic->chip.irq_mask = gic_eoimode1_mask_irq;
gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
- gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
- } else {
- gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
- (int)(gic - &gic_data[0]));
}
#ifdef CONFIG_SMP
if (gic == &gic_data[0])
gic->chip.irq_set_affinity = gic_set_affinity;
#endif
+}
+
+static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
+ struct fwnode_handle *handle)
+{
+ irq_hw_number_t hwirq_base;
+ int gic_irqs, irq_base, ret;
if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
/* Frankein-GIC without banked registers... */
@@ -1138,6 +1137,36 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
goto error;
}
+ gic_dist_init(gic);
+ ret = gic_cpu_init(gic);
+ if (ret)
+ goto error;
+
+ ret = gic_pm_init(gic);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+ free_percpu(gic->dist_base.percpu_base);
+ free_percpu(gic->cpu_base.percpu_base);
+ }
+
+ return ret;
+}
+
+static int __init __gic_init_bases(struct gic_chip_data *gic,
+ int irq_start,
+ struct fwnode_handle *handle)
+{
+ char *name;
+ int i, ret;
+
+ if (WARN_ON(!gic || gic->domain))
+ return -EINVAL;
+
if (gic == &gic_data[0]) {
/*
* Initialize the CPU interface map to all CPUs.
@@ -1155,24 +1184,17 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
pr_info("GIC: Using split EOI/Deactivate mode\n");
}
- gic_dist_init(gic);
- ret = gic_cpu_init(gic);
- if (ret)
- goto error;
-
- ret = gic_pm_init(gic);
- if (ret)
- goto error;
-
- return 0;
-
-error:
- if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
- free_percpu(gic->dist_base.percpu_base);
- free_percpu(gic->cpu_base.percpu_base);
+ if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+ name = kasprintf(GFP_KERNEL, "GICv2");
+ gic_init_chip(gic, NULL, name, true);
+ } else {
+ name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic - &gic_data[0]));
+ gic_init_chip(gic, NULL, name, false);
}
- kfree(gic->chip.name);
+ ret = gic_init_bases(gic, irq_start, handle);
+ if (ret)
+ kfree(name);
return ret;
}
@@ -1250,7 +1272,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
return true;
}
-static int __init gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
+static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
if (!gic || !node)
return -EINVAL;
@@ -1274,6 +1296,34 @@ error:
return -ENOMEM;
}
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+ int ret;
+
+ if (!dev || !dev->of_node || !gic || !irq)
+ return -EINVAL;
+
+ *gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
+ if (!*gic)
+ return -ENOMEM;
+
+ gic_init_chip(*gic, dev, dev->of_node->name, false);
+
+ ret = gic_of_setup(*gic, dev->of_node);
+ if (ret)
+ return ret;
+
+ ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
+ if (ret) {
+ gic_teardown(*gic);
+ return ret;
+ }
+
+ irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
+
+ return 0;
+}
+
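A usage sketch for the new gic_of_init_child() entry point; the probe function and device wiring below are illustrative assumptions, not taken from this patch:

	/* Hypothetical probe for a GIC instance that sits inside a power domain. */
	static int my_gic_probe(struct platform_device *pdev)
	{
		struct gic_chip_data *gic;
		int irq, ret;

		irq = platform_get_irq(pdev, 0);	/* cascade parent IRQ */
		if (irq < 0)
			return irq;

		ret = gic_of_init_child(&pdev->dev, &gic, irq);
		if (ret)
			return ret;

		platform_set_drvdata(pdev, gic);
		return 0;
	}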
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
int ret;
@@ -1353,7 +1403,11 @@ IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
-
+#else
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+ return -ENOTSUPP;
+}
#endif
#ifdef CONFIG_ACPI
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 8a4adbeb2b8c..3786d0f21972 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -718,7 +718,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
- gic_map_to_vpe(intr, vpe);
+ gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[vpe].pcpu_mask);
@@ -959,7 +959,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
switch (bus_token) {
case DOMAIN_BUS_IPI:
is_ipi = d->bus_token == bus_token;
- return to_of_node(d->fwnode) == node && is_ipi;
+ return (!node || to_of_node(d->fwnode) == node) && is_ipi;
break;
default:
return 0;
@@ -1042,12 +1042,14 @@ static void __init __gic_init(unsigned long gic_base_addr,
&gic_irq_domain_ops, NULL);
if (!gic_irq_domain)
panic("Failed to add GIC IRQ domain");
+ gic_irq_domain->name = "mips-gic-irq";
gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
node, &gic_dev_domain_ops, NULL);
if (!gic_dev_domain)
panic("Failed to add GIC DEV domain");
+ gic_dev_domain->name = "mips-gic-dev";
gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
IRQ_DOMAIN_FLAG_IPI_PER_CPU,
@@ -1056,6 +1058,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
if (!gic_ipi_domain)
panic("Failed to add GIC IPI domain");
+ gic_ipi_domain->name = "mips-gic-ipi";
gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;
if (node &&
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index 9d1bcfc33e4c..b04a8ac6e744 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -23,6 +23,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/irqchip/irq-omap-intc.h>
+
/* Define these here for now until we drop all board-files */
#define OMAP24XX_IC_BASE 0x480fe000
#define OMAP34XX_IC_BASE 0x48200000
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index 5dc5a760c723..c25ce5af091a 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -92,9 +92,9 @@ static void s3c_irq_mask(struct irq_data *data)
unsigned long mask;
unsigned int irqno;
- mask = __raw_readl(intc->reg_mask);
+ mask = readl_relaxed(intc->reg_mask);
mask |= (1UL << irq_data->offset);
- __raw_writel(mask, intc->reg_mask);
+ writel_relaxed(mask, intc->reg_mask);
if (parent_intc) {
parent_data = &parent_intc->irqs[irq_data->parent_irq];
@@ -119,9 +119,9 @@ static void s3c_irq_unmask(struct irq_data *data)
unsigned long mask;
unsigned int irqno;
- mask = __raw_readl(intc->reg_mask);
+ mask = readl_relaxed(intc->reg_mask);
mask &= ~(1UL << irq_data->offset);
- __raw_writel(mask, intc->reg_mask);
+ writel_relaxed(mask, intc->reg_mask);
if (parent_intc) {
irqno = irq_find_mapping(parent_intc->domain,
@@ -136,9 +136,9 @@ static inline void s3c_irq_ack(struct irq_data *data)
struct s3c_irq_intc *intc = irq_data->intc;
unsigned long bitval = 1UL << irq_data->offset;
- __raw_writel(bitval, intc->reg_pending);
+ writel_relaxed(bitval, intc->reg_pending);
if (intc->reg_intpnd)
- __raw_writel(bitval, intc->reg_intpnd);
+ writel_relaxed(bitval, intc->reg_intpnd);
}
static int s3c_irq_type(struct irq_data *data, unsigned int type)
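For context on the accessor swap: __raw_readl()/__raw_writel() are native-endian and bypass any byte swapping, while readl_relaxed()/writel_relaxed() are defined as little-endian MMIO accessors; neither inserts barriers, so the conversion is behavior-preserving on little-endian kernels and fixes big-endian ones. The pattern is mechanical throughout the file:

	/* before: native-endian, no barriers */
	mask = __raw_readl(intc->reg_mask);
	__raw_writel(mask | bit, intc->reg_mask);

	/* after: little-endian MMIO, still no barriers */
	mask = readl_relaxed(intc->reg_mask);
	writel_relaxed(mask | bit, intc->reg_mask);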
@@ -172,9 +172,9 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
unsigned long newvalue = 0, value;
/* Set the GPIO to external interrupt mode */
- value = __raw_readl(gpcon_reg);
+ value = readl_relaxed(gpcon_reg);
value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset);
- __raw_writel(value, gpcon_reg);
+ writel_relaxed(value, gpcon_reg);
/* Set the external interrupt to pointed trigger type */
switch (type)
@@ -208,9 +208,9 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
return -EINVAL;
}
- value = __raw_readl(extint_reg);
+ value = readl_relaxed(extint_reg);
value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset);
- __raw_writel(value, extint_reg);
+ writel_relaxed(value, extint_reg);
return 0;
}
@@ -315,8 +315,8 @@ static void s3c_irq_demux(struct irq_desc *desc)
chained_irq_enter(chip, desc);
- src = __raw_readl(sub_intc->reg_pending);
- msk = __raw_readl(sub_intc->reg_mask);
+ src = readl_relaxed(sub_intc->reg_pending);
+ msk = readl_relaxed(sub_intc->reg_mask);
src &= ~msk;
src &= irq_data->sub_bits;
@@ -337,7 +337,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
int pnd;
int offset;
- pnd = __raw_readl(intc->reg_intpnd);
+ pnd = readl_relaxed(intc->reg_intpnd);
if (!pnd)
return false;
@@ -352,7 +352,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
*
* Thanks to Klaus, Shannon, et al for helping to debug this problem
*/
- offset = __raw_readl(intc->reg_intpnd + 4);
+ offset = readl_relaxed(intc->reg_intpnd + 4);
/* Find the bit manually, when the offset is wrong.
* The pending register only ever contains the one bit of the next
@@ -406,7 +406,7 @@ int s3c24xx_set_fiq(unsigned int irq, bool on)
intmod = 0;
}
- __raw_writel(intmod, S3C2410_INTMOD);
+ writel_relaxed(intmod, S3C2410_INTMOD);
return 0;
}
@@ -508,14 +508,14 @@ static void s3c24xx_clear_intc(struct s3c_irq_intc *intc)
last = 0;
for (i = 0; i < 4; i++) {
- pend = __raw_readl(reg_source);
+ pend = readl_relaxed(reg_source);
if (pend == 0 || pend == last)
break;
- __raw_writel(pend, intc->reg_pending);
+ writel_relaxed(pend, intc->reg_pending);
if (intc->reg_intpnd)
- __raw_writel(pend, intc->reg_intpnd);
+ writel_relaxed(pend, intc->reg_intpnd);
pr_info("irq: clearing pending status %08x\n", (int)pend);
last = pend;
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index 10cb21b9ba3d..e1336848affa 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -29,6 +29,11 @@
static struct irq_domain *sirfsoc_irqdomain;
+static void __iomem *sirfsoc_irq_get_regbase(void)
+{
+ return (void __iomem __force *)sirfsoc_irqdomain->host_data;
+}
+
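The helper confines the sparse address-space cast to one audited spot: host_data is a plain void *, so the __iomem cookie is force-cast on the way in and back out. A minimal sketch of the pattern (names hypothetical):

	/* Storing: deliberately drop the __iomem annotation... */
	domain = irq_domain_add_linear(np, nr_irqs, &my_ops,
				       (void __force *)regbase);

	/* ...and reinstate it in exactly one helper when reading back. */
	static void __iomem *my_get_regbase(struct irq_domain *d)
	{
		return (void __iomem __force *)d->host_data;
	}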
static __init void sirfsoc_alloc_gc(void __iomem *base)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
@@ -53,7 +58,7 @@ static __init void sirfsoc_alloc_gc(void __iomem *base)
static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
{
- void __iomem *base = sirfsoc_irqdomain->host_data;
+ void __iomem *base = sirfsoc_irq_get_regbase();
u32 irqstat;
irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID);
@@ -94,7 +99,7 @@ static struct sirfsoc_irq_status sirfsoc_irq_st;
static int sirfsoc_irq_suspend(void)
{
- void __iomem *base = sirfsoc_irqdomain->host_data;
+ void __iomem *base = sirfsoc_irq_get_regbase();
sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0);
sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1);
@@ -106,7 +111,7 @@ static int sirfsoc_irq_suspend(void)
static void sirfsoc_irq_resume(void)
{
- void __iomem *base = sirfsoc_irqdomain->host_data;
+ void __iomem *base = sirfsoc_irq_get_regbase();
writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0);
writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1);
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index e902f081e16c..3973a14bb15b 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -90,7 +90,7 @@ static struct tegra_ictlr_info *lic;
static inline void tegra_ictlr_write_mask(struct irq_data *d, unsigned long reg)
{
- void __iomem *base = d->chip_data;
+ void __iomem *base = (void __iomem __force *)d->chip_data;
u32 mask;
mask = BIT(d->hwirq % 32);
@@ -266,7 +266,7 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
&tegra_ictlr_chip,
- info->base[ictlr]);
+ (void __force *)info->base[ictlr]);
}
parent_fwspec = *fwspec;
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index b956dfffe78c..f811a7de5857 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -167,7 +167,7 @@ static int vic_suspend(void)
return 0;
}
-struct syscore_ops vic_syscore_ops = {
+static struct syscore_ops vic_syscore_ops = {
.suspend = vic_suspend,
.resume = vic_resume,
};
@@ -517,7 +517,8 @@ int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
EXPORT_SYMBOL_GPL(vic_init_cascaded);
#ifdef CONFIG_OF
-int __init vic_of_init(struct device_node *node, struct device_node *parent)
+static int __init vic_of_init(struct device_node *node,
+ struct device_node *parent)
{
void __iomem *regs;
u32 interrupt_mask = ~0;
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 85a339030e4b..61c68a1f054a 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -27,11 +27,13 @@ config NVM_DEBUG
It is required to create/remove targets without IOCTLs.
config NVM_GENNVM
- tristate "Generic NVM manager for Open-Channel SSDs"
+ tristate "General Non-Volatile Memory Manager for Open-Channel SSDs"
---help---
- NVM media manager for Open-Channel SSDs that offload management
- functionality to device, while keeping data placement and garbage
- collection decisions on the host.
+ Non-volatile memory media manager for Open-Channel SSDs that implements
+ physical media metadata management and block provisioning API.
+
+ This is the standard media manager for using Open-Channel SSDs, and
+ required for targets to be instantiated.
config NVM_RRPC
tristate "Round-robin Hybrid Open-Channel SSD target"
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 160c1a6838e1..9ebd2cfbd849 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -18,8 +18,6 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
@@ -28,46 +26,42 @@
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
-#include <uapi/linux/lightnvm.h>
static LIST_HEAD(nvm_tgt_types);
+static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
-static LIST_HEAD(nvm_targets);
static DECLARE_RWSEM(nvm_lock);
-static struct nvm_target *nvm_find_target(const char *name)
+struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
- struct nvm_target *tgt;
+ struct nvm_tgt_type *tmp, *tt = NULL;
- list_for_each_entry(tgt, &nvm_targets, list)
- if (!strcmp(name, tgt->disk->disk_name))
- return tgt;
+ if (lock)
+ down_write(&nvm_tgtt_lock);
- return NULL;
-}
-
-static struct nvm_tgt_type *nvm_find_target_type(const char *name)
-{
- struct nvm_tgt_type *tt;
-
- list_for_each_entry(tt, &nvm_tgt_types, list)
- if (!strcmp(name, tt->name))
- return tt;
+ list_for_each_entry(tmp, &nvm_tgt_types, list)
+ if (!strcmp(name, tmp->name)) {
+ tt = tmp;
+ break;
+ }
- return NULL;
+ if (lock)
+ up_write(&nvm_tgtt_lock);
+ return tt;
}
+EXPORT_SYMBOL(nvm_find_target_type);
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
- down_write(&nvm_lock);
- if (nvm_find_target_type(tt->name))
+ down_write(&nvm_tgtt_lock);
+ if (nvm_find_target_type(tt->name, 0))
ret = -EEXIST;
else
list_add(&tt->list, &nvm_tgt_types);
- up_write(&nvm_lock);
+ up_write(&nvm_tgtt_lock);
return ret;
}
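The new lock flag exists so nvm_register_tgt_type() can reuse the lookup while it already holds nvm_tgtt_lock, avoiding a recursive down_write(). Both call shapes, sketched:

	/* External caller: let the helper take nvm_tgtt_lock itself. */
	struct nvm_tgt_type *tt = nvm_find_target_type("rrpc", 1);

	/* Registration path: lock already held, so pass lock = 0. */
	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(new_tt->name, 0))
		ret = -EEXIST;
	up_write(&nvm_tgtt_lock);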
@@ -110,7 +104,7 @@ static struct nvmm_type *nvm_find_mgr_type(const char *name)
return NULL;
}
-struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
+static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
struct nvmm_type *mt;
int ret;
@@ -182,20 +176,6 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
return NULL;
}
-struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
- unsigned long flags)
-{
- return dev->mt->get_blk_unlocked(dev, lun, flags);
-}
-EXPORT_SYMBOL(nvm_get_blk_unlocked);
-
-/* Assumes that all valid pages have already been moved on release to bm */
-void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
-{
- return dev->mt->put_blk_unlocked(dev, blk);
-}
-EXPORT_SYMBOL(nvm_put_blk_unlocked);
-
struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
unsigned long flags)
{
@@ -210,6 +190,12 @@ void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
}
EXPORT_SYMBOL(nvm_put_blk);
+void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+{
+ return dev->mt->mark_blk(dev, ppa, type);
+}
+EXPORT_SYMBOL(nvm_mark_blk);
+
int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
return dev->mt->submit_io(dev, rqd);
@@ -251,9 +237,10 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
- struct ppa_addr *ppas, int nr_ppas, int vblk)
+ const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
int i, plane_cnt, pl_idx;
+ struct ppa_addr ppa;
if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
rqd->nr_ppas = nr_ppas;
@@ -278,8 +265,9 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
for (i = 0; i < nr_ppas; i++) {
for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
- ppas[i].g.pl = pl_idx;
- rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ ppa = ppas[i];
+ ppa.g.pl = pl_idx;
+ rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
}
}
}
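The index math lays the expanded list out plane-major, and the local ppa copy is what makes the new const qualifier on ppas honest. Worked through for hypothetical nr_ppas = 2 and plane_cnt = 2:

	/*
	 * index = (pl_idx * nr_ppas) + i:
	 *
	 *   ppa_list[0] = ppas[0] on plane 0
	 *   ppa_list[1] = ppas[1] on plane 0
	 *   ppa_list[2] = ppas[0] on plane 1
	 *   ppa_list[3] = ppas[1] on plane 1
	 *
	 * The caller's ppas[] is only read, never written.
	 */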
@@ -337,7 +325,7 @@ static void nvm_end_io_sync(struct nvm_rq *rqd)
complete(waiting);
}
-int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
+static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
int flags, void *buf, int len)
{
DECLARE_COMPLETION_ONSTACK(wait);
@@ -367,7 +355,9 @@ int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
if (hang_check)
- while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
+ while (!wait_for_completion_io_timeout(&wait,
+ hang_check * (HZ/2)))
+ ;
else
wait_for_completion_io(&wait);
@@ -510,7 +500,8 @@ static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
/* The lower page table encoding consists of a list of bytes, where each
* has a lower and an upper half. The first half byte maintains the
* increment value and every value after is an offset added to the
- * previous incrementation value */
+ * previous increment value
+ */
dev->lptbl[0] = mlc->pairs[0] & 0xF;
for (i = 1; i < dev->lps_per_blk; i++) {
p = mlc->pairs[i >> 1];
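A worked decode may help; this assumes the rest of the loop (not shown in the hunk) adds the upper nibble for odd i and the lower nibble for even i, as in the full function:

	/*
	 * pairs[] = { 0x21, 0x43 }, lps_per_blk = 4:
	 *
	 *   lptbl[0] = 0x21 & 0xF       = 1
	 *   lptbl[1] = 1 + (0x21 >> 4)  = 3    (upper half of byte 0)
	 *   lptbl[2] = 3 + (0x43 & 0xF) = 6    (lower half of byte 1)
	 *   lptbl[3] = 6 + (0x43 >> 4)  = 10   (upper half of byte 1)
	 *
	 * Each half-byte is a delta on the running total.
	 */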
@@ -596,42 +587,11 @@ err_fmtype:
return ret;
}
-static void nvm_remove_target(struct nvm_target *t)
-{
- struct nvm_tgt_type *tt = t->type;
- struct gendisk *tdisk = t->disk;
- struct request_queue *q = tdisk->queue;
-
- lockdep_assert_held(&nvm_lock);
-
- del_gendisk(tdisk);
- blk_cleanup_queue(q);
-
- if (tt->exit)
- tt->exit(tdisk->private_data);
-
- put_disk(tdisk);
-
- list_del(&t->list);
- kfree(t);
-}
-
static void nvm_free_mgr(struct nvm_dev *dev)
{
- struct nvm_target *tgt, *tmp;
-
if (!dev->mt)
return;
- down_write(&nvm_lock);
- list_for_each_entry_safe(tgt, tmp, &nvm_targets, list) {
- if (tgt->dev != dev)
- continue;
-
- nvm_remove_target(tgt);
- }
- up_write(&nvm_lock);
-
dev->mt->unregister_mgr(dev);
dev->mt = NULL;
}
@@ -778,91 +738,6 @@ void nvm_unregister(char *disk_name)
}
EXPORT_SYMBOL(nvm_unregister);
-static const struct block_device_operations nvm_fops = {
- .owner = THIS_MODULE,
-};
-
-static int nvm_create_target(struct nvm_dev *dev,
- struct nvm_ioctl_create *create)
-{
- struct nvm_ioctl_create_simple *s = &create->conf.s;
- struct request_queue *tqueue;
- struct gendisk *tdisk;
- struct nvm_tgt_type *tt;
- struct nvm_target *t;
- void *targetdata;
-
- if (!dev->mt) {
- pr_info("nvm: device has no media manager registered.\n");
- return -ENODEV;
- }
-
- down_write(&nvm_lock);
- tt = nvm_find_target_type(create->tgttype);
- if (!tt) {
- pr_err("nvm: target type %s not found\n", create->tgttype);
- up_write(&nvm_lock);
- return -EINVAL;
- }
-
- t = nvm_find_target(create->tgtname);
- if (t) {
- pr_err("nvm: target name already exists.\n");
- up_write(&nvm_lock);
- return -EINVAL;
- }
- up_write(&nvm_lock);
-
- t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
- if (!t)
- return -ENOMEM;
-
- tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
- if (!tqueue)
- goto err_t;
- blk_queue_make_request(tqueue, tt->make_rq);
-
- tdisk = alloc_disk(0);
- if (!tdisk)
- goto err_queue;
-
- sprintf(tdisk->disk_name, "%s", create->tgtname);
- tdisk->flags = GENHD_FL_EXT_DEVT;
- tdisk->major = 0;
- tdisk->first_minor = 0;
- tdisk->fops = &nvm_fops;
- tdisk->queue = tqueue;
-
- targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
- if (IS_ERR(targetdata))
- goto err_init;
-
- tdisk->private_data = targetdata;
- tqueue->queuedata = targetdata;
-
- blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
-
- set_capacity(tdisk, tt->capacity(targetdata));
- add_disk(tdisk);
-
- t->type = tt;
- t->disk = tdisk;
- t->dev = dev;
-
- down_write(&nvm_lock);
- list_add_tail(&t->list, &nvm_targets);
- up_write(&nvm_lock);
-
- return 0;
-err_init:
- put_disk(tdisk);
-err_queue:
- blk_cleanup_queue(tqueue);
-err_t:
- kfree(t);
- return -ENOMEM;
-}
-
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
@@ -871,11 +746,17 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
down_write(&nvm_lock);
dev = nvm_find_nvm_dev(create->dev);
up_write(&nvm_lock);
+
if (!dev) {
pr_err("nvm: device not found\n");
return -EINVAL;
}
+ if (!dev->mt) {
+ pr_info("nvm: device has no media manager registered.\n");
+ return -ENODEV;
+ }
+
if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
pr_err("nvm: config type not valid\n");
return -EINVAL;
@@ -888,25 +769,7 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- return nvm_create_target(dev, create);
-}
-
-static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
-{
- struct nvm_target *t;
-
- down_write(&nvm_lock);
- t = nvm_find_target(remove->tgtname);
- if (!t) {
- pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
- up_write(&nvm_lock);
- return -EINVAL;
- }
-
- nvm_remove_target(t);
- up_write(&nvm_lock);
-
- return 0;
+ return dev->mt->create_tgt(dev, create);
}
#ifdef CONFIG_NVM_DEBUG
@@ -941,8 +804,9 @@ static int nvm_configure_show(const char *val)
static int nvm_configure_remove(const char *val)
{
struct nvm_ioctl_remove remove;
+ struct nvm_dev *dev;
char opcode;
- int ret;
+ int ret = 0;
ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
if (ret != 2) {
@@ -952,7 +816,13 @@ static int nvm_configure_remove(const char *val)
remove.flags = 0;
- return __nvm_configure_remove(&remove);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ ret = dev->mt->remove_tgt(dev, &remove);
+ if (!ret)
+ break;
+ }
+
+ return ret;
}
static int nvm_configure_create(const char *val)
@@ -1149,6 +1019,8 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
struct nvm_ioctl_remove remove;
+ struct nvm_dev *dev;
+ int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1163,7 +1035,13 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
return -EINVAL;
}
- return __nvm_configure_remove(&remove);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ ret = dev->mt->remove_tgt(dev, &remove);
+ if (!ret)
+ break;
+ }
+
+ return ret;
}
static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index ec9fb6876e38..b74174c6d021 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -15,22 +15,160 @@
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
* USA.
*
- * Implementation of a generic nvm manager for Open-Channel SSDs.
+ * Implementation of a general nvm manager for Open-Channel SSDs.
*/
#include "gennvm.h"
-static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
+static struct nvm_target *gen_find_target(struct gen_dev *gn, const char *name)
{
- struct gen_nvm *gn = dev->mp;
- struct gennvm_area *area, *prev, *next;
+ struct nvm_target *tgt;
+
+ list_for_each_entry(tgt, &gn->targets, list)
+ if (!strcmp(name, tgt->disk->disk_name))
+ return tgt;
+
+ return NULL;
+}
+
+static const struct block_device_operations gen_fops = {
+ .owner = THIS_MODULE,
+};
+
+static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
+{
+ struct gen_dev *gn = dev->mp;
+ struct nvm_ioctl_create_simple *s = &create->conf.s;
+ struct request_queue *tqueue;
+ struct gendisk *tdisk;
+ struct nvm_tgt_type *tt;
+ struct nvm_target *t;
+ void *targetdata;
+
+ tt = nvm_find_target_type(create->tgttype, 1);
+ if (!tt) {
+ pr_err("nvm: target type %s not found\n", create->tgttype);
+ return -EINVAL;
+ }
+
+ mutex_lock(&gn->lock);
+ t = gen_find_target(gn, create->tgtname);
+ if (t) {
+ pr_err("nvm: target name already exists.\n");
+ mutex_unlock(&gn->lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&gn->lock);
+
+ t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
+ if (!tqueue)
+ goto err_t;
+ blk_queue_make_request(tqueue, tt->make_rq);
+
+ tdisk = alloc_disk(0);
+ if (!tdisk)
+ goto err_queue;
+
+ sprintf(tdisk->disk_name, "%s", create->tgtname);
+ tdisk->flags = GENHD_FL_EXT_DEVT;
+ tdisk->major = 0;
+ tdisk->first_minor = 0;
+ tdisk->fops = &gen_fops;
+ tdisk->queue = tqueue;
+
+ targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
+ if (IS_ERR(targetdata))
+ goto err_init;
+
+ tdisk->private_data = targetdata;
+ tqueue->queuedata = targetdata;
+
+ blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
+
+ set_capacity(tdisk, tt->capacity(targetdata));
+ add_disk(tdisk);
+
+ t->type = tt;
+ t->disk = tdisk;
+ t->dev = dev;
+
+ mutex_lock(&gn->lock);
+ list_add_tail(&t->list, &gn->targets);
+ mutex_unlock(&gn->lock);
+
+ return 0;
+err_init:
+ put_disk(tdisk);
+err_queue:
+ blk_cleanup_queue(tqueue);
+err_t:
+ kfree(t);
+ return -ENOMEM;
+}
+
+static void __gen_remove_target(struct nvm_target *t)
+{
+ struct nvm_tgt_type *tt = t->type;
+ struct gendisk *tdisk = t->disk;
+ struct request_queue *q = tdisk->queue;
+
+ del_gendisk(tdisk);
+ blk_cleanup_queue(q);
+
+ if (tt->exit)
+ tt->exit(tdisk->private_data);
+
+ put_disk(tdisk);
+
+ list_del(&t->list);
+ kfree(t);
+}
+
+/**
+ * gen_remove_tgt - Removes a target from the media manager
+ * @dev: device
+ * @remove: ioctl structure with target name to remove.
+ *
+ * Returns:
+ * 0: on success
+ * 1: if the target was not found
+ * <0: on error
+ */
+static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
+{
+ struct gen_dev *gn = dev->mp;
+ struct nvm_target *t;
+
+ if (!gn)
+ return 1;
+
+ mutex_lock(&gn->lock);
+ t = gen_find_target(gn, remove->tgtname);
+ if (!t) {
+ mutex_unlock(&gn->lock);
+ return 1;
+ }
+ __gen_remove_target(t);
+ mutex_unlock(&gn->lock);
+
+ return 0;
+}
+
+static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
+{
+ struct gen_dev *gn = dev->mp;
+ struct gen_area *area, *prev, *next;
sector_t begin = 0;
sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
if (len > max_sectors)
return -EINVAL;
- area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+ area = kmalloc(sizeof(struct gen_area), GFP_KERNEL);
if (!area)
return -ENOMEM;
@@ -64,10 +202,10 @@ static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
return 0;
}
-static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+static void gen_put_area(struct nvm_dev *dev, sector_t begin)
{
- struct gen_nvm *gn = dev->mp;
- struct gennvm_area *area;
+ struct gen_dev *gn = dev->mp;
+ struct gen_area *area;
spin_lock(&dev->lock);
list_for_each_entry(area, &gn->area_list, list) {
@@ -82,27 +220,27 @@ static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
spin_unlock(&dev->lock);
}
-static void gennvm_blocks_free(struct nvm_dev *dev)
+static void gen_blocks_free(struct nvm_dev *dev)
{
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
struct gen_lun *lun;
int i;
- gennvm_for_each_lun(gn, lun, i) {
+ gen_for_each_lun(gn, lun, i) {
if (!lun->vlun.blocks)
break;
vfree(lun->vlun.blocks);
}
}
-static void gennvm_luns_free(struct nvm_dev *dev)
+static void gen_luns_free(struct nvm_dev *dev)
{
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
kfree(gn->luns);
}
-static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
+static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
{
struct gen_lun *lun;
int i;
@@ -111,7 +249,7 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
if (!gn->luns)
return -ENOMEM;
- gennvm_for_each_lun(gn, lun, i) {
+ gen_for_each_lun(gn, lun, i) {
spin_lock_init(&lun->vlun.lock);
INIT_LIST_HEAD(&lun->free_list);
INIT_LIST_HEAD(&lun->used_list);
@@ -122,14 +260,11 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
lun->vlun.lun_id = i % dev->luns_per_chnl;
lun->vlun.chnl_id = i / dev->luns_per_chnl;
lun->vlun.nr_free_blocks = dev->blks_per_lun;
- lun->vlun.nr_open_blocks = 0;
- lun->vlun.nr_closed_blocks = 0;
- lun->vlun.nr_bad_blocks = 0;
}
return 0;
}
-static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
+static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
u8 *blks, int nr_blks)
{
struct nvm_dev *dev = gn->dev;
@@ -149,17 +284,16 @@ static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
blk = &lun->vlun.blocks[i];
list_move_tail(&blk->list, &lun->bb_list);
- lun->vlun.nr_bad_blocks++;
lun->vlun.nr_free_blocks--;
}
return 0;
}
-static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
+static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
struct nvm_dev *dev = private;
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
u64 elba = slba + nlb;
struct gen_lun *lun;
struct nvm_block *blk;
@@ -167,7 +301,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
int lun_id;
if (unlikely(elba > dev->total_secs)) {
- pr_err("gennvm: L2P data from device is out of bounds!\n");
+ pr_err("gen: L2P data from device is out of bounds!\n");
return -EINVAL;
}
@@ -175,7 +309,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
u64 pba = le64_to_cpu(entries[i]);
if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("gennvm: L2P data entry is out of bounds!\n");
+ pr_err("gen: L2P data entry is out of bounds!\n");
return -EINVAL;
}
@@ -200,16 +334,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
* block state. The block is assumed to be open.
*/
list_move_tail(&blk->list, &lun->used_list);
- blk->state = NVM_BLK_ST_OPEN;
+ blk->state = NVM_BLK_ST_TGT;
lun->vlun.nr_free_blocks--;
- lun->vlun.nr_open_blocks++;
}
}
return 0;
}
-static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
+static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
{
struct gen_lun *lun;
struct nvm_block *block;
@@ -222,7 +355,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
if (!blks)
return -ENOMEM;
- gennvm_for_each_lun(gn, lun, lun_iter) {
+ gen_for_each_lun(gn, lun, lun_iter) {
lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
dev->blks_per_lun);
if (!lun->vlun.blocks) {
@@ -256,20 +389,20 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
ret = nvm_get_bb_tbl(dev, ppa, blks);
if (ret)
- pr_err("gennvm: could not get BB table\n");
+ pr_err("gen: could not get BB table\n");
- ret = gennvm_block_bb(gn, ppa, blks, nr_blks);
+ ret = gen_block_bb(gn, ppa, blks, nr_blks);
if (ret)
- pr_err("gennvm: BB table map failed\n");
+ pr_err("gen: BB table map failed\n");
}
}
if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
- gennvm_block_map, dev);
+ gen_block_map, dev);
if (ret) {
- pr_err("gennvm: could not read L2P table.\n");
- pr_warn("gennvm: default block initialization");
+ pr_err("gen: could not read L2P table.\n");
+ pr_warn("gen: default block initialization");
}
}
@@ -277,67 +410,79 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
return 0;
}
-static void gennvm_free(struct nvm_dev *dev)
+static void gen_free(struct nvm_dev *dev)
{
- gennvm_blocks_free(dev);
- gennvm_luns_free(dev);
+ gen_blocks_free(dev);
+ gen_luns_free(dev);
kfree(dev->mp);
dev->mp = NULL;
}
-static int gennvm_register(struct nvm_dev *dev)
+static int gen_register(struct nvm_dev *dev)
{
- struct gen_nvm *gn;
+ struct gen_dev *gn;
int ret;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
- gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
+ gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
if (!gn)
return -ENOMEM;
gn->dev = dev;
gn->nr_luns = dev->nr_luns;
INIT_LIST_HEAD(&gn->area_list);
+ mutex_init(&gn->lock);
+ INIT_LIST_HEAD(&gn->targets);
dev->mp = gn;
- ret = gennvm_luns_init(dev, gn);
+ ret = gen_luns_init(dev, gn);
if (ret) {
- pr_err("gennvm: could not initialize luns\n");
+ pr_err("gen: could not initialize luns\n");
goto err;
}
- ret = gennvm_blocks_init(dev, gn);
+ ret = gen_blocks_init(dev, gn);
if (ret) {
- pr_err("gennvm: could not initialize blocks\n");
+ pr_err("gen: could not initialize blocks\n");
goto err;
}
return 1;
err:
- gennvm_free(dev);
+ gen_free(dev);
module_put(THIS_MODULE);
return ret;
}
-static void gennvm_unregister(struct nvm_dev *dev)
+static void gen_unregister(struct nvm_dev *dev)
{
- gennvm_free(dev);
+ struct gen_dev *gn = dev->mp;
+ struct nvm_target *t, *tmp;
+
+ mutex_lock(&gn->lock);
+ list_for_each_entry_safe(t, tmp, &gn->targets, list) {
+ if (t->dev != dev)
+ continue;
+ __gen_remove_target(t);
+ }
+ mutex_unlock(&gn->lock);
+
+ gen_free(dev);
module_put(THIS_MODULE);
}
-static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
+static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
struct nvm_lun *vlun, unsigned long flags)
{
struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
struct nvm_block *blk = NULL;
int is_gc = flags & NVM_IOTYPE_GC;
- assert_spin_locked(&vlun->lock);
-
+ spin_lock(&vlun->lock);
if (list_empty(&lun->free_list)) {
- pr_err_ratelimited("gennvm: lun %u have no free pages available",
+ pr_err_ratelimited("gen: lun %u have no free pages available",
lun->vlun.id);
goto out;
}
@@ -346,88 +491,58 @@ static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
goto out;
blk = list_first_entry(&lun->free_list, struct nvm_block, list);
- list_move_tail(&blk->list, &lun->used_list);
- blk->state = NVM_BLK_ST_OPEN;
+ list_move_tail(&blk->list, &lun->used_list);
+ blk->state = NVM_BLK_ST_TGT;
lun->vlun.nr_free_blocks--;
- lun->vlun.nr_open_blocks++;
-
out:
- return blk;
-}
-
-static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
- struct nvm_lun *vlun, unsigned long flags)
-{
- struct nvm_block *blk;
-
- spin_lock(&vlun->lock);
- blk = gennvm_get_blk_unlocked(dev, vlun, flags);
spin_unlock(&vlun->lock);
return blk;
}
-static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
+static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
struct nvm_lun *vlun = blk->lun;
struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
- assert_spin_locked(&vlun->lock);
-
- if (blk->state & NVM_BLK_ST_OPEN) {
- list_move_tail(&blk->list, &lun->free_list);
- lun->vlun.nr_open_blocks--;
- lun->vlun.nr_free_blocks++;
- blk->state = NVM_BLK_ST_FREE;
- } else if (blk->state & NVM_BLK_ST_CLOSED) {
+ spin_lock(&vlun->lock);
+ if (blk->state & NVM_BLK_ST_TGT) {
list_move_tail(&blk->list, &lun->free_list);
- lun->vlun.nr_closed_blocks--;
lun->vlun.nr_free_blocks++;
blk->state = NVM_BLK_ST_FREE;
} else if (blk->state & NVM_BLK_ST_BAD) {
list_move_tail(&blk->list, &lun->bb_list);
- lun->vlun.nr_bad_blocks++;
blk->state = NVM_BLK_ST_BAD;
} else {
WARN_ON_ONCE(1);
- pr_err("gennvm: erroneous block type (%lu -> %u)\n",
+ pr_err("gen: erroneous block type (%lu -> %u)\n",
blk->id, blk->state);
list_move_tail(&blk->list, &lun->bb_list);
- lun->vlun.nr_bad_blocks++;
- blk->state = NVM_BLK_ST_BAD;
}
-}
-
-static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
-{
- struct nvm_lun *vlun = blk->lun;
-
- spin_lock(&vlun->lock);
- gennvm_put_blk_unlocked(dev, blk);
spin_unlock(&vlun->lock);
}
-static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
struct gen_lun *lun;
struct nvm_block *blk;
- pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
+ pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
if (unlikely(ppa.g.ch > dev->nr_chnls ||
ppa.g.lun > dev->luns_per_chnl ||
ppa.g.blk > dev->blks_per_lun)) {
WARN_ON_ONCE(1);
- pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
+ pr_err("gen: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
ppa.g.ch, dev->nr_chnls,
ppa.g.lun, dev->luns_per_chnl,
ppa.g.blk, dev->blks_per_lun);
return;
}
- lun = &gn->luns[ppa.g.lun * ppa.g.ch];
+ lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
blk = &lun->vlun.blocks[ppa.g.blk];
/* will be moved to bb list on put_blk from target */
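The old expression multiplied the lun index by the channel number, which returns lun 0 for every PPA on channel 0 and an essentially random lun elsewhere; the fix is the usual channel-major linearization. A quick check with hypothetical values:

	/*
	 * luns_per_chnl = 4, ppa.g.ch = 1, ppa.g.lun = 2:
	 *
	 *   old: ppa.g.lun * ppa.g.ch        = 2  (lun 2 of channel 0 - wrong)
	 *   new: (luns_per_chnl * ch) + lun  = 6  (lun 2 of channel 1 - right)
	 */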
@@ -435,9 +550,9 @@ static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
}
/*
- * mark block bad in gennvm. It is expected that the target recovers separately
+ * mark block bad in gen. It is expected that the target recovers separately
*/
-static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
+static void gen_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int bit = -1;
int max_secs = dev->ops->max_phys_sect;
@@ -447,25 +562,25 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
/* look up blocks and mark them as bad */
if (rqd->nr_ppas == 1) {
- gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
+ gen_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
return;
}
while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
- gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
+ gen_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
}
-static void gennvm_end_io(struct nvm_rq *rqd)
+static void gen_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_instance *ins = rqd->ins;
if (rqd->error == NVM_RSP_ERR_FAILWRITE)
- gennvm_mark_blk_bad(rqd->dev, rqd);
+ gen_mark_blk_bad(rqd->dev, rqd);
ins->tt->end_io(rqd);
}
-static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
if (!dev->ops->submit_io)
return -ENODEV;
@@ -474,11 +589,11 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
nvm_generic_to_addr_mode(dev, rqd);
rqd->dev = dev;
- rqd->end_io = gennvm_end_io;
+ rqd->end_io = gen_end_io;
return dev->ops->submit_io(dev, rqd);
}
-static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
+static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
unsigned long flags)
{
struct ppa_addr addr = block_to_ppa(dev, blk);
@@ -486,19 +601,19 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
return nvm_erase_ppa(dev, &addr, 1);
}
-static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
+static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
{
return test_and_set_bit(lunid, dev->lun_map);
}
-static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
+static void gen_release_lun(struct nvm_dev *dev, int lunid)
{
WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}
-static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
+static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
{
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
if (unlikely(lunid >= dev->nr_luns))
return NULL;
@@ -506,66 +621,62 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
return &gn->luns[lunid].vlun;
}
-static void gennvm_lun_info_print(struct nvm_dev *dev)
+static void gen_lun_info_print(struct nvm_dev *dev)
{
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
struct gen_lun *lun;
unsigned int i;
- gennvm_for_each_lun(gn, lun, i) {
+ gen_for_each_lun(gn, lun, i) {
spin_lock(&lun->vlun.lock);
- pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
- dev->name, i,
- lun->vlun.nr_free_blocks,
- lun->vlun.nr_open_blocks,
- lun->vlun.nr_closed_blocks,
- lun->vlun.nr_bad_blocks);
+ pr_info("%s: lun%8u\t%u\n", dev->name, i,
+ lun->vlun.nr_free_blocks);
spin_unlock(&lun->vlun.lock);
}
}
-static struct nvmm_type gennvm = {
+static struct nvmm_type gen = {
.name = "gennvm",
.version = {0, 1, 0},
- .register_mgr = gennvm_register,
- .unregister_mgr = gennvm_unregister,
+ .register_mgr = gen_register,
+ .unregister_mgr = gen_unregister,
- .get_blk_unlocked = gennvm_get_blk_unlocked,
- .put_blk_unlocked = gennvm_put_blk_unlocked,
+ .create_tgt = gen_create_tgt,
+ .remove_tgt = gen_remove_tgt,
- .get_blk = gennvm_get_blk,
- .put_blk = gennvm_put_blk,
+ .get_blk = gen_get_blk,
+ .put_blk = gen_put_blk,
- .submit_io = gennvm_submit_io,
- .erase_blk = gennvm_erase_blk,
+ .submit_io = gen_submit_io,
+ .erase_blk = gen_erase_blk,
- .mark_blk = gennvm_mark_blk,
+ .mark_blk = gen_mark_blk,
- .get_lun = gennvm_get_lun,
- .reserve_lun = gennvm_reserve_lun,
- .release_lun = gennvm_release_lun,
- .lun_info_print = gennvm_lun_info_print,
+ .get_lun = gen_get_lun,
+ .reserve_lun = gen_reserve_lun,
+ .release_lun = gen_release_lun,
+ .lun_info_print = gen_lun_info_print,
- .get_area = gennvm_get_area,
- .put_area = gennvm_put_area,
+ .get_area = gen_get_area,
+ .put_area = gen_put_area,
};
-static int __init gennvm_module_init(void)
+static int __init gen_module_init(void)
{
- return nvm_register_mgr(&gennvm);
+ return nvm_register_mgr(&gen);
}
-static void gennvm_module_exit(void)
+static void gen_module_exit(void)
{
- nvm_unregister_mgr(&gennvm);
+ nvm_unregister_mgr(&gen);
}
-module_init(gennvm_module_init);
-module_exit(gennvm_module_exit);
+module_init(gen_module_init);
+module_exit(gen_module_exit);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");
+MODULE_DESCRIPTION("General media manager for Open-Channel SSDs");
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 04d7c23cfc61..8ecfa817d21d 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -34,20 +34,24 @@ struct gen_lun {
*/
};
-struct gen_nvm {
+struct gen_dev {
struct nvm_dev *dev;
int nr_luns;
struct gen_lun *luns;
struct list_head area_list;
+
+ struct mutex lock;
+ struct list_head targets;
};
-struct gennvm_area {
+struct gen_area {
struct list_head list;
sector_t begin;
sector_t end; /* end is excluded */
};
-#define gennvm_for_each_lun(bm, lun, i) \
+
+#define gen_for_each_lun(bm, lun, i) \
for ((i) = 0, lun = &(bm)->luns[0]; \
(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 2103e97a974f..37fcaadbf80c 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -48,7 +48,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
}
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
- unsigned len)
+ unsigned int len)
{
sector_t i;
@@ -96,10 +96,13 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
struct nvm_rq *rqd;
- do {
+ while (1) {
rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
+ if (rqd)
+ break;
+
schedule();
- } while (!rqd);
+ }
if (IS_ERR(rqd)) {
pr_err("rrpc: unable to acquire inflight IO\n");
@@ -172,39 +175,32 @@ static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
}
/* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
+static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
+ struct rrpc_block **cur_rblk)
{
struct rrpc *rrpc = rlun->rrpc;
- BUG_ON(!rblk);
-
- if (rlun->cur) {
- spin_lock(&rlun->cur->lock);
- WARN_ON(!block_is_full(rrpc, rlun->cur));
- spin_unlock(&rlun->cur->lock);
+ if (*cur_rblk) {
+ spin_lock(&(*cur_rblk)->lock);
+ WARN_ON(!block_is_full(rrpc, *cur_rblk));
+ spin_unlock(&(*cur_rblk)->lock);
}
- rlun->cur = rblk;
+ *cur_rblk = new_rblk;
}
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
unsigned long flags)
{
- struct nvm_lun *lun = rlun->parent;
struct nvm_block *blk;
struct rrpc_block *rblk;
- spin_lock(&lun->lock);
- blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
+ blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
if (!blk) {
pr_err("nvm: rrpc: cannot get new block from media manager\n");
- spin_unlock(&lun->lock);
return NULL;
}
rblk = rrpc_get_rblk(rlun, blk->id);
- list_add_tail(&rblk->list, &rlun->open_list);
- spin_unlock(&lun->lock);
-
blk->priv = rblk;
bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
rblk->next_page = 0;
@@ -216,13 +212,7 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_lun *lun = rlun->parent;
-
- spin_lock(&lun->lock);
- nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
- list_del(&rblk->list);
- spin_unlock(&lun->lock);
+ nvm_put_blk(rrpc->dev, rblk->parent);
}
static void rrpc_put_blks(struct rrpc *rrpc)
@@ -342,7 +332,7 @@ try:
/* Perform read to do GC */
bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_private = &wait;
bio->bi_end_io = rrpc_end_sync_bio;
@@ -364,7 +354,7 @@ try:
reinit_completion(&wait);
bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_private = &wait;
bio->bi_end_io = rrpc_end_sync_bio;
@@ -508,21 +498,11 @@ static void rrpc_gc_queue(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_lun *lun = rblk->parent->lun;
- struct nvm_block *blk = rblk->parent;
spin_lock(&rlun->lock);
list_add_tail(&rblk->prio, &rlun->prio_list);
spin_unlock(&rlun->lock);
- spin_lock(&lun->lock);
- lun->nr_open_blocks--;
- lun->nr_closed_blocks++;
- blk->state &= ~NVM_BLK_ST_OPEN;
- blk->state |= NVM_BLK_ST_CLOSED;
- list_move_tail(&rblk->list, &rlun->closed_list);
- spin_unlock(&lun->lock);
-
mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
rblk->parent->id);
@@ -596,21 +576,20 @@ out:
return addr;
}
-/* Simple round-robin Logical to physical address translation.
- *
- * Retrieve the mapping using the active append point. Then update the ap for
- * the next write to the disk.
+/* Map logical address to a physical page. The mapping implements a round robin
+ * approach and allocates a page from the next available lun.
*
- * Returns rrpc_addr with the physical address and block. Remember to return to
- * rrpc->addr_cache when request is finished.
+ * Returns rrpc_addr with the physical address and block. Returns NULL if no
+ * blocks in the next rlun are available.
*/
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
int is_gc)
{
struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
+ struct rrpc_block *rblk, **cur_rblk;
struct nvm_lun *lun;
u64 paddr;
+ int gc_force = 0;
rlun = rrpc_get_lun_rr(rrpc, is_gc);
lun = rlun->parent;
@@ -618,41 +597,65 @@ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
return NULL;
- spin_lock(&rlun->lock);
+ /*
+ * page allocation steps:
+ * 1. Try to allocate a new page from the current rblk
+ * 2a. If that succeeds, map it in and return
+ * 2b. If that fails, first try to allocate a new block from the media
+ * manager, and then retry step 1. Retry until the normal block pool
+ * is exhausted.
+ * 3. If exhausted, and the garbage collector is requesting the block,
+ * go to the reserved block and retry step 1.
+ * If this fails as well, or GC is not the requester, report that no
+ * block could be retrieved and let the caller handle further
+ * processing.
+ */
+ spin_lock(&rlun->lock);
+ cur_rblk = &rlun->cur;
rblk = rlun->cur;
retry:
paddr = rrpc_alloc_addr(rrpc, rblk);
- if (paddr == ADDR_EMPTY) {
- rblk = rrpc_get_blk(rrpc, rlun, 0);
- if (rblk) {
- rrpc_set_lun_cur(rlun, rblk);
- goto retry;
- }
+ if (paddr != ADDR_EMPTY)
+ goto done;
- if (is_gc) {
- /* retry from emergency gc block */
- paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
- if (paddr == ADDR_EMPTY) {
- rblk = rrpc_get_blk(rrpc, rlun, 1);
- if (!rblk) {
- pr_err("rrpc: no more blocks");
- goto err;
- }
-
- rlun->gc_cur = rblk;
- paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
- }
- rblk = rlun->gc_cur;
- }
+ if (!list_empty(&rlun->wblk_list)) {
+new_blk:
+ rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
+ prio);
+ rrpc_set_lun_cur(rlun, rblk, cur_rblk);
+ list_del(&rblk->prio);
+ goto retry;
+ }
+ spin_unlock(&rlun->lock);
+
+ rblk = rrpc_get_blk(rrpc, rlun, gc_force);
+ if (rblk) {
+ spin_lock(&rlun->lock);
+ list_add_tail(&rblk->prio, &rlun->wblk_list);
+ /*
+ * another thread might already have added a new block, so
+ * make sure that one is used instead of the one just
+ * added.
+ */
+ goto new_blk;
}
+ if (unlikely(is_gc) && !gc_force) {
+ /* retry from emergency gc block */
+ cur_rblk = &rlun->gc_cur;
+ rblk = rlun->gc_cur;
+ gc_force = 1;
+ spin_lock(&rlun->lock);
+ goto retry;
+ }
+
+ pr_err("rrpc: failed to allocate new block\n");
+ return NULL;
+done:
spin_unlock(&rlun->lock);
return rrpc_update_map(rrpc, laddr, rblk, paddr);
-err:
- spin_unlock(&rlun->lock);
- return NULL;
}
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -850,14 +853,14 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
return NVM_IO_ERR;
}
- if (bio_rw(bio) == WRITE)
+ if (bio_op(bio) == REQ_OP_WRITE)
return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
npages);
return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
}
- if (bio_rw(bio) == WRITE)
+ if (bio_op(bio) == REQ_OP_WRITE)
return rrpc_write_rq(rrpc, bio, rqd, flags);
return rrpc_read_rq(rrpc, bio, rqd, flags);
@@ -908,7 +911,7 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
struct nvm_rq *rqd;
int err;
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio_op(bio) == REQ_OP_DISCARD) {
rrpc_discard(rrpc, bio);
return BLK_QC_T_NONE;
}
@@ -1196,8 +1199,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
rlun->rrpc = rrpc;
INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->open_list);
- INIT_LIST_HEAD(&rlun->closed_list);
+ INIT_LIST_HEAD(&rlun->wblk_list);
INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
spin_lock_init(&rlun->lock);
@@ -1338,14 +1340,13 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
rblk = rrpc_get_blk(rrpc, rlun, 0);
if (!rblk)
goto err;
-
- rrpc_set_lun_cur(rlun, rblk);
+ rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
/* Emergency gc block */
rblk = rrpc_get_blk(rrpc, rlun, 1);
if (!rblk)
goto err;
- rlun->gc_cur = rblk;
+ rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
}
return 0;
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 87e84b5fc1cc..5e87d52cb983 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -56,7 +56,6 @@ struct rrpc_block {
struct nvm_block *parent;
struct rrpc_lun *rlun;
struct list_head prio;
- struct list_head list;
#define MAX_INVALID_PAGES_STORAGE 8
/* Bitmap for invalid page entries */
@@ -77,13 +76,7 @@ struct rrpc_lun {
struct rrpc_block *blocks; /* Reference to block allocation */
struct list_head prio_list; /* Blocks that may be GC'ed */
- struct list_head open_list; /* In-use open blocks. These are blocks
- * that can be both written to and read
- * from
- */
- struct list_head closed_list; /* In-use closed blocks. These are
- * blocks that can _only_ be read from
- */
+ struct list_head wblk_list; /* Queued blocks to be written to */
struct work_struct ws_gc;
@@ -188,7 +181,7 @@ static inline int request_intersects(struct rrpc_inflight_rq *r,
}
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned pages, struct rrpc_inflight_rq *r)
+ unsigned int pages, struct rrpc_inflight_rq *r)
{
sector_t laddr_end = laddr + pages - 1;
struct rrpc_inflight_rq *rtmp;
@@ -213,7 +206,7 @@ static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
}
static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned pages,
+ unsigned int pages,
struct rrpc_inflight_rq *r)
{
BUG_ON((laddr + pages) > rrpc->nr_sects);
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index 994697ac786e..a75bd28aaca3 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -39,7 +39,8 @@ static inline int scan_ppa_idx(int row, int blkid)
return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}
-void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
+static void nvm_sysblk_to_cpu(struct nvm_sb_info *info,
+ struct nvm_system_block *sb)
{
info->seqnr = be32_to_cpu(sb->seqnr);
info->erase_cnt = be32_to_cpu(sb->erase_cnt);
@@ -48,7 +49,8 @@ void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}
-void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
+static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
+ struct nvm_sb_info *info)
{
sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
sb->seqnr = cpu_to_be32(info->seqnr);
@@ -86,7 +88,7 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
return nr_rows;
}
-void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
+static void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
struct ppa_addr *sysblk_ppas)
{
memset(s, 0, sizeof(struct sysblk_scan));
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 52ba8dd82821..3cbda1af87a0 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -3,7 +3,8 @@
#
dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
- dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o
+ dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o \
+ dm-rq.o
dm-multipath-y += dm-path-selector.o dm-mpath.o
dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-snap-persistent.o
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index eab505ee0027..76f7534d1dd1 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -294,10 +294,10 @@ static void bch_btree_node_read(struct btree *b)
closure_init_stack(&cl);
bio = bch_bbio_alloc(b->c);
- bio->bi_rw = REQ_META|READ_SYNC;
bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = &cl;
+ bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
bch_bio_map(bio, b->keys.set[0].data);
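The bcache hunks in this series all follow the same conversion: the operation moves out of bi_rw into bio_set_op_attrs(bio, op, flags), and submit_bio()/submit_bio_wait() lose their rw argument because the bio now carries it. Schematically:

	/* before */
	bio->bi_rw = REQ_META|READ_SYNC;
	submit_bio_wait(READ_SYNC, bio);

	/* after: op and flags live on the bio itself */
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
	submit_bio_wait(bio);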
@@ -396,8 +396,8 @@ static void do_btree_node_write(struct btree *b)
b->bio->bi_end_io = btree_node_write_endio;
b->bio->bi_private = cl;
- b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
+ bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
bch_bio_map(b->bio, i);
/*
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 9eaf1d6e8302..864e673aec39 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -112,7 +112,7 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
EXPORT_SYMBOL(closure_wait);
/**
- * closure_sync - sleep until a closure a closure has nothing left to wait on
+ * closure_sync - sleep until a closure has nothing left to wait on
*
* Sleeps until the refcount hits 1 - the thread that's running the closure owns
* the last refcount.
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 782cc2c8a185..9b2fe2d3e3a9 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -31,7 +31,8 @@
* passing it, as you might expect, the function to run when nothing is pending
* and the workqueue to run that function out of.
*
- * continue_at() also, critically, is a macro that returns the calling function.
+ * continue_at() also, critically, requires a 'return' immediately following the
+ * location where this macro is referenced, to return to the calling function.
* There's good reason for this.
*
 * To use closures safely in asynchronous code, they must always have a refcount while
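A hedged sketch of what the corrected comment requires of callers (function and workqueue names are illustrative, not from bcache):

	static void my_stage_two(struct closure *cl)
	{
		/* resumes here later, out of 'wq' */
	}

	static void my_stage_one(struct closure *cl)
	{
		/* ...queue async work that holds the closure's refcount... */
		continue_at(cl, my_stage_two, wq);
		/*
		 * Nothing may execute past continue_at(): control must
		 * return immediately, since the macro no longer returns
		 * on the caller's behalf.
		 */
	}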
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 8b1f1d5c1819..c28df164701e 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -52,9 +52,10 @@ void bch_btree_verify(struct btree *b)
bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
+ bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
bch_bio_map(bio, sorted);
- submit_bio_wait(REQ_META|READ_SYNC, bio);
+ submit_bio_wait(bio);
bch_bbio_free(bio, b->c);
memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
@@ -113,11 +114,12 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
check = bio_clone(bio, GFP_NOIO);
if (!check)
return;
+ bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
- submit_bio_wait(READ_SYNC, check);
+ submit_bio_wait(check);
bio_for_each_segment(bv, bio, iter) {
void *p1 = kmap_atomic(bv.bv_page);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 86a0bb87124e..e97b0acf7b8d 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -25,7 +25,6 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bio *bio = &b->bio;
bio_init(bio);
- bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
bio->bi_max_vecs = bucket_pages(c);
bio->bi_io_vec = bio->bi_inline_vecs;
@@ -111,7 +110,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
struct bbio *b = container_of(bio, struct bbio, bio);
struct cache *ca = PTR_CACHE(c, &b->key, 0);
- unsigned threshold = bio->bi_rw & REQ_WRITE
+ unsigned threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us
: c->congested_read_threshold_us;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 29eba7219b01..6925023e12d4 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -54,11 +54,11 @@ reread: left = ca->sb.bucket_size - offset;
bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset;
bio->bi_bdev = ca->bdev;
- bio->bi_rw = READ;
bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
bch_bio_map(bio, data);
closure_bio_submit(bio, &cl);
@@ -418,7 +418,7 @@ static void journal_discard_work(struct work_struct *work)
struct journal_device *ja =
container_of(work, struct journal_device, discard_work);
- submit_bio(0, &ja->discard_bio);
+ submit_bio(&ja->discard_bio);
}
static void do_journal_discard(struct cache *ca)
@@ -449,10 +449,10 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
bio_init(bio);
+ bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_WRITE|REQ_DISCARD;
bio->bi_max_vecs = 1;
bio->bi_io_vec = bio->bi_inline_vecs;
bio->bi_iter.bi_size = bucket_bytes(ca);
@@ -626,11 +626,12 @@ static void journal_write_unlocked(struct closure *cl)
bio_reset(bio);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
+ bio_set_op_attrs(bio, REQ_OP_WRITE,
+ REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index b929fc944e9c..1881319f2298 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -163,7 +163,7 @@ static void read_moving(struct cache_set *c)
moving_init(io);
bio = &io->bio.bio;
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_end_io = read_moving_endio;
if (bio_alloc_pages(bio, GFP_KERNEL))
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 25fa8445bb24..69f16f43f8ab 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -205,10 +205,10 @@ static void bch_data_insert_start(struct closure *cl)
return bch_data_invalidate(cl);
/*
- * Journal writes are marked REQ_FLUSH; if the original write was a
+ * Journal writes are marked REQ_PREFLUSH; if the original write was a
* flush, it'll wait on the journal write.
*/
- bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+ bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
do {
unsigned i;
@@ -253,7 +253,7 @@ static void bch_data_insert_start(struct closure *cl)
trace_bcache_cache_insert(k);
bch_keylist_push(&op->insert_keys);
- n->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(n, REQ_OP_WRITE, 0);
bch_submit_bbio(n, op->c, k, 0);
} while (n != bio);
@@ -378,12 +378,12 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
- (bio->bi_rw & REQ_DISCARD))
+ (bio_op(bio) == REQ_OP_DISCARD))
goto skip;
if (mode == CACHE_MODE_NONE ||
(mode == CACHE_MODE_WRITEAROUND &&
- (bio->bi_rw & REQ_WRITE)))
+ op_is_write(bio_op(bio))))
goto skip;
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
@@ -404,7 +404,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (!congested &&
mode == CACHE_MODE_WRITEBACK &&
- (bio->bi_rw & REQ_WRITE) &&
+ op_is_write(bio_op(bio)) &&
(bio->bi_rw & REQ_SYNC))
goto rescale;
@@ -657,7 +657,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->cache_miss = NULL;
s->d = d;
s->recoverable = 1;
- s->write = (bio->bi_rw & REQ_WRITE) != 0;
+ s->write = op_is_write(bio_op(bio));
s->read_dirty_data = 0;
s->start_time = jiffies;
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->iop.write_prio = 0;
s->iop.error = 0;
s->iop.flags = 0;
- s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+ s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
s->iop.wq = bcache_wq;
return s;
@@ -899,7 +899,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
* But check_overlapping drops dirty keys for which io hasn't started,
* so we still want to call it.
*/
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD)
s->iop.bypass = true;
if (should_writeback(dc, s->orig_bio,
@@ -913,22 +913,22 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
s->iop.bio = s->orig_bio;
bio_get(s->iop.bio);
- if (!(bio->bi_rw & REQ_DISCARD) ||
+ if ((bio_op(bio) != REQ_OP_DISCARD) ||
blk_queue_discard(bdev_get_queue(dc->bdev)))
closure_bio_submit(bio, cl);
} else if (s->iop.writeback) {
bch_writeback_add(dc);
s->iop.bio = bio;
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_rw & REQ_PREFLUSH) {
/* Also need to send a flush to the backing device */
struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
dc->disk.bio_split);
- flush->bi_rw = WRITE_FLUSH;
flush->bi_bdev = bio->bi_bdev;
flush->bi_end_io = request_endio;
flush->bi_private = cl;
+ bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
closure_bio_submit(flush, cl);
}
@@ -992,7 +992,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
cached_dev_read(dc, s);
}
} else {
- if ((bio->bi_rw & REQ_DISCARD) &&
+ if ((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(dc->bdev)))
bio_endio(bio);
else
@@ -1103,7 +1103,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
- s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
+ s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
s->iop.writeback = true;
s->iop.bio = bio;
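
Every test in request.c that used to mask bi_rw against REQ_DISCARD or REQ_WRITE now asks the operation directly: discards are compared by equality on bio_op(), and write-ness comes from op_is_write(). A hypothetical helper (not in the patch) condensing the bypass checks above:

	static bool bypasses_cache(struct bio *bio, unsigned mode)
	{
		if (bio_op(bio) == REQ_OP_DISCARD)
			return true;	/* discards never populate the cache */

		return mode == CACHE_MODE_NONE ||
		       (mode == CACHE_MODE_WRITEAROUND &&
			op_is_write(bio_op(bio)));
	}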
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f5dbb4e884d8..88ef6d14cce3 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -134,7 +134,6 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
case BCACHE_SB_VERSION_CDEV:
case BCACHE_SB_VERSION_CDEV_WITH_UUID:
sb->nbuckets = le64_to_cpu(s->nbuckets);
- sb->block_size = le16_to_cpu(s->block_size);
sb->bucket_size = le16_to_cpu(s->bucket_size);
sb->nr_in_set = le16_to_cpu(s->nr_in_set);
@@ -212,8 +211,8 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
unsigned i;
bio->bi_iter.bi_sector = SB_SECTOR;
- bio->bi_rw = REQ_SYNC|REQ_META;
bio->bi_iter.bi_size = SB_SIZE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
bch_bio_map(bio, NULL);
out->offset = cpu_to_le64(sb->offset);
@@ -238,7 +237,7 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
pr_debug("ver %llu, flags %llu, seq %llu",
sb->version, sb->flags, sb->seq);
- submit_bio(REQ_WRITE, bio);
+ submit_bio(bio);
}
static void bch_write_bdev_super_unlock(struct closure *cl)
@@ -333,7 +332,7 @@ static void uuid_io_unlock(struct closure *cl)
up(&c->uuid_write_mutex);
}
-static void uuid_io(struct cache_set *c, unsigned long rw,
+static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
struct bkey *k, struct closure *parent)
{
struct closure *cl = &c->uuid_write;
@@ -348,21 +347,22 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
for (i = 0; i < KEY_PTRS(k); i++) {
struct bio *bio = bch_bbio_alloc(c);
- bio->bi_rw = REQ_SYNC|REQ_META|rw;
+ bio->bi_rw = REQ_SYNC|REQ_META|op_flags;
bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
bio->bi_private = cl;
+ bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
bch_bio_map(bio, c->uuids);
bch_submit_bbio(bio, c, k, i);
- if (!(rw & WRITE))
+ if (op != REQ_OP_WRITE)
break;
}
bch_extent_to_text(buf, sizeof(buf), k);
- pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
+ pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
if (!bch_is_zero(u->uuid, 16))
@@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
- uuid_io(c, READ_SYNC, k, cl);
+ uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -426,7 +426,7 @@ static int __uuid_write(struct cache_set *c)
return 1;
SET_KEY_SIZE(&k.key, c->sb.bucket_size);
- uuid_io(c, REQ_WRITE, &k.key, &cl);
+ uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
closure_sync(&cl);
bkey_copy(&c->uuid_bucket, &k.key);
@@ -498,7 +498,8 @@ static void prio_endio(struct bio *bio)
closure_put(&ca->prio);
}
-static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
+static void prio_io(struct cache *ca, uint64_t bucket, int op,
+ unsigned long op_flags)
{
struct closure *cl = &ca->prio;
struct bio *bio = bch_bbio_alloc(ca->set);
@@ -507,11 +508,11 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_SYNC|REQ_META|rw;
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = prio_endio;
bio->bi_private = ca;
+ bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
bch_bio_map(bio, ca->disk_buckets);
closure_bio_submit(bio, &ca->prio);
@@ -557,7 +558,7 @@ void bch_prio_write(struct cache *ca)
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
- prio_io(ca, bucket, REQ_WRITE);
+ prio_io(ca, bucket, REQ_OP_WRITE, 0);
mutex_lock(&ca->set->bucket_lock);
ca->prio_buckets[i] = bucket;
@@ -599,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
ca->prio_last_buckets[bucket_nr] = bucket;
bucket_nr++;
- prio_io(ca, bucket, READ_SYNC);
+ prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities");
@@ -1518,7 +1519,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
!(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
!(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
- !(c->moving_gc_wq = create_workqueue("bcache_gc")) ||
+ !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
+ WQ_MEM_RECLAIM, 0)) ||
bch_journal_alloc(c) ||
bch_btree_cache_alloc(c) ||
bch_open_buckets_alloc(c) ||
@@ -1803,7 +1805,7 @@ void bch_cache_release(struct kobject *kobj)
module_put(THIS_MODULE);
}
-static int cache_alloc(struct cache_sb *sb, struct cache *ca)
+static int cache_alloc(struct cache *ca)
{
size_t free;
struct bucket *b;
@@ -1858,7 +1860,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
- ret = cache_alloc(sb, ca);
+ ret = cache_alloc(ca);
if (ret != 0)
goto err;
@@ -2097,7 +2099,7 @@ static int __init bcache_init(void)
return bcache_major;
}
- if (!(bcache_wq = create_workqueue("bcache")) ||
+ if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
sysfs_create_files(bcache_kobj, files) ||
bch_request_init() ||
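
The move from create_workqueue() to alloc_workqueue(..., WQ_MEM_RECLAIM, 0) is more than a renaming: WQ_MEM_RECLAIM guarantees a rescuer thread, which workqueues on the I/O path need so queued work can still make forward progress under memory pressure. The idiom in isolation (gc_work is a hypothetical work item):

	struct workqueue_struct *wq;

	wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
	if (!wq)
		return -ENOMEM;

	queue_work(wq, &gc_work);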
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 60123677b382..d9fd2a62e5f6 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -182,7 +182,7 @@ static void write_dirty(struct closure *cl)
struct keybuf_key *w = io->bio.bi_private;
dirty_init(w);
- io->bio.bi_rw = WRITE;
+ bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
io->bio.bi_iter.bi_sector = KEY_START(&w->key);
io->bio.bi_bdev = io->dc->bdev;
io->bio.bi_end_io = dirty_endio;
@@ -251,10 +251,10 @@ static void read_dirty(struct cached_dev *dc)
io->dc = dc;
dirty_init(w);
+ bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
&w->key, 0)->bdev;
- io->bio.bi_rw = READ;
io->bio.bi_end_io = read_dirty_endio;
if (bio_alloc_pages(&io->bio, GFP_KERNEL))
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index d8129ec93ebd..6fff794e0c72 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -162,7 +162,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
if (sync_page_io(rdev, target,
roundup(size, bdev_logical_block_size(rdev->bdev)),
- page, READ, true)) {
+ page, REQ_OP_READ, 0, true)) {
page->index = index;
return 0;
}
@@ -297,7 +297,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(WRITE | REQ_SYNC, bh);
+ submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
bh = bh->b_this_page;
}
@@ -392,7 +392,7 @@ static int read_page(struct file *file, unsigned long index,
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(READ, bh);
+ submit_bh(REQ_OP_READ, 0, bh);
}
block++;
bh = bh->b_this_page;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cd77216beff1..6571c81465e1 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -574,7 +574,8 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
{
int r;
struct dm_io_request io_req = {
- .bi_rw = rw,
+ .bi_op = rw,
+ .bi_op_flags = 0,
.notify.fn = dmio_complete,
.notify.context = b,
.client = b->c->dm_io,
@@ -634,6 +635,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
* the dm_buffer's inline bio is local to bufio.
*/
b->bio.bi_private = end_io;
+ bio_set_op_attrs(&b->bio, rw, 0);
/*
* We assume that if len >= PAGE_SIZE ptr is page-aligned.
@@ -660,7 +662,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
ptr += PAGE_SIZE;
} while (len > 0);
- submit_bio(rw, &b->bio);
+ submit_bio(&b->bio);
}
static void submit_io(struct dm_buffer *b, int rw, sector_t block,
@@ -1326,7 +1328,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
- .bi_rw = WRITE_FLUSH,
+ .bi_op = REQ_OP_WRITE,
+ .bi_op_flags = WRITE_FLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
index 6c9049c51b2b..f092771878c2 100644
--- a/drivers/md/dm-builtin.c
+++ b/drivers/md/dm-builtin.c
@@ -1,4 +1,4 @@
-#include "dm.h"
+#include "dm-core.h"
/*
* The kobject release method must not be placed in the module itself,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index ee0510f9a85e..718744db62df 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -788,7 +788,8 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio &&
- !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
+ !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
+ bio_op(bio) != REQ_OP_DISCARD) {
pb->tick = true;
cache->need_tick_bio = false;
}
@@ -829,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
- return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+ return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
}
/*
@@ -851,7 +852,7 @@ static void inc_ds(struct cache *cache, struct bio *bio,
static bool accountable_bio(struct cache *cache, struct bio *bio)
{
return ((bio->bi_bdev == cache->origin_dev->bdev) &&
- !(bio->bi_rw & REQ_DISCARD));
+ bio_op(bio) != REQ_OP_DISCARD);
}
static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -1067,7 +1068,8 @@ static void dec_io_migrations(struct cache *cache)
static bool discard_or_flush(struct bio *bio)
{
- return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
+ return bio_op(bio) == REQ_OP_DISCARD ||
+ bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1612,8 +1614,8 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
remap_to_cache(cache, bio, 0);
/*
- * REQ_FLUSH is not directed at any particular block so we don't
- * need to inc_ds(). REQ_FUA's are split into a write + REQ_FLUSH
+ * REQ_PREFLUSH is not directed at any particular block so we don't
+ * need to inc_ds(). REQ_FUA bios are split into a write + REQ_PREFLUSH
* by dm-core.
*/
issue(cache, bio);
@@ -1978,9 +1980,9 @@ static void process_deferred_bios(struct cache *cache)
bio = bio_list_pop(&bios);
- if (bio->bi_rw & REQ_FLUSH)
+ if (bio->bi_rw & REQ_PREFLUSH)
process_flush_bio(cache, bio);
- else if (bio->bi_rw & REQ_DISCARD)
+ else if (bio_op(bio) == REQ_OP_DISCARD)
process_discard_bio(cache, &structs, bio);
else
process_bio(cache, &structs, bio);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
new file mode 100644
index 000000000000..40ceba1fe8be
--- /dev/null
+++ b/drivers/md/dm-core.h
@@ -0,0 +1,149 @@
+/*
+ * Internal header file _only_ for device mapper core
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef DM_CORE_INTERNAL_H
+#define DM_CORE_INTERNAL_H
+
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/blk-mq.h>
+
+#include <trace/events/block.h>
+
+#include "dm.h"
+
+#define DM_RESERVED_MAX_IOS 1024
+
+struct dm_kobject_holder {
+ struct kobject kobj;
+ struct completion completion;
+};
+
+/*
+ * DM core internal structure that is used directly by dm.c and dm-rq.c.
+ * DM targets must _not_ dereference a mapped_device to directly access its members!
+ */
+struct mapped_device {
+ struct srcu_struct io_barrier;
+ struct mutex suspend_lock;
+
+ /*
+ * The current mapping (struct dm_table *).
+ * Use dm_get_live_table{_fast} or take suspend_lock for
+ * dereference.
+ */
+ void __rcu *map;
+
+ struct list_head table_devices;
+ struct mutex table_devices_lock;
+
+ unsigned long flags;
+
+ struct request_queue *queue;
+ int numa_node_id;
+
+ unsigned type;
+ /* Protect queue and type against concurrent access. */
+ struct mutex type_lock;
+
+ atomic_t holders;
+ atomic_t open_count;
+
+ struct dm_target *immutable_target;
+ struct target_type *immutable_target_type;
+
+ struct gendisk *disk;
+ char name[16];
+
+ void *interface_ptr;
+
+ /*
+ * A list of ios that arrived while we were suspended.
+ */
+ atomic_t pending[2];
+ wait_queue_head_t wait;
+ struct work_struct work;
+ spinlock_t deferred_lock;
+ struct bio_list deferred;
+
+ /*
+ * Event handling.
+ */
+ wait_queue_head_t eventq;
+ atomic_t event_nr;
+ atomic_t uevent_seq;
+ struct list_head uevent_list;
+ spinlock_t uevent_lock; /* Protect access to uevent_list */
+
+ /* the number of internal suspends */
+ unsigned internal_suspend_count;
+
+ /*
+ * Processing queue (flush)
+ */
+ struct workqueue_struct *wq;
+
+ /*
+ * io objects are allocated from here.
+ */
+ mempool_t *io_pool;
+ mempool_t *rq_pool;
+
+ struct bio_set *bs;
+
+ /*
+ * freeze/thaw support requires holding onto a super block
+ */
+ struct super_block *frozen_sb;
+
+ /* forced geometry settings */
+ struct hd_geometry geometry;
+
+ struct block_device *bdev;
+
+ /* kobject and completion */
+ struct dm_kobject_holder kobj_holder;
+
+ /* zero-length flush that will be cloned and submitted to targets */
+ struct bio flush_bio;
+
+ struct dm_stats stats;
+
+ struct kthread_worker kworker;
+ struct task_struct *kworker_task;
+
+ /* for request-based merge heuristic in dm_request_fn() */
+ unsigned seq_rq_merge_deadline_usecs;
+ int last_rq_rw;
+ sector_t last_rq_pos;
+ ktime_t last_rq_start_time;
+
+ /* for blk-mq request-based DM support */
+ struct blk_mq_tag_set *tag_set;
+ bool use_blk_mq:1;
+ bool init_tio_pdu:1;
+};
+
+void dm_init_md_queue(struct mapped_device *md);
+void dm_init_normal_md_queue(struct mapped_device *md);
+int md_in_flight(struct mapped_device *md);
+void disable_write_same(struct mapped_device *md);
+
+static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
+{
+ return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
+}
+
+unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
+
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+{
+ return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
+#endif
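
dm_get_completion_from_kobject() above leans on container_of() to step from an embedded kobject back to the dm_kobject_holder that contains it. A stand-alone user-space model of the same pointer arithmetic:

	#include <stddef.h>
	#include <stdio.h>

	struct kobject    { int refcount; };
	struct completion { int done; };

	struct dm_kobject_holder {
		struct kobject kobj;
		struct completion completion;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct dm_kobject_holder holder = { { 1 }, { 0 } };
		struct kobject *kobj = &holder.kobj;	/* all a caller gets to see */

		struct completion *c =
			&container_of(kobj, struct dm_kobject_holder, kobj)->completion;
		printf("%d\n", c == &holder.completion);	/* prints 1 */
		return 0;
	}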
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4f3cb3554944..8f2e3e2ffd26 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -683,7 +683,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
u8 *data)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
- u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+ __le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 buf[TCW_WHITENING_SIZE];
SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
int i, r;
@@ -722,7 +722,7 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
- u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+ __le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 *src;
int r = 0;
@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- clone->bi_rw = io->base_bio->bi_rw;
+ bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw);
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1911,11 +1911,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
struct crypt_config *cc = ti->private;
/*
- * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
- * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
- * - for REQ_DISCARD caller must use flush if IO ordering matters
+ * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
+ * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
+ * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
*/
- if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
+ if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
+ bio_op(bio) == REQ_OP_DISCARD)) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
bio->bi_iter.bi_sector = cc->start +
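
The u64-to-__le64 change in dm-crypt is a static-annotation fix: cpu_to_le64() returns the "bitwise" type __le64, and storing the result in a plain u64 trips sparse's endianness checker even though the generated code is identical. The corrected shape, sketched:

	__le64 sector = cpu_to_le64(dmreq->iv_sector);
	u8 buf[8];

	/* 'sector' explicitly holds little-endian bytes, so copying or
	 * hashing its raw representation is now visibly intentional */
	memcpy(buf, &sector, sizeof(sector));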
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 665bf3285618..2faf49d8f4d7 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1540,9 +1540,9 @@ static int era_map(struct dm_target *ti, struct bio *bio)
remap_to_origin(era, bio);
/*
- * REQ_FLUSH bios carry no data, so we're not interested in them.
+ * REQ_PREFLUSH bios carry no data, so we're not interested in them.
*/
- if (!(bio->bi_rw & REQ_FLUSH) &&
+ if (!(bio->bi_rw & REQ_PREFLUSH) &&
(bio_data_dir(bio) == WRITE) &&
!metadata_current_marked(era->md, block)) {
defer_bio(era, bio);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b7341de87015..29b99fb6a16a 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -266,7 +266,7 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
- "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
+ "(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 06d426eb5a30..daa03e41654a 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,7 +5,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include "dm-core.h"
#include <linux/device-mapper.h>
@@ -278,8 +278,9 @@ static void km_dp_init(struct dpages *dp, void *data)
/*-----------------------------------------------------------------
* IO routines that accept a list of pages.
*---------------------------------------------------------------*/
-static void do_region(int rw, unsigned region, struct dm_io_region *where,
- struct dpages *dp, struct io *io)
+static void do_region(int op, int op_flags, unsigned region,
+ struct dm_io_region *where, struct dpages *dp,
+ struct io *io)
{
struct bio *bio;
struct page *page;
@@ -295,24 +296,25 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
/*
* Reject unsupported discard and write same requests.
*/
- if (rw & REQ_DISCARD)
+ if (op == REQ_OP_DISCARD)
special_cmd_max_sectors = q->limits.max_discard_sectors;
- else if (rw & REQ_WRITE_SAME)
+ else if (op == REQ_OP_WRITE_SAME)
special_cmd_max_sectors = q->limits.max_write_same_sectors;
- if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
+ if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
+ special_cmd_max_sectors == 0) {
dec_count(io, region, -EOPNOTSUPP);
return;
}
/*
- * where->count may be zero if rw holds a flush and we need to
+ * where->count may be zero if op holds a flush and we need to
* send a zero-sized flush.
*/
do {
/*
* Allocate a suitably sized-bio.
*/
- if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
+ if ((op == REQ_OP_DISCARD) || (op == REQ_OP_WRITE_SAME))
num_bvecs = 1;
else
num_bvecs = min_t(int, BIO_MAX_PAGES,
@@ -322,13 +324,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
+ bio_set_op_attrs(bio, op, op_flags);
store_io_and_region_in_bio(bio, io, region);
- if (rw & REQ_DISCARD) {
+ if (op == REQ_OP_DISCARD) {
num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
- } else if (rw & REQ_WRITE_SAME) {
+ } else if (op == REQ_OP_WRITE_SAME) {
/*
* WRITE SAME only uses a single page.
*/
@@ -355,11 +358,11 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
}
atomic_inc(&io->count);
- submit_bio(rw, bio);
+ submit_bio(bio);
} while (remaining);
}
-static void dispatch_io(int rw, unsigned int num_regions,
+static void dispatch_io(int op, int op_flags, unsigned int num_regions,
struct dm_io_region *where, struct dpages *dp,
struct io *io, int sync)
{
@@ -369,7 +372,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
BUG_ON(num_regions > DM_IO_MAX_REGIONS);
if (sync)
- rw |= REQ_SYNC;
+ op_flags |= REQ_SYNC;
/*
* For multiple regions we need to be careful to rewind
@@ -377,8 +380,8 @@ static void dispatch_io(int rw, unsigned int num_regions,
*/
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
- if (where[i].count || (rw & REQ_FLUSH))
- do_region(rw, i, where + i, dp, io);
+ if (where[i].count || (op_flags & REQ_PREFLUSH))
+ do_region(op, op_flags, i, where + i, dp, io);
}
/*
@@ -402,13 +405,13 @@ static void sync_io_complete(unsigned long error, void *context)
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
- struct dm_io_region *where, int rw, struct dpages *dp,
- unsigned long *error_bits)
+ struct dm_io_region *where, int op, int op_flags,
+ struct dpages *dp, unsigned long *error_bits)
{
struct io *io;
struct sync_io sio;
- if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
+ if (num_regions > 1 && !op_is_write(op)) {
WARN_ON(1);
return -EIO;
}
@@ -425,7 +428,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(rw, num_regions, where, dp, io, 1);
+ dispatch_io(op, op_flags, num_regions, where, dp, io, 1);
wait_for_completion_io(&sio.wait);
@@ -436,12 +439,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
- struct dm_io_region *where, int rw, struct dpages *dp,
- io_notify_fn fn, void *context)
+ struct dm_io_region *where, int op, int op_flags,
+ struct dpages *dp, io_notify_fn fn, void *context)
{
struct io *io;
- if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
+ if (num_regions > 1 && !op_is_write(op)) {
WARN_ON(1);
fn(1, context);
return -EIO;
@@ -457,7 +460,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(rw, num_regions, where, dp, io, 0);
+ dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
return 0;
}
@@ -480,7 +483,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
case DM_IO_VMA:
flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
- if ((io_req->bi_rw & RW_MASK) == READ) {
+ if (io_req->bi_op == REQ_OP_READ) {
dp->vma_invalidate_address = io_req->mem.ptr.vma;
dp->vma_invalidate_size = size;
}
@@ -518,10 +521,12 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
if (!io_req->notify.fn)
return sync_io(io_req->client, num_regions, where,
- io_req->bi_rw, &dp, sync_error_bits);
+ io_req->bi_op, io_req->bi_op_flags, &dp,
+ sync_error_bits);
- return async_io(io_req->client, num_regions, where, io_req->bi_rw,
- &dp, io_req->notify.fn, io_req->notify.context);
+ return async_io(io_req->client, num_regions, where, io_req->bi_op,
+ io_req->bi_op_flags, &dp, io_req->notify.fn,
+ io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
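
With dm_io_request's bi_rw split into bi_op and bi_op_flags, callers of dm_io() fill both fields, and a NULL notify.fn still selects the synchronous path (sync_io() above). A hedged example of a caller; bdev, buf and client are assumed to have been set up elsewhere:

	struct dm_io_region region = {
		.bdev   = bdev,
		.sector = 0,
		.count  = 8,
	};
	struct dm_io_request io_req = {
		.bi_op        = REQ_OP_WRITE,
		.bi_op_flags  = REQ_SYNC,
		.mem.type     = DM_IO_KMEM,
		.mem.ptr.addr = buf,
		.client       = client,
		.notify.fn    = NULL,	/* NULL => dm_io() waits for completion */
	};
	unsigned long error_bits;
	int r = dm_io(&io_req, 1, &region, &error_bits);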
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 2c7ca258c4e4..966eb4b61aed 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -5,7 +5,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include "dm-core.h"
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -1267,6 +1267,15 @@ static int populate_table(struct dm_table *table,
return dm_table_complete(table);
}
+static bool is_valid_type(unsigned cur, unsigned new)
+{
+ if (cur == new ||
+ (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
+ return true;
+
+ return false;
+}
+
static int table_load(struct dm_ioctl *param, size_t param_size)
{
int r;
@@ -1309,7 +1318,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
DMWARN("unable to set up device queue for new table.");
goto err_unlock_md_type;
}
- } else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+ } else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
DMWARN("can't change device type after initial table load.");
r = -EINVAL;
goto err_unlock_md_type;
@@ -1670,8 +1679,7 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
return r;
}
-#define DM_PARAMS_KMALLOC 0x0001 /* Params alloced with kmalloc */
-#define DM_PARAMS_VMALLOC 0x0002 /* Params alloced with vmalloc */
+#define DM_PARAMS_MALLOC 0x0001 /* Params allocated with kvmalloc() */
#define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */
static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
@@ -1679,10 +1687,8 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
if (param_flags & DM_WIPE_BUFFER)
memset(param, 0, param_size);
- if (param_flags & DM_PARAMS_KMALLOC)
- kfree(param);
- if (param_flags & DM_PARAMS_VMALLOC)
- vfree(param);
+ if (param_flags & DM_PARAMS_MALLOC)
+ kvfree(param);
}
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
@@ -1714,19 +1720,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
* Use kmalloc() rather than vmalloc() when we can.
*/
dmi = NULL;
- if (param_kernel->data_size <= KMALLOC_MAX_SIZE) {
+ if (param_kernel->data_size <= KMALLOC_MAX_SIZE)
dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
- if (dmi)
- *param_flags |= DM_PARAMS_KMALLOC;
- }
if (!dmi) {
unsigned noio_flag;
noio_flag = memalloc_noio_save();
dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
memalloc_noio_restore(noio_flag);
- if (dmi)
- *param_flags |= DM_PARAMS_VMALLOC;
}
if (!dmi) {
@@ -1735,6 +1736,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
return -ENOMEM;
}
+ *param_flags |= DM_PARAMS_MALLOC;
+
if (copy_from_user(dmi, user, param_kernel->data_size))
goto bad;
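
The ioctl path keeps its try-kmalloc-then-vmalloc strategy, but the two bookkeeping flags collapse into one because kvfree() determines from the address which allocator to undo. The core of the pattern:

	void *dmi;

	dmi = kmalloc(size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (!dmi)
		dmi = __vmalloc(size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM,
				PAGE_KERNEL);
	if (!dmi)
		return -ENOMEM;

	/* ... use dmi ... */

	kvfree(dmi);	/* frees either flavour of allocation */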
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1452ed9aacb4..9e9d04cb7d51 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -26,7 +26,7 @@
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>
-#include "dm.h"
+#include "dm-core.h"
#define SUB_JOB_SIZE 128
#define SPLIT_COUNT 8
@@ -465,7 +465,7 @@ static void complete_io(unsigned long error, void *context)
io_job_finish(kc->throttle);
if (error) {
- if (job->rw & WRITE)
+ if (op_is_write(job->rw))
job->write_err |= error;
else
job->read_err = 1;
@@ -477,7 +477,7 @@ static void complete_io(unsigned long error, void *context)
}
}
- if (job->rw & WRITE)
+ if (op_is_write(job->rw))
push(&kc->complete_jobs, job);
else {
@@ -496,7 +496,8 @@ static int run_io_job(struct kcopyd_job *job)
{
int r;
struct dm_io_request io_req = {
- .bi_rw = job->rw,
+ .bi_op = job->rw,
+ .bi_op_flags = 0,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
.mem.offset = 0,
@@ -550,7 +551,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
if (r < 0) {
/* error this rogue job */
- if (job->rw & WRITE)
+ if (op_is_write(job->rw))
job->write_err = (unsigned long) -1L;
else
job->read_err = 1;
@@ -734,7 +735,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
/*
* Use WRITE SAME to optimize zeroing if all dests support it.
*/
- job->rw = WRITE | REQ_WRITE_SAME;
+ job->rw = REQ_OP_WRITE_SAME;
for (i = 0; i < job->num_dests; i++)
if (!bdev_write_same(job->dests[i].bdev)) {
job->rw = WRITE;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 05c35aacb3aa..6d35dd4e9efb 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -141,9 +141,27 @@ static int linear_iterate_devices(struct dm_target *ti,
return fn(ti, lc->dev, lc->start, ti->len, data);
}
+static long linear_direct_access(struct dm_target *ti, sector_t sector,
+ void __pmem **kaddr, pfn_t *pfn, long size)
+{
+ struct linear_c *lc = ti->private;
+ struct block_device *bdev = lc->dev->bdev;
+ struct blk_dax_ctl dax = {
+ .sector = linear_map_sector(ti, sector),
+ .size = size,
+ };
+ long ret;
+
+ ret = bdev_direct_access(bdev, &dax);
+ *kaddr = dax.addr;
+ *pfn = dax.pfn;
+
+ return ret;
+}
+
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 2, 1},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
@@ -151,6 +169,7 @@ static struct target_type linear_target = {
.status = linear_status,
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
+ .direct_access = linear_direct_access,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 608302e222af..b5dbf7a0515e 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -205,6 +205,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -226,7 +227,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
DMERR("Couldn't add page to the log block");
goto error_bio;
}
- submit_bio(WRITE, bio);
+ submit_bio(bio);
return 0;
error_bio:
bio_put(bio);
@@ -269,6 +270,7 @@ static int log_one_block(struct log_writes_c *lc,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
for (i = 0; i < block->vec_cnt; i++) {
/*
@@ -279,7 +281,7 @@ static int log_one_block(struct log_writes_c *lc,
block->vecs[i].bv_len, 0);
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
- submit_bio(WRITE, bio);
+ submit_bio(bio);
bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
if (!bio) {
DMERR("Couldn't alloc log bio");
@@ -290,6 +292,7 @@ static int log_one_block(struct log_writes_c *lc,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
@@ -301,7 +304,7 @@ static int log_one_block(struct log_writes_c *lc,
}
sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
}
- submit_bio(WRITE, bio);
+ submit_bio(bio);
out:
kfree(block->data);
kfree(block);
@@ -552,9 +555,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
struct bio_vec bv;
size_t alloc_size;
int i = 0;
- bool flush_bio = (bio->bi_rw & REQ_FLUSH);
+ bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
bool fua_bio = (bio->bi_rw & REQ_FUA);
- bool discard_bio = (bio->bi_rw & REQ_DISCARD);
+ bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
pb->block = NULL;
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 627d19186d5a..4ca2d1df5b44 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -293,7 +293,7 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
static int rw_header(struct log_c *lc, int rw)
{
- lc->io_req.bi_rw = rw;
+ lc->io_req.bi_op = rw;
return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
@@ -306,7 +306,8 @@ static int flush_header(struct log_c *lc)
.count = 0,
};
- lc->io_req.bi_rw = WRITE_FLUSH;
+ lc->io_req.bi_op = REQ_OP_WRITE;
+ lc->io_req.bi_op_flags = WRITE_FLUSH;
return dm_io(&lc->io_req, 1, &null_location, NULL);
}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 52baf8a5b0f4..7eac080fcb18 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,7 +7,8 @@
#include <linux/device-mapper.h>
-#include "dm.h"
+#include "dm-rq.h"
+#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"
@@ -89,6 +90,8 @@ struct multipath {
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
+ unsigned queue_mode;
+
/*
* We must use a mempool of dm_mpath_io structs so that we
* can resubmit bios on error.
@@ -97,10 +100,13 @@ struct multipath {
struct mutex work_mutex;
struct work_struct trigger_event;
+
+ struct work_struct process_queued_bios;
+ struct bio_list queued_bios;
};
/*
- * Context information attached to each bio we process.
+ * Context information attached to each io we process.
*/
struct dm_mpath_io {
struct pgpath *pgpath;
@@ -114,6 +120,7 @@ static struct kmem_cache *_mpio_cache;
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
+static void process_queued_bios(struct work_struct *work);
/*-----------------------------------------------
* Multipath state flags.
@@ -185,7 +192,7 @@ static void free_priority_group(struct priority_group *pg,
kfree(pg);
}
-static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
+static struct multipath *alloc_multipath(struct dm_target *ti)
{
struct multipath *m;
@@ -203,15 +210,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
mutex_init(&m->work_mutex);
m->mpio_pool = NULL;
- if (!use_blk_mq) {
- unsigned min_ios = dm_get_reserved_rq_based_ios();
-
- m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
- if (!m->mpio_pool) {
- kfree(m);
- return NULL;
- }
- }
+ m->queue_mode = DM_TYPE_NONE;
m->ti = ti;
ti->private = m;
@@ -220,6 +219,39 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
return m;
}
+static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
+{
+ if (m->queue_mode == DM_TYPE_NONE) {
+ /*
+ * Default to request-based.
+ */
+ if (dm_use_blk_mq(dm_table_get_md(ti->table)))
+ m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
+ else
+ m->queue_mode = DM_TYPE_REQUEST_BASED;
+ }
+
+ if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
+ unsigned min_ios = dm_get_reserved_rq_based_ios();
+
+ m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
+ if (!m->mpio_pool)
+ return -ENOMEM;
+ } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ INIT_WORK(&m->process_queued_bios, process_queued_bios);
+ /*
+ * bio-based doesn't support any direct scsi_dh management;
+ * it just discovers if a scsi_dh is attached.
+ */
+ set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+ }
+
+ dm_table_set_type(ti->table, m->queue_mode);
+
+ return 0;
+}
+
static void free_multipath(struct multipath *m)
{
struct priority_group *pg, *tmp;
@@ -272,6 +304,41 @@ static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
}
}
+static size_t multipath_per_bio_data_size(void)
+{
+ return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
+}
+
+static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
+{
+ return dm_per_bio_data(bio, multipath_per_bio_data_size());
+}
+
+static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
+{
+ /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
+ struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
+ void *bio_details = mpio + 1;
+
+ return bio_details;
+}
+
+static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
+ struct dm_bio_details **bio_details_p)
+{
+ struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
+ struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);
+
+ memset(mpio, 0, sizeof(*mpio));
+ memset(bio_details, 0, sizeof(*bio_details));
+ dm_bio_record(bio_details, bio);
+
+ if (mpio_p)
+ *mpio_p = mpio;
+ if (bio_details_p)
+ *bio_details_p = bio_details;
+}
+
/*-----------------------------------------------
* Path selection
*-----------------------------------------------*/
@@ -431,16 +498,26 @@ failed:
* and multipath_resume() calls and we have no need to check
* for the DMF_NOFLUSH_SUSPENDING flag.
*/
-static int must_push_back(struct multipath *m)
+static bool __must_push_back(struct multipath *m)
+{
+ return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
+ dm_noflush_suspending(m->ti));
+}
+
+static bool must_push_back_rq(struct multipath *m)
{
return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
- ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
- dm_noflush_suspending(m->ti)));
+ __must_push_back(m));
+}
+
+static bool must_push_back_bio(struct multipath *m)
+{
+ return __must_push_back(m);
}
/*
- * Map cloned requests
+ * Map cloned requests (request-based multipath)
*/
static int __multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context,
@@ -459,7 +536,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
- if (!must_push_back(m))
+ if (!must_push_back_rq(m))
r = -EIO; /* Failed */
return r;
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
@@ -530,6 +607,108 @@ static void multipath_release_clone(struct request *clone)
}
/*
+ * Map cloned bios (bio-based multipath)
+ */
+static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
+{
+ size_t nr_bytes = bio->bi_iter.bi_size;
+ struct pgpath *pgpath;
+ unsigned long flags;
+ bool queue_io;
+
+ /* Do we need to select a new pgpath? */
+ pgpath = lockless_dereference(m->current_pgpath);
+ queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
+ if (!pgpath || !queue_io)
+ pgpath = choose_pgpath(m, nr_bytes);
+
+ if ((pgpath && queue_io) ||
+ (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
+ /* Queue for the daemon to resubmit */
+ spin_lock_irqsave(&m->lock, flags);
+ bio_list_add(&m->queued_bios, bio);
+ spin_unlock_irqrestore(&m->lock, flags);
+ /* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
+ if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
+ pg_init_all_paths(m);
+ else if (!queue_io)
+ queue_work(kmultipathd, &m->process_queued_bios);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ if (!pgpath) {
+ if (!must_push_back_bio(m))
+ return -EIO;
+ return DM_MAPIO_REQUEUE;
+ }
+
+ mpio->pgpath = pgpath;
+ mpio->nr_bytes = nr_bytes;
+
+ bio->bi_error = 0;
+ bio->bi_bdev = pgpath->path.dev->bdev;
+ bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+
+ if (pgpath->pg->ps.type->start_io)
+ pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
+ &pgpath->path,
+ nr_bytes);
+ return DM_MAPIO_REMAPPED;
+}
+
+static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct multipath *m = ti->private;
+ struct dm_mpath_io *mpio = NULL;
+
+ multipath_init_per_bio_data(bio, &mpio, NULL);
+
+ return __multipath_map_bio(m, bio, mpio);
+}
+
+static void process_queued_bios_list(struct multipath *m)
+{
+ if (m->queue_mode == DM_TYPE_BIO_BASED)
+ queue_work(kmultipathd, &m->process_queued_bios);
+}
+
+static void process_queued_bios(struct work_struct *work)
+{
+ int r;
+ unsigned long flags;
+ struct bio *bio;
+ struct bio_list bios;
+ struct blk_plug plug;
+ struct multipath *m =
+ container_of(work, struct multipath, process_queued_bios);
+
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&m->lock, flags);
+
+ if (bio_list_empty(&m->queued_bios)) {
+ spin_unlock_irqrestore(&m->lock, flags);
+ return;
+ }
+
+ bio_list_merge(&bios, &m->queued_bios);
+ bio_list_init(&m->queued_bios);
+
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(&bios))) {
+ r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
+ if (r < 0 || r == DM_MAPIO_REQUEUE) {
+ bio->bi_error = r;
+ bio_endio(bio);
+ } else if (r == DM_MAPIO_REMAPPED)
+ generic_make_request(bio);
+ }
+ blk_finish_plug(&plug);
+}
+
+/*
* If we run out of usable paths, should we queue I/O or error it?
*/
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
@@ -557,8 +736,10 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
spin_unlock_irqrestore(&m->lock, flags);
- if (!queue_if_no_path)
+ if (!queue_if_no_path) {
dm_table_run_md_queue_async(m->ti->table);
+ process_queued_bios_list(m);
+ }
return 0;
}
@@ -798,6 +979,12 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
if (!hw_argc)
return 0;
+ if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ dm_consume_args(as, hw_argc);
+ DMERR("bio-based multipath doesn't allow hardware handler args");
+ return 0;
+ }
+
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
if (hw_argc > 1) {
@@ -833,7 +1020,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
const char *arg_name;
static struct dm_arg _args[] = {
- {0, 6, "invalid number of feature args"},
+ {0, 8, "invalid number of feature args"},
{1, 50, "pg_init_retries must be between 1 and 50"},
{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
};
@@ -873,6 +1060,24 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
continue;
}
+ if (!strcasecmp(arg_name, "queue_mode") &&
+ (argc >= 1)) {
+ const char *queue_mode_name = dm_shift_arg(as);
+
+ if (!strcasecmp(queue_mode_name, "bio"))
+ m->queue_mode = DM_TYPE_BIO_BASED;
+ else if (!strcasecmp(queue_mode_name, "rq"))
+ m->queue_mode = DM_TYPE_REQUEST_BASED;
+ else if (!strcasecmp(queue_mode_name, "mq"))
+ m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
+ else {
+ ti->error = "Unknown 'queue_mode' requested";
+ r = -EINVAL;
+ }
+ argc--;
+ continue;
+ }
+
ti->error = "Unrecognised multipath feature request";
r = -EINVAL;
} while (argc && !r);
@@ -880,8 +1085,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
return r;
}
-static int multipath_ctr(struct dm_target *ti, unsigned int argc,
- char **argv)
+static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
/* target arguments */
static struct dm_arg _args[] = {
@@ -894,12 +1098,11 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
- bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
as.argc = argc;
as.argv = argv;
- m = alloc_multipath(ti, use_blk_mq);
+ m = alloc_multipath(ti);
if (!m) {
ti->error = "can't allocate multipath";
return -EINVAL;
@@ -909,6 +1112,10 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
if (r)
goto bad;
+ r = alloc_multipath_stage2(ti, m);
+ if (r)
+ goto bad;
+
r = parse_hw_handler(&as, m);
if (r)
goto bad;
@@ -958,7 +1165,9 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
- if (use_blk_mq)
+ if (m->queue_mode == DM_TYPE_BIO_BASED)
+ ti->per_io_data_size = multipath_per_bio_data_size();
+ else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
ti->per_io_data_size = sizeof(struct dm_mpath_io);
return 0;
@@ -1083,8 +1292,10 @@ static int reinstate_path(struct pgpath *pgpath)
out:
spin_unlock_irqrestore(&m->lock, flags);
- if (run_queue)
+ if (run_queue) {
dm_table_run_md_queue_async(m->ti->table);
+ process_queued_bios_list(m);
+ }
return r;
}
@@ -1281,6 +1492,8 @@ static void pg_init_done(void *data, int errors)
}
clear_bit(MPATHF_QUEUE_IO, &m->flags);
+ process_queued_bios_list(m);
+
/*
* Wake up any thread waiting to suspend.
*/
@@ -1328,7 +1541,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
* during end I/O handling, since those clone requests don't have
* bio clones. If we queue them inside the multipath target,
* we need to make bio clones, that requires memory allocation.
- * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
+ * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
* don't have bio clones.)
* Instead of queueing the clone request here, we queue the original
* request into dm core, which will remake a clone request and
@@ -1347,7 +1560,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!atomic_read(&m->nr_valid_paths)) {
if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- if (!must_push_back(m))
+ if (!must_push_back_rq(m))
r = -EIO;
} else {
if (error == -EBADE)
@@ -1381,6 +1594,64 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
return r;
}
+static int do_end_io_bio(struct multipath *m, struct bio *clone,
+ int error, struct dm_mpath_io *mpio)
+{
+ unsigned long flags;
+
+ if (!error)
+ return 0; /* I/O complete */
+
+ if (noretry_error(error))
+ return error;
+
+ if (mpio->pgpath)
+ fail_path(mpio->pgpath);
+
+ if (!atomic_read(&m->nr_valid_paths)) {
+ if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ if (!must_push_back_bio(m))
+ return -EIO;
+ return DM_ENDIO_REQUEUE;
+ } else {
+ if (error == -EBADE)
+ return error;
+ }
+ }
+
+ /* Queue for the daemon to resubmit */
+ dm_bio_restore(get_bio_details_from_bio(clone), clone);
+
+ spin_lock_irqsave(&m->lock, flags);
+ bio_list_add(&m->queued_bios, clone);
+ spin_unlock_irqrestore(&m->lock, flags);
+ if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
+ queue_work(kmultipathd, &m->process_queued_bios);
+
+ return DM_ENDIO_INCOMPLETE;
+}
+
+static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
+{
+ struct multipath *m = ti->private;
+ struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
+ struct pgpath *pgpath;
+ struct path_selector *ps;
+ int r;
+
+ BUG_ON(!mpio);
+
+ r = do_end_io_bio(m, clone, error, mpio);
+ pgpath = mpio->pgpath;
+ if (pgpath) {
+ ps = &pgpath->pg->ps;
+ if (ps->type->end_io)
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
+ }
+
+ return r;
+}
+
/*
* Suspend can't complete until all the I/O is processed so if
* the last path fails we must error any remaining I/O.
@@ -1454,7 +1725,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
(m->pg_init_retries > 0) * 2 +
(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
- test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags));
+ test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
+ (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
+
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
DMEMIT("queue_if_no_path ");
if (m->pg_init_retries)
@@ -1463,6 +1736,16 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
DMEMIT("retain_attached_hw_handler ");
+ if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
+ switch (m->queue_mode) {
+ case DM_TYPE_BIO_BASED:
+ DMEMIT("queue_mode bio ");
+ break;
+ case DM_TYPE_MQ_REQUEST_BASED:
+ DMEMIT("queue_mode mq ");
+ break;
+ }
+ }
}
if (!m->hw_handler_name || type == STATUSTYPE_INFO)
@@ -1642,6 +1925,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg_init_all_paths(m);
dm_table_run_md_queue_async(m->ti->table);
+ process_queued_bios_list(m);
}
/*
@@ -1748,7 +2032,7 @@ static int multipath_busy(struct dm_target *ti)
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 11, 0},
+ .version = {1, 12, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
.module = THIS_MODULE,
.ctr = multipath_ctr,
@@ -1757,6 +2041,8 @@ static struct target_type multipath_target = {
.clone_and_map_rq = multipath_clone_and_map,
.release_clone_rq = multipath_release_clone,
.rq_end_io = multipath_end_io,
+ .map = multipath_map_bio,
+ .end_io = multipath_end_io_bio,
.presuspend = multipath_presuspend,
.postsuspend = multipath_postsuspend,
.resume = multipath_resume,
@@ -1771,14 +2057,14 @@ static int __init dm_multipath_init(void)
{
int r;
- /* allocate a slab for the dm_ios */
+ /* allocate a slab for the dm_mpath_ios */
_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
if (!_mpio_cache)
return -ENOMEM;
r = dm_register_target(&multipath_target);
if (r < 0) {
- DMERR("register failed %d", r);
+ DMERR("request-based register failed %d", r);
r = -EINVAL;
goto bad_register_target;
}
@@ -1804,10 +2090,6 @@ static int __init dm_multipath_init(void)
goto bad_alloc_kmpath_handlerd;
}
- DMINFO("version %u.%u.%u loaded",
- multipath_target.version[0], multipath_target.version[1],
- multipath_target.version[2]);
-
return 0;
bad_alloc_kmpath_handlerd:
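
Bio-based multipath packs its two per-I/O structures back to back in dm's per-bio-data area, which is why get_bio_details_from_bio() is simply "mpio + 1". A stand-alone model of that layout (field lists simplified):

	#include <stdio.h>
	#include <stdlib.h>

	struct dm_mpath_io    { void *pgpath; size_t nr_bytes; };
	struct dm_bio_details { long sector; unsigned idx; };

	static size_t multipath_per_bio_data_size(void)
	{
		return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
	}

	int main(void)
	{
		/* stand-in for dm_per_bio_data(bio, multipath_per_bio_data_size()) */
		void *per_bio_data = malloc(multipath_per_bio_data_size());
		struct dm_mpath_io *mpio = per_bio_data;
		struct dm_bio_details *details = (struct dm_bio_details *)(mpio + 1);

		printf("details sits %zu bytes in\n",
		       (size_t)((char *)details - (char *)mpio));
		free(per_bio_data);
		return 0;
	}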
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 52532745a50f..84983549b5e1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010-2011 Neil Brown
- * Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -17,7 +17,12 @@
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "raid"
-#define MAX_RAID_DEVICES 253 /* raid4/5/6 limit */
+#define MAX_RAID_DEVICES 253 /* md-raid kernel limit */
+
+/*
+ * Minimum sectors of free reshape space per raid device
+ */
+#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)
static bool devices_handle_discard_safely = false;
@@ -25,12 +30,12 @@ static bool devices_handle_discard_safely = false;
* The following flags are used by dm-raid.c to set up the array state.
* They must be cleared before md_run is called.
*/
-#define FirstUse 10 /* rdev flag */
+#define FirstUse 10 /* rdev flag */
struct raid_dev {
/*
* Two DM devices, one to hold metadata and one to hold the
- * actual data/parity. The reason for this is to not confuse
+ * actual data/parity. The reason for this is to not confuse
* ti->len and give more flexibility in altering size and
* characteristics.
*
@@ -46,25 +51,175 @@ struct raid_dev {
};
/*
+ * Bits for establishing rs->ctr_flags
+ *
+ * 1 = flag takes no value
+ * 2 = flag takes a value
+ */
+#define __CTR_FLAG_SYNC 0 /* 1 */ /* Not with raid0! */
+#define __CTR_FLAG_NOSYNC 1 /* 1 */ /* Not with raid0! */
+#define __CTR_FLAG_REBUILD 2 /* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_DAEMON_SLEEP 3 /* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_MIN_RECOVERY_RATE 4 /* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_MAX_RECOVERY_RATE 5 /* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_MAX_WRITE_BEHIND 6 /* 2 */ /* Only with raid1! */
+#define __CTR_FLAG_WRITE_MOSTLY 7 /* 2 */ /* Only with raid1! */
+#define __CTR_FLAG_STRIPE_CACHE 8 /* 2 */ /* Only with raid4/5/6! */
+#define __CTR_FLAG_REGION_SIZE 9 /* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_RAID10_COPIES 10 /* 2 */ /* Only with raid10 */
+#define __CTR_FLAG_RAID10_FORMAT 11 /* 2 */ /* Only with raid10 */
+/* New for v1.9.0 */
+#define __CTR_FLAG_DELTA_DISKS 12 /* 2 */ /* Only with reshapable raid1/4/5/6/10! */
+#define __CTR_FLAG_DATA_OFFSET 13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
+#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */
+
+/*
* Flags for rs->ctr_flags field.
*/
-#define CTR_FLAG_SYNC 0x1
-#define CTR_FLAG_NOSYNC 0x2
-#define CTR_FLAG_REBUILD 0x4
-#define CTR_FLAG_DAEMON_SLEEP 0x8
-#define CTR_FLAG_MIN_RECOVERY_RATE 0x10
-#define CTR_FLAG_MAX_RECOVERY_RATE 0x20
-#define CTR_FLAG_MAX_WRITE_BEHIND 0x40
-#define CTR_FLAG_STRIPE_CACHE 0x80
-#define CTR_FLAG_REGION_SIZE 0x100
-#define CTR_FLAG_RAID10_COPIES 0x200
-#define CTR_FLAG_RAID10_FORMAT 0x400
+#define CTR_FLAG_SYNC (1 << __CTR_FLAG_SYNC)
+#define CTR_FLAG_NOSYNC (1 << __CTR_FLAG_NOSYNC)
+#define CTR_FLAG_REBUILD (1 << __CTR_FLAG_REBUILD)
+#define CTR_FLAG_DAEMON_SLEEP (1 << __CTR_FLAG_DAEMON_SLEEP)
+#define CTR_FLAG_MIN_RECOVERY_RATE (1 << __CTR_FLAG_MIN_RECOVERY_RATE)
+#define CTR_FLAG_MAX_RECOVERY_RATE (1 << __CTR_FLAG_MAX_RECOVERY_RATE)
+#define CTR_FLAG_MAX_WRITE_BEHIND (1 << __CTR_FLAG_MAX_WRITE_BEHIND)
+#define CTR_FLAG_WRITE_MOSTLY (1 << __CTR_FLAG_WRITE_MOSTLY)
+#define CTR_FLAG_STRIPE_CACHE (1 << __CTR_FLAG_STRIPE_CACHE)
+#define CTR_FLAG_REGION_SIZE (1 << __CTR_FLAG_REGION_SIZE)
+#define CTR_FLAG_RAID10_COPIES (1 << __CTR_FLAG_RAID10_COPIES)
+#define CTR_FLAG_RAID10_FORMAT (1 << __CTR_FLAG_RAID10_FORMAT)
+#define CTR_FLAG_DELTA_DISKS (1 << __CTR_FLAG_DELTA_DISKS)
+#define CTR_FLAG_DATA_OFFSET (1 << __CTR_FLAG_DATA_OFFSET)
+#define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
+
+/*
+ * Definitions of various constructor flags to
+ * be used in checks of valid / invalid flags
+ * per raid level.
+ */
+/* Define all sync flags (i.e. 'sync' or 'nosync') */
+#define CTR_FLAGS_ANY_SYNC (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)
+
+/* Define flags for options without argument (e.g. 'nosync') */
+#define CTR_FLAG_OPTIONS_NO_ARGS (CTR_FLAGS_ANY_SYNC | \
+ CTR_FLAG_RAID10_USE_NEAR_SETS)
+
+/* Define flags for options with one argument (e.g. 'delta_disks +2') */
+#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
+ CTR_FLAG_WRITE_MOSTLY | \
+ CTR_FLAG_DAEMON_SLEEP | \
+ CTR_FLAG_MIN_RECOVERY_RATE | \
+ CTR_FLAG_MAX_RECOVERY_RATE | \
+ CTR_FLAG_MAX_WRITE_BEHIND | \
+ CTR_FLAG_STRIPE_CACHE | \
+ CTR_FLAG_REGION_SIZE | \
+ CTR_FLAG_RAID10_COPIES | \
+ CTR_FLAG_RAID10_FORMAT | \
+ CTR_FLAG_DELTA_DISKS | \
+ CTR_FLAG_DATA_OFFSET)
+
+/* Valid options definitions per raid level... */
+
+/* "raid0" does only accept data offset */
+#define RAID0_VALID_FLAGS (CTR_FLAG_DATA_OFFSET)
+
+/* "raid1" does not accept stripe cache, data offset, delta_disks or any raid10 options */
+#define RAID1_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
+ CTR_FLAG_REBUILD | \
+ CTR_FLAG_WRITE_MOSTLY | \
+ CTR_FLAG_DAEMON_SLEEP | \
+ CTR_FLAG_MIN_RECOVERY_RATE | \
+ CTR_FLAG_MAX_RECOVERY_RATE | \
+ CTR_FLAG_MAX_WRITE_BEHIND | \
+ CTR_FLAG_REGION_SIZE | \
+ CTR_FLAG_DELTA_DISKS | \
+ CTR_FLAG_DATA_OFFSET)
+
+/* "raid10" does not accept any raid1 or stripe cache options */
+#define RAID10_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
+ CTR_FLAG_REBUILD | \
+ CTR_FLAG_DAEMON_SLEEP | \
+ CTR_FLAG_MIN_RECOVERY_RATE | \
+ CTR_FLAG_MAX_RECOVERY_RATE | \
+ CTR_FLAG_REGION_SIZE | \
+ CTR_FLAG_RAID10_COPIES | \
+ CTR_FLAG_RAID10_FORMAT | \
+ CTR_FLAG_DELTA_DISKS | \
+ CTR_FLAG_DATA_OFFSET | \
+ CTR_FLAG_RAID10_USE_NEAR_SETS)
+
+/*
+ * "raid4/5/6" do not accept any raid1 or raid10 specific options
+ *
+ * "raid6" does not accept "nosync", because it is not guaranteed
+ * that both parity and q-syndrome are being written properly with
+ * any writes
+ */
+#define RAID45_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
+ CTR_FLAG_REBUILD | \
+ CTR_FLAG_DAEMON_SLEEP | \
+ CTR_FLAG_MIN_RECOVERY_RATE | \
+ CTR_FLAG_MAX_RECOVERY_RATE | \
+ CTR_FLAG_MAX_WRITE_BEHIND | \
+ CTR_FLAG_STRIPE_CACHE | \
+ CTR_FLAG_REGION_SIZE | \
+ CTR_FLAG_DELTA_DISKS | \
+ CTR_FLAG_DATA_OFFSET)
+
+#define RAID6_VALID_FLAGS (CTR_FLAG_SYNC | \
+ CTR_FLAG_REBUILD | \
+ CTR_FLAG_DAEMON_SLEEP | \
+ CTR_FLAG_MIN_RECOVERY_RATE | \
+ CTR_FLAG_MAX_RECOVERY_RATE | \
+ CTR_FLAG_MAX_WRITE_BEHIND | \
+ CTR_FLAG_STRIPE_CACHE | \
+ CTR_FLAG_REGION_SIZE | \
+ CTR_FLAG_DELTA_DISKS | \
+ CTR_FLAG_DATA_OFFSET)
+/* ...valid options definitions per raid level */
+
+/*
+ * Flags for rs->runtime_flags field
+ * (RT_FLAG prefix meaning "runtime flag")
+ *
+ * These are all internal and used to define runtime state,
+ * e.g. to prevent another resume from preresume processing
+ * the raid set all over again.
+ */
+#define RT_FLAG_RS_PRERESUMED 0
+#define RT_FLAG_RS_RESUMED 1
+#define RT_FLAG_RS_BITMAP_LOADED 2
+#define RT_FLAG_UPDATE_SBS 3
+#define RT_FLAG_RESHAPE_RS 4
+#define RT_FLAG_KEEP_RS_FROZEN 5
+
+/* Array elements of 64 bit needed for rebuild/failed disk bits */
+#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
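Worked out for MAX_RAID_DEVICES = 253, the expression yields (253 + 63) / 8 / 8 = 4 words of 64 bits each, i.e. 256 bits, enough for 253 devices. A standalone sketch of the same computation:

#include <assert.h>
#include <stdint.h>
#define MAX_RAID_DEVICES 253
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
int main(void)
{
	assert(DISKS_ARRAY_ELEMS == 4);	/* 4 x 64 bits >= 253 devices */
	return 0;
}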
+
+/*
+ * raid set level, layout and chunk sectors backup/restore
+ */
+struct rs_layout {
+ int new_level;
+ int new_layout;
+ int new_chunk_sectors;
+};
struct raid_set {
struct dm_target *ti;
uint32_t bitmap_loaded;
- uint32_t ctr_flags;
+ uint32_t stripe_cache_entries;
+ unsigned long ctr_flags;
+ unsigned long runtime_flags;
+
+ uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];
+
+ int raid_disks;
+ int delta_disks;
+ int data_offset;
+ int raid10_copies;
+ int requested_bitmap_chunk_sectors;
struct mddev md;
struct raid_type *raid_type;
@@ -73,82 +228,446 @@ struct raid_set {
struct raid_dev dev[0];
};
+static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
+{
+ struct mddev *mddev = &rs->md;
+
+ l->new_level = mddev->new_level;
+ l->new_layout = mddev->new_layout;
+ l->new_chunk_sectors = mddev->new_chunk_sectors;
+}
+
+static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
+{
+ struct mddev *mddev = &rs->md;
+
+ mddev->new_level = l->new_level;
+ mddev->new_layout = l->new_layout;
+ mddev->new_chunk_sectors = l->new_chunk_sectors;
+}
+
+/* raid10 algorithms (i.e. formats) */
+#define ALGORITHM_RAID10_DEFAULT 0
+#define ALGORITHM_RAID10_NEAR 1
+#define ALGORITHM_RAID10_OFFSET 2
+#define ALGORITHM_RAID10_FAR 3
+
/* Supported raid types and properties. */
static struct raid_type {
const char *name; /* RAID algorithm. */
const char *descr; /* Descriptor text for logging. */
- const unsigned parity_devs; /* # of parity devices. */
- const unsigned minimal_devs; /* minimal # of devices in set. */
- const unsigned level; /* RAID level. */
- const unsigned algorithm; /* RAID algorithm. */
+ const unsigned int parity_devs; /* # of parity devices. */
+ const unsigned int minimal_devs;/* minimal # of devices in set. */
+ const unsigned int level; /* RAID level. */
+ const unsigned int algorithm; /* RAID algorithm. */
} raid_types[] = {
- {"raid0", "RAID0 (striping)", 0, 2, 0, 0 /* NONE */},
- {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */},
- {"raid10", "RAID10 (striped mirrors)", 0, 2, 10, UINT_MAX /* Varies */},
- {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
- {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
- {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
- {"raid5_ls", "RAID5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
- {"raid5_rs", "RAID5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
- {"raid6_zr", "RAID6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
- {"raid6_nr", "RAID6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
- {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
+ {"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */},
+ {"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */},
+ {"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR},
+ {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
+ {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
+ {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
+ {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
+ {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
+ {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
+ {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
+ {"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
+ {"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
+ {"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
+ {"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
+ {"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE},
+ {"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6},
+ {"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6},
+ {"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6},
+ {"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6},
+ {"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6}
+};
+
+/* True, if @v is in inclusive range [@min, @max] */
+static bool __within_range(long v, long min, long max)
+{
+ return v >= min && v <= max;
+}
+
+/* All table line arguments are defined here */
+static struct arg_name_flag {
+ const unsigned long flag;
+ const char *name;
+} __arg_name_flags[] = {
+ { CTR_FLAG_SYNC, "sync"},
+ { CTR_FLAG_NOSYNC, "nosync"},
+ { CTR_FLAG_REBUILD, "rebuild"},
+ { CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
+ { CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
+ { CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
+ { CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
+ { CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
+ { CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
+ { CTR_FLAG_REGION_SIZE, "region_size"},
+ { CTR_FLAG_RAID10_COPIES, "raid10_copies"},
+ { CTR_FLAG_RAID10_FORMAT, "raid10_format"},
+ { CTR_FLAG_DATA_OFFSET, "data_offset"},
+ { CTR_FLAG_DELTA_DISKS, "delta_disks"},
+ { CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
};
-static char *raid10_md_layout_to_format(int layout)
+/* Return argument name string for given @flag */
+static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
+{
+ if (hweight32(flag) == 1) {
+ struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);
+
+ while (anf-- > __arg_name_flags)
+ if (flag & anf->flag)
+ return anf->name;
+
+ } else
+ DMERR("%s called with more than one flag!", __func__);
+
+ return NULL;
+}
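The lookup walks the table backwards from one past the end; a minimal user-space sketch of the same idiom (table contents are illustrative, not the full kernel table):

#include <assert.h>
#include <string.h>
struct anf { unsigned long flag; const char *name; };
static struct anf tbl[] = {
	{ 1UL << 0, "sync" },
	{ 1UL << 2, "rebuild" },
};
static const char *name_by_flag(unsigned long flag)
{
	struct anf *p = tbl + sizeof(tbl) / sizeof(tbl[0]);
	while (p-- > tbl)		/* scan from last entry down to first */
		if (flag & p->flag)
			return p->name;
	return NULL;
}
int main(void)
{
	assert(!strcmp(name_by_flag(1UL << 2), "rebuild"));
	assert(name_by_flag(1UL << 9) == NULL);	/* unknown flag */
	return 0;
}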
+
+/*
+ * Bool helpers to test for various raid levels of a raid set.
+ * It's level as reported by the superblock rather than
+ * the requested raid_type passed to the constructor.
+ */
+/* Return true, if raid set in @rs is raid0 */
+static bool rs_is_raid0(struct raid_set *rs)
+{
+ return !rs->md.level;
+}
+
+/* Return true, if raid set in @rs is raid1 */
+static bool rs_is_raid1(struct raid_set *rs)
+{
+ return rs->md.level == 1;
+}
+
+/* Return true, if raid set in @rs is raid10 */
+static bool rs_is_raid10(struct raid_set *rs)
+{
+ return rs->md.level == 10;
+}
+
+/* Return true, if raid set in @rs is level 6 */
+static bool rs_is_raid6(struct raid_set *rs)
+{
+ return rs->md.level == 6;
+}
+
+/* Return true, if raid set in @rs is level 4, 5 or 6 */
+static bool rs_is_raid456(struct raid_set *rs)
+{
+ return __within_range(rs->md.level, 4, 6);
+}
+
+/* Return true, if raid set in @rs is reshapable */
+static bool __is_raid10_far(int layout);
+static bool rs_is_reshapable(struct raid_set *rs)
+{
+ return rs_is_raid456(rs) ||
+ (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
+}
+
+/* Return true, if raid set in @rs is recovering */
+static bool rs_is_recovering(struct raid_set *rs)
+{
+ return rs->md.recovery_cp < rs->dev[0].rdev.sectors;
+}
+
+/* Return true, if raid set in @rs is reshaping */
+static bool rs_is_reshaping(struct raid_set *rs)
+{
+ return rs->md.reshape_position != MaxSector;
+}
+
+/*
+ * bool helpers to test for various raid levels of a raid type @rt
+ */
+
+/* Return true, if raid type in @rt is raid0 */
+static bool rt_is_raid0(struct raid_type *rt)
+{
+ return !rt->level;
+}
+
+/* Return true, if raid type in @rt is raid1 */
+static bool rt_is_raid1(struct raid_type *rt)
+{
+ return rt->level == 1;
+}
+
+/* Return true, if raid type in @rt is raid10 */
+static bool rt_is_raid10(struct raid_type *rt)
+{
+ return rt->level == 10;
+}
+
+/* Return true, if raid type in @rt is raid4/5 */
+static bool rt_is_raid45(struct raid_type *rt)
+{
+ return __within_range(rt->level, 4, 5);
+}
+
+/* Return true, if raid type in @rt is raid6 */
+static bool rt_is_raid6(struct raid_type *rt)
+{
+ return rt->level == 6;
+}
+
+/* Return true, if raid type in @rt is raid4/5/6 */
+static bool rt_is_raid456(struct raid_type *rt)
+{
+ return __within_range(rt->level, 4, 6);
+}
+/* END: raid level bools */
+
+/* Return valid ctr flags for the raid level of @rs */
+static unsigned long __valid_flags(struct raid_set *rs)
+{
+ if (rt_is_raid0(rs->raid_type))
+ return RAID0_VALID_FLAGS;
+ else if (rt_is_raid1(rs->raid_type))
+ return RAID1_VALID_FLAGS;
+ else if (rt_is_raid10(rs->raid_type))
+ return RAID10_VALID_FLAGS;
+ else if (rt_is_raid45(rs->raid_type))
+ return RAID45_VALID_FLAGS;
+ else if (rt_is_raid6(rs->raid_type))
+ return RAID6_VALID_FLAGS;
+
+ return 0;
+}
+
+/*
+ * Check for valid flags set on @rs
+ *
+ * Has to be called after parsing of the ctr flags!
+ */
+static int rs_check_for_valid_flags(struct raid_set *rs)
+{
+ if (rs->ctr_flags & ~__valid_flags(rs)) {
+ rs->ti->error = "Invalid flags combination";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* MD raid10 bit definitions and helpers */
+#define RAID10_OFFSET (1 << 16) /* stripes with data copies are adjacent on devices */
+#define RAID10_BROCKEN_USE_FAR_SETS (1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
+#define RAID10_USE_FAR_SETS (1 << 18) /* Use sets instead of whole stripe rotation */
+#define RAID10_FAR_COPIES_SHIFT 8 /* raid10 # far copies shift (2nd byte of layout) */
+
+/* Return md raid10 near copies for @layout */
+static unsigned int __raid10_near_copies(int layout)
+{
+ return layout & 0xFF;
+}
+
+/* Return md raid10 far copies for @layout */
+static unsigned int __raid10_far_copies(int layout)
+{
+ return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
+}
+
+/* Return true if md raid10 offset for @layout */
+static bool __is_raid10_offset(int layout)
+{
+ return !!(layout & RAID10_OFFSET);
+}
+
+/* Return true if md raid10 near for @layout */
+static bool __is_raid10_near(int layout)
+{
+ return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
+}
+
+/* Return true if md raid10 far for @layout */
+static bool __is_raid10_far(int layout)
+{
+ return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
+}
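To make the bit layout concrete: md encodes near copies in the low byte, far copies in the second byte, and the offset property in bit 16. A user-space sketch decoding two example layout words (values chosen for illustration):

#include <assert.h>
#define RAID10_OFFSET (1 << 16)
static unsigned near_copies(int layout) { return layout & 0xFF; }
static unsigned far_copies(int layout)  { return (layout >> 8) & 0xFF; }
int main(void)
{
	int near2   = 0x0102;	/* 2 near copies, 1 far copy -> "near" */
	int offset2 = 0x10201;	/* offset bit set, 2 far copies -> "offset" */

	assert(near_copies(near2) == 2 && !(near2 & RAID10_OFFSET));
	assert(far_copies(offset2) == 2 && (offset2 & RAID10_OFFSET));
	return 0;
}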
+
+/* Return md raid10 layout string for @layout */
+static const char *raid10_md_layout_to_format(int layout)
{
/*
- * Bit 16 and 17 stand for "offset" and "use_far_sets"
+ * Bit 16 stands for "offset"
+ * (i.e. adjacent stripes hold copies)
+ *
* Refer to MD's raid10.c for details
*/
- if ((layout & 0x10000) && (layout & 0x20000))
+ if (__is_raid10_offset(layout))
return "offset";
- if ((layout & 0xFF) > 1)
+ if (__raid10_near_copies(layout) > 1)
return "near";
+ WARN_ON(__raid10_far_copies(layout) < 2);
+
return "far";
}
-static unsigned raid10_md_layout_to_copies(int layout)
+/* Return md raid10 algorithm for @name */
+static int raid10_name_to_format(const char *name)
{
- if ((layout & 0xFF) > 1)
- return layout & 0xFF;
- return (layout >> 8) & 0xFF;
+ if (!strcasecmp(name, "near"))
+ return ALGORITHM_RAID10_NEAR;
+ else if (!strcasecmp(name, "offset"))
+ return ALGORITHM_RAID10_OFFSET;
+ else if (!strcasecmp(name, "far"))
+ return ALGORITHM_RAID10_FAR;
+
+ return -EINVAL;
}
-static int raid10_format_to_md_layout(char *format, unsigned copies)
+/* Return md raid10 copies for @layout */
+static unsigned int raid10_md_layout_to_copies(int layout)
{
- unsigned n = 1, f = 1;
+ return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
+}
- if (!strcasecmp("near", format))
+/* Return md raid10 format id for @format string */
+static int raid10_format_to_md_layout(struct raid_set *rs,
+ unsigned int algorithm,
+ unsigned int copies)
+{
+ unsigned int n = 1, f = 1, r = 0;
+
+ /*
+ * MD resilienece flaw:
+ *
+ * enabling use_far_sets for far/offset formats causes copies
+ * to be colocated on the same devs together with their origins!
+ *
+ * -> disable it for now in the definition above
+ */
+ if (algorithm == ALGORITHM_RAID10_DEFAULT ||
+ algorithm == ALGORITHM_RAID10_NEAR)
n = copies;
- else
+
+ else if (algorithm == ALGORITHM_RAID10_OFFSET) {
f = copies;
+ r = RAID10_OFFSET;
+ if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
+ r |= RAID10_USE_FAR_SETS;
- if (!strcasecmp("offset", format))
- return 0x30000 | (f << 8) | n;
+ } else if (algorithm == ALGORITHM_RAID10_FAR) {
+ f = copies;
+ r = !RAID10_OFFSET;
+ if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
+ r |= RAID10_USE_FAR_SETS;
- if (!strcasecmp("far", format))
- return 0x20000 | (f << 8) | n;
+ } else
+ return -EINVAL;
- return (f << 8) | n;
+ return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
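Worked examples of the encoding, assuming the RAID10_OFFSET/RAID10_USE_FAR_SETS bit positions defined earlier (bits 16 and 18): "near" with 2 copies gives (1 << 8) | 2 = 0x102, and "offset" with 2 copies plus use_far_sets gives 0x50201. A standalone check:

#include <assert.h>
#define RAID10_OFFSET		(1 << 16)
#define RAID10_USE_FAR_SETS	(1 << 18)
#define RAID10_FAR_COPIES_SHIFT	8
int main(void)
{
	/* "near", 2 copies: no offset/far-sets bits, f = 1, n = 2 */
	assert((0 | (1 << RAID10_FAR_COPIES_SHIFT) | 2) == 0x102);
	/* "offset", 2 copies, use_far_sets: f = 2, n = 1 */
	assert(((RAID10_OFFSET | RAID10_USE_FAR_SETS) |
		(2 << RAID10_FAR_COPIES_SHIFT) | 1) == 0x50201);
	return 0;
}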
+/* END: MD raid10 bit definitions and helpers */
-static struct raid_type *get_raid_type(char *name)
+/* Check for any of the raid10 algorithms */
+static bool __got_raid10(struct raid_type *rtp, const int layout)
{
- int i;
+ if (rtp->level == 10) {
+ switch (rtp->algorithm) {
+ case ALGORITHM_RAID10_DEFAULT:
+ case ALGORITHM_RAID10_NEAR:
+ return __is_raid10_near(layout);
+ case ALGORITHM_RAID10_OFFSET:
+ return __is_raid10_offset(layout);
+ case ALGORITHM_RAID10_FAR:
+ return __is_raid10_far(layout);
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
- for (i = 0; i < ARRAY_SIZE(raid_types); i++)
- if (!strcmp(raid_types[i].name, name))
- return &raid_types[i];
+/* Return raid_type for @name */
+static struct raid_type *get_raid_type(const char *name)
+{
+ struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);
+
+ while (rtp-- > raid_types)
+ if (!strcasecmp(rtp->name, name))
+ return rtp;
return NULL;
}
-static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
+/* Return raid_type derived from @level and @layout */
+static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
- unsigned i;
+ struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);
+
+ while (rtp-- > raid_types) {
+ /* RAID10 special checks based on @layout flags/properties */
+ if (rtp->level == level &&
+ (__got_raid10(rtp, layout) || rtp->algorithm == layout))
+ return rtp;
+ }
+
+ return NULL;
+}
+
+/*
+ * Conditionally change bdev capacity of @rs
+ * in case of a disk add/remove reshape
+ */
+static void rs_set_capacity(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+ struct md_rdev *rdev;
+ struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
+
+ /*
+ * raid10 sets rdev->sector to the device size, which
+ * is unintended in case of out-of-place reshaping
+ */
+ rdev_for_each(rdev, mddev)
+ rdev->sectors = mddev->dev_sectors;
+
+ set_capacity(gendisk, mddev->array_sectors);
+ revalidate_disk(gendisk);
+}
+
+/*
+ * Set the mddev properties in @rs to the current
+ * ones retrieved from the freshest superblock
+ */
+static void rs_set_cur(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+
+ mddev->new_level = mddev->level;
+ mddev->new_layout = mddev->layout;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+}
+
+/*
+ * Set the mddev properties in @rs to the new
+ * ones requested by the ctr
+ */
+static void rs_set_new(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+
+ mddev->level = mddev->new_level;
+ mddev->layout = mddev->new_layout;
+ mddev->chunk_sectors = mddev->new_chunk_sectors;
+ mddev->raid_disks = rs->raid_disks;
+ mddev->delta_disks = 0;
+}
+
+static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
+ unsigned int raid_devs)
+{
+ unsigned int i;
struct raid_set *rs;
if (raid_devs <= raid_type->parity_devs) {
@@ -164,15 +683,19 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
mddev_init(&rs->md);
+ rs->raid_disks = raid_devs;
+ rs->delta_disks = 0;
+
rs->ti = ti;
rs->raid_type = raid_type;
+ rs->stripe_cache_entries = 256;
rs->md.raid_disks = raid_devs;
rs->md.level = raid_type->level;
rs->md.new_level = rs->md.level;
rs->md.layout = raid_type->algorithm;
rs->md.new_layout = rs->md.layout;
rs->md.delta_disks = 0;
- rs->md.recovery_cp = 0;
+ rs->md.recovery_cp = MaxSector;
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
@@ -189,11 +712,11 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
return rs;
}
-static void context_free(struct raid_set *rs)
+static void raid_set_free(struct raid_set *rs)
{
int i;
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
if (rs->dev[i].meta_dev)
dm_put_device(rs->ti, rs->dev[i].meta_dev);
md_rdev_clear(&rs->dev[i].rdev);
@@ -218,16 +741,22 @@ static void context_free(struct raid_set *rs)
* <meta_dev> -
*
* This code parses those words. If there is a failure,
- * the caller must use context_free to unwind the operations.
+ * the caller must use raid_set_free() to unwind the operations.
*/
-static int dev_parms(struct raid_set *rs, char **argv)
+static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
int i;
int rebuild = 0;
int metadata_available = 0;
- int ret = 0;
+ int r = 0;
+ const char *arg;
- for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
+ /* Skip over the number of raid devices argument to get to the device pairs */
+ arg = dm_shift_arg(as);
+ if (!arg)
+ return -EINVAL;
+
+ for (i = 0; i < rs->raid_disks; i++) {
rs->dev[i].rdev.raid_disk = i;
rs->dev[i].meta_dev = NULL;
@@ -240,39 +769,49 @@ static int dev_parms(struct raid_set *rs, char **argv)
rs->dev[i].rdev.data_offset = 0;
rs->dev[i].rdev.mddev = &rs->md;
- if (strcmp(argv[0], "-")) {
- ret = dm_get_device(rs->ti, argv[0],
- dm_table_get_mode(rs->ti->table),
- &rs->dev[i].meta_dev);
- rs->ti->error = "RAID metadata device lookup failure";
- if (ret)
- return ret;
+ arg = dm_shift_arg(as);
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp(arg, "-")) {
+ r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
+ &rs->dev[i].meta_dev);
+ if (r) {
+ rs->ti->error = "RAID metadata device lookup failure";
+ return r;
+ }
rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
- if (!rs->dev[i].rdev.sb_page)
+ if (!rs->dev[i].rdev.sb_page) {
+ rs->ti->error = "Failed to allocate superblock page";
return -ENOMEM;
+ }
}
- if (!strcmp(argv[1], "-")) {
+ arg = dm_shift_arg(as);
+ if (!arg)
+ return -EINVAL;
+
+ if (!strcmp(arg, "-")) {
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
(!rs->dev[i].rdev.recovery_offset)) {
rs->ti->error = "Drive designated for rebuild not specified";
return -EINVAL;
}
- rs->ti->error = "No data device supplied with metadata device";
- if (rs->dev[i].meta_dev)
+ if (rs->dev[i].meta_dev) {
+ rs->ti->error = "No data device supplied with metadata device";
return -EINVAL;
+ }
continue;
}
- ret = dm_get_device(rs->ti, argv[1],
- dm_table_get_mode(rs->ti->table),
- &rs->dev[i].data_dev);
- if (ret) {
+ r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
+ &rs->dev[i].data_dev);
+ if (r) {
rs->ti->error = "RAID device lookup failure";
- return ret;
+ return r;
}
if (rs->dev[i].meta_dev) {
@@ -280,7 +819,7 @@ static int dev_parms(struct raid_set *rs, char **argv)
rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
}
rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
- list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
+ list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
rebuild++;
}
@@ -301,8 +840,7 @@ static int dev_parms(struct raid_set *rs, char **argv)
*
* User could specify 'nosync' option if desperate.
*/
- DMERR("Unable to rebuild drive while array is not in-sync");
- rs->ti->error = "RAID device lookup failure";
+ rs->ti->error = "Unable to rebuild drive while array is not in-sync";
return -EINVAL;
}
@@ -325,7 +863,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
if (!region_size) {
/*
- * Choose a reasonable default. All figures in sectors.
+ * Choose a reasonable default. All figures in sectors.
*/
if (min_region_size > (1 << 13)) {
/* If not a power of 2, make it the next power of 2 */
@@ -366,7 +904,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
/*
* Convert sectors to bytes.
*/
- rs->md.bitmap_info.chunksize = (region_size << 9);
+ rs->md.bitmap_info.chunksize = to_bytes(region_size);
return 0;
}
@@ -382,9 +920,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
*/
static int validate_raid_redundancy(struct raid_set *rs)
{
- unsigned i, rebuild_cnt = 0;
- unsigned rebuilds_per_group = 0, copies, d;
- unsigned group_size, last_group_start;
+ unsigned int i, rebuild_cnt = 0;
+ unsigned int rebuilds_per_group = 0, copies;
+ unsigned int group_size, last_group_start;
for (i = 0; i < rs->md.raid_disks; i++)
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
@@ -403,7 +941,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
goto too_many;
break;
case 10:
- copies = raid10_md_layout_to_copies(rs->md.layout);
+ copies = raid10_md_layout_to_copies(rs->md.new_layout);
if (rebuild_cnt < copies)
break;
@@ -417,17 +955,16 @@ static int validate_raid_redundancy(struct raid_set *rs)
* simple case where the number of devices is a multiple of the
* number of copies, we must also handle cases where the number
* of devices is not a multiple of the number of copies.
- * E.g. dev1 dev2 dev3 dev4 dev5
- * A A B B C
- * C D D E E
+ * E.g. dev1 dev2 dev3 dev4 dev5
+ * A A B B C
+ * C D D E E
*/
- if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
- for (i = 0; i < rs->md.raid_disks * copies; i++) {
+ if (__is_raid10_near(rs->md.new_layout)) {
+ for (i = 0; i < rs->md.raid_disks; i++) {
if (!(i % copies))
rebuilds_per_group = 0;
- d = i % rs->md.raid_disks;
- if ((!rs->dev[d].rdev.sb_page ||
- !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+ if ((!rs->dev[i].rdev.sb_page ||
+ !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
(++rebuilds_per_group >= copies))
goto too_many;
}
@@ -442,7 +979,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
* use the 'use_far_sets' variant.)
*
* This check is somewhat complicated by the need to account
- * for arrays that are not a multiple of (far) copies. This
+ * for arrays that are not a multiple of (far) copies. This
* results in the need to treat the last (potentially larger)
* set differently.
*/
@@ -475,42 +1012,48 @@ too_many:
*
* Argument definitions
* <chunk_size> The number of sectors per disk that
- * will form the "stripe"
+ * will form the "stripe"
* [[no]sync] Force or prevent recovery of the
- * entire array
+ * entire array
* [rebuild <idx>] Rebuild the drive indicated by the index
* [daemon_sleep <ms>] Time between bitmap daemon work to
- * clear bits
+ * clear bits
* [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
* [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
* [write_mostly <idx>] Indicate a write mostly drive via index
* [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
* [stripe_cache <sectors>] Stripe cache size for higher RAIDs
- * [region_size <sectors>] Defines granularity of bitmap
+ * [region_size <sectors>] Defines granularity of bitmap
*
* RAID10-only options:
- * [raid10_copies <# copies>] Number of copies. (Default: 2)
+ * [raid10_copies <# copies>] Number of copies. (Default: 2)
* [raid10_format <near|far|offset>] Layout algorithm. (Default: near)
*/
-static int parse_raid_params(struct raid_set *rs, char **argv,
- unsigned num_raid_params)
-{
- char *raid10_format = "near";
- unsigned raid10_copies = 2;
- unsigned i;
- unsigned long value, region_size = 0;
- sector_t sectors_per_dev = rs->ti->len;
+static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
+ unsigned int num_raid_params)
+{
+ int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
+ unsigned int raid10_copies = 2;
+ unsigned int i, write_mostly = 0;
+ unsigned int region_size = 0;
sector_t max_io_len;
- char *key;
+ const char *arg, *key;
+ struct raid_dev *rd;
+ struct raid_type *rt = rs->raid_type;
+
+ arg = dm_shift_arg(as);
+ num_raid_params--; /* Account for chunk_size argument */
+
+ if (!arg || kstrtoint(arg, 10, &value) < 0) {
+ rs->ti->error = "Bad numerical argument given for chunk_size";
+ return -EINVAL;
+ }
/*
* First, parse the in-order required arguments
* "chunk_size" is the only argument of this type.
*/
- if ((kstrtoul(argv[0], 10, &value) < 0)) {
- rs->ti->error = "Bad chunk size";
- return -EINVAL;
- } else if (rs->raid_type->level == 1) {
+ if (rt_is_raid1(rt)) {
if (value)
DMERR("Ignoring chunk size parameter for RAID 1");
value = 0;
@@ -523,8 +1066,6 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
- argv++;
- num_raid_params--;
/*
* We set each individual device as In_sync with a completed
@@ -532,18 +1073,18 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
* replacement then one of the following cases applies:
*
* 1) User specifies 'rebuild'.
- * - Device is reset when param is read.
+ * - Device is reset when param is read.
* 2) A new device is supplied.
- * - No matching superblock found, resets device.
+ * - No matching superblock found, resets device.
* 3) Device failure was transient and returns on reload.
- * - Failure noticed, resets device for bitmap replay.
+ * - Failure noticed, resets device for bitmap replay.
* 4) Device hadn't completed recovery after previous failure.
- * - Superblock is read and overrides recovery_offset.
+ * - Superblock is read and overrides recovery_offset.
*
* What is found in the superblocks of the devices is always
* authoritative, unless 'rebuild' or '[no]sync' was specified.
*/
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
set_bit(In_sync, &rs->dev[i].rdev.flags);
rs->dev[i].rdev.recovery_offset = MaxSector;
}
@@ -552,72 +1093,112 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
* Second, parse the unordered optional arguments
*/
for (i = 0; i < num_raid_params; i++) {
- if (!strcasecmp(argv[i], "nosync")) {
- rs->md.recovery_cp = MaxSector;
- rs->ctr_flags |= CTR_FLAG_NOSYNC;
+ key = dm_shift_arg(as);
+ if (!key) {
+ rs->ti->error = "Not enough raid parameters given";
+ return -EINVAL;
+ }
+
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
+ if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
+ rs->ti->error = "Only one 'nosync' argument allowed";
+ return -EINVAL;
+ }
continue;
}
- if (!strcasecmp(argv[i], "sync")) {
- rs->md.recovery_cp = 0;
- rs->ctr_flags |= CTR_FLAG_SYNC;
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
+ if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
+ rs->ti->error = "Only one 'sync' argument allowed";
+ return -EINVAL;
+ }
+ continue;
+ }
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
+ if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
+ rs->ti->error = "Only one 'raid10_use_new_sets' argument allowed";
+ return -EINVAL;
+ }
continue;
}
- /* The rest of the optional arguments come in key/value pairs */
- if ((i + 1) >= num_raid_params) {
+ arg = dm_shift_arg(as);
+ i++; /* Account for the argument pairs */
+ if (!arg) {
rs->ti->error = "Wrong number of raid parameters given";
return -EINVAL;
}
- key = argv[i++];
+ /*
+ * Parameters that take a string value are checked here.
+ */
- /* Parameters that take a string value are checked here. */
- if (!strcasecmp(key, "raid10_format")) {
- if (rs->raid_type->level != 10) {
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
+ if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
+ rs->ti->error = "Only one 'raid10_format' argument pair allowed";
+ return -EINVAL;
+ }
+ if (!rt_is_raid10(rt)) {
rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
return -EINVAL;
}
- if (strcmp("near", argv[i]) &&
- strcmp("far", argv[i]) &&
- strcmp("offset", argv[i])) {
+ raid10_format = raid10_name_to_format(arg);
+ if (raid10_format < 0) {
rs->ti->error = "Invalid 'raid10_format' value given";
- return -EINVAL;
+ return raid10_format;
}
- raid10_format = argv[i];
- rs->ctr_flags |= CTR_FLAG_RAID10_FORMAT;
continue;
}
- if (kstrtoul(argv[i], 10, &value) < 0) {
+ if (kstrtoint(arg, 10, &value) < 0) {
rs->ti->error = "Bad numerical argument given in raid params";
return -EINVAL;
}
- /* Parameters that take a numeric value are checked here */
- if (!strcasecmp(key, "rebuild")) {
- if (value >= rs->md.raid_disks) {
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
+ /*
+ * "rebuild" is being passed in by userspace to provide
+ * indexes of replaced devices and to set up additional
+ * devices on raid level takeover.
+ */
+ if (!__within_range(value, 0, rs->raid_disks - 1)) {
rs->ti->error = "Invalid rebuild index given";
return -EINVAL;
}
- clear_bit(In_sync, &rs->dev[value].rdev.flags);
- rs->dev[value].rdev.recovery_offset = 0;
- rs->ctr_flags |= CTR_FLAG_REBUILD;
- } else if (!strcasecmp(key, "write_mostly")) {
- if (rs->raid_type->level != 1) {
+
+ if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
+ rs->ti->error = "rebuild for this index already given";
+ return -EINVAL;
+ }
+
+ rd = rs->dev + value;
+ clear_bit(In_sync, &rd->rdev.flags);
+ clear_bit(Faulty, &rd->rdev.flags);
+ rd->rdev.recovery_offset = 0;
+ set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
+ if (!rt_is_raid1(rt)) {
rs->ti->error = "write_mostly option is only valid for RAID1";
return -EINVAL;
}
- if (value >= rs->md.raid_disks) {
- rs->ti->error = "Invalid write_mostly drive index given";
+
+ if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
+ rs->ti->error = "Invalid write_mostly index given";
return -EINVAL;
}
+
+ write_mostly++;
set_bit(WriteMostly, &rs->dev[value].rdev.flags);
- } else if (!strcasecmp(key, "max_write_behind")) {
- if (rs->raid_type->level != 1) {
+ set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
+ if (!rt_is_raid1(rt)) {
rs->ti->error = "max_write_behind option is only valid for RAID1";
return -EINVAL;
}
- rs->ctr_flags |= CTR_FLAG_MAX_WRITE_BEHIND;
+
+ if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
+ rs->ti->error = "Only one max_write_behind argument pair allowed";
+ return -EINVAL;
+ }
/*
* In device-mapper, we specify things in sectors, but
@@ -628,64 +1209,121 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "Max write-behind limit out of range";
return -EINVAL;
}
+
rs->md.bitmap_info.max_write_behind = value;
- } else if (!strcasecmp(key, "daemon_sleep")) {
- rs->ctr_flags |= CTR_FLAG_DAEMON_SLEEP;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
+ if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
+ rs->ti->error = "Only one daemon_sleep argument pair allowed";
+ return -EINVAL;
+ }
if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
rs->ti->error = "daemon sleep period out of range";
return -EINVAL;
}
rs->md.bitmap_info.daemon_sleep = value;
- } else if (!strcasecmp(key, "stripe_cache")) {
- rs->ctr_flags |= CTR_FLAG_STRIPE_CACHE;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
+ /* Userspace passes new data_offset after having extended the data image LV */
+ if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
+ rs->ti->error = "Only one data_offset argument pair allowed";
+ return -EINVAL;
+ }
+ /* Ensure sensible data offset */
+ if (value < 0 ||
+ (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
+ rs->ti->error = "Bogus data_offset value";
+ return -EINVAL;
+ }
+ rs->data_offset = value;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
+ /* Define the +/-# of disks to add to/remove from the given raid set */
+ if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
+ rs->ti->error = "Only one delta_disks argument pair allowed";
+ return -EINVAL;
+ }
+ /* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */
+ if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
+ rs->ti->error = "Too many delta_disk requested";
+ return -EINVAL;
+ }
- /*
- * In device-mapper, we specify things in sectors, but
- * MD records this value in kB
- */
- value /= 2;
+ rs->delta_disks = value;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
+ if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
+ rs->ti->error = "Only one stripe_cache argument pair allowed";
+ return -EINVAL;
+ }
- if ((rs->raid_type->level != 5) &&
- (rs->raid_type->level != 6)) {
+ if (!rt_is_raid456(rt)) {
rs->ti->error = "Inappropriate argument: stripe_cache";
return -EINVAL;
}
- if (raid5_set_cache_size(&rs->md, (int)value)) {
- rs->ti->error = "Bad stripe_cache size";
+
+ rs->stripe_cache_entries = value;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
+ if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
+ rs->ti->error = "Only one min_recovery_rate argument pair allowed";
return -EINVAL;
}
- } else if (!strcasecmp(key, "min_recovery_rate")) {
- rs->ctr_flags |= CTR_FLAG_MIN_RECOVERY_RATE;
if (value > INT_MAX) {
rs->ti->error = "min_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_min = (int)value;
- } else if (!strcasecmp(key, "max_recovery_rate")) {
- rs->ctr_flags |= CTR_FLAG_MAX_RECOVERY_RATE;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
+ if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
+ rs->ti->error = "Only one max_recovery_rate argument pair allowed";
+ return -EINVAL;
+ }
if (value > INT_MAX) {
rs->ti->error = "max_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_max = (int)value;
- } else if (!strcasecmp(key, "region_size")) {
- rs->ctr_flags |= CTR_FLAG_REGION_SIZE;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
+ if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
+ rs->ti->error = "Only one region_size argument pair allowed";
+ return -EINVAL;
+ }
+
region_size = value;
- } else if (!strcasecmp(key, "raid10_copies") &&
- (rs->raid_type->level == 10)) {
- if ((value < 2) || (value > 0xFF)) {
+ rs->requested_bitmap_chunk_sectors = value;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
+ if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
+ rs->ti->error = "Only one raid10_copies argument pair allowed";
+ return -EINVAL;
+ }
+
+ if (!__within_range(value, 2, rs->md.raid_disks)) {
rs->ti->error = "Bad value for 'raid10_copies'";
return -EINVAL;
}
- rs->ctr_flags |= CTR_FLAG_RAID10_COPIES;
+
raid10_copies = value;
} else {
DMERR("Unable to parse RAID parameter: %s", key);
- rs->ti->error = "Unable to parse RAID parameters";
+ rs->ti->error = "Unable to parse RAID parameter";
return -EINVAL;
}
}
+ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
+ test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
+ rs->ti->error = "sync and nosync are mutually exclusive";
+ return -EINVAL;
+ }
+
+ if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
+ (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
+ test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
+ rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
+ return -EINVAL;
+ }
+
+ if (write_mostly >= rs->md.raid_disks) {
+ rs->ti->error = "Can't set all raid1 devices to write_mostly";
+ return -EINVAL;
+ }
+
if (validate_region_size(rs, region_size))
return -EINVAL;
@@ -697,47 +1335,193 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
if (dm_set_target_max_io_len(rs->ti, max_io_len))
return -EINVAL;
- if (rs->raid_type->level == 10) {
+ if (rt_is_raid10(rt)) {
if (raid10_copies > rs->md.raid_disks) {
rs->ti->error = "Not enough devices to satisfy specification";
return -EINVAL;
}
- /*
- * If the format is not "near", we only support
- * two copies at the moment.
- */
- if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
- rs->ti->error = "Too many copies for given RAID10 format.";
+ rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
+ if (rs->md.new_layout < 0) {
+ rs->ti->error = "Error getting raid10 format";
+ return rs->md.new_layout;
+ }
+
+ rt = get_raid_type_by_ll(10, rs->md.new_layout);
+ if (!rt) {
+ rs->ti->error = "Failed to recognize new raid10 layout";
return -EINVAL;
}
- /* (Len * #mirrors) / #devices */
- sectors_per_dev = rs->ti->len * raid10_copies;
- sector_div(sectors_per_dev, rs->md.raid_disks);
-
- rs->md.layout = raid10_format_to_md_layout(raid10_format,
- raid10_copies);
- rs->md.new_layout = rs->md.layout;
- } else if ((!rs->raid_type->level || rs->raid_type->level > 1) &&
- sector_div(sectors_per_dev,
- (rs->md.raid_disks - rs->raid_type->parity_devs))) {
- rs->ti->error = "Target length not divisible by number of data devices";
- return -EINVAL;
+ if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
+ rt->algorithm == ALGORITHM_RAID10_NEAR) &&
+ test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
+ rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
+ return -EINVAL;
+ }
}
- rs->md.dev_sectors = sectors_per_dev;
+
+ rs->raid10_copies = raid10_copies;
/* Assume there are no metadata devices until the drives are parsed */
rs->md.persistent = 0;
rs->md.external = 1;
+ /* Check, if any invalid ctr arguments have been passed in for the raid level */
+ return rs_check_for_valid_flags(rs);
+}
+
+/* Set raid4/5/6 cache size */
+static int rs_set_raid456_stripe_cache(struct raid_set *rs)
+{
+ int r;
+ struct r5conf *conf;
+ struct mddev *mddev = &rs->md;
+ uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
+ uint32_t nr_stripes = rs->stripe_cache_entries;
+
+ if (!rt_is_raid456(rs->raid_type)) {
+ rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
+ return -EINVAL;
+ }
+
+ if (nr_stripes < min_stripes) {
+ DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
+ nr_stripes, min_stripes);
+ nr_stripes = min_stripes;
+ }
+
+ conf = mddev->private;
+ if (!conf) {
+ rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
+ return -EINVAL;
+ }
+
+ /* Try setting number of stripes in raid456 stripe cache */
+ if (conf->min_nr_stripes != nr_stripes) {
+ r = raid5_set_cache_size(mddev, nr_stripes);
+ if (r) {
+ rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
+ return r;
+ }
+
+ DMINFO("%u stripe cache entries", nr_stripes);
+ }
+
return 0;
}
+/* Return # of data stripes as kept in mddev as of @rs (i.e. as of superblock) */
+static unsigned int mddev_data_stripes(struct raid_set *rs)
+{
+ return rs->md.raid_disks - rs->raid_type->parity_devs;
+}
+
+/* Return # of data stripes of @rs (i.e. as of ctr) */
+static unsigned int rs_data_stripes(struct raid_set *rs)
+{
+ return rs->raid_disks - rs->raid_type->parity_devs;
+}
+
+/* Calculate the sectors per device and per array used for @rs */
+static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
+{
+ int delta_disks;
+ unsigned int data_stripes;
+ struct mddev *mddev = &rs->md;
+ struct md_rdev *rdev;
+ sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
+
+ if (use_mddev) {
+ delta_disks = mddev->delta_disks;
+ data_stripes = mddev_data_stripes(rs);
+ } else {
+ delta_disks = rs->delta_disks;
+ data_stripes = rs_data_stripes(rs);
+ }
+
+ /* Special raid1 case w/o delta_disks support (yet) */
+ if (rt_is_raid1(rs->raid_type))
+ ;
+ else if (rt_is_raid10(rs->raid_type)) {
+ if (rs->raid10_copies < 2 ||
+ delta_disks < 0) {
+ rs->ti->error = "Bogus raid10 data copies or delta disks";
+ return -EINVAL;
+ }
+
+ dev_sectors *= rs->raid10_copies;
+ if (sector_div(dev_sectors, data_stripes))
+ goto bad;
+
+ array_sectors = (data_stripes + delta_disks) * dev_sectors;
+ if (sector_div(array_sectors, rs->raid10_copies))
+ goto bad;
+
+ } else if (sector_div(dev_sectors, data_stripes))
+ goto bad;
+
+ else
+ /* Striped layouts */
+ array_sectors = (data_stripes + delta_disks) * dev_sectors;
+
+ rdev_for_each(rdev, mddev)
+ rdev->sectors = dev_sectors;
+
+ mddev->array_sectors = array_sectors;
+ mddev->dev_sectors = dev_sectors;
+
+ return 0;
+bad:
+ rs->ti->error = "Target length not divisible by number of data devices";
+ return -EINVAL;
+}
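Two worked examples of the sizing logic above (a user-space sketch; sector_div() is modelled as plain integer division, which matches its semantics for these values):

#include <assert.h>
#include <stdint.h>
int main(void)
{
	uint64_t len, dev_sectors, array_sectors;

	/* raid5, 4 disks, 1 parity -> 3 data stripes; target length 3000 */
	len = 3000;
	dev_sectors = len / 3;			/* 1000 per device */
	array_sectors = 3 * dev_sectors;	/* 3000 exposed */
	assert(array_sectors == len);

	/* raid10, 4 disks, 2 copies; target length 2000 */
	len = 2000;
	dev_sectors = len * 2 / 4;		/* 1000 per device */
	array_sectors = 4 * dev_sectors / 2;	/* 2000 exposed */
	assert(array_sectors == len);
	return 0;
}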
+
+/* Setup recovery on @rs */
+static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+{
+ /* raid0 does not recover */
+ if (rs_is_raid0(rs))
+ rs->md.recovery_cp = MaxSector;
+ /*
+ * A raid6 set has to be recovered either
+ * completely or for the grown part to
+ * ensure proper parity and Q-Syndrome
+ */
+ else if (rs_is_raid6(rs))
+ rs->md.recovery_cp = dev_sectors;
+ /*
+ * Other raid set types may skip recovery
+ * depending on the 'nosync' flag.
+ */
+ else
+ rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
+ ? MaxSector : dev_sectors;
+}
+
+/* Setup recovery on @rs based on raid type, device size and 'nosync' flag */
+static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+{
+ if (!dev_sectors)
+ /* New raid set or 'sync' flag provided */
+ __rs_setup_recovery(rs, 0);
+ else if (dev_sectors == MaxSector)
+ /* Prevent recovery */
+ __rs_setup_recovery(rs, MaxSector);
+ else if (rs->dev[0].rdev.sectors < dev_sectors)
+ /* Grown raid set */
+ __rs_setup_recovery(rs, rs->dev[0].rdev.sectors);
+ else
+ __rs_setup_recovery(rs, MaxSector);
+}
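The resulting recovery_cp choices reduce to a small decision function; a user-space sketch that mirrors __rs_setup_recovery(), under the assumption that MaxSector acts as the "no recovery" sentinel:

#include <assert.h>
#include <stdint.h>
#define MaxSector ((uint64_t)~0ULL)
static uint64_t recovery_cp(int is_raid0, int is_raid6, int nosync, uint64_t dev_sectors)
{
	if (is_raid0)
		return MaxSector;		/* raid0 never recovers */
	if (is_raid6)
		return dev_sectors;		/* raid6 must recover the grown part */
	return nosync ? MaxSector : dev_sectors;
}
int main(void)
{
	assert(recovery_cp(1, 0, 0, 0) == MaxSector);	/* raid0 */
	assert(recovery_cp(0, 0, 0, 0) == 0);		/* new set: full sync */
	assert(recovery_cp(0, 0, 1, 0) == MaxSector);	/* 'nosync' skips it */
	return 0;
}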
+
static void do_table_event(struct work_struct *ws)
{
struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
+ smp_rmb(); /* Make sure we access the most recent mddev properties */
+ if (!rs_is_reshaping(rs))
+ rs_set_capacity(rs);
dm_table_event(rs->ti->table);
}
@@ -749,19 +1533,225 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
}
/*
+ * Make sure a valid takeover (level switch) is being requested on @rs
+ *
+ * Conversions of raid sets from one MD personality to another
+ * have to conform to restrictions which are enforced here.
+ */
+static int rs_check_takeover(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+ unsigned int near_copies;
+
+ if (rs->md.degraded) {
+ rs->ti->error = "Can't takeover degraded raid set";
+ return -EPERM;
+ }
+
+ if (rs_is_reshaping(rs)) {
+ rs->ti->error = "Can't takeover reshaping raid set";
+ return -EPERM;
+ }
+
+ switch (mddev->level) {
+ case 0:
+ /* raid0 -> raid1/5 with one disk */
+ if ((mddev->new_level == 1 || mddev->new_level == 5) &&
+ mddev->raid_disks == 1)
+ return 0;
+
+ /* raid0 -> raid10 */
+ if (mddev->new_level == 10 &&
+ !(rs->raid_disks % mddev->raid_disks))
+ return 0;
+
+ /* raid0 with multiple disks -> raid4/5/6 */
+ if (__within_range(mddev->new_level, 4, 6) &&
+ mddev->new_layout == ALGORITHM_PARITY_N &&
+ mddev->raid_disks > 1)
+ return 0;
+
+ break;
+
+ case 10:
+ /* Can't takeover raid10_offset! */
+ if (__is_raid10_offset(mddev->layout))
+ break;
+
+ near_copies = __raid10_near_copies(mddev->layout);
+
+ /* raid10* -> raid0 */
+ if (mddev->new_level == 0) {
+ /* Can takeover raid10_near with raid disks divisible by data copies! */
+ if (near_copies > 1 &&
+ !(mddev->raid_disks % near_copies)) {
+ mddev->raid_disks /= near_copies;
+ mddev->delta_disks = mddev->raid_disks;
+ return 0;
+ }
+
+ /* Can takeover raid10_far */
+ if (near_copies == 1 &&
+ __raid10_far_copies(mddev->layout) > 1)
+ return 0;
+
+ break;
+ }
+
+ /* raid10_{near,far} -> raid1 */
+ if (mddev->new_level == 1 &&
+ max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
+ return 0;
+
+ /* raid10_{near,far} with 2 disks -> raid4/5 */
+ if (__within_range(mddev->new_level, 4, 5) &&
+ mddev->raid_disks == 2)
+ return 0;
+ break;
+
+ case 1:
+ /* raid1 with 2 disks -> raid4/5 */
+ if (__within_range(mddev->new_level, 4, 5) &&
+ mddev->raid_disks == 2) {
+ mddev->degraded = 1;
+ return 0;
+ }
+
+ /* raid1 -> raid0 */
+ if (mddev->new_level == 0 &&
+ mddev->raid_disks == 1)
+ return 0;
+
+ /* raid1 -> raid10 */
+ if (mddev->new_level == 10)
+ return 0;
+ break;
+
+ case 4:
+ /* raid4 -> raid0 */
+ if (mddev->new_level == 0)
+ return 0;
+
+ /* raid4 -> raid1/5 with 2 disks */
+ if ((mddev->new_level == 1 || mddev->new_level == 5) &&
+ mddev->raid_disks == 2)
+ return 0;
+
+ /* raid4 -> raid5/6 with parity N */
+ if (__within_range(mddev->new_level, 5, 6) &&
+ mddev->layout == ALGORITHM_PARITY_N)
+ return 0;
+ break;
+
+ case 5:
+ /* raid5 with parity N -> raid0 */
+ if (mddev->new_level == 0 &&
+ mddev->layout == ALGORITHM_PARITY_N)
+ return 0;
+
+ /* raid5 with parity N -> raid4 */
+ if (mddev->new_level == 4 &&
+ mddev->layout == ALGORITHM_PARITY_N)
+ return 0;
+
+ /* raid5 with 2 disks -> raid1/4/10 */
+ if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
+ mddev->raid_disks == 2)
+ return 0;
+
+ /* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
+ if (mddev->new_level == 6 &&
+ ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
+ __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
+ return 0;
+ break;
+
+ case 6:
+ /* raid6 with parity N -> raid0 */
+ if (mddev->new_level == 0 &&
+ mddev->layout == ALGORITHM_PARITY_N)
+ return 0;
+
+ /* raid6 with parity N -> raid4 */
+ if (mddev->new_level == 4 &&
+ mddev->layout == ALGORITHM_PARITY_N)
+ return 0;
+
+ /* raid6_*_n with Q-Syndrome N -> raid5_* */
+ if (mddev->new_level == 5 &&
+ ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
+ __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
+ return 0;
+
+ default:
+ break;
+ }
+
+ rs->ti->error = "takeover not possible";
+ return -EINVAL;
+}
+
+/* True if @rs requested to be taken over */
+static bool rs_takeover_requested(struct raid_set *rs)
+{
+ return rs->md.new_level != rs->md.level;
+}
+
+/* True if @rs is requested to reshape by ctr */
+static bool rs_reshape_requested(struct raid_set *rs)
+{
+ bool change;
+ struct mddev *mddev = &rs->md;
+
+ if (rs_takeover_requested(rs))
+ return false;
+
+ if (!mddev->level)
+ return false;
+
+ change = mddev->new_layout != mddev->layout ||
+ mddev->new_chunk_sectors != mddev->chunk_sectors ||
+ rs->delta_disks;
+
+ /* Historical case to support raid1 reshape without delta disks */
+ if (mddev->level == 1) {
+ if (rs->delta_disks)
+ return !!rs->delta_disks;
+
+ return !change &&
+ mddev->raid_disks != rs->raid_disks;
+ }
+
+ if (mddev->level == 10)
+ return change &&
+ !__is_raid10_far(mddev->new_layout) &&
+ rs->delta_disks >= 0;
+
+ return change;
+}
+
+/* Features */
+#define FEATURE_FLAG_SUPPORTS_V190 0x1 /* Supports extended superblock */
+
+/* State flags for sb->flags */
+#define SB_FLAG_RESHAPE_ACTIVE 0x1
+#define SB_FLAG_RESHAPE_BACKWARDS 0x2
+
+/*
* This structure is never routinely used by userspace, unlike md superblocks.
* Devices with this superblock should only ever be accessed via device-mapper.
*/
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
__le32 magic; /* "DmRd" */
- __le32 features; /* Used to indicate possible future changes */
+ __le32 compat_features; /* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */
- __le32 num_devices; /* Number of devices in this array. (Max 64) */
- __le32 array_position; /* The position of this drive in the array */
+ __le32 num_devices; /* Number of devices in this raid set. (Max 64) */
+ __le32 array_position; /* The position of this drive in the raid set */
__le64 events; /* Incremented by md when superblock updated */
- __le64 failed_devices; /* Bit field of devices to indicate failures */
+ __le64 failed_devices; /* Pre 1.9.0 part of bit field of devices to */
+ /* indicate failures (see extension below) */
/*
* This offset tracks the progress of the repair or replacement of
@@ -770,21 +1760,95 @@ struct dm_raid_superblock {
__le64 disk_recovery_offset;
/*
- * This offset tracks the progress of the initial array
+ * This offset tracks the progress of the initial raid set
* synchronisation/parity calculation.
*/
__le64 array_resync_offset;
/*
- * RAID characteristics
+ * raid characteristics
*/
__le32 level;
__le32 layout;
__le32 stripe_sectors;
- /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
+ /********************************************************************
+ * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
+ *
+ * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that these exist
+ */
+
+ __le32 flags; /* Flags defining array states for reshaping */
+
+ /*
+ * This offset tracks the progress of a raid
+ * set reshape in order to be able to restart it
+ */
+ __le64 reshape_position;
+
+ /*
+ * These define the properties of the array in case of an interrupted reshape
+ */
+ __le32 new_level;
+ __le32 new_layout;
+ __le32 new_stripe_sectors;
+ __le32 delta_disks;
+
+ __le64 array_sectors; /* Array size in sectors */
+
+ /*
+ * Sector offsets to data on devices (reshaping).
+ * Needed to support out of place reshaping, thus
+ * not writing over any stripes whilst converting
+ * them from old to new layout
+ */
+ __le64 data_offset;
+ __le64 new_data_offset;
+
+ __le64 sectors; /* Used device size in sectors */
+
+ /*
+ * Additional bit field of devices indicating failures to support
+ * up to 256 devices with the 1.9.0 on-disk metadata format
+ */
+ __le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];
+
+ __le32 incompat_features; /* Used to indicate any incompatible features */
+
+ /* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */
} __packed;
+/*
+ * Check for reshape constraints on raid set @rs:
+ *
+ * - reshape function non-existent
+ * - degraded set
+ * - ongoing recovery
+ * - ongoing reshape
+ *
+ * Returns 0 if no constraint applies, or -EPERM if one does,
+ * with the error message set in rs->ti->error
+ */
+static int rs_check_reshape(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+
+ if (!mddev->pers || !mddev->pers->check_reshape)
+ rs->ti->error = "Reshape not supported";
+ else if (mddev->degraded)
+ rs->ti->error = "Can't reshape degraded raid set";
+ else if (rs_is_recovering(rs))
+ rs->ti->error = "Convert request on recovering raid set prohibited";
+ else if (rs_is_reshaping(rs))
+ rs->ti->error = "raid set already reshaping!";
+ else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
+ rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
+ else
+ return 0;
+
+ return -EPERM;
+}
+
static int read_disk_sb(struct md_rdev *rdev, int size)
{
BUG_ON(!rdev->sb_page);
@@ -792,7 +1856,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
if (rdev->sb_loaded)
return 0;
- if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
DMERR("Failed to read superblock of device at position %d",
rdev->raid_disk);
md_error(rdev->mddev, rdev);
@@ -804,31 +1868,67 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
return 0;
}
+static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
+{
+ failed_devices[0] = le64_to_cpu(sb->failed_devices);
+ memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));
+
+ if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
+ int i = ARRAY_SIZE(sb->extended_failed_devices);
+
+ while (i--)
+ failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
+ }
+}
+
+static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
+{
+ int i = ARRAY_SIZE(sb->extended_failed_devices);
+
+ sb->failed_devices = cpu_to_le64(failed_devices[0]);
+ while (i--)
+ sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
+}
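Viewed as one flat bit field, device i maps to 64-bit word i/64, bit i%64; word 0 is stored in sb->failed_devices and words 1..(DISKS_ARRAY_ELEMS - 1) in extended_failed_devices[]. A user-space sketch of the mapping:

#include <assert.h>
#include <stdint.h>
int main(void)
{
	uint64_t failed[4] = { 0 };	/* word 0 = sb->failed_devices, 1..3 = extended */
	int i = 100;			/* example: device index 100 has failed */

	failed[i / 64] |= 1ULL << (i % 64);
	assert(failed[1] == 1ULL << 36);
	return 0;
}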
+
+/*
+ * Synchronize the superblock members with the raid set properties
+ *
+ * All superblock data is little endian.
+ */
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
- int i;
- uint64_t failed_devices;
+ bool update_failed_devices = false;
+ unsigned int i;
+ uint64_t failed_devices[DISKS_ARRAY_ELEMS];
struct dm_raid_superblock *sb;
struct raid_set *rs = container_of(mddev, struct raid_set, md);
+ /* No metadata device, no superblock */
+ if (!rdev->meta_bdev)
+ return;
+
+ BUG_ON(!rdev->sb_page);
+
sb = page_address(rdev->sb_page);
- failed_devices = le64_to_cpu(sb->failed_devices);
- for (i = 0; i < mddev->raid_disks; i++)
- if (!rs->dev[i].data_dev ||
- test_bit(Faulty, &(rs->dev[i].rdev.flags)))
- failed_devices |= (1ULL << i);
+ sb_retrieve_failed_devices(sb, failed_devices);
- memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
+ for (i = 0; i < rs->raid_disks; i++)
+ if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
+ update_failed_devices = true;
+ set_bit(i, (void *) failed_devices);
+ }
+
+ if (update_failed_devices)
+ sb_update_failed_devices(sb, failed_devices);
sb->magic = cpu_to_le32(DM_RAID_MAGIC);
- sb->features = cpu_to_le32(0); /* No features yet */
+ sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
sb->num_devices = cpu_to_le32(mddev->raid_disks);
sb->array_position = cpu_to_le32(rdev->raid_disk);
sb->events = cpu_to_le64(mddev->events);
- sb->failed_devices = cpu_to_le64(failed_devices);
sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
@@ -836,6 +1936,33 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
+
+ sb->new_level = cpu_to_le32(mddev->new_level);
+ sb->new_layout = cpu_to_le32(mddev->new_layout);
+ sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
+
+ sb->delta_disks = cpu_to_le32(mddev->delta_disks);
+
+ smp_rmb(); /* Make sure we access most recent reshape position */
+ sb->reshape_position = cpu_to_le64(mddev->reshape_position);
+ if (le64_to_cpu(sb->reshape_position) != MaxSector) {
+ /* Flag ongoing reshape */
+ sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);
+
+ if (mddev->delta_disks < 0 || mddev->reshape_backwards)
+ sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
+ } else {
+ /* Clear reshape flags */
+ sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
+ }
+
+ sb->array_sectors = cpu_to_le64(mddev->array_sectors);
+ sb->data_offset = cpu_to_le64(rdev->data_offset);
+ sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
+ sb->sectors = cpu_to_le64(rdev->sectors);
+
+ /* Zero out the rest of the payload after the size of the superblock */
+ memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
}
/*
@@ -848,7 +1975,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
*/
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
- int ret;
+ int r;
struct dm_raid_superblock *sb;
struct dm_raid_superblock *refsb;
uint64_t events_sb, events_refsb;
@@ -860,9 +1987,9 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
return -EINVAL;
}
- ret = read_disk_sb(rdev, rdev->sb_size);
- if (ret)
- return ret;
+ r = read_disk_sb(rdev, rdev->sb_size);
+ if (r)
+ return r;
sb = page_address(rdev->sb_page);
@@ -876,6 +2003,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
super_sync(rdev->mddev, rdev);
set_bit(FirstUse, &rdev->flags);
+ sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
/* Force writing of superblocks to disk */
set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
@@ -895,129 +2023,212 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
return (events_sb > events_refsb) ? 1 : 0;
}
-static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
+static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
{
int role;
- struct raid_set *rs = container_of(mddev, struct raid_set, md);
+ unsigned int d;
+ struct mddev *mddev = &rs->md;
uint64_t events_sb;
- uint64_t failed_devices;
+ uint64_t failed_devices[DISKS_ARRAY_ELEMS];
struct dm_raid_superblock *sb;
- uint32_t new_devs = 0;
- uint32_t rebuilds = 0;
+ uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
struct md_rdev *r;
struct dm_raid_superblock *sb2;
sb = page_address(rdev->sb_page);
events_sb = le64_to_cpu(sb->events);
- failed_devices = le64_to_cpu(sb->failed_devices);
/*
* Initialise to 1 if this is a new superblock.
*/
mddev->events = events_sb ? : 1;
+ mddev->reshape_position = MaxSector;
+
/*
- * Reshaping is not currently allowed
+ * Reshaping is supported, i.e. reshape_position is valid
+ * in the superblock and the superblock content is authoritative.
*/
- if (le32_to_cpu(sb->level) != mddev->level) {
- DMERR("Reshaping arrays not yet supported. (RAID level change)");
- return -EINVAL;
- }
- if (le32_to_cpu(sb->layout) != mddev->layout) {
- DMERR("Reshaping arrays not yet supported. (RAID layout change)");
- DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
- DMERR(" Old layout: %s w/ %d copies",
- raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
- raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
- DMERR(" New layout: %s w/ %d copies",
- raid10_md_layout_to_format(mddev->layout),
- raid10_md_layout_to_copies(mddev->layout));
- return -EINVAL;
- }
- if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
- DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
- return -EINVAL;
- }
+ if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
+ /* Superblock is authoritative wrt given raid set layout! */
+ mddev->raid_disks = le32_to_cpu(sb->num_devices);
+ mddev->level = le32_to_cpu(sb->level);
+ mddev->layout = le32_to_cpu(sb->layout);
+ mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
+ mddev->new_level = le32_to_cpu(sb->new_level);
+ mddev->new_layout = le32_to_cpu(sb->new_layout);
+ mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
+ mddev->delta_disks = le32_to_cpu(sb->delta_disks);
+ mddev->array_sectors = le64_to_cpu(sb->array_sectors);
+
+ /* raid was reshaping and got interrupted */
+ if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
+ if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
+ DMERR("Reshape requested but raid set is still reshaping");
+ return -EINVAL;
+ }
- /* We can only change the number of devices in RAID1 right now */
- if ((rs->raid_type->level != 1) &&
- (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
- DMERR("Reshaping arrays not yet supported. (device count change)");
- return -EINVAL;
+ if (mddev->delta_disks < 0 ||
+ (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
+ mddev->reshape_backwards = 1;
+ else
+ mddev->reshape_backwards = 0;
+
+ mddev->reshape_position = le64_to_cpu(sb->reshape_position);
+ rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
+ }
+
+ } else {
+ /*
+ * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
+ */
+ if (le32_to_cpu(sb->level) != mddev->level) {
+ DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
+ return -EINVAL;
+ }
+ if (le32_to_cpu(sb->layout) != mddev->layout) {
+ DMERR("Reshaping raid sets not yet supported. (raid layout change)");
+ DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
+ DMERR(" Old layout: %s w/ %d copies",
+ raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
+ raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
+ DMERR(" New layout: %s w/ %d copies",
+ raid10_md_layout_to_format(mddev->layout),
+ raid10_md_layout_to_copies(mddev->layout));
+ return -EINVAL;
+ }
+ if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+ DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
+ return -EINVAL;
+ }
+
+ /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */
+ if (!rt_is_raid1(rs->raid_type) &&
+ (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
+ DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)",
+ sb->num_devices, mddev->raid_disks);
+ return -EINVAL;
+ }
+
+ /* Table line is checked vs. authoritative superblock */
+ rs_set_new(rs);
}
- if (!(rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)))
+ if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
/*
* During load, we set FirstUse if a new superblock was written.
* There are two reasons we might not have a superblock:
- * 1) The array is brand new - in which case, all of the
- * devices must have their In_sync bit set. Also,
+ * 1) The raid set is brand new - in which case, all of the
+ * devices must have their In_sync bit set. Also,
* recovery_cp must be 0, unless forced.
- * 2) This is a new device being added to an old array
+ * 2) This is a new device being added to an old raid set
* and the new device needs to be rebuilt - in which
* case the In_sync bit will /not/ be set and
* recovery_cp must be MaxSector.
+ * 3) This is/are a new device(s) being added to an old
+ * raid set during takeover to a higher raid level
+ * to provide capacity for redundancy or during reshape
+ * to add capacity to grow the raid set.
*/
+ d = 0;
rdev_for_each(r, mddev) {
+ if (test_bit(FirstUse, &r->flags))
+ new_devs++;
+
if (!test_bit(In_sync, &r->flags)) {
- DMINFO("Device %d specified for rebuild: "
- "Clearing superblock", r->raid_disk);
+ DMINFO("Device %d specified for rebuild; clearing superblock",
+ r->raid_disk);
rebuilds++;
- } else if (test_bit(FirstUse, &r->flags))
- new_devs++;
+
+ if (test_bit(FirstUse, &r->flags))
+ rebuild_and_new++;
+ }
+
+ d++;
}
- if (!rebuilds) {
- if (new_devs == mddev->raid_disks) {
- DMINFO("Superblocks created for new array");
+ if (new_devs == rs->raid_disks || !rebuilds) {
+ /* Replace a broken device */
+ if (new_devs == 1 && !rs->delta_disks)
+ ;
+ if (new_devs == rs->raid_disks) {
+ DMINFO("Superblocks created for new raid set");
set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
- } else if (new_devs) {
- DMERR("New device injected "
- "into existing array without 'rebuild' "
- "parameter specified");
+ } else if (new_devs != rebuilds &&
+ new_devs != rs->delta_disks) {
+ DMERR("New device injected into existing raid set without "
+ "'delta_disks' or 'rebuild' parameter specified");
return -EINVAL;
}
- } else if (new_devs) {
- DMERR("'rebuild' devices cannot be "
- "injected into an array with other first-time devices");
- return -EINVAL;
- } else if (mddev->recovery_cp != MaxSector) {
- DMERR("'rebuild' specified while array is not in-sync");
+ } else if (new_devs && new_devs != rebuilds) {
+ DMERR("%u 'rebuild' devices cannot be injected into"
+ " a raid set with %u other first-time devices",
+ rebuilds, new_devs);
return -EINVAL;
+ } else if (rebuilds) {
+ if (rebuild_and_new && rebuilds != rebuild_and_new) {
+ DMERR("new device%s provided without 'rebuild'",
+ new_devs > 1 ? "s" : "");
+ return -EINVAL;
+ } else if (rs_is_recovering(rs)) {
+ DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
+ (unsigned long long) mddev->recovery_cp);
+ return -EINVAL;
+ } else if (rs_is_reshaping(rs)) {
+ DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
+ (unsigned long long) mddev->reshape_position);
+ return -EINVAL;
+ }
}
/*
* Now we set the Faulty bit for those devices that are
* recorded in the superblock as failed.
*/
+ sb_retrieve_failed_devices(sb, failed_devices);
rdev_for_each(r, mddev) {
if (!r->sb_page)
continue;
sb2 = page_address(r->sb_page);
sb2->failed_devices = 0;
+ memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));
/*
* Check for any device re-ordering.
*/
if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
role = le32_to_cpu(sb2->array_position);
+ if (role < 0)
+ continue;
+
if (role != r->raid_disk) {
- if (rs->raid_type->level != 1) {
- rs->ti->error = "Cannot change device "
- "positions in RAID array";
+ if (__is_raid10_near(mddev->layout)) {
+ if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
+ rs->raid_disks % rs->raid10_copies) {
+ rs->ti->error =
+ "Cannot change raid10 near set to odd # of devices!";
+ return -EINVAL;
+ }
+
+ sb2->array_position = cpu_to_le32(r->raid_disk);
+
+ } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
+ !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
+ !rt_is_raid1(rs->raid_type)) {
+ rs->ti->error = "Cannot change device positions in raid set";
return -EINVAL;
}
- DMINFO("RAID1 device #%d now at position #%d",
- role, r->raid_disk);
+
+ DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
}
/*
* Partial recovery is performed on
* returning failed devices.
*/
- if (failed_devices & (1 << role))
+ if (test_bit(role, (void *) failed_devices))
set_bit(Faulty, &r->flags);
}
}
@@ -1028,41 +2239,60 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
struct mddev *mddev = &rs->md;
- struct dm_raid_superblock *sb = page_address(rdev->sb_page);
+ struct dm_raid_superblock *sb;
+
+ if (rs_is_raid0(rs) || !rdev->sb_page)
+ return 0;
+
+ sb = page_address(rdev->sb_page);
/*
* If mddev->events is not set, we know we have not yet initialized
* the array.
*/
- if (!mddev->events && super_init_validation(mddev, rdev))
+ if (!mddev->events && super_init_validation(rs, rdev))
return -EINVAL;
- if (le32_to_cpu(sb->features)) {
- rs->ti->error = "Unable to assemble array: No feature flags supported yet";
+ if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
+ rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
+ return -EINVAL;
+ }
+
+ if (sb->incompat_features) {
+ rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
return -EINVAL;
}
/* Enable bitmap creation for RAID levels != 0 */
- mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
+ mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
- if (!test_bit(FirstUse, &rdev->flags)) {
+ if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
+ /* Retrieve device size stored in superblock to be prepared for shrink */
+ rdev->sectors = le64_to_cpu(sb->sectors);
rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
- if (rdev->recovery_offset != MaxSector)
- clear_bit(In_sync, &rdev->flags);
+ if (rdev->recovery_offset == MaxSector)
+ set_bit(In_sync, &rdev->flags);
+ /*
+ * If no reshape in progress -> we're recovering single
+ * disk(s) and have to set the device(s) to out-of-sync
+ */
+ else if (!rs_is_reshaping(rs))
+ clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
}
/*
* If a device comes back, set it as not In_sync and no longer faulty.
*/
- if (test_bit(Faulty, &rdev->flags)) {
- clear_bit(Faulty, &rdev->flags);
+ if (test_and_clear_bit(Faulty, &rdev->flags)) {
+ rdev->recovery_offset = 0;
clear_bit(In_sync, &rdev->flags);
rdev->saved_raid_disk = rdev->raid_disk;
- rdev->recovery_offset = 0;
}
- clear_bit(FirstUse, &rdev->flags);
+ /* Reshape support -> restore respective data offsets */
+ rdev->data_offset = le64_to_cpu(sb->data_offset);
+ rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
return 0;
}
@@ -1072,7 +2302,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
*/
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
- int ret;
+ int r;
struct raid_dev *dev;
struct md_rdev *rdev, *tmp, *freshest;
struct mddev *mddev = &rs->md;
@@ -1082,24 +2312,22 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
/*
* Skipping super_load due to CTR_FLAG_SYNC will cause
* the array to undergo initialization again as
- * though it were new. This is the intended effect
+ * though it were new. This is the intended effect
* of the "sync" directive.
*
* When reshaping capability is added, we must ensure
* that the "sync" directive is disallowed during the
* reshape.
*/
- rdev->sectors = to_sector(i_size_read(rdev->bdev->bd_inode));
-
- if (rs->ctr_flags & CTR_FLAG_SYNC)
+ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
continue;
if (!rdev->meta_bdev)
continue;
- ret = super_load(rdev, freshest);
+ r = super_load(rdev, freshest);
- switch (ret) {
+ switch (r) {
case 1:
freshest = rdev;
break;
@@ -1148,25 +2376,336 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
* Validation of the freshest device provides the source of
* validation for the remaining devices.
*/
- ti->error = "Unable to assemble array: Invalid superblocks";
+ rs->ti->error = "Unable to assemble array: Invalid superblocks";
if (super_validate(rs, freshest))
return -EINVAL;
rdev_for_each(rdev, mddev)
if ((rdev != freshest) && super_validate(rs, rdev))
return -EINVAL;
+ return 0;
+}
+
+/*
+ * Adjust data_offset and new_data_offset on all disk members of @rs
+ * for out of place reshaping if requested by contructor
+ *
+ * We need free space at the beginning of each raid disk for forward
+ * and at the end for backward reshapes which userspace has to provide
+ * via remapping/reordering of space.
+ */
+static int rs_adjust_data_offsets(struct raid_set *rs)
+{
+ sector_t data_offset = 0, new_data_offset = 0;
+ struct md_rdev *rdev;
+
+ /* Constructor did not request data offset change */
+ if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
+ if (!rs_is_reshapable(rs))
+ goto out;
+
+ return 0;
+ }
+
+ /* HM FIXME: get InSync raid_dev? */
+ rdev = &rs->dev[0].rdev;
+
+ if (rs->delta_disks < 0) {
+ /*
+ * Removing disks (reshaping backwards):
+ *
+ * - before reshape: data is at offset 0 and free space
+ * is at the end of each component LV
+ *
+ * - after reshape: data is at offset rs->data_offset != 0 on each component LV
+ */
+ data_offset = 0;
+ new_data_offset = rs->data_offset;
+
+ } else if (rs->delta_disks > 0) {
+ /*
+ * Adding disks (reshaping forwards):
+ *
+ * - before reshape: data is at offset rs->data_offset != 0 and
+ * free space is at the beginning of each component LV
+ *
+ * - after reshape: data is at offset 0 on each component LV
+ */
+ data_offset = rs->data_offset;
+ new_data_offset = 0;
+
+ } else {
+ /*
+ * User space passes in 0 for data offset after having removed reshape space
+ *
+ * - or - (data offset != 0)
+ *
+ * Changing RAID layout or chunk size -> toggle offsets
+ *
+ * - before reshape: data is at offset rs->data_offset == 0 and
+ * free space is at the end of each component LV
+ * -or-
+ * data is at offset rs->data_offset != 0 and
+ * free space is at the beginning of each component LV
+ *
+ * - after reshape: data is at offset 0 if it was at offset != 0
+ * or at offset != 0 if it was at offset 0
+ * on each component LV
+ *
+ */
+ data_offset = rs->data_offset ? rdev->data_offset : 0;
+ new_data_offset = data_offset ? 0 : rs->data_offset;
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ }
+
+ /*
+ * Make sure we got a minimum amount of free sectors per device
+ */
+ if (rs->data_offset &&
+ to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) {
+ rs->ti->error = data_offset ? "No space for forward reshape" :
+ "No space for backward reshape";
+ return -ENOSPC;
+ }
+out:
+ /* Adjust data offsets on all rdevs */
+ rdev_for_each(rdev, &rs->md) {
+ rdev->data_offset = data_offset;
+ rdev->new_data_offset = new_data_offset;
+ }
+
+ return 0;
+}
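A worked example of the cases above: growing a set constructed with "data_offset 2048" leaves the current data at sector 2048 of each component and reshapes forward so it ends up at sector 0; shrinking does the reverse (data at 0, new_data_offset 2048, backward reshape); a pure layout/chunk-size change merely toggles between the two offsets based on the current rdev->data_offset.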
+
+/* Userspace reordered disks -> adjust raid_disk indexes in @rs */
+static void __reorder_raid_disk_indexes(struct raid_set *rs)
+{
+ int i = 0;
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, &rs->md) {
+ rdev->raid_disk = i++;
+ rdev->saved_raid_disk = rdev->new_raid_disk = -1;
+ }
+}
+
+/*
+ * Setup @rs for takeover by a different raid level
+ */
+static int rs_setup_takeover(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+ struct md_rdev *rdev;
+ unsigned int d = mddev->raid_disks = rs->raid_disks;
+ sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
+
+ if (rt_is_raid10(rs->raid_type)) {
+ if (mddev->level == 0) {
+ /* Userspace reordered disks -> adjust raid_disk indexes */
+ __reorder_raid_disk_indexes(rs);
+
+ /* raid0 -> raid10_far layout */
+ mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
+ rs->raid10_copies);
+ } else if (mddev->level == 1)
+ /* raid1 -> raid10_near layout */
+ mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
+ rs->raid_disks);
+ else
+ return -EINVAL;
+
+ }
+
+ clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+ mddev->recovery_cp = MaxSector;
+
+ while (d--) {
+ rdev = &rs->dev[d].rdev;
+
+ if (test_bit(d, (void *) rs->rebuild_disks)) {
+ clear_bit(In_sync, &rdev->flags);
+ clear_bit(Faulty, &rdev->flags);
+ mddev->recovery_cp = rdev->recovery_offset = 0;
+ /* Bitmap has to be created when we do an "up" takeover */
+ set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+ }
+
+ rdev->new_data_offset = new_data_offset;
+ }
+
+ return 0;
+}
+
+/* Prepare @rs for reshape */
+static int rs_prepare_reshape(struct raid_set *rs)
+{
+ bool reshape;
+ struct mddev *mddev = &rs->md;
+
+ if (rs_is_raid10(rs)) {
+ if (rs->raid_disks != mddev->raid_disks &&
+ __is_raid10_near(mddev->layout) &&
+ rs->raid10_copies &&
+ rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
+ /*
+ * raid disks have to be a multiple of data copies to allow this conversion.
+ *
+ * This is actually not a reshape; it is a
+ * rebuild of any additional mirrors per group
+ */
+ if (rs->raid_disks % rs->raid10_copies) {
+ rs->ti->error = "Can't reshape raid10 mirror groups";
+ return -EINVAL;
+ }
+
+ /* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
+ __reorder_raid_disk_indexes(rs);
+ mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
+ rs->raid10_copies);
+ mddev->new_layout = mddev->layout;
+ reshape = false;
+ } else
+ reshape = true;
+
+ } else if (rs_is_raid456(rs))
+ reshape = true;
+
+ else if (rs_is_raid1(rs)) {
+ if (rs->delta_disks) {
+ /* Process raid1 via delta_disks */
+ mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
+ reshape = true;
+ } else {
+ /* Process raid1 without delta_disks */
+ mddev->raid_disks = rs->raid_disks;
+ set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
+ reshape = false;
+ }
+ } else {
+ rs->ti->error = "Called with bogus raid type";
+ return -EINVAL;
+ }
+
+ if (reshape) {
+ set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
+ } else if (mddev->raid_disks < rs->raid_disks)
+ /* Create new superblocks and bitmaps, if any new disks */
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
return 0;
}
/*
+ * A reshape of @rs can:
+ * - change raid layout
+ * - change chunk size
+ * - add disks
+ * - remove disks
+ */
+static int rs_setup_reshape(struct raid_set *rs)
+{
+ int r = 0;
+ unsigned int cur_raid_devs, d;
+ struct mddev *mddev = &rs->md;
+ struct md_rdev *rdev;
+
+ mddev->delta_disks = rs->delta_disks;
+ cur_raid_devs = mddev->raid_disks;
+
+ /* Ignore impossible layout change whilst adding/removing disks */
+ if (mddev->delta_disks &&
+ mddev->layout != mddev->new_layout) {
+ DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
+ mddev->new_layout = mddev->layout;
+ }
+
+ /*
+ * Adjust array size:
+ *
+ * - in case of adding disks, array size has
+ * to grow after the disk adding reshape,
+ * which'll happen in the event handler;
+ * reshape will happen forward, so space has to
+ * be available at the beginning of each disk
+ *
+ * - in case of removing disks, array size
+ * has to shrink before starting the reshape,
+ * which'll happen here;
+ * reshape will happen backward, so space has to
+ * be available at the end of each disk
+ *
+ * - data_offset and new_data_offset are
+ * adjusted for aforementioned out of place
+ * reshaping based on userspace passing in
+ * the "data_offset <sectors>" key/value
+ * pair via the constructor
+ */
+
+ /* Add disk(s) */
+ if (rs->delta_disks > 0) {
+ /* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */
+ for (d = cur_raid_devs; d < rs->raid_disks; d++) {
+ rdev = &rs->dev[d].rdev;
+ clear_bit(In_sync, &rdev->flags);
+
+ /*
+ * saved_raid_disk needs to be -1, or recovery_offset will be set to 0
+ * by md, which'll store that erroneously in the superblock on reshape
+ */
+ rdev->saved_raid_disk = -1;
+ rdev->raid_disk = d;
+
+ rdev->sectors = mddev->dev_sectors;
+ rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
+ }
+
+ mddev->reshape_backwards = 0; /* adding disks -> forward reshape */
+
+ /* Remove disk(s) */
+ } else if (rs->delta_disks < 0) {
+ r = rs_set_dev_and_array_sectors(rs, true);
+ mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
+
+ /* Change layout and/or chunk size */
+ } else {
+ /*
+ * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size:
+ *
+ * keeping the number of disks constant while changing layout ->
+ *
+ * toggle reshape_backwards depending on data_offset:
+ *
+ * - free space upfront -> reshape forward
+ *
+ * - free space at the end -> reshape backward
+ *
+ *
+ * This utilizes free reshape space, avoiding the need
+ * for userspace to move (parts of) LV segments in
+ * case of a layout/chunksize change. For disk
+ * adding/removing, reshape space has to be at
+ * the proper position (see above with delta_disks):
+ *
+ * add disk(s) -> begin
+ * remove disk(s)-> end
+ */
+ mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
+ }
+
+ return r;
+}
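Concretely: delta_disks = +2 leaves array growth to the event handler and reshapes forward into free space at the start of each component; delta_disks = -2 shrinks the array here first and reshapes backward into free space at the end; delta_disks = 0 (e.g. raid5_ls -> raid5_n) derives the direction from where the constructor placed the free reshape space.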
+
+/*
* Enable/disable discard support on RAID set depending on
* RAID level and discard properties of underlying RAID members.
*/
-static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
+static void configure_discard_support(struct raid_set *rs)
{
int i;
bool raid456;
+ struct dm_target *ti = rs->ti;
/* Assume discards not supported until after checks below. */
ti->discards_supported = false;
@@ -1174,7 +2713,7 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
struct request_queue *q;
if (!rs->dev[i].rdev.bdev)
@@ -1207,118 +2746,252 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
}
/*
- * Construct a RAID4/5/6 mapping:
+ * Construct a RAID0/1/10/4/5/6 mapping:
* Args:
- * <raid_type> <#raid_params> <raid_params> \
- * <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
+ * <raid_type> <#raid_params> <raid_params>{0,} \
+ * <#raid_devs> [<meta_dev1> <dev1>]{1,}
*
- * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
+ * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
* details on possible <raid_params>.
+ *
+ * Userspace is free to initialize the metadata devices, hence the superblocks, to
+ * enforce recreation based on the passed-in table parameters.
+ *
*/
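For illustration, a hypothetical table line matching this grammar (the sizes and device paths are placeholders) might be:

	0 41943040 raid raid5_ls 3 64 region_size 8192 \
	    3 /dev/meta0 /dev/data0 /dev/meta1 /dev/data1 /dev/meta2 /dev/data2

i.e. three raid parameters (the mandatory chunk size 64 plus the region_size key/value pair) followed by three <meta_dev data_dev> tuples.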
-static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
- int ret;
+ int r;
+ bool resize;
struct raid_type *rt;
- unsigned long num_raid_params, num_raid_devs;
+ unsigned int num_raid_params, num_raid_devs;
+ sector_t calculated_dev_sectors;
struct raid_set *rs = NULL;
-
- /* Must have at least <raid_type> <#raid_params> */
- if (argc < 2) {
- ti->error = "Too few arguments";
+ const char *arg;
+ struct rs_layout rs_layout;
+ struct dm_arg_set as = { argc, argv }, as_nrd;
+ struct dm_arg _args[] = {
+ { 0, as.argc, "Cannot understand number of raid parameters" },
+ { 1, 254, "Cannot understand number of raid devices parameters" }
+ };
+
+ /* Must have <raid_type> */
+ arg = dm_shift_arg(&as);
+ if (!arg) {
+ ti->error = "No arguments";
return -EINVAL;
}
- /* raid type */
- rt = get_raid_type(argv[0]);
+ rt = get_raid_type(arg);
if (!rt) {
ti->error = "Unrecognised raid_type";
return -EINVAL;
}
- argc--;
- argv++;
-
- /* number of RAID parameters */
- if (kstrtoul(argv[0], 10, &num_raid_params) < 0) {
- ti->error = "Cannot understand number of RAID parameters";
- return -EINVAL;
- }
- argc--;
- argv++;
- /* Skip over RAID params for now and find out # of devices */
- if (num_raid_params >= argc) {
- ti->error = "Arguments do not agree with counts given";
+ /* Must have <#raid_params> */
+ if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
return -EINVAL;
- }
- if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
- (num_raid_devs > MAX_RAID_DEVICES)) {
- ti->error = "Cannot understand number of raid devices";
+ /* number of raid device tuples <meta_dev data_dev> */
+ as_nrd = as;
+ dm_consume_args(&as_nrd, num_raid_params);
+ _args[1].max = (as_nrd.argc - 1) / 2;
+ if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
return -EINVAL;
- }
- argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
- if (argc != (num_raid_devs * 2)) {
- ti->error = "Supplied RAID devices does not match the count given";
+ if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
+ ti->error = "Invalid number of supplied raid devices";
return -EINVAL;
}
- rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
+ rs = raid_set_alloc(ti, rt, num_raid_devs);
if (IS_ERR(rs))
return PTR_ERR(rs);
- ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
- if (ret)
+ r = parse_raid_params(rs, &as, num_raid_params);
+ if (r)
goto bad;
- argv += num_raid_params + 1;
-
- ret = dev_parms(rs, argv);
- if (ret)
+ r = parse_dev_params(rs, &as);
+ if (r)
goto bad;
rs->md.sync_super = super_sync;
- ret = analyse_superblocks(ti, rs);
- if (ret)
+
+ /*
+ * Calculate the ctr requested array and device sizes to allow
+ * for superblock analysis, which needs the device sizes defined.
+ *
+ * Any existing superblock will overwrite the array and device sizes.
+ */
+ r = rs_set_dev_and_array_sectors(rs, false);
+ if (r)
+ goto bad;
+
+ calculated_dev_sectors = rs->dev[0].rdev.sectors;
+
+ /*
+ * Backup any new raid set level, layout, ...
+ * requested to be able to compare to superblock
+ * members for conversion decisions.
+ */
+ rs_config_backup(rs, &rs_layout);
+
+ r = analyse_superblocks(ti, rs);
+ if (r)
goto bad;
+ resize = calculated_dev_sectors != rs->dev[0].rdev.sectors;
+
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
ti->num_flush_bios = 1;
+ /* Restore any requested new layout for conversion decision */
+ rs_config_restore(rs, &rs_layout);
+
/*
- * Disable/enable discard support on RAID set.
+ * Now that we have any superblock metadata available,
+ * check in sequence whether the raid set is new, recovering,
+ * reshaping, to be taken over, to be reshaped, or an
+ * existing, unchanged raid set to run.
*/
- configure_discard_support(ti, rs);
+ if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
+ /* A new raid6 set has to be recovered to ensure proper parity and Q-Syndrome */
+ if (rs_is_raid6(rs) &&
+ test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
+ ti->error = "'nosync' not allowed for new raid6 set";
+ r = -EINVAL;
+ goto bad;
+ }
+ rs_setup_recovery(rs, 0);
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ rs_set_new(rs);
+ } else if (rs_is_recovering(rs)) {
+ /* A recovering raid set may be resized */
+ ; /* skip setup rs */
+ } else if (rs_is_reshaping(rs)) {
+ /* Have to reject size change request during reshape */
+ if (resize) {
+ ti->error = "Can't resize a reshaping raid set";
+ r = -EPERM;
+ goto bad;
+ }
+ /* skip setup rs */
+ } else if (rs_takeover_requested(rs)) {
+ if (rs_is_reshaping(rs)) {
+ ti->error = "Can't takeover a reshaping raid set";
+ r = -EPERM;
+ goto bad;
+ }
+
+ /*
+ * If a takeover is needed, userspace sets any additional
+ * devices to rebuild and we can check for a valid request here.
+ *
+ * If acceptable, set the level to the new requested
+ * one, prohibit requesting recovery, allow the raid
+ * set to run and store superblocks during resume.
+ */
+ r = rs_check_takeover(rs);
+ if (r)
+ goto bad;
+
+ r = rs_setup_takeover(rs);
+ if (r)
+ goto bad;
+
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags);
+ /* Takeover ain't recovery, so disable recovery */
+ rs_setup_recovery(rs, MaxSector);
+ rs_set_new(rs);
+ } else if (rs_reshape_requested(rs)) {
+ /*
+ * We can only prepare for a reshape here, because the
+ * raid set needs to run to provide the respective reshape
+ * check functions via its MD personality instance.
+ *
+ * So do the reshape check after md_run() succeeded.
+ */
+ r = rs_prepare_reshape(rs);
+ if (r)
+ goto bad;
+
+ /* Reshaping ain't recovery, so disable recovery */
+ rs_setup_recovery(rs, MaxSector);
+ rs_set_cur(rs);
+ } else {
+ /* May not set recovery when a device rebuild is requested */
+ if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+ rs_setup_recovery(rs, MaxSector);
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ } else
+ rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
+ 0 : (resize ? calculated_dev_sectors : MaxSector));
+ rs_set_cur(rs);
+ }
+
+ /* If constructor requested it, change data and new_data offsets */
+ r = rs_adjust_data_offsets(rs);
+ if (r)
+ goto bad;
+
+ /* Start raid set read-only and assumed clean to change in raid_resume() */
+ rs->md.ro = 1;
+ rs->md.in_sync = 1;
+ set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
/* Has to be held on running the array */
mddev_lock_nointr(&rs->md);
- ret = md_run(&rs->md);
+ r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
- mddev_unlock(&rs->md);
- if (ret) {
- ti->error = "Fail to run raid array";
+ if (r) {
+ ti->error = "Failed to run raid array";
+ mddev_unlock(&rs->md);
goto bad;
}
- if (ti->len != rs->md.array_sectors) {
- ti->error = "Array size does not match requested target length";
- ret = -EINVAL;
- goto size_mismatch;
- }
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
mddev_suspend(&rs->md);
+
+ /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
+ if (rs_is_raid456(rs)) {
+ r = rs_set_raid456_stripe_cache(rs);
+ if (r)
+ goto bad_stripe_cache;
+ }
+
+ /* Now do an early reshape check */
+ if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
+ r = rs_check_reshape(rs);
+ if (r)
+ goto bad_check_reshape;
+
+ /* Restore new, ctr requested layout to perform check */
+ rs_config_restore(rs, &rs_layout);
+
+ if (rs->md.pers->start_reshape) {
+ r = rs->md.pers->check_reshape(&rs->md);
+ if (r) {
+ ti->error = "Reshape check failed";
+ goto bad_check_reshape;
+ }
+ }
+ }
+
+ mddev_unlock(&rs->md);
return 0;
-size_mismatch:
+bad_stripe_cache:
+bad_check_reshape:
md_stop(&rs->md);
bad:
- context_free(rs);
+ raid_set_free(rs);
- return ret;
+ return r;
}
static void raid_dtr(struct dm_target *ti)
@@ -1327,7 +3000,7 @@ static void raid_dtr(struct dm_target *ti)
list_del_init(&rs->callbacks.list);
md_stop(&rs->md);
- context_free(rs);
+ raid_set_free(rs);
}
static int raid_map(struct dm_target *ti, struct bio *bio)
@@ -1335,11 +3008,23 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
+ /*
+ * If we're reshaping to add disk(s), ti->len and
+ * mddev->array_sectors will differ during the process
+ * (ti->len > mddev->array_sectors), so we have to requeue
+ * bios with addresses > mddev->array_sectors here or
+ * accesses past the EOD of the component data images
+ * would occur, erroring the raid set.
+ */
+ if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
+ return DM_MAPIO_REQUEUE;
+
mddev->pers->make_request(mddev, bio);
return DM_MAPIO_SUBMITTED;
}
+/* Return string describing the current sync action of @mddev */
static const char *decipher_sync_action(struct mddev *mddev)
{
if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
@@ -1365,195 +3050,260 @@ static const char *decipher_sync_action(struct mddev *mddev)
return "idle";
}
-static void raid_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+/*
+ * Return status string for @rdev
+ *
+ * Status characters:
+ *
+ * 'D' = Dead/Failed device
+ * 'a' = Alive but not in-sync
+ * 'A' = Alive and in-sync
+ */
+static const char *__raid_dev_status(struct md_rdev *rdev, bool array_in_sync)
{
- struct raid_set *rs = ti->private;
- unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
- unsigned sz = 0;
- int i, array_in_sync = 0;
- sector_t sync;
+ if (test_bit(Faulty, &rdev->flags))
+ return "D";
+ else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
+ return "a";
+ else
+ return "A";
+}
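With the fields emitted further below, a STATUSTYPE_INFO line for a three-device raid5 recovering one leg could read (values illustrative only):

	raid5_ls 3 AAa 10240000/41943040 recover 0 2048

i.e. raid type, device count, the per-device health characters above, in-sync ratio, sync action, mismatch count and data offset.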
- switch (type) {
- case STATUSTYPE_INFO:
- DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
+/* Helper to return resync/reshape progress for @rs and @array_in_sync */
+static sector_t rs_get_progress(struct raid_set *rs,
+ sector_t resync_max_sectors, bool *array_in_sync)
+{
+ sector_t r, recovery_cp, curr_resync_completed;
+ struct mddev *mddev = &rs->md;
- if (rs->raid_type->level) {
- if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
- sync = rs->md.curr_resync_completed;
- else
- sync = rs->md.recovery_cp;
-
- if (sync >= rs->md.resync_max_sectors) {
- /*
- * Sync complete.
- */
- array_in_sync = 1;
- sync = rs->md.resync_max_sectors;
- } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
- /*
- * If "check" or "repair" is occurring, the array has
- * undergone and initial sync and the health characters
- * should not be 'a' anymore.
- */
- array_in_sync = 1;
+ curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
+ recovery_cp = mddev->recovery_cp;
+ *array_in_sync = false;
+
+ if (rs_is_raid0(rs)) {
+ r = resync_max_sectors;
+ *array_in_sync = true;
+
+ } else {
+ r = mddev->reshape_position;
+
+ /* Reshape is relative to the array size */
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
+ r != MaxSector) {
+ if (r == MaxSector) {
+ *array_in_sync = true;
+ r = resync_max_sectors;
} else {
- /*
- * The array may be doing an initial sync, or it may
- * be rebuilding individual components. If all the
- * devices are In_sync, then it is the array that is
- * being initialized.
- */
- for (i = 0; i < rs->md.raid_disks; i++)
- if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
- array_in_sync = 1;
+ /* Got to reverse on backward reshape */
+ if (mddev->reshape_backwards)
+ r = mddev->array_sectors - r;
+
+ /* Divide by # of data stripes */
+ sector_div(r, mddev_data_stripes(rs));
}
+
+ /* Sync is relative to the component device size */
+ } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ r = curr_resync_completed;
+ else
+ r = recovery_cp;
+
+ if (r == MaxSector) {
+ /*
+ * Sync complete.
+ */
+ *array_in_sync = true;
+ r = resync_max_sectors;
+ } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ /*
+ * If "check" or "repair" is occurring, the raid set has
+ * undergone an initial sync and the health characters
+ * should not be 'a' anymore.
+ */
+ *array_in_sync = true;
} else {
- /* RAID0 */
- array_in_sync = 1;
- sync = rs->md.resync_max_sectors;
- }
+ struct md_rdev *rdev;
- /*
- * Status characters:
- * 'D' = Dead/Failed device
- * 'a' = Alive but not in-sync
- * 'A' = Alive and in-sync
- */
- for (i = 0; i < rs->md.raid_disks; i++) {
- if (test_bit(Faulty, &rs->dev[i].rdev.flags))
- DMEMIT("D");
- else if (!array_in_sync ||
- !test_bit(In_sync, &rs->dev[i].rdev.flags))
- DMEMIT("a");
- else
- DMEMIT("A");
+ /*
+ * The raid set may be doing an initial sync, or it may
+ * be rebuilding individual components. If all the
+ * devices are In_sync, then it is the raid set that is
+ * being initialized.
+ */
+ rdev_for_each(rdev, mddev)
+ if (!test_bit(In_sync, &rdev->flags))
+ *array_in_sync = true;
+#if 0
+ r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
+#endif
}
+ }
+
+ return r;
+}
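For example, during a forward reshape of a six-device raid6 (four data stripes) with reshape_position at 40960 array sectors, the function reports 40960 / 4 = 10240 sectors of progress against the per-device resync_max_sectors.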
+
+/* Helper to return @dev name or "-" if !@dev */
+static const char *__get_dev_name(struct dm_dev *dev)
+{
+ return dev ? dev->name : "-";
+}
+
+static void raid_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct raid_set *rs = ti->private;
+ struct mddev *mddev = &rs->md;
+ struct r5conf *conf = mddev->private;
+ int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
+ bool array_in_sync;
+ unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
+ unsigned int sz = 0;
+ unsigned int rebuild_disks;
+ unsigned int write_mostly_params = 0;
+ sector_t progress, resync_max_sectors, resync_mismatches;
+ const char *sync_action;
+ struct raid_type *rt;
+ struct md_rdev *rdev;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ /* *Should* always succeed */
+ rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
+ if (!rt)
+ return;
+
+ DMEMIT("%s %d ", rt->name, mddev->raid_disks);
+
+ /* Access most recent mddev properties for status output */
+ smp_rmb();
+ /* Get sensible max sectors even if raid set not yet started */
+ resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
+ mddev->resync_max_sectors : mddev->dev_sectors;
+ progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
+ resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
+ atomic64_read(&mddev->resync_mismatches) : 0;
+ sync_action = decipher_sync_action(&rs->md);
+
+ /* HM FIXME: do we want another state char for raid0? It shows 'D' or 'A' now */
+ rdev_for_each(rdev, mddev)
+ DMEMIT(__raid_dev_status(rdev, array_in_sync));
/*
- * In-sync ratio:
+ * In-sync/Reshape ratio:
* The in-sync ratio shows the progress of:
- * - Initializing the array
- * - Rebuilding a subset of devices of the array
+ * - Initializing the raid set
+ * - Rebuilding a subset of devices of the raid set
* The user can distinguish between the two by referring
* to the status characters.
+ *
+ * The reshape ratio shows the progress of
+ * changing the raid layout or the number of
+ * disks of a raid set
*/
- DMEMIT(" %llu/%llu",
- (unsigned long long) sync,
- (unsigned long long) rs->md.resync_max_sectors);
+ DMEMIT(" %llu/%llu", (unsigned long long) progress,
+ (unsigned long long) resync_max_sectors);
/*
+ * v1.5.0+:
+ *
* Sync action:
- * See Documentation/device-mapper/dm-raid.c for
+ * See Documentation/device-mapper/dm-raid.txt for
* information on each of these states.
*/
- DMEMIT(" %s", decipher_sync_action(&rs->md));
+ DMEMIT(" %s", sync_action);
/*
+ * v1.5.0+:
+ *
* resync_mismatches/mismatch_cnt
* This field shows the number of discrepancies found when
- * performing a "check" of the array.
+ * performing a "check" of the raid set.
*/
- DMEMIT(" %llu",
- (strcmp(rs->md.last_sync_action, "check")) ? 0 :
- (unsigned long long)
- atomic64_read(&rs->md.resync_mismatches));
- break;
- case STATUSTYPE_TABLE:
- /* The string you would use to construct this array */
- for (i = 0; i < rs->md.raid_disks; i++) {
- if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
- rs->dev[i].data_dev &&
- !test_bit(In_sync, &rs->dev[i].rdev.flags))
- raid_param_cnt += 2; /* for rebuilds */
- if (rs->dev[i].data_dev &&
- test_bit(WriteMostly, &rs->dev[i].rdev.flags))
- raid_param_cnt += 2;
- }
-
- raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2);
- if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
- raid_param_cnt--;
-
- DMEMIT("%s %u %u", rs->raid_type->name,
- raid_param_cnt, rs->md.chunk_sectors);
-
- if ((rs->ctr_flags & CTR_FLAG_SYNC) &&
- (rs->md.recovery_cp == MaxSector))
- DMEMIT(" sync");
- if (rs->ctr_flags & CTR_FLAG_NOSYNC)
- DMEMIT(" nosync");
-
- for (i = 0; i < rs->md.raid_disks; i++)
- if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
- rs->dev[i].data_dev &&
- !test_bit(In_sync, &rs->dev[i].rdev.flags))
- DMEMIT(" rebuild %u", i);
-
- if (rs->ctr_flags & CTR_FLAG_DAEMON_SLEEP)
- DMEMIT(" daemon_sleep %lu",
- rs->md.bitmap_info.daemon_sleep);
-
- if (rs->ctr_flags & CTR_FLAG_MIN_RECOVERY_RATE)
- DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
-
- if (rs->ctr_flags & CTR_FLAG_MAX_RECOVERY_RATE)
- DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
-
- for (i = 0; i < rs->md.raid_disks; i++)
- if (rs->dev[i].data_dev &&
- test_bit(WriteMostly, &rs->dev[i].rdev.flags))
- DMEMIT(" write_mostly %u", i);
-
- if (rs->ctr_flags & CTR_FLAG_MAX_WRITE_BEHIND)
- DMEMIT(" max_write_behind %lu",
- rs->md.bitmap_info.max_write_behind);
-
- if (rs->ctr_flags & CTR_FLAG_STRIPE_CACHE) {
- struct r5conf *conf = rs->md.private;
-
- /* convert from kiB to sectors */
- DMEMIT(" stripe_cache %d",
- conf ? conf->max_nr_stripes * 2 : 0);
- }
-
- if (rs->ctr_flags & CTR_FLAG_REGION_SIZE)
- DMEMIT(" region_size %lu",
- rs->md.bitmap_info.chunksize >> 9);
-
- if (rs->ctr_flags & CTR_FLAG_RAID10_COPIES)
- DMEMIT(" raid10_copies %u",
- raid10_md_layout_to_copies(rs->md.layout));
-
- if (rs->ctr_flags & CTR_FLAG_RAID10_FORMAT)
- DMEMIT(" raid10_format %s",
- raid10_md_layout_to_format(rs->md.layout));
+ DMEMIT(" %llu", (unsigned long long) resync_mismatches);
- DMEMIT(" %d", rs->md.raid_disks);
- for (i = 0; i < rs->md.raid_disks; i++) {
- if (rs->dev[i].meta_dev)
- DMEMIT(" %s", rs->dev[i].meta_dev->name);
- else
- DMEMIT(" -");
+ /*
+ * v1.9.0+:
+ *
+ * data_offset (needed for out of place reshaping)
+ * This field shows the data offset into the data
+ * image LV where the first stripe's data starts.
+ *
+ * We keep data_offset equal on all raid disks of the set,
+ * so retrieving it from the first raid disk is sufficient.
+ */
+ DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
+ break;
- if (rs->dev[i].data_dev)
- DMEMIT(" %s", rs->dev[i].data_dev->name);
- else
- DMEMIT(" -");
- }
+ case STATUSTYPE_TABLE:
+ /* Report the table line string you would use to construct this raid set */
+
+ /* Calculate raid parameter count */
+ for (i = 0; i < rs->raid_disks; i++)
+ if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ write_mostly_params += 2;
+ rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks));
+ raid_param_cnt += rebuild_disks * 2 +
+ write_mostly_params +
+ hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
+ hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
+ /* Emit table line */
+ DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
+ if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
+ DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
+ raid10_md_layout_to_format(mddev->layout));
+ if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
+ raid10_md_layout_to_copies(mddev->layout));
+ if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
+ DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
+ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
+ DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
+ if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
+ DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
+ (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
+ if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
+ DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
+ (unsigned long long) rs->data_offset);
+ if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
+ DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
+ mddev->bitmap_info.daemon_sleep);
+ if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
+ max(rs->delta_disks, mddev->delta_disks));
+ if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
+ max_nr_stripes);
+ if (rebuild_disks)
+ for (i = 0; i < rs->raid_disks; i++)
+ if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks))
+ DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
+ rs->dev[i].rdev.raid_disk);
+ if (write_mostly_params)
+ for (i = 0; i < rs->raid_disks; i++)
+ if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
+ rs->dev[i].rdev.raid_disk);
+ if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
+ DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
+ mddev->bitmap_info.max_write_behind);
+ if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
+ mddev->sync_speed_max);
+ if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
+ mddev->sync_speed_min);
+ DMEMIT(" %d", rs->raid_disks);
+ for (i = 0; i < rs->raid_disks; i++)
+ DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
+ __get_dev_name(rs->dev[i].data_dev));
}
}
-static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
+static int raid_message(struct dm_target *ti, unsigned int argc, char **argv)
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
- if (!strcasecmp(argv[0], "reshape")) {
- DMERR("Reshape not supported.");
- return -EINVAL;
- }
-
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
@@ -1571,11 +3321,10 @@ static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return -EBUSY;
else if (!strcasecmp(argv[0], "resync"))
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- else if (!strcasecmp(argv[0], "recover")) {
+ ; /* MD_RECOVERY_NEEDED set below */
+ else if (!strcasecmp(argv[0], "recover"))
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- } else {
+ else {
if (!strcasecmp(argv[0], "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
else if (!!strcasecmp(argv[0], "repair"))
@@ -1588,11 +3337,11 @@ static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
* canceling read-auto mode
*/
mddev->ro = 0;
- if (!mddev->suspended)
+ if (!mddev->suspended && mddev->sync_thread)
md_wakeup_thread(mddev->sync_thread);
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (!mddev->suspended)
+ if (!mddev->suspended && mddev->thread)
md_wakeup_thread(mddev->thread);
return 0;
@@ -1602,28 +3351,27 @@ static int raid_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct raid_set *rs = ti->private;
- unsigned i;
- int ret = 0;
+ unsigned int i;
+ int r = 0;
- for (i = 0; !ret && i < rs->md.raid_disks; i++)
+ for (i = 0; !r && i < rs->md.raid_disks; i++)
if (rs->dev[i].data_dev)
- ret = fn(ti,
+ r = fn(ti,
rs->dev[i].data_dev,
0, /* No offset on data devs */
rs->md.dev_sectors,
data);
- return ret;
+ return r;
}
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct raid_set *rs = ti->private;
- unsigned chunk_size = rs->md.chunk_sectors << 9;
- struct r5conf *conf = rs->md.private;
+ unsigned int chunk_size = to_bytes(rs->md.chunk_sectors);
blk_limits_io_min(limits, chunk_size);
- blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
+ blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
}
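For a six-device raid6 with 128-sector (64 KiB) chunks this advertises io_min = 64 KiB and io_opt = 64 KiB * 4 data stripes = 256 KiB, i.e. one full stripe.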
static void raid_presuspend(struct dm_target *ti)
@@ -1637,7 +3385,11 @@ static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- mddev_suspend(&rs->md);
+ if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
+ if (!rs->md.suspended)
+ mddev_suspend(&rs->md);
+ rs->md.ro = 1;
+ }
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
@@ -1651,7 +3403,8 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
for (i = 0; i < rs->md.raid_disks; i++) {
r = &rs->dev[i].rdev;
if (test_bit(Faulty, &r->flags) && r->sb_page &&
- sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
+ sync_page_io(r, 0, r->sb_size, r->sb_page,
+ REQ_OP_READ, 0, true)) {
DMINFO("Faulty %s device #%d has readable super block."
" Attempting to revive it.",
rs->raid_type->name, i);
@@ -1660,7 +3413,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
* Faulty bit may be set, but sometimes the array can
* be suspended before the personalities can respond
* by removing the device from the array (i.e. calling
- * 'hot_remove_disk'). If they haven't yet removed
+ * 'hot_remove_disk'). If they haven't yet removed
* the failed device, its 'raid_disk' number will be
* '>= 0' - meaning we must call this function
* ourselves.
@@ -1696,34 +3449,192 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
}
}
-static void raid_resume(struct dm_target *ti)
+static int __load_dirty_region_bitmap(struct raid_set *rs)
{
- struct raid_set *rs = ti->private;
+ int r = 0;
+
+ /* Try loading the bitmap unless "raid0", which does not have one */
+ if (!rs_is_raid0(rs) &&
+ !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
+ r = bitmap_load(&rs->md);
+ if (r)
+ DMERR("Failed to load bitmap");
+ }
- if (rs->raid_type->level) {
- set_bit(MD_CHANGE_DEVS, &rs->md.flags);
+ return r;
+}
- if (!rs->bitmap_loaded) {
- bitmap_load(&rs->md);
- rs->bitmap_loaded = 1;
- } else {
- /*
- * A secondary resume while the device is active.
- * Take this opportunity to check whether any failed
- * devices are reachable again.
- */
- attempt_restore_of_faulty_devices(rs);
+/* Enforce updating all superblocks */
+static void rs_update_sbs(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+ int ro = mddev->ro;
+
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ mddev->ro = 0;
+ md_update_sb(mddev, 1);
+ mddev->ro = ro;
+}
+
+/*
+ * Reshape changes raid algorithm of @rs to new one within personality
+ * (e.g. raid6_zr -> raid6_nc), changes stripe size, adds/removes
+ * disks from a raid set thus growing/shrinking it or resizes the set
+ *
+ * Call mddev_lock_nointr() before!
+ */
+static int rs_start_reshape(struct raid_set *rs)
+{
+ int r;
+ struct mddev *mddev = &rs->md;
+ struct md_personality *pers = mddev->pers;
+
+ r = rs_setup_reshape(rs);
+ if (r)
+ return r;
+
+ /* Need to be resumed to be able to start reshape; recovery is frozen until raid_resume() though */
+ if (mddev->suspended)
+ mddev_resume(mddev);
+
+ /*
+ * Check any reshape constraints enforced by the personality
+ *
+ * May as well already kick the reshape off so that
+ * pers->start_reshape() becomes optional.
+ */
+ r = pers->check_reshape(mddev);
+ if (r) {
+ rs->ti->error = "pers->check_reshape() failed";
+ return r;
+ }
+
+ /*
+ * Personality may not provide start reshape method in which
+ * case check_reshape above has already covered everything
+ */
+ if (pers->start_reshape) {
+ r = pers->start_reshape(mddev);
+ if (r) {
+ rs->ti->error = "pers->start_reshape() failed";
+ return r;
}
+ }
+
+ /* Suspend because a resume will happen in raid_resume() */
+ if (!mddev->suspended)
+ mddev_suspend(mddev);
+
+ /*
+ * Now reshape got set up, update superblocks to
+ * reflect the fact so that a table reload will
+ * access proper superblock content in the ctr.
+ */
+ rs_update_sbs(rs);
+
+ return 0;
+}
+
+static int raid_preresume(struct dm_target *ti)
+{
+ int r;
+ struct raid_set *rs = ti->private;
+ struct mddev *mddev = &rs->md;
+
+ /* This is a resume after a suspend of the set -> it's already started */
+ if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
+ return 0;
+
+ /*
+ * The superblocks need to be updated on disk if the
+ * array is new or new devices got added (thus zeroed
+ * out by userspace); otherwise __load_dirty_region_bitmap
+ * will overwrite them in core with old data or fail.
+ */
+ if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
+ rs_update_sbs(rs);
- clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
+ /*
+ * Disable/enable discard support on raid set after any
+ * conversion, because devices can have been added
+ */
+ configure_discard_support(rs);
+
+ /* Load the bitmap from disk unless raid0 */
+ r = __load_dirty_region_bitmap(rs);
+ if (r)
+ return r;
+
+ /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
+ mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
+ r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
+ to_bytes(rs->requested_bitmap_chunk_sectors), 0);
+ if (r)
+ DMERR("Failed to resize bitmap");
+ }
+
+ /* Check for any resize/reshape on @rs and adjust/initiate */
+ /* Be prepared for mddev_resume() in raid_resume() */
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ mddev->resync_min = mddev->recovery_cp;
}
- mddev_resume(&rs->md);
+ rs_set_capacity(rs);
+
+ /* Check for any reshape request unless new raid set */
+ if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
+ /* Initiate a reshape. */
+ mddev_lock_nointr(mddev);
+ r = rs_start_reshape(rs);
+ mddev_unlock(mddev);
+ if (r)
+ DMWARN("Failed to check/start reshape, continuing without change");
+ r = 0;
+ }
+
+ return r;
+}
+
+static void raid_resume(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+ struct mddev *mddev = &rs->md;
+
+ if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
+ /*
+ * A secondary resume while the device is active.
+ * Take this opportunity to check whether any failed
+ * devices are reachable again.
+ */
+ attempt_restore_of_faulty_devices(rs);
+ } else {
+ mddev->ro = 0;
+ mddev->in_sync = 0;
+
+ /*
+ * When userspace passes flags to the ctr, we expect it to
+ * reset them (they made it to the superblocks) and to
+ * reload the mapping anyway.
+ *
+ * -> only unfreeze recovery on a table reload, or the ctr
+ * will retrieve a bogus recovery/reshape position from
+ * the superblock, because the ongoing recovery/reshape
+ * changes it after the read.
+ */
+ if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags))
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+ if (mddev->suspended)
+ mddev_resume(mddev);
+ }
}
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
@@ -1734,6 +3645,7 @@ static struct target_type raid_target = {
.io_hints = raid_io_hints,
.presuspend = raid_presuspend,
.postsuspend = raid_postsuspend,
+ .preresume = raid_preresume,
.resume = raid_resume,
};
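For context, a target_type like raid_target is registered with the DM core at module init and unregistered at exit; a hedged sketch of the standard wiring (dm-raid's real init may also log a version banner):

	static int __init dm_raid_init(void)
	{
		/* make the "raid" target available to dm table loads */
		return dm_register_target(&raid_target);
	}

	static void __exit dm_raid_exit(void)
	{
		dm_unregister_target(&raid_target);
	}

	module_init(dm_raid_init);
	module_exit(dm_raid_exit);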
@@ -1758,11 +3670,13 @@ module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
"Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
-MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
+MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
+MODULE_ALIAS("dm-raid0");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index b3ccf1e0d4f2..dac55b254a09 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -260,7 +260,8 @@ static int mirror_flush(struct dm_target *ti)
struct dm_io_region io[ms->nr_mirrors];
struct mirror *m;
struct dm_io_request io_req = {
- .bi_rw = WRITE_FLUSH,
+ .bi_op = REQ_OP_WRITE,
+ .bi_op_flags = WRITE_FLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = ms->io_client,
@@ -527,7 +528,7 @@ static void read_callback(unsigned long error, void *context)
DMWARN_LIMIT("Read failure on mirror device %s. "
"Trying alternative device.",
m->dev->name);
- queue_bio(m->ms, bio, bio_rw(bio));
+ queue_bio(m->ms, bio, bio_data_dir(bio));
return;
}
@@ -541,7 +542,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
{
struct dm_io_region io;
struct dm_io_request io_req = {
- .bi_rw = READ,
+ .bi_op = REQ_OP_READ,
+ .bi_op_flags = 0,
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = read_callback,
@@ -624,7 +626,7 @@ static void write_callback(unsigned long error, void *context)
* If the bio is discard, return an error, but do not
* degrade the array.
*/
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio_op(bio) == REQ_OP_DISCARD) {
bio->bi_error = -EOPNOTSUPP;
bio_endio(bio);
return;
@@ -654,7 +656,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct dm_io_region io[ms->nr_mirrors], *dest = io;
struct mirror *m;
struct dm_io_request io_req = {
- .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
+ .bi_op = REQ_OP_WRITE,
+ .bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA,
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = write_callback,
@@ -662,8 +665,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
.client = ms->io_client,
};
- if (bio->bi_rw & REQ_DISCARD) {
- io_req.bi_rw |= REQ_DISCARD;
+ if (bio_op(bio) == REQ_OP_DISCARD) {
+ io_req.bi_op = REQ_OP_DISCARD;
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = NULL;
}
@@ -701,8 +704,8 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
bio_list_init(&requeue);
while ((bio = bio_list_pop(writes))) {
- if ((bio->bi_rw & REQ_FLUSH) ||
- (bio->bi_rw & REQ_DISCARD)) {
+ if ((bio->bi_rw & REQ_PREFLUSH) ||
+ (bio_op(bio) == REQ_OP_DISCARD)) {
bio_list_add(&sync, bio);
continue;
}
@@ -1190,7 +1193,7 @@ static void mirror_dtr(struct dm_target *ti)
*/
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
- int r, rw = bio_rw(bio);
+ int r, rw = bio_data_dir(bio);
struct mirror *m;
struct mirror_set *ms = ti->private;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
@@ -1214,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
* If region is not in-sync queue the bio.
*/
if (!r || (r == -EWOULDBLOCK)) {
- if (rw == READA)
+ if (bio->bi_rw & REQ_RAHEAD)
return -EWOULDBLOCK;
queue_bio(ms, bio, rw);
@@ -1239,7 +1242,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
{
- int rw = bio_rw(bio);
+ int rw = bio_data_dir(bio);
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct mirror *m = NULL;
struct dm_bio_details *bd = NULL;
@@ -1250,7 +1253,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
+ if (!(bio->bi_rw & REQ_PREFLUSH) &&
+ bio_op(bio) != REQ_OP_DISCARD)
dm_rh_dec(ms->rh, bio_record->write_region);
return error;
}
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 74cb7b991d41..b11813431f31 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -398,12 +398,12 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
region_t region = dm_rh_bio_to_region(rh, bio);
int recovering = 0;
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_rw & REQ_PREFLUSH) {
rh->flush_failure = 1;
return;
}
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD)
return;
/* We must inform the log that the sync count has changed. */
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
struct bio *bio;
for (bio = bios->head; bio; bio = bio->bi_next) {
- if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
+ if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
continue;
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
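The same two-part test, REQ_PREFLUSH flag or discard operation, now replaces every old bi_rw & (REQ_FLUSH | REQ_DISCARD) check in these files. A hypothetical helper (not part of the patch) that captures the new classification:

	/* true for bios that carry no regular data payload to map */
	static inline bool bio_is_flush_or_discard(struct bio *bio)
	{
		return (bio->bi_rw & REQ_PREFLUSH) ||
		       bio_op(bio) == REQ_OP_DISCARD;
	}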
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
new file mode 100644
index 000000000000..7a9661868496
--- /dev/null
+++ b/drivers/md/dm-rq.c
@@ -0,0 +1,970 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-core.h"
+#include "dm-rq.h"
+
+#include <linux/elevator.h> /* for rq_end_sector() */
+#include <linux/blk-mq.h>
+
+#define DM_MSG_PREFIX "core-rq"
+
+#define DM_MQ_NR_HW_QUEUES 1
+#define DM_MQ_QUEUE_DEPTH 2048
+static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
+static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
+
+/*
+ * The number of reserved IOs in request-based DM's mempools, set by the user.
+ */
+#define RESERVED_REQUEST_BASED_IOS 256
+static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+
+#ifdef CONFIG_DM_MQ_DEFAULT
+static bool use_blk_mq = true;
+#else
+static bool use_blk_mq = false;
+#endif
+
+bool dm_use_blk_mq_default(void)
+{
+ return use_blk_mq;
+}
+
+bool dm_use_blk_mq(struct mapped_device *md)
+{
+ return md->use_blk_mq;
+}
+EXPORT_SYMBOL_GPL(dm_use_blk_mq);
+
+unsigned dm_get_reserved_rq_based_ios(void)
+{
+ return __dm_get_module_param(&reserved_rq_based_ios,
+ RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
+
+static unsigned dm_get_blk_mq_nr_hw_queues(void)
+{
+ return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
+}
+
+static unsigned dm_get_blk_mq_queue_depth(void)
+{
+ return __dm_get_module_param(&dm_mq_queue_depth,
+ DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
+}
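Both getters rely on __dm_get_module_param() to sanitize values users can change at runtime. A hedged sketch of the clamping behaviour assumed here (the real helper lives in DM core and may differ in detail):

	/* sketch: substitute the default when unset, cap at the maximum */
	static unsigned sketch_get_module_param(unsigned *module_param,
						unsigned def, unsigned max)
	{
		unsigned param = ACCESS_ONCE(*module_param);
		unsigned modified_param = 0;

		if (!param)
			modified_param = def;
		else if (param > max)
			modified_param = max;

		if (modified_param) {
			(void)cmpxchg(module_param, param, modified_param);
			param = modified_param;
		}

		return param;
	}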
+
+int dm_request_based(struct mapped_device *md)
+{
+ return blk_queue_stackable(md->queue);
+}
+
+static void dm_old_start_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+void dm_start_queue(struct request_queue *q)
+{
+ if (!q->mq_ops)
+ dm_old_start_queue(q);
+ else {
+ blk_mq_start_stopped_hw_queues(q, true);
+ blk_mq_kick_requeue_list(q);
+ }
+}
+
+static void dm_old_stop_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (blk_queue_stopped(q)) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ return;
+ }
+
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+void dm_stop_queue(struct request_queue *q)
+{
+ if (!q->mq_ops)
+ dm_old_stop_queue(q);
+ else
+ blk_mq_stop_hw_queues(q);
+}
+
+static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
+ gfp_t gfp_mask)
+{
+ return mempool_alloc(md->io_pool, gfp_mask);
+}
+
+static void free_old_rq_tio(struct dm_rq_target_io *tio)
+{
+ mempool_free(tio, tio->md->io_pool);
+}
+
+static struct request *alloc_old_clone_request(struct mapped_device *md,
+ gfp_t gfp_mask)
+{
+ return mempool_alloc(md->rq_pool, gfp_mask);
+}
+
+static void free_old_clone_request(struct mapped_device *md, struct request *rq)
+{
+ mempool_free(rq, md->rq_pool);
+}
+
+/*
+ * Partial completion handling for request-based dm
+ */
+static void end_clone_bio(struct bio *clone)
+{
+ struct dm_rq_clone_bio_info *info =
+ container_of(clone, struct dm_rq_clone_bio_info, clone);
+ struct dm_rq_target_io *tio = info->tio;
+ struct bio *bio = info->orig;
+ unsigned int nr_bytes = info->orig->bi_iter.bi_size;
+ int error = clone->bi_error;
+
+ bio_put(clone);
+
+ if (tio->error)
+ /*
+ * An error has already been detected on the request.
+ * Once an error has occurred, just let clone->end_io() handle
+ * the remainder.
+ */
+ return;
+ else if (error) {
+ /*
+ * Don't report the error to the upper layer yet.
+ * The error handling decision is made by the target driver,
+ * when the request is completed.
+ */
+ tio->error = error;
+ return;
+ }
+
+ /*
+ * I/O for the bio successfully completed.
+ * Report the data completion to the upper layer.
+ */
+
+ /*
+ * bios are processed from the head of the list.
+ * So the completing bio should always be rq->bio.
+ * If it's not, something is wrong.
+ */
+ if (tio->orig->bio != bio)
+ DMERR("bio completion is going in the middle of the request");
+
+ /*
+ * Update the original request.
+ * Do not use blk_end_request() here, because it may complete
+ * the original request before the clone, and break the ordering.
+ */
+ blk_update_request(tio->orig, 0, nr_bytes);
+}
+
+static struct dm_rq_target_io *tio_from_request(struct request *rq)
+{
+ return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
+}
+
+static void rq_end_stats(struct mapped_device *md, struct request *orig)
+{
+ if (unlikely(dm_stats_used(&md->stats))) {
+ struct dm_rq_target_io *tio = tio_from_request(orig);
+ tio->duration_jiffies = jiffies - tio->duration_jiffies;
+ dm_stats_account_io(&md->stats, rq_data_dir(orig),
+ blk_rq_pos(orig), tio->n_sectors, true,
+ tio->duration_jiffies, &tio->stats_aux);
+ }
+}
+
+/*
+ * Don't touch any member of the md after calling this function because
+ * the md may be freed in dm_put() at the end of this function.
+ * Or do dm_get() before calling this function and dm_put() later.
+ */
+static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
+{
+ atomic_dec(&md->pending[rw]);
+
+ /* nudge anyone waiting on suspend queue */
+ if (!md_in_flight(md))
+ wake_up(&md->wait);
+
+ /*
+ * Run this off this callpath, as drivers could invoke end_io while
+ * inside their request_fn (and holding the queue lock). Calling
+ * back into ->request_fn() could deadlock attempting to grab the
+ * queue lock again.
+ */
+ if (!md->queue->mq_ops && run_queue)
+ blk_run_queue_async(md->queue);
+
+ /*
+ * dm_put() must be at the end of this function. See the comment above
+ */
+ dm_put(md);
+}
+
+static void free_rq_clone(struct request *clone)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+
+ blk_rq_unprep_clone(clone);
+
+ /*
+ * It is possible for a clone_old_rq() allocated clone to
+ * get passed in -- it may not yet have a request_queue.
+ * This is known to occur if the error target replaces
+ * a multipath target that has a request_fn queue stacked
+ * on blk-mq queue(s).
+ */
+ if (clone->q && clone->q->mq_ops)
+ /* stacked on blk-mq queue(s) */
+ tio->ti->type->release_clone_rq(clone);
+ else if (!md->queue->mq_ops)
+ /* request_fn queue stacked on request_fn queue(s) */
+ free_old_clone_request(md, clone);
+
+ if (!md->queue->mq_ops)
+ free_old_rq_tio(tio);
+}
+
+/*
+ * Complete the clone and the original request.
+ * Must be called without clone's queue lock held,
+ * see end_clone_request() for more details.
+ */
+static void dm_end_request(struct request *clone, int error)
+{
+ int rw = rq_data_dir(clone);
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
+
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ rq->errors = clone->errors;
+ rq->resid_len = clone->resid_len;
+
+ if (rq->sense)
+ /*
+ * We are using the sense buffer of the original
+ * request.
+ * So setting the length of the sense data is enough.
+ */
+ rq->sense_len = clone->sense_len;
+ }
+
+ free_rq_clone(clone);
+ rq_end_stats(md, rq);
+ if (!rq->q->mq_ops)
+ blk_end_request_all(rq, error);
+ else
+ blk_mq_end_request(rq, error);
+ rq_completed(md, rw, true);
+}
+
+static void dm_unprep_request(struct request *rq)
+{
+ struct dm_rq_target_io *tio = tio_from_request(rq);
+ struct request *clone = tio->clone;
+
+ if (!rq->q->mq_ops) {
+ rq->special = NULL;
+ rq->cmd_flags &= ~REQ_DONTPREP;
+ }
+
+ if (clone)
+ free_rq_clone(clone);
+ else if (!tio->md->queue->mq_ops)
+ free_old_rq_tio(tio);
+}
+
+/*
+ * Requeue the original request of a clone.
+ */
+static void dm_old_requeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_requeue_request(q, rq);
+ blk_run_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_mq_requeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ blk_mq_requeue_request(rq);
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!blk_queue_stopped(q))
+ blk_mq_kick_requeue_list(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_requeue_original_request(struct mapped_device *md,
+ struct request *rq)
+{
+ int rw = rq_data_dir(rq);
+
+ rq_end_stats(md, rq);
+ dm_unprep_request(rq);
+
+ if (!rq->q->mq_ops)
+ dm_old_requeue_request(rq);
+ else
+ dm_mq_requeue_request(rq);
+
+ rq_completed(md, rw, false);
+}
+
+static void dm_done(struct request *clone, int error, bool mapped)
+{
+ int r = error;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ dm_request_endio_fn rq_end_io = NULL;
+
+ if (tio->ti) {
+ rq_end_io = tio->ti->type->rq_end_io;
+
+ if (mapped && rq_end_io)
+ r = rq_end_io(tio->ti, clone, error, &tio->info);
+ }
+
+ if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
+ !clone->q->limits.max_write_same_sectors))
+ disable_write_same(tio->md);
+
+ if (r <= 0)
+ /* The target wants to complete the I/O */
+ dm_end_request(clone, r);
+ else if (r == DM_ENDIO_INCOMPLETE)
+ /* The target will handle the I/O */
+ return;
+ else if (r == DM_ENDIO_REQUEUE)
+ /* The target wants to requeue the I/O */
+ dm_requeue_original_request(tio->md, tio->orig);
+ else {
+ DMWARN("unimplemented target endio return value: %d", r);
+ BUG();
+ }
+}
+
+/*
+ * Request completion handler for request-based dm
+ */
+static void dm_softirq_done(struct request *rq)
+{
+ bool mapped = true;
+ struct dm_rq_target_io *tio = tio_from_request(rq);
+ struct request *clone = tio->clone;
+ int rw;
+
+ if (!clone) {
+ rq_end_stats(tio->md, rq);
+ rw = rq_data_dir(rq);
+ if (!rq->q->mq_ops) {
+ blk_end_request_all(rq, tio->error);
+ rq_completed(tio->md, rw, false);
+ free_old_rq_tio(tio);
+ } else {
+ blk_mq_end_request(rq, tio->error);
+ rq_completed(tio->md, rw, false);
+ }
+ return;
+ }
+
+ if (rq->cmd_flags & REQ_FAILED)
+ mapped = false;
+
+ dm_done(clone, tio->error, mapped);
+}
+
+/*
+ * Complete the clone and the original request with the error status
+ * through softirq context.
+ */
+static void dm_complete_request(struct request *rq, int error)
+{
+ struct dm_rq_target_io *tio = tio_from_request(rq);
+
+ tio->error = error;
+ if (!rq->q->mq_ops)
+ blk_complete_request(rq);
+ else
+ blk_mq_complete_request(rq, error);
+}
+
+/*
+ * Complete the not-mapped clone and the original request with the error status
+ * through softirq context.
+ * Target's rq_end_io() function isn't called.
+ * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
+ */
+static void dm_kill_unmapped_request(struct request *rq, int error)
+{
+ rq->cmd_flags |= REQ_FAILED;
+ dm_complete_request(rq, error);
+}
+
+/*
+ * Called with the clone's queue lock held (in the case of .request_fn)
+ */
+static void end_clone_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ if (!clone->q->mq_ops) {
+ /*
+ * Just clean up the information of the queue in which
+ * the clone was dispatched.
+ * The clone is *NOT* actually freed here because it was allocated
+ * from dm's own mempool (REQ_ALLOCED isn't set).
+ */
+ __blk_put_request(clone->q, clone);
+ }
+
+ /*
+ * Actual request completion is done in a softirq context which doesn't
+ * hold the clone's queue lock. Otherwise, deadlock could occur because:
+ * - another request may be submitted by the upper level driver
+ * of the stack during the completion
+ * - the submission, which requires the queue lock, may be done
+ * against this clone's queue
+ */
+ dm_complete_request(tio->orig, error);
+}
+
+static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
+{
+ int r;
+
+ if (blk_queue_io_stat(clone->q))
+ clone->cmd_flags |= REQ_IO_STAT;
+
+ clone->start_time = jiffies;
+ r = blk_insert_cloned_request(clone->q, clone);
+ if (r)
+ /* must complete clone in terms of original request */
+ dm_complete_request(rq, r);
+}
+
+static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
+ void *data)
+{
+ struct dm_rq_target_io *tio = data;
+ struct dm_rq_clone_bio_info *info =
+ container_of(bio, struct dm_rq_clone_bio_info, clone);
+
+ info->orig = bio_orig;
+ info->tio = tio;
+ bio->bi_end_io = end_clone_bio;
+
+ return 0;
+}
+
+static int setup_clone(struct request *clone, struct request *rq,
+ struct dm_rq_target_io *tio, gfp_t gfp_mask)
+{
+ int r;
+
+ r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
+ dm_rq_bio_constructor, tio);
+ if (r)
+ return r;
+
+ clone->cmd = rq->cmd;
+ clone->cmd_len = rq->cmd_len;
+ clone->sense = rq->sense;
+ clone->end_io = end_clone_request;
+ clone->end_io_data = tio;
+
+ tio->clone = clone;
+
+ return 0;
+}
+
+static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
+ struct dm_rq_target_io *tio, gfp_t gfp_mask)
+{
+ /*
+ * Create clone for use with .request_fn request_queue
+ */
+ struct request *clone;
+
+ clone = alloc_old_clone_request(md, gfp_mask);
+ if (!clone)
+ return NULL;
+
+ blk_rq_init(NULL, clone);
+ if (setup_clone(clone, rq, tio, gfp_mask)) {
+ /* -ENOMEM */
+ free_old_clone_request(md, clone);
+ return NULL;
+ }
+
+ return clone;
+}
+
+static void map_tio_request(struct kthread_work *work);
+
+static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
+ struct mapped_device *md)
+{
+ tio->md = md;
+ tio->ti = NULL;
+ tio->clone = NULL;
+ tio->orig = rq;
+ tio->error = 0;
+ /*
+ * Avoid initializing info for blk-mq; it passes
+ * target-specific data through info.ptr
+ * (see: dm_mq_init_request)
+ */
+ if (!md->init_tio_pdu)
+ memset(&tio->info, 0, sizeof(tio->info));
+ if (md->kworker_task)
+ init_kthread_work(&tio->work, map_tio_request);
+}
+
+static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
+ struct mapped_device *md,
+ gfp_t gfp_mask)
+{
+ struct dm_rq_target_io *tio;
+ int srcu_idx;
+ struct dm_table *table;
+
+ tio = alloc_old_rq_tio(md, gfp_mask);
+ if (!tio)
+ return NULL;
+
+ init_tio(tio, rq, md);
+
+ table = dm_get_live_table(md, &srcu_idx);
+ /*
+ * Must clone a request if this .request_fn DM device
+ * is stacked on .request_fn device(s).
+ */
+ if (!dm_table_all_blk_mq_devices(table)) {
+ if (!clone_old_rq(rq, md, tio, gfp_mask)) {
+ dm_put_live_table(md, srcu_idx);
+ free_old_rq_tio(tio);
+ return NULL;
+ }
+ }
+ dm_put_live_table(md, srcu_idx);
+
+ return tio;
+}
+
+/*
+ * Called with the queue lock held.
+ */
+static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_rq_target_io *tio;
+
+ if (unlikely(rq->special)) {
+ DMWARN("Already has something in rq->special.");
+ return BLKPREP_KILL;
+ }
+
+ tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
+ if (!tio)
+ return BLKPREP_DEFER;
+
+ rq->special = tio;
+ rq->cmd_flags |= REQ_DONTPREP;
+
+ return BLKPREP_OK;
+}
+
+/*
+ * Returns:
+ * 0 : the request has been processed
+ * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * < 0 : the request was completed due to failure
+ */
+static int map_request(struct dm_rq_target_io *tio, struct request *rq,
+ struct mapped_device *md)
+{
+ int r;
+ struct dm_target *ti = tio->ti;
+ struct request *clone = NULL;
+
+ if (tio->clone) {
+ clone = tio->clone;
+ r = ti->type->map_rq(ti, clone, &tio->info);
+ } else {
+ r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
+ if (r < 0) {
+ /* The target wants to complete the I/O */
+ dm_kill_unmapped_request(rq, r);
+ return r;
+ }
+ if (r != DM_MAPIO_REMAPPED)
+ return r;
+ if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+ /* -ENOMEM */
+ ti->type->release_clone_rq(clone);
+ return DM_MAPIO_REQUEUE;
+ }
+ }
+
+ switch (r) {
+ case DM_MAPIO_SUBMITTED:
+ /* The target has taken the I/O to submit by itself later */
+ break;
+ case DM_MAPIO_REMAPPED:
+ /* The target has remapped the I/O so dispatch it */
+ trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
+ blk_rq_pos(rq));
+ dm_dispatch_clone_request(clone, rq);
+ break;
+ case DM_MAPIO_REQUEUE:
+ /* The target wants to requeue the I/O */
+ dm_requeue_original_request(md, tio->orig);
+ break;
+ default:
+ if (r > 0) {
+ DMWARN("unimplemented target map return value: %d", r);
+ BUG();
+ }
+
+ /* The target wants to complete the I/O */
+ dm_kill_unmapped_request(rq, r);
+ return r;
+ }
+
+ return 0;
+}
+
+static void dm_start_request(struct mapped_device *md, struct request *orig)
+{
+ if (!orig->q->mq_ops)
+ blk_start_request(orig);
+ else
+ blk_mq_start_request(orig);
+ atomic_inc(&md->pending[rq_data_dir(orig)]);
+
+ if (md->seq_rq_merge_deadline_usecs) {
+ md->last_rq_pos = rq_end_sector(orig);
+ md->last_rq_rw = rq_data_dir(orig);
+ md->last_rq_start_time = ktime_get();
+ }
+
+ if (unlikely(dm_stats_used(&md->stats))) {
+ struct dm_rq_target_io *tio = tio_from_request(orig);
+ tio->duration_jiffies = jiffies;
+ tio->n_sectors = blk_rq_sectors(orig);
+ dm_stats_account_io(&md->stats, rq_data_dir(orig),
+ blk_rq_pos(orig), tio->n_sectors, false, 0,
+ &tio->stats_aux);
+ }
+
+ /*
+ * Hold the md reference here for the in-flight I/O.
+ * We can't rely on the reference count held by the device opener,
+ * because the device may be closed during request completion
+ * once all bios have completed.
+ * See the comment in rq_completed() too.
+ */
+ dm_get(md);
+}
+
+static void map_tio_request(struct kthread_work *work)
+{
+ struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
+ struct request *rq = tio->orig;
+ struct mapped_device *md = tio->md;
+
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
+ dm_requeue_original_request(md, rq);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+ return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
+}
+
+#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count)
+{
+ unsigned deadline;
+
+ if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
+ return count;
+
+ if (kstrtouint(buf, 10, &deadline))
+ return -EINVAL;
+
+ if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
+ deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
+
+ md->seq_rq_merge_deadline_usecs = deadline;
+
+ return count;
+}
+
+static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
+{
+ ktime_t kt_deadline;
+
+ if (!md->seq_rq_merge_deadline_usecs)
+ return false;
+
+ kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
+ kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
+
+ return !ktime_after(ktime_get(), kt_deadline);
+}
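The deadline test above is a general "has the budget expired since start?" check built from ktime primitives; a hedged standalone sketch:

	/* sketch: true while 'usecs' of budget remain since 'start' */
	static bool sketch_within_deadline(ktime_t start, unsigned usecs)
	{
		ktime_t deadline = ktime_add_safe(start,
				ns_to_ktime((u64)usecs * NSEC_PER_USEC));

		return !ktime_after(ktime_get(), deadline);
	}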
+
+/*
+ * q->request_fn for old request-based dm.
+ * Called with the queue lock held.
+ */
+static void dm_old_request_fn(struct request_queue *q)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_target *ti = md->immutable_target;
+ struct request *rq;
+ struct dm_rq_target_io *tio;
+ sector_t pos = 0;
+
+ if (unlikely(!ti)) {
+ int srcu_idx;
+ struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+
+ ti = dm_table_find_target(map, pos);
+ dm_put_live_table(md, srcu_idx);
+ }
+
+ /*
+ * For suspend, check blk_queue_stopped() and increment
+ * ->pending within a single queue_lock not to increment the
+ * number of in-flight I/Os after the queue is stopped in
+ * dm_suspend().
+ */
+ while (!blk_queue_stopped(q)) {
+ rq = blk_peek_request(q);
+ if (!rq)
+ return;
+
+ /* always use block 0 to find the target for flushes for now */
+ pos = 0;
+ if (req_op(rq) != REQ_OP_FLUSH)
+ pos = blk_rq_pos(rq);
+
+ if ((dm_old_request_peeked_before_merge_deadline(md) &&
+ md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
+ (ti->type->busy && ti->type->busy(ti))) {
+ blk_delay_queue(q, 10);
+ return;
+ }
+
+ dm_start_request(md, rq);
+
+ tio = tio_from_request(rq);
+ /* Establish tio->ti before queuing work (map_tio_request) */
+ tio->ti = ti;
+ queue_kthread_work(&md->kworker, &tio->work);
+ BUG_ON(!irqs_disabled());
+ }
+}
+
+/*
+ * Fully initialize a .request_fn request-based queue.
+ */
+int dm_old_init_request_queue(struct mapped_device *md)
+{
+ /* Fully initialize the queue */
+ if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
+ return -EINVAL;
+
+ /* disable dm_old_request_fn's merge heuristic by default */
+ md->seq_rq_merge_deadline_usecs = 0;
+
+ dm_init_normal_md_queue(md);
+ blk_queue_softirq_done(md->queue, dm_softirq_done);
+ blk_queue_prep_rq(md->queue, dm_old_prep_fn);
+
+ /* Initialize the request-based DM worker thread */
+ init_kthread_worker(&md->kworker);
+ md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
+ "kdmwork-%s", dm_device_name(md));
+ if (IS_ERR(md->kworker_task))
+ return PTR_ERR(md->kworker_task);
+
+ elv_register_queue(md->queue);
+
+ return 0;
+}
+
+static int dm_mq_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct mapped_device *md = data;
+ struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+
+ /*
+ * Must initialize md member of tio, otherwise it won't
+ * be available in dm_mq_queue_rq.
+ */
+ tio->md = md;
+
+ if (md->init_tio_pdu) {
+ /* target-specific per-io data is immediately after the tio */
+ tio->info.ptr = tio + 1;
+ }
+
+ return 0;
+}
+
+static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+ struct mapped_device *md = tio->md;
+ struct dm_target *ti = md->immutable_target;
+
+ if (unlikely(!ti)) {
+ int srcu_idx;
+ struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+
+ ti = dm_table_find_target(map, 0);
+ dm_put_live_table(md, srcu_idx);
+ }
+
+ if (ti->type->busy && ti->type->busy(ti))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ dm_start_request(md, rq);
+
+ /* Init tio using md established in .init_request */
+ init_tio(tio, rq, md);
+
+ /*
+ * Establish tio->ti before calling map_request().
+ */
+ tio->ti = ti;
+
+ /* Direct call is fine since .queue_rq allows allocations */
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+ /* Undo dm_start_request() before requeuing */
+ rq_end_stats(md, rq);
+ rq_completed(md, rq_data_dir(rq), false);
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static struct blk_mq_ops dm_mq_ops = {
+ .queue_rq = dm_mq_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .complete = dm_softirq_done,
+ .init_request = dm_mq_init_request,
+};
+
+int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
+{
+ struct request_queue *q;
+ struct dm_target *immutable_tgt;
+ int err;
+
+ if (!dm_table_all_blk_mq_devices(t)) {
+ DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
+ return -EINVAL;
+ }
+
+ md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
+ if (!md->tag_set)
+ return -ENOMEM;
+
+ md->tag_set->ops = &dm_mq_ops;
+ md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
+ md->tag_set->numa_node = md->numa_node_id;
+ md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
+ md->tag_set->driver_data = md;
+
+ md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
+ immutable_tgt = dm_table_get_immutable_target(t);
+ if (immutable_tgt && immutable_tgt->per_io_data_size) {
+ /* any target-specific per-io data is immediately after the tio */
+ md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
+ md->init_tio_pdu = true;
+ }
+
+ err = blk_mq_alloc_tag_set(md->tag_set);
+ if (err)
+ goto out_kfree_tag_set;
+
+ q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto out_tag_set;
+ }
+ dm_init_md_queue(md);
+
+ /* backfill 'mq' sysfs registration normally done in blk_register_queue */
+ blk_mq_register_disk(md->disk);
+
+ return 0;
+
+out_tag_set:
+ blk_mq_free_tag_set(md->tag_set);
+out_kfree_tag_set:
+ kfree(md->tag_set);
+
+ return err;
+}
+
+void dm_mq_cleanup_mapped_device(struct mapped_device *md)
+{
+ if (md->tag_set) {
+ blk_mq_free_tag_set(md->tag_set);
+ kfree(md->tag_set);
+ }
+}
+
+module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+
+module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
+
+module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
+
+module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
new file mode 100644
index 000000000000..9e6f0a3773d4
--- /dev/null
+++ b/drivers/md/dm-rq.h
@@ -0,0 +1,64 @@
+/*
+ * Internal header file for device mapper
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef DM_RQ_INTERNAL_H
+#define DM_RQ_INTERNAL_H
+
+#include <linux/bio.h>
+#include <linux/kthread.h>
+
+#include "dm-stats.h"
+
+struct mapped_device;
+
+/*
+ * One of these is allocated per request.
+ */
+struct dm_rq_target_io {
+ struct mapped_device *md;
+ struct dm_target *ti;
+ struct request *orig, *clone;
+ struct kthread_work work;
+ int error;
+ union map_info info;
+ struct dm_stats_aux stats_aux;
+ unsigned long duration_jiffies;
+ unsigned n_sectors;
+};
+
+/*
+ * For request-based dm - the bio clones we allocate are embedded in these
+ * structs.
+ *
+ * We allocate these with bio_alloc_bioset, using the front_pad parameter when
+ * the bioset is created - this means the bio has to come at the end of the
+ * struct.
+ */
+struct dm_rq_clone_bio_info {
+ struct bio *orig;
+ struct dm_rq_target_io *tio;
+ struct bio clone;
+};
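The front_pad comment above describes the standard bioset embedding trick: reserve the bytes of dm_rq_clone_bio_info that precede its trailing bio in front of every bio the set hands out, then recover the wrapper with container_of(), as end_clone_bio() in dm-rq.c does. A hedged sketch (pool size assumed):

	/* the bioset is created once, e.g.:
	 * bioset_create(256, offsetof(struct dm_rq_clone_bio_info, clone)); */
	static struct dm_rq_clone_bio_info *
	sketch_alloc_clone_info(struct bio_set *bs)
	{
		struct bio *clone = bio_alloc_bioset(GFP_NOIO, 1, bs);

		if (!clone)
			return NULL;
		/* front_pad placed the wrapper directly in front of the bio */
		return container_of(clone, struct dm_rq_clone_bio_info, clone);
	}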
+
+bool dm_use_blk_mq_default(void);
+bool dm_use_blk_mq(struct mapped_device *md);
+
+int dm_old_init_request_queue(struct mapped_device *md);
+int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
+void dm_mq_cleanup_mapped_device(struct mapped_device *md);
+
+void dm_start_queue(struct request_queue *q);
+void dm_stop_queue(struct request_queue *q);
+
+unsigned dm_get_reserved_rq_based_ios(void);
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count);
+
+#endif
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 4d3909393f2c..b8cf956b577b 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -226,8 +226,8 @@ static void do_metadata(struct work_struct *work)
/*
* Read or write a chunk aligned and sized block of data from a device.
*/
-static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
- int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
+ int op_flags, int metadata)
{
struct dm_io_region where = {
.bdev = dm_snap_cow(ps->store->snap)->bdev,
@@ -235,7 +235,8 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
.count = ps->store->chunk_size,
};
struct dm_io_request io_req = {
- .bi_rw = rw,
+ .bi_op = op,
+ .bi_op_flags = op_flags,
.mem.type = DM_IO_VMA,
.mem.ptr.vma = area,
.client = ps->io_client,
@@ -281,14 +282,14 @@ static void skip_metadata(struct pstore *ps)
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
*/
-static int area_io(struct pstore *ps, int rw)
+static int area_io(struct pstore *ps, int op, int op_flags)
{
int r;
chunk_t chunk;
chunk = area_location(ps, ps->current_area);
- r = chunk_io(ps, ps->area, chunk, rw, 0);
+ r = chunk_io(ps, ps->area, chunk, op, op_flags, 0);
if (r)
return r;
@@ -302,7 +303,8 @@ static void zero_memory_area(struct pstore *ps)
static int zero_disk_area(struct pstore *ps, chunk_t area)
{
- return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
+ return chunk_io(ps, ps->zero_area, area_location(ps, area),
+ REQ_OP_WRITE, 0, 0);
}
static int read_header(struct pstore *ps, int *new_snapshot)
@@ -334,7 +336,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
if (r)
return r;
- r = chunk_io(ps, ps->header_area, 0, READ, 1);
+ r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
if (r)
goto bad;
@@ -395,7 +397,7 @@ static int write_header(struct pstore *ps)
dh->version = cpu_to_le32(ps->version);
dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
- return chunk_io(ps, ps->header_area, 0, WRITE, 1);
+ return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
}
/*
@@ -739,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
/*
* Commit exceptions to disk.
*/
- if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
+ if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
ps->valid = 0;
/*
@@ -779,7 +781,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
return 0;
ps->current_area--;
- r = area_io(ps, READ);
+ r = area_io(ps, REQ_OP_READ, 0);
if (r < 0)
return r;
ps->current_committed = ps->exceptions_per_area;
@@ -816,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
for (i = 0; i < nr_merged; i++)
clear_exception(ps, ps->current_committed - 1 - i);
- r = area_io(ps, WRITE_FLUSH_FUA);
+ r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
if (r < 0)
return r;
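Every chunk_io()/area_io() call-site change above follows one mechanical rule: the old combined rw word splits into an operation plus optional flags. A hypothetical translation helper, for illustration only (flag mask assumed):

	/* illustration: derive (op, op_flags) from an old-style rw word */
	static void sketch_split_old_rw(int rw, int *op, int *op_flags)
	{
		*op = (rw & WRITE) ? REQ_OP_WRITE : REQ_OP_READ;
		*op_flags = rw & (REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
	}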
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 70bb0e8b62ce..731e1f5bd895 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_rw & REQ_PREFLUSH) {
bio->bi_bdev = s->cow->bdev;
return DM_MAPIO_REMAPPED;
}
@@ -1696,7 +1696,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
* to copy an exception */
down_write(&s->lock);
- if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
+ if (!s->valid || (unlikely(s->snapshot_overflowed) &&
+ bio_data_dir(bio) == WRITE)) {
r = -EIO;
goto out_unlock;
}
@@ -1713,7 +1714,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
* flags so we should only get this if we are
* writeable.
*/
- if (bio_rw(bio) == WRITE) {
+ if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
up_write(&s->lock);
@@ -1799,7 +1800,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_rw & REQ_PREFLUSH) {
if (!dm_bio_get_target_bio_nr(bio))
bio->bi_bdev = s->origin->bdev;
else
@@ -1819,7 +1820,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
e = dm_lookup_exception(&s->complete, chunk);
if (e) {
/* Queue writes overlapping with chunks being merged */
- if (bio_rw(bio) == WRITE &&
+ if (bio_data_dir(bio) == WRITE &&
chunk >= s->first_merging_chunk &&
chunk < (s->first_merging_chunk +
s->num_merging_chunks)) {
@@ -1831,7 +1832,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
remap_exception(s, e, bio, chunk);
- if (bio_rw(bio) == WRITE)
+ if (bio_data_dir(bio) == WRITE)
track_chunk(s, bio, chunk);
goto out_unlock;
}
@@ -1839,7 +1840,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
redirect_to_origin:
bio->bi_bdev = s->origin->bdev;
- if (bio_rw(bio) == WRITE) {
+ if (bio_data_dir(bio) == WRITE) {
up_write(&s->lock);
return do_origin(s->origin, bio);
}
@@ -2285,10 +2286,10 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = o->dev->bdev;
- if (unlikely(bio->bi_rw & REQ_FLUSH))
+ if (unlikely(bio->bi_rw & REQ_PREFLUSH))
return DM_MAPIO_REMAPPED;
- if (bio_rw(bio) != WRITE)
+ if (bio_data_dir(bio) != WRITE)
return DM_MAPIO_REMAPPED;
available_sectors = o->split_boundary -
@@ -2301,6 +2302,13 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
return do_origin(o->dev, bio);
}
+static long origin_direct_access(struct dm_target *ti, sector_t sector,
+ void __pmem **kaddr, pfn_t *pfn, long size)
+{
+ DMWARN("device does not support dax.");
+ return -EIO;
+}
+
/*
* Set the target "max_io_len" field to the minimum of all the snapshots'
* chunk sizes.
@@ -2360,6 +2368,7 @@ static struct target_type origin_target = {
.postsuspend = origin_postsuspend,
.status = origin_status,
.iterate_devices = origin_iterate_devices,
+ .direct_access = origin_direct_access,
};
static struct target_type snapshot_target = {
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8289804ccd99..38b05f23b96c 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/device-mapper.h>
-#include "dm.h"
+#include "dm-core.h"
#include "dm-stats.h"
#define DM_MSG_PREFIX "stats"
@@ -514,11 +514,10 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
}
static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
- unsigned long bi_rw, sector_t len,
+ int idx, sector_t len,
struct dm_stats_aux *stats_aux, bool end,
unsigned long duration_jiffies)
{
- unsigned long idx = bi_rw & REQ_WRITE;
struct dm_stat_shared *shared = &s->stat_shared[entry];
struct dm_stat_percpu *p;
@@ -584,7 +583,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
#endif
}
-static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
+static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
sector_t bi_sector, sector_t end_sector,
bool end, unsigned long duration_jiffies,
struct dm_stats_aux *stats_aux)
@@ -645,8 +644,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
last = raw_cpu_ptr(stats->last);
stats_aux->merged =
(bi_sector == (ACCESS_ONCE(last->last_sector) &&
- ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
- (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
+ ((bi_rw == WRITE) ==
+ (ACCESS_ONCE(last->last_rw) == WRITE))
));
ACCESS_ONCE(last->last_sector) = end_sector;
ACCESS_ONCE(last->last_rw) = bi_rw;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 797ddb900b06..01bb9cf2a8c2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -286,14 +286,14 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
uint32_t stripe;
unsigned target_bio_nr;
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_rw & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
- if (unlikely(bio->bi_rw & REQ_DISCARD) ||
- unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
+ unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
return stripe_map_range(sc, bio, target_bio_nr);
@@ -308,6 +308,29 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+static long stripe_direct_access(struct dm_target *ti, sector_t sector,
+ void __pmem **kaddr, pfn_t *pfn, long size)
+{
+ struct stripe_c *sc = ti->private;
+ uint32_t stripe;
+ struct block_device *bdev;
+ struct blk_dax_ctl dax = {
+ .size = size,
+ };
+ long ret;
+
+ stripe_map_sector(sc, sector, &stripe, &dax.sector);
+
+ dax.sector += sc->stripe[stripe].physical_start;
+ bdev = sc->stripe[stripe].dev->bdev;
+
+ ret = bdev_direct_access(bdev, &dax);
+ *kaddr = dax.addr;
+ *pfn = dax.pfn;
+
+ return ret;
+}
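stripe_direct_access() is the striped variant of a common shape: remap the sector, fill a blk_dax_ctl, call bdev_direct_access(), and hand kaddr/pfn back to the caller. A hedged single-device sketch, roughly what a linear-style target would do (names assumed):

	static long sketch_single_dev_dax(struct block_device *bdev,
					  sector_t sector, void __pmem **kaddr,
					  pfn_t *pfn, long size)
	{
		struct blk_dax_ctl dax = {
			.sector = sector,	/* already remapped to bdev */
			.size = size,
		};
		long ret = bdev_direct_access(bdev, &dax);

		*kaddr = dax.addr;
		*pfn = dax.pfn;
		return ret;
	}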
+
/*
* Stripe status:
*
@@ -416,7 +439,7 @@ static void stripe_io_hints(struct dm_target *ti,
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 5, 1},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
@@ -425,6 +448,7 @@ static struct target_type stripe_target = {
.status = stripe_status,
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
+ .direct_access = stripe_direct_access,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 7e818f5f1dc4..c209b8a19b84 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -6,7 +6,8 @@
#include <linux/sysfs.h>
#include <linux/dm-ioctl.h>
-#include "dm.h"
+#include "dm-core.h"
+#include "dm-rq.h"
struct dm_sysfs_attr {
struct attribute attr;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 626a5ec04466..3e407a9cde1f 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -5,7 +5,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include "dm-core.h"
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -43,8 +43,10 @@ struct dm_table {
struct dm_target *targets;
struct target_type *immutable_target_type;
- unsigned integrity_supported:1;
- unsigned singleton:1;
+
+ bool integrity_supported:1;
+ bool singleton:1;
+ bool all_blk_mq:1;
/*
* Indicates the rw permissions for the new logical
@@ -206,6 +208,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
return -ENOMEM;
}
+ t->type = DM_TYPE_NONE;
t->mode = mode;
t->md = md;
*result = t;
@@ -703,7 +706,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
dm_device_name(t->md), type);
return -EINVAL;
}
- t->singleton = 1;
+ t->singleton = true;
}
if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
@@ -824,22 +827,70 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
}
EXPORT_SYMBOL(dm_consume_args);
+static bool __table_type_bio_based(unsigned table_type)
+{
+ return (table_type == DM_TYPE_BIO_BASED ||
+ table_type == DM_TYPE_DAX_BIO_BASED);
+}
+
static bool __table_type_request_based(unsigned table_type)
{
return (table_type == DM_TYPE_REQUEST_BASED ||
table_type == DM_TYPE_MQ_REQUEST_BASED);
}
-static int dm_table_set_type(struct dm_table *t)
+void dm_table_set_type(struct dm_table *t, unsigned type)
+{
+ t->type = type;
+}
+EXPORT_SYMBOL_GPL(dm_table_set_type);
+
+static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && blk_queue_dax(q);
+}
+
+static bool dm_table_supports_dax(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ /* Ensure that all targets support DAX. */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->type->direct_access)
+ return false;
+
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, device_supports_dax, NULL))
+ return false;
+ }
+
+ return true;
+}
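dm_table_supports_dax() shows the general pattern for table-wide capability checks: a per-device predicate is handed to each target's iterate_devices method and must hold for every device of every target. A hypothetical predicate of the same shape:

	/* hypothetical: is the underlying device non-rotational? */
	static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
				    sector_t start, sector_t len, void *data)
	{
		struct request_queue *q = bdev_get_queue(dev->bdev);

		return q && blk_queue_nonrot(q);
	}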
+
+static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- bool use_blk_mq = false;
+ bool verify_blk_mq = false;
struct dm_target *tgt;
struct dm_dev_internal *dd;
- struct list_head *devices;
+ struct list_head *devices = dm_table_get_devices(t);
unsigned live_md_type = dm_get_md_type(t->md);
+ if (t->type != DM_TYPE_NONE) {
+ /* target already set the table's type */
+ if (t->type == DM_TYPE_BIO_BASED)
+ return 0;
+ BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
+ goto verify_rq_based;
+ }
+
for (i = 0; i < t->num_targets; i++) {
tgt = t->targets + i;
if (dm_target_hybrid(tgt))
@@ -871,11 +922,27 @@ static int dm_table_set_type(struct dm_table *t)
if (bio_based) {
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
+ if (dm_table_supports_dax(t) ||
+ (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
+ t->type = DM_TYPE_DAX_BIO_BASED;
return 0;
}
BUG_ON(!request_based); /* No targets in this table */
+ if (list_empty(devices) && __table_type_request_based(live_md_type)) {
+ /* inherit live MD type */
+ t->type = live_md_type;
+ return 0;
+ }
+
+ /*
+ * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
+ * having a compatible target use dm_table_set_type.
+ */
+ t->type = DM_TYPE_REQUEST_BASED;
+
+verify_rq_based:
/*
* Request-based dm supports only tables that have a single target now.
* To support multiple targets, request splitting support is needed,
@@ -888,7 +955,6 @@ static int dm_table_set_type(struct dm_table *t)
}
/* Non-request-stackable devices can't be used for request-based dm */
- devices = dm_table_get_devices(t);
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
@@ -899,10 +965,10 @@ static int dm_table_set_type(struct dm_table *t)
}
if (q->mq_ops)
- use_blk_mq = true;
+ verify_blk_mq = true;
}
- if (use_blk_mq) {
+ if (verify_blk_mq) {
/* verify _all_ devices in the table are blk-mq devices */
list_for_each_entry(dd, devices, list)
if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
@@ -910,14 +976,9 @@ static int dm_table_set_type(struct dm_table *t)
" are blk-mq request-stackable");
return -EINVAL;
}
- t->type = DM_TYPE_MQ_REQUEST_BASED;
- } else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
- /* inherit live MD type */
- t->type = live_md_type;
-
- } else
- t->type = DM_TYPE_REQUEST_BASED;
+ t->all_blk_mq = true;
+ }
return 0;
}
@@ -956,14 +1017,19 @@ struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
return NULL;
}
+bool dm_table_bio_based(struct dm_table *t)
+{
+ return __table_type_bio_based(dm_table_get_type(t));
+}
+
bool dm_table_request_based(struct dm_table *t)
{
return __table_type_request_based(dm_table_get_type(t));
}
-bool dm_table_mq_request_based(struct dm_table *t)
+bool dm_table_all_blk_mq_devices(struct dm_table *t)
{
- return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED;
+ return t->all_blk_mq;
}
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
@@ -978,7 +1044,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
return -EINVAL;
}
- if (type == DM_TYPE_BIO_BASED)
+ if (__table_type_bio_based(type))
for (i = 0; i < t->num_targets; i++) {
tgt = t->targets + i;
per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
@@ -1106,7 +1172,7 @@ static int dm_table_register_integrity(struct dm_table *t)
return 0;
if (!integrity_profile_exists(dm_disk(md))) {
- t->integrity_supported = 1;
+ t->integrity_supported = true;
/*
* Register integrity profile during table load; we can do
* this because the final profile must match during resume.
@@ -1129,7 +1195,7 @@ static int dm_table_register_integrity(struct dm_table *t)
}
/* Preserve existing integrity profile */
- t->integrity_supported = 1;
+ t->integrity_supported = true;
return 0;
}
@@ -1141,9 +1207,9 @@ int dm_table_complete(struct dm_table *t)
{
int r;
- r = dm_table_set_type(t);
+ r = dm_table_determine_type(t);
if (r) {
- DMERR("unable to set table type");
+ DMERR("unable to determine table type");
return r;
}
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index a317dd884ba6..6eecd6b36f76 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -4,7 +4,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include "dm-core.h"
#include <linux/module.h>
#include <linux/init.h>
@@ -148,9 +148,15 @@ static void io_err_release_clone_rq(struct request *clone)
{
}
+static long io_err_direct_access(struct dm_target *ti, sector_t sector,
+ void __pmem **kaddr, pfn_t *pfn, long size)
+{
+ return -EIO;
+}
+
static struct target_type error_target = {
.name = "error",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.features = DM_TARGET_WILDCARD,
.ctr = io_err_ctr,
.dtr = io_err_dtr,
@@ -158,6 +164,7 @@ static struct target_type error_target = {
.map_rq = io_err_map_rq,
.clone_and_map_rq = io_err_clone_and_map_rq,
.release_clone_rq = io_err_release_clone_rq,
+ .direct_access = io_err_direct_access,
};
int __init dm_target_init(void)
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 43824d73366d..a15091a0d40c 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1677,6 +1677,36 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
return r;
}
+int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
+{
+ int r = 0;
+
+ down_write(&pmd->root_lock);
+ for (; b != e; b++) {
+ r = dm_sm_inc_block(pmd->data_sm, b);
+ if (r)
+ break;
+ }
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
+{
+ int r = 0;
+
+ down_write(&pmd->root_lock);
+ for (; b != e; b++) {
+ r = dm_sm_dec_block(pmd->data_sm, b);
+ if (r)
+ break;
+ }
+ up_write(&pmd->root_lock);
+
+ return r;
+}
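The two range helpers above differ only in the per-block space-map call. A hedged refactor sketch of the shared skeleton, assuming the per-block operation can be passed as a callback:

	typedef int (*block_op_fn)(struct dm_space_map *sm, dm_block_t b);

	static int sketch_pool_range_op(struct dm_pool_metadata *pmd,
					block_op_fn op, dm_block_t b, dm_block_t e)
	{
		int r = 0;

		down_write(&pmd->root_lock);
		for (; b != e; b++) {
			r = op(pmd->data_sm, b);
			if (r)
				break;
		}
		up_write(&pmd->root_lock);

		return r;
	}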
+
bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
int r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index a938babe4258..35e954ea20a9 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -197,6 +197,9 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
+int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
+
/*
* Returns -ENOSPC if the new size is too small and already allocated
* blocks would be lost.
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fc803d50f9f0..197ea2003400 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -253,6 +253,7 @@ struct pool {
struct bio_list deferred_flush_bios;
struct list_head prepared_mappings;
struct list_head prepared_discards;
+ struct list_head prepared_discards_pt2;
struct list_head active_thins;
struct dm_deferred_set *shared_read_ds;
@@ -269,6 +270,7 @@ struct pool {
process_mapping_fn process_prepared_mapping;
process_mapping_fn process_prepared_discard;
+ process_mapping_fn process_prepared_discard_pt2;
struct dm_bio_prison_cell **cell_sort_array;
};
@@ -360,7 +362,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
sector_t len = block_to_sectors(tc->pool, data_e - data_b);
return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
- GFP_NOWAIT, REQ_WRITE | REQ_DISCARD, &op->bio);
+ GFP_NOWAIT, 0, &op->bio);
}
static void end_discard(struct discard_op *op, int r)
@@ -371,7 +373,8 @@ static void end_discard(struct discard_op *op, int r)
* need to wait for the chain to complete.
*/
bio_chain(op->bio, op->parent_bio);
- submit_bio(REQ_WRITE | REQ_DISCARD, op->bio);
+ bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
+ submit_bio(op->bio);
}
blk_finish_plug(&op->plug);
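bio_chain() is what lets end_discard() return while discards are still in flight: the parent bio's completion is deferred until every chained child completes. A hedged sketch of the idiom (discard range setup elided):

	/* sketch: 'parent' will not complete until 'child' has completed */
	static void sketch_chain_discard(struct bio *parent)
	{
		struct bio *child = bio_alloc(GFP_NOIO, 0);

		if (!child)
			return;	/* sketch only; real code must handle this */
		child->bi_bdev = parent->bi_bdev;
		bio_set_op_attrs(child, REQ_OP_DISCARD, 0);
		bio_chain(child, parent);
		submit_bio(child);
	}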
@@ -696,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
- return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+ return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
dm_thin_changed_this_transaction(tc->td);
}
@@ -704,7 +707,7 @@ static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
struct dm_thin_endio_hook *h;
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD)
return;
h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -867,7 +870,8 @@ static void __inc_remap_and_issue_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
+ if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+ bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
inc_all_io_entry(info->tc->pool, bio);
@@ -999,7 +1003,8 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
/*----------------------------------------------------------------*/
-static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
+static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m,
+ struct bio *discard_parent)
{
/*
* We've already unmapped this range of blocks, but before we
@@ -1012,7 +1017,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
struct discard_op op;
- begin_discard(&op, tc, m->bio);
+ begin_discard(&op, tc, discard_parent);
while (b != end) {
/* find start of unmapped run */
for (; b < end; b++) {
@@ -1047,28 +1052,101 @@ out:
end_discard(&op, r);
}
-static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
+static void queue_passdown_pt2(struct dm_thin_new_mapping *m)
+{
+ unsigned long flags;
+ struct pool *pool = m->tc->pool;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add_tail(&m->list, &pool->prepared_discards_pt2);
+ spin_unlock_irqrestore(&pool->lock, flags);
+ wake_worker(pool);
+}
+
+static void passdown_endio(struct bio *bio)
+{
+ /*
+ * It doesn't matter if the passdown discard failed; we still want
+ * to unmap (we ignore err).
+ */
+ queue_passdown_pt2(bio->bi_private);
+}
+
+static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
{
int r;
struct thin_c *tc = m->tc;
struct pool *pool = tc->pool;
+ struct bio *discard_parent;
+ dm_block_t data_end = m->data_block + (m->virt_end - m->virt_begin);
+ /*
+ * Only this thread allocates blocks, so we can be sure that the
+ * newly unmapped blocks will not be allocated before the end of
+ * the function.
+ */
r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
if (r) {
metadata_operation_failed(pool, "dm_thin_remove_range", r);
bio_io_error(m->bio);
+ cell_defer_no_holder(tc, m->cell);
+ mempool_free(m, pool->mapping_pool);
+ return;
+ }
- } else if (m->maybe_shared) {
- passdown_double_checking_shared_status(m);
+ discard_parent = bio_alloc(GFP_NOIO, 1);
+ if (!discard_parent) {
+ DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
+ dm_device_name(tc->pool->pool_md));
+ queue_passdown_pt2(m);
} else {
- struct discard_op op;
- begin_discard(&op, tc, m->bio);
- r = issue_discard(&op, m->data_block,
- m->data_block + (m->virt_end - m->virt_begin));
- end_discard(&op, r);
+ discard_parent->bi_end_io = passdown_endio;
+ discard_parent->bi_private = m;
+
+ if (m->maybe_shared)
+ passdown_double_checking_shared_status(m, discard_parent);
+ else {
+ struct discard_op op;
+
+ begin_discard(&op, tc, discard_parent);
+ r = issue_discard(&op, m->data_block, data_end);
+ end_discard(&op, r);
+ }
}
+ /*
+ * Increment the unmapped blocks. This prevents a race between the
+ * passdown io and reallocation of freed blocks.
+ */
+ r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+ bio_io_error(m->bio);
+ cell_defer_no_holder(tc, m->cell);
+ mempool_free(m, pool->mapping_pool);
+ return;
+ }
+}
+
+static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
+{
+ int r;
+ struct thin_c *tc = m->tc;
+ struct pool *pool = tc->pool;
+
+ /*
+ * The passdown has completed, so now we can decrement all those
+ * unmapped blocks.
+ */
+ r = dm_pool_dec_data_range(pool->pmd, m->data_block,
+ m->data_block + (m->virt_end - m->virt_begin));
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_dec_data_range", r);
+ bio_io_error(m->bio);
+ } else
+ bio_endio(m->bio);
+
cell_defer_no_holder(tc, m->cell);
mempool_free(m, pool->mapping_pool);
}
@@ -1639,7 +1717,8 @@ static void __remap_and_issue_shared_cell(void *context,
while ((bio = bio_list_pop(&cell->bios))) {
if ((bio_data_dir(bio) == WRITE) ||
- (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
+ (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+ bio_op(bio) == REQ_OP_DISCARD))
bio_list_add(&info->defer_bios, bio);
else {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -2028,7 +2107,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
break;
}
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD)
pool->process_discard(tc, bio);
else
pool->process_bio(tc, bio);
@@ -2115,7 +2194,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
return;
}
- if (cell->holder->bi_rw & REQ_DISCARD)
+ if (bio_op(cell->holder) == REQ_OP_DISCARD)
pool->process_discard_cell(tc, cell);
else
pool->process_cell(tc, cell);
@@ -2212,6 +2291,8 @@ static void do_worker(struct work_struct *ws)
throttle_work_update(&pool->throttle);
process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
throttle_work_update(&pool->throttle);
+ process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2);
+ throttle_work_update(&pool->throttle);
process_deferred_bios(pool);
throttle_work_complete(&pool->throttle);
}
@@ -2340,7 +2421,8 @@ static void set_discard_callbacks(struct pool *pool)
if (passdown_enabled(pt)) {
pool->process_discard_cell = process_discard_cell_passdown;
- pool->process_prepared_discard = process_prepared_discard_passdown;
+ pool->process_prepared_discard = process_prepared_discard_passdown_pt1;
+ pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2;
} else {
pool->process_discard_cell = process_discard_cell_no_passdown;
pool->process_prepared_discard = process_prepared_discard_no_passdown;
@@ -2553,7 +2635,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
- if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
+ if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+ bio_op(bio) == REQ_OP_DISCARD) {
thin_defer_bio_with_throttle(tc, bio);
return DM_MAPIO_SUBMITTED;
}
@@ -2826,6 +2909,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
bio_list_init(&pool->deferred_flush_bios);
INIT_LIST_HEAD(&pool->prepared_mappings);
INIT_LIST_HEAD(&pool->prepared_discards);
+ INIT_LIST_HEAD(&pool->prepared_discards_pt2);
INIT_LIST_HEAD(&pool->active_thins);
pool->low_water_triggered = false;
pool->suspended = true;
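The pt1/pt2 split above exists so the refcount decrement in pt2 only runs after every sub-discard chained to discard_parent has completed; passdown_endio() is the hinge that requeues the mapping for phase two. A compressed userspace sketch of that completion-chaining discipline (callback-based, names hypothetical):

#include <stdio.h>

/* Hypothetical sketch: a parent completion fires only after all
 * children chained to it have completed, then queues phase 2. */
struct parent {
	int pending;                    /* children + the parent's own ref */
	void (*end_io)(struct parent *);
};

static void chain_child(struct parent *p) { p->pending++; }

static void child_done(struct parent *p)
{
	if (--p->pending == 0)
		p->end_io(p);           /* last completion triggers endio */
}

static void passdown_endio(struct parent *p)
{
	/* pt2 would run here: decrement block refcounts, end the bio */
	puts("phase 2: all passdown discards done");
}

int main(void)
{
	struct parent p = { .pending = 1, .end_io = passdown_endio };

	chain_child(&p);                /* issue_discard() analogue */
	chain_child(&p);
	child_done(&p);                 /* children complete ... */
	child_done(&p);
	child_done(&p);                 /* ... then the parent ref drops */
	return 0;
}

The dm_pool_inc_data_range() call in pt1 plays the same role as the extra reference here: it keeps the just-unmapped blocks from being reallocated while the passdown I/O is still in flight.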
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 459a9f8905ed..0f0eb8a3d922 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -453,9 +453,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
*/
offset = block << v->data_dev_block_bits;
-
- res = offset;
- div64_u64(res, v->fec->rounds << v->data_dev_block_bits);
+ res = div64_u64(offset, v->fec->rounds << v->data_dev_block_bits);
/*
* The base RS block we can feed to the interleaver to find out all
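The dm-verity-fec hunk fixes a silent bug: div64_u64() returns the quotient, it does not divide its first argument in place, so the old two-line form left res equal to offset. A minimal userspace illustration of the before/after behaviour (values invented):

#include <assert.h>
#include <stdint.h>

static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;      /* kernel helper returns the quotient */
}

int main(void)
{
	uint64_t offset = 1u << 20, span = 1u << 16, res;

	res = offset;
	div64_u64(res, span);           /* old pattern: result discarded */
	assert(res == offset);          /* res never changed -- the bug */

	res = div64_u64(offset, span);  /* fixed pattern */
	assert(res == 16);
	return 0;
}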
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 766bc93006e6..618b8752dcf1 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -35,16 +35,19 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
*/
static int zero_map(struct dm_target *ti, struct bio *bio)
{
- switch(bio_rw(bio)) {
- case READ:
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ if (bio->bi_rw & REQ_RAHEAD) {
+ /* readahead of null bytes only wastes buffer cache */
+ return -EIO;
+ }
zero_fill_bio(bio);
break;
- case READA:
- /* readahead of null bytes only wastes buffer cache */
- return -EIO;
- case WRITE:
+ case REQ_OP_WRITE:
/* writes get silently dropped */
break;
+ default:
+ return -EIO;
}
bio_endio(bio);
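After the conversion, dm-zero dispatches on the operation and treats REQ_RAHEAD as a modifier on a read rather than a separate READA request type. A userspace analogue of the resulting decision table (hypothetical types, not the target itself):

#include <errno.h>
#include <string.h>

enum op { OP_READ, OP_WRITE, OP_DISCARD };

/* Hypothetical sketch of dm-zero's decision table. */
static int zero_map(enum op op, int rahead, char *buf, int len)
{
	switch (op) {
	case OP_READ:
		if (rahead)
			return -EIO;    /* readahead of zeroes wastes cache */
		memset(buf, 0, len);    /* reads see zeroes */
		return 0;
	case OP_WRITE:
		return 0;               /* writes silently dropped */
	default:
		return -EIO;            /* discard etc. unsupported */
	}
}

int main(void)
{
	char buf[8] = "xxxxxxx";
	return zero_map(OP_READ, 0, buf, sizeof(buf)) || buf[0];
}

Note the added default arm in the real hunk: with ops as enumerated values rather than flags, unknown operations must be rejected explicitly.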
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1b2f96205361..ceb69fc0b10b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -5,13 +5,13 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include "dm-core.h"
+#include "dm-rq.h"
#include "dm-uevent.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
@@ -20,14 +20,8 @@
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
-#include <linux/kthread.h>
-#include <linux/ktime.h>
-#include <linux/elevator.h> /* for rq_end_sector() */
-#include <linux/blk-mq.h>
#include <linux/pr.h>
-#include <trace/events/block.h>
-
#define DM_MSG_PREFIX "core"
#ifdef CONFIG_PRINTK
@@ -63,7 +57,6 @@ static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
static struct workqueue_struct *deferred_remove_workqueue;
/*
- * For bio-based dm.
* One of these is allocated per bio.
*/
struct dm_io {
@@ -76,36 +69,6 @@ struct dm_io {
struct dm_stats_aux stats_aux;
};
-/*
- * For request-based dm.
- * One of these is allocated per request.
- */
-struct dm_rq_target_io {
- struct mapped_device *md;
- struct dm_target *ti;
- struct request *orig, *clone;
- struct kthread_work work;
- int error;
- union map_info info;
- struct dm_stats_aux stats_aux;
- unsigned long duration_jiffies;
- unsigned n_sectors;
-};
-
-/*
- * For request-based dm - the bio clones we allocate are embedded in these
- * structs.
- *
- * We allocate these with bio_alloc_bioset, using the front_pad parameter when
- * the bioset is created - this means the bio has to come at the end of the
- * struct.
- */
-struct dm_rq_clone_bio_info {
- struct bio *orig;
- struct dm_rq_target_io *tio;
- struct bio clone;
-};
-
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -120,130 +83,9 @@ struct dm_rq_clone_bio_info {
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
-/*
- * Work processed by per-device workqueue.
- */
-struct mapped_device {
- struct srcu_struct io_barrier;
- struct mutex suspend_lock;
-
- /*
- * The current mapping (struct dm_table *).
- * Use dm_get_live_table{_fast} or take suspend_lock for
- * dereference.
- */
- void __rcu *map;
-
- struct list_head table_devices;
- struct mutex table_devices_lock;
-
- unsigned long flags;
-
- struct request_queue *queue;
- int numa_node_id;
-
- unsigned type;
- /* Protect queue and type against concurrent access. */
- struct mutex type_lock;
-
- atomic_t holders;
- atomic_t open_count;
-
- struct dm_target *immutable_target;
- struct target_type *immutable_target_type;
-
- struct gendisk *disk;
- char name[16];
-
- void *interface_ptr;
-
- /*
- * A list of ios that arrived while we were suspended.
- */
- atomic_t pending[2];
- wait_queue_head_t wait;
- struct work_struct work;
- spinlock_t deferred_lock;
- struct bio_list deferred;
-
- /*
- * Event handling.
- */
- wait_queue_head_t eventq;
- atomic_t event_nr;
- atomic_t uevent_seq;
- struct list_head uevent_list;
- spinlock_t uevent_lock; /* Protect access to uevent_list */
-
- /* the number of internal suspends */
- unsigned internal_suspend_count;
-
- /*
- * Processing queue (flush)
- */
- struct workqueue_struct *wq;
-
- /*
- * io objects are allocated from here.
- */
- mempool_t *io_pool;
- mempool_t *rq_pool;
-
- struct bio_set *bs;
-
- /*
- * freeze/thaw support require holding onto a super block
- */
- struct super_block *frozen_sb;
-
- /* forced geometry settings */
- struct hd_geometry geometry;
-
- struct block_device *bdev;
-
- /* kobject and completion */
- struct dm_kobject_holder kobj_holder;
-
- /* zero-length flush that will be cloned and submitted to targets */
- struct bio flush_bio;
-
- struct dm_stats stats;
-
- struct kthread_worker kworker;
- struct task_struct *kworker_task;
-
- /* for request-based merge heuristic in dm_request_fn() */
- unsigned seq_rq_merge_deadline_usecs;
- int last_rq_rw;
- sector_t last_rq_pos;
- ktime_t last_rq_start_time;
-
- /* for blk-mq request-based DM support */
- struct blk_mq_tag_set *tag_set;
- bool use_blk_mq:1;
- bool init_tio_pdu:1;
-};
-
-#ifdef CONFIG_DM_MQ_DEFAULT
-static bool use_blk_mq = true;
-#else
-static bool use_blk_mq = false;
-#endif
-
-#define DM_MQ_NR_HW_QUEUES 1
-#define DM_MQ_QUEUE_DEPTH 2048
#define DM_NUMA_NODE NUMA_NO_NODE
-
-static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
-static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
static int dm_numa_node = DM_NUMA_NODE;
-bool dm_use_blk_mq(struct mapped_device *md)
-{
- return md->use_blk_mq;
-}
-EXPORT_SYMBOL_GPL(dm_use_blk_mq);
-
/*
* For mempools pre-allocation at the table loading time.
*/
@@ -259,9 +101,6 @@ struct table_device {
struct dm_dev dm_dev;
};
-#define RESERVED_BIO_BASED_IOS 16
-#define RESERVED_REQUEST_BASED_IOS 256
-#define RESERVED_MAX_IOS 1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;
@@ -269,13 +108,9 @@ static struct kmem_cache *_rq_cache;
/*
* Bio-based DM's mempools' reserved IOs set by the user.
*/
+#define RESERVED_BIO_BASED_IOS 16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
-/*
- * Request-based DM's mempools' reserved IOs set by the user.
- */
-static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
int param = ACCESS_ONCE(*module_param);
@@ -297,8 +132,8 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
return param;
}
-static unsigned __dm_get_module_param(unsigned *module_param,
- unsigned def, unsigned max)
+unsigned __dm_get_module_param(unsigned *module_param,
+ unsigned def, unsigned max)
{
unsigned param = ACCESS_ONCE(*module_param);
unsigned modified_param = 0;
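__dm_get_module_param() loses its static above so dm-rq.c can reuse it. Its contract, as far as the surrounding code shows: read a writable module parameter once, substitute the default for 0, cap at max, and write the fixup back. A userspace sketch under those assumed semantics:

#include <assert.h>

/* Sketch of the clamp-once pattern for a writable tunable. */
static unsigned get_param(unsigned *param, unsigned def, unsigned max)
{
	unsigned val = *param;          /* ACCESS_ONCE() in the kernel */

	if (!val)
		val = def;
	else if (val > max)
		val = max;
	*param = val;                   /* the kernel uses cmpxchg() here */
	return val;
}

int main(void)
{
	unsigned reserved_ios = 0;

	assert(get_param(&reserved_ios, 16, 1024) == 16);
	reserved_ios = 4096;
	assert(get_param(&reserved_ios, 16, 1024) == 1024);
	return 0;
}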
@@ -319,28 +154,10 @@ static unsigned __dm_get_module_param(unsigned *module_param,
unsigned dm_get_reserved_bio_based_ios(void)
{
return __dm_get_module_param(&reserved_bio_based_ios,
- RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
+ RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
-unsigned dm_get_reserved_rq_based_ios(void)
-{
- return __dm_get_module_param(&reserved_rq_based_ios,
- RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
-}
-EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
-
-static unsigned dm_get_blk_mq_nr_hw_queues(void)
-{
- return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
-}
-
-static unsigned dm_get_blk_mq_queue_depth(void)
-{
- return __dm_get_module_param(&dm_mq_queue_depth,
- DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
-}
-
static unsigned dm_get_numa_node(void)
{
return __dm_get_module_param_int(&dm_numa_node,
@@ -679,29 +496,7 @@ static void free_tio(struct dm_target_io *tio)
bio_put(&tio->clone);
}
-static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
- gfp_t gfp_mask)
-{
- return mempool_alloc(md->io_pool, gfp_mask);
-}
-
-static void free_old_rq_tio(struct dm_rq_target_io *tio)
-{
- mempool_free(tio, tio->md->io_pool);
-}
-
-static struct request *alloc_old_clone_request(struct mapped_device *md,
- gfp_t gfp_mask)
-{
- return mempool_alloc(md->rq_pool, gfp_mask);
-}
-
-static void free_old_clone_request(struct mapped_device *md, struct request *rq)
-{
- mempool_free(rq, md->rq_pool);
-}
-
-static int md_in_flight(struct mapped_device *md)
+int md_in_flight(struct mapped_device *md)
{
return atomic_read(&md->pending[READ]) +
atomic_read(&md->pending[WRITE]);
@@ -723,8 +518,9 @@ static void start_io_acct(struct dm_io *io)
atomic_inc_return(&md->pending[rw]));
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
- bio_sectors(bio), false, 0, &io->stats_aux);
+ dm_stats_account_io(&md->stats, bio_data_dir(bio),
+ bio->bi_iter.bi_sector, bio_sectors(bio),
+ false, 0, &io->stats_aux);
}
static void end_io_acct(struct dm_io *io)
@@ -738,8 +534,9 @@ static void end_io_acct(struct dm_io *io)
generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
- bio_sectors(bio), true, duration, &io->stats_aux);
+ dm_stats_account_io(&md->stats, bio_data_dir(bio),
+ bio->bi_iter.bi_sector, bio_sectors(bio),
+ true, duration, &io->stats_aux);
/*
* After this is decremented the bio must not be touched if it is
@@ -1001,12 +798,12 @@ static void dec_pending(struct dm_io *io, int error)
if (io_error == DM_ENDIO_REQUEUE)
return;
- if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
+ if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
/*
* Preflush done for flush with data, reissue
- * without REQ_FLUSH.
+ * without REQ_PREFLUSH.
*/
- bio->bi_rw &= ~REQ_FLUSH;
+ bio->bi_rw &= ~REQ_PREFLUSH;
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
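The dec_pending() hunk preserves the two-step flush protocol under the new flag name: a flush-with-data bio is first completed as an empty preflush, then requeued with REQ_PREFLUSH cleared so only the data portion remains. Roughly, as a userspace sketch with hypothetical names:

#include <stdio.h>

#define REQ_PREFLUSH (1u << 0)

struct fake_bio { unsigned int rw; unsigned int size; };

static void queue_io(struct fake_bio *b) { printf("requeued, rw=%u\n", b->rw); }

/* Sketch: preflush done for a flush-with-data bio -> reissue the
 * data portion without REQ_PREFLUSH; anything else is finished. */
static void dec_pending(struct fake_bio *b)
{
	if ((b->rw & REQ_PREFLUSH) && b->size) {
		b->rw &= ~REQ_PREFLUSH;
		queue_io(b);            /* second pass carries the data */
	} else {
		puts("done");           /* normal I/O or empty flush */
	}
}

int main(void)
{
	struct fake_bio b = { .rw = REQ_PREFLUSH, .size = 4096 };

	dec_pending(&b);                /* reissues without the flush flag */
	dec_pending(&b);                /* now completes */
	return 0;
}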
@@ -1017,7 +814,7 @@ static void dec_pending(struct dm_io *io, int error)
}
}
-static void disable_write_same(struct mapped_device *md)
+void disable_write_same(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);
@@ -1051,7 +848,7 @@ static void clone_endio(struct bio *bio)
}
}
- if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
+ if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
!bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
disable_write_same(md);
@@ -1060,371 +857,6 @@ static void clone_endio(struct bio *bio)
}
/*
- * Partial completion handling for request-based dm
- */
-static void end_clone_bio(struct bio *clone)
-{
- struct dm_rq_clone_bio_info *info =
- container_of(clone, struct dm_rq_clone_bio_info, clone);
- struct dm_rq_target_io *tio = info->tio;
- struct bio *bio = info->orig;
- unsigned int nr_bytes = info->orig->bi_iter.bi_size;
- int error = clone->bi_error;
-
- bio_put(clone);
-
- if (tio->error)
- /*
- * An error has already been detected on the request.
- * Once error occurred, just let clone->end_io() handle
- * the remainder.
- */
- return;
- else if (error) {
- /*
- * Don't notice the error to the upper layer yet.
- * The error handling decision is made by the target driver,
- * when the request is completed.
- */
- tio->error = error;
- return;
- }
-
- /*
- * I/O for the bio successfully completed.
- * Notice the data completion to the upper layer.
- */
-
- /*
- * bios are processed from the head of the list.
- * So the completing bio should always be rq->bio.
- * If it's not, something wrong is happening.
- */
- if (tio->orig->bio != bio)
- DMERR("bio completion is going in the middle of the request");
-
- /*
- * Update the original request.
- * Do not use blk_end_request() here, because it may complete
- * the original request before the clone, and break the ordering.
- */
- blk_update_request(tio->orig, 0, nr_bytes);
-}
-
-static struct dm_rq_target_io *tio_from_request(struct request *rq)
-{
- return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
-}
-
-static void rq_end_stats(struct mapped_device *md, struct request *orig)
-{
- if (unlikely(dm_stats_used(&md->stats))) {
- struct dm_rq_target_io *tio = tio_from_request(orig);
- tio->duration_jiffies = jiffies - tio->duration_jiffies;
- dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
- tio->n_sectors, true, tio->duration_jiffies,
- &tio->stats_aux);
- }
-}
-
-/*
- * Don't touch any member of the md after calling this function because
- * the md may be freed in dm_put() at the end of this function.
- * Or do dm_get() before calling this function and dm_put() later.
- */
-static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
-{
- atomic_dec(&md->pending[rw]);
-
- /* nudge anyone waiting on suspend queue */
- if (!md_in_flight(md))
- wake_up(&md->wait);
-
- /*
- * Run this off this callpath, as drivers could invoke end_io while
- * inside their request_fn (and holding the queue lock). Calling
- * back into ->request_fn() could deadlock attempting to grab the
- * queue lock again.
- */
- if (!md->queue->mq_ops && run_queue)
- blk_run_queue_async(md->queue);
-
- /*
- * dm_put() must be at the end of this function. See the comment above
- */
- dm_put(md);
-}
-
-static void free_rq_clone(struct request *clone)
-{
- struct dm_rq_target_io *tio = clone->end_io_data;
- struct mapped_device *md = tio->md;
-
- blk_rq_unprep_clone(clone);
-
- if (md->type == DM_TYPE_MQ_REQUEST_BASED)
- /* stacked on blk-mq queue(s) */
- tio->ti->type->release_clone_rq(clone);
- else if (!md->queue->mq_ops)
- /* request_fn queue stacked on request_fn queue(s) */
- free_old_clone_request(md, clone);
-
- if (!md->queue->mq_ops)
- free_old_rq_tio(tio);
-}
-
-/*
- * Complete the clone and the original request.
- * Must be called without clone's queue lock held,
- * see end_clone_request() for more details.
- */
-static void dm_end_request(struct request *clone, int error)
-{
- int rw = rq_data_dir(clone);
- struct dm_rq_target_io *tio = clone->end_io_data;
- struct mapped_device *md = tio->md;
- struct request *rq = tio->orig;
-
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
- rq->errors = clone->errors;
- rq->resid_len = clone->resid_len;
-
- if (rq->sense)
- /*
- * We are using the sense buffer of the original
- * request.
- * So setting the length of the sense data is enough.
- */
- rq->sense_len = clone->sense_len;
- }
-
- free_rq_clone(clone);
- rq_end_stats(md, rq);
- if (!rq->q->mq_ops)
- blk_end_request_all(rq, error);
- else
- blk_mq_end_request(rq, error);
- rq_completed(md, rw, true);
-}
-
-static void dm_unprep_request(struct request *rq)
-{
- struct dm_rq_target_io *tio = tio_from_request(rq);
- struct request *clone = tio->clone;
-
- if (!rq->q->mq_ops) {
- rq->special = NULL;
- rq->cmd_flags &= ~REQ_DONTPREP;
- }
-
- if (clone)
- free_rq_clone(clone);
- else if (!tio->md->queue->mq_ops)
- free_old_rq_tio(tio);
-}
-
-/*
- * Requeue the original request of a clone.
- */
-static void dm_old_requeue_request(struct request *rq)
-{
- struct request_queue *q = rq->q;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_requeue_request(q, rq);
- blk_run_queue_async(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_mq_requeue_request(struct request *rq)
-{
- struct request_queue *q = rq->q;
- unsigned long flags;
-
- blk_mq_requeue_request(rq);
- spin_lock_irqsave(q->queue_lock, flags);
- if (!blk_queue_stopped(q))
- blk_mq_kick_requeue_list(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_requeue_original_request(struct mapped_device *md,
- struct request *rq)
-{
- int rw = rq_data_dir(rq);
-
- rq_end_stats(md, rq);
- dm_unprep_request(rq);
-
- if (!rq->q->mq_ops)
- dm_old_requeue_request(rq);
- else
- dm_mq_requeue_request(rq);
-
- rq_completed(md, rw, false);
-}
-
-static void dm_old_stop_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q)) {
- spin_unlock_irqrestore(q->queue_lock, flags);
- return;
- }
-
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_stop_queue(struct request_queue *q)
-{
- if (!q->mq_ops)
- dm_old_stop_queue(q);
- else
- blk_mq_stop_hw_queues(q);
-}
-
-static void dm_old_start_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_start_queue(struct request_queue *q)
-{
- if (!q->mq_ops)
- dm_old_start_queue(q);
- else {
- blk_mq_start_stopped_hw_queues(q, true);
- blk_mq_kick_requeue_list(q);
- }
-}
-
-static void dm_done(struct request *clone, int error, bool mapped)
-{
- int r = error;
- struct dm_rq_target_io *tio = clone->end_io_data;
- dm_request_endio_fn rq_end_io = NULL;
-
- if (tio->ti) {
- rq_end_io = tio->ti->type->rq_end_io;
-
- if (mapped && rq_end_io)
- r = rq_end_io(tio->ti, clone, error, &tio->info);
- }
-
- if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
- !clone->q->limits.max_write_same_sectors))
- disable_write_same(tio->md);
-
- if (r <= 0)
- /* The target wants to complete the I/O */
- dm_end_request(clone, r);
- else if (r == DM_ENDIO_INCOMPLETE)
- /* The target will handle the I/O */
- return;
- else if (r == DM_ENDIO_REQUEUE)
- /* The target wants to requeue the I/O */
- dm_requeue_original_request(tio->md, tio->orig);
- else {
- DMWARN("unimplemented target endio return value: %d", r);
- BUG();
- }
-}
-
-/*
- * Request completion handler for request-based dm
- */
-static void dm_softirq_done(struct request *rq)
-{
- bool mapped = true;
- struct dm_rq_target_io *tio = tio_from_request(rq);
- struct request *clone = tio->clone;
- int rw;
-
- if (!clone) {
- rq_end_stats(tio->md, rq);
- rw = rq_data_dir(rq);
- if (!rq->q->mq_ops) {
- blk_end_request_all(rq, tio->error);
- rq_completed(tio->md, rw, false);
- free_old_rq_tio(tio);
- } else {
- blk_mq_end_request(rq, tio->error);
- rq_completed(tio->md, rw, false);
- }
- return;
- }
-
- if (rq->cmd_flags & REQ_FAILED)
- mapped = false;
-
- dm_done(clone, tio->error, mapped);
-}
-
-/*
- * Complete the clone and the original request with the error status
- * through softirq context.
- */
-static void dm_complete_request(struct request *rq, int error)
-{
- struct dm_rq_target_io *tio = tio_from_request(rq);
-
- tio->error = error;
- if (!rq->q->mq_ops)
- blk_complete_request(rq);
- else
- blk_mq_complete_request(rq, error);
-}
-
-/*
- * Complete the not-mapped clone and the original request with the error status
- * through softirq context.
- * Target's rq_end_io() function isn't called.
- * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
- */
-static void dm_kill_unmapped_request(struct request *rq, int error)
-{
- rq->cmd_flags |= REQ_FAILED;
- dm_complete_request(rq, error);
-}
-
-/*
- * Called with the clone's queue lock held (in the case of .request_fn)
- */
-static void end_clone_request(struct request *clone, int error)
-{
- struct dm_rq_target_io *tio = clone->end_io_data;
-
- if (!clone->q->mq_ops) {
- /*
- * For just cleaning up the information of the queue in which
- * the clone was dispatched.
- * The clone is *NOT* freed actually here because it is alloced
- * from dm own mempool (REQ_ALLOCED isn't set).
- */
- __blk_put_request(clone->q, clone);
- }
-
- /*
- * Actual request completion is done in a softirq context which doesn't
- * hold the clone's queue lock. Otherwise, deadlock could occur because:
- * - another request may be submitted by the upper level driver
- * of the stacking during the completion
- * - the submission which requires queue lock may be done
- * against this clone's queue
- */
- dm_complete_request(tio->orig, error);
-}
-
-/*
* Return maximum size of I/O possible at the supplied sector up to the current
* target boundary.
*/
@@ -1473,9 +905,36 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
+static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
+ void __pmem **kaddr, pfn_t *pfn, long size)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ struct dm_table *map;
+ struct dm_target *ti;
+ int srcu_idx;
+ long len, ret = -EIO;
+
+ map = dm_get_live_table(md, &srcu_idx);
+ if (!map)
+ goto out;
+
+ ti = dm_table_find_target(map, sector);
+ if (!dm_target_is_valid(ti))
+ goto out;
+
+ len = max_io_len(sector, ti) << SECTOR_SHIFT;
+ size = min(len, size);
+
+ if (ti->type->direct_access)
+ ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
+out:
+ dm_put_live_table(md, srcu_idx);
+ return min(ret, size);
+}
+
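dm_blk_direct_access() above shows the clamping contract for stacked DAX providers: the request is first limited to the target boundary via max_io_len(), the target may then return an even shorter mapping, and the caller reports min(ret, size). A toy version of that double clamp (provider behaviour invented):

#include <assert.h>

/* Toy double clamp: limit to the target boundary, then to whatever
 * the provider can actually map (names hypothetical). */
static long provider_map(long size) { return size < 512 ? size : 512; }

static long direct_access(long sector, long target_end, long size)
{
	long len = (target_end - sector) * 512; /* bytes to the boundary */
	long ret;

	if (size > len)
		size = len;
	ret = provider_map(size);
	return ret < size ? ret : size;         /* min(ret, size) */
}

int main(void)
{
	assert(direct_access(0, 8, 8192) == 512);
	assert(direct_access(7, 8, 8192) == 512);
	return 0;
}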
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_FLUSH.
+ * allowed for all bio types except REQ_PREFLUSH.
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1505,7 +964,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
- BUG_ON(bio->bi_rw & REQ_FLUSH);
+ BUG_ON(bio->bi_rw & REQ_PREFLUSH);
BUG_ON(bi_size > *tio->len_ptr);
BUG_ON(n_sectors > bi_size);
*tio->len_ptr -= bi_size - n_sectors;
@@ -1746,9 +1205,9 @@ static int __split_and_process_non_flush(struct clone_info *ci)
unsigned len;
int r;
- if (unlikely(bio->bi_rw & REQ_DISCARD))
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
return __send_discard(ci);
- else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
return __send_write_same(ci);
ti = dm_table_find_target(ci->map, ci->sector);
@@ -1793,7 +1252,7 @@ static void __split_and_process_bio(struct mapped_device *md,
start_io_acct(ci.io);
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_rw & REQ_PREFLUSH) {
ci.bio = &ci.md->flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
@@ -1831,7 +1290,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
dm_put_live_table(md, srcu_idx);
- if (bio_rw(bio) != READA)
+ if (!(bio->bi_rw & REQ_RAHEAD))
queue_io(md, bio);
else
bio_io_error(bio);
@@ -1843,352 +1302,6 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
-int dm_request_based(struct mapped_device *md)
-{
- return blk_queue_stackable(md->queue);
-}
-
-static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
-{
- int r;
-
- if (blk_queue_io_stat(clone->q))
- clone->cmd_flags |= REQ_IO_STAT;
-
- clone->start_time = jiffies;
- r = blk_insert_cloned_request(clone->q, clone);
- if (r)
- /* must complete clone in terms of original request */
- dm_complete_request(rq, r);
-}
-
-static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
- void *data)
-{
- struct dm_rq_target_io *tio = data;
- struct dm_rq_clone_bio_info *info =
- container_of(bio, struct dm_rq_clone_bio_info, clone);
-
- info->orig = bio_orig;
- info->tio = tio;
- bio->bi_end_io = end_clone_bio;
-
- return 0;
-}
-
-static int setup_clone(struct request *clone, struct request *rq,
- struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
- int r;
-
- r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
- dm_rq_bio_constructor, tio);
- if (r)
- return r;
-
- clone->cmd = rq->cmd;
- clone->cmd_len = rq->cmd_len;
- clone->sense = rq->sense;
- clone->end_io = end_clone_request;
- clone->end_io_data = tio;
-
- tio->clone = clone;
-
- return 0;
-}
-
-static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
- struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
- /*
- * Create clone for use with .request_fn request_queue
- */
- struct request *clone;
-
- clone = alloc_old_clone_request(md, gfp_mask);
- if (!clone)
- return NULL;
-
- blk_rq_init(NULL, clone);
- if (setup_clone(clone, rq, tio, gfp_mask)) {
- /* -ENOMEM */
- free_old_clone_request(md, clone);
- return NULL;
- }
-
- return clone;
-}
-
-static void map_tio_request(struct kthread_work *work);
-
-static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
- struct mapped_device *md)
-{
- tio->md = md;
- tio->ti = NULL;
- tio->clone = NULL;
- tio->orig = rq;
- tio->error = 0;
- /*
- * Avoid initializing info for blk-mq; it passes
- * target-specific data through info.ptr
- * (see: dm_mq_init_request)
- */
- if (!md->init_tio_pdu)
- memset(&tio->info, 0, sizeof(tio->info));
- if (md->kworker_task)
- init_kthread_work(&tio->work, map_tio_request);
-}
-
-static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
- struct mapped_device *md,
- gfp_t gfp_mask)
-{
- struct dm_rq_target_io *tio;
- int srcu_idx;
- struct dm_table *table;
-
- tio = alloc_old_rq_tio(md, gfp_mask);
- if (!tio)
- return NULL;
-
- init_tio(tio, rq, md);
-
- table = dm_get_live_table(md, &srcu_idx);
- /*
- * Must clone a request if this .request_fn DM device
- * is stacked on .request_fn device(s).
- */
- if (!dm_table_mq_request_based(table)) {
- if (!clone_old_rq(rq, md, tio, gfp_mask)) {
- dm_put_live_table(md, srcu_idx);
- free_old_rq_tio(tio);
- return NULL;
- }
- }
- dm_put_live_table(md, srcu_idx);
-
- return tio;
-}
-
-/*
- * Called with the queue lock held.
- */
-static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_rq_target_io *tio;
-
- if (unlikely(rq->special)) {
- DMWARN("Already has something in rq->special.");
- return BLKPREP_KILL;
- }
-
- tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
- if (!tio)
- return BLKPREP_DEFER;
-
- rq->special = tio;
- rq->cmd_flags |= REQ_DONTPREP;
-
- return BLKPREP_OK;
-}
-
-/*
- * Returns:
- * 0 : the request has been processed
- * DM_MAPIO_REQUEUE : the original request needs to be requeued
- * < 0 : the request was completed due to failure
- */
-static int map_request(struct dm_rq_target_io *tio, struct request *rq,
- struct mapped_device *md)
-{
- int r;
- struct dm_target *ti = tio->ti;
- struct request *clone = NULL;
-
- if (tio->clone) {
- clone = tio->clone;
- r = ti->type->map_rq(ti, clone, &tio->info);
- } else {
- r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
- if (r < 0) {
- /* The target wants to complete the I/O */
- dm_kill_unmapped_request(rq, r);
- return r;
- }
- if (r != DM_MAPIO_REMAPPED)
- return r;
- if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
- /* -ENOMEM */
- ti->type->release_clone_rq(clone);
- return DM_MAPIO_REQUEUE;
- }
- }
-
- switch (r) {
- case DM_MAPIO_SUBMITTED:
- /* The target has taken the I/O to submit by itself later */
- break;
- case DM_MAPIO_REMAPPED:
- /* The target has remapped the I/O so dispatch it */
- trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
- blk_rq_pos(rq));
- dm_dispatch_clone_request(clone, rq);
- break;
- case DM_MAPIO_REQUEUE:
- /* The target wants to requeue the I/O */
- dm_requeue_original_request(md, tio->orig);
- break;
- default:
- if (r > 0) {
- DMWARN("unimplemented target map return value: %d", r);
- BUG();
- }
-
- /* The target wants to complete the I/O */
- dm_kill_unmapped_request(rq, r);
- return r;
- }
-
- return 0;
-}
-
-static void map_tio_request(struct kthread_work *work)
-{
- struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
- struct request *rq = tio->orig;
- struct mapped_device *md = tio->md;
-
- if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
- dm_requeue_original_request(md, rq);
-}
-
-static void dm_start_request(struct mapped_device *md, struct request *orig)
-{
- if (!orig->q->mq_ops)
- blk_start_request(orig);
- else
- blk_mq_start_request(orig);
- atomic_inc(&md->pending[rq_data_dir(orig)]);
-
- if (md->seq_rq_merge_deadline_usecs) {
- md->last_rq_pos = rq_end_sector(orig);
- md->last_rq_rw = rq_data_dir(orig);
- md->last_rq_start_time = ktime_get();
- }
-
- if (unlikely(dm_stats_used(&md->stats))) {
- struct dm_rq_target_io *tio = tio_from_request(orig);
- tio->duration_jiffies = jiffies;
- tio->n_sectors = blk_rq_sectors(orig);
- dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
- tio->n_sectors, false, 0, &tio->stats_aux);
- }
-
- /*
- * Hold the md reference here for the in-flight I/O.
- * We can't rely on the reference count by device opener,
- * because the device may be closed during the request completion
- * when all bios are completed.
- * See the comment in rq_completed() too.
- */
- dm_get(md);
-}
-
-#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
-{
- return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
-}
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
- const char *buf, size_t count)
-{
- unsigned deadline;
-
- if (!dm_request_based(md) || md->use_blk_mq)
- return count;
-
- if (kstrtouint(buf, 10, &deadline))
- return -EINVAL;
-
- if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
- deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
-
- md->seq_rq_merge_deadline_usecs = deadline;
-
- return count;
-}
-
-static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
-{
- ktime_t kt_deadline;
-
- if (!md->seq_rq_merge_deadline_usecs)
- return false;
-
- kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
- kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
-
- return !ktime_after(ktime_get(), kt_deadline);
-}
-
-/*
- * q->request_fn for request-based dm.
- * Called with the queue lock held.
- */
-static void dm_request_fn(struct request_queue *q)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_target *ti = md->immutable_target;
- struct request *rq;
- struct dm_rq_target_io *tio;
- sector_t pos = 0;
-
- if (unlikely(!ti)) {
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-
- ti = dm_table_find_target(map, pos);
- dm_put_live_table(md, srcu_idx);
- }
-
- /*
- * For suspend, check blk_queue_stopped() and increment
- * ->pending within a single queue_lock not to increment the
- * number of in-flight I/Os after the queue is stopped in
- * dm_suspend().
- */
- while (!blk_queue_stopped(q)) {
- rq = blk_peek_request(q);
- if (!rq)
- return;
-
- /* always use block 0 to find the target for flushes for now */
- pos = 0;
- if (!(rq->cmd_flags & REQ_FLUSH))
- pos = blk_rq_pos(rq);
-
- if ((dm_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
- md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
- (ti->type->busy && ti->type->busy(ti))) {
- blk_delay_queue(q, HZ / 100);
- return;
- }
-
- dm_start_request(md, rq);
-
- tio = tio_from_request(rq);
- /* Establish tio->ti before queuing work (map_tio_request) */
- tio->ti = ti;
- queue_kthread_work(&md->kworker, &tio->work);
- BUG_ON(!irqs_disabled());
- }
-}
-
static int dm_any_congested(void *congested_data, int bdi_bits)
{
int r = bdi_bits;
@@ -2266,7 +1379,7 @@ static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
-static void dm_init_md_queue(struct mapped_device *md)
+void dm_init_md_queue(struct mapped_device *md)
{
/*
* Request-based dm devices cannot be stacked on top of bio-based dm
@@ -2287,7 +1400,7 @@ static void dm_init_md_queue(struct mapped_device *md)
md->queue->backing_dev_info.congested_data = md;
}
-static void dm_init_normal_md_queue(struct mapped_device *md)
+void dm_init_normal_md_queue(struct mapped_device *md)
{
md->use_blk_mq = false;
dm_init_md_queue(md);
@@ -2327,6 +1440,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
bdput(md->bdev);
md->bdev = NULL;
}
+
+ dm_mq_cleanup_mapped_device(md);
}
/*
@@ -2360,7 +1475,7 @@ static struct mapped_device *alloc_dev(int minor)
goto bad_io_barrier;
md->numa_node_id = numa_node_id;
- md->use_blk_mq = use_blk_mq;
+ md->use_blk_mq = dm_use_blk_mq_default();
md->init_tio_pdu = false;
md->type = DM_TYPE_NONE;
mutex_init(&md->suspend_lock);
@@ -2412,7 +1527,7 @@ static struct mapped_device *alloc_dev(int minor)
bio_init(&md->flush_bio);
md->flush_bio.bi_bdev = md->bdev;
- md->flush_bio.bi_rw = WRITE_FLUSH;
+ bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
dm_stats_init(&md->stats);
@@ -2445,10 +1560,6 @@ static void free_dev(struct mapped_device *md)
unlock_fs(md);
cleanup_mapped_device(md);
- if (md->tag_set) {
- blk_mq_free_tag_set(md->tag_set);
- kfree(md->tag_set);
- }
free_table_devices(&md->table_devices);
dm_stats_cleanup(&md->stats);
@@ -2464,7 +1575,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
if (md->bs) {
/* The md already has necessary mempools. */
- if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
+ if (dm_table_bio_based(t)) {
/*
* Reload bioset because front_pad may have changed
* because a different table was loaded.
@@ -2654,176 +1765,15 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
-static void dm_old_init_rq_based_worker_thread(struct mapped_device *md)
-{
- /* Initialize the request-based DM worker thread */
- init_kthread_worker(&md->kworker);
- md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
- "kdmwork-%s", dm_device_name(md));
-}
-
-/*
- * Fully initialize a .request_fn request-based queue.
- */
-static int dm_old_init_request_queue(struct mapped_device *md)
-{
- /* Fully initialize the queue */
- if (!blk_init_allocated_queue(md->queue, dm_request_fn, NULL))
- return -EINVAL;
-
- /* disable dm_request_fn's merge heuristic by default */
- md->seq_rq_merge_deadline_usecs = 0;
-
- dm_init_normal_md_queue(md);
- blk_queue_softirq_done(md->queue, dm_softirq_done);
- blk_queue_prep_rq(md->queue, dm_old_prep_fn);
-
- dm_old_init_rq_based_worker_thread(md);
-
- elv_register_queue(md->queue);
-
- return 0;
-}
-
-static int dm_mq_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
-{
- struct mapped_device *md = data;
- struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
-
- /*
- * Must initialize md member of tio, otherwise it won't
- * be available in dm_mq_queue_rq.
- */
- tio->md = md;
-
- if (md->init_tio_pdu) {
- /* target-specific per-io data is immediately after the tio */
- tio->info.ptr = tio + 1;
- }
-
- return 0;
-}
-
-static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct request *rq = bd->rq;
- struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
- struct mapped_device *md = tio->md;
- struct dm_target *ti = md->immutable_target;
-
- if (unlikely(!ti)) {
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-
- ti = dm_table_find_target(map, 0);
- dm_put_live_table(md, srcu_idx);
- }
-
- if (ti->type->busy && ti->type->busy(ti))
- return BLK_MQ_RQ_QUEUE_BUSY;
-
- dm_start_request(md, rq);
-
- /* Init tio using md established in .init_request */
- init_tio(tio, rq, md);
-
- /*
- * Establish tio->ti before queuing work (map_tio_request)
- * or making direct call to map_request().
- */
- tio->ti = ti;
-
- /* Direct call is fine since .queue_rq allows allocations */
- if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
- /* Undo dm_start_request() before requeuing */
- rq_end_stats(md, rq);
- rq_completed(md, rq_data_dir(rq), false);
- return BLK_MQ_RQ_QUEUE_BUSY;
- }
-
- return BLK_MQ_RQ_QUEUE_OK;
-}
-
-static struct blk_mq_ops dm_mq_ops = {
- .queue_rq = dm_mq_queue_rq,
- .map_queue = blk_mq_map_queue,
- .complete = dm_softirq_done,
- .init_request = dm_mq_init_request,
-};
-
-static int dm_mq_init_request_queue(struct mapped_device *md,
- struct dm_target *immutable_tgt)
-{
- struct request_queue *q;
- int err;
-
- if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
- DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
- return -EINVAL;
- }
-
- md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
- if (!md->tag_set)
- return -ENOMEM;
-
- md->tag_set->ops = &dm_mq_ops;
- md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
- md->tag_set->numa_node = md->numa_node_id;
- md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
- md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
- md->tag_set->driver_data = md;
-
- md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
- if (immutable_tgt && immutable_tgt->per_io_data_size) {
- /* any target-specific per-io data is immediately after the tio */
- md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
- md->init_tio_pdu = true;
- }
-
- err = blk_mq_alloc_tag_set(md->tag_set);
- if (err)
- goto out_kfree_tag_set;
-
- q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
- if (IS_ERR(q)) {
- err = PTR_ERR(q);
- goto out_tag_set;
- }
- dm_init_md_queue(md);
-
- /* backfill 'mq' sysfs registration normally done in blk_register_queue */
- blk_mq_register_disk(md->disk);
-
- return 0;
-
-out_tag_set:
- blk_mq_free_tag_set(md->tag_set);
-out_kfree_tag_set:
- kfree(md->tag_set);
-
- return err;
-}
-
-static unsigned filter_md_type(unsigned type, struct mapped_device *md)
-{
- if (type == DM_TYPE_BIO_BASED)
- return type;
-
- return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
-}
-
/*
* Setup the DM device's queue based on md's type
*/
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
- unsigned md_type = filter_md_type(dm_get_md_type(md), md);
+ unsigned type = dm_get_md_type(md);
- switch (md_type) {
+ switch (type) {
case DM_TYPE_REQUEST_BASED:
r = dm_old_init_request_queue(md);
if (r) {
@@ -2832,13 +1782,14 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
}
break;
case DM_TYPE_MQ_REQUEST_BASED:
- r = dm_mq_init_request_queue(md, dm_table_get_immutable_target(t));
+ r = dm_mq_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based dm-mq mapped device");
return r;
}
break;
case DM_TYPE_BIO_BASED:
+ case DM_TYPE_DAX_BIO_BASED:
dm_init_normal_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
/*
@@ -2847,6 +1798,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
*/
bioset_free(md->queue->bio_split);
md->queue->bio_split = NULL;
+
+ if (type == DM_TYPE_DAX_BIO_BASED)
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
break;
}
@@ -3541,10 +2495,9 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
if (!pools)
return NULL;
- type = filter_md_type(type, md);
-
switch (type) {
case DM_TYPE_BIO_BASED:
+ case DM_TYPE_DAX_BIO_BASED:
cachep = _io_cache;
pool_size = dm_get_reserved_bio_based_ios();
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
@@ -3601,26 +2554,76 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
kfree(pools);
}
-static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
- u32 flags)
+struct dm_pr {
+ u64 old_key;
+ u64 new_key;
+ u32 flags;
+ bool fail_early;
+};
+
+static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
+ void *data)
{
struct mapped_device *md = bdev->bd_disk->private_data;
- const struct pr_ops *ops;
- fmode_t mode;
- int r;
+ struct dm_table *table;
+ struct dm_target *ti;
+ int ret = -ENOTTY, srcu_idx;
- r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
- if (r < 0)
- return r;
+ table = dm_get_live_table(md, &srcu_idx);
+ if (!table || !dm_table_get_size(table))
+ goto out;
- ops = bdev->bd_disk->fops->pr_ops;
- if (ops && ops->pr_register)
- r = ops->pr_register(bdev, old_key, new_key, flags);
- else
- r = -EOPNOTSUPP;
+ /* We only support devices that have a single target */
+ if (dm_table_get_num_targets(table) != 1)
+ goto out;
+ ti = dm_table_get_target(table, 0);
- bdput(bdev);
- return r;
+ ret = -EINVAL;
+ if (!ti->type->iterate_devices)
+ goto out;
+
+ ret = ti->type->iterate_devices(ti, fn, data);
+out:
+ dm_put_live_table(md, srcu_idx);
+ return ret;
+}
+
+/*
+ * For register / unregister we need to manually call out to every path.
+ */
+static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_pr *pr = data;
+ const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+ if (!ops || !ops->pr_register)
+ return -EOPNOTSUPP;
+ return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
+}
+
+static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
+ u32 flags)
+{
+ struct dm_pr pr = {
+ .old_key = old_key,
+ .new_key = new_key,
+ .flags = flags,
+ .fail_early = true,
+ };
+ int ret;
+
+ ret = dm_call_pr(bdev, __dm_pr_register, &pr);
+ if (ret && new_key) {
+ /* unregister all paths if we failed to register any path */
+ pr.old_key = new_key;
+ pr.new_key = 0;
+ pr.flags = 0;
+ pr.fail_early = false;
+ dm_call_pr(bdev, __dm_pr_register, &pr);
+ }
+
+ return ret;
}
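dm_call_pr() restricts persistent reservations to single-target tables and then fans the operation out over every underlying path via iterate_devices; dm_pr_register() adds a rollback so a partially successful registration is unwound by re-registering with new_key = 0. The control flow, reduced to a userspace sketch with an invented device list:

#include <stdio.h>

#define NPATHS 3

/* Pretend the middle path rejects registration. */
static int pr_register(int path, unsigned key)
{
	return (key && path == 1) ? -1 : 0;
}

static int for_each_path(int (*fn)(int, unsigned), unsigned key)
{
	int i, ret = 0;

	for (i = 0; i < NPATHS; i++) {
		ret = fn(i, key);
		if (ret)
			break;          /* fail_early behaviour */
	}
	return ret;
}

int main(void)
{
	unsigned new_key = 42;

	if (for_each_path(pr_register, new_key)) {
		/* unregister all paths if any registration failed */
		for_each_path(pr_register, 0);
		puts("registration failed, rolled back");
	}
	return 0;
}

In the real code the rollback sets fail_early = false so it visits every path even if some unregistrations fail; the toy omits that detail.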
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
@@ -3721,6 +2724,7 @@ static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
+ .direct_access = dm_blk_direct_access,
.getgeo = dm_blk_getgeo,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
@@ -3738,18 +2742,6 @@ MODULE_PARM_DESC(major, "The major number of the device mapper");
module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
-module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
-
-module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
-
-module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
-
-module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
-
module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 13a758ec0f88..f0aad08b9654 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
+#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
@@ -33,14 +34,6 @@
#define DM_STATUS_NOFLUSH_FLAG (1 << 0)
/*
- * Type of table and mapped_device's mempool
- */
-#define DM_TYPE_NONE 0
-#define DM_TYPE_BIO_BASED 1
-#define DM_TYPE_REQUEST_BASED 2
-#define DM_TYPE_MQ_REQUEST_BASED 3
-
-/*
* List of devices that a metadevice uses and should open/close.
*/
struct dm_dev_internal {
@@ -75,8 +68,9 @@ unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
+bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
-bool dm_table_mq_request_based(struct dm_table *t);
+bool dm_table_all_blk_mq_devices(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
@@ -161,16 +155,6 @@ void dm_interface_exit(void);
/*
* sysfs interface
*/
-struct dm_kobject_holder {
- struct kobject kobj;
- struct completion completion;
-};
-
-static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
-{
- return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
-}
-
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
@@ -212,8 +196,6 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);
-bool dm_use_blk_mq(struct mapped_device *md);
-
int dm_io_init(void);
void dm_io_exit(void);
@@ -228,18 +210,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
- * Helpers that are used by DM core
+ * Various helpers
*/
unsigned dm_get_reserved_bio_based_ios(void);
-unsigned dm_get_reserved_rq_based_ios(void);
-
-static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
-{
- return !maxlen || strlen(result) + 1 >= maxlen;
-}
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
- const char *buf, size_t count);
#endif
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b7fe7e9fc777..70ff888d25d0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split;
sector_t start_sector, end_sector, data_offset;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
@@ -252,7 +252,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
split->bi_iter.bi_sector = split->bi_iter.bi_sector -
start_sector + data_offset;
- if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 866825f10b4c..1f123f5a29da 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -394,8 +394,9 @@ static void submit_flushes(struct work_struct *ws)
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
+ bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
atomic_inc(&mddev->flush_pending);
- submit_bio(WRITE_FLUSH, bi);
+ submit_bio(bi);
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
@@ -413,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
/* an empty barrier - all done */
bio_endio(bio);
else {
- bio->bi_rw &= ~REQ_FLUSH;
+ bio->bi_rw &= ~REQ_PREFLUSH;
mddev->pers->make_request(mddev, bio);
}
@@ -742,9 +743,10 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
atomic_inc(&mddev->pending_writes);
- submit_bio(WRITE_FLUSH_FUA, bio);
+ submit_bio(bio);
}
void md_super_wait(struct mddev *mddev)
@@ -754,13 +756,14 @@ void md_super_wait(struct mddev *mddev)
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
- struct page *page, int rw, bool metadata_op)
+ struct page *page, int op, int op_flags, bool metadata_op)
{
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
int ret;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
+ bio_set_op_attrs(bio, op, op_flags);
if (metadata_op)
bio->bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
@@ -770,7 +773,8 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
else
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
- submit_bio_wait(rw, bio);
+
+ submit_bio_wait(bio);
ret = !bio->bi_error;
bio_put(bio);
@@ -785,7 +789,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
if (rdev->sb_loaded)
return 0;
- if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
goto fail;
rdev->sb_loaded = 1;
return 0;
@@ -1471,7 +1475,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL;
bb_sector = (long long)offset;
if (!sync_page_io(rdev, bb_sector, sectors << 9,
- rdev->bb_page, READ, true))
+ rdev->bb_page, REQ_OP_READ, 0, true))
return -EIO;
bbp = (u64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
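sync_page_io() grows separate op and op_flags arguments; old call sites that passed a bare READ or WRITE now pass REQ_OP_READ/REQ_OP_WRITE plus flags, as the converted callers above show. A thin compatibility shim of the kind a driver could keep during an incremental conversion (entirely hypothetical, not part of this patch):

/* Hypothetical shim mapping the old single 'rw' argument onto the
 * new (op, op_flags) pair during an incremental conversion. */
enum { OP_READ, OP_WRITE };

static int sync_page_io(int op, int op_flags, void *page)
{
	(void)op_flags; (void)page;
	return op == OP_READ || op == OP_WRITE; /* pretend success */
}

static int sync_page_io_compat(int rw, void *page)
{
	int op = rw ? OP_WRITE : OP_READ;       /* old READ==0, WRITE==1 */

	return sync_page_io(op, 0, page);
}

int main(void)
{
	char page[512];
	return !sync_page_io_compat(0, page);
}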
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b5c4be73e6e4..b4f335245bd6 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -424,7 +424,7 @@ struct mddev {
/* Generic flush handling.
* The last to finish preflush schedules a worker to submit
- * the rest of the request (without the REQ_FLUSH flag).
+ * the rest of the request (without the REQ_PREFLUSH flag).
*/
struct bio *flush_bio;
atomic_t flush_pending;
@@ -618,7 +618,8 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
- struct page *page, int rw, bool metadata_op);
+ struct page *page, int op, int op_flags,
+ bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern int md_allow_write(struct mddev *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index dd483bb2e111..72ea98e89e57 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -111,7 +111,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index ea3d3b656fd0..2cc1877804c2 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -429,7 +429,14 @@ static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t ro
if (flags & INTERNAL_NODE) {
i = lower_bound(n, key);
- if (i < 0 || i >= nr_entries) {
+ if (i < 0) {
+ /*
+ * avoid early -ENODATA return when all entries are
+ * higher than the search @key.
+ */
+ i = 0;
+ }
+ if (i >= nr_entries) {
r = -ENODATA;
goto out;
}
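The dm-btree fix separates the two lower_bound() failure modes: i < 0 means the search key sits below every entry in an internal node, so the lookup must descend the leftmost child rather than bail out; -ENODATA is only correct when i runs past the end. A standalone illustration (with -1 standing in for -ENODATA):

#include <assert.h>

/* Index of the last entry <= key, or -1 if key is below all entries. */
static int lower_bound(const int *keys, int n, int key)
{
	int i;

	for (i = n - 1; i >= 0; i--)
		if (keys[i] <= key)
			break;
	return i;
}

static int lookup_next_child(const int *keys, int n, int key)
{
	int i = lower_bound(keys, n, key);

	if (i < 0)
		i = 0;          /* descend leftmost: entries exist above key */
	if (i >= n)
		return -1;      /* genuinely past the end: -ENODATA */
	return i;
}

int main(void)
{
	int keys[] = { 10, 20, 30 };

	assert(lookup_next_child(keys, 3, 5) == 0);   /* old code: -ENODATA */
	assert(lookup_next_child(keys, 3, 25) == 1);
	return 0;
}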
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 34783a3c8b3c..c3d439083212 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
struct md_rdev *tmp_dev;
struct bio *split;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
@@ -488,7 +488,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
split->bi_iter.bi_sector = sector + zone->dev_start +
tmp_dev->data_offset;
- if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c7c8cde0ab21..4e6da4497553 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -759,7 +759,7 @@ static void flush_pending_writes(struct r1conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
@@ -1033,7 +1033,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
@@ -1053,12 +1053,11 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
int i, disks;
struct bitmap *bitmap;
unsigned long flags;
+ const int op = bio_op(bio);
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
- const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
- const unsigned long do_discard = (bio->bi_rw
- & (REQ_DISCARD | REQ_SECURE));
- const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
+ const unsigned long do_flush_fua = (bio->bi_rw &
+ (REQ_PREFLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
@@ -1106,7 +1105,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
bitmap = mddev->bitmap;
/*
- * make_request() can abort the operation when READA is being
+ * make_request() can abort the operation when read-ahead is being
* used and no empty request is available.
*
*/
@@ -1166,7 +1165,7 @@ read_again:
mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
- read_bio->bi_rw = READ | do_sync;
+ bio_set_op_attrs(read_bio, op, do_sync);
read_bio->bi_private = r1_bio;
if (max_sectors < r1_bio->sectors) {
@@ -1376,8 +1375,7 @@ read_again:
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw =
- WRITE | do_flush_fua | do_sync | do_discard | do_same;
+ bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
@@ -1771,7 +1769,7 @@ static void end_sync_write(struct bio *bio)
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
int sectors, struct page *page, int rw)
{
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
/* success */
return 1;
if (rw == WRITE) {
@@ -1825,7 +1823,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev, sect, s<<9,
bio->bi_io_vec[idx].bv_page,
- READ, false)) {
+ REQ_OP_READ, 0, false)) {
success = 1;
break;
}
@@ -2030,7 +2028,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
continue;
- wbio->bi_rw = WRITE;
+ bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2090,7 +2088,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
is_badblock(rdev, sect, s,
&first_bad, &bad_sectors) == 0 &&
sync_page_io(rdev, sect, s<<9,
- conf->tmppage, READ, false))
+ conf->tmppage, REQ_OP_READ, 0, false))
success = 1;
else {
d++;
@@ -2201,14 +2199,15 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
}
- wbio->bi_rw = WRITE;
+ bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
wbio->bi_iter.bi_sector = r1_bio->sector;
wbio->bi_iter.bi_size = r1_bio->sectors << 9;
bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) < 0)
+
+ if (submit_bio_wait(wbio) < 0)
/* failure! */
ok = rdev_set_badblocks(rdev, sector,
sectors, 0)
@@ -2343,7 +2342,7 @@ read_more:
bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
- bio->bi_rw = READ | do_sync;
+ bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
bio->bi_private = r1_bio;
if (max_sectors < r1_bio->sectors) {
/* Drat - have to split this up more */
@@ -2571,7 +2570,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (i < conf->raid_disks)
still_degraded = 1;
} else if (!test_bit(In_sync, &rdev->flags)) {
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_end_io = end_sync_write;
write_targets ++;
} else {
@@ -2598,7 +2597,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (disk < 0)
disk = i;
}
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_end_io = end_sync_read;
read_targets++;
} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
@@ -2610,7 +2609,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* if we are doing resync or repair. Otherwise, leave
* this device alone for this sync request.
*/
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_end_io = end_sync_write;
write_targets++;
}
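
All the direct bi_rw assignments above funnel through bio_set_op_attrs(), which packs one operation plus modifier flags into the bio. A userspace model of the packing; the shift and flag values here are illustrative, not the kernel's actual encoding at this point in the series:

    #include <assert.h>

    enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };

    #define REQ_SYNC  (1u << 0)   /* illustrative flag values */
    #define REQ_FUA   (1u << 1)
    #define OP_SHIFT  24          /* illustrative shift */

    struct bio { unsigned int bi_rw; };

    static void bio_set_op_attrs(struct bio *bio, unsigned int op,
                                 unsigned int op_flags)
    {
        /* Operation in the high bits, modifier flags in the low bits. */
        bio->bi_rw = (op << OP_SHIFT) | op_flags;
    }

    static unsigned int bio_op(const struct bio *bio)
    {
        return bio->bi_rw >> OP_SHIFT;
    }

    int main(void)
    {
        struct bio b;

        bio_set_op_attrs(&b, REQ_OP_WRITE, REQ_SYNC | REQ_FUA);
        assert(bio_op(&b) == REQ_OP_WRITE);
        assert(b.bi_rw & REQ_SYNC);
        return 0;
    }
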
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c7de2a53e625..26ae74fd0d01 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -865,7 +865,7 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
@@ -1041,7 +1041,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
@@ -1058,12 +1058,10 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
+ const int op = bio_op(bio);
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
- const unsigned long do_discard = (bio->bi_rw
- & (REQ_DISCARD | REQ_SECURE));
- const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
unsigned long flags;
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
@@ -1156,7 +1154,7 @@ read_again:
choose_data_offset(r10_bio, rdev);
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
- read_bio->bi_rw = READ | do_sync;
+ bio_set_op_attrs(read_bio, op, do_sync);
read_bio->bi_private = r10_bio;
if (max_sectors < r10_bio->sectors) {
@@ -1363,8 +1361,7 @@ retry_write:
rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw =
- WRITE | do_sync | do_fua | do_discard | do_same;
+ bio_set_op_attrs(mbio, op, do_sync | do_fua);
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -1406,8 +1403,7 @@ retry_write:
r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw =
- WRITE | do_sync | do_fua | do_discard | do_same;
+ bio_set_op_attrs(mbio, op, do_sync | do_fua);
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -1450,7 +1446,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
@@ -1992,10 +1988,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
tbio->bi_vcnt = vcnt;
tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
- tbio->bi_rw = WRITE;
tbio->bi_private = r10_bio;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
tbio->bi_end_io = end_sync_write;
+ bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
bio_copy_data(tbio, fbio);
@@ -2078,7 +2074,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
addr,
s << 9,
bio->bi_io_vec[idx].bv_page,
- READ, false);
+ REQ_OP_READ, 0, false);
if (ok) {
rdev = conf->mirrors[dw].rdev;
addr = r10_bio->devs[1].addr + sect;
@@ -2086,7 +2082,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
addr,
s << 9,
bio->bi_io_vec[idx].bv_page,
- WRITE, false);
+ REQ_OP_WRITE, 0, false);
if (!ok) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement,
@@ -2213,7 +2209,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
&& (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1;
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
/* success */
return 1;
if (rw == WRITE) {
@@ -2299,7 +2295,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
r10_bio->devs[sl].addr +
sect,
s<<9,
- conf->tmppage, READ, false);
+ conf->tmppage,
+ REQ_OP_READ, 0, false);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
if (success)
@@ -2474,7 +2471,9 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) < 0)
+ bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+
+ if (submit_bio_wait(wbio) < 0)
/* Failure! */
ok = rdev_set_badblocks(rdev, sector,
sectors, 0)
@@ -2548,7 +2547,7 @@ read_more:
bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
+ choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
- bio->bi_rw = READ | do_sync;
+ bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
if (max_sectors < r10_bio->sectors) {
@@ -3038,7 +3037,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
from_addr = r10_bio->devs[j].addr;
bio->bi_iter.bi_sector = from_addr +
rdev->data_offset;
@@ -3064,7 +3063,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = to_addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
@@ -3093,7 +3092,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = to_addr +
rdev->data_offset;
bio->bi_bdev = rdev->bdev;
@@ -3213,7 +3212,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_iter.bi_sector = sector +
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -3235,7 +3234,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = sector +
conf->mirrors[d].replacement->data_offset;
bio->bi_bdev = conf->mirrors[d].replacement->bdev;
@@ -4320,7 +4319,7 @@ read_more:
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_sync_read;
- read_bio->bi_rw = READ;
+ bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
read_bio->bi_error = 0;
read_bio->bi_vcnt = 0;
@@ -4354,7 +4353,7 @@ read_more:
rdev2->new_data_offset;
b->bi_private = r10_bio;
b->bi_end_io = end_reshape_write;
- b->bi_rw = WRITE;
+ bio_set_op_attrs(b, REQ_OP_WRITE, 0);
b->bi_next = blist;
blist = b;
}
@@ -4522,7 +4521,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
addr,
s << 9,
bvec[idx].bv_page,
- READ, false);
+ REQ_OP_READ, 0, false);
if (success)
break;
failed:
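
submit_bio_wait() loses its rw argument in this series: the caller stamps the bio with bio_set_op_attrs() first and the direction travels inside the bio. The converted shape from narrow_write_error() above (handle_write_failure() is a stand-in for the badblocks bookkeeping):

    bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);

    if (submit_bio_wait(wbio) < 0)
        handle_write_failure();
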
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index e889e2deb7b3..5504ce2bac06 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -254,14 +254,14 @@ static void r5l_submit_current_io(struct r5l_log *log)
__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
spin_unlock_irqrestore(&log->io_list_lock, flags);
- submit_bio(WRITE, io->current_bio);
+ submit_bio(io->current_bio);
}
static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_bdev = log->rdev->bdev;
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
@@ -373,7 +373,7 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
io->current_bio = r5l_bio_alloc(log);
bio_chain(io->current_bio, prev);
- submit_bio(WRITE, prev);
+ submit_bio(prev);
}
if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
@@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
bio_endio(bio);
return 0;
}
- bio->bi_rw &= ~REQ_FLUSH;
+ bio->bi_rw &= ~REQ_PREFLUSH;
return -EAGAIN;
}
@@ -686,7 +686,8 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
bio_reset(&log->flush_bio);
log->flush_bio.bi_bdev = log->rdev->bdev;
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- submit_bio(WRITE_FLUSH, &log->flush_bio);
+ bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+ submit_bio(&log->flush_bio);
}
static void r5l_write_super(struct r5l_log *log, sector_t cp);
@@ -881,7 +882,8 @@ static int r5l_read_meta_block(struct r5l_log *log,
struct r5l_meta_block *mb;
u32 crc, stored_crc;
- if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
+ if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0,
+ false))
return -EIO;
mb = page_address(page);
@@ -926,7 +928,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
&disk_index, sh);
sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, READ, false);
+ sh->dev[disk_index].page, REQ_OP_READ, 0,
+ false);
sh->dev[disk_index].log_checksum =
le32_to_cpu(payload->checksum[0]);
set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
@@ -934,7 +937,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
} else {
disk_index = sh->pd_idx;
sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, READ, false);
+ sh->dev[disk_index].page, REQ_OP_READ, 0,
+ false);
sh->dev[disk_index].log_checksum =
le32_to_cpu(payload->checksum[0]);
set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
@@ -944,7 +948,7 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
sync_page_io(log->rdev,
r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
PAGE_SIZE, sh->dev[disk_index].page,
- READ, false);
+ REQ_OP_READ, 0, false);
sh->dev[disk_index].log_checksum =
le32_to_cpu(payload->checksum[1]);
set_bit(R5_Wantwrite,
@@ -986,11 +990,13 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
rdev = rcu_dereference(conf->disks[disk_index].rdev);
if (rdev)
sync_page_io(rdev, stripe_sect, PAGE_SIZE,
- sh->dev[disk_index].page, WRITE, false);
+ sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+ false);
rrdev = rcu_dereference(conf->disks[disk_index].replacement);
if (rrdev)
sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
- sh->dev[disk_index].page, WRITE, false);
+ sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+ false);
}
raid5_release_stripe(sh);
return 0;
@@ -1062,7 +1068,8 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
mb->checksum = cpu_to_le32(crc);
- if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+ WRITE_FUA, false)) {
__free_page(page);
return -EIO;
}
@@ -1137,7 +1144,7 @@ static int r5l_load_log(struct r5l_log *log)
if (!page)
return -ENOMEM;
- if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
+ if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
ret = -EIO;
goto ioerr;
}
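
Two related conversions in raid5-cache: submit_bio() also drops its rw argument, and the preflush submission is rebuilt on bio_set_op_attrs(). Side by side, as taken from the hunks above:

    /* Before */
    submit_bio(WRITE, io->current_bio);
    submit_bio(WRITE_FLUSH, &log->flush_bio);

    /* After */
    submit_bio(io->current_bio);   /* op set at allocation in r5l_bio_alloc() */
    bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
    submit_bio(&log->flush_bio);
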
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8959e6dd31dd..6953d78297b0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -806,7 +806,8 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
dd_idx = 0;
while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
dd_idx++;
- if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
+ if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+ bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
goto unlock_out;
if (head->batch_head) {
@@ -891,29 +892,28 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (r5l_write_stripe(conf->log, sh) == 0)
return;
for (i = disks; i--; ) {
- int rw;
+ int op, op_flags = 0;
int replace_only = 0;
struct bio *bi, *rbi;
struct md_rdev *rdev, *rrdev = NULL;
sh = head_sh;
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
+ op = REQ_OP_WRITE;
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
- rw = WRITE_FUA;
- else
- rw = WRITE;
+ op_flags = WRITE_FUA;
if (test_bit(R5_Discard, &sh->dev[i].flags))
- rw |= REQ_DISCARD;
+ op = REQ_OP_DISCARD;
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
- rw = READ;
+ op = REQ_OP_READ;
else if (test_and_clear_bit(R5_WantReplace,
&sh->dev[i].flags)) {
- rw = WRITE;
+ op = REQ_OP_WRITE;
replace_only = 1;
} else
continue;
if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
- rw |= REQ_SYNC;
+ op_flags |= REQ_SYNC;
again:
bi = &sh->dev[i].req;
@@ -927,7 +927,7 @@ again:
rdev = rrdev;
rrdev = NULL;
}
- if (rw & WRITE) {
+ if (op_is_write(op)) {
if (replace_only)
rdev = NULL;
if (rdev == rrdev)
@@ -953,7 +953,7 @@ again:
* need to check for writes. We never accept write errors
* on the replacement, so we don't to check rrdev.
*/
- while ((rw & WRITE) && rdev &&
+ while (op_is_write(op) && rdev &&
test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
int bad_sectors;
@@ -995,13 +995,13 @@ again:
bio_reset(bi);
bi->bi_bdev = rdev->bdev;
- bi->bi_rw = rw;
- bi->bi_end_io = (rw & WRITE)
+ bio_set_op_attrs(bi, op, op_flags);
+ bi->bi_end_io = op_is_write(op)
? raid5_end_write_request
: raid5_end_read_request;
bi->bi_private = sh;
- pr_debug("%s: for %llu schedule op %ld on disc %d\n",
+ pr_debug("%s: for %llu schedule op %d on disc %d\n",
__func__, (unsigned long long)sh->sector,
bi->bi_rw, i);
atomic_inc(&sh->count);
@@ -1027,7 +1027,7 @@ again:
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
*/
- if (rw & REQ_DISCARD)
+ if (op == REQ_OP_DISCARD)
bi->bi_vcnt = 0;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
@@ -1047,12 +1047,12 @@ again:
bio_reset(rbi);
rbi->bi_bdev = rrdev->bdev;
- rbi->bi_rw = rw;
- BUG_ON(!(rw & WRITE));
+ bio_set_op_attrs(rbi, op, op_flags);
+ BUG_ON(!op_is_write(op));
rbi->bi_end_io = raid5_end_write_request;
rbi->bi_private = sh;
- pr_debug("%s: for %llu schedule op %ld on "
+ pr_debug("%s: for %llu schedule op %d on "
"replacement disc %d\n",
__func__, (unsigned long long)sh->sector,
rbi->bi_rw, i);
@@ -1076,7 +1076,7 @@ again:
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
*/
- if (rw & REQ_DISCARD)
+ if (op == REQ_OP_DISCARD)
rbi->bi_vcnt = 0;
if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
@@ -1085,9 +1085,9 @@ again:
generic_make_request(rbi);
}
if (!rdev && !rrdev) {
- if (rw & WRITE)
+ if (op_is_write(op))
set_bit(STRIPE_DEGRADED, &sh->state);
- pr_debug("skip op %ld on disc %d for sector %llu\n",
+ pr_debug("skip op %d on disc %d for sector %llu\n",
bi->bi_rw, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -1623,7 +1623,7 @@ again:
set_bit(R5_WantFUA, &dev->flags);
if (wbi->bi_rw & REQ_SYNC)
set_bit(R5_SyncIO, &dev->flags);
- if (wbi->bi_rw & REQ_DISCARD)
+ if (bio_op(wbi) == REQ_OP_DISCARD)
set_bit(R5_Discard, &dev->flags);
else {
tx = async_copy_data(1, wbi, &dev->page,
@@ -5150,7 +5150,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
DEFINE_WAIT(w);
bool do_prepare;
- if (unlikely(bi->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
int ret = r5l_handle_flush_request(conf->log, bi);
if (ret == 0)
@@ -5176,7 +5176,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
return;
}
- if (unlikely(bi->bi_rw & REQ_DISCARD)) {
+ if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
make_discard_request(mddev, bi);
return;
}
@@ -5233,7 +5233,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
(unsigned long long)logical_sector);
sh = raid5_get_active_stripe(conf, new_sector, previous,
- (bi->bi_rw&RWA_MASK), 0);
+ (bi->bi_rw & REQ_RAHEAD), 0);
if (sh) {
if (unlikely(previous)) {
/* expansion might have moved on while waiting for a
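
ops_run_io() is the busiest conversion: a single rw bitmask becomes an (op, op_flags) pair chosen from the stripe-device flags, and every "rw & WRITE" test becomes op_is_write(op). A compact userspace model of the selection logic; the boolean stand-ins mirror the R5_* flag tests and the flag values are illustrative:

    #include <stdio.h>
    #include <stdbool.h>

    enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };
    #define REQ_SYNC  (1u << 0)   /* illustrative values */
    #define WRITE_FUA (1u << 1)

    /* Stand-ins for test_and_clear_bit() on the stripe-device flags. */
    static bool want_write = true, want_fua = true, want_discard = false;
    static bool want_sync = true;

    int main(void)
    {
        int op, op_flags = 0;

        if (want_write) {
            op = REQ_OP_WRITE;
            if (want_fua)
                op_flags = WRITE_FUA;
            if (want_discard)
                op = REQ_OP_DISCARD;  /* discard replaces the op itself */
        } else {
            op = REQ_OP_READ;
        }
        if (want_sync)
            op_flags |= REQ_SYNC;

        printf("op=%d flags=%#x\n", op, op_flags);
        return 0;
    }
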
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 19cd64c95bb8..fe031b06935f 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -1081,7 +1081,6 @@ static int airspy_probe(struct usb_interface *intf,
err_free_controls:
v4l2_ctrl_handler_free(&s->hdl);
-err_unregister_v4l2_dev:
v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
kfree(s);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index f899bf1c5fc0..51a0fa144392 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -2171,7 +2171,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
* The determine_valid_ioctls() call already should ensure
* that this can never happen, but just in case...
*/
- if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_cropcap))
+ if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_g_selection))
return -ENOTTY;
if (ops->vidioc_cropcap)
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 3cd68152ddf8..40bb8ae5853c 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2002,8 +2002,7 @@ static int msb_bd_getgeo(struct block_device *bdev,
static int msb_prepare_req(struct request_queue *q, struct request *req)
{
- if (req->cmd_type != REQ_TYPE_FS &&
- req->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (req->cmd_type != REQ_TYPE_FS) {
blk_dump_rq_flags(req, "MS unsupported request");
return BLKPREP_KILL;
}
@@ -2146,7 +2145,6 @@ static int msb_init_disk(struct memstick_dev *card)
msb->disk->fops = &msb_bdops;
msb->disk->private_data = msb;
msb->disk->queue = msb->queue;
- msb->disk->driverfs_dev = &card->dev;
msb->disk->flags |= GENHD_FL_EXT_DEVT;
capacity = msb->pages_in_block * msb->logical_block_count;
@@ -2163,7 +2161,7 @@ static int msb_init_disk(struct memstick_dev *card)
set_disk_ro(msb->disk, 1);
msb_start(card);
- add_disk(msb->disk);
+ device_add_disk(&card->dev, msb->disk);
dbg("Disk added");
return 0;
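
Both memstick block drivers switch from assigning disk->driverfs_dev by hand to device_add_disk(), which takes the parent device and registers the disk in one call. The shape of the conversion:

    /* Before */
    msb->disk->driverfs_dev = &card->dev;
    ...
    add_disk(msb->disk);

    /* After */
    device_add_disk(&card->dev, msb->disk);
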
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 0fb27d338811..c1472275fe57 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -829,8 +829,7 @@ static void mspro_block_start(struct memstick_dev *card)
static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
{
- if (req->cmd_type != REQ_TYPE_FS &&
- req->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (req->cmd_type != REQ_TYPE_FS) {
blk_dump_rq_flags(req, "MSPro unsupported request");
return BLKPREP_KILL;
}
@@ -1243,7 +1242,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
msb->usage_count = 1;
msb->disk->private_data = msb;
msb->disk->queue = msb->queue;
- msb->disk->driverfs_dev = &card->dev;
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
@@ -1255,7 +1253,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
- add_disk(msb->disk);
+ device_add_disk(&card->dev, msb->disk);
msb->active = 1;
return 0;
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b2fb6dbffcef..4387ccb79e64 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,3 +57,17 @@ obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PANEL) += panel.o
+
+lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_heap.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_perms.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o
+
+OBJCOPYFLAGS :=
+OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
+ --set-section-flags .text=alloc,readonly \
+ --rename-section .text=.rodata
+$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o
+ $(call if_changed,objcopy)
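
The objcopy rule above renames an ordinary object's .text section to .rodata, so the one function compiled into it lands in a read-only, non-executable mapping; the EXEC_RODATA test then tries to call it. A sketch of the translation unit the rule operates on; the file itself is not part of this diff, and its contents are inferred from the lkdtm.h declaration below:

    /* lkdtm_rodata.c (sketch): compiled normally, then post-processed by
     * the objcopy rule so this function's code ends up in .rodata. */
    void lkdtm_rodata_do_nothing(void)
    {
        /* Empty on purpose: the test is whether it can execute at all. */
    }
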
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
deleted file mode 100644
index 0a5cbbe12452..000000000000
--- a/drivers/misc/lkdtm.c
+++ /dev/null
@@ -1,1023 +0,0 @@
-/*
- * Kprobe module for testing crash dumps
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Ankita Garg <ankita@in.ibm.com>
- *
- * This module induces system failures at predefined crashpoints to
- * evaluate the reliability of crash dumps obtained using different dumping
- * solutions.
- *
- * It is adapted from the Linux Kernel Dump Test Tool by
- * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
- *
- * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
- *
- * See Documentation/fault-injection/provoke-crashes.txt for instructions
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/buffer_head.h>
-#include <linux/kprobes.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-#include <linux/slab.h>
-#include <scsi/scsi_cmnd.h>
-#include <linux/debugfs.h>
-#include <linux/vmalloc.h>
-#include <linux/mman.h>
-#include <asm/cacheflush.h>
-
-#ifdef CONFIG_IDE
-#include <linux/ide.h>
-#endif
-
-/*
- * Make sure our attempts to over run the kernel stack doesn't trigger
- * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
- * recurse past the end of THREAD_SIZE by default.
- */
-#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
-#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
-#else
-#define REC_STACK_SIZE (THREAD_SIZE / 8)
-#endif
-#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
-
-#define DEFAULT_COUNT 10
-#define EXEC_SIZE 64
-
-enum cname {
- CN_INVALID,
- CN_INT_HARDWARE_ENTRY,
- CN_INT_HW_IRQ_EN,
- CN_INT_TASKLET_ENTRY,
- CN_FS_DEVRW,
- CN_MEM_SWAPOUT,
- CN_TIMERADD,
- CN_SCSI_DISPATCH_CMD,
- CN_IDE_CORE_CP,
- CN_DIRECT,
-};
-
-enum ctype {
- CT_NONE,
- CT_PANIC,
- CT_BUG,
- CT_WARNING,
- CT_EXCEPTION,
- CT_LOOP,
- CT_OVERFLOW,
- CT_CORRUPT_STACK,
- CT_UNALIGNED_LOAD_STORE_WRITE,
- CT_OVERWRITE_ALLOCATION,
- CT_WRITE_AFTER_FREE,
- CT_READ_AFTER_FREE,
- CT_WRITE_BUDDY_AFTER_FREE,
- CT_READ_BUDDY_AFTER_FREE,
- CT_SOFTLOCKUP,
- CT_HARDLOCKUP,
- CT_SPINLOCKUP,
- CT_HUNG_TASK,
- CT_EXEC_DATA,
- CT_EXEC_STACK,
- CT_EXEC_KMALLOC,
- CT_EXEC_VMALLOC,
- CT_EXEC_USERSPACE,
- CT_ACCESS_USERSPACE,
- CT_WRITE_RO,
- CT_WRITE_RO_AFTER_INIT,
- CT_WRITE_KERN,
- CT_WRAP_ATOMIC
-};
-
-static char* cp_name[] = {
- "INT_HARDWARE_ENTRY",
- "INT_HW_IRQ_EN",
- "INT_TASKLET_ENTRY",
- "FS_DEVRW",
- "MEM_SWAPOUT",
- "TIMERADD",
- "SCSI_DISPATCH_CMD",
- "IDE_CORE_CP",
- "DIRECT",
-};
-
-static char* cp_type[] = {
- "PANIC",
- "BUG",
- "WARNING",
- "EXCEPTION",
- "LOOP",
- "OVERFLOW",
- "CORRUPT_STACK",
- "UNALIGNED_LOAD_STORE_WRITE",
- "OVERWRITE_ALLOCATION",
- "WRITE_AFTER_FREE",
- "READ_AFTER_FREE",
- "WRITE_BUDDY_AFTER_FREE",
- "READ_BUDDY_AFTER_FREE",
- "SOFTLOCKUP",
- "HARDLOCKUP",
- "SPINLOCKUP",
- "HUNG_TASK",
- "EXEC_DATA",
- "EXEC_STACK",
- "EXEC_KMALLOC",
- "EXEC_VMALLOC",
- "EXEC_USERSPACE",
- "ACCESS_USERSPACE",
- "WRITE_RO",
- "WRITE_RO_AFTER_INIT",
- "WRITE_KERN",
- "WRAP_ATOMIC"
-};
-
-static struct jprobe lkdtm;
-
-static int lkdtm_parse_commandline(void);
-static void lkdtm_handler(void);
-
-static char* cpoint_name;
-static char* cpoint_type;
-static int cpoint_count = DEFAULT_COUNT;
-static int recur_count = REC_NUM_DEFAULT;
-
-static enum cname cpoint = CN_INVALID;
-static enum ctype cptype = CT_NONE;
-static int count = DEFAULT_COUNT;
-static DEFINE_SPINLOCK(count_lock);
-static DEFINE_SPINLOCK(lock_me_up);
-
-static u8 data_area[EXEC_SIZE];
-
-static const unsigned long rodata = 0xAA55AA55;
-static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
-
-module_param(recur_count, int, 0644);
-MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
-module_param(cpoint_name, charp, 0444);
-MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
-module_param(cpoint_type, charp, 0444);
-MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
- "hitting the crash point");
-module_param(cpoint_count, int, 0644);
-MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
- "crash point is to be hit to trigger action");
-
-static unsigned int jp_do_irq(unsigned int irq)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static irqreturn_t jp_handle_irq_event(unsigned int irq,
- struct irqaction *action)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static void jp_tasklet_action(struct softirq_action *a)
-{
- lkdtm_handler();
- jprobe_return();
-}
-
-static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
-{
- lkdtm_handler();
- jprobe_return();
-}
-
-struct scan_control;
-
-static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
- struct zone *zone,
- struct scan_control *sc)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-#ifdef CONFIG_IDE
-static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
- struct block_device *bdev, unsigned int cmd,
- unsigned long arg)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-#endif
-
-/* Return the crashpoint number or NONE if the name is invalid */
-static enum ctype parse_cp_type(const char *what, size_t count)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
- if (!strcmp(what, cp_type[i]))
- return i + 1;
- }
-
- return CT_NONE;
-}
-
-static const char *cp_type_to_str(enum ctype type)
-{
- if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
- return "None";
-
- return cp_type[type - 1];
-}
-
-static const char *cp_name_to_str(enum cname name)
-{
- if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
- return "INVALID";
-
- return cp_name[name - 1];
-}
-
-
-static int lkdtm_parse_commandline(void)
-{
- int i;
- unsigned long flags;
-
- if (cpoint_count < 1 || recur_count < 1)
- return -EINVAL;
-
- spin_lock_irqsave(&count_lock, flags);
- count = cpoint_count;
- spin_unlock_irqrestore(&count_lock, flags);
-
- /* No special parameters */
- if (!cpoint_type && !cpoint_name)
- return 0;
-
- /* Neither or both of these need to be set */
- if (!cpoint_type || !cpoint_name)
- return -EINVAL;
-
- cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
- if (cptype == CT_NONE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
- if (!strcmp(cpoint_name, cp_name[i])) {
- cpoint = i + 1;
- return 0;
- }
- }
-
- /* Could not find a valid crash point */
- return -EINVAL;
-}
-
-static int recursive_loop(int remaining)
-{
- char buf[REC_STACK_SIZE];
-
- /* Make sure compiler does not optimize this away. */
- memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
- if (!remaining)
- return 0;
- else
- return recursive_loop(remaining - 1);
-}
-
-static void do_nothing(void)
-{
- return;
-}
-
-/* Must immediately follow do_nothing for size calculuations to work out. */
-static void do_overwritten(void)
-{
- pr_info("do_overwritten wasn't overwritten!\n");
- return;
-}
-
-static noinline void corrupt_stack(void)
-{
- /* Use default char array length that triggers stack protection. */
- char data[8];
-
- memset((void *)data, 0, 64);
-}
-
-static void noinline execute_location(void *dst)
-{
- void (*func)(void) = dst;
-
- pr_info("attempting ok execution at %p\n", do_nothing);
- do_nothing();
-
- memcpy(dst, do_nothing, EXEC_SIZE);
- flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
- pr_info("attempting bad execution at %p\n", func);
- func();
-}
-
-static void execute_user_location(void *dst)
-{
- /* Intentionally crossing kernel/user memory boundary. */
- void (*func)(void) = dst;
-
- pr_info("attempting ok execution at %p\n", do_nothing);
- do_nothing();
-
- if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
- return;
- flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
- pr_info("attempting bad execution at %p\n", func);
- func();
-}
-
-static void lkdtm_do_action(enum ctype which)
-{
- switch (which) {
- case CT_PANIC:
- panic("dumptest");
- break;
- case CT_BUG:
- BUG();
- break;
- case CT_WARNING:
- WARN_ON(1);
- break;
- case CT_EXCEPTION:
- *((int *) 0) = 0;
- break;
- case CT_LOOP:
- for (;;)
- ;
- break;
- case CT_OVERFLOW:
- (void) recursive_loop(recur_count);
- break;
- case CT_CORRUPT_STACK:
- corrupt_stack();
- break;
- case CT_UNALIGNED_LOAD_STORE_WRITE: {
- static u8 data[5] __attribute__((aligned(4))) = {1, 2,
- 3, 4, 5};
- u32 *p;
- u32 val = 0x12345678;
-
- p = (u32 *)(data + 1);
- if (*p == 0)
- val = 0x87654321;
- *p = val;
- break;
- }
- case CT_OVERWRITE_ALLOCATION: {
- size_t len = 1020;
- u32 *data = kmalloc(len, GFP_KERNEL);
-
- data[1024 / sizeof(u32)] = 0x12345678;
- kfree(data);
- break;
- }
- case CT_WRITE_AFTER_FREE: {
- int *base, *again;
- size_t len = 1024;
- /*
- * The slub allocator uses the first word to store the free
- * pointer in some configurations. Use the middle of the
- * allocation to avoid running into the freelist
- */
- size_t offset = (len / sizeof(*base)) / 2;
-
- base = kmalloc(len, GFP_KERNEL);
- pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
- pr_info("Attempting bad write to freed memory at %p\n",
- &base[offset]);
- kfree(base);
- base[offset] = 0x0abcdef0;
- /* Attempt to notice the overwrite. */
- again = kmalloc(len, GFP_KERNEL);
- kfree(again);
- if (again != base)
- pr_info("Hmm, didn't get the same memory range.\n");
-
- break;
- }
- case CT_READ_AFTER_FREE: {
- int *base, *val, saw;
- size_t len = 1024;
- /*
- * The slub allocator uses the first word to store the free
- * pointer in some configurations. Use the middle of the
- * allocation to avoid running into the freelist
- */
- size_t offset = (len / sizeof(*base)) / 2;
-
- base = kmalloc(len, GFP_KERNEL);
- if (!base)
- break;
-
- val = kmalloc(len, GFP_KERNEL);
- if (!val) {
- kfree(base);
- break;
- }
-
- *val = 0x12345678;
- base[offset] = *val;
- pr_info("Value in memory before free: %x\n", base[offset]);
-
- kfree(base);
-
- pr_info("Attempting bad read from freed memory\n");
- saw = base[offset];
- if (saw != *val) {
- /* Good! Poisoning happened, so declare a win. */
- pr_info("Memory correctly poisoned (%x)\n", saw);
- BUG();
- }
- pr_info("Memory was not poisoned\n");
-
- kfree(val);
- break;
- }
- case CT_WRITE_BUDDY_AFTER_FREE: {
- unsigned long p = __get_free_page(GFP_KERNEL);
- if (!p)
- break;
- pr_info("Writing to the buddy page before free\n");
- memset((void *)p, 0x3, PAGE_SIZE);
- free_page(p);
- schedule();
- pr_info("Attempting bad write to the buddy page after free\n");
- memset((void *)p, 0x78, PAGE_SIZE);
- /* Attempt to notice the overwrite. */
- p = __get_free_page(GFP_KERNEL);
- free_page(p);
- schedule();
-
- break;
- }
- case CT_READ_BUDDY_AFTER_FREE: {
- unsigned long p = __get_free_page(GFP_KERNEL);
- int saw, *val;
- int *base;
-
- if (!p)
- break;
-
- val = kmalloc(1024, GFP_KERNEL);
- if (!val) {
- free_page(p);
- break;
- }
-
- base = (int *)p;
-
- *val = 0x12345678;
- base[0] = *val;
- pr_info("Value in memory before free: %x\n", base[0]);
- free_page(p);
- pr_info("Attempting to read from freed memory\n");
- saw = base[0];
- if (saw != *val) {
- /* Good! Poisoning happened, so declare a win. */
- pr_info("Memory correctly poisoned (%x)\n", saw);
- BUG();
- }
- pr_info("Buddy page was not poisoned\n");
-
- kfree(val);
- break;
- }
- case CT_SOFTLOCKUP:
- preempt_disable();
- for (;;)
- cpu_relax();
- break;
- case CT_HARDLOCKUP:
- local_irq_disable();
- for (;;)
- cpu_relax();
- break;
- case CT_SPINLOCKUP:
- /* Must be called twice to trigger. */
- spin_lock(&lock_me_up);
- /* Let sparse know we intended to exit holding the lock. */
- __release(&lock_me_up);
- break;
- case CT_HUNG_TASK:
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
- break;
- case CT_EXEC_DATA:
- execute_location(data_area);
- break;
- case CT_EXEC_STACK: {
- u8 stack_area[EXEC_SIZE];
- execute_location(stack_area);
- break;
- }
- case CT_EXEC_KMALLOC: {
- u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
- execute_location(kmalloc_area);
- kfree(kmalloc_area);
- break;
- }
- case CT_EXEC_VMALLOC: {
- u32 *vmalloc_area = vmalloc(EXEC_SIZE);
- execute_location(vmalloc_area);
- vfree(vmalloc_area);
- break;
- }
- case CT_EXEC_USERSPACE: {
- unsigned long user_addr;
-
- user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_addr >= TASK_SIZE) {
- pr_warn("Failed to allocate user memory\n");
- return;
- }
- execute_user_location((void *)user_addr);
- vm_munmap(user_addr, PAGE_SIZE);
- break;
- }
- case CT_ACCESS_USERSPACE: {
- unsigned long user_addr, tmp = 0;
- unsigned long *ptr;
-
- user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_addr >= TASK_SIZE) {
- pr_warn("Failed to allocate user memory\n");
- return;
- }
-
- if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
- pr_warn("copy_to_user failed\n");
- vm_munmap(user_addr, PAGE_SIZE);
- return;
- }
-
- ptr = (unsigned long *)user_addr;
-
- pr_info("attempting bad read at %p\n", ptr);
- tmp = *ptr;
- tmp += 0xc0dec0de;
-
- pr_info("attempting bad write at %p\n", ptr);
- *ptr = tmp;
-
- vm_munmap(user_addr, PAGE_SIZE);
-
- break;
- }
- case CT_WRITE_RO: {
- /* Explicitly cast away "const" for the test. */
- unsigned long *ptr = (unsigned long *)&rodata;
-
- pr_info("attempting bad rodata write at %p\n", ptr);
- *ptr ^= 0xabcd1234;
-
- break;
- }
- case CT_WRITE_RO_AFTER_INIT: {
- unsigned long *ptr = &ro_after_init;
-
- /*
- * Verify we were written to during init. Since an Oops
- * is considered a "success", a failure is to just skip the
- * real test.
- */
- if ((*ptr & 0xAA) != 0xAA) {
- pr_info("%p was NOT written during init!?\n", ptr);
- break;
- }
-
- pr_info("attempting bad ro_after_init write at %p\n", ptr);
- *ptr ^= 0xabcd1234;
-
- break;
- }
- case CT_WRITE_KERN: {
- size_t size;
- unsigned char *ptr;
-
- size = (unsigned long)do_overwritten -
- (unsigned long)do_nothing;
- ptr = (unsigned char *)do_overwritten;
-
- pr_info("attempting bad %zu byte write at %p\n", size, ptr);
- memcpy(ptr, (unsigned char *)do_nothing, size);
- flush_icache_range((unsigned long)ptr,
- (unsigned long)(ptr + size));
-
- do_overwritten();
- break;
- }
- case CT_WRAP_ATOMIC: {
- atomic_t under = ATOMIC_INIT(INT_MIN);
- atomic_t over = ATOMIC_INIT(INT_MAX);
-
- pr_info("attempting atomic underflow\n");
- atomic_dec(&under);
- pr_info("attempting atomic overflow\n");
- atomic_inc(&over);
-
- return;
- }
- case CT_NONE:
- default:
- break;
- }
-
-}
-
-static void lkdtm_handler(void)
-{
- unsigned long flags;
- bool do_it = false;
-
- spin_lock_irqsave(&count_lock, flags);
- count--;
- pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
- cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
-
- if (count == 0) {
- do_it = true;
- count = cpoint_count;
- }
- spin_unlock_irqrestore(&count_lock, flags);
-
- if (do_it)
- lkdtm_do_action(cptype);
-}
-
-static int lkdtm_register_cpoint(enum cname which)
-{
- int ret;
-
- cpoint = CN_INVALID;
- if (lkdtm.entry != NULL)
- unregister_jprobe(&lkdtm);
-
- switch (which) {
- case CN_DIRECT:
- lkdtm_do_action(cptype);
- return 0;
- case CN_INT_HARDWARE_ENTRY:
- lkdtm.kp.symbol_name = "do_IRQ";
- lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
- break;
- case CN_INT_HW_IRQ_EN:
- lkdtm.kp.symbol_name = "handle_IRQ_event";
- lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
- break;
- case CN_INT_TASKLET_ENTRY:
- lkdtm.kp.symbol_name = "tasklet_action";
- lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
- break;
- case CN_FS_DEVRW:
- lkdtm.kp.symbol_name = "ll_rw_block";
- lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
- break;
- case CN_MEM_SWAPOUT:
- lkdtm.kp.symbol_name = "shrink_inactive_list";
- lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
- break;
- case CN_TIMERADD:
- lkdtm.kp.symbol_name = "hrtimer_start";
- lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
- break;
- case CN_SCSI_DISPATCH_CMD:
- lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
- lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
- break;
- case CN_IDE_CORE_CP:
-#ifdef CONFIG_IDE
- lkdtm.kp.symbol_name = "generic_ide_ioctl";
- lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
-#else
- pr_info("Crash point not available\n");
- return -EINVAL;
-#endif
- break;
- default:
- pr_info("Invalid Crash Point\n");
- return -EINVAL;
- }
-
- cpoint = which;
- if ((ret = register_jprobe(&lkdtm)) < 0) {
- pr_info("Couldn't register jprobe\n");
- cpoint = CN_INVALID;
- }
-
- return ret;
-}
-
-static ssize_t do_register_entry(enum cname which, struct file *f,
- const char __user *user_buf, size_t count, loff_t *off)
-{
- char *buf;
- int err;
-
- if (count >= PAGE_SIZE)
- return -EINVAL;
-
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- if (copy_from_user(buf, user_buf, count)) {
- free_page((unsigned long) buf);
- return -EFAULT;
- }
- /* NULL-terminate and remove enter */
- buf[count] = '\0';
- strim(buf);
-
- cptype = parse_cp_type(buf, count);
- free_page((unsigned long) buf);
-
- if (cptype == CT_NONE)
- return -EINVAL;
-
- err = lkdtm_register_cpoint(which);
- if (err < 0)
- return err;
-
- *off += count;
-
- return count;
-}
-
-/* Generic read callback that just prints out the available crash types */
-static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
- size_t count, loff_t *off)
-{
- char *buf;
- int i, n, out;
-
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
- for (i = 0; i < ARRAY_SIZE(cp_type); i++)
- n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
- buf[n] = '\0';
-
- out = simple_read_from_buffer(user_buf, count, off,
- buf, n);
- free_page((unsigned long) buf);
-
- return out;
-}
-
-static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-
-static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
- size_t count, loff_t *off)
-{
- return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
-}
-
-static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
- size_t count, loff_t *off)
-{
- return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
-}
-
-static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
- size_t count, loff_t *off)
-{
- return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
-}
-
-static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
- size_t count, loff_t *off)
-{
- return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
-}
-
-static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
- size_t count, loff_t *off)
-{
- return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
-}
-
-static ssize_t timeradd_entry(struct file *f, const char __user *buf,
- size_t count, loff_t *off)
-{
- return do_register_entry(CN_TIMERADD, f, buf, count, off);
-}
-
-static ssize_t scsi_dispatch_cmd_entry(struct file *f,
- const char __user *buf, size_t count, loff_t *off)
-{
- return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
-}
-
-static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
- size_t count, loff_t *off)
-{
- return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
-}
-
-/* Special entry to just crash directly. Available without KPROBEs */
-static ssize_t direct_entry(struct file *f, const char __user *user_buf,
- size_t count, loff_t *off)
-{
- enum ctype type;
- char *buf;
-
- if (count >= PAGE_SIZE)
- return -EINVAL;
- if (count < 1)
- return -EINVAL;
-
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- if (copy_from_user(buf, user_buf, count)) {
- free_page((unsigned long) buf);
- return -EFAULT;
- }
- /* NULL-terminate and remove enter */
- buf[count] = '\0';
- strim(buf);
-
- type = parse_cp_type(buf, count);
- free_page((unsigned long) buf);
- if (type == CT_NONE)
- return -EINVAL;
-
- pr_info("Performing direct entry %s\n", cp_type_to_str(type));
- lkdtm_do_action(type);
- *off += count;
-
- return count;
-}
-
-struct crash_entry {
- const char *name;
- const struct file_operations fops;
-};
-
-static const struct crash_entry crash_entries[] = {
- {"DIRECT", {.read = lkdtm_debugfs_read,
- .llseek = generic_file_llseek,
- .open = lkdtm_debugfs_open,
- .write = direct_entry} },
- {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
- .llseek = generic_file_llseek,
- .open = lkdtm_debugfs_open,
- .write = int_hardware_entry} },
- {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
- .llseek = generic_file_llseek,
- .open = lkdtm_debugfs_open,
- .write = int_hw_irq_en} },
- {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
- .llseek = generic_file_llseek,
- .open = lkdtm_debugfs_open,
- .write = int_tasklet_entry} },
- {"FS_DEVRW", {.read = lkdtm_debugfs_read,
- .llseek = generic_file_llseek,
- .open = lkdtm_debugfs_open,
- .write = fs_devrw_entry} },
- {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
- .llseek = generic_file_llseek,
- .open = lkdtm_debugfs_open,
- .write = mem_swapout_entry} },
- {"TIMERADD", {.read = lkdtm_debugfs_read,
- .llseek = generic_file_llseek,
- .open = lkdtm_debugfs_open,
- .write = timeradd_entry} },
- {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
- .llseek = generic_file_llseek,
- .open = lkdtm_debugfs_open,
- .write = scsi_dispatch_cmd_entry} },
- {"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
- .llseek = generic_file_llseek,
- .open = lkdtm_debugfs_open,
- .write = ide_core_cp_entry} },
-};
-
-static struct dentry *lkdtm_debugfs_root;
-
-static int __init lkdtm_module_init(void)
-{
- int ret = -EINVAL;
- int n_debugfs_entries = 1; /* Assume only the direct entry */
- int i;
-
- /* Make sure we can write to __ro_after_init values during __init */
- ro_after_init |= 0xAA;
-
- /* Register debugfs interface */
- lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
- if (!lkdtm_debugfs_root) {
- pr_err("creating root dir failed\n");
- return -ENODEV;
- }
-
-#ifdef CONFIG_KPROBES
- n_debugfs_entries = ARRAY_SIZE(crash_entries);
-#endif
-
- for (i = 0; i < n_debugfs_entries; i++) {
- const struct crash_entry *cur = &crash_entries[i];
- struct dentry *de;
-
- de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
- NULL, &cur->fops);
- if (de == NULL) {
- pr_err("could not create %s\n", cur->name);
- goto out_err;
- }
- }
-
- if (lkdtm_parse_commandline() == -EINVAL) {
- pr_info("Invalid command\n");
- goto out_err;
- }
-
- if (cpoint != CN_INVALID && cptype != CT_NONE) {
- ret = lkdtm_register_cpoint(cpoint);
- if (ret < 0) {
- pr_info("Invalid crash point %d\n", cpoint);
- goto out_err;
- }
- pr_info("Crash point %s of type %s registered\n",
- cpoint_name, cpoint_type);
- } else {
- pr_info("No crash points registered, enable through debugfs\n");
- }
-
- return 0;
-
-out_err:
- debugfs_remove_recursive(lkdtm_debugfs_root);
- return ret;
-}
-
-static void __exit lkdtm_module_exit(void)
-{
- debugfs_remove_recursive(lkdtm_debugfs_root);
-
- unregister_jprobe(&lkdtm);
- pr_info("Crash point unregistered\n");
-}
-
-module_init(lkdtm_module_init);
-module_exit(lkdtm_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Kprobe module for testing crash dumps");
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
new file mode 100644
index 000000000000..fdf954c2107f
--- /dev/null
+++ b/drivers/misc/lkdtm.h
@@ -0,0 +1,60 @@
+#ifndef __LKDTM_H
+#define __LKDTM_H
+
+#define pr_fmt(fmt) "lkdtm: " fmt
+
+#include <linux/kernel.h>
+
+/* lkdtm_bugs.c */
+void __init lkdtm_bugs_init(int *recur_param);
+void lkdtm_PANIC(void);
+void lkdtm_BUG(void);
+void lkdtm_WARNING(void);
+void lkdtm_EXCEPTION(void);
+void lkdtm_LOOP(void);
+void lkdtm_OVERFLOW(void);
+void lkdtm_CORRUPT_STACK(void);
+void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
+void lkdtm_SOFTLOCKUP(void);
+void lkdtm_HARDLOCKUP(void);
+void lkdtm_SPINLOCKUP(void);
+void lkdtm_HUNG_TASK(void);
+void lkdtm_ATOMIC_UNDERFLOW(void);
+void lkdtm_ATOMIC_OVERFLOW(void);
+
+/* lkdtm_heap.c */
+void lkdtm_OVERWRITE_ALLOCATION(void);
+void lkdtm_WRITE_AFTER_FREE(void);
+void lkdtm_READ_AFTER_FREE(void);
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
+void lkdtm_READ_BUDDY_AFTER_FREE(void);
+
+/* lkdtm_perms.c */
+void __init lkdtm_perms_init(void);
+void lkdtm_WRITE_RO(void);
+void lkdtm_WRITE_RO_AFTER_INIT(void);
+void lkdtm_WRITE_KERN(void);
+void lkdtm_EXEC_DATA(void);
+void lkdtm_EXEC_STACK(void);
+void lkdtm_EXEC_KMALLOC(void);
+void lkdtm_EXEC_VMALLOC(void);
+void lkdtm_EXEC_RODATA(void);
+void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_ACCESS_USERSPACE(void);
+
+/* lkdtm_rodata.c */
+void lkdtm_rodata_do_nothing(void);
+
+/* lkdtm_usercopy.c */
+void __init lkdtm_usercopy_init(void);
+void __exit lkdtm_usercopy_exit(void);
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
+void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
+void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
+void lkdtm_USERCOPY_STACK_FRAME_TO(void);
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
+void lkdtm_USERCOPY_STACK_BEYOND(void);
+void lkdtm_USERCOPY_KERNEL(void);
+
+#endif
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
new file mode 100644
index 000000000000..182ae1894b32
--- /dev/null
+++ b/drivers/misc/lkdtm_bugs.c
@@ -0,0 +1,148 @@
+/*
+ * This is for all the tests related to logic bugs (e.g. bad dereferences,
+ * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
+ * lockups) along with other things that don't fit well into existing LKDTM
+ * test source files.
+ */
+#include "lkdtm.h"
+#include <linux/sched.h>
+
+/*
+ * Make sure our attempts to overrun the kernel stack don't trigger
+ * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
+ * recurse past the end of THREAD_SIZE by default.
+ */
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
+#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
+#else
+#define REC_STACK_SIZE (THREAD_SIZE / 8)
+#endif
+#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
+
+static int recur_count = REC_NUM_DEFAULT;
+
+static DEFINE_SPINLOCK(lock_me_up);
+
+static int recursive_loop(int remaining)
+{
+ char buf[REC_STACK_SIZE];
+
+ /* Make sure compiler does not optimize this away. */
+ memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
+ if (!remaining)
+ return 0;
+ else
+ return recursive_loop(remaining - 1);
+}
+
+/* If the depth is negative, use the default; otherwise keep the parameter. */
+void __init lkdtm_bugs_init(int *recur_param)
+{
+ if (*recur_param < 0)
+ *recur_param = recur_count;
+ else
+ recur_count = *recur_param;
+}
+
+void lkdtm_PANIC(void)
+{
+ panic("dumptest");
+}
+
+void lkdtm_BUG(void)
+{
+ BUG();
+}
+
+void lkdtm_WARNING(void)
+{
+ WARN_ON(1);
+}
+
+void lkdtm_EXCEPTION(void)
+{
+ *((int *) 0) = 0;
+}
+
+void lkdtm_LOOP(void)
+{
+ for (;;)
+ ;
+}
+
+void lkdtm_OVERFLOW(void)
+{
+ (void) recursive_loop(recur_count);
+}
+
+noinline void lkdtm_CORRUPT_STACK(void)
+{
+ /* Use default char array length that triggers stack protection. */
+ char data[8];
+
+ memset((void *)data, 0, 64);
+}
+
+void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+{
+ static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
+ u32 *p;
+ u32 val = 0x12345678;
+
+ p = (u32 *)(data + 1);
+ if (*p == 0)
+ val = 0x87654321;
+ *p = val;
+}
+
+void lkdtm_SOFTLOCKUP(void)
+{
+ preempt_disable();
+ for (;;)
+ cpu_relax();
+}
+
+void lkdtm_HARDLOCKUP(void)
+{
+ local_irq_disable();
+ for (;;)
+ cpu_relax();
+}
+
+void lkdtm_SPINLOCKUP(void)
+{
+ /* Must be called twice to trigger. */
+ spin_lock(&lock_me_up);
+ /* Let sparse know we intended to exit holding the lock. */
+ __release(&lock_me_up);
+}
+
+void lkdtm_HUNG_TASK(void)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+}
+
+void lkdtm_ATOMIC_UNDERFLOW(void)
+{
+ atomic_t under = ATOMIC_INIT(INT_MIN);
+
+ pr_info("attempting good atomic increment\n");
+ atomic_inc(&under);
+ atomic_dec(&under);
+
+ pr_info("attempting bad atomic underflow\n");
+ atomic_dec(&under);
+}
+
+void lkdtm_ATOMIC_OVERFLOW(void)
+{
+ atomic_t over = ATOMIC_INIT(INT_MAX);
+
+ pr_info("attempting good atomic decrement\n");
+ atomic_dec(&over);
+ atomic_inc(&over);
+
+ pr_info("attempting bad atomic overflow\n");
+ atomic_inc(&over);
+}
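
With the monolithic switch gone, each crash becomes a standalone lkdtm_<NAME>() function, and lkdtm_core.c (next) dispatches on the name through the table its CRASHTYPE() macro builds. A minimal userspace model of that name-to-function dispatch:

    #include <stdio.h>
    #include <string.h>

    struct crashtype { const char *name; void (*func)(void); };

    static void do_panic(void) { puts("would panic here"); }
    static void do_bug(void)   { puts("would BUG() here"); }

    static const struct crashtype types[] = {
        { "PANIC", do_panic },
        { "BUG",   do_bug },
    };

    int main(int argc, char **argv)
    {
        size_t i;

        if (argc < 2)
            return 1;
        for (i = 0; i < sizeof(types) / sizeof(types[0]); i++)
            if (!strcmp(argv[1], types[i].name))
                types[i].func();
        return 0;
    }
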
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
new file mode 100644
index 000000000000..f9154b8d67f6
--- /dev/null
+++ b/drivers/misc/lkdtm_core.c
@@ -0,0 +1,544 @@
+/*
+ * Linux Kernel Dump Test Module for testing kernel crash conditions:
+ * induces system failures at predefined crashpoints and under predefined
+ * operational conditions in order to evaluate the reliability of kernel
+ * sanity checking and crash dumps obtained using different dumping
+ * solutions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Ankita Garg <ankita@in.ibm.com>
+ *
+ * It is adapted from the Linux Kernel Dump Test Tool by
+ * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
+ *
+ * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * See Documentation/fault-injection/provoke-crashes.txt for instructions
+ */
+#include "lkdtm.h"
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/kprobes.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/debugfs.h>
+
+#ifdef CONFIG_IDE
+#include <linux/ide.h>
+#endif
+
+#define DEFAULT_COUNT 10
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off);
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off);
+
+#ifdef CONFIG_KPROBES
+static void lkdtm_handler(void);
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+ const char __user *user_buf,
+ size_t count, loff_t *off);
+
+
+/* jprobe entry point handlers. */
+static unsigned int jp_do_irq(unsigned int irq)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static irqreturn_t jp_handle_irq_event(unsigned int irq,
+ struct irqaction *action)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static void jp_tasklet_action(struct softirq_action *a)
+{
+ lkdtm_handler();
+ jprobe_return();
+}
+
+static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+{
+ lkdtm_handler();
+ jprobe_return();
+}
+
+struct scan_control;
+
+static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
+ struct zone *zone,
+ struct scan_control *sc)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+# ifdef CONFIG_IDE
+static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
+ struct block_device *bdev, unsigned int cmd,
+ unsigned long arg)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+# endif
+#endif
+
+/* Crash points */
+struct crashpoint {
+ const char *name;
+ const struct file_operations fops;
+ struct jprobe jprobe;
+};
+
+#define CRASHPOINT(_name, _write, _symbol, _entry) \
+ { \
+ .name = _name, \
+ .fops = { \
+ .read = lkdtm_debugfs_read, \
+ .llseek = generic_file_llseek, \
+ .open = lkdtm_debugfs_open, \
+ .write = _write, \
+ }, \
+ .jprobe = { \
+ .kp.symbol_name = _symbol, \
+ .entry = (kprobe_opcode_t *)_entry, \
+ }, \
+ }
+
+/* Define the possible places where we can trigger a crash point. */
+struct crashpoint crashpoints[] = {
+ CRASHPOINT("DIRECT", direct_entry,
+ NULL, NULL),
+#ifdef CONFIG_KPROBES
+ CRASHPOINT("INT_HARDWARE_ENTRY", lkdtm_debugfs_entry,
+ "do_IRQ", jp_do_irq),
+ CRASHPOINT("INT_HW_IRQ_EN", lkdtm_debugfs_entry,
+ "handle_IRQ_event", jp_handle_irq_event),
+ CRASHPOINT("INT_TASKLET_ENTRY", lkdtm_debugfs_entry,
+ "tasklet_action", jp_tasklet_action),
+ CRASHPOINT("FS_DEVRW", lkdtm_debugfs_entry,
+ "ll_rw_block", jp_ll_rw_block),
+ CRASHPOINT("MEM_SWAPOUT", lkdtm_debugfs_entry,
+ "shrink_inactive_list", jp_shrink_inactive_list),
+ CRASHPOINT("TIMERADD", lkdtm_debugfs_entry,
+ "hrtimer_start", jp_hrtimer_start),
+ CRASHPOINT("SCSI_DISPATCH_CMD", lkdtm_debugfs_entry,
+ "scsi_dispatch_cmd", jp_scsi_dispatch_cmd),
+# ifdef CONFIG_IDE
+ CRASHPOINT("IDE_CORE_CP", lkdtm_debugfs_entry,
+ "generic_ide_ioctl", jp_generic_ide_ioctl),
+# endif
+#endif
+};
+
+
+/* Crash types. */
+struct crashtype {
+ const char *name;
+ void (*func)(void);
+};
+
+#define CRASHTYPE(_name) \
+ { \
+ .name = __stringify(_name), \
+ .func = lkdtm_ ## _name, \
+ }
+
+/* Define the possible types of crashes that can be triggered. */
+struct crashtype crashtypes[] = {
+ CRASHTYPE(PANIC),
+ CRASHTYPE(BUG),
+ CRASHTYPE(WARNING),
+ CRASHTYPE(EXCEPTION),
+ CRASHTYPE(LOOP),
+ CRASHTYPE(OVERFLOW),
+ CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+ CRASHTYPE(OVERWRITE_ALLOCATION),
+ CRASHTYPE(WRITE_AFTER_FREE),
+ CRASHTYPE(READ_AFTER_FREE),
+ CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
+ CRASHTYPE(READ_BUDDY_AFTER_FREE),
+ CRASHTYPE(SOFTLOCKUP),
+ CRASHTYPE(HARDLOCKUP),
+ CRASHTYPE(SPINLOCKUP),
+ CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(EXEC_DATA),
+ CRASHTYPE(EXEC_STACK),
+ CRASHTYPE(EXEC_KMALLOC),
+ CRASHTYPE(EXEC_VMALLOC),
+ CRASHTYPE(EXEC_RODATA),
+ CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(WRITE_RO),
+ CRASHTYPE(WRITE_RO_AFTER_INIT),
+ CRASHTYPE(WRITE_KERN),
+ CRASHTYPE(ATOMIC_UNDERFLOW),
+ CRASHTYPE(ATOMIC_OVERFLOW),
+ CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
+ CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
+ CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
+ CRASHTYPE(USERCOPY_HEAP_FLAG_FROM),
+ CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+ CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+ CRASHTYPE(USERCOPY_STACK_BEYOND),
+ CRASHTYPE(USERCOPY_KERNEL),
+};
+
+
+/* Global jprobe entry, crashpoint and crashtype. */
+static struct jprobe *lkdtm_jprobe;
+static struct crashpoint *lkdtm_crashpoint;
+static struct crashtype *lkdtm_crashtype;
+
+/* Module parameters */
+static int recur_count = -1;
+module_param(recur_count, int, 0644);
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
+
+static char* cpoint_name;
+module_param(cpoint_name, charp, 0444);
+MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
+
+static char* cpoint_type;
+module_param(cpoint_type, charp, 0444);
+MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
+ "hitting the crash point");
+
+static int cpoint_count = DEFAULT_COUNT;
+module_param(cpoint_count, int, 0644);
+MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
+ "crash point is to be hit to trigger action");
+
+
+/* Return the matching crashtype, or NULL if the name is invalid */
+static struct crashtype *find_crashtype(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+ if (!strcmp(name, crashtypes[i].name))
+ return &crashtypes[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * This is forced noinline just so it distinctly shows up in the stackdump
+ * which makes validation of expected lkdtm crashes easier.
+ */
+static noinline void lkdtm_do_action(struct crashtype *crashtype)
+{
+ BUG_ON(!crashtype || !crashtype->func);
+ crashtype->func();
+}
+
+static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
+ struct crashtype *crashtype)
+{
+ int ret;
+
+ /* If this doesn't have a symbol, just call immediately. */
+ if (!crashpoint->jprobe.kp.symbol_name) {
+ lkdtm_do_action(crashtype);
+ return 0;
+ }
+
+ if (lkdtm_jprobe != NULL)
+ unregister_jprobe(lkdtm_jprobe);
+
+ lkdtm_crashpoint = crashpoint;
+ lkdtm_crashtype = crashtype;
+ lkdtm_jprobe = &crashpoint->jprobe;
+ ret = register_jprobe(lkdtm_jprobe);
+ if (ret < 0) {
+ pr_info("Couldn't register jprobe %s\n",
+ crashpoint->jprobe.kp.symbol_name);
+ lkdtm_jprobe = NULL;
+ lkdtm_crashpoint = NULL;
+ lkdtm_crashtype = NULL;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_KPROBES
+/* Global crash counter and spinlock. */
+static int crash_count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(crash_count_lock);
+
+/* Called by jprobe entry points. */
+static void lkdtm_handler(void)
+{
+ unsigned long flags;
+ bool do_it = false;
+
+ BUG_ON(!lkdtm_crashpoint || !lkdtm_crashtype);
+
+ spin_lock_irqsave(&crash_count_lock, flags);
+ crash_count--;
+ pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
+ lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count);
+
+ if (crash_count == 0) {
+ do_it = true;
+ crash_count = cpoint_count;
+ }
+ spin_unlock_irqrestore(&crash_count_lock, flags);
+
+ if (do_it)
+ lkdtm_do_action(lkdtm_crashtype);
+}
+
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct crashpoint *crashpoint = file_inode(f)->i_private;
+ struct crashtype *crashtype = NULL;
+ char *buf;
+ int err;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NUL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ crashtype = find_crashtype(buf);
+ free_page((unsigned long)buf);
+
+ if (!crashtype)
+ return -EINVAL;
+
+ err = lkdtm_register_cpoint(crashpoint, crashtype);
+ if (err < 0)
+ return err;
+
+ *off += count;
+
+ return count;
+}
+#endif
+
+/* Generic read callback that just prints out the available crash types */
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ char *buf;
+ int i, n, out;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
+ for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+ n += snprintf(buf + n, PAGE_SIZE - n, "%s\n",
+ crashtypes[i].name);
+ }
+ buf[n] = '\0';
+
+ out = simple_read_from_buffer(user_buf, count, off,
+ buf, n);
+ free_page((unsigned long) buf);
+
+ return out;
+}
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* Special entry to just crash directly. Available without KPROBEs */
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct crashtype *crashtype;
+ char *buf;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+ if (count < 1)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NUL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ crashtype = find_crashtype(buf);
+ free_page((unsigned long) buf);
+ if (!crashtype)
+ return -EINVAL;
+
+ pr_info("Performing direct entry %s\n", crashtype->name);
+ lkdtm_do_action(crashtype);
+ *off += count;
+
+ return count;
+}
+
+static struct dentry *lkdtm_debugfs_root;
+
+static int __init lkdtm_module_init(void)
+{
+ struct crashpoint *crashpoint = NULL;
+ struct crashtype *crashtype = NULL;
+ int ret = -EINVAL;
+ int i;
+
+ /* cpoint_type and cpoint_name must be given together or not at all */
+ if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) {
+ pr_err("Need both cpoint_type and cpoint_name or neither\n");
+ return -EINVAL;
+ }
+
+ if (cpoint_type) {
+ crashtype = find_crashtype(cpoint_type);
+ if (!crashtype) {
+ pr_err("Unknown crashtype '%s'\n", cpoint_type);
+ return -EINVAL;
+ }
+ }
+
+ if (cpoint_name) {
+ for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+ if (!strcmp(cpoint_name, crashpoints[i].name))
+ crashpoint = &crashpoints[i];
+ }
+
+ /* Refuse unknown crashpoints. */
+ if (!crashpoint) {
+ pr_err("Invalid crashpoint %s\n", cpoint_name);
+ return -EINVAL;
+ }
+ }
+
+#ifdef CONFIG_KPROBES
+ /* Set crash count. */
+ crash_count = cpoint_count;
+#endif
+
+ /* Handle test-specific initialization. */
+ lkdtm_bugs_init(&recur_count);
+ lkdtm_perms_init();
+ lkdtm_usercopy_init();
+
+ /* Register debugfs interface */
+ lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
+ if (!lkdtm_debugfs_root) {
+ pr_err("creating root dir failed\n");
+ return -ENODEV;
+ }
+
+ /* Install debugfs trigger files. */
+ for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+ struct crashpoint *cur = &crashpoints[i];
+ struct dentry *de;
+
+ de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
+ cur, &cur->fops);
+ if (de == NULL) {
+ pr_err("could not create crashpoint %s\n", cur->name);
+ goto out_err;
+ }
+ }
+
+ /* Install crashpoint if one was selected. */
+ if (crashpoint) {
+ ret = lkdtm_register_cpoint(crashpoint, crashtype);
+ if (ret < 0) {
+ pr_info("Invalid crashpoint %s\n", crashpoint->name);
+ goto out_err;
+ }
+ pr_info("Crash point %s of type %s registered\n",
+ crashpoint->name, cpoint_type);
+ } else {
+ pr_info("No crash points registered, enable through debugfs\n");
+ }
+
+ return 0;
+
+out_err:
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+ return ret;
+}
+
+static void __exit lkdtm_module_exit(void)
+{
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+
+ /* Handle test-specific clean-up. */
+ lkdtm_usercopy_exit();
+
+ unregister_jprobe(lkdtm_jprobe);
+ pr_info("Crash point unregistered\n");
+}
+
+module_init(lkdtm_module_init);
+module_exit(lkdtm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kernel crash testing module");
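
As the header comment notes, Documentation/fault-injection/provoke-crashes.txt describes the interface; in short, each crashpoint becomes a file under the provoke-crash debugfs directory, and writing a crash type name arms it (or, for DIRECT, fires it immediately). A hedged usage sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	/* direct_entry() parses this string with find_crashtype(). */
    	const char *path = "/sys/kernel/debug/provoke-crash/DIRECT";
    	const char *type = "WARNING";	/* any name from crashtypes[] */
    	int fd = open(path, O_WRONLY);

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	if (write(fd, type, strlen(type)) < 0)
    		perror("write");
    	close(fd);
    	return 0;
    }

The shell equivalent is a one-liner: echo WARNING > /sys/kernel/debug/provoke-crash/DIRECT.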
diff --git a/drivers/misc/lkdtm_heap.c b/drivers/misc/lkdtm_heap.c
new file mode 100644
index 000000000000..0f1581664c1c
--- /dev/null
+++ b/drivers/misc/lkdtm_heap.c
@@ -0,0 +1,142 @@
+/*
+ * This is for all the tests relating directly to heap memory, including
+ * page allocation and slab allocations.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+
+/*
+ * This tries to stay within the next largest power-of-2 kmalloc cache
+ * to avoid actually overwriting anything important if it's not detected
+ * correctly.
+ */
+void lkdtm_OVERWRITE_ALLOCATION(void)
+{
+ size_t len = 1020;
+ u32 *data = kmalloc(len, GFP_KERNEL);
+
+ if (!data)
+ return;
+
+ data[1024 / sizeof(u32)] = 0x12345678;
+ kfree(data);
+}
+
+void lkdtm_WRITE_AFTER_FREE(void)
+{
+ int *base, *again;
+ size_t len = 1024;
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base)
+ return;
+ pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+ pr_info("Attempting bad write to freed memory at %p\n",
+ &base[offset]);
+ kfree(base);
+ base[offset] = 0x0abcdef0;
+ /* Attempt to notice the overwrite. */
+ again = kmalloc(len, GFP_KERNEL);
+ kfree(again);
+ if (again != base)
+ pr_info("Hmm, didn't get the same memory range.\n");
+}
+
+void lkdtm_READ_AFTER_FREE(void)
+{
+ int *base, *val, saw;
+ size_t len = 1024;
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base) {
+ pr_info("Unable to allocate base memory.\n");
+ return;
+ }
+
+ val = kmalloc(len, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate val memory.\n");
+ kfree(base);
+ return;
+ }
+
+ *val = 0x12345678;
+ base[offset] = *val;
+ pr_info("Value in memory before free: %x\n", base[offset]);
+
+ kfree(base);
+
+ pr_info("Attempting bad read from freed memory\n");
+ saw = base[offset];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Memory was not poisoned\n");
+
+ kfree(val);
+}
+
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ if (!p) {
+ pr_info("Unable to allocate free page\n");
+ return;
+ }
+
+ pr_info("Writing to the buddy page before free\n");
+ memset((void *)p, 0x3, PAGE_SIZE);
+ free_page(p);
+ schedule();
+ pr_info("Attempting bad write to the buddy page after free\n");
+ memset((void *)p, 0x78, PAGE_SIZE);
+ /* Attempt to notice the overwrite. */
+ p = __get_free_page(GFP_KERNEL);
+ free_page(p);
+ schedule();
+}
+
+void lkdtm_READ_BUDDY_AFTER_FREE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ int saw, *val;
+ int *base;
+
+ if (!p) {
+ pr_info("Unable to allocate free page\n");
+ return;
+ }
+
+ val = kmalloc(1024, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate val memory.\n");
+ free_page(p);
+ return;
+ }
+
+ base = (int *)p;
+
+ *val = 0x12345678;
+ base[0] = *val;
+ pr_info("Value in memory before free: %x\n", base[0]);
+ free_page(p);
+ pr_info("Attempting to read from freed memory\n");
+ saw = base[0];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Buddy page was not poisoned\n");
+
+ kfree(val);
+}
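
lkdtm_OVERWRITE_ALLOCATION above depends on kmalloc() rounding requests up to a power-of-2 size class: 1020 bytes is served from the kmalloc-1024 cache, so the write at byte offset 1024 lands immediately past that object, where redzoning or similar slab debugging can catch it. A hedged, self-contained module sketch of the rounding, using ksize() to report the usable size:

    #include <linux/module.h>
    #include <linux/slab.h>

    static int __init sizeclass_demo_init(void)
    {
    	u32 *data = kmalloc(1020, GFP_KERNEL);

    	if (!data)
    		return -ENOMEM;
    	/* On SLAB/SLUB this typically reports 1024, the next size class. */
    	pr_info("requested 1020 bytes, usable %zu\n", ksize(data));
    	kfree(data);
    	return 0;
    }

    static void __exit sizeclass_demo_exit(void) { }

    module_init(sizeclass_demo_init);
    module_exit(sizeclass_demo_exit);
    MODULE_LICENSE("GPL");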
diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c
new file mode 100644
index 000000000000..45f1c0f96612
--- /dev/null
+++ b/drivers/misc/lkdtm_perms.c
@@ -0,0 +1,199 @@
+/*
+ * This is for all the tests related to validating kernel memory
+ * permissions: non-executable regions, non-writable regions, and
+ * even non-readable regions.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/* Whether or not to fill the target memory area with do_nothing(). */
+#define CODE_WRITE true
+#define CODE_AS_IS false
+
+/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
+#define EXEC_SIZE 64
+
+/* This is non-const, so it will end up in the .data section. */
+static u8 data_area[EXEC_SIZE];
+
+/* This is const, so it will end up in the .rodata section. */
+static const unsigned long rodata = 0xAA55AA55;
+
+/* This is marked __ro_after_init, so it should ultimately be .rodata. */
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
+
+/*
+ * This just returns to the caller. It is designed to be copied into
+ * non-executable memory regions.
+ */
+static void do_nothing(void)
+{
+ return;
+}
+
+/* Must immediately follow do_nothing for size calculations to work out. */
+static void do_overwritten(void)
+{
+ pr_info("do_overwritten wasn't overwritten!\n");
+ return;
+}
+
+static noinline void execute_location(void *dst, bool write)
+{
+ void (*func)(void) = dst;
+
+ pr_info("attempting ok execution at %p\n", do_nothing);
+ do_nothing();
+
+ if (write == CODE_WRITE) {
+ memcpy(dst, do_nothing, EXEC_SIZE);
+ flush_icache_range((unsigned long)dst,
+ (unsigned long)dst + EXEC_SIZE);
+ }
+ pr_info("attempting bad execution at %p\n", func);
+ func();
+}
+
+static void execute_user_location(void *dst)
+{
+ /* Intentionally crossing kernel/user memory boundary. */
+ void (*func)(void) = dst;
+
+ pr_info("attempting ok execution at %p\n", do_nothing);
+ do_nothing();
+
+ if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
+ return;
+ flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
+ pr_info("attempting bad execution at %p\n", func);
+ func();
+}
+
+void lkdtm_WRITE_RO(void)
+{
+ /* Explicitly cast away "const" for the test. */
+ unsigned long *ptr = (unsigned long *)&rodata;
+
+ pr_info("attempting bad rodata write at %p\n", ptr);
+ *ptr ^= 0xabcd1234;
+}
+
+void lkdtm_WRITE_RO_AFTER_INIT(void)
+{
+ unsigned long *ptr = &ro_after_init;
+
+ /*
+ * Verify we were written to during init. Since an Oops
+ * is considered a "success", a failure is to just skip the
+ * real test.
+ */
+ if ((*ptr & 0xAA) != 0xAA) {
+ pr_info("%p was NOT written during init!?\n", ptr);
+ return;
+ }
+
+ pr_info("attempting bad ro_after_init write at %p\n", ptr);
+ *ptr ^= 0xabcd1234;
+}
+
+void lkdtm_WRITE_KERN(void)
+{
+ size_t size;
+ unsigned char *ptr;
+
+ size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
+ ptr = (unsigned char *)do_overwritten;
+
+ pr_info("attempting bad %zu byte write at %p\n", size, ptr);
+ memcpy(ptr, (unsigned char *)do_nothing, size);
+ flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+
+ do_overwritten();
+}
+
+void lkdtm_EXEC_DATA(void)
+{
+ execute_location(data_area, CODE_WRITE);
+}
+
+void lkdtm_EXEC_STACK(void)
+{
+ u8 stack_area[EXEC_SIZE];
+ execute_location(stack_area, CODE_WRITE);
+}
+
+void lkdtm_EXEC_KMALLOC(void)
+{
+ u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
+ execute_location(kmalloc_area, CODE_WRITE);
+ kfree(kmalloc_area);
+}
+
+void lkdtm_EXEC_VMALLOC(void)
+{
+ u32 *vmalloc_area = vmalloc(EXEC_SIZE);
+ execute_location(vmalloc_area, CODE_WRITE);
+ vfree(vmalloc_area);
+}
+
+void lkdtm_EXEC_RODATA(void)
+{
+ execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS);
+}
+
+void lkdtm_EXEC_USERSPACE(void)
+{
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+ execute_user_location((void *)user_addr);
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void lkdtm_ACCESS_USERSPACE(void)
+{
+ unsigned long user_addr, tmp = 0;
+ unsigned long *ptr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
+ pr_warn("copy_to_user failed\n");
+ vm_munmap(user_addr, PAGE_SIZE);
+ return;
+ }
+
+ ptr = (unsigned long *)user_addr;
+
+ pr_info("attempting bad read at %p\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+
+ pr_info("attempting bad write at %p\n", ptr);
+ *ptr = tmp;
+
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void __init lkdtm_perms_init(void)
+{
+ /* Make sure we can write to __ro_after_init values during __init */
+ ro_after_init |= 0xAA;
+}
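
The EXEC_* tests have a close userspace analogue: with NX enforced, jumping into a mapping that lacks PROT_EXEC faults. A hedged sketch, expected to die with SIGSEGV on most modern systems (the 64-byte copy mirrors EXEC_SIZE above and is equally crude):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static void do_nothing(void) { }

    int main(void)
    {
    	/* Writable but deliberately not executable. */
    	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
    			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    	if (buf == MAP_FAILED) {
    		perror("mmap");
    		return 1;
    	}
    	memcpy(buf, (void *)do_nothing, 64);
    	puts("jumping into a non-executable mapping...");
    	((void (*)(void))buf)();	/* expect SIGSEGV under NX */
    	puts("NX not enforced?");
    	return 0;
    }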
diff --git a/drivers/misc/lkdtm_rodata.c b/drivers/misc/lkdtm_rodata.c
new file mode 100644
index 000000000000..166b1db3969f
--- /dev/null
+++ b/drivers/misc/lkdtm_rodata.c
@@ -0,0 +1,10 @@
+/*
+ * This includes functions that are meant to live entirely in .rodata
+ * (via objcopy tricks), to validate the non-executability of .rodata.
+ */
+#include "lkdtm.h"
+
+void lkdtm_rodata_do_nothing(void)
+{
+ /* Does nothing. We just want an architecture agnostic "return". */
+}
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c
new file mode 100644
index 000000000000..5a3fd76eec27
--- /dev/null
+++ b/drivers/misc/lkdtm_usercopy.c
@@ -0,0 +1,313 @@
+/*
+ * This is for all the tests related to copy_to_user() and copy_from_user()
+ * hardening.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+static size_t cache_size = 1024;
+static struct kmem_cache *bad_cache;
+
+static const unsigned char test_text[] = "This is a test.\n";
+
+/*
+ * Instead of adding -Wno-return-local-addr, just pass the stack address
+ * through a function to obfuscate it from the compiler.
+ */
+static noinline unsigned char *trick_compiler(unsigned char *stack)
+{
+ return stack + 0;
+}
+
+static noinline unsigned char *do_usercopy_stack_callee(int value)
+{
+ unsigned char buf[32];
+ int i;
+
+ /* Exercise stack to avoid everything living in registers. */
+ for (i = 0; i < sizeof(buf); i++) {
+ buf[i] = value & 0xff;
+ }
+
+ return trick_compiler(buf);
+}
+
+static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
+{
+ unsigned long user_addr;
+ unsigned char good_stack[32];
+ unsigned char *bad_stack;
+ int i;
+
+ /* Exercise stack to avoid everything living in registers. */
+ for (i = 0; i < sizeof(good_stack); i++)
+ good_stack[i] = test_text[i % sizeof(test_text)];
+
+ /* This is a pointer to outside our current stack frame. */
+ if (bad_frame) {
+ bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
+ } else {
+ /* Put start address just inside stack. */
+ bad_stack = task_stack_page(current) + THREAD_SIZE;
+ bad_stack -= sizeof(unsigned long);
+ }
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user of local stack\n");
+ if (copy_to_user((void __user *)user_addr, good_stack,
+ sizeof(good_stack))) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user of distant stack\n");
+ if (copy_to_user((void __user *)user_addr, bad_stack,
+ sizeof(good_stack))) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ /*
+ * There isn't a safe way to not be protected by usercopy
+ * if we're going to write to another thread's stack.
+ */
+ if (!bad_frame)
+ goto free_user;
+
+ pr_info("attempting good copy_from_user of local stack\n");
+ if (copy_from_user(good_stack, (void __user *)user_addr,
+ sizeof(good_stack))) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user of distant stack\n");
+ if (copy_from_user(bad_stack, (void __user *)user_addr,
+ sizeof(good_stack))) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+static void do_usercopy_heap_size(bool to_user)
+{
+ unsigned long user_addr;
+ unsigned char *one, *two;
+ const size_t size = 1024;
+
+ one = kmalloc(size, GFP_KERNEL);
+ two = kmalloc(size, GFP_KERNEL);
+ if (!one || !two) {
+ pr_warn("Failed to allocate kernel memory\n");
+ goto free_kernel;
+ }
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ goto free_kernel;
+ }
+
+ memset(one, 'A', size);
+ memset(two, 'B', size);
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user of correct size\n");
+ if (copy_to_user((void __user *)user_addr, one, size)) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user of too large size\n");
+ if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ pr_info("attempting good copy_from_user of correct size\n");
+ if (copy_from_user(one, (void __user *)user_addr, size)) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user of too large size\n");
+ if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+free_kernel:
+ kfree(one);
+ kfree(two);
+}
+
+static void do_usercopy_heap_flag(bool to_user)
+{
+ unsigned long user_addr;
+ unsigned char *good_buf = NULL;
+ unsigned char *bad_buf = NULL;
+
+ /* Make sure cache was prepared. */
+ if (!bad_cache) {
+ pr_warn("Failed to allocate kernel cache\n");
+ return;
+ }
+
+ /*
+ * Allocate one buffer from each cache (kmalloc will have the
+ * SLAB_USERCOPY flag already, but "bad_cache" won't).
+ */
+ good_buf = kmalloc(cache_size, GFP_KERNEL);
+ bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
+ if (!good_buf || !bad_buf) {
+ pr_warn("Failed to allocate buffers from caches\n");
+ goto free_alloc;
+ }
+
+ /* Allocate user memory we'll poke at. */
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ goto free_alloc;
+ }
+
+ memset(good_buf, 'A', cache_size);
+ memset(bad_buf, 'B', cache_size);
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
+ if (copy_to_user((void __user *)user_addr, good_buf,
+ cache_size)) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
+ if (copy_to_user((void __user *)user_addr, bad_buf,
+ cache_size)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
+ if (copy_from_user(good_buf, (void __user *)user_addr,
+ cache_size)) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
+ if (copy_from_user(bad_buf, (void __user *)user_addr,
+ cache_size)) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+free_alloc:
+ if (bad_buf)
+ kmem_cache_free(bad_cache, bad_buf);
+ kfree(good_buf);
+}
+
+/* Callable tests. */
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+{
+ do_usercopy_heap_size(true);
+}
+
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+{
+ do_usercopy_heap_size(false);
+}
+
+void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
+{
+ do_usercopy_heap_flag(true);
+}
+
+void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
+{
+ do_usercopy_heap_flag(false);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+{
+ do_usercopy_stack(true, true);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+{
+ do_usercopy_stack(false, true);
+}
+
+void lkdtm_USERCOPY_STACK_BEYOND(void)
+{
+ do_usercopy_stack(true, false);
+}
+
+void lkdtm_USERCOPY_KERNEL(void)
+{
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ pr_info("attempting good copy_to_user from kernel rodata\n");
+ if (copy_to_user((void __user *)user_addr, test_text,
+ sizeof(test_text))) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user from kernel text\n");
+ if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void __init lkdtm_usercopy_init(void)
+{
+ /* Prepare cache that lacks SLAB_USERCOPY flag. */
+ bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
+ 0, NULL);
+}
+
+void __exit lkdtm_usercopy_exit(void)
+{
+ kmem_cache_destroy(bad_cache);
+}
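
Every test in this file presumes a copy_to_user()/copy_from_user() implementation that validates the kernel-side object before copying. Reduced to its heap-object half, and with purely illustrative names (this is not the kernel's actual API), such a check amounts to:

    /*
     * A copy is allowed only if [ptr, ptr + n) stays inside the slab
     * allocation that ptr belongs to; obj_start and obj_size would come
     * from allocator metadata in a real implementation.
     */
    static bool copy_within_object(const void *ptr, unsigned long n,
    			       const void *obj_start, unsigned long obj_size)
    {
    	const char *p = ptr;
    	const char *start = obj_start;

    	return p >= start && n <= obj_size &&
    	       (unsigned long)(p - start) <= obj_size - n;
    }

The _SIZE_ tests violate the length condition, the _FLAG_ tests use an allocation the checker refuses to whitelist, and the _STACK_ tests exercise the analogous frame-bounds check.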
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 5aa606c8a827..085f3aafe6fa 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -132,6 +132,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
hdr->length = length;
hdr->msg_complete = 1;
hdr->reserved = 0;
+ hdr->internal = 0;
}
/**
@@ -165,15 +166,15 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
* Return: 0 on success, <0 on failure.
*/
static inline
-int mei_hbm_cl_write(struct mei_device *dev,
- struct mei_cl *cl, u8 hbm_cmd, size_t len)
+int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
+ u8 hbm_cmd, u8 *buf, size_t len)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct mei_msg_hdr mei_hdr;
- mei_hbm_hdr(mei_hdr, len);
- mei_hbm_cl_hdr(cl, hbm_cmd, dev->wr_msg.data, len);
+ mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_cl_hdr(cl, hbm_cmd, buf, len);
- return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ return mei_write_message(dev, &mei_hdr, buf);
}
/**
@@ -250,24 +251,23 @@ int mei_hbm_start_wait(struct mei_device *dev)
*/
int mei_hbm_start_req(struct mei_device *dev)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_host_version_request *start_req;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_host_version_request start_req;
const size_t len = sizeof(struct hbm_host_version_request);
int ret;
mei_hbm_reset(dev);
- mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, len);
/* host start message */
- start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
- memset(start_req, 0, len);
- start_req->hbm_cmd = HOST_START_REQ_CMD;
- start_req->host_version.major_version = HBM_MAJOR_VERSION;
- start_req->host_version.minor_version = HBM_MINOR_VERSION;
+ memset(&start_req, 0, len);
+ start_req.hbm_cmd = HOST_START_REQ_CMD;
+ start_req.host_version.major_version = HBM_MAJOR_VERSION;
+ start_req.host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &start_req);
if (ret) {
dev_err(dev->dev, "version message write failed: ret = %d\n",
ret);
@@ -288,23 +288,22 @@ int mei_hbm_start_req(struct mei_device *dev)
*/
static int mei_hbm_enum_clients_req(struct mei_device *dev)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_host_enum_request *enum_req;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_host_enum_request enum_req;
const size_t len = sizeof(struct hbm_host_enum_request);
int ret;
/* enumerate clients */
- mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, len);
- enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
- memset(enum_req, 0, len);
- enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
- enum_req->flags |= dev->hbm_f_dc_supported ?
- MEI_HBM_ENUM_F_ALLOW_ADD : 0;
- enum_req->flags |= dev->hbm_f_ie_supported ?
- MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
+ memset(&enum_req, 0, len);
+ enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;
+ enum_req.flags |= dev->hbm_f_dc_supported ?
+ MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+ enum_req.flags |= dev->hbm_f_ie_supported ?
+ MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &enum_req);
if (ret) {
dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
ret);
@@ -358,23 +357,21 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
*/
static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_add_client_response *resp;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_add_client_response resp;
const size_t len = sizeof(struct hbm_add_client_response);
int ret;
dev_dbg(dev->dev, "adding client response\n");
- resp = (struct hbm_add_client_response *)dev->wr_msg.data;
+ mei_hbm_hdr(&mei_hdr, len);
- mei_hbm_hdr(mei_hdr, len);
- memset(resp, 0, sizeof(struct hbm_add_client_response));
+ memset(&resp, 0, sizeof(struct hbm_add_client_response));
+ resp.hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
+ resp.me_addr = addr;
+ resp.status = status;
- resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
- resp->me_addr = addr;
- resp->status = status;
-
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &resp);
if (ret)
dev_err(dev->dev, "add client response write failed: ret = %d\n",
ret);
@@ -421,18 +418,17 @@ int mei_hbm_cl_notify_req(struct mei_device *dev,
struct mei_cl *cl, u8 start)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_notification_request *req;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_notification_request req;
const size_t len = sizeof(struct hbm_notification_request);
int ret;
- mei_hbm_hdr(mei_hdr, len);
- mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, dev->wr_msg.data, len);
+ mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, len);
- req = (struct hbm_notification_request *)dev->wr_msg.data;
- req->start = start;
+ req.start = start;
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
@@ -534,8 +530,8 @@ static void mei_hbm_cl_notify(struct mei_device *dev,
*/
static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_props_request *prop_req;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_props_request prop_req;
const size_t len = sizeof(struct hbm_props_request);
unsigned long addr;
int ret;
@@ -550,15 +546,14 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
return 0;
}
- mei_hbm_hdr(mei_hdr, len);
- prop_req = (struct hbm_props_request *)dev->wr_msg.data;
+ mei_hbm_hdr(&mei_hdr, len);
- memset(prop_req, 0, sizeof(struct hbm_props_request));
+ memset(&prop_req, 0, sizeof(struct hbm_props_request));
- prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
- prop_req->me_addr = addr;
+ prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ prop_req.me_addr = addr;
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &prop_req);
if (ret) {
dev_err(dev->dev, "properties request write failed: ret = %d\n",
ret);
@@ -581,21 +576,20 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
*/
int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_power_gate *req;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_power_gate req;
const size_t len = sizeof(struct hbm_power_gate);
int ret;
if (!dev->hbm_f_pg_supported)
return -EOPNOTSUPP;
- mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, len);
- req = (struct hbm_power_gate *)dev->wr_msg.data;
- memset(req, 0, len);
- req->hbm_cmd = pg_cmd;
+ memset(&req, 0, len);
+ req.hbm_cmd = pg_cmd;
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "power gate command write failed.\n");
return ret;
@@ -611,18 +605,17 @@ EXPORT_SYMBOL_GPL(mei_hbm_pg);
*/
static int mei_hbm_stop_req(struct mei_device *dev)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_host_stop_request *req =
- (struct hbm_host_stop_request *)dev->wr_msg.data;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_host_stop_request req;
const size_t len = sizeof(struct hbm_host_stop_request);
- mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, len);
- memset(req, 0, len);
- req->hbm_cmd = HOST_STOP_REQ_CMD;
- req->reason = DRIVER_STOP_REQUEST;
+ memset(&req, 0, len);
+ req.hbm_cmd = HOST_STOP_REQ_CMD;
+ req.reason = DRIVER_STOP_REQUEST;
- return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ return mei_write_message(dev, &mei_hdr, &req);
}
/**
@@ -636,9 +629,10 @@ static int mei_hbm_stop_req(struct mei_device *dev)
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_flow_control);
+ u8 buf[len];
cl_dbg(dev, cl, "sending flow control\n");
- return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, len);
+ return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, buf, len);
}
/**
@@ -714,8 +708,9 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_client_connect_request);
+ u8 buf[len];
- return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, len);
+ return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, buf, len);
}
/**
@@ -729,8 +724,9 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_client_connect_response);
+ u8 buf[len];
- return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, len);
+ return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, buf, len);
}
/**
@@ -765,8 +761,9 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_client_connect_request);
+ u8 buf[len];
- return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, len);
+ return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, buf, len);
}
/**
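
Every hunk in this hbm.c diff applies the same transformation: the shared dev->wr_msg scratch buffer is replaced by small per-call on-stack message structs, so building an HBM message no longer depends on mutable device-wide state. The skeleton of the new shape, using this file's own helpers (the function name here is illustrative):

    static int send_some_req(struct mei_device *dev)
    {
    	struct mei_msg_hdr mei_hdr;		/* was: &dev->wr_msg.hdr */
    	struct hbm_host_stop_request req;	/* was: cast of dev->wr_msg.data */
    	const size_t len = sizeof(req);

    	mei_hbm_hdr(&mei_hdr, len);
    	memset(&req, 0, len);
    	req.hbm_cmd = HOST_STOP_REQ_CMD;

    	return mei_write_message(dev, &mei_hdr, &req);
    }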
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index c9e01021eadf..e5e32503d4bc 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -382,7 +382,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
*
* @hbuf_depth : depth of hardware host/write buffer, in slots
* @hbuf_is_ready : query if the host/write buffer is ready
- * @wr_msg : the buffer for hbm control messages
*
* @version : HBM protocol version in use
* @hbm_f_pg_supported : hbm feature pgi protocol
@@ -467,12 +466,6 @@ struct mei_device {
u8 hbuf_depth;
bool hbuf_is_ready;
- /* used for control messages */
- struct {
- struct mei_msg_hdr hdr;
- unsigned char data[128];
- } wr_msg;
-
struct hbm_version version;
unsigned int hbm_f_pg_supported:1;
unsigned int hbm_f_dc_supported:1;
@@ -670,8 +663,7 @@ static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
}
static inline int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *hdr,
- unsigned char *buf)
+ struct mei_msg_hdr *hdr, void *buf)
{
return dev->ops->write(dev, hdr, buf);
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e62fde3ac431..10b553765ee7 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -93,6 +93,7 @@ static DEFINE_SPINLOCK(mmc_blk_lock);
*/
struct mmc_blk_data {
spinlock_t lock;
+ struct device *parent;
struct gendisk *disk;
struct mmc_queue queue;
struct list_head part;
@@ -355,8 +356,10 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
goto idata_err;
}
- if (!idata->buf_bytes)
+ if (!idata->buf_bytes) {
+ idata->buf = NULL;
return idata;
+ }
idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
if (!idata->buf) {
@@ -1722,8 +1725,8 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
!IS_ALIGNED(blk_rq_sectors(next), 8))
break;
- if (next->cmd_flags & REQ_DISCARD ||
- next->cmd_flags & REQ_FLUSH)
+ if (req_op(next) == REQ_OP_DISCARD ||
+ req_op(next) == REQ_OP_FLUSH)
break;
if (rq_data_dir(cur) != rq_data_dir(next))
@@ -1786,8 +1789,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
packed_cmd_hdr = packed->cmd_hdr;
memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = (packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
hdr_blocks = mmc_large_sector(card) ? 8 : 1;
/*
@@ -1801,14 +1804,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
/* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] =
+ packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq);
+ blk_rq_sectors(prq));
/* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] =
+ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
packed->blocks += blk_rq_sectors(prq);
i++;
}
@@ -2148,7 +2151,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
- unsigned int cmd_flags = req ? req->cmd_flags : 0;
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
@@ -2164,15 +2166,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
- if (cmd_flags & REQ_DISCARD) {
+ if (req && req_op(req) == REQ_OP_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- if (req->cmd_flags & REQ_SECURE)
- ret = mmc_blk_issue_secdiscard_rq(mq, req);
- else
- ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (cmd_flags & REQ_FLUSH) {
+ ret = mmc_blk_issue_discard_rq(mq, req);
+ } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
+ /* complete ongoing async transfer before issuing secure erase */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ ret = mmc_blk_issue_secdiscard_rq(mq, req);
+ } else if (req && req_op(req) == REQ_OP_FLUSH) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -2188,7 +2192,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
out:
if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
- (cmd_flags & MMC_REQ_SPECIAL_MASK))
+ mmc_req_is_special(req))
/*
* Release host when there are no more requests
* and after special request(discard, flush) is done.
@@ -2269,7 +2273,7 @@ again:
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
- md->disk->driverfs_dev = parent;
+ md->parent = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
md->disk->flags = GENHD_FL_EXT_DEVT;
if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
@@ -2457,7 +2461,7 @@ static int mmc_add_disk(struct mmc_blk_data *md)
int ret;
struct mmc_card *card = md->queue.card;
- add_disk(md->disk);
+ device_add_disk(md->parent, md->disk);
md->force_ro.show = force_ro_show;
md->force_ro.store = force_ro_store;
sysfs_attr_init(&md->force_ro.attr);
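
The md->parent bookkeeping above tracks a block-layer API change threaded through this series: rather than stashing the parent device in the gendisk and calling add_disk(), drivers now pass the parent to device_add_disk() at registration time. Side by side, using this hunk's identifiers (contrast sketch only, not a complete function):

    /* old pattern: parent published through the gendisk */
    md->disk->driverfs_dev = parent;
    add_disk(md->disk);

    /* new pattern: parent supplied at registration */
    md->parent = parent;
    device_add_disk(md->parent, md->disk);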
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6f4323c6d653..bf14642a576a 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -33,7 +33,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
/*
* We only like normal block requests and discards.
*/
- if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
+ if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) {
blk_dump_rq_flags(req, "MMC bad request");
return BLKPREP_KILL;
}
@@ -56,7 +56,6 @@ static int mmc_queue_thread(void *d)
down(&mq->thread_sem);
do {
struct request *req = NULL;
- unsigned int cmd_flags = 0;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
@@ -66,7 +65,6 @@ static int mmc_queue_thread(void *d)
if (req || mq->mqrq_prev->req) {
set_current_state(TASK_RUNNING);
- cmd_flags = req ? req->cmd_flags : 0;
mq->issue_fn(mq, req);
cond_resched();
if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
@@ -81,7 +79,7 @@ static int mmc_queue_thread(void *d)
* has been finished. Do not assign it to previous
* request.
*/
- if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+ if (mmc_req_is_special(req))
mq->mqrq_cur->req = NULL;
mq->mqrq_prev->brq.mrq.data = NULL;
@@ -173,7 +171,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
if (card->pref_erase > max_discard)
q->limits.discard_granularity = 0;
if (mmc_can_secure_erase_trim(card))
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
/**
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 36cddab57d77..d62531124d54 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -1,7 +1,11 @@
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H
-#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
+static inline bool mmc_req_is_special(struct request *req)
+{
+ return req &&
+ (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD);
+}
struct request;
struct task_struct;
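
mmc_req_is_special() is the op-based replacement for the deleted MMC_REQ_SPECIAL_MASK test: with the block layer's operation/flags split, the operation is read with req_op() instead of by masking cmd_flags. The idiom generalizes to other operations as a switch (hedged sketch with an illustrative helper name; the in-tree helper above checks only FLUSH and DISCARD, while block.c handles REQ_OP_SECURE_ERASE separately):

    static inline bool blk_req_is_special_op(struct request *req)
    {
    	if (!req)
    		return false;

    	switch (req_op(req)) {
    	case REQ_OP_FLUSH:
    	case REQ_OP_DISCARD:
    	case REQ_OP_SECURE_ERASE:
    		return true;
    	default:
    		return false;
    	}
    }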
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 03ddf0ecf402..684087db170b 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1068,8 +1068,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
jz4740_mmc_clock_disable(host);
setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
(unsigned long)host);
- /* It is not important when it times out, it just needs to timeout. */
- set_timer_slack(&host->timeout_timer, HZ);
host->use_dma = true;
if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 86fac3e86833..c763b404510f 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -789,14 +789,16 @@ static int pxamci_probe(struct platform_device *pdev)
gpio_direction_output(gpio_power,
host->pdata->gpio_power_invert);
}
- if (gpio_is_valid(gpio_ro))
+ if (gpio_is_valid(gpio_ro)) {
ret = mmc_gpio_request_ro(mmc, gpio_ro);
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
- goto out;
- } else {
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n",
+ gpio_ro);
+ goto out;
+ } else {
+ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+ 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ }
}
if (gpio_is_valid(gpio_cd))
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 458ffb7637e5..008709c5cb09 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -43,6 +43,7 @@
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
#endif
@@ -126,7 +127,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
static bool sdhci_acpi_byt(void)
{
static const struct x86_cpu_id byt[] = {
- { X86_VENDOR_INTEL, 6, 0x37 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
{}
};
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 74ae24364a8d..8d58acf33021 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -87,14 +87,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (req->cmd_type != REQ_TYPE_FS)
return -EIO;
- if (req->cmd_flags & REQ_FLUSH)
+ if (req_op(req) == REQ_OP_FLUSH)
return tr->flush(dev);
if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
get_capacity(req->rq_disk))
return -EIO;
- if (req->cmd_flags & REQ_DISCARD)
+ if (req_op(req) == REQ_OP_DISCARD)
return tr->discard(dev, block, nsect);
if (rq_data_dir(req) == READ) {
@@ -431,12 +431,10 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
goto error4;
INIT_WORK(&new->work, mtd_blktrans_work);
- gd->driverfs_dev = &new->mtd->dev;
-
if (new->readonly)
set_disk_ro(gd, 1);
- add_disk(gd);
+ device_add_disk(&new->mtd->dev, gd);
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 08e158895635..a136da8df6fe 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1657,8 +1657,11 @@ static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
/* detect availability of ELM module. Won't be present pre-OMAP4 */
info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
- if (!info->elm_of_node)
- dev_dbg(dev, "ti,elm-id not in DT\n");
+ if (!info->elm_of_node) {
+ info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
+ if (!info->elm_of_node)
+ dev_dbg(dev, "ti,elm-id not in DT\n");
+ }
/* select ecc-scheme for NAND */
if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index ca81f46ea1aa..edc70ffad660 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -101,11 +101,14 @@ enum ad_link_speed_type {
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
-static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
+static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+ 0, 0, 0, 0, 0, 0
+};
static u16 ad_ticks_per_sec;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+ MULTICAST_LACPDU_ADDR;
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@@ -1739,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
aggregator->is_individual = false;
aggregator->actor_admin_aggregator_key = 0;
aggregator->actor_oper_aggregator_key = 0;
- aggregator->partner_system = null_mac_addr;
+ eth_zero_addr(aggregator->partner_system.mac_addr_value);
aggregator->partner_system_priority = 0;
aggregator->partner_oper_aggregator_key = 0;
aggregator->receive_state = 0;
@@ -1761,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
if (aggregator) {
ad_clear_agg(aggregator);
- aggregator->aggregator_mac_address = null_mac_addr;
+ eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
aggregator->aggregator_identifier = 0;
aggregator->slave = NULL;
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c5ac160a8ae9..551f0f8dead3 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -42,13 +42,10 @@
-#ifndef __long_aligned
-#define __long_aligned __attribute__((aligned((sizeof(long)))))
-#endif
-static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
-static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
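
The ETH_ALEN + 2 sizing in both bonding hunks is deliberate: ether_addr_equal_64bits() may compare the addresses with a single 64-bit load, so both operands must be at least eight bytes long and suitably aligned, hence the two bytes of tail padding and the __long_aligned attribute. The pattern in isolation (the address value is illustrative):

    /* Two bytes of padding keep the 64-bit read inside the object. */
    static const u8 example_addr[ETH_ALEN + 2] __long_aligned = {
    	0x01, 0x80, 0xc2, 0x00, 0x00, 0x02
    };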
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 941ec99cd3b6..a2afa3be17a4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
/* check for initial state */
+ new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index db760e84119f..b8df0f5e8c25 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
if (err < 0)
return err;
- return register_netdevice(bond_dev);
+ err = register_netdevice(bond_dev);
+
+ netif_carrier_off(bond_dev);
+
+ return err;
}
static size_t bond_get_size(const struct net_device *bond_dev)
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 30defe6c81f2..821d86c38ab2 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3851,7 +3851,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
unsigned long flags;
/* If the device is closed, ignore the timeout */
- if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+ if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
return;
/* Any nonrecoverable hardware error?
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 08a23e6b60e9..1a3555d03a96 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -259,6 +259,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
if (err) {
netdev_err(dev, "rx buffer allocation failed\n");
dev->stats.rx_dropped++;
+ dev_kfree_skb(skb);
return;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 543bf38105c9..bfa26a2590c9 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -392,7 +392,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ data[i] = *(unsigned long *)p;
}
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index a6333d38ecc0..25bbae5928d4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -231,7 +231,7 @@ err_dma:
dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
DMA_TO_DEVICE);
- while (i > 0) {
+ while (i-- > 0) {
int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index a38cb047b540..1b0ae4a72e9e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1591,7 +1591,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
u16 start = eeprom->offset, length = eeprom->len;
- int rc;
+ int rc = 0;
memset(data, 0, eeprom->len);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8de79ae63231..0e7e7da8d201 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2821,7 +2821,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (!g) {
netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n");
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
}
cmdsetup.s.gather = 1;
@@ -2892,7 +2892,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
@@ -2906,12 +2906,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
+lio_xmit_dma_failed:
+ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+ ndata.datasize, DMA_TO_DEVICE);
lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
- ndata.datasize, DMA_TO_DEVICE);
recv_buffer_free(skb);
return NETDEV_TX_OK;
}
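
The liquidio change is the classic goto-ladder repair: the DMA unmap belongs only on paths reached after the mapping succeeded, so a new label is introduced above the generic failure label and the affected jumps are retargeted. The shape of the idiom as a runnable toy (all names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool prepare(void)   { return false; }
    static bool map_dma(void)   { return false; }
    static bool send_pkt(void)  { return true; }	/* force the unwind path */
    static void unmap_dma(void) { puts("unmapping"); }
    static void count_drop(void) { puts("dropped"); }

    int main(void)
    {
    	if (prepare())
    		goto out_failed;	/* nothing to undo yet */
    	if (map_dma())
    		goto out_failed;	/* mapping did not happen */
    	if (send_pkt())
    		goto out_unmap;		/* mapping must be undone */
    	return 0;

    out_unmap:
    	unmap_dma();			/* then fall through to common path */
    out_failed:
    	count_drop();
    	return 0;
    }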
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index c4b262ca7d43..2accab386323 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4edb98c3c6c7..4466a1187110 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -860,6 +860,11 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int entry;
void *dest;
+ if (skb_put_padto(skb, ETHOC_ZLEN)) {
+ dev->stats.tx_errors++;
+ goto out_no_free;
+ }
+
if (unlikely(skb->len > ETHOC_BUFSIZ)) {
dev->stats.tx_errors++;
goto out;
@@ -894,6 +899,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
out:
dev_kfree_skb(skb);
+out_no_free:
return NETDEV_TX_OK;
}
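The new skb_put_padto() call above zero-pads runt frames to the minimum Ethernet length before they reach the MAC, and bails out through a label that does not free the skb, since the helper already consumed it on failure. A userspace sketch of the padding step (ETHOC_ZLEN's value is not shown in this hunk; 60 below is an assumption):

#include <string.h>
#include <stdio.h>

#define ZLEN 60	/* assumed minimum frame length, like ETHOC_ZLEN */

static int pad_to(char *buf, size_t *len, size_t min_len, size_t cap)
{
	if (*len >= min_len)
		return 0;
	if (min_len > cap)
		return -1;	/* would overflow the buffer */
	memset(buf + *len, 0, min_len - *len);
	*len = min_len;
	return 0;
}

int main(void)
{
	char frame[128] = "short payload";
	size_t len = 13;

	if (pad_to(frame, &len, ZLEN, sizeof(frame)))
		return 1;	/* count as a tx_error, no free here */
	printf("frame is now %zu bytes\n", len);
	return 0;
}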
@@ -1086,7 +1092,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
if (netdev->mem_end) {
@@ -1095,7 +1101,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
} else {
/* Allocate buffer memory */
@@ -1106,7 +1112,7 @@ static int ethoc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
buffer_size);
ret = -ENOMEM;
- goto error;
+ goto free;
}
netdev->mem_end = netdev->mem_start + buffer_size;
priv->dma_alloc = buffer_size;
@@ -1120,7 +1126,7 @@ static int ethoc_probe(struct platform_device *pdev)
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
if (num_bd < 4) {
ret = -ENODEV;
- goto error;
+ goto free;
}
priv->num_bd = num_bd;
/* num_tx must be a power of two */
@@ -1133,7 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
- goto error;
+ goto free;
}
/* Allow the platform setup code to pass in a MAC address. */
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 06f031715b57..9b7a3f5a2818 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -285,6 +285,7 @@ static void nps_enet_hw_reset(struct net_device *ndev)
ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
usleep_range(10, 20);
+ ge_rst_value = 0;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
/* Tx fifo reset sequence */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index ecdb6854a898..88f3c85fb04a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -75,6 +75,7 @@
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/seq_file.h>
+#include <linux/workqueue.h>
#include "ibmvnic.h"
@@ -89,6 +90,7 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
@@ -469,7 +471,8 @@ static int ibmvnic_open(struct net_device *netdev)
crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
ibmvnic_send_crq(adapter, &crq);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
+
return 0;
bounce_map_failed:
@@ -519,7 +522,7 @@ static int ibmvnic_close(struct net_device *netdev)
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
if (adapter->bounce_buffer) {
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -1212,12 +1215,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
goto reg_failed;
}
- scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
- dev_err(dev, "Error mapping irq\n");
- goto map_irq_failed;
- }
-
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
@@ -1230,12 +1227,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
return scrq;
-map_irq_failed:
- do {
- rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
- adapter->vdev->unit_address,
- scrq->crq_num);
- } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -1256,6 +1247,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
if (adapter->tx_scrq[i]) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
+ irq_dispose_mapping(adapter->tx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->tx_scrq[i]);
}
@@ -1267,6 +1259,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
if (adapter->rx_scrq[i]) {
free_irq(adapter->rx_scrq[i]->irq,
adapter->rx_scrq[i]);
+ irq_dispose_mapping(adapter->rx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->rx_scrq[i]);
}
@@ -1276,6 +1269,29 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
adapter->requested_caps = 0;
}
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->tx_scrq[i]);
+ adapter->tx_scrq = NULL;
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->rx_scrq[i]);
+ adapter->rx_scrq = NULL;
+ }
+
+ adapter->requested_caps = 0;
+}
+
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -1395,6 +1411,68 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
return IRQ_HANDLED;
}
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_sub_crq_queue *scrq;
+ int i = 0, j = 0;
+ int rc = 0;
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ scrq = adapter->tx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_tx_irq_failed;
+ }
+
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+ 0, "ibmvnic_tx", scrq);
+
+ if (rc) {
+ dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_tx_irq_failed;
+ }
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ scrq = adapter->rx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_rx_irq_failed;
+ }
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+ 0, "ibmvnic_rx", scrq);
+ if (rc) {
+ dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_rx_irq_failed;
+ }
+ }
+ return rc;
+
+req_rx_irq_failed:
+ for (j = 0; j < i; j++) {
+ free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+ irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
+ i = adapter->req_tx_queues;
+req_tx_irq_failed:
+ for (j = 0; j < i; j++) {
+ free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
+ }
+ release_sub_crqs_no_irqs(adapter);
+ return rc;
+}
+
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;
@@ -1403,8 +1479,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
union ibmvnic_crq crq;
int total_queues;
int more = 0;
- int i, j;
- int rc;
+ int i;
if (!retry) {
/* Sub-CRQ entries are 32 bytes long */
@@ -1483,13 +1558,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
for (i = 0; i < adapter->req_tx_queues; i++) {
adapter->tx_scrq[i] = allqueues[i];
adapter->tx_scrq[i]->pool_index = i;
- rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
- 0, "ibmvnic_tx", adapter->tx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
- adapter->tx_scrq[i]->irq, rc);
- goto req_tx_irq_failed;
- }
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -1500,13 +1568,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
for (i = 0; i < adapter->req_rx_queues; i++) {
adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
adapter->rx_scrq[i]->scrq_num = i;
- rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
- 0, "ibmvnic_rx", adapter->rx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
- adapter->rx_scrq[i]->irq, rc);
- goto req_rx_irq_failed;
- }
}
memset(&crq, 0, sizeof(crq));
@@ -1559,15 +1620,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
return;
-req_rx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
- i = adapter->req_tx_queues;
-req_tx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- kfree(adapter->rx_scrq);
- adapter->rx_scrq = NULL;
rx_failed:
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
@@ -2348,9 +2400,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be32_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs(adapter);
+ release_sub_crqs_no_irqs(adapter);
*req_value = be32_to_cpu(crq->request_capability_rsp.number);
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
@@ -2659,7 +2711,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
out:
if (atomic_read(&adapter->running_cap_queries) == 0)
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 0);
/* We're done querying the capabilities, initialize sub-crqs */
}
@@ -3202,8 +3254,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dev_info(dev, "Partner initialized\n");
/* Send back a response */
rc = ibmvnic_send_crq_init_complete(adapter);
- if (rc == 0)
- send_version_xchg(adapter);
+ if (!rc)
+ schedule_work(&adapter->vnic_crq_init);
else
dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
break;
@@ -3555,8 +3607,63 @@ static const struct file_operations ibmvnic_dump_ops = {
.release = single_release,
};
+static void handle_crq_init_rsp(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ vnic_crq_init);
+ struct device *dev = &adapter->vdev->dev;
+ struct net_device *netdev = adapter->netdev;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ int rc;
+
+ send_version_xchg(adapter);
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
+ send_cap_queries(adapter);
+
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+ }
+ } while (adapter->renegotiate);
+ rc = init_sub_crq_irqs(adapter);
+
+ if (rc)
+ goto task_failed;
+
+ netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(dev,
+ "failed to register netdev rc=%d\n", rc);
+ goto register_failed;
+ }
+ dev_info(dev, "ibmvnic registered\n");
+
+ return;
+
+register_failed:
+ release_sub_crqs(adapter);
+task_failed:
+ dev_err(dev, "Passive initialization was not successful\n");
+}
+
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
+ unsigned long timeout = msecs_to_jiffies(30000);
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
@@ -3593,6 +3700,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
+ INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+
spin_lock_init(&adapter->stats_lock);
rc = ibmvnic_init_crq_queue(adapter);
@@ -3635,30 +3744,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
ibmvnic_send_crq_init(adapter);
init_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+ return 0;
do {
- adapter->renegotiate = false;
-
- init_sub_crqs(adapter, 0);
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
-
if (adapter->renegotiate) {
- release_sub_crqs(adapter);
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout))
+ return 0;
}
} while (adapter->renegotiate);
- /* if init_sub_crqs is partially successful, retry */
- while (!adapter->tx_scrq || !adapter->rx_scrq) {
- init_sub_crqs(adapter, 1);
-
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+ goto free_debugfs;
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
@@ -3666,12 +3771,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto free_debugfs;
+ goto free_sub_crqs;
}
dev_info(&dev->dev, "ibmvnic registered\n");
return 0;
+free_sub_crqs:
+ release_sub_crqs(adapter);
free_debugfs:
if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
debugfs_remove_recursive(adapter->debugfs_dir);
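A thread running the rework above never blocks indefinitely: every wait_for_completion() became wait_for_completion_timeout(), so a firmware that stops responding turns into a bounded, recoverable failure instead of a hung probe. A POSIX model of that primitive, illustrative only (the kernel version returns 0 on timeout and the remaining jiffies otherwise):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;

static int wait_done_timeout(int ms)
{
	struct timespec ts;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += ms / 1000;
	ts.tv_nsec += (ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (!done && rc == 0)
		rc = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);
	return done ? 0 : -1;	/* -1 here plays the role of a 0 return in the kernel */
}

int main(void)
{
	if (wait_done_timeout(100))
		fprintf(stderr, "init timeout, bailing out cleanly\n");
	return 0;
}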
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 0b66a506a4e4..e82898fd518e 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1045,4 +1045,6 @@ struct ibmvnic_adapter {
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
+
+ struct work_struct vnic_crq_init;
};
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 73f745205a1c..2b2e2f8c6369 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -154,16 +154,6 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
writel(val, hw->hw_addr + reg);
}
-static bool e1000e_vlan_used(struct e1000_adapter *adapter)
-{
- u16 vid;
-
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- return true;
-
- return false;
-}
-
/**
* e1000_regdump - register printout routine
* @hw: pointer to the HW structure
@@ -3453,8 +3443,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
ew32(RCTL, rctl);
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
- e1000e_vlan_used(adapter))
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
e1000e_vlan_strip_enable(adapter);
else
e1000e_vlan_strip_disable(adapter);
@@ -6926,6 +6915,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_RXFCS;
+ /* Since there is no support for separate Rx/Tx VLAN accel
+ * enable/disable, make sure the Tx flag always matches the Rx flag.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
return features;
}
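The e1000e hunk above encodes a hardware constraint in .ndo_fix_features: this MAC cannot enable VLAN tag stripping on receive independently of tag insertion on transmit, so the TX flag is forced to track the RX flag before the feature set is committed. A plain-bitmask sketch of the rule (the flag values are made up):

#include <stdio.h>

#define F_VLAN_RX (1u << 0)
#define F_VLAN_TX (1u << 1)

static unsigned int fix_features(unsigned int features)
{
	/* TX VLAN acceleration must mirror RX VLAN acceleration */
	if (features & F_VLAN_RX)
		features |= F_VLAN_TX;
	else
		features &= ~F_VLAN_TX;
	return features;
}

int main(void)
{
	printf("%#x\n", fix_features(F_VLAN_RX));	/* 0x3: TX forced on  */
	printf("%#x\n", fix_features(F_VLAN_TX));	/* 0x0: TX forced off */
	return 0;
}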
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5ea22008d721..501f15d9f4d6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1344,6 +1344,13 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (!vsi || !macaddr)
return NULL;
+ /* Do not allow a broadcast filter to be added here, since a
+ * broadcast filter is installed as part of VSI creation for
+ * every newly created VSI except the FDIR VSI.
+ */
+ if (is_broadcast_ether_addr(macaddr))
+ return NULL;
+
f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
@@ -2151,18 +2158,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
aq_ret, pf->hw.aq.asq_last_status);
}
}
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
- }
}
out:
/* if something went wrong then set the changed flag so we try again */
@@ -7726,10 +7721,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
+ * @cpu: CPU to be set in the affinity_mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
struct i40e_q_vector *q_vector;
@@ -7740,7 +7736,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
i40e_napi_poll, NAPI_POLL_WEIGHT);
@@ -7764,8 +7761,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int v_idx, num_q_vectors;
- int err;
+ int err, v_idx, num_q_vectors, current_cpu;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -7775,10 +7771,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
+ current_cpu = cpumask_first(cpu_online_mask);
+
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
if (err)
goto err_out;
+ current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+ if (unlikely(current_cpu >= nr_cpu_ids))
+ current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
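The allocation loop above now spreads vector affinity hints across the online CPUs instead of pinning vector N to CPU N, wrapping back to the first online CPU when the mask runs out, which is exactly what the cpumask_next()/cpumask_first() pair does in the patch. A userspace sketch with an array standing in for cpu_online_mask:

#include <stdio.h>

static const int online[] = { 0, 2, 3 };	/* assumed online CPUs */
#define NR_ONLINE ((int)(sizeof(online) / sizeof(online[0])))

int main(void)
{
	int num_vectors = 8;
	int idx = 0;

	for (int v = 0; v < num_vectors; v++) {
		printf("vector %d -> cpu %d\n", v, online[idx]);
		idx++;
		if (idx >= NR_ONLINE)	/* like cpumask_next() hitting nr_cpu_ids */
			idx = 0;	/* wrap to cpumask_first() */
	}
	return 0;
}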
@@ -9224,6 +9225,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
+ i40e_status aq_ret = 0;
u8 laa_macaddr[ETH_ALEN];
bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;
@@ -9413,6 +9415,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
}
+ /* Except for the FDIR VSI, set the broadcast filter on all other VSIs */
+ if (vsi->type != I40E_VSI_FDIR) {
+ aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+ if (aq_ret) {
+ ret = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 55f151fca1dc..a8868e1bf832 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1280,8 +1280,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
union i40e_rx_desc *rx_desc)
{
struct i40e_rx_ptype_decoded decoded;
- bool ipv4, ipv6, tunnel = false;
u32 rx_error, rx_status;
+ bool ipv4, ipv6;
u8 ptype;
u64 qword;
@@ -1336,19 +1336,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
- if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
- I40E_RX_PTYPE_INNER_PROT_UDP |
- I40E_RX_PTYPE_INNER_PROT_SCTP))
- tunnel = true;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = tunnel ? 1 : 0;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall through */
+ default:
+ break;
+ }
return;
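The rewritten checksum block above separates two decisions: whether an outer tunnel header means the validated checksum was the inner one (bump skb->csum_level), and whether the inner protocol is one for which CHECKSUM_UNNECESSARY may be reported at all. A mock-type sketch of that logic, not the i40e code:

#include <stdbool.h>
#include <stdio.h>

enum inner_prot { PROT_NONE, PROT_TCP, PROT_UDP, PROT_SCTP };

struct mock_skb {
	int ip_summed;		/* 1 stands in for CHECKSUM_UNNECESSARY */
	int csum_level;
};

static void rx_checksum(struct mock_skb *skb, bool tunneled,
			enum inner_prot prot)
{
	/* an outer header means the checksum we validated was the inner one */
	if (tunneled)
		skb->csum_level = 1;

	/* only report unnecessary for protocols the hardware validates */
	switch (prot) {
	case PROT_TCP:
	case PROT_UDP:
	case PROT_SCTP:
		skb->ip_summed = 1;
		break;
	default:
		break;	/* leave ip_summed alone for anything else */
	}
}

int main(void)
{
	struct mock_skb skb = { 0, 0 };

	rx_checksum(&skb, true, PROT_UDP);
	printf("summed=%d level=%d\n", skb.ip_summed, skb.csum_level);
	return 0;
}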
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index be99189da925..79d99cd91b24 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -752,8 +752,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
union i40e_rx_desc *rx_desc)
{
struct i40e_rx_ptype_decoded decoded;
- bool ipv4, ipv6, tunnel = false;
u32 rx_error, rx_status;
+ bool ipv4, ipv6;
u8 ptype;
u64 qword;
@@ -808,19 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
- if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
- I40E_RX_PTYPE_INNER_PROT_UDP |
- I40E_RX_PTYPE_INNER_PROT_SCTP))
- tunnel = true;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = tunnel ? 1 : 0;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall through */
+ default:
+ break;
+ }
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 088c47cf27d9..8bebd862a54c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2887,7 +2887,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
- return 0;
+ return min(work_done, budget - 1);
}
/**
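The ixgbe return-value fix above follows the NAPI contract: a poll handler that has finished its work and re-enabled interrupts must return strictly less than its budget, but it should still report how many packets it actually processed rather than a flat 0, which is why the fix clamps work_done to budget - 1. A two-line model of the clamp:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

static int poll(int work_done, int budget)
{
	/* finished and re-armed: report real work, but stay below budget */
	return min_int(work_done, budget - 1);
}

int main(void)
{
	printf("%d\n", poll(0, 64));	/* 0: nothing done, NAPI completes  */
	printf("%d\n", poll(64, 64));	/* 63: work reported, still < budget */
	return 0;
}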
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 61a80da8b6f0..2819abc454c7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
if (!mbx->ops.read)
goto out;
@@ -111,7 +111,7 @@ out:
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a6d26d351dfc..f92018b13d28 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -244,7 +244,7 @@
/* Various constants */
/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 1
+#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
return 0;
err_free_irq:
+ unregister_cpu_notifier(&pp->cpu_notifier);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fc95affaf76b..44cf16d01f42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1042,6 +1042,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;
@@ -1061,22 +1063,25 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
tx_size == priv->tx_ring[0]->size)
return 0;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.tx_ring_size = tx_size;
+ new_prof.rx_ring_size = rx_size;
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->prof->tx_ring_size = tx_size;
- priv->prof->rx_ring_size = rx_size;
+ mlx4_en_safe_replace_resources(priv, tmp);
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
@@ -1084,8 +1089,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
@@ -1714,6 +1719,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
@@ -1723,23 +1730,26 @@ static int mlx4_en_set_channels(struct net_device *dev,
!channel->tx_count || !channel->rx_count)
return -EINVAL;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.num_tx_rings_p_up = channel->tx_count;
+ new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.rx_ring_num = channel->rx_count;
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->num_tx_rings_p_up = channel->tx_count;
- priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
- priv->rx_ring_num = channel->rx_count;
-
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
+ mlx4_en_safe_replace_resources(priv, tmp);
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -1757,8 +1767,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
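Both ethtool paths above move to the same allocate-then-swap shape: build the new rings into a scratch priv under the new profile first, and only tear down the live configuration once that allocation has succeeded, so a failed resize can no longer leave the port with no resources at all. A small sketch of the pattern with invented types:

#include <stdlib.h>
#include <stdio.h>

struct rings { int *buf; int size; };

static int try_alloc(struct rings *tmp, int new_size)
{
	tmp->buf = calloc(new_size, sizeof(int));
	if (!tmp->buf)
		return -1;	/* failure leaves the old config untouched */
	tmp->size = new_size;
	return 0;
}

static void safe_replace(struct rings *live, struct rings *tmp)
{
	free(live->buf);	/* the old resources go away only now */
	*live = *tmp;
}

int main(void)
{
	struct rings live = { calloc(4, sizeof(int)), 4 }, tmp;

	if (try_alloc(&tmp, 8) == 0)
		safe_replace(&live, &tmp);
	printf("ring size is now %d\n", live.size);
	free(live.buf);
	return 0;
}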
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0c0dfd6cdca6..8359e9e51b3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1954,7 +1954,7 @@ static int mlx4_en_close(struct net_device *dev)
return 0;
}
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
@@ -1979,7 +1979,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
}
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
@@ -2044,6 +2044,77 @@ static void mlx4_en_shutdown(struct net_device *dev)
rtnl_unlock();
}
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src,
+ struct mlx4_en_port_profile *prof)
+{
+ memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+ dst->tx_ring_num = prof->tx_ring_num;
+ dst->rx_ring_num = prof->rx_ring_num;
+ dst->flags = prof->flags;
+ dst->mdev = src->mdev;
+ dst->port = src->port;
+ dst->dev = src->dev;
+ dst->prof = prof;
+ dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+ dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_ring)
+ return -ENOMEM;
+
+ dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_cq) {
+ kfree(dst->tx_ring);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src)
+{
+ memcpy(dst->rx_ring, src->rx_ring,
+ sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+ memcpy(dst->rx_cq, src->rx_cq,
+ sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+ memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->tx_ring_num = src->tx_ring_num;
+ dst->rx_ring_num = src->rx_ring_num;
+ dst->tx_ring = src->tx_ring;
+ dst->tx_cq = src->tx_cq;
+ memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof)
+{
+ mlx4_en_copy_priv(tmp, priv, prof);
+
+ if (mlx4_en_alloc_resources(tmp)) {
+ en_warn(priv,
+ "%s: Resource allocation failed, using previous configuration\n",
+ __func__);
+ kfree(tmp->tx_ring);
+ kfree(tmp->tx_cq);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp)
+{
+ mlx4_en_free_resources(priv);
+ mlx4_en_update_priv(priv, tmp);
+}
+
void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -2080,6 +2151,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mdev->upper[priv->port] = NULL;
mutex_unlock(&mdev->state_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mlx4_en_cleanup_filters(priv);
+#endif
+
mlx4_en_free_resources(priv);
kfree(priv->tx_ring);
@@ -3124,6 +3199,8 @@ int mlx4_en_reset_config(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
@@ -3140,19 +3217,29 @@ int mlx4_en_reset_config(struct net_device *dev,
return -EINVAL;
}
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
- ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+ ts_config.rx_filter,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
- priv->hwtstamp_config.tx_type = ts_config.tx_type;
- priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+ mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -3186,11 +3273,6 @@ int mlx4_en_reset_config(struct net_device *dev,
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
@@ -3199,6 +3281,8 @@ int mlx4_en_reset_config(struct net_device *dev,
out:
mutex_unlock(&mdev->state_lock);
- netdev_features_change(dev);
+ kfree(tmp);
+ if (!err)
+ netdev_features_change(dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c1b3a9c8cf3b..99b5407f2278 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -514,9 +514,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
- mlx4_en_cleanup_filters(priv);
-#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 467d47ed2c39..13d297ee34bb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -353,12 +353,14 @@ struct mlx4_en_port_profile {
u32 rx_ring_num;
u32 tx_ring_size;
u32 rx_ring_size;
+ u8 num_tx_rings_p_up;
u8 rx_pause;
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
int inline_thold;
+ struct hwtstamp_config hwtstamp_config;
};
struct mlx4_en_profile {
@@ -623,8 +625,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause);
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode, int node);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 0b4986268cc9..d6e2a1cae19a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
- case MLX5_CMD_OP_2ERR_QP:
- case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_QUERY_QP:
case MLX5_CMD_OP_SQD_RTS_QP:
case MLX5_CMD_OP_INIT2INIT_QP:
@@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
- case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OP_SET_ROCE_ADDRESS:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_RQT:
case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
+
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
- case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
@@ -602,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
pr_debug("\n");
}
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+ struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+ return be16_to_cpu(hdr->opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_cmd_work_ent *ent = container_of(dwork,
+ struct mlx5_cmd_work_ent,
+ cb_timeout_work);
+ struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+ cmd);
+
+ ent->ret = -ETIMEDOUT;
+ mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+ mlx5_command_str(msg_to_opcode(ent->in)),
+ msg_to_opcode(ent->in));
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
+
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
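cb_timeout_handler above is the new watchdog for commands issued with a completion callback: a delayed work is armed when the command is posted (see the schedule_delayed_work() call added to cmd_work_handler below) and cancelled by the completion handler if the firmware answers first. A userspace model of the arm/cancel race, using a deadline check in place of a kernel delayed work:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct cmd_ent {
	bool armed;
	time_t deadline;
	int ret;
};

static void arm_timeout(struct cmd_ent *ent, int secs)
{
	ent->armed = true;			/* ~ schedule_delayed_work() */
	ent->deadline = time(NULL) + secs;
}

static void complete_cmd(struct cmd_ent *ent)
{
	ent->armed = false;			/* ~ cancel_delayed_work() */
	ent->ret = 0;
}

static void poll_timeout(struct cmd_ent *ent)
{
	if (ent->armed && time(NULL) >= ent->deadline)
		ent->ret = -1;			/* ~ -ETIMEDOUT + forced completion */
}

int main(void)
{
	struct cmd_ent ent = { false, 0, 99 };

	arm_timeout(&ent, 5);
	complete_cmd(&ent);			/* the completion won the race */
	poll_timeout(&ent);			/* watchdog finds nothing armed */
	printf("ret=%d\n", ent.ret);
	return 0;
}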
@@ -647,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
+ if (ent->callback)
+ schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@@ -691,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
}
}
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
- struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
- return be16_to_cpu(hdr->opcode);
-}
-
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -706,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
if (cmd->mode == CMD_MODE_POLLING) {
wait_for_completion(&ent->done);
- err = ent->ret;
- } else {
- if (!wait_for_completion_timeout(&ent->done, timeout))
- err = -ETIMEDOUT;
- else
- err = 0;
+ } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+ ent->ret = -ETIMEDOUT;
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
+
+ err = ent->ret;
+
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
@@ -761,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (!callback)
init_completion(&ent->done);
+ INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
INIT_WORK(&ent->work, cmd_work_handler);
if (page_queue) {
cmd_work_handler(&ent->work);
@@ -770,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
}
- if (!callback) {
- err = wait_func(dev, ent);
- if (err == -ETIMEDOUT)
- goto out;
-
- ds = ent->ts2 - ent->ts1;
- op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
- if (op < ARRAY_SIZE(cmd->stats)) {
- stats = &cmd->stats[op];
- spin_lock_irq(&stats->lock);
- stats->sum += ds;
- ++stats->n;
- spin_unlock_irq(&stats->lock);
- }
- mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
- "fw exec time for %s is %lld nsec\n",
- mlx5_command_str(op), ds);
- *status = ent->status;
- free_cmd(ent);
- }
+ if (callback)
+ goto out;
- return err;
+ err = wait_func(dev, ent);
+ if (err == -ETIMEDOUT)
+ goto out_free;
+
+ ds = ent->ts2 - ent->ts1;
+ op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ if (op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[op];
+ spin_lock_irq(&stats->lock);
+ stats->sum += ds;
+ ++stats->n;
+ spin_unlock_irq(&stats->lock);
+ }
+ mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+ "fw exec time for %s is %lld nsec\n",
+ mlx5_command_str(op), ds);
+ *status = ent->status;
out_free:
free_cmd(ent);
@@ -1181,41 +1205,30 @@ err_dbg:
return err;
}
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
struct mlx5_cmd *cmd = &dev->cmd;
int i;
for (i = 0; i < cmd->max_reg_cmds; i++)
down(&cmd->sem);
-
down(&cmd->pages_sem);
- flush_workqueue(cmd->wq);
-
- cmd->mode = CMD_MODE_EVENTS;
+ cmd->mode = mode;
up(&cmd->pages_sem);
for (i = 0; i < cmd->max_reg_cmds; i++)
up(&cmd->sem);
}
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd *cmd = &dev->cmd;
- int i;
-
- for (i = 0; i < cmd->max_reg_cmds; i++)
- down(&cmd->sem);
-
- down(&cmd->pages_sem);
-
- flush_workqueue(cmd->wq);
- cmd->mode = CMD_MODE_POLLING;
+ mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
- up(&cmd->pages_sem);
- for (i = 0; i < cmd->max_reg_cmds; i++)
- up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+ mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1251,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
struct semaphore *sem;
ent = cmd->ent_arr[i];
+ if (ent->callback)
+ cancel_delayed_work(&ent->cb_timeout_work);
if (ent->page_queue)
sem = &cmd->pages_sem;
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index baa991a23475..943b1bd434bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -145,7 +145,6 @@ struct mlx5e_umr_wqe {
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif
struct mlx5e_params {
@@ -191,6 +190,7 @@ struct mlx5e_tstamp {
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+ MLX5E_RQ_STATE_FLUSH_TIMEOUT,
};
struct mlx5e_cq {
@@ -220,6 +220,8 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
u16 ix);
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
struct mlx5e_dma_info {
struct page *page;
dma_addr_t addr;
@@ -241,6 +243,7 @@ struct mlx5e_rq {
struct mlx5e_cq cq;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_alloc_wqe alloc_wqe;
+ mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
@@ -305,6 +308,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
MLX5E_SQ_STATE_BF_ENABLE,
+ MLX5E_SQ_STATE_TX_TIMEOUT,
};
struct mlx5e_ico_wqe_info {
@@ -538,6 +542,7 @@ struct mlx5e_priv {
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
+ struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;
struct mlx5_core_dev *mdev;
@@ -589,12 +594,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index b2db180ae2a5..c585349e05c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
- tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+ tc_tx_bw[i] = ets->tc_tx_bw[i];
break;
}
}
@@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ if (!ets->tc_tx_bw[i])
+ return -EINVAL;
+
bw_sum += ets->tc_tx_bw[i];
+ }
}
if (bw_sum != 0 && bw_sum != 100)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cb6defd71fc1..5a4d88c2cdb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,13 @@
#include "eswitch.h"
#include "vxlan.h"
+enum {
+ MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
+ MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
+ MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
+ MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
+};
+
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
@@ -74,10 +81,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
port_state = mlx5_query_vport_state(mdev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
- if (port_state == VPORT_STATE_UP)
+ if (port_state == VPORT_STATE_UP) {
+ netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
- else
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
+ }
}
static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -91,6 +101,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
+static void mlx5e_tx_timeout_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ tx_timeout_work);
+ int err;
+
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+ mlx5e_close_locked(priv->netdev);
+ err = mlx5e_open_locked(priv->netdev);
+ if (err)
+ netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ err);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+}
+
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -305,6 +335,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
@@ -320,6 +351,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
}
rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
rq->wqe_sz = (priv->params.lro_en) ?
priv->params.lro_wqe_sz :
@@ -525,17 +557,25 @@ err_destroy_rq:
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
+ int tout = 0;
+ int err;
+
clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
- mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
- while (!mlx5_wq_ll_is_empty(&rq->wq))
- msleep(20);
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+ while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
+ tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
+ msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+
+ if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
+ set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);
mlx5e_disable_rq(rq);
+ mlx5e_free_rx_descs(rq);
mlx5e_destroy_rq(rq);
}
@@ -782,6 +822,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
+ int tout = 0;
+ int err;
+
if (sq->txq) {
clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
/* prevent netif_tx_wake_queue */
@@ -792,15 +835,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_ERR);
+ if (err)
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}
- while (sq->cc != sq->pc) /* wait till sq is empty */
- msleep(20);
+ /* wait till sq is empty, unless a TX timeout occurred on this SQ */
+ while (sq->cc != sq->pc &&
+ !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
+ msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+ if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ }
/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
napi_synchronize(&sq->channel->napi);
+ mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
}
@@ -1296,6 +1348,11 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
goto err_close_channels;
}
+ /* FIXME: This is a W/A for the TX timeout watchdog false alarm
+ * that fires when polling inactive TX queues.
+ */
+ netif_tx_start_all_queues(priv->netdev);
+
kfree(cparam);
return 0;
@@ -1315,6 +1372,12 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
int i;
+ /* FIXME: This is a W/A only for the TX timeout watchdog false alarm
+ * that fires when polling inactive TX queues.
+ */
+ netif_tx_stop_all_queues(priv->netdev);
+ netif_tx_disable(priv->netdev);
+
for (i = 0; i < priv->params.num_channels; i++)
mlx5e_close_channel(priv->channel[i]);
@@ -1658,8 +1721,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_num_tc(netdev, ntc);
+ /* Map netdev TCs to offset 0
+ * We have our own UP to TXQ mapping for QoS
+ */
for (tc = 0; tc < ntc; tc++)
- netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+ netdev_set_tc_queue(netdev, tc, nch, 0);
}
int mlx5e_open_locked(struct net_device *netdev)
@@ -2590,6 +2656,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ bool sched_work = false;
+ int i;
+
+ netdev_err(dev, "TX timeout detected\n");
+
+ for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+
+ if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+ continue;
+ sched_work = true;
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
+ i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+ }
+
+ if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+ schedule_work(&priv->tx_timeout_work);
+}
+
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
@@ -2607,6 +2696,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2636,6 +2726,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2838,6 +2929,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
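mlx5e_tx_timeout() above only observes: it flags each stopped queue, logs its SQ/CQ state, and schedules at most one recovery work item, leaving the actual close/reopen to mlx5e_tx_timeout_work() under rtnl and the state lock. A compact model of that scan-and-schedule-once shape (the queue states below are fabricated):

#include <stdbool.h>
#include <stdio.h>

#define NQUEUES 4

static bool queue_stopped[NQUEUES] = { false, true, false, true };
static bool sq_timeout_flag[NQUEUES];

int main(void)
{
	bool sched_work = false;

	for (int i = 0; i < NQUEUES; i++) {
		if (!queue_stopped[i])
			continue;
		sched_work = true;
		sq_timeout_flag[i] = true;	/* stop completions on this SQ */
		printf("TX timeout on queue %d\n", i);
	}

	if (sched_work)
		puts("scheduling recovery work");	/* ~ schedule_work() */
	return 0;
}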
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 022acc2e8922..9f2a16a507e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -212,6 +212,20 @@ err_free_skb:
return -ENOMEM;
}
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct sk_buff *skb = rq->skb[ix];
+
+ if (skb) {
+ rq->skb[ix] = NULL;
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+}
+
static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
@@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
return 0;
}
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+
+ wi->free_wqe(rq, wi);
+}
+
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_ix_be;
+ u16 wqe_ix;
+
+ while (!mlx5_wq_ll_is_empty(wq)) {
+ wqe_ix_be = *wq->tail_next;
+ wqe_ix = be16_to_cpu(wqe_ix_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+ rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+ &wqe->next.next_wqe_index);
+ }
+}
+
#define RQ_CANNOT_POST(rq) \
(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
@@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done = 0;
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+ return 0;
+
if (cq->decmprs_left)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 5a750b9cd006..5740b465ef84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
- int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
- skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
+ int up = 0;
+
+ if (!netdev_get_num_tc(dev))
+ return channel_ix;
+
+ if (skb_vlan_tag_present(skb))
+ up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+ /* channel_ix can be larger than num_channels since
+ * dev->real_num_tx_queues = num_channels * num_tc
+ */
+ if (channel_ix >= priv->params.num_channels)
+ channel_ix = reciprocal_scale(channel_ix,
+ priv->params.num_channels);
return priv->channeltc_to_txq_map[channel_ix][up];
}
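The queue-selection fix above relies on the kernel's reciprocal_scale(), which maps a 32-bit value into [0, n) with a multiply and a shift instead of a modulo; the driver needs it because the fallback can legally return a queue index up to num_channels * num_tc - 1. A standalone copy of the helper to show the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as the kernel helper: ((u64)val * ep_ro) >> 32 */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t num_channels = 4;

	/* any 32-bit input lands in [0, num_channels) */
	printf("%u\n", (unsigned)reciprocal_scale(0x80000000u, num_channels)); /* 2 */
	printf("%u\n", (unsigned)reciprocal_scale(7, num_channels));           /* 0 */
	return 0;
}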
@@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
-#define MLX5E_MIN_INLINE ETH_HLEN
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
if (bf) {
u16 ihs = skb_headlen(skb);
@@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
return skb_headlen(skb);
}
- return MLX5E_MIN_INLINE;
+ return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -341,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct sk_buff *skb;
+ u16 ci;
+ int i;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+ wi = &sq->wqe_info[ci];
+
+ if (!skb) { /* nop */
+ sq->cc++;
+ continue;
+ }
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+
+ dev_kfree_skb_any(skb);
+ sq->cc += wi->num_wqebbs;
+ }
+}
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
@@ -352,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
+ if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+ return false;
+
npkts = 0;
nbytes = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 42d16b9458e4..96a59463ae65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
+ mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
- return;
+ goto unlock;
mlx5_core_err(dev, "start\n");
- if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ trigger_cmd_completions(dev);
+ }
mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
mlx5_core_err(dev, "end\n");
+
+unlock:
+ mutex_unlock(&dev->intf_state_mutex);
}
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
u32 count;
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- trigger_cmd_completions(dev);
mod_timer(&health->timer, get_next_poll_jiffies());
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c65f4a13e17e..6695893ddd2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1422,46 +1422,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
mlx5_pci_err_detected(dev->pdev, 0);
}
-/* wait for the device to show vital signs. For now we check
- * that we can read the device ID and that the health buffer
- * shows a non zero value which is different than 0xffffffff
+/* wait for the device to show vital signs by waiting
+ * for the health counter to start counting.
*/
-static void wait_vital(struct pci_dev *pdev)
+static int wait_vital(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_health *health = &dev->priv.health;
const int niter = 100;
+ u32 last_count = 0;
u32 count;
- u16 did;
int i;
- /* Wait for firmware to be ready after reset */
- msleep(1000);
- for (i = 0; i < niter; i++) {
- if (pci_read_config_word(pdev, 2, &did)) {
- dev_warn(&pdev->dev, "failed reading config word\n");
- break;
- }
- if (did == pdev->device) {
- dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
- break;
- }
- msleep(50);
- }
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
-
for (i = 0; i < niter; i++) {
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
- dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
- break;
+ if (last_count && last_count != count) {
+ dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ return 0;
+ }
+ last_count = count;
}
msleep(50);
}
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+ return -ETIMEDOUT;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1473,7 +1458,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);
pci_save_state(pdev);
- wait_vital(pdev);
+ err = wait_vital(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+ return;
+ }
err = mlx5_load_one(dev, priv);
if (err)
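
The reworked wait_vital() above no longer re-reads the PCI device ID; it declares the device vital only once the health counter is observed to advance between two samples, which a single readable value cannot prove. A self-contained sketch of that polling logic, with a fake counter standing in for the ioread32be() of the health buffer:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_hw = 0;
    /* stand-in for ioread32be(health->health_counter) */
    static uint32_t read_health_counter(void) { return ++fake_hw; }

    /* Succeed only once the counter is valid (not 0 / 0xffffffff) AND has
     * advanced between two samples, i.e. firmware is demonstrably running. */
    static int wait_vital_sketch(int niter)
    {
        uint32_t last = 0, count;
        for (int i = 0; i < niter; i++) {
            count = read_health_counter();
            if (count && count != 0xffffffffu) {
                if (last && last != count)
                    return 0;          /* ticking: device is vital */
                last = count;
            }
            /* the driver sleeps 50 ms here between samples */
        }
        return -1;                     /* -ETIMEDOUT in the driver */
    }

    int main(void)
    {
        printf("wait_vital -> %d\n", wait_vital_sketch(100));
        return 0;
    }
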
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9eeee0545f1c..32dea3524cee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -345,7 +345,6 @@ retry:
func_id, npages, err);
goto out_4k;
}
- dev->priv.fw_pages += npages;
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
@@ -373,6 +372,33 @@ out_free:
return err;
}
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_manage_pages_inbox *in, int in_size,
+ struct mlx5_manage_pages_outbox *out, int out_size)
+{
+ struct fw_page *fwp;
+ struct rb_node *p;
+ u32 npages;
+ u32 i = 0;
+
+ if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
+ (u32 *)out, out_size);
+
+ npages = be32_to_cpu(in->num_entries);
+
+ p = rb_first(&dev->priv.page_root);
+ while (p && i < npages) {
+ fwp = rb_entry(p, struct fw_page, rb_node);
+ out->pas[i] = cpu_to_be64(fwp->addr);
+ p = rb_next(p);
+ i++;
+ }
+
+ out->num_entries = cpu_to_be32(i);
+ return 0;
+}
+
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+ err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
if (err) {
- mlx5_core_err(dev, "failed reclaiming pages\n");
- goto out_free;
- }
- dev->priv.fw_pages -= npages;
-
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
+ mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
err = -EINVAL;
goto out_free;
}
- if (nclaimed)
- *nclaimed = num_claimed;
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}
+
+ if (nclaimed)
+ *nclaimed = num_claimed;
+
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- free_4k(dev, fwp->addr);
- nclaimed = 1;
- } else {
- err = reclaim_pages(dev, fwp->func_id,
- optimal_reclaimed_pages(),
- &nclaimed);
- }
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
+
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
}
} while (p);
+ WARN(dev->priv.fw_pages,
+ "FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.fw_pages);
+ WARN(dev->priv.vfs_pages,
+ "VFs FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.vfs_pages);
+
return 0;
}
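
The reclaim_pages_cmd() wrapper added in this hunk is the key change: when the device is in the internal-error state, the firmware command is never issued; the driver fabricates the completion itself by walking its own page tree, so the normal free path can still reclaim every page. A toy model of that locally synthesized reply, using a sorted array in place of the rb-tree:

    #include <stdint.h>
    #include <stdio.h>

    #define TRACKED 5
    /* Stand-in for the driver's page rb-tree: sorted page addresses. */
    static uint64_t tracked_pages[TRACKED] = { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000 };

    /* Device dead: never talk to firmware; fabricate the completion locally
     * by reporting up to npages of the addresses we already track, so the
     * caller's normal free_4k() path still reclaims them. */
    static uint32_t reclaim_locally(uint32_t npages, uint64_t *out)
    {
        uint32_t i;

        for (i = 0; i < TRACKED && i < npages; i++)
            out[i] = tracked_pages[i];
        return i;                      /* becomes out->num_entries */
    }

    int main(void)
    {
        uint64_t out[8];
        uint32_t n = reclaim_locally(3, out);

        for (uint32_t i = 0; i < n; i++)
            printf("reclaim 0x%llx\n", (unsigned long long)out[i]);
        return 0;
    }
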
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index daf44cd4c566..91846dfcbe9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -513,7 +513,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
{
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
void *nic_vport_context;
- u8 *guid;
void *in;
int err;
@@ -535,8 +534,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
- guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
- node_guid);
MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 05de77267d58..e25a73ed2981 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -72,8 +72,8 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1977e7a5c530..57d48da709fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -2718,7 +2718,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
* Configures the switch priority to buffer table.
*/
#define MLXSW_REG_PPTB_ID 0x500B
-#define MLXSW_REG_PPTB_LEN 0x0C
+#define MLXSW_REG_PPTB_LEN 0x10
static const struct mlxsw_reg_info mlxsw_reg_pptb = {
.id = MLXSW_REG_PPTB_ID,
@@ -2784,6 +2784,13 @@ MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
*/
MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
@@ -2792,6 +2799,14 @@ static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
mlxsw_reg_pptb_local_port_set(payload, local_port);
mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+ mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+ u8 buff)
+{
+ mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+ mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
}
/* PBMC - Port Buffer Management Control Register
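
The PPTB change above grows the register from 0x0C to 0x10 bytes and adds a second bit array covering switch priorities 8-15; the new pack helper writes the same (prio, buff) mapping into both the low and the msb arrays. A sketch of packing one 4-bit buffer index per priority into two 32-bit words, mirroring both halves as the helper does (a model of the layout, not the real MLXSW_ITEM macros):

    #include <stdint.h>
    #include <stdio.h>

    /* One 4-bit buffer index per switch priority: the original array in
     * 'lo', the new msb array (priorities <i+8> to hardware) in 'hi'. */
    struct pptb_model { uint32_t lo, hi; };

    /* Write the same slot in both arrays, as the patch's pack helper does
     * with ..._prio_to_buff_set() and ..._prio_to_buff_msb_set(). */
    static void prio_to_buff_pack(struct pptb_model *p, unsigned prio, unsigned buff)
    {
        unsigned shift = (prio % 8) * 4;
        uint32_t mask = 0xFu << shift;

        p->lo = (p->lo & ~mask) | ((buff & 0xFu) << shift);
        p->hi = (p->hi & ~mask) | ((buff & 0xFu) << shift);
    }

    int main(void)
    {
        struct pptb_model p = { 0, 0 };

        for (unsigned prio = 0; prio < 8; prio++)
            prio_to_buff_pack(&p, prio, prio % 4);   /* arbitrary demo mapping */
        printf("lo=0x%08x hi=0x%08x\n", (unsigned)p.lo, (unsigned)p.hi);
        return 0;
    }
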
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 660429ebfbe1..374080027b2f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -171,23 +171,6 @@ static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
- bool *p_is_up)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char paos_pl[MLXSW_REG_PAOS_LEN];
- u8 oper_status;
- int err;
-
- mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
- if (err)
- return err;
- oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
- *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
- return 0;
-}
-
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned char *addr)
{
@@ -1434,7 +1417,8 @@ static int mlxsw_sp_port_get_settings(struct net_device *dev,
cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause |
+ SUPPORTED_Autoneg;
cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
eth_proto_oper, cmd);
@@ -1493,7 +1477,6 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
u32 eth_proto_new;
u32 eth_proto_cap;
u32 eth_proto_admin;
- bool is_up;
int err;
speed = ethtool_cmd_speed(cmd);
@@ -1525,12 +1508,7 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
return err;
}
- err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
- if (err) {
- netdev_err(dev, "Failed to get oper status");
- return err;
- }
- if (!is_up)
+ if (!netif_running(dev))
return 0;
err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index a3720a0fad7d..074cdda7b6f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -194,7 +194,7 @@ static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0);
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
pptb_pl);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 0b323661c0b6..01cfb7512827 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -103,7 +103,8 @@ static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]);
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
+
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
pptb_pl);
}
@@ -249,6 +250,7 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
return err;
memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
return 0;
}
@@ -351,7 +353,8 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err;
- if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) {
+ if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
+ pfc->pfc_en) {
netdev_err(dev, "PAUSE frames already enabled on port\n");
return -EINVAL;
}
@@ -371,6 +374,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
}
memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
return 0;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 7066954c39d6..0a26b11ca8f6 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_phy_read(priv, PHIR);
}
/* TX complete handler */
- if ((intflags & EIR_TXIF) != 0) {
+ if (((intflags & EIR_TXIF) != 0) &&
+ ((intflags & EIR_TXERIF) == 0)) {
bool err = false;
loop++;
if (netif_msg_intr(priv))
@@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_tx_clear(ndev, true);
} else
enc28j60_tx_clear(ndev, true);
- locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+ locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
}
/* RX Error handler */
if ((intflags & EIR_RXERIF) != 0) {
@@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
+ BUG_ON(!priv->tx_skb);
+
if (netif_msg_tx_queued(priv))
printk(KERN_DEBUG DRV_NAME
": Tx Packet Len:%d\n", priv->tx_skb->len);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 607bb7d4514d..87c642d3b075 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
+ /* Ensure writes are complete before HW fetches Tx descriptors */
+ wmb();
qlcnic_update_cmd_producer(tx_ring);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a473c182c91d..e4071265be76 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2804,7 +2804,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
- if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
priv->rx_tail_addr,
STMMAC_CHAN0);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 922a443e3415..4ef605a90247 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
if (!info->egress_timer_scheduled) {
- mod_timer_pinned(&info->egress_timer, jiffies + 1);
+ mod_timer(&info->egress_timer, jiffies + 1);
info->egress_timer_scheduled = true;
}
}
@@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_ptr)
BUG();
/* Initialize the egress timer. */
- init_timer(&info->egress_timer);
+ init_timer_pinned(&info->egress_timer);
info->egress_timer.data = (long)info;
info->egress_timer.function = tile_net_handle_egress_timer;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index cc39cefeae45..9b3dc3c61e00 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1072,12 +1072,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
+ struct geneve_dev *geneve = netdev_priv(dev);
/* The max_mtu calculation does not take account of GENEVE
* options, to avoid excluding potentially valid
* configurations.
*/
- int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
- - dev->hard_header_len;
+ int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
+
+ if (geneve->remote.sa.sa_family == AF_INET6)
+ max_mtu -= sizeof(struct ipv6hdr);
+ else
+ max_mtu -= sizeof(struct iphdr);
if (new_mtu < 68)
return -EINVAL;
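
The MTU bound above now subtracts the correct outer IP header for the tunnel's address family instead of always assuming IPv4. Plugging in concrete numbers makes the arithmetic easy to check (the header sizes below are the conventional ones and are assumptions of this sketch, not values read from the driver):

    #include <stdio.h>

    #define IP_MAX_MTU        65535
    #define GENEVE_BASE_HLEN  16    /* 8-byte UDP header + 8-byte Geneve header */
    #define ETH_HLEN          14
    #define IPV4_HLEN         20
    #define IPV6_HLEN         40

    int main(void)
    {
        printf("IPv4 max_mtu = %d\n",
               IP_MAX_MTU - GENEVE_BASE_HLEN - ETH_HLEN - IPV4_HLEN); /* 65485 */
        printf("IPv6 max_mtu = %d\n",
               IP_MAX_MTU - GENEVE_BASE_HLEN - ETH_HLEN - IPV6_HLEN); /* 65465 */
        return 0;
    }
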
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0e7eff7f1cd2..8bcd78f94966 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2640,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.OutPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
+ skb->dev = macsec->real_dev;
len = skb->len;
ret = dev_queue_xmit(skb);
count_tx(dev, ret, len);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 2afa61b51d41..91177a4a32ad 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -57,6 +57,7 @@
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
- int ret;
- u16 val, delay;
+ int ret, val;
+ u16 delay;
if (!phydev->priv) {
dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
}
if (phy_interface_is_rgmii(phydev)) {
- ret = phy_write(phydev, MII_DP83867_PHYCTRL,
- (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
+ return val;
+ val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
+ val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
}
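
The dp83867 fix above converts a blind register write into a read-modify-write so the other PHYCTRL bits survive the FIFO-depth update. The pattern in isolation, using the same mask and shift values as the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define FIFO_DEPTH_SHIFT 14
    #define FIFO_DEPTH_MASK  (3u << FIFO_DEPTH_SHIFT)

    /* Read-modify-write: preserve every bit outside the field being changed.
     * The old code wrote the field value alone, zeroing the rest. */
    static uint16_t set_fifo_depth(uint16_t reg, unsigned depth)
    {
        reg &= (uint16_t)~FIFO_DEPTH_MASK;
        reg |= (uint16_t)((depth << FIFO_DEPTH_SHIFT) & FIFO_DEPTH_MASK);
        return reg;
    }

    int main(void)
    {
        uint16_t phyctrl = 0x0843;            /* other bits already configured */
        printf("0x%04x\n", set_fifo_depth(phyctrl, 3)); /* -> 0xc843 */
        return 0;
    }
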
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 8dedafa1a95d..a30ee427efab 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2601,8 +2601,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
- put_net(pch->chan_net);
- pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
@@ -3136,6 +3134,9 @@ ppp_disconnect_channel(struct channel *pch)
*/
static void ppp_destroy_channel(struct channel *pch)
{
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
+
atomic_dec(&channel_count);
if (!pch->file.dead) {
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 53759c315b97..877c9516e781 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
if (cdc_ncm_init(dev))
goto error2;
+ /* Some firmware versions need a pause here or they will silently fail
+ * to set up the interface properly. This value was decided
+ * empirically on a Sierra Wireless MC7455 running 02.08.02.00
+ * firmware.
+ */
+ usleep_range(10000, 20000);
+
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 4e257b8d8f3e..e9654a685381 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,12 +26,13 @@
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
#include <linux/suspend.h>
+#include <linux/acpi.h>
/* Information for net-next */
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "4"
+#define NET_VERSION "5"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -460,6 +461,11 @@
/* SRAM_IMPEDANCE */
#define RX_DRIVING_MASK 0x6000
+/* MAC PASSTHRU */
+#define AD_MASK 0xfee0
+#define EFUSE 0xcfdb
+#define PASS_THRU_MASK 0x1
+
enum rtl_register_content {
_1000bps = 0x10,
_100bps = 0x08,
@@ -624,6 +630,7 @@ struct r8152 {
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
+ void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
int intr_interval;
@@ -1035,6 +1042,65 @@ out1:
return ret;
}
+/* Devices containing RTL8153-AD can support a persistent
+ * host-system-provided MAC address.
+ * Examples of this are the Dell TB15 and Dell WD15 docks.
+ */
+static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ int ret = -EINVAL;
+ u32 ocp_data;
+ unsigned char buf[6];
+
+ /* test for -AD variant of RTL8153 */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if ((ocp_data & AD_MASK) != 0x1000)
+ return -ENODEV;
+
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1)
+ return -ENODEV;
+
+ /* returns _AUXMAC_#AABBCCDDEEFF# */
+ status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+ obj = (union acpi_object *)buffer.pointer;
+ if (!ACPI_SUCCESS(status))
+ return -ENODEV;
+ if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid buffer when reading pass-thru MAC addr: "
+ "(%d, %d)\n",
+ obj->type, obj->string.length);
+ goto amacout;
+ }
+ if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
+ strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid header when reading pass-thru MAC addr\n");
+ goto amacout;
+ }
+ ret = hex2bin(buf, obj->string.pointer + 9, 6);
+ if (!(ret == 0 && is_valid_ether_addr(buf))) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid MAC when reading pass-thru MAC addr: "
+ "%d, %pM\n", ret, buf);
+ ret = -EINVAL;
+ goto amacout;
+ }
+ memcpy(sa->sa_data, buf, 6);
+ ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
+ netif_info(tp, probe, tp->netdev,
+ "Using pass-thru MAC addr %pM\n", sa->sa_data);
+
+amacout:
+ kfree(obj);
+ return ret;
+}
+
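
vendor_mac_passthru_addr_read() above validates a fixed-format ACPI string: the "_AUXMAC_#" prefix, twelve hex digits, and a closing '#'. A standalone parser for that format (the driver compares the ACPI object length against 0x17, which appears to also count a terminator; this sketch checks the 22 visible characters instead):

    #include <stdio.h>
    #include <string.h>

    static int hexval(char c)
    {
        if (c >= '0' && c <= '9') return c - '0';
        c |= 0x20;                               /* fold to lowercase */
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        return -1;
    }

    /* Parse "_AUXMAC_#AABBCCDDEEFF#" into 6 octets; 0 on success. */
    static int parse_auxmac(const char *s, unsigned char mac[6])
    {
        if (strlen(s) != 22 || strncmp(s, "_AUXMAC_#", 9) != 0 || s[21] != '#')
            return -1;
        for (int i = 0; i < 6; i++) {
            int hi = hexval(s[9 + 2 * i]), lo = hexval(s[10 + 2 * i]);

            if (hi < 0 || lo < 0)
                return -1;                       /* hex2bin() would fail too */
            mac[i] = (unsigned char)(hi * 16 + lo);
        }
        return 0;
    }

    int main(void)
    {
        unsigned char mac[6];

        if (!parse_auxmac("_AUXMAC_#00249B1A2B3C#", mac))
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }
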
static int set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
@@ -1043,8 +1109,15 @@ static int set_ethernet_addr(struct r8152 *tp)
if (tp->version == RTL_VER_01)
ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
- else
- ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ else {
+ /* If this is not an RTL8153-AD, no eFuse MAC pass-through is set,
+ * or the system doesn't provide a valid _SB.AMAC, this is
+ * expected to return non-zero.
+ */
+ ret = vendor_mac_passthru_addr_read(tp, &sa);
+ if (ret < 0)
+ ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ }
if (ret < 0) {
netif_err(tp, probe, dev, "Get ether addr fail\n");
@@ -2295,10 +2368,6 @@ static u32 __rtl_get_wol(struct r8152 *tp)
u32 ocp_data;
u32 wolopts = 0;
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
- if (!(ocp_data & LAN_WAKE_EN))
- return 0;
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
if (ocp_data & LINK_ON_WAKE_EN)
wolopts |= WAKE_PHY;
@@ -2331,15 +2400,13 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
- ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+ ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
if (wolopts & WAKE_UCAST)
ocp_data |= UWF_EN;
if (wolopts & WAKE_BCAST)
ocp_data |= BWF_EN;
if (wolopts & WAKE_MCAST)
ocp_data |= MWF_EN;
- if (wolopts & WAKE_ANY)
- ocp_data |= LAN_WAKE_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
@@ -2408,9 +2475,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
if (enable) {
u32 ocp_data;
- r8153_u1u2en(tp, false);
- r8153_u2p3en(tp, false);
-
__rtl_set_wol(tp, WAKE_ANY);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2421,7 +2485,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
} else {
+ u32 ocp_data;
+
__rtl_set_wol(tp, tp->saved_wolopts);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data &= ~LINK_OFF_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ }
+}
+
+static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
+{
+ rtl_runtime_suspend_enable(tp, enable);
+
+ if (enable) {
+ r8153_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ } else {
r8153_u2p3en(tp, true);
r8153_u1u2en(tp, true);
}
@@ -3512,7 +3597,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
napi_disable(&tp->napi);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_stop_rx(tp);
- rtl_runtime_suspend_enable(tp, true);
+ tp->rtl_ops.autosuspend_en(tp, true);
} else {
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
@@ -3538,7 +3623,7 @@ static int rtl8152_resume(struct usb_interface *intf)
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- rtl_runtime_suspend_enable(tp, false);
+ tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
@@ -3557,7 +3642,7 @@ static int rtl8152_resume(struct usb_interface *intf)
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
if (tp->netdev->flags & IFF_UP)
- rtl_runtime_suspend_enable(tp, false);
+ tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}
@@ -4137,6 +4222,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
+ ops->autosuspend_en = rtl_runtime_suspend_enable;
break;
case RTL_VER_03:
@@ -4152,6 +4238,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
+ ops->autosuspend_en = rtl8153_runtime_enable;
break;
default:
@@ -4338,3 +4425,4 @@ module_usb_driver(rtl8152_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 61ba46404937..3bfb59209326 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -42,7 +42,6 @@
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
-#include <linux/usb/cdc.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
@@ -395,8 +394,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
dev->rx_urb_size = dev->hard_mtu;
- if (dev->rx_urb_size > old_rx_urb_size)
+ if (dev->rx_urb_size > old_rx_urb_size) {
+ usbnet_pause_rx(dev);
usbnet_unlink_rx_urbs(dev);
+ usbnet_resume_rx(dev);
+ }
}
/* max qlen depend on hard_mtu and rx_urb_size */
@@ -1508,8 +1510,9 @@ static void usbnet_bh (unsigned long param)
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
- !timer_pending (&dev->delay) &&
- !test_bit (EVENT_RX_HALT, &dev->flags)) {
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
if (temp < RX_QLEN(dev)) {
@@ -1968,143 +1971,6 @@ out:
return err;
}
-int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
- struct usb_interface *intf,
- u8 *buffer,
- int buflen)
-{
- /* duplicates are ignored */
- struct usb_cdc_union_desc *union_header = NULL;
-
- /* duplicates are not tolerated */
- struct usb_cdc_header_desc *header = NULL;
- struct usb_cdc_ether_desc *ether = NULL;
- struct usb_cdc_mdlm_detail_desc *detail = NULL;
- struct usb_cdc_mdlm_desc *desc = NULL;
-
- unsigned int elength;
- int cnt = 0;
-
- memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
- hdr->phonet_magic_present = false;
- while (buflen > 0) {
- elength = buffer[0];
- if (!elength) {
- dev_err(&intf->dev, "skipping garbage byte\n");
- elength = 1;
- goto next_desc;
- }
- if (buffer[1] != USB_DT_CS_INTERFACE) {
- dev_err(&intf->dev, "skipping garbage\n");
- goto next_desc;
- }
-
- switch (buffer[2]) {
- case USB_CDC_UNION_TYPE: /* we've found it */
- if (elength < sizeof(struct usb_cdc_union_desc))
- goto next_desc;
- if (union_header) {
- dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
- goto next_desc;
- }
- union_header = (struct usb_cdc_union_desc *)buffer;
- break;
- case USB_CDC_COUNTRY_TYPE:
- if (elength < sizeof(struct usb_cdc_country_functional_desc))
- goto next_desc;
- hdr->usb_cdc_country_functional_desc =
- (struct usb_cdc_country_functional_desc *)buffer;
- break;
- case USB_CDC_HEADER_TYPE:
- if (elength != sizeof(struct usb_cdc_header_desc))
- goto next_desc;
- if (header)
- return -EINVAL;
- header = (struct usb_cdc_header_desc *)buffer;
- break;
- case USB_CDC_ACM_TYPE:
- if (elength < sizeof(struct usb_cdc_acm_descriptor))
- goto next_desc;
- hdr->usb_cdc_acm_descriptor =
- (struct usb_cdc_acm_descriptor *)buffer;
- break;
- case USB_CDC_ETHERNET_TYPE:
- if (elength != sizeof(struct usb_cdc_ether_desc))
- goto next_desc;
- if (ether)
- return -EINVAL;
- ether = (struct usb_cdc_ether_desc *)buffer;
- break;
- case USB_CDC_CALL_MANAGEMENT_TYPE:
- if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
- goto next_desc;
- hdr->usb_cdc_call_mgmt_descriptor =
- (struct usb_cdc_call_mgmt_descriptor *)buffer;
- break;
- case USB_CDC_DMM_TYPE:
- if (elength < sizeof(struct usb_cdc_dmm_desc))
- goto next_desc;
- hdr->usb_cdc_dmm_desc =
- (struct usb_cdc_dmm_desc *)buffer;
- break;
- case USB_CDC_MDLM_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
- goto next_desc;
- if (desc)
- return -EINVAL;
- desc = (struct usb_cdc_mdlm_desc *)buffer;
- break;
- case USB_CDC_MDLM_DETAIL_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
- goto next_desc;
- if (detail)
- return -EINVAL;
- detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
- break;
- case USB_CDC_NCM_TYPE:
- if (elength < sizeof(struct usb_cdc_ncm_desc))
- goto next_desc;
- hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
- break;
- case USB_CDC_MBIM_TYPE:
- if (elength < sizeof(struct usb_cdc_mbim_desc))
- goto next_desc;
-
- hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
- break;
- case USB_CDC_MBIM_EXTENDED_TYPE:
- if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
- break;
- hdr->usb_cdc_mbim_extended_desc =
- (struct usb_cdc_mbim_extended_desc *)buffer;
- break;
- case CDC_PHONET_MAGIC_NUMBER:
- hdr->phonet_magic_present = true;
- break;
- default:
- /*
- * there are LOTS more CDC descriptors that
- * could legitimately be found here.
- */
- dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
- buffer[2], elength);
- goto next_desc;
- }
- cnt++;
-next_desc:
- buflen -= elength;
- buffer += elength;
- }
- hdr->usb_cdc_union_desc = union_header;
- hdr->usb_cdc_header_desc = header;
- hdr->usb_cdc_mdlm_detail_desc = detail;
- hdr->usb_cdc_mdlm_desc = desc;
- hdr->usb_cdc_ether_desc = ether;
- return cnt;
-}
-
-EXPORT_SYMBOL(cdc_parse_cdc_header);
-
/*
* The function can't be called inside suspend/resume callback,
* otherwise deadlock will be caused.
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 495e06d9f7e7..7e262ef06ede 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -287,14 +287,13 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
return -ENOMEM;
}
- disk->driverfs_dev = dev;
disk->first_minor = 0;
disk->fops = &nd_blk_fops;
disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
set_capacity(disk, 0);
- add_disk(disk);
+ device_add_disk(dev, disk);
if (nsblk_meta_size(nsblk)) {
int rc = nd_integrity_init(disk, nsblk_meta_size(nsblk));
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 68a7c3c1eed9..9dce03f420eb 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1243,7 +1243,6 @@ static int btt_blk_init(struct btt *btt)
}
nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
- btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
btt->btt_disk->first_minor = 0;
btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt;
@@ -1258,7 +1257,7 @@ static int btt_blk_init(struct btt *btt)
btt->btt_queue->queuedata = btt;
set_capacity(btt->btt_disk, 0);
- add_disk(btt->btt_disk);
+ device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
if (btt_meta_size(btt)) {
int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index f085f8bceae8..5e4e5c772ea5 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -312,7 +312,7 @@ EXPORT_SYMBOL(__nd_driver_register);
int nvdimm_revalidate_disk(struct gendisk *disk)
{
- struct device *dev = disk->driverfs_dev;
+ struct device *dev = disk_to_dev(disk)->parent;
struct nd_region *nd_region = to_nd_region(dev->parent);
const char *pol = nd_region->ro ? "only" : "write";
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 608fc4464574..36cb39047d5b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -283,6 +283,7 @@ static int pmem_attach_disk(struct device *dev,
blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
q->queuedata = pmem;
disk = alloc_disk_node(0, nid);
@@ -297,14 +298,13 @@ static int pmem_attach_disk(struct device *dev,
disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
- disk->driverfs_dev = dev;
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
disk->bb = &pmem->bb;
- add_disk(disk);
+ device_add_disk(dev, disk);
revalidate_disk(disk);
return 0;
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
index a39d9431eaec..b7c78a5b1f7a 100644
--- a/drivers/nvme/Kconfig
+++ b/drivers/nvme/Kconfig
@@ -1 +1,2 @@
source "drivers/nvme/host/Kconfig"
+source "drivers/nvme/target/Kconfig"
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
index 9421e829d2a9..0096a7fd1431 100644
--- a/drivers/nvme/Makefile
+++ b/drivers/nvme/Makefile
@@ -1,2 +1,3 @@
obj-y += host/
+obj-y += target/
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index d296fc3ae06e..db39d53cdfb9 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -24,3 +24,22 @@ config BLK_DEV_NVME_SCSI
to say N here, unless you run a distro that abuses the SCSI
emulation to provide stable device names for mount by id, like
some OpenSuSE and SLES versions.
+
+config NVME_FABRICS
+ tristate
+
+config NVME_RDMA
+ tristate "NVM Express over Fabrics RDMA host driver"
+ depends on INFINIBAND
+ depends on BLK_DEV_NVME
+ select NVME_FABRICS
+ select SG_POOL
+ help
+ This provides support for the NVMe over Fabrics protocol using
+ the RDMA (InfiniBand, RoCE, iWARP) transport. This allows you
+ to use remote block devices exported using the NVMe protocol.
+
+ To configure an NVMe over Fabrics controller, use the nvme-cli tool
+ from https://github.com/linux-nvme/nvme-cli.
+
+ If unsure, say N.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 9a3ca892b4a7..47abcec23514 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,8 +1,14 @@
obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
+obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
+obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
nvme-core-y := core.o
nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
nvme-y += pci.o
+
+nvme-fabrics-y += fabrics.o
+
+nvme-rdma-y += rdma.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1a51584a382b..7ff2e820bbf4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -30,6 +30,7 @@
#include <asm/unaligned.h>
#include "nvme.h"
+#include "fabrics.h"
#define NVME_MINORS (1U << MINORBITS)
@@ -47,8 +48,10 @@ unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
-static int nvme_major;
-module_param(nvme_major, int, 0);
+unsigned int nvme_max_retries = 5;
+module_param_named(max_retries, nvme_max_retries, uint, 0644);
+MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
+EXPORT_SYMBOL_GPL(nvme_max_retries);
static int nvme_char_major;
module_param(nvme_char_major, int, 0);
@@ -58,6 +61,23 @@ static DEFINE_SPINLOCK(dev_list_lock);
static struct class *nvme_class;
+void nvme_cancel_request(struct request *req, void *data, bool reserved)
+{
+ int status;
+
+ if (!blk_mq_request_started(req))
+ return;
+
+ dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
+ "Cancelling I/O %d", req->tag);
+
+ status = NVME_SC_ABORT_REQ;
+ if (blk_queue_dying(req->q))
+ status |= NVME_SC_DNR;
+ blk_mq_complete_request(req, status);
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_request);
+
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state)
{
@@ -68,7 +88,9 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
+ case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -79,6 +101,16 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
+ case NVME_CTRL_RECONNECTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_RECONNECTING:
+ switch (old_state) {
+ case NVME_CTRL_LIVE:
changed = true;
/* FALLTHRU */
default:
@@ -89,6 +121,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -174,21 +207,21 @@ void nvme_requeue_req(struct request *req)
EXPORT_SYMBOL_GPL(nvme_requeue_req);
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, unsigned int flags)
+ struct nvme_command *cmd, unsigned int flags, int qid)
{
- bool write = cmd->common.opcode & 1;
struct request *req;
- req = blk_mq_alloc_request(q, write, flags);
+ if (qid == NVME_QID_ANY) {
+ req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
+ } else {
+ req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
+ qid ? qid - 1 : 0);
+ }
if (IS_ERR(req))
return req;
req->cmd_type = REQ_TYPE_DRV_PRIV;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
- req->__data_len = 0;
- req->__sector = (sector_t) -1;
- req->bio = req->biotail = NULL;
-
req->cmd = (unsigned char *)cmd;
req->cmd_len = sizeof(struct nvme_command);
@@ -290,9 +323,9 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
if (req->cmd_type == REQ_TYPE_DRV_PRIV)
memcpy(cmd, req->cmd, sizeof(*cmd));
- else if (req->cmd_flags & REQ_FLUSH)
+ else if (req_op(req) == REQ_OP_FLUSH)
nvme_setup_flush(ns, cmd);
- else if (req->cmd_flags & REQ_DISCARD)
+ else if (req_op(req) == REQ_OP_DISCARD)
ret = nvme_setup_discard(ns, req, cmd);
else
nvme_setup_rw(ns, req, cmd);
@@ -307,12 +340,12 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
struct nvme_completion *cqe, void *buffer, unsigned bufflen,
- unsigned timeout)
+ unsigned timeout, int qid, int at_head, int flags)
{
struct request *req;
int ret;
- req = nvme_alloc_request(q, cmd, 0);
+ req = nvme_alloc_request(q, cmd, flags, qid);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -325,17 +358,19 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- blk_execute_rq(req->q, NULL, req, 0);
+ blk_execute_rq(req->q, NULL, req, at_head);
ret = req->errors;
out:
blk_mq_free_request(req);
return ret;
}
+EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
+ NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -344,7 +379,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
u32 *result, unsigned timeout)
{
- bool write = cmd->common.opcode & 1;
+ bool write = nvme_is_write(cmd);
struct nvme_completion cqe;
struct nvme_ns *ns = q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
@@ -353,7 +388,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void *meta = NULL;
int ret;
- req = nvme_alloc_request(q, cmd, 0);
+ req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -439,6 +474,74 @@ int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
result, timeout);
}
+static void nvme_keep_alive_end_io(struct request *rq, int error)
+{
+ struct nvme_ctrl *ctrl = rq->end_io_data;
+
+ blk_mq_free_request(rq);
+
+ if (error) {
+ dev_err(ctrl->device,
+ "failed nvme_keep_alive_end_io error=%d\n", error);
+ return;
+ }
+
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+
+static int nvme_keep_alive(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command c;
+ struct request *rq;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_keep_alive;
+
+ rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+ NVME_QID_ANY);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ rq->timeout = ctrl->kato * HZ;
+ rq->end_io_data = ctrl;
+
+ blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
+
+ return 0;
+}
+
+static void nvme_keep_alive_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_ctrl, ka_work);
+
+ if (nvme_keep_alive(ctrl)) {
+ /* allocation failure, reset the controller */
+ dev_err(ctrl->device, "keep-alive failed\n");
+ ctrl->ops->reset_ctrl(ctrl);
+ return;
+ }
+}
+
+void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
+
+void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
+{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+ cancel_delayed_work_sync(&ctrl->ka_work);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
+
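
The keep-alive machinery above is a self-rearming cycle: the delayed work sends the command, the completion handler schedules the next send kato seconds later, and a failure to even allocate the request escalates to a controller reset. A toy model of that control flow (no real timers or block layer involved):

    #include <stdio.h>

    /* Pretend the 4th send fails, as an allocation failure would. */
    static int send_keep_alive(int attempt)
    {
        return attempt == 3 ? -1 : 0;
    }

    int main(void)
    {
        int kato = 5;   /* seconds, from the Identify data (ctrl->kato) */

        for (int attempt = 0; attempt < 5; attempt++) {
            if (send_keep_alive(attempt)) {
                /* nvme_keep_alive_work(): alloc failure -> reset */
                printf("keep-alive send failed -> reset controller\n");
                return 1;
            }
            /* nvme_keep_alive_end_io(): completion re-arms the work */
            printf("keep-alive sent; completion re-arms work in %d s\n", kato);
        }
        return 0;
    }
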
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
struct nvme_command c = { };
@@ -500,10 +603,11 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_get_features;
c.features.nsid = cpu_to_le32(nsid);
- c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
if (ret >= 0)
*result = le32_to_cpu(cqe.result);
return ret;
@@ -518,11 +622,12 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
- c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
if (ret >= 0)
*result = le32_to_cpu(cqe.result);
return ret;
@@ -558,11 +663,22 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
&result);
- if (status)
+ if (status < 0)
return status;
- nr_io_queues = min(result & 0xffff, result >> 16) + 1;
- *count = min(*count, nr_io_queues);
+ /*
+ * Degraded controllers might return an error when setting the queue
+ * count. We still want to be able to bring them online and offer
+ * access to the admin queue, as that might be the only way to fix them up.
+ */
+ if (status > 0) {
+ dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
+ *count = 0;
+ } else {
+ nr_io_queues = min(result & 0xffff, result >> 16) + 1;
+ *count = min(*count, nr_io_queues);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
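
For reference, the Number of Queues feature result packs the allocated submission-queue count in the low 16 bits and the completion-queue count in the high 16 bits, both zero-based, which is why the code takes min(result & 0xffff, result >> 16) + 1. A worked decode:

    #include <stdint.h>
    #include <stdio.h>

    /* Low 16 bits = SQs allocated, high 16 bits = CQs allocated, both
     * 0-based, so usable I/O queues = min(SQs, CQs) + 1. */
    static unsigned decode_nr_io_queues(uint32_t result)
    {
        uint32_t nsq = result & 0xffff, ncq = result >> 16;

        return (nsq < ncq ? nsq : ncq) + 1;
    }

    int main(void)
    {
        /* e.g. controller grants 31 SQs (0x001e, 0-based) and 15 CQs
         * (0x000e, 0-based) -> 15 usable I/O queues */
        printf("%u usable I/O queues\n", decode_nr_io_queues(0x000e001eu));
        return 0;
    }
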
@@ -726,6 +842,7 @@ static void nvme_init_integrity(struct nvme_ns *ns)
{
struct blk_integrity integrity;
+ memset(&integrity, 0, sizeof(integrity));
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
integrity.profile = &t10_pi_type3_crc;
@@ -764,7 +881,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
ns->queue->limits.discard_alignment = logical_block_size;
ns->queue->limits.discard_granularity = logical_block_size;
- blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
+ blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
@@ -991,6 +1108,15 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
return ret;
+
+ /* Checking for ctrl->tagset is a trick to avoid sleeping on module
+ * load, since we only need the quirk on reset_controller. Notice
+ * that the HGST device needs this delay only in the firmware activation
+ * procedure; unfortunately we have no (easy) way to verify this.
+ */
+ if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
+ msleep(NVME_QUIRK_DELAY_AMOUNT);
+
return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
@@ -1088,6 +1214,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
struct nvme_id_ctrl *id;
u64 cap;
int ret, page_shift;
+ u32 max_hw_sectors;
ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
if (ret) {
@@ -1120,9 +1247,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
memcpy(ctrl->model, id->mn, sizeof(id->mn));
memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
if (id->mdts)
- ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
+ max_hw_sectors = 1 << (id->mdts + page_shift - 9);
else
- ctrl->max_hw_sectors = UINT_MAX;
+ max_hw_sectors = UINT_MAX;
+ ctrl->max_hw_sectors =
+ min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
unsigned int max_hw_sectors;
@@ -1138,9 +1267,33 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
nvme_set_queue_limits(ctrl, ctrl->admin_q);
+ ctrl->sgls = le32_to_cpu(id->sgls);
+ ctrl->kas = le16_to_cpu(id->kas);
+
+ if (ctrl->ops->is_fabrics) {
+ ctrl->icdoff = le16_to_cpu(id->icdoff);
+ ctrl->ioccsz = le32_to_cpu(id->ioccsz);
+ ctrl->iorcsz = le32_to_cpu(id->iorcsz);
+ ctrl->maxcmd = le16_to_cpu(id->maxcmd);
+
+ /*
+ * In fabrics we need to verify that the cntlid reported by
+ * Identify matches the one returned by the admin connect
+ */
+ if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+ ret = -EINVAL;
+
+ if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+ dev_err(ctrl->dev,
+ "keep-alive support is mandatory for fabrics\n");
+ ret = -EINVAL;
+ }
+ } else {
+ ctrl->cntlid = le16_to_cpu(id->cntlid);
+ }
kfree(id);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -1322,7 +1475,7 @@ static struct attribute *nvme_ns_attrs[] = {
NULL,
};
-static umode_t nvme_attrs_are_visible(struct kobject *kobj,
+static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1341,7 +1494,7 @@ static umode_t nvme_attrs_are_visible(struct kobject *kobj,
static const struct attribute_group nvme_ns_attr_group = {
.attrs = nvme_ns_attrs,
- .is_visible = nvme_attrs_are_visible,
+ .is_visible = nvme_ns_attrs_are_visible,
};
#define nvme_show_str_function(field) \
@@ -1367,6 +1520,49 @@ nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);
+static ssize_t nvme_sysfs_delete(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (device_remove_file_self(dev, attr))
+ ctrl->ops->delete_ctrl(ctrl);
+ return count;
+}
+static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
+
+static ssize_t nvme_sysfs_show_transport(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
+}
+static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
+
+static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ ctrl->ops->get_subsysnqn(ctrl));
+}
+static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
+
+static ssize_t nvme_sysfs_show_address(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -1374,11 +1570,38 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_serial.attr,
&dev_attr_firmware_rev.attr,
&dev_attr_cntlid.attr,
+ &dev_attr_delete_controller.attr,
+ &dev_attr_transport.attr,
+ &dev_attr_subsysnqn.attr,
+ &dev_attr_address.attr,
NULL
};
+#define CHECK_ATTR(ctrl, a, name) \
+ if ((a) == &dev_attr_##name.attr && \
+ !(ctrl)->ops->get_##name) \
+ return 0
+
+static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (a == &dev_attr_delete_controller.attr) {
+ if (!ctrl->ops->delete_ctrl)
+ return 0;
+ }
+
+ CHECK_ATTR(ctrl, a, subsysnqn);
+ CHECK_ATTR(ctrl, a, address);
+
+ return a->mode;
+}
+
static struct attribute_group nvme_dev_attrs_group = {
- .attrs = nvme_dev_attrs,
+ .attrs = nvme_dev_attrs,
+ .is_visible = nvme_dev_attrs_are_visible,
};
static const struct attribute_group *nvme_dev_attr_groups[] = {
@@ -1394,19 +1617,22 @@ static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
return nsa->ns_id - nsb->ns_id;
}
-static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
- struct nvme_ns *ns;
-
- lockdep_assert_held(&ctrl->namespaces_mutex);
+ struct nvme_ns *ns, *ret = NULL;
+ mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->ns_id == nsid)
- return ns;
+ if (ns->ns_id == nsid) {
+ kref_get(&ns->kref);
+ ret = ns;
+ break;
+ }
if (ns->ns_id > nsid)
break;
}
- return NULL;
+ mutex_unlock(&ctrl->namespaces_mutex);
+ return ret;
}
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
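
nvme_find_get_ns() above moves the locking inside the lookup and, crucially, takes the reference before dropping the mutex, so the returned namespace cannot be freed out from under the caller. The pattern in miniature (plain integers standing in for kref and mutex):

    #include <stdio.h>

    struct ns { int id; int refcount; struct ns *next; };

    /* Take the reference while the list lock is still held, so the object
     * cannot disappear between lookup and use by the caller. */
    static struct ns *find_get_ns(struct ns *head, int id)
    {
        struct ns *ret = NULL;

        /* mutex_lock(&namespaces_mutex); */
        for (struct ns *n = head; n; n = n->next) {
            if (n->id == id) {
                n->refcount++;            /* kref_get() under the lock */
                ret = n;
                break;
            }
            if (n->id > id)               /* sorted list: stop early */
                break;
        }
        /* mutex_unlock(&namespaces_mutex); */
        return ret;
    }

    int main(void)
    {
        struct ns b = { 2, 1, NULL }, a = { 1, 1, &b };
        struct ns *n = find_get_ns(&a, 2);

        printf("found ns %d, refcount %d\n", n->id, n->refcount);
        return 0;
    }
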
@@ -1415,8 +1641,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
struct gendisk *disk;
int node = dev_to_node(ctrl->dev);
- lockdep_assert_held(&ctrl->namespaces_mutex);
-
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
return;
@@ -1445,24 +1669,24 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
nvme_set_queue_limits(ctrl, ns->queue);
- disk->major = nvme_major;
- disk->first_minor = 0;
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
- disk->driverfs_dev = ctrl->device;
disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
if (nvme_revalidate_disk(ns->disk))
goto out_free_disk;
- list_add_tail_rcu(&ns->list, &ctrl->namespaces);
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_add_tail(&ns->list, &ctrl->namespaces);
+ mutex_unlock(&ctrl->namespaces_mutex);
+
kref_get(&ctrl->kref);
if (ns->type == NVME_NS_LIGHTNVM)
return;
- add_disk(ns->disk);
+ device_add_disk(ctrl->device, ns->disk);
if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_attr_group))
pr_warn("%s: failed to create sysfs group for identification\n",
@@ -1480,8 +1704,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
static void nvme_ns_remove(struct nvme_ns *ns)
{
- lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
@@ -1494,8 +1716,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
}
+
+ mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
- synchronize_rcu();
+ mutex_unlock(&ns->ctrl->namespaces_mutex);
+
nvme_put_ns(ns);
}
@@ -1503,14 +1728,26 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns;
- ns = nvme_find_ns(ctrl, nsid);
+ ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
if (revalidate_disk(ns->disk))
nvme_ns_remove(ns);
+ nvme_put_ns(ns);
} else
nvme_alloc_ns(ctrl, nsid);
}
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+ unsigned nsid)
+{
+ struct nvme_ns *ns, *next;
+
+ list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
+ if (ns->ns_id > nsid)
+ nvme_ns_remove(ns);
+ }
+}
+
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
struct nvme_ns *ns;
@@ -1525,7 +1762,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
for (i = 0; i < num_lists; i++) {
ret = nvme_identify_ns_list(ctrl, prev, ns_list);
if (ret)
- goto out;
+ goto free;
for (j = 0; j < min(nn, 1024U); j++) {
nsid = le32_to_cpu(ns_list[j]);
@@ -1535,32 +1772,30 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
nvme_validate_ns(ctrl, nsid);
while (++prev < nsid) {
- ns = nvme_find_ns(ctrl, prev);
- if (ns)
+ ns = nvme_find_get_ns(ctrl, prev);
+ if (ns) {
nvme_ns_remove(ns);
+ nvme_put_ns(ns);
+ }
}
}
nn -= j;
}
out:
+ nvme_remove_invalid_namespaces(ctrl, prev);
+ free:
kfree(ns_list);
return ret;
}
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
- struct nvme_ns *ns, *next;
unsigned i;
- lockdep_assert_held(&ctrl->namespaces_mutex);
-
for (i = 1; i <= nn; i++)
nvme_validate_ns(ctrl, i);
- list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->ns_id > nn)
- nvme_ns_remove(ns);
- }
+ nvme_remove_invalid_namespaces(ctrl, nn);
}
static void nvme_scan_work(struct work_struct *work)
@@ -1576,7 +1811,6 @@ static void nvme_scan_work(struct work_struct *work)
if (nvme_identify_ctrl(ctrl, &id))
return;
- mutex_lock(&ctrl->namespaces_mutex);
nn = le32_to_cpu(id->nn);
if (ctrl->vs >= NVME_VS(1, 1) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -1585,6 +1819,7 @@ static void nvme_scan_work(struct work_struct *work)
}
nvme_scan_ns_sequential(ctrl, nn);
done:
+ mutex_lock(&ctrl->namespaces_mutex);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
@@ -1604,6 +1839,11 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);
+/*
+ * This function iterates the namespace list unlocked to allow recovery from
+ * controller failure. It is up to the caller to ensure the namespace list is
+ * not modified by scan work while this function is executing.
+ */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns, *next;
@@ -1617,10 +1857,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
if (ctrl->state == NVME_CTRL_DEAD)
nvme_kill_queues(ctrl);
- mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
- mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
@@ -1791,11 +2029,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
- if (!kref_get_unless_zero(&ns->kref))
- continue;
-
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
/*
* Revalidating a dead namespace sets capacity to 0. This will
* end buffered writers dirtying pages that can't be synced.
@@ -1806,10 +2041,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
blk_set_queue_dying(ns->queue);
blk_mq_abort_requeue_list(ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
-
- nvme_put_ns(ns);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
@@ -1817,8 +2050,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
spin_lock_irq(ns->queue->queue_lock);
queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
spin_unlock_irq(ns->queue->queue_lock);
@@ -1826,7 +2059,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
blk_mq_cancel_requeue_work(ns->queue);
blk_mq_stop_hw_queues(ns->queue);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -1834,13 +2067,13 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- rcu_read_lock();
- list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
blk_mq_kick_requeue_list(ns->queue);
}
- rcu_read_unlock();
+ mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
@@ -1848,16 +2081,10 @@ int __init nvme_core_init(void)
{
int result;
- result = register_blkdev(nvme_major, "nvme");
- if (result < 0)
- return result;
- else if (result > 0)
- nvme_major = result;
-
result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
&nvme_dev_fops);
if (result < 0)
- goto unregister_blkdev;
+ return result;
else if (result > 0)
nvme_char_major = result;
@@ -1871,8 +2098,6 @@ int __init nvme_core_init(void)
unregister_chrdev:
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
- unregister_blkdev:
- unregister_blkdev(nvme_major, "nvme");
return result;
}
@@ -1880,7 +2105,6 @@ void nvme_core_exit(void)
{
class_destroy(nvme_class);
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
- unregister_blkdev(nvme_major, "nvme");
}
MODULE_LICENSE("GPL");
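The core.c hunks above trade the RCU-protected namespace list for plain
mutex protection: list_add()/list_del_init() now run under
namespaces_mutex, and the queue helpers walk the list under the same
mutex instead of rcu_read_lock() plus a per-entry
kref_get_unless_zero(). A minimal sketch of the resulting pattern
(do_something() is a hypothetical stand-in):

static void nvme_walk_namespaces_example(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * The mutex keeps the entry from being freed while we
		 * look at it, so no reference juggling is required.
		 */
		do_something(ns);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}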
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
new file mode 100644
index 000000000000..dc996761042f
--- /dev/null
+++ b/drivers/nvme/host/fabrics.c
@@ -0,0 +1,952 @@
+/*
+ * NVMe over Fabrics common host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include "nvme.h"
+#include "fabrics.h"
+
+static LIST_HEAD(nvmf_transports);
+static DEFINE_MUTEX(nvmf_transports_mutex);
+
+static LIST_HEAD(nvmf_hosts);
+static DEFINE_MUTEX(nvmf_hosts_mutex);
+
+static struct nvmf_host *nvmf_default_host;
+
+static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
+{
+ struct nvmf_host *host;
+
+ list_for_each_entry(host, &nvmf_hosts, list) {
+ if (!strcmp(host->nqn, hostnqn))
+ return host;
+ }
+
+ return NULL;
+}
+
+static struct nvmf_host *nvmf_host_add(const char *hostnqn)
+{
+ struct nvmf_host *host;
+
+ mutex_lock(&nvmf_hosts_mutex);
+ host = __nvmf_host_find(hostnqn);
+ if (host)
+ goto out_unlock;
+
+ host = kmalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ goto out_unlock;
+
+ kref_init(&host->ref);
+ memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+ uuid_le_gen(&host->id);
+
+ list_add_tail(&host->list, &nvmf_hosts);
+out_unlock:
+ mutex_unlock(&nvmf_hosts_mutex);
+ return host;
+}
+
+static struct nvmf_host *nvmf_host_default(void)
+{
+ struct nvmf_host *host;
+
+ host = kmalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ kref_init(&host->ref);
+ uuid_le_gen(&host->id);
+ snprintf(host->nqn, NVMF_NQN_SIZE,
+ "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUl", &host->id);
+
+ mutex_lock(&nvmf_hosts_mutex);
+ list_add_tail(&host->list, &nvmf_hosts);
+ mutex_unlock(&nvmf_hosts_mutex);
+
+ return host;
+}
+
+static void nvmf_host_destroy(struct kref *ref)
+{
+ struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);
+
+ mutex_lock(&nvmf_hosts_mutex);
+ list_del(&host->list);
+ mutex_unlock(&nvmf_hosts_mutex);
+
+ kfree(host);
+}
+
+static void nvmf_host_put(struct nvmf_host *host)
+{
+ if (host)
+ kref_put(&host->ref, nvmf_host_destroy);
+}
+
+/**
+ * nvmf_get_address() - Get address/port
+ * @ctrl: Host NVMe controller instance from which we got the address
+ * @buf: OUTPUT parameter that will contain the address/port
+ * @size: buffer size
+ */
+int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ return snprintf(buf, size, "traddr=%s,trsvcid=%s\n",
+ ctrl->opts->traddr, ctrl->opts->trsvcid);
+}
+EXPORT_SYMBOL_GPL(nvmf_get_address);
+
+/**
+ * nvmf_get_subsysnqn() - Get subsystem NQN
+ * @ctrl: Host NVMe controller instance from which we got the NQN
+ */
+const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts->subsysnqn;
+}
+EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn);
+
+/**
+ * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated NVMe controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: OUTPUT parameter that will contain the value of
+ * the property after a successful read.
+ *
+ * Used by the host system to retrieve a 32-bit capsule property value
+ * from an NVMe controller on the target system.
+ *
+ * ("Capsule property" is an "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful read
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+{
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.prop_get.opcode = nvme_fabrics_command;
+ cmd.prop_get.fctype = nvme_fabrics_type_property_get;
+ cmd.prop_get.offset = cpu_to_le32(off);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+
+ if (ret >= 0)
+ *val = le64_to_cpu(cqe.result64);
+ if (unlikely(ret != 0))
+ dev_err(ctrl->device,
+ "Property Get error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_read32);
+
+/**
+ * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: OUTPUT parameter that will contain the value of
+ * the property after a successful read.
+ *
+ * Used by the host system to retrieve a 64-bit capsule property value
+ * from an NVMe controller on the target system.
+ *
+ * ("Capsule property" is an "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful read
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.prop_get.opcode = nvme_fabrics_command;
+ cmd.prop_get.fctype = nvme_fabrics_type_property_get;
+ cmd.prop_get.attrib = 1;
+ cmd.prop_get.offset = cpu_to_le32(off);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+
+ if (ret >= 0)
+ *val = le64_to_cpu(cqe.result64);
+ if (unlikely(ret != 0))
+ dev_err(ctrl->device,
+ "Property Get error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_read64);
+
+/**
+ * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated NVMe controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: Input parameter that contains the value to be
+ * written to the property.
+ *
+ * Used by the NVMe host system to write a 32-bit capsule property value
+ * to an NVMe controller on the target system.
+ *
+ * ("Capsule property" is an "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful write
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+{
+ struct nvme_command cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.prop_set.opcode = nvme_fabrics_command;
+ cmd.prop_set.fctype = nvme_fabrics_type_property_set;
+ cmd.prop_set.attrib = 0;
+ cmd.prop_set.offset = cpu_to_le32(off);
+ cmd.prop_set.value = cpu_to_le64(val);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+ if (unlikely(ret))
+ dev_err(ctrl->device,
+ "Property Set error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_write32);
+
+/**
+ * nvmf_log_connect_error() - Error-parsing-diagnostic print
+ * out function for connect() errors.
+ *
+ * @ctrl: the specific /dev/nvmeX device that had the error.
+ *
+ * @errval: Error code to be decoded in a more human-friendly
+ * printout.
+ *
+ * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
+ *
+ * @cmd: This is the SQE portion of a submission capsule.
+ *
+ * @data: This is the "Data" portion of a submission capsule.
+ */
+static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
+ int errval, int offset, struct nvme_command *cmd,
+ struct nvmf_connect_data *data)
+{
+ int err_sctype = errval & (~NVME_SC_DNR);
+
+ switch (err_sctype) {
+
+ case (NVME_SC_CONNECT_INVALID_PARAM):
+ if (offset >> 16) {
+ char *inv_data = "Connect Invalid Data Parameter";
+
+ switch (offset & 0xffff) {
+ case (offsetof(struct nvmf_connect_data, cntlid)):
+ dev_err(ctrl->device,
+ "%s, cntlid: %d\n",
+ inv_data, data->cntlid);
+ break;
+ case (offsetof(struct nvmf_connect_data, hostnqn)):
+ dev_err(ctrl->device,
+ "%s, hostnqn \"%s\"\n",
+ inv_data, data->hostnqn);
+ break;
+ case (offsetof(struct nvmf_connect_data, subsysnqn)):
+ dev_err(ctrl->device,
+ "%s, subsysnqn \"%s\"\n",
+ inv_data, data->subsysnqn);
+ break;
+ default:
+ dev_err(ctrl->device,
+ "%s, starting byte offset: %d\n",
+ inv_data, offset & 0xffff);
+ break;
+ }
+ } else {
+ char *inv_sqe = "Connect Invalid SQE Parameter";
+
+ switch (offset) {
+ case (offsetof(struct nvmf_connect_command, qid)):
+ dev_err(ctrl->device,
+ "%s, qid %d\n",
+ inv_sqe, cmd->connect.qid);
+ break;
+ default:
+ dev_err(ctrl->device,
+ "%s, starting byte offset: %d\n",
+ inv_sqe, offset);
+ }
+ }
+ break;
+ default:
+ dev_err(ctrl->device,
+ "Connect command failed, error wo/DNR bit: %d\n",
+ err_sctype);
+ break;
+ } /* switch (err_sctype) */
+}
+
+/**
+ * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
+ * API function.
+ * @ctrl: Host nvme controller instance used to request
+ * a new NVMe controller allocation on the target
+ * system and establish an NVMe Admin connection to
+ * that controller.
+ *
+ * This function enables an NVMe host device to request a new allocation of
+ * an NVMe controller resource on a target system, as well as establish a
+ * fabrics-protocol connection of the NVMe Admin queue between the
+ * host system device and the allocated NVMe controller on the
+ * target system via an NVMe Fabrics "Connect" command.
+ *
+ * Return:
+ * 0: success
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ *
+ */
+int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ struct nvmf_connect_data *data;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.connect.opcode = nvme_fabrics_command;
+ cmd.connect.fctype = nvme_fabrics_type_connect;
+ cmd.connect.qid = 0;
+ cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+ /*
+ * The keep-alive timeout option is given in seconds while the KATO
+ * field on the wire is in milliseconds, so convert (* 1000) and add
+ * a grace period for controller kato enforcement
+ */
+ cmd.connect.kato = ctrl->opts->discovery_nqn ? 0 :
+ cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000);
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+ data->cntlid = cpu_to_le16(0xffff);
+ strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe,
+ data, sizeof(*data), 0, NVME_QID_ANY, 1,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ if (ret) {
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+ &cmd, data);
+ goto out_free_data;
+ }
+
+ ctrl->cntlid = le16_to_cpu(cqe.result16);
+
+out_free_data:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
+
+/**
+ * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
+ * API function.
+ * @ctrl: Host nvme controller instance used to establish an
+ * NVMe I/O queue connection to the already allocated NVMe
+ * controller on the target system.
+ * @qid: NVMe I/O queue number for the new I/O connection between
+ * host and target (note qid == 0 is illegal as this is
+ * the Admin queue, per NVMe standard).
+ *
+ * This function issues a fabrics-protocol connection
+ * of an NVMe I/O queue (via an NVMe Fabrics "Connect" command)
+ * between the host system device and the allocated NVMe controller
+ * on the target system.
+ *
+ * Return:
+ * 0: success
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
+{
+ struct nvme_command cmd;
+ struct nvmf_connect_data *data;
+ struct nvme_completion cqe;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.connect.opcode = nvme_fabrics_command;
+ cmd.connect.fctype = nvme_fabrics_type_connect;
+ cmd.connect.qid = cpu_to_le16(qid);
+ cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+ data->cntlid = cpu_to_le16(ctrl->cntlid);
+ strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+ ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &cqe,
+ data, sizeof(*data), 0, qid, 1,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ if (ret) {
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+ &cmd, data);
+ }
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
+
+/**
+ * nvmf_register_transport() - NVMe Fabrics Library registration function.
+ * @ops: Transport ops instance to be registered to the
+ * common fabrics library.
+ *
+ * API function that registers the type of specific transport fabric
+ * being implemented to the common NVMe fabrics library. Part of
+ * the overall init sequence of starting up a fabrics driver.
+ */
+void nvmf_register_transport(struct nvmf_transport_ops *ops)
+{
+ mutex_lock(&nvmf_transports_mutex);
+ list_add_tail(&ops->entry, &nvmf_transports);
+ mutex_unlock(&nvmf_transports_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmf_register_transport);
+
+/**
+ * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
+ * @ops: Transport ops instance to be unregistered from the
+ * common fabrics library.
+ *
+ * Fabrics API function that unregisters the type of specific transport
+ * fabric being implemented from the common NVMe fabrics library.
+ * Part of the overall exit sequence of unloading the implemented driver.
+ */
+void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
+{
+ mutex_lock(&nvmf_transports_mutex);
+ list_del(&ops->entry);
+ mutex_unlock(&nvmf_transports_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
+
+static struct nvmf_transport_ops *nvmf_lookup_transport(
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvmf_transport_ops *ops;
+
+ lockdep_assert_held(&nvmf_transports_mutex);
+
+ list_for_each_entry(ops, &nvmf_transports, entry) {
+ if (strcmp(ops->name, opts->transport) == 0)
+ return ops;
+ }
+
+ return NULL;
+}
+
+static const match_table_t opt_tokens = {
+ { NVMF_OPT_TRANSPORT, "transport=%s" },
+ { NVMF_OPT_TRADDR, "traddr=%s" },
+ { NVMF_OPT_TRSVCID, "trsvcid=%s" },
+ { NVMF_OPT_NQN, "nqn=%s" },
+ { NVMF_OPT_QUEUE_SIZE, "queue_size=%d" },
+ { NVMF_OPT_NR_IO_QUEUES, "nr_io_queues=%d" },
+ { NVMF_OPT_RECONNECT_DELAY, "reconnect_delay=%d" },
+ { NVMF_OPT_KATO, "keep_alive_tmo=%d" },
+ { NVMF_OPT_HOSTNQN, "hostnqn=%s" },
+ { NVMF_OPT_ERR, NULL }
+};
+
+static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ size_t nqnlen = 0;
+
+ /* Set defaults */
+ opts->queue_size = NVMF_DEF_QUEUE_SIZE;
+ opts->nr_io_queues = num_online_cpus();
+ opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case NVMF_OPT_TRANSPORT:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->transport = p;
+ break;
+ case NVMF_OPT_NQN:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->subsysnqn = p;
+ nqnlen = strlen(opts->subsysnqn);
+ if (nqnlen >= NVMF_NQN_SIZE) {
+ pr_err("%s needs to be < %d bytes\n",
+ opts->subsysnqn, NVMF_NQN_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->discovery_nqn =
+ !(strcmp(opts->subsysnqn,
+ NVME_DISC_SUBSYS_NAME));
+ if (opts->discovery_nqn)
+ opts->nr_io_queues = 0;
+ break;
+ case NVMF_OPT_TRADDR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->traddr = p;
+ break;
+ case NVMF_OPT_TRSVCID:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->trsvcid = p;
+ break;
+ case NVMF_OPT_QUEUE_SIZE:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token < NVMF_MIN_QUEUE_SIZE ||
+ token > NVMF_MAX_QUEUE_SIZE) {
+ pr_err("Invalid queue_size %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->queue_size = token;
+ break;
+ case NVMF_OPT_NR_IO_QUEUES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid number of IOQs %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_io_queues = min_t(unsigned int,
+ num_online_cpus(), token);
+ break;
+ case NVMF_OPT_KATO:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (opts->discovery_nqn) {
+ pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (token < 0) {
+ pr_err("Invalid keep_alive_tmo %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ } else if (token == 0) {
+ /* Allowed for debug */
+ pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
+ }
+ opts->kato = token;
+ break;
+ case NVMF_OPT_HOSTNQN:
+ if (opts->host) {
+ pr_err("hostnqn already user-assigned: %s\n",
+ opts->host->nqn);
+ ret = -EADDRINUSE;
+ goto out;
+ }
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ nqnlen = strlen(p);
+ if (nqnlen >= NVMF_NQN_SIZE) {
+ pr_err("%s needs to be < %d bytes\n",
+ p, NVMF_NQN_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->host = nvmf_host_add(p);
+ if (!opts->host) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ break;
+ case NVMF_OPT_RECONNECT_DELAY:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid reconnect_delay %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->reconnect_delay = token;
+ break;
+ default:
+ pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (!opts->host) {
+ kref_get(&nvmf_default_host->ref);
+ opts->host = nvmf_default_host;
+ }
+
+out:
+ if (!opts->discovery_nqn && !opts->kato)
+ opts->kato = NVME_DEFAULT_KATO;
+ kfree(options);
+ return ret;
+}
+
+static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
+ unsigned int required_opts)
+{
+ if ((opts->mask & required_opts) != required_opts) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
+ if ((opt_tokens[i].token & required_opts) &&
+ !(opt_tokens[i].token & opts->mask)) {
+ pr_warn("missing parameter '%s'\n",
+ opt_tokens[i].pattern);
+ }
+ }
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
+ unsigned int allowed_opts)
+{
+ if (opts->mask & ~allowed_opts) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
+ if (opt_tokens[i].token & ~allowed_opts) {
+ pr_warn("invalid parameter '%s'\n",
+ opt_tokens[i].pattern);
+ }
+ }
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void nvmf_free_options(struct nvmf_ctrl_options *opts)
+{
+ nvmf_host_put(opts->host);
+ kfree(opts->transport);
+ kfree(opts->traddr);
+ kfree(opts->trsvcid);
+ kfree(opts->subsysnqn);
+ kfree(opts);
+}
+EXPORT_SYMBOL_GPL(nvmf_free_options);
+
+#define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
+#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
+ NVMF_OPT_KATO | NVMF_OPT_HOSTNQN)
+
+static struct nvme_ctrl *
+nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
+{
+ struct nvmf_ctrl_options *opts;
+ struct nvmf_transport_ops *ops;
+ struct nvme_ctrl *ctrl;
+ int ret;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ ret = nvmf_parse_options(opts, buf);
+ if (ret)
+ goto out_free_opts;
+
+ /*
+ * Check the generic options first as we need a valid transport for
+ * the lookup below. Then clear the generic flags so that transport
+ * drivers don't have to care about them.
+ */
+ ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
+ if (ret)
+ goto out_free_opts;
+ opts->mask &= ~NVMF_REQUIRED_OPTS;
+
+ mutex_lock(&nvmf_transports_mutex);
+ ops = nvmf_lookup_transport(opts);
+ if (!ops) {
+ pr_info("no handler found for transport %s.\n",
+ opts->transport);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = nvmf_check_required_opts(opts, ops->required_opts);
+ if (ret)
+ goto out_unlock;
+ ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
+ ops->allowed_opts | ops->required_opts);
+ if (ret)
+ goto out_unlock;
+
+ ctrl = ops->create_ctrl(dev, opts);
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ goto out_unlock;
+ }
+
+ mutex_unlock(&nvmf_transports_mutex);
+ return ctrl;
+
+out_unlock:
+ mutex_unlock(&nvmf_transports_mutex);
+out_free_opts:
+ nvmf_host_put(opts->host);
+ kfree(opts);
+ return ERR_PTR(ret);
+}
+
+static struct class *nvmf_class;
+static struct device *nvmf_device;
+static DEFINE_MUTEX(nvmf_dev_mutex);
+
+static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *seq_file = file->private_data;
+ struct nvme_ctrl *ctrl;
+ const char *buf;
+ int ret = 0;
+
+ if (count > PAGE_SIZE)
+ return -ENOMEM;
+
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ mutex_lock(&nvmf_dev_mutex);
+ if (seq_file->private) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ctrl = nvmf_create_ctrl(nvmf_device, buf, count);
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ goto out_unlock;
+ }
+
+ seq_file->private = ctrl;
+
+out_unlock:
+ mutex_unlock(&nvmf_dev_mutex);
+ kfree(buf);
+ return ret ? ret : count;
+}
+
+static int nvmf_dev_show(struct seq_file *seq_file, void *private)
+{
+ struct nvme_ctrl *ctrl;
+ int ret = 0;
+
+ mutex_lock(&nvmf_dev_mutex);
+ ctrl = seq_file->private;
+ if (!ctrl) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ seq_printf(seq_file, "instance=%d,cntlid=%d\n",
+ ctrl->instance, ctrl->cntlid);
+
+out_unlock:
+ mutex_unlock(&nvmf_dev_mutex);
+ return ret;
+}
+
+static int nvmf_dev_open(struct inode *inode, struct file *file)
+{
+ /*
+ * The miscdevice code initializes file->private_data, but doesn't
+ * make use of it later.
+ */
+ file->private_data = NULL;
+ return single_open(file, nvmf_dev_show, NULL);
+}
+
+static int nvmf_dev_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq_file = file->private_data;
+ struct nvme_ctrl *ctrl = seq_file->private;
+
+ if (ctrl)
+ nvme_put_ctrl(ctrl);
+ return single_release(inode, file);
+}
+
+static const struct file_operations nvmf_dev_fops = {
+ .owner = THIS_MODULE,
+ .write = nvmf_dev_write,
+ .read = seq_read,
+ .open = nvmf_dev_open,
+ .release = nvmf_dev_release,
+};
+
+static struct miscdevice nvmf_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "nvme-fabrics",
+ .fops = &nvmf_dev_fops,
+};
+
+static int __init nvmf_init(void)
+{
+ int ret;
+
+ nvmf_default_host = nvmf_host_default();
+ if (!nvmf_default_host)
+ return -ENOMEM;
+
+ nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
+ if (IS_ERR(nvmf_class)) {
+ pr_err("couldn't register class nvme-fabrics\n");
+ ret = PTR_ERR(nvmf_class);
+ goto out_free_host;
+ }
+
+ nvmf_device =
+ device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
+ if (IS_ERR(nvmf_device)) {
+ pr_err("couldn't create nvme-fabris device!\n");
+ ret = PTR_ERR(nvmf_device);
+ goto out_destroy_class;
+ }
+
+ ret = misc_register(&nvmf_misc);
+ if (ret) {
+ pr_err("couldn't register misc device: %d\n", ret);
+ goto out_destroy_device;
+ }
+
+ return 0;
+
+out_destroy_device:
+ device_destroy(nvmf_class, MKDEV(0, 0));
+out_destroy_class:
+ class_destroy(nvmf_class);
+out_free_host:
+ nvmf_host_put(nvmf_default_host);
+ return ret;
+}
+
+static void __exit nvmf_exit(void)
+{
+ misc_deregister(&nvmf_misc);
+ device_destroy(nvmf_class, MKDEV(0, 0));
+ class_destroy(nvmf_class);
+ nvmf_host_put(nvmf_default_host);
+
+ BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
+}
+
+MODULE_LICENSE("GPL v2");
+
+module_init(nvmf_init);
+module_exit(nvmf_exit);
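The misc device above accepts a comma-separated option string (parsed by
nvmf_parse_options()) and, on success, reports the new controller's
instance and cntlid back through the same open file. A minimal userspace
sketch, to be run as root; the transport address and NQN below are
placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *opts =
		"transport=rdma,traddr=192.168.0.10,trsvcid=4420,"
		"nqn=nqn.2016-06.example:subsys1";
	char reply[64];
	ssize_t n;
	int fd;

	fd = open("/dev/nvme-fabrics", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, opts, strlen(opts)) < 0) {	/* creates the controller */
		perror("write");
		close(fd);
		return 1;
	}
	n = read(fd, reply, sizeof(reply) - 1);	/* "instance=%d,cntlid=%d" */
	if (n > 0) {
		reply[n] = '\0';
		printf("%s", reply);
	}
	close(fd);
	return 0;
}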
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
new file mode 100644
index 000000000000..89df52c8be97
--- /dev/null
+++ b/drivers/nvme/host/fabrics.h
@@ -0,0 +1,132 @@
+/*
+ * NVMe over Fabrics common host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _NVME_FABRICS_H
+#define _NVME_FABRICS_H 1
+
+#include <linux/in.h>
+#include <linux/inet.h>
+
+#define NVMF_MIN_QUEUE_SIZE 16
+#define NVMF_MAX_QUEUE_SIZE 1024
+#define NVMF_DEF_QUEUE_SIZE 128
+#define NVMF_DEF_RECONNECT_DELAY 10
+
+/*
+ * Define a host as seen by the target. We allocate one at boot, but also
+ * allow overriding it when creating controllers. This is both to provide
+ * persistence of the Host NQN over multiple boots, and to allow using
+ * multiple ones, for example in a container scenario. Because we must not
+ * use different Host NQNs with the same Host ID we generate a Host ID and
+ * use this structure to keep track of the relation between the two.
+ */
+struct nvmf_host {
+ struct kref ref;
+ struct list_head list;
+ char nqn[NVMF_NQN_SIZE];
+ uuid_le id;
+};
+
+/**
+ * enum nvmf_parsing_opts - used to define the sysfs parsing options.
+ */
+enum {
+ NVMF_OPT_ERR = 0,
+ NVMF_OPT_TRANSPORT = 1 << 0,
+ NVMF_OPT_NQN = 1 << 1,
+ NVMF_OPT_TRADDR = 1 << 2,
+ NVMF_OPT_TRSVCID = 1 << 3,
+ NVMF_OPT_QUEUE_SIZE = 1 << 4,
+ NVMF_OPT_NR_IO_QUEUES = 1 << 5,
+ NVMF_OPT_TL_RETRY_COUNT = 1 << 6,
+ NVMF_OPT_KATO = 1 << 7,
+ NVMF_OPT_HOSTNQN = 1 << 8,
+ NVMF_OPT_RECONNECT_DELAY = 1 << 9,
+};
+
+/**
+ * struct nvmf_ctrl_options - Used to hold the options specified
+ * with the parsing opts enum.
+ * @mask: Used by the fabrics library to parse through sysfs options
+ * on adding an NVMe controller.
+ * @transport: Holds the fabric transport "technology name" (for lack of a
+ * better description) that will be used by an NVMe controller
+ * being added.
+ * @subsysnqn: Holds the fully qualified NQN subsystem name (format defined
+ * in the NVMe specification, "NVMe Qualified Names").
+ * @traddr: network address that will be used by the host to communicate
+ * to the added NVMe controller.
+ * @trsvcid: network port used for host-controller communication.
+ * @queue_size: Number of IO queue elements.
+ * @nr_io_queues: Number of controller IO queues that will be established.
+ * @reconnect_delay: Time between two consecutive reconnect attempts.
+ * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
+ * @kato: Keep-alive timeout.
+ * @host: Virtual NVMe host, contains the NQN and Host ID.
+ */
+struct nvmf_ctrl_options {
+ unsigned mask;
+ char *transport;
+ char *subsysnqn;
+ char *traddr;
+ char *trsvcid;
+ size_t queue_size;
+ unsigned int nr_io_queues;
+ unsigned int reconnect_delay;
+ bool discovery_nqn;
+ unsigned int kato;
+ struct nvmf_host *host;
+};
+
+/*
+ * struct nvmf_transport_ops - used to register a specific fabric
+ * transport implementation with the common NVMe fabrics library.
+ * @entry: Used by the fabrics library to add the new
+ * registration entry to its internal list of transports.
+ * @name: Name of the NVMe fabric driver implementation.
+ * @required_opts: sysfs command-line options that must be specified
+ * when adding a new NVMe controller.
+ * @allowed_opts: sysfs command-line options that can be specified
+ * when adding a new NVMe controller.
+ * @create_ctrl(): function pointer to the transport-specific routine
+ * that starts up the fabric and creates a connection
+ * to an NVMe controller using that fabric technology.
+ *
+ * Notes:
+ * 1. At minimum, 'required_opts' and 'allowed_opts' should
+ * be set to the same enum parsing options defined earlier.
+ * 2. create_ctrl() must be defined (even if it does nothing)
+ */
+struct nvmf_transport_ops {
+ struct list_head entry;
+ const char *name;
+ int required_opts;
+ int allowed_opts;
+ struct nvme_ctrl *(*create_ctrl)(struct device *dev,
+ struct nvmf_ctrl_options *opts);
+};
+
+int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
+int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
+void nvmf_register_transport(struct nvmf_transport_ops *ops);
+void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
+void nvmf_free_options(struct nvmf_ctrl_options *opts);
+const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl);
+int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
+
+#endif /* _NVME_FABRICS_H */
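Taken together, the header implies the following shape for a transport
driver: fill in an nvmf_transport_ops, register it at module init, and
have create_ctrl() drive the property and connect helpers. A compressed
sketch under those assumptions; the "demo" transport is entirely
hypothetical and its create_ctrl() is a stub:

#include <linux/module.h>
#include "nvme.h"
#include "fabrics.h"

static struct nvme_ctrl *demo_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	/*
	 * A real transport allocates its controller and brings up the
	 * admin queue here, then typically calls:
	 *
	 *	nvmf_reg_read64(ctrl, NVME_REG_CAP, &cap);
	 *	nvmf_connect_admin_queue(ctrl);
	 *	nvmf_connect_io_queue(ctrl, qid);	// per I/O queue
	 */
	return ERR_PTR(-ENOSYS);	/* stub only */
}

static struct nvmf_transport_ops demo_transport = {
	.name		= "demo",
	.required_opts	= NVMF_OPT_TRADDR,
	.create_ctrl	= demo_create_ctrl,
};

static int __init demo_init(void)
{
	nvmf_register_transport(&demo_transport);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	nvmf_unregister_transport(&demo_transport);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL v2");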
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index a0af0558354c..63f483daf930 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -156,7 +156,7 @@ struct nvme_nvm_completion {
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
- __u16 num_pairs;
+ __le16 num_pairs;
__u8 pairs[NVME_NVM_LP_MLC_PAIRS];
};
@@ -500,7 +500,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct bio *bio = rqd->bio;
struct nvme_nvm_command *cmd;
- rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+ rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
if (IS_ERR(rq))
return -ENOMEM;
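The __le16 annotation above is an endianness fix rather than a layout
change: num_pairs arrives in little-endian wire format, so consumers
must now convert it explicitly (and sparse will flag any direct use).
Roughly, with a hypothetical accessor name:

static inline u16 nvme_nvm_mlc_pairs(const struct nvme_nvm_lp_mlc *mlc)
{
	/* wire (little-endian) to CPU byte order */
	return le16_to_cpu(mlc->num_pairs);
}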
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1daa0482de0e..ab18b78102bf 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -38,6 +38,11 @@ extern unsigned char admin_timeout;
extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ)
+#define NVME_DEFAULT_KATO 5
+#define NVME_KATO_GRACE 10
+
+extern unsigned int nvme_max_retries;
+
enum {
NVME_NS_LBA = 0,
NVME_NS_LIGHTNVM = 1,
@@ -65,12 +70,26 @@ enum nvme_quirks {
* logical blocks.
*/
NVME_QUIRK_DISCARD_ZEROES = (1 << 2),
+
+ /*
+ * The controller needs a delay before the driver starts checking
+ * device readiness, which is done by reading the NVME_CSTS_RDY bit.
+ */
+ NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),
};
+/* The below value is the specific amount of delay needed before checking
+ * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
+ * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
+ * found empirically.
+ */
+#define NVME_QUIRK_DELAY_AMOUNT 2000
+
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
NVME_CTRL_RESETTING,
+ NVME_CTRL_RECONNECTING,
NVME_CTRL_DELETING,
NVME_CTRL_DEAD,
};
@@ -80,6 +99,7 @@ struct nvme_ctrl {
spinlock_t lock;
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
+ struct request_queue *connect_q;
struct device *dev;
struct kref kref;
int instance;
@@ -107,10 +127,22 @@ struct nvme_ctrl {
u8 event_limit;
u8 vwc;
u32 vs;
+ u32 sgls;
+ u16 kas;
+ unsigned int kato;
bool subsystem;
unsigned long quirks;
struct work_struct scan_work;
struct work_struct async_event_work;
+ struct delayed_work ka_work;
+
+ /* Fabrics only */
+ u16 sqsize;
+ u32 ioccsz;
+ u32 iorcsz;
+ u16 icdoff;
+ u16 maxcmd;
+ struct nvmf_ctrl_options *opts;
};
/*
@@ -144,7 +176,9 @@ struct nvme_ns {
};
struct nvme_ctrl_ops {
+ const char *name;
struct module *module;
+ bool is_fabrics;
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -152,6 +186,9 @@ struct nvme_ctrl_ops {
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*post_scan)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
+ int (*delete_ctrl)(struct nvme_ctrl *ctrl);
+ const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
+ int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
@@ -177,7 +214,7 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
static inline unsigned nvme_map_len(struct request *rq)
{
- if (rq->cmd_flags & REQ_DISCARD)
+ if (req_op(rq) == REQ_OP_DISCARD)
return sizeof(struct nvme_dsm_range);
else
return blk_rq_bytes(rq);
@@ -185,7 +222,7 @@ static inline unsigned nvme_map_len(struct request *rq)
static inline void nvme_cleanup_cmd(struct request *req)
{
- if (req->cmd_flags & REQ_DISCARD)
+ if (req_op(req) == REQ_OP_DISCARD)
kfree(req->completion_data);
}
@@ -204,9 +241,11 @@ static inline int nvme_error_status(u16 status)
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
- (jiffies - req->start_time) < req->timeout;
+ (jiffies - req->start_time) < req->timeout &&
+ req->retries < nvme_max_retries;
}
+void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
@@ -230,8 +269,9 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
+#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, unsigned int flags);
+ struct nvme_command *cmd, unsigned int flags, int qid);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
@@ -239,7 +279,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
struct nvme_completion *cqe, void *buffer, unsigned bufflen,
- unsigned timeout);
+ unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen, u32 *result,
unsigned timeout);
@@ -256,6 +296,8 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
+void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
+void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
struct sg_io_hdr;
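The new nvme_start_keep_alive()/nvme_stop_keep_alive() pair backs the
kato/kas fields with the delayed ka_work item. A sketch of the call
pattern a fabrics transport would be expected to follow; the demo_*
names are placeholders:

static void demo_ctrl_live(struct nvme_ctrl *ctrl)
{
	/* once the admin queue is up; kato == 0 disables keep-alive */
	if (ctrl->kato)
		nvme_start_keep_alive(ctrl);
}

static void demo_ctrl_teardown(struct nvme_ctrl *ctrl)
{
	/* cancel ka_work before tearing down the queues it submits to */
	nvme_stop_keep_alive(ctrl);
}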
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index befac5b19490..4cb9b156cab7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -310,6 +310,11 @@ static int nvme_init_iod(struct request *rq, unsigned size,
iod->npages = -1;
iod->nents = 0;
iod->length = size;
+
+ if (!(rq->cmd_flags & REQ_DONTPREP)) {
+ rq->retries = 0;
+ rq->cmd_flags |= REQ_DONTPREP;
+ }
return 0;
}
@@ -520,8 +525,8 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out_unmap;
}
- cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
- cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+ cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
if (blk_integrity_rq(req))
cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
return BLK_MQ_RQ_QUEUE_OK;
@@ -623,6 +628,7 @@ static void nvme_complete_rq(struct request *req)
if (unlikely(req->errors)) {
if (nvme_req_needs_retry(req, req->errors)) {
+ req->retries++;
nvme_requeue_req(req);
return;
}
@@ -901,7 +907,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
req->tag, nvmeq->qid);
abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
- BLK_MQ_REQ_NOWAIT);
+ BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
if (IS_ERR(abort_req)) {
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
@@ -919,22 +925,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
return BLK_EH_RESET_TIMER;
}
-static void nvme_cancel_io(struct request *req, void *data, bool reserved)
-{
- int status;
-
- if (!blk_mq_request_started(req))
- return;
-
- dev_dbg_ratelimited(((struct nvme_dev *) data)->ctrl.device,
- "Cancelling I/O %d", req->tag);
-
- status = NVME_SC_ABORT_REQ;
- if (blk_queue_dying(req->q))
- status |= NVME_SC_DNR;
- blk_mq_complete_request(req, status);
-}
-
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
@@ -1399,16 +1389,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result < 0)
return result;
- /*
- * Degraded controllers might return an error when setting the queue
- * count. We still want to be able to bring them online and offer
- * access to the admin queue, as that might be only way to fix them up.
- */
- if (result > 0) {
- dev_err(dev->ctrl.device,
- "Could not set queue count (%d)\n", result);
+ if (nr_io_queues == 0)
return 0;
- }
if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -1536,7 +1518,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
- req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+ req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1727,8 +1709,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
}
nvme_pci_disable(dev);
- blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
- blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
+ blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
+ blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
mutex_unlock(&dev->shutdown_lock);
}
@@ -1902,6 +1884,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+ .name = "pcie",
.module = THIS_MODULE,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
@@ -1940,7 +1923,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
node = dev_to_node(&pdev->dev);
if (node == NUMA_NO_NODE)
- set_dev_node(&pdev->dev, 0);
+ set_dev_node(&pdev->dev, first_memory_node);
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
@@ -2037,6 +2020,24 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_put_ctrl(&dev->ctrl);
}
+static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ int ret = 0;
+
+ if (numvfs == 0) {
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "Cannot disable SR-IOV VFs while assigned\n");
+ return -EPERM;
+ }
+ pci_disable_sriov(pdev);
+ return 0;
+ }
+
+ ret = pci_enable_sriov(pdev, numvfs);
+ return ret ? ret : numvfs;
+}
+
#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
@@ -2122,6 +2123,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_DISCARD_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
+ { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ 0, }
@@ -2137,6 +2140,7 @@ static struct pci_driver nvme_driver = {
.driver = {
.pm = &nvme_dev_pm_ops,
},
+ .sriov_configure = nvme_pci_sriov_configure,
.err_handler = &nvme_err_handler,
};
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
new file mode 100644
index 000000000000..3e3ce2b0424e
--- /dev/null
+++ b/drivers/nvme/host/rdma.c
@@ -0,0 +1,2018 @@
+/*
+ * NVMe over Fabrics RDMA host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/nvme.h>
+#include <linux/t10-pi.h>
+#include <asm/unaligned.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_cm.h>
+#include <linux/nvme-rdma.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+
+
+#define NVME_RDMA_CONNECT_TIMEOUT_MS 1000 /* 1 second */
+
+#define NVME_RDMA_MAX_SEGMENT_SIZE 0xffffff /* 24-bit SGL field */
+
+#define NVME_RDMA_MAX_SEGMENTS 256
+
+#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
+
+#define NVME_RDMA_MAX_PAGES_PER_MR 512
+
+#define NVME_RDMA_DEF_RECONNECT_DELAY 20
+
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_RDMA_NR_AEN_COMMANDS 1
+#define NVME_RDMA_AQ_BLKMQ_DEPTH \
+ (NVMF_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
+
+struct nvme_rdma_device {
+ struct ib_device *dev;
+ struct ib_pd *pd;
+ struct ib_mr *mr;
+ struct kref ref;
+ struct list_head entry;
+};
+
+struct nvme_rdma_qe {
+ struct ib_cqe cqe;
+ void *data;
+ u64 dma;
+};
+
+struct nvme_rdma_queue;
+struct nvme_rdma_request {
+ struct ib_mr *mr;
+ struct nvme_rdma_qe sqe;
+ struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
+ u32 num_sge;
+ int nents;
+ bool inline_data;
+ bool need_inval;
+ struct ib_reg_wr reg_wr;
+ struct ib_cqe reg_cqe;
+ struct nvme_rdma_queue *queue;
+ struct sg_table sg_table;
+ struct scatterlist first_sgl[];
+};
+
+enum nvme_rdma_queue_flags {
+ NVME_RDMA_Q_CONNECTED = (1 << 0),
+};
+
+struct nvme_rdma_queue {
+ struct nvme_rdma_qe *rsp_ring;
+ u8 sig_count;
+ int queue_size;
+ size_t cmnd_capsule_len;
+ struct nvme_rdma_ctrl *ctrl;
+ struct nvme_rdma_device *device;
+ struct ib_cq *ib_cq;
+ struct ib_qp *qp;
+
+ unsigned long flags;
+ struct rdma_cm_id *cm_id;
+ int cm_error;
+ struct completion cm_done;
+};
+
+struct nvme_rdma_ctrl {
+ /* read and written in the hot path */
+ spinlock_t lock;
+
+ /* read only in the hot path */
+ struct nvme_rdma_queue *queues;
+ u32 queue_count;
+
+ /* other member variables */
+ struct blk_mq_tag_set tag_set;
+ struct work_struct delete_work;
+ struct work_struct reset_work;
+ struct work_struct err_work;
+
+ struct nvme_rdma_qe async_event_sqe;
+
+ int reconnect_delay;
+ struct delayed_work reconnect_work;
+
+ struct list_head list;
+
+ struct blk_mq_tag_set admin_tag_set;
+ struct nvme_rdma_device *device;
+
+ u64 cap;
+ u32 max_fr_pages;
+
+ union {
+ struct sockaddr addr;
+ struct sockaddr_in addr_in;
+ };
+
+ struct nvme_ctrl ctrl;
+};
+
+static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
+}
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_mutex);
+
+static LIST_HEAD(nvme_rdma_ctrl_list);
+static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
+
+static struct workqueue_struct *nvme_rdma_wq;
+
+/*
+ * Disabling this option makes small I/O go faster, but is fundamentally
+ * unsafe. With it turned off we will have to register a global rkey that
+ * allows read and write access to all physical memory.
+ */
+static bool register_always = true;
+module_param(register_always, bool, 0444);
+MODULE_PARM_DESC(register_always,
+ "Use memory registration even for contiguous memory regions");
+
+static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event);
+static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
+
+/* XXX: really should move to a generic header sooner or later. */
+static inline void put_unaligned_le24(u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
+{
+ return queue - queue->ctrl->queues;
+}
+
+static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
+{
+ return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+}
+
+static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
+ kfree(qe->data);
+}
+
+static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ qe->data = kzalloc(capsule_size, GFP_KERNEL);
+ if (!qe->data)
+ return -ENOMEM;
+
+ qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
+ if (ib_dma_mapping_error(ibdev, qe->dma)) {
+ kfree(qe->data);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nvme_rdma_free_ring(struct ib_device *ibdev,
+ struct nvme_rdma_qe *ring, size_t ib_queue_size,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < ib_queue_size; i++)
+ nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
+ kfree(ring);
+}
+
+static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
+ size_t ib_queue_size, size_t capsule_size,
+ enum dma_data_direction dir)
+{
+ struct nvme_rdma_qe *ring;
+ int i;
+
+ ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ for (i = 0; i < ib_queue_size; i++) {
+ if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
+ goto out_free_ring;
+ }
+
+ return ring;
+
+out_free_ring:
+ nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
+ return NULL;
+}
+
+static void nvme_rdma_qp_event(struct ib_event *event, void *context)
+{
+ pr_debug("QP event %d\n", event->event);
+}
+
+static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
+{
+ wait_for_completion_interruptible_timeout(&queue->cm_done,
+ msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
+ return queue->cm_error;
+}
+
+static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
+{
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_qp_init_attr init_attr;
+ int ret;
+
+ memset(&init_attr, 0, sizeof(init_attr));
+ init_attr.event_handler = nvme_rdma_qp_event;
+ /* +1 for drain */
+ init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
+ /* +1 for drain */
+ init_attr.cap.max_recv_wr = queue->queue_size + 1;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.send_cq = queue->ib_cq;
+ init_attr.recv_cq = queue->ib_cq;
+
+ ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
+
+ queue->qp = queue->cm_id->qp;
+ return ret;
+}
+
+static int nvme_rdma_reinit_request(void *data, struct request *rq)
+{
+ struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_device *dev = ctrl->device;
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ int ret = 0;
+
+ if (!req->need_inval)
+ goto out;
+
+ ib_dereg_mr(req->mr);
+
+ req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
+ ctrl->max_fr_pages);
+ if (IS_ERR(req->mr)) {
+ ret = PTR_ERR(req->mr);
+ req->mr = NULL;
+ }
+
+ req->need_inval = false;
+
+out:
+ return ret;
+}
+
+static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
+ struct request *rq, unsigned int queue_idx)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
+ struct nvme_rdma_device *dev = queue->device;
+
+ if (req->mr)
+ ib_dereg_mr(req->mr);
+
+ nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+}
+
+static void nvme_rdma_exit_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx)
+{
+ return __nvme_rdma_exit_request(data, rq, hctx_idx + 1);
+}
+
+static void nvme_rdma_exit_admin_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx)
+{
+ return __nvme_rdma_exit_request(data, rq, 0);
+}
+
+static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
+ struct request *rq, unsigned int queue_idx)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int ret;
+
+ BUG_ON(queue_idx >= ctrl->queue_count);
+
+ ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
+ ctrl->max_fr_pages);
+ if (IS_ERR(req->mr)) {
+ ret = PTR_ERR(req->mr);
+ goto out_free_qe;
+ }
+
+ req->queue = queue;
+
+ return 0;
+
+out_free_qe:
+ nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+static int nvme_rdma_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ return __nvme_rdma_init_request(data, rq, hctx_idx + 1);
+}
+
+static int nvme_rdma_init_admin_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ return __nvme_rdma_init_request(data, rq, 0);
+}
+
+static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
+
+ BUG_ON(hctx_idx >= ctrl->queue_count);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_queue *queue = &ctrl->queues[0];
+
+ BUG_ON(hctx_idx != 0);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static void nvme_rdma_free_dev(struct kref *ref)
+{
+ struct nvme_rdma_device *ndev =
+ container_of(ref, struct nvme_rdma_device, ref);
+
+ mutex_lock(&device_list_mutex);
+ list_del(&ndev->entry);
+ mutex_unlock(&device_list_mutex);
+
+ if (!register_always)
+ ib_dereg_mr(ndev->mr);
+ ib_dealloc_pd(ndev->pd);
+
+ kfree(ndev);
+}
+
+static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
+{
+ kref_put(&dev->ref, nvme_rdma_free_dev);
+}
+
+static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
+{
+ return kref_get_unless_zero(&dev->ref);
+}
+
+static struct nvme_rdma_device *
+nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
+{
+ struct nvme_rdma_device *ndev;
+
+ mutex_lock(&device_list_mutex);
+ list_for_each_entry(ndev, &device_list, entry) {
+ if (ndev->dev->node_guid == cm_id->device->node_guid &&
+ nvme_rdma_dev_get(ndev))
+ goto out_unlock;
+ }
+
+ ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ goto out_err;
+
+ ndev->dev = cm_id->device;
+ kref_init(&ndev->ref);
+
+ ndev->pd = ib_alloc_pd(ndev->dev);
+ if (IS_ERR(ndev->pd))
+ goto out_free_dev;
+
+ if (!register_always) {
+ ndev->mr = ib_get_dma_mr(ndev->pd,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE);
+ if (IS_ERR(ndev->mr))
+ goto out_free_pd;
+ }
+
+ if (!(ndev->dev->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ dev_err(&ndev->dev->dev,
+ "Memory registrations not supported.\n");
+ goto out_free_mr;
+ }
+
+ list_add(&ndev->entry, &device_list);
+out_unlock:
+ mutex_unlock(&device_list_mutex);
+ return ndev;
+
+out_free_mr:
+ if (!register_always)
+ ib_dereg_mr(ndev->mr);
+out_free_pd:
+ ib_dealloc_pd(ndev->pd);
+out_free_dev:
+ kfree(ndev);
+out_err:
+ mutex_unlock(&device_list_mutex);
+ return NULL;
+}
+
+static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+
+ rdma_destroy_qp(queue->cm_id);
+ ib_free_cq(queue->ib_cq);
+
+ nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+
+ nvme_rdma_dev_put(dev);
+}
+
+static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_device *dev)
+{
+ struct ib_device *ibdev = dev->dev;
+ const int send_wr_factor = 3; /* MR, SEND, INV */
+ const int cq_factor = send_wr_factor + 1; /* + RECV */
+ int comp_vector, idx = nvme_rdma_queue_idx(queue);
+ int ret;
+
+ queue->device = dev;
+
+ /*
+ * The admin queue is barely used once the controller is live, so don't
+ * bother to spread it out.
+ */
+ if (idx == 0)
+ comp_vector = 0;
+ else
+ comp_vector = idx % ibdev->num_comp_vectors;
+
+ /* +1 for ib_drain_qp */
+ queue->ib_cq = ib_alloc_cq(dev->dev, queue,
+ cq_factor * queue->queue_size + 1, comp_vector,
+ IB_POLL_SOFTIRQ);
+ if (IS_ERR(queue->ib_cq)) {
+ ret = PTR_ERR(queue->ib_cq);
+ goto out;
+ }
+
+ ret = nvme_rdma_create_qp(queue, send_wr_factor);
+ if (ret)
+ goto out_destroy_ib_cq;
+
+ queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+ if (!queue->rsp_ring) {
+ ret = -ENOMEM;
+ goto out_destroy_qp;
+ }
+
+ return 0;
+
+out_destroy_qp:
+ ib_destroy_qp(queue->qp);
+out_destroy_ib_cq:
+ ib_free_cq(queue->ib_cq);
+out:
+ return ret;
+}
+
+static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
+ int idx, size_t queue_size)
+{
+ struct nvme_rdma_queue *queue;
+ int ret;
+
+ queue = &ctrl->queues[idx];
+ queue->ctrl = ctrl;
+ init_completion(&queue->cm_done);
+
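+ /* I/O queue command capsule size comes from IOCCSZ (in 16-byte units). */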
+ if (idx > 0)
+ queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
+ else
+ queue->cmnd_capsule_len = sizeof(struct nvme_command);
+
+ queue->queue_size = queue_size;
+
+ queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(queue->cm_id)) {
+ dev_info(ctrl->ctrl.device,
+ "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
+ return PTR_ERR(queue->cm_id);
+ }
+
+ queue->cm_error = -ETIMEDOUT;
+ ret = rdma_resolve_addr(queue->cm_id, NULL, &ctrl->addr,
+ NVME_RDMA_CONNECT_TIMEOUT_MS);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "rdma_resolve_addr failed (%d).\n", ret);
+ goto out_destroy_cm_id;
+ }
+
+ ret = nvme_rdma_wait_for_cm(queue);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "rdma_resolve_addr wait failed (%d).\n", ret);
+ goto out_destroy_cm_id;
+ }
+
+ set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
+
+ return 0;
+
+out_destroy_cm_id:
+ rdma_destroy_id(queue->cm_id);
+ return ret;
+}
+
+static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+ rdma_disconnect(queue->cm_id);
+ ib_drain_qp(queue->qp);
+}
+
+static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
+{
+ nvme_rdma_destroy_queue_ib(queue);
+ rdma_destroy_id(queue->cm_id);
+}
+
+static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
+{
+ if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
+ return;
+ nvme_rdma_stop_queue(queue);
+ nvme_rdma_free_queue(queue);
+}
+
+static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
+}
+
+static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i, ret = 0;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i, ret;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "failed to initialize i/o queue: %d\n", ret);
+ goto out_free_queues;
+ }
+ }
+
+ return 0;
+
+out_free_queues:
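+ /*
+ * Including the queue that just failed is safe: it never set
+ * NVME_RDMA_Q_CONNECTED, so stop_and_free is a no-op for it.
+ */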
+ for (; i >= 1; i--)
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
+
+ return ret;
+}
+
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
+{
+ nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_rdma_dev_put(ctrl->device);
+}
+
+static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ if (list_empty(&ctrl->list))
+ goto free_ctrl;
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_del(&ctrl->list);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ if (ctrl->ctrl.tagset) {
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ nvme_rdma_dev_put(ctrl->device);
+ }
+ kfree(ctrl->queues);
+ nvmf_free_options(nctrl->opts);
+free_ctrl:
+ kfree(ctrl);
+}
+
+static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_rdma_ctrl, reconnect_work);
+ bool changed;
+ int ret;
+
+ if (ctrl->queue_count > 1) {
+ nvme_rdma_free_io_queues(ctrl);
+
+ ret = blk_mq_reinit_tagset(&ctrl->tag_set);
+ if (ret)
+ goto requeue;
+ }
+
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
+
+ ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set);
+ if (ret)
+ goto requeue;
+
+ ret = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
+ if (ret)
+ goto requeue;
+
+ blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+
+ ret = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (ret)
+ goto stop_admin_q;
+
+ ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ if (ret)
+ goto stop_admin_q;
+
+ nvme_start_keep_alive(&ctrl->ctrl);
+
+ if (ctrl->queue_count > 1) {
+ ret = nvme_rdma_init_io_queues(ctrl);
+ if (ret)
+ goto stop_admin_q;
+
+ ret = nvme_rdma_connect_io_queues(ctrl);
+ if (ret)
+ goto stop_admin_q;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ if (ctrl->queue_count > 1)
+ nvme_start_queues(&ctrl->ctrl);
+
+ dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
+
+ return;
+
+stop_admin_q:
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+requeue:
+ /* Make sure we are not resetting/deleting */
+ if (ctrl->ctrl.state == NVME_CTRL_RECONNECTING) {
+ dev_info(ctrl->ctrl.device,
+ "Failed reconnect attempt, requeueing...\n");
+ queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
+ ctrl->reconnect_delay * HZ);
+ }
+}
+
+static void nvme_rdma_error_recovery_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, err_work);
+
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ if (ctrl->queue_count > 1)
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+
+ /* Fast-fail or requeue all of our inflight requests */
+ if (ctrl->queue_count > 1)
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+
+ dev_info(ctrl->ctrl.device, "reconnecting in %d seconds\n",
+ ctrl->reconnect_delay);
+
+ queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
+ ctrl->reconnect_delay * HZ);
+}
+
+static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
+ return;
+
+ queue_work(nvme_rdma_wq, &ctrl->err_work);
+}
+
+static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
+ const char *op)
+{
+ struct nvme_rdma_queue *queue = cq->cq_context;
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+
+ if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ dev_info(ctrl->ctrl.device,
+ "%s for CQE 0x%p failed with status %s (%d)\n",
+ op, wc->wr_cqe,
+ ib_wc_status_msg(wc->status), wc->status);
+ nvme_rdma_error_recovery(ctrl);
+}
+
+static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "MEMREG");
+}
+
+static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+}
+
+static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req)
+{
+ struct ib_send_wr *bad_wr;
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .next = NULL,
+ .num_sge = 0,
+ .send_flags = 0,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+
+ req->reg_cqe.done = nvme_rdma_inv_rkey_done;
+ wr.wr_cqe = &req->reg_cqe;
+
+ return ib_post_send(queue->qp, &wr, &bad_wr);
+}
+
+static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
+ struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int res;
+
+ if (!blk_rq_bytes(rq))
+ return;
+
+ if (req->need_inval) {
+ res = nvme_rdma_inv_rkey(queue, req);
+ if (res < 0) {
+ dev_err(ctrl->ctrl.device,
+ "Queueing INV WR for rkey %#x failed (%d)\n",
+ req->mr->rkey, res);
+ nvme_rdma_error_recovery(queue->ctrl);
+ }
+ }
+
+ ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
+ req->nents, rq_data_dir(rq) ==
+ WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ nvme_cleanup_cmd(rq);
+ sg_free_table_chained(&req->sg_table, true);
+}
+
+static int nvme_rdma_set_sg_null(struct nvme_command *c)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+
+ sg->addr = 0;
+ put_unaligned_le24(0, sg->length);
+ put_unaligned_le32(0, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
+static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c)
+{
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+
+ req->sge[1].addr = sg_dma_address(req->sg_table.sgl);
+ req->sge[1].length = sg_dma_len(req->sg_table.sgl);
+ req->sge[1].lkey = queue->device->pd->local_dma_lkey;
+
+ sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
+ sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
+ sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
+
+ req->inline_data = true;
+ req->num_sge++;
+ return 0;
+}
+
+static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+
+ sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
+ put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
+ put_unaligned_le32(queue->device->mr->rkey, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
+static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ int nr;
+
+ nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
+ if (nr < count) {
+ if (nr < 0)
+ return nr;
+ return -EINVAL;
+ }
+
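+ /*
+ * Bump the key portion of the rkey so a stale rkey from a previous
+ * registration of this MR can't match the new registration.
+ */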
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ req->reg_cqe.done = nvme_rdma_memreg_done;
+ memset(&req->reg_wr, 0, sizeof(req->reg_wr));
+ req->reg_wr.wr.opcode = IB_WR_REG_MR;
+ req->reg_wr.wr.wr_cqe = &req->reg_cqe;
+ req->reg_wr.wr.num_sge = 0;
+ req->reg_wr.mr = req->mr;
+ req->reg_wr.key = req->mr->rkey;
+ req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+
+ req->need_inval = true;
+
+ sg->addr = cpu_to_le64(req->mr->iova);
+ put_unaligned_le24(req->mr->length, sg->length);
+ put_unaligned_le32(req->mr->rkey, sg->key);
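+ /* Ask the target to remotely invalidate our rkey when it's done. */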
+ sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
+ NVME_SGL_FMT_INVALIDATE;
+
+ return 0;
+}
+
+static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+ struct request *rq, unsigned int map_len,
+ struct nvme_command *c)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int nents, count;
+ int ret;
+
+ req->num_sge = 1;
+ req->inline_data = false;
+ req->need_inval = false;
+
+ c->common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!blk_rq_bytes(rq))
+ return nvme_rdma_set_sg_null(c);
+
+ req->sg_table.sgl = req->first_sgl;
+ ret = sg_alloc_table_chained(&req->sg_table, rq->nr_phys_segments,
+ req->sg_table.sgl);
+ if (ret)
+ return -ENOMEM;
+
+ nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
+ BUG_ON(nents > rq->nr_phys_segments);
+ req->nents = nents;
+
+ count = ib_dma_map_sg(ibdev, req->sg_table.sgl, nents,
+ rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (unlikely(count <= 0)) {
+ sg_free_table_chained(&req->sg_table, true);
+ return -EIO;
+ }
+
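+ /*
+ * A single mapped segment can go as inline data for small writes on
+ * an I/O queue, or as a single descriptor using the global rkey when
+ * memory registration isn't enforced; everything else needs an MR.
+ */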
+ if (count == 1) {
+ if (rq_data_dir(rq) == WRITE &&
+ map_len <= nvme_rdma_inline_data_size(queue) &&
+ nvme_rdma_queue_idx(queue))
+ return nvme_rdma_map_sg_inline(queue, req, c);
+
+ if (!register_always)
+ return nvme_rdma_map_sg_single(queue, req, c);
+ }
+
+ return nvme_rdma_map_sg_fr(queue, req, c, count);
+}
+
+static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "SEND");
+}
+
+static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
+ struct ib_send_wr *first, bool flush)
+{
+ struct ib_send_wr wr, *bad_wr;
+ int ret;
+
+ sge->addr = qe->dma;
+ sge->length = sizeof(struct nvme_command);
+ sge->lkey = queue->device->pd->local_dma_lkey;
+
+ qe->cqe.done = nvme_rdma_send_done;
+
+ wr.next = NULL;
+ wr.wr_cqe = &qe->cqe;
+ wr.sg_list = sge;
+ wr.num_sge = num_sge;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = 0;
+
+ /*
+ * Unsignalled send completions are another giant disaster in the
+ * IB Verbs spec: if we don't regularly post signalled sends
+ * the send queue will fill up and only a QP reset will rescue us.
+ * It would have been way too obvious to handle this in hardware or
+ * at least in the RDMA stack..
+ *
+ * This messy and racy code snippet is copied and pasted from the iSER
+ * initiator, and the magic '32' comes from there as well.
+ *
+ * Always signal the flushes. The magic request used for the flush
+ * sequencer is not allocated in our driver's tagset and it's
+ * triggered to be freed by blk_cleanup_queue(). So we need to
+ * always mark it as signaled to ensure that the "wr_cqe", which is
+ * embedded in the request's payload, is not freed when __ib_process_cq()
+ * calls wr_cqe->done().
+ */
+ if ((++queue->sig_count % 32) == 0 || flush)
+ wr.send_flags |= IB_SEND_SIGNALED;
+
+ if (first)
+ first->next = &wr;
+ else
+ first = &wr;
+
+ ret = ib_post_send(queue->qp, first, &bad_wr);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "%s failed with error code %d\n", __func__, ret);
+ }
+ return ret;
+}
+
+static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_qe *qe)
+{
+ struct ib_recv_wr wr, *bad_wr;
+ struct ib_sge list;
+ int ret;
+
+ list.addr = qe->dma;
+ list.length = sizeof(struct nvme_completion);
+ list.lkey = queue->device->pd->local_dma_lkey;
+
+ qe->cqe.done = nvme_rdma_recv_done;
+
+ wr.next = NULL;
+ wr.wr_cqe = &qe->cqe;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
+ ret = ib_post_recv(queue->qp, &wr, &bad_wr);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "%s failed with error code %d\n", __func__, ret);
+ }
+ return ret;
+}
+
+static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
+{
+ u32 queue_idx = nvme_rdma_queue_idx(queue);
+
+ if (queue_idx == 0)
+ return queue->ctrl->admin_tag_set.tags[queue_idx];
+ return queue->ctrl->tag_set.tags[queue_idx - 1];
+}
+
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
+ struct nvme_rdma_queue *queue = &ctrl->queues[0];
+ struct ib_device *dev = queue->device->dev;
+ struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
+ struct nvme_command *cmd = sqe->data;
+ struct ib_sge sge;
+ int ret;
+
+ if (WARN_ON_ONCE(aer_idx != 0))
+ return;
+
+ ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->common.opcode = nvme_admin_async_event;
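+ /*
+ * Use a command id just past the blk-mq tag space so the receive
+ * path can tell AEN completions apart from regular requests.
+ */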
+ cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
+ cmd->common.flags |= NVME_CMD_SGL_METABUF;
+ nvme_rdma_set_sg_null(cmd);
+
+ ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
+ DMA_TO_DEVICE);
+
+ ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+ WARN_ON_ONCE(ret);
+}
+
+static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
+ struct nvme_completion *cqe, struct ib_wc *wc, int tag)
+{
+ u16 status = le16_to_cpu(cqe->status);
+ struct request *rq;
+ struct nvme_rdma_request *req;
+ int ret = 0;
+
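+ /* Bit 0 of the completion status word is the phase tag; shift it out. */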
+ status >>= 1;
+
+ rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "tag 0x%x on QP %#x not found\n",
+ cqe->command_id, queue->qp->qp_num);
+ nvme_rdma_error_recovery(queue->ctrl);
+ return ret;
+ }
+ req = blk_mq_rq_to_pdu(rq);
+
+ if (rq->cmd_type == REQ_TYPE_DRV_PRIV && rq->special)
+ memcpy(rq->special, cqe, sizeof(*cqe));
+
+ if (rq->tag == tag)
+ ret = 1;
+
+ if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
+ wc->ex.invalidate_rkey == req->mr->rkey)
+ req->need_inval = false;
+
+ blk_mq_complete_request(rq, status);
+
+ return ret;
+}
+
+static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
+{
+ struct nvme_rdma_qe *qe =
+ container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+ struct nvme_rdma_queue *queue = cq->cq_context;
+ struct ib_device *ibdev = queue->device->dev;
+ struct nvme_completion *cqe = qe->data;
+ const size_t len = sizeof(struct nvme_completion);
+ int ret = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvme_rdma_wr_error(cq, wc, "RECV");
+ return 0;
+ }
+
+ ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
+ /*
+ * AEN requests are special as they don't time out and can
+ * survive any kind of queue freeze and often don't respond to
+ * aborts. We don't even bother to allocate a struct request
+ * for them but rather special case them here.
+ */
+ if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
+ cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
+ nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+ else
+ ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
+ ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
+
+ nvme_rdma_post_recv(queue, qe);
+ return ret;
+}
+
+static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ __nvme_rdma_recv_done(cq, wc, -1);
+}
+
+static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
+{
+ int ret, i;
+
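+ /* Pre-post a receive for every entry in the response ring. */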
+ for (i = 0; i < queue->queue_size; i++) {
+ ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
+ if (ret)
+ goto out_destroy_queue_ib;
+ }
+
+ return 0;
+
+out_destroy_queue_ib:
+ nvme_rdma_destroy_queue_ib(queue);
+ return ret;
+}
+
+static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
+ struct rdma_cm_event *ev)
+{
+ if (ev->param.conn.private_data_len) {
+ struct nvme_rdma_cm_rej *rej =
+ (struct nvme_rdma_cm_rej *)ev->param.conn.private_data;
+
+ dev_err(queue->ctrl->ctrl.device,
+ "Connect rejected, status %d.\n", le16_to_cpu(rej->sts));
+ /* XXX: Think of something clever to do here... */
+ } else {
+ dev_err(queue->ctrl->ctrl.device,
+ "Connect rejected, no private data.\n");
+ }
+
+ return -ECONNRESET;
+}
+
+static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_device *dev;
+ int ret;
+
+ dev = nvme_rdma_find_get_device(queue->cm_id);
+ if (!dev) {
+ dev_err(queue->cm_id->device->dma_device,
+ "no client data found!\n");
+ return -ECONNREFUSED;
+ }
+
+ ret = nvme_rdma_create_queue_ib(queue, dev);
+ if (ret) {
+ nvme_rdma_dev_put(dev);
+ goto out;
+ }
+
+ ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "rdma_resolve_route failed (%d).\n", ret);
+ goto out_destroy_queue;
+ }
+
+ return 0;
+
+out_destroy_queue:
+ nvme_rdma_destroy_queue_ib(queue);
+out:
+ return ret;
+}
+
+static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ struct rdma_conn_param param = { };
+ struct nvme_rdma_cm_req priv;
+ int ret;
+
+ param.qp_num = queue->qp->qp_num;
+ param.flow_control = 1;
+
+ param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
+ /* maximum retry count */
+ param.retry_count = 7;
+ param.rnr_retry_count = 7;
+ param.private_data = &priv;
+ param.private_data_len = sizeof(priv);
+
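+ /*
+ * Fill the NVMe/RDMA CM request private data: record format, queue
+ * id, and the host receive/send queue sizes for this queue.
+ */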
+ priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+ priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
+ priv.hrqsize = cpu_to_le16(queue->queue_size);
+ priv.hsqsize = cpu_to_le16(queue->queue_size);
+
+ ret = rdma_connect(queue->cm_id, &param);
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "rdma_connect failed (%d).\n", ret);
+ goto out_destroy_queue_ib;
+ }
+
+ return 0;
+
+out_destroy_queue_ib:
+ nvme_rdma_destroy_queue_ib(queue);
+ return ret;
+}
+
+/**
+ * nvme_rdma_device_unplug() - Handle RDMA device unplug
+ * @queue: Queue that owns the cm_id that caught the event
+ *
+ * DEVICE_REMOVAL event notifies us that the RDMA device is about
+ * to unplug so we should take care of destroying our RDMA resources.
+ * This event will be generated for each allocated cm_id.
+ *
+ * In our case, the RDMA resources are managed per controller and not
+ * only per queue. So the way we handle this is we trigger an implicit
+ * controller deletion upon the first DEVICE_REMOVAL event we see, and
+ * hold the event inflight until the controller deletion is completed.
+ *
+ * One exception that we need to handle is the destruction of the cm_id
+ * that caught the event. Since we hold the callout until the controller
+ * deletion is completed, we'll deadlock if the controller deletion
+ * calls rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
+ * of destroying this queue beforehand and destroy the queue resources
+ * after the controller deletion has completed, except for the cm_id
+ * itself, which is destroyed implicitly by returning a non-zero rc to
+ * the callout.
+ */
+static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ int ret, ctrl_deleted = 0;
+
+ /* First disable the queue so ctrl delete won't free it */
+ if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
+ goto out;
+
+ /* delete the controller */
+ ret = __nvme_rdma_del_ctrl(ctrl);
+ if (!ret) {
+ dev_warn(ctrl->ctrl.device,
+ "Got rdma device removal event, deleting ctrl\n");
+ flush_work(&ctrl->delete_work);
+
+ /* Return non-zero so the cm_id will destroy implicitly */
+ ctrl_deleted = 1;
+
+ /* Free this queue ourselves */
+ rdma_disconnect(queue->cm_id);
+ ib_drain_qp(queue->qp);
+ nvme_rdma_destroy_queue_ib(queue);
+ }
+
+out:
+ return ctrl_deleted;
+}
+
+static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct nvme_rdma_queue *queue = cm_id->context;
+ int cm_error = 0;
+
+ dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
+ rdma_event_msg(ev->event), ev->event,
+ ev->status, cm_id);
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ cm_error = nvme_rdma_addr_resolved(queue);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ cm_error = nvme_rdma_route_resolved(queue);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ queue->cm_error = nvme_rdma_conn_established(queue);
+ /* complete cm_done regardless of success/failure */
+ complete(&queue->cm_done);
+ return 0;
+ case RDMA_CM_EVENT_REJECTED:
+ cm_error = nvme_rdma_conn_rejected(queue, ev);
+ break;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ dev_dbg(queue->ctrl->ctrl.device,
+ "CM error event %d\n", ev->event);
+ cm_error = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ dev_dbg(queue->ctrl->ctrl.device,
+ "disconnect received - connection closed\n");
+ nvme_rdma_error_recovery(queue->ctrl);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /* returning 1 means implicit CM ID destroy */
+ return nvme_rdma_device_unplug(queue);
+ default:
+ dev_err(queue->ctrl->ctrl.device,
+ "Unexpected RDMA CM event (%d)\n", ev->event);
+ nvme_rdma_error_recovery(queue->ctrl);
+ break;
+ }
+
+ if (cm_error) {
+ queue->cm_error = cm_error;
+ complete(&queue->cm_done);
+ }
+
+ return 0;
+}
+
+static enum blk_eh_timer_return
+nvme_rdma_timeout(struct request *rq, bool reserved)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+ /* queue error recovery */
+ nvme_rdma_error_recovery(req->queue->ctrl);
+
+ /* fail with DNR on cmd timeout */
+ rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+
+ return BLK_EH_HANDLED;
+}
+
+static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_rdma_queue *queue = hctx->driver_data;
+ struct request *rq = bd->rq;
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_qe *sqe = &req->sqe;
+ struct nvme_command *c = sqe->data;
+ bool flush = false;
+ struct ib_device *dev;
+ unsigned int map_len;
+ int ret;
+
+ WARN_ON_ONCE(rq->tag < 0);
+
+ dev = queue->device->dev;
+ ib_dma_sync_single_for_cpu(dev, sqe->dma,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+
+ ret = nvme_setup_cmd(ns, rq, c);
+ if (ret)
+ return ret;
+
+ c->common.command_id = rq->tag;
+ blk_mq_start_request(rq);
+
+ map_len = nvme_map_len(rq);
+ ret = nvme_rdma_map_data(queue, rq, map_len, c);
+ if (ret < 0) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Failed to map data (%d)\n", ret);
+ nvme_cleanup_cmd(rq);
+ goto err;
+ }
+
+ ib_dma_sync_single_for_device(dev, sqe->dma,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+
+ if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
+ flush = true;
+ ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
+ req->need_inval ? &req->reg_wr.wr : NULL, flush);
+ if (ret) {
+ nvme_rdma_unmap_data(queue, rq);
+ goto err;
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+err:
+ return (ret == -ENOMEM || ret == -EAGAIN) ?
+ BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+}
+
+static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+{
+ struct nvme_rdma_queue *queue = hctx->driver_data;
+ struct ib_cq *cq = queue->ib_cq;
+ struct ib_wc wc;
+ int found = 0;
+
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ while (ib_poll_cq(cq, 1, &wc) > 0) {
+ struct ib_cqe *cqe = wc.wr_cqe;
+
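+ /*
+ * Receive completions may carry the tag blk-mq is polling for,
+ * so route them through the tag-aware completion helper.
+ */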
+ if (cqe) {
+ if (cqe->done == nvme_rdma_recv_done)
+ found |= __nvme_rdma_recv_done(cq, &wc, tag);
+ else
+ cqe->done(cq, &wc);
+ }
+ }
+
+ return found;
+}
+
+static void nvme_rdma_complete_rq(struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ int error = 0;
+
+ nvme_rdma_unmap_data(queue, rq);
+
+ if (unlikely(rq->errors)) {
+ if (nvme_req_needs_retry(rq, rq->errors)) {
+ nvme_requeue_req(rq);
+ return;
+ }
+
+ if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ error = rq->errors;
+ else
+ error = nvme_error_status(rq->errors);
+ }
+
+ blk_mq_end_request(rq, error);
+}
+
+static struct blk_mq_ops nvme_rdma_mq_ops = {
+ .queue_rq = nvme_rdma_queue_rq,
+ .complete = nvme_rdma_complete_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = nvme_rdma_init_request,
+ .exit_request = nvme_rdma_exit_request,
+ .reinit_request = nvme_rdma_reinit_request,
+ .init_hctx = nvme_rdma_init_hctx,
+ .poll = nvme_rdma_poll,
+ .timeout = nvme_rdma_timeout,
+};
+
+static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
+ .queue_rq = nvme_rdma_queue_rq,
+ .complete = nvme_rdma_complete_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = nvme_rdma_init_admin_request,
+ .exit_request = nvme_rdma_exit_admin_request,
+ .reinit_request = nvme_rdma_reinit_request,
+ .init_hctx = nvme_rdma_init_admin_hctx,
+ .timeout = nvme_rdma_timeout,
+};
+
+static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
+{
+ int error;
+
+ error = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
+ if (error)
+ return error;
+
+ ctrl->device = ctrl->queues[0].device;
+
+ /*
+ * We need a reference on the device as long as the tag_set is alive,
+ * as the MRs in the request structures need a valid ib_device.
+ */
+ error = -EINVAL;
+ if (!nvme_rdma_dev_get(ctrl->device))
+ goto out_free_queue;
+
+ ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+ ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+
+ memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+ ctrl->admin_tag_set.ops = &nvme_rdma_admin_mq_ops;
+ ctrl->admin_tag_set.queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
+ ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
+ SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ ctrl->admin_tag_set.driver_data = ctrl;
+ ctrl->admin_tag_set.nr_hw_queues = 1;
+ ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+
+ error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ if (error)
+ goto out_put_dev;
+
+ ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+ if (IS_ERR(ctrl->ctrl.admin_q)) {
+ error = PTR_ERR(ctrl->ctrl.admin_q);
+ goto out_free_tagset;
+ }
+
+ error = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (error) {
+ dev_err(ctrl->ctrl.device,
+ "prop_get NVME_REG_CAP failed\n");
+ goto out_cleanup_queue;
+ }
+
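+ /* Clamp sqsize to the controller's MQES limit (MQES is 0's based). */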
+ ctrl->ctrl.sqsize =
+ min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+
+ error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ if (error)
+ goto out_cleanup_queue;
+
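+ /*
+ * Cap the transfer size by the MR page list; the -1 leaves room for
+ * a possibly unaligned first page. Convert pages to 512-byte sectors.
+ */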
+ ctrl->ctrl.max_hw_sectors =
+ (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+
+ error = nvme_init_identify(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
+ &ctrl->async_event_sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ if (error)
+ goto out_cleanup_queue;
+
+ nvme_start_keep_alive(&ctrl->ctrl);
+
+ return 0;
+
+out_cleanup_queue:
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_free_tagset:
+ /* disconnect and drain the queue before freeing the tagset */
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_put_dev:
+ nvme_rdma_dev_put(ctrl->device);
+out_free_queue:
+ nvme_rdma_free_queue(&ctrl->queues[0]);
+ return error;
+}
+
+static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
+{
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ cancel_work_sync(&ctrl->err_work);
+ cancel_delayed_work_sync(&ctrl->reconnect_work);
+
+ if (ctrl->queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ nvme_rdma_free_io_queues(ctrl);
+ }
+
+ if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ nvme_shutdown_ctrl(&ctrl->ctrl);
+
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl);
+}
+
+static void nvme_rdma_del_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, delete_work);
+
+ nvme_remove_namespaces(&ctrl->ctrl);
+ nvme_rdma_shutdown_ctrl(ctrl);
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+ return -EBUSY;
+
+ if (!queue_work(nvme_rdma_wq, &ctrl->delete_work))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+ int ret;
+
+ ret = __nvme_rdma_del_ctrl(ctrl);
+ if (ret)
+ return ret;
+
+ flush_work(&ctrl->delete_work);
+
+ return 0;
+}
+
+static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, delete_work);
+
+ nvme_remove_namespaces(&ctrl->ctrl);
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, reset_work);
+ int ret;
+ bool changed;
+
+ nvme_rdma_shutdown_ctrl(ctrl);
+
+ ret = nvme_rdma_configure_admin_queue(ctrl);
+ if (ret) {
+ /* ctrl is already shutdown, just remove the ctrl */
+ INIT_WORK(&ctrl->delete_work, nvme_rdma_remove_ctrl_work);
+ goto del_dead_ctrl;
+ }
+
+ if (ctrl->queue_count > 1) {
+ ret = blk_mq_reinit_tagset(&ctrl->tag_set);
+ if (ret)
+ goto del_dead_ctrl;
+
+ ret = nvme_rdma_init_io_queues(ctrl);
+ if (ret)
+ goto del_dead_ctrl;
+
+ ret = nvme_rdma_connect_io_queues(ctrl);
+ if (ret)
+ goto del_dead_ctrl;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ if (ctrl->queue_count > 1) {
+ nvme_start_queues(&ctrl->ctrl);
+ nvme_queue_scan(&ctrl->ctrl);
+ }
+
+ return;
+
+del_dead_ctrl:
+ /* Deleting this dead controller... */
+ dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
+ WARN_ON(!queue_work(nvme_rdma_wq, &ctrl->delete_work));
+}
+
+static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
+ return -EBUSY;
+
+ if (!queue_work(nvme_rdma_wq, &ctrl->reset_work))
+ return -EBUSY;
+
+ flush_work(&ctrl->reset_work);
+
+ return 0;
+}
+
+static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
+ .name = "rdma",
+ .module = THIS_MODULE,
+ .is_fabrics = true,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .reset_ctrl = nvme_rdma_reset_ctrl,
+ .free_ctrl = nvme_rdma_free_ctrl,
+ .submit_async_event = nvme_rdma_submit_async_event,
+ .delete_ctrl = nvme_rdma_del_ctrl,
+ .get_subsysnqn = nvmf_get_subsysnqn,
+ .get_address = nvmf_get_address,
+};
+
+static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ int ret;
+
+ ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ if (ret)
+ return ret;
+
+ ctrl->queue_count = opts->nr_io_queues + 1;
+ if (ctrl->queue_count < 2)
+ return 0;
+
+ dev_info(ctrl->ctrl.device,
+ "creating %d I/O queues.\n", opts->nr_io_queues);
+
+ ret = nvme_rdma_init_io_queues(ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * We need a reference on the device as long as the tag_set is alive,
+ * as the MRs in the request structures need a valid ib_device.
+ */
+ ret = -EINVAL;
+ if (!nvme_rdma_dev_get(ctrl->device))
+ goto out_free_io_queues;
+
+ memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+ ctrl->tag_set.ops = &nvme_rdma_mq_ops;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+ ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+ ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
+ SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ ctrl->tag_set.driver_data = ctrl;
+ ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+ ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
+
+ ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ if (ret)
+ goto out_put_dev;
+ ctrl->ctrl.tagset = &ctrl->tag_set;
+
+ ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+ if (IS_ERR(ctrl->ctrl.connect_q)) {
+ ret = PTR_ERR(ctrl->ctrl.connect_q);
+ goto out_free_tag_set;
+ }
+
+ ret = nvme_rdma_connect_io_queues(ctrl);
+ if (ret)
+ goto out_cleanup_connect_q;
+
+ return 0;
+
+out_cleanup_connect_q:
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tag_set:
+ blk_mq_free_tag_set(&ctrl->tag_set);
+out_put_dev:
+ nvme_rdma_dev_put(ctrl->device);
+out_free_io_queues:
+ nvme_rdma_free_io_queues(ctrl);
+ return ret;
+}
+
+static int nvme_rdma_parse_ipaddr(struct sockaddr_in *in_addr, char *p)
+{
+ u8 *addr = (u8 *)&in_addr->sin_addr.s_addr;
+ size_t buflen = strlen(p);
+
+ /* XXX: handle IPv6 addresses */
+
+ if (buflen > INET_ADDRSTRLEN)
+ return -EINVAL;
+ if (in4_pton(p, buflen, addr, '\0', NULL) == 0)
+ return -EINVAL;
+ in_addr->sin_family = AF_INET;
+ return 0;
+}
+
+static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_rdma_ctrl *ctrl;
+ int ret;
+ bool changed;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+ ctrl->ctrl.opts = opts;
+ INIT_LIST_HEAD(&ctrl->list);
+
+ ret = nvme_rdma_parse_ipaddr(&ctrl->addr_in, opts->traddr);
+ if (ret) {
+ pr_err("malformed IP address passed: %s\n", opts->traddr);
+ goto out_free_ctrl;
+ }
+
+ if (opts->mask & NVMF_OPT_TRSVCID) {
+ u16 port;
+
+ ret = kstrtou16(opts->trsvcid, 0, &port);
+ if (ret)
+ goto out_free_ctrl;
+
+ ctrl->addr_in.sin_port = cpu_to_be16(port);
+ } else {
+ ctrl->addr_in.sin_port = cpu_to_be16(NVME_RDMA_IP_PORT);
+ }
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+ 0 /* no quirks, we're perfect! */);
+ if (ret)
+ goto out_free_ctrl;
+
+ ctrl->reconnect_delay = opts->reconnect_delay;
+ INIT_DELAYED_WORK(&ctrl->reconnect_work,
+ nvme_rdma_reconnect_ctrl_work);
+ INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
+ INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
+ INIT_WORK(&ctrl->reset_work, nvme_rdma_reset_ctrl_work);
+ spin_lock_init(&ctrl->lock);
+
+ ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
+ ctrl->ctrl.sqsize = opts->queue_size;
+ ctrl->ctrl.kato = opts->kato;
+
+ ret = -ENOMEM;
+ ctrl->queues = kcalloc(ctrl->queue_count, sizeof(*ctrl->queues),
+ GFP_KERNEL);
+ if (!ctrl->queues)
+ goto out_uninit_ctrl;
+
+ ret = nvme_rdma_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_kfree_queues;
+
+ /* sanity check icdoff */
+ if (ctrl->ctrl.icdoff) {
+ dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
+ goto out_remove_admin_queue;
+ }
+
+ /* sanity check keyed sgls */
+ if (!(ctrl->ctrl.sgls & (1 << 20))) {
+ dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not support\n");
+ goto out_remove_admin_queue;
+ }
+
+ if (opts->queue_size > ctrl->ctrl.maxcmd) {
+ /* warn if maxcmd is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl maxcmd %u, clamping down\n",
+ opts->queue_size, ctrl->ctrl.maxcmd);
+ opts->queue_size = ctrl->ctrl.maxcmd;
+ }
+
+ if (opts->nr_io_queues) {
+ ret = nvme_rdma_create_io_queues(ctrl);
+ if (ret)
+ goto out_remove_admin_queue;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
+ ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+
+ kref_get(&ctrl->ctrl.kref);
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ if (opts->nr_io_queues) {
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ return &ctrl->ctrl;
+
+out_remove_admin_queue:
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl);
+out_kfree_queues:
+ kfree(ctrl->queues);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+ if (ret > 0)
+ ret = -EIO;
+ return ERR_PTR(ret);
+out_free_ctrl:
+ kfree(ctrl);
+ return ERR_PTR(ret);
+}
+
+static struct nvmf_transport_ops nvme_rdma_transport = {
+ .name = "rdma",
+ .required_opts = NVMF_OPT_TRADDR,
+ .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY,
+ .create_ctrl = nvme_rdma_create_ctrl,
+};
+
+static int __init nvme_rdma_init_module(void)
+{
+ nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
+ if (!nvme_rdma_wq)
+ return -ENOMEM;
+
+ nvmf_register_transport(&nvme_rdma_transport);
+ return 0;
+}
+
+static void __exit nvme_rdma_cleanup_module(void)
+{
+ struct nvme_rdma_ctrl *ctrl;
+
+ nvmf_unregister_transport(&nvme_rdma_transport);
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
+ __nvme_rdma_del_ctrl(ctrl);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ destroy_workqueue(nvme_rdma_wq);
+}
+
+module_init(nvme_rdma_init_module);
+module_exit(nvme_rdma_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
new file mode 100644
index 000000000000..a5c31cbeb481
--- /dev/null
+++ b/drivers/nvme/target/Kconfig
@@ -0,0 +1,36 @@
+
+config NVME_TARGET
+ tristate "NVMe Target support"
+ depends on BLOCK
+ depends on CONFIGFS_FS
+ help
+ This enables target-side support for the NVMe protocol, that is,
+ it allows the Linux kernel to implement NVMe subsystems and
+ controllers and to export Linux block devices as NVMe namespaces.
+ You need to select at least one of the transports below to make this
+ functionality useful.
+
+ To configure the NVMe target you probably want to use the nvmetcli
+ tool from http://git.infradead.org/users/hch/nvmetcli.git.
+
+config NVME_TARGET_LOOP
+ tristate "NVMe loopback device support"
+ depends on BLK_DEV_NVME
+ depends on NVME_TARGET
+ select NVME_FABRICS
+ select SG_POOL
+ help
+ This enables the NVMe loopback device support, which can be useful
+ to test NVMe host and target side features.
+
+ If unsure, say N.
+
+config NVME_TARGET_RDMA
+ tristate "NVMe over Fabrics RDMA target support"
+ depends on INFINIBAND
+ depends on NVME_TARGET
+ help
+ This enables the NVMe RDMA target support, which allows exporting NVMe
+ devices over RDMA.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
new file mode 100644
index 000000000000..b7a06232c9da
--- /dev/null
+++ b/drivers/nvme/target/Makefile
@@ -0,0 +1,9 @@
+
+obj-$(CONFIG_NVME_TARGET) += nvmet.o
+obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
+obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
+
+nvmet-y += core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \
+ discovery.o
+nvme-loop-y += loop.o
+nvmet-rdma-y += rdma.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
new file mode 100644
index 000000000000..2fac17a5ad53
--- /dev/null
+++ b/drivers/nvme/target/admin-cmd.c
@@ -0,0 +1,465 @@
+/*
+ * NVMe admin command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/random.h>
+#include <generated/utsrelease.h>
+#include "nvmet.h"
+
+u32 nvmet_get_log_page_len(struct nvme_command *cmd)
+{
+ u32 len = le16_to_cpu(cmd->get_log_page.numdu);
+
+ len <<= 16;
+ len += le16_to_cpu(cmd->get_log_page.numdl);
+ /* NUMD is a 0's based value */
+ len += 1;
+ len *= sizeof(u32);
+
+ return len;
+}
+
+static void nvmet_execute_get_log_page(struct nvmet_req *req)
+{
+ size_t data_len = nvmet_get_log_page_len(req->cmd);
+ void *buf;
+ u16 status = 0;
+
+ buf = kzalloc(data_len, GFP_KERNEL);
+ if (!buf) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ switch (req->cmd->get_log_page.lid) {
+ case 0x01:
+ /*
+ * We currently never set the More bit in the status field,
+ * so all error log entries are invalid and can be zeroed out.
+ * This is called a minimum viable implementation (TM) of this
+ * mandatory log page.
+ */
+ break;
+ case 0x02:
+ /*
+ * XXX: fill out actual smart log
+ *
+ * We might have a hard time coming up with useful values for
+ * many of the fields, and even when we have useful data
+ * available (e.g. units or commands read/written) those aren't
+ * persistent over power loss.
+ */
+ break;
+ case 0x03:
+ /*
+ * We only support a single firmware slot which always is
+ * active, so we can zero out the whole firmware slot log and
+ * still claim to fully implement this mandatory log page.
+ */
+ break;
+ default:
+ BUG();
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, buf, data_len);
+
+ kfree(buf);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_id_ctrl *id;
+ u64 serial;
+ u16 status = 0;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ /* XXX: figure out how to assign real vendor IDs. */
+ id->vid = 0;
+ id->ssvid = 0;
+
+ /* generate a random serial number as our controllers are ephemeral: */
+ get_random_bytes(&serial, sizeof(serial));
+ memset(id->sn, ' ', sizeof(id->sn));
+ snprintf(id->sn, sizeof(id->sn), "%llx", serial);
+
+ memset(id->mn, ' ', sizeof(id->mn));
+ strncpy((char *)id->mn, "Linux", sizeof(id->mn));
+
+ memset(id->fr, ' ', sizeof(id->fr));
+ strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+
+ id->rab = 6;
+
+ /*
+ * XXX: figure out how we can assign an IEEE OUI, but until then
+ * the safest is to leave it as zeroes.
+ */
+
+ /* we support multiple ports and multiple hosts: */
+ id->mic = (1 << 0) | (1 << 1);
+
+ /* no limit on data transfer sizes for now */
+ id->mdts = 0;
+ id->cntlid = cpu_to_le16(ctrl->cntlid);
+ id->ver = cpu_to_le32(ctrl->subsys->ver);
+
+ /* XXX: figure out what to do about RTD3R/RTD3 */
+ id->oaes = cpu_to_le32(1 << 8);
+ id->ctratt = cpu_to_le32(1 << 0);
+
+ id->oacs = 0;
+
+ /*
+ * We don't really have a practical limit on the number of abort
+ * commands. But we don't do anything useful for abort either, so
+ * no point in allowing more abort commands than the spec requires.
+ */
+ id->acl = 3;
+
+ id->aerl = NVMET_ASYNC_EVENTS - 1;
+
+ /* first slot is read-only, only one slot supported */
+ id->frmw = (1 << 0) | (1 << 1);
+ id->lpa = (1 << 0) | (1 << 2);
+ id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
+ id->npss = 0;
+
+ /* We support keep-alive timeout in granularity of seconds */
+ id->kas = cpu_to_le16(NVMET_KAS);
+
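+ /* Fixed 64-byte (2^6) SQEs and 16-byte (2^4) CQEs, min == max. */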
+ id->sqes = (0x6 << 4) | 0x6;
+ id->cqes = (0x4 << 4) | 0x4;
+
+ /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+ id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
+ id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM);
+
+ /* XXX: don't report vwc if the underlying device is write through */
+ id->vwc = NVME_CTRL_VWC_PRESENT;
+
+ /*
+ * We can't support atomic writes bigger than a LBA without support
+ * from the backend device.
+ */
+ id->awun = 0;
+ id->awupf = 0;
+
+ id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
+ if (ctrl->ops->has_keyed_sgls)
+ id->sgls |= cpu_to_le32(1 << 2);
+ if (ctrl->ops->sqe_inline_size)
+ id->sgls |= cpu_to_le32(1 << 20);
+
+ strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+
+ /* Max command capsule size is sqe + single page of in-capsule data */
+ id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
+ ctrl->ops->sqe_inline_size) / 16);
+ /* Max response capsule size is cqe */
+ id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
+
+ id->msdbd = ctrl->ops->msdbd;
+
+ /*
+ * Meh, we don't really support any power state. Fake up the same
+ * values that qemu does.
+ */
+ id->psd[0].max_power = cpu_to_le16(0x9c4);
+ id->psd[0].entry_lat = cpu_to_le32(0x10);
+ id->psd[0].exit_lat = cpu_to_le32(0x4);
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_ns(struct nvmet_req *req)
+{
+ struct nvmet_ns *ns;
+ struct nvme_id_ns *id;
+ u16 status = 0;
+
+ ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
+ if (!ns) {
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto out;
+ }
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_ns;
+ }
+
+ /*
+ * nuse = ncap = nsze isn't always true, but we have no way to find
+ * that out from the underlying device.
+ */
+ id->ncap = id->nuse = id->nsze =
+ cpu_to_le64(ns->size >> ns->blksize_shift);
+
+ /*
+ * We just provide a single LBA format that matches what the
+ * underlying device reports.
+ */
+ id->nlbaf = 0;
+ id->flbas = 0;
+
+ /*
+ * Our namespace might always be shared. Not just with other
+ * controllers, but also with any other user of the block device.
+ */
+ id->nmic = (1 << 0);
+
+ memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));
+
+ id->lbaf[0].ds = ns->blksize_shift;
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out_put_ns:
+ nvmet_put_namespace(ns);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_nslist(struct nvmet_req *req)
+{
+ static const int buf_size = 4096;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_ns *ns;
+ u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
+ __le32 *list;
+ u16 status = 0;
+ int i = 0;
+
+ list = kzalloc(buf_size, GFP_KERNEL);
+ if (!list) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
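+ /* Return active NSIDs greater than the given one, up to 1024 entries. */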
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ if (ns->nsid <= min_nsid)
+ continue;
+ list[i++] = cpu_to_le32(ns->nsid);
+ if (i == buf_size / sizeof(__le32))
+ break;
+ }
+ rcu_read_unlock();
+
+ status = nvmet_copy_to_sgl(req, 0, list, buf_size);
+
+ kfree(list);
+out:
+ nvmet_req_complete(req, status);
+}
+
+/*
+ * A "mimimum viable" abort implementation: the command is mandatory in the
+ * spec, but we are not required to do any useful work. We couldn't really
+ * do a useful abort, so don't bother even with waiting for the command
+ * to be exectuted and return immediately telling the command to abort
+ * wasn't found.
+ */
+static void nvmet_execute_abort(struct nvmet_req *req)
+{
+ nvmet_set_result(req, 1);
+ nvmet_req_complete(req, 0);
+}
+
+static void nvmet_execute_set_features(struct nvmet_req *req)
+{
+ struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
+ u64 val;
+ u32 val32;
+ u16 status = 0;
+
+ switch (cdw10 & 0xf) {
+ case NVME_FEAT_NUM_QUEUES:
+ nvmet_set_result(req,
+ (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
+ break;
+ case NVME_FEAT_KATO:
+ val = le64_to_cpu(req->cmd->prop_set.value);
+ val32 = val & 0xffff;
+ req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+ nvmet_set_result(req, req->sq->ctrl->kato);
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_get_features(struct nvmet_req *req)
+{
+ struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
+ u16 status = 0;
+
+ switch (cdw10 & 0xf) {
+ /*
+ * These features are mandatory in the spec, but we don't
+ * have a useful way to implement them. We'll eventually
+ * need to come up with some fake values for these.
+ */
+#if 0
+ case NVME_FEAT_ARBITRATION:
+ break;
+ case NVME_FEAT_POWER_MGMT:
+ break;
+ case NVME_FEAT_TEMP_THRESH:
+ break;
+ case NVME_FEAT_ERR_RECOVERY:
+ break;
+ case NVME_FEAT_IRQ_COALESCE:
+ break;
+ case NVME_FEAT_IRQ_CONFIG:
+ break;
+ case NVME_FEAT_WRITE_ATOMIC:
+ break;
+ case NVME_FEAT_ASYNC_EVENT:
+ break;
+#endif
+ case NVME_FEAT_VOLATILE_WC:
+ nvmet_set_result(req, 1);
+ break;
+ case NVME_FEAT_NUM_QUEUES:
+ nvmet_set_result(req,
+ (subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
+ break;
+ case NVME_FEAT_KATO:
+ nvmet_set_result(req, req->sq->ctrl->kato * 1000);
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_async_event(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+ mutex_lock(&ctrl->lock);
+ if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
+ return;
+ }
+ ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
+ mutex_unlock(&ctrl->lock);
+
+ schedule_work(&ctrl->async_event_work);
+}
+
+static void nvmet_execute_keep_alive(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+ pr_debug("ctrl %d update keep-alive timer for %d secs\n",
+ ctrl->cntlid, ctrl->kato);
+
+ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ nvmet_req_complete(req, 0);
+}
+
+int nvmet_parse_admin_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ req->ns = NULL;
+
+ if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
+ pr_err("nvmet: got admin cmd %d while CC.EN == 0\n",
+ cmd->common.opcode);
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+ if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+ pr_err("nvmet: got admin cmd %d while CSTS.RDY == 0\n",
+ cmd->common.opcode);
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ req->data_len = nvmet_get_log_page_len(cmd);
+
+ switch (cmd->get_log_page.lid) {
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ req->execute = nvmet_execute_get_log_page;
+ return 0;
+ }
+ break;
+ case nvme_admin_identify:
+ req->data_len = 4096;
+ switch (le32_to_cpu(cmd->identify.cns)) {
+ case 0x00:
+ req->execute = nvmet_execute_identify_ns;
+ return 0;
+ case 0x01:
+ req->execute = nvmet_execute_identify_ctrl;
+ return 0;
+ case 0x02:
+ req->execute = nvmet_execute_identify_nslist;
+ return 0;
+ }
+ break;
+ case nvme_admin_abort_cmd:
+ req->execute = nvmet_execute_abort;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_set_features:
+ req->execute = nvmet_execute_set_features;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_get_features:
+ req->execute = nvmet_execute_get_features;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_async_event:
+ req->execute = nvmet_execute_async_event;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_keep_alive:
+ req->execute = nvmet_execute_keep_alive;
+ req->data_len = 0;
+ return 0;
+ }
+
+ pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
new file mode 100644
index 000000000000..af5e2dc4a3d5
--- /dev/null
+++ b/drivers/nvme/target/configfs.c
@@ -0,0 +1,917 @@
+/*
+ * Configfs interface for the NVMe target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/ctype.h>
+
+#include "nvmet.h"
+
+static struct config_item_type nvmet_host_type;
+static struct config_item_type nvmet_subsys_type;
+
+/*
+ * nvmet_port Generic ConfigFS definitions.
+ * Used in any place in the ConfigFS tree that refers to an address.
+ */
+static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
+ char *page)
+{
+ switch (to_nvmet_port(item)->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ return sprintf(page, "ipv4\n");
+ case NVMF_ADDR_FAMILY_IP6:
+ return sprintf(page, "ipv6\n");
+ case NVMF_ADDR_FAMILY_IB:
+ return sprintf(page, "ib\n");
+ default:
+ return sprintf(page, "\n");
+ }
+}
+
+static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+
+ if (sysfs_streq(page, "ipv4")) {
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
+ } else if (sysfs_streq(page, "ipv6")) {
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
+ } else if (sysfs_streq(page, "ib")) {
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
+ } else {
+ pr_err("Invalid value '%s' for adrfam\n", page);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_adrfam);
+
+static ssize_t nvmet_addr_portid_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ le16_to_cpu(port->disc_addr.portid));
+}
+
+static ssize_t nvmet_addr_portid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ u16 portid = 0;
+
+ if (kstrtou16(page, 0, &portid)) {
+ pr_err("Invalid value '%s' for portid\n", page);
+ return -EINVAL;
+ }
+
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+ port->disc_addr.portid = cpu_to_le16(portid);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_portid);
+
+static ssize_t nvmet_addr_traddr_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ port->disc_addr.traddr);
+}
+
+static ssize_t nvmet_addr_traddr_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (count > NVMF_TRADDR_SIZE) {
+ pr_err("Invalid value '%s' for traddr\n", page);
+ return -EINVAL;
+ }
+
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+ return snprintf(port->disc_addr.traddr,
+ sizeof(port->disc_addr.traddr), "%s", page);
+}
+
+CONFIGFS_ATTR(nvmet_, addr_traddr);
+
+static ssize_t nvmet_addr_treq_show(struct config_item *item,
+ char *page)
+{
+ switch (to_nvmet_port(item)->disc_addr.treq) {
+ case NVMF_TREQ_NOT_SPECIFIED:
+ return sprintf(page, "not specified\n");
+ case NVMF_TREQ_REQUIRED:
+ return sprintf(page, "required\n");
+ case NVMF_TREQ_NOT_REQUIRED:
+ return sprintf(page, "not required\n");
+ default:
+ return sprintf(page, "\n");
+ }
+}
+
+static ssize_t nvmet_addr_treq_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+
+ if (sysfs_streq(page, "not specified")) {
+ port->disc_addr.treq = NVMF_TREQ_NOT_SPECIFIED;
+ } else if (sysfs_streq(page, "required")) {
+ port->disc_addr.treq = NVMF_TREQ_REQUIRED;
+ } else if (sysfs_streq(page, "not required")) {
+ port->disc_addr.treq = NVMF_TREQ_NOT_REQUIRED;
+ } else {
+ pr_err("Invalid value '%s' for treq\n", page);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_treq);
+
+static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ port->disc_addr.trsvcid);
+}
+
+static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (count > NVMF_TRSVCID_SIZE) {
+ pr_err("Invalid value '%s' for trsvcid\n", page);
+ return -EINVAL;
+ }
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+ return snprintf(port->disc_addr.trsvcid,
+ sizeof(port->disc_addr.trsvcid), "%s", page);
+}
+
+CONFIGFS_ATTR(nvmet_, addr_trsvcid);
+
+static ssize_t nvmet_addr_trtype_show(struct config_item *item,
+ char *page)
+{
+ switch (to_nvmet_port(item)->disc_addr.trtype) {
+ case NVMF_TRTYPE_RDMA:
+ return sprintf(page, "rdma\n");
+ case NVMF_TRTYPE_LOOP:
+ return sprintf(page, "loop\n");
+ default:
+ return sprintf(page, "\n");
+ }
+}
+
+static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
+{
+ port->disc_addr.trtype = NVMF_TRTYPE_RDMA;
+ memset(&port->disc_addr.tsas.rdma, 0, NVMF_TSAS_SIZE);
+ port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
+ port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
+ port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
+}
+
+static void nvmet_port_init_tsas_loop(struct nvmet_port *port)
+{
+ port->disc_addr.trtype = NVMF_TRTYPE_LOOP;
+ memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
+}
+
+static ssize_t nvmet_addr_trtype_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+
+ if (sysfs_streq(page, "rdma")) {
+ nvmet_port_init_tsas_rdma(port);
+ } else if (sysfs_streq(page, "loop")) {
+ nvmet_port_init_tsas_loop(port);
+ } else {
+ pr_err("Invalid value '%s' for trtype\n", page);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_trtype);
+
+/*
+ * Namespace structures & file operation functions below
+ */
+static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
+}
+
+static ssize_t nvmet_ns_device_path_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct nvmet_subsys *subsys = ns->subsys;
+ int ret;
+
+ mutex_lock(&subsys->lock);
+ ret = -EBUSY;
+ if (nvmet_ns_enabled(ns))
+ goto out_unlock;
+
+ kfree(ns->device_path);
+
+ ret = -ENOMEM;
+ ns->device_path = kstrdup(page, GFP_KERNEL);
+ if (!ns->device_path)
+ goto out_unlock;
+
+ mutex_unlock(&subsys->lock);
+ return count;
+
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, device_path);
+
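+/*
+ * The nguid store handler below expects 16 hex byte pairs, each pair
+ * optionally followed by a '-' or ':' separator, so (to give an
+ * illustrative value) both "0e532f1345d34210bf3947d21e703bcd" and
+ * "0e532f13-45d3-4210-bf39-47d21e703bcd" parse to the same 16-byte NGUID.
+ */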
+static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
+}
+
+static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct nvmet_subsys *subsys = ns->subsys;
+ u8 nguid[16];
+ const char *p = page;
+ int i;
+ int ret = 0;
+
+ mutex_lock(&subsys->lock);
+ if (nvmet_ns_enabled(ns)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (p + 2 > page + count) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!isxdigit(p[0]) || !isxdigit(p[1])) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
+ p += 2;
+
+ if (*p == '-' || *p == ':')
+ p++;
+ }
+
+ memcpy(&ns->nguid, nguid, sizeof(nguid));
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, device_nguid);
+
+static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
+}
+
+static ssize_t nvmet_ns_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool enable;
+ int ret = 0;
+
+ if (strtobool(page, &enable))
+ return -EINVAL;
+
+ if (enable)
+ ret = nvmet_ns_enable(ns);
+ else
+ nvmet_ns_disable(ns);
+
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, enable);
+
+static struct configfs_attribute *nvmet_ns_attrs[] = {
+ &nvmet_ns_attr_device_path,
+ &nvmet_ns_attr_device_nguid,
+ &nvmet_ns_attr_enable,
+ NULL,
+};
+
+static void nvmet_ns_release(struct config_item *item)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+
+ nvmet_ns_free(ns);
+}
+
+static struct configfs_item_operations nvmet_ns_item_ops = {
+ .release = nvmet_ns_release,
+};
+
+static struct config_item_type nvmet_ns_type = {
+ .ct_item_ops = &nvmet_ns_item_ops,
+ .ct_attrs = nvmet_ns_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_ns_make(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
+ struct nvmet_ns *ns;
+ int ret;
+ u32 nsid;
+
+ ret = kstrtou32(name, 0, &nsid);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ if (nsid == 0 || nsid == 0xffffffff)
+ goto out;
+
+ ret = -ENOMEM;
+ ns = nvmet_ns_alloc(subsys, nsid);
+ if (!ns)
+ goto out;
+ config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
+
+ pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
+
+ return &ns->group;
+out:
+ return ERR_PTR(ret);
+}
+
+static struct configfs_group_operations nvmet_namespaces_group_ops = {
+ .make_group = nvmet_ns_make,
+};
+
+static struct config_item_type nvmet_namespaces_type = {
+ .ct_group_ops = &nvmet_namespaces_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static int nvmet_port_subsys_allow_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
+ struct nvmet_subsys *subsys;
+ struct nvmet_subsys_link *link, *p;
+ int ret;
+
+ if (target->ci_type != &nvmet_subsys_type) {
+ pr_err("can only link subsystems into the subsystems dir.!\n");
+ return -EINVAL;
+ }
+ subsys = to_subsys(target);
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+ link->subsys = subsys;
+
+ down_write(&nvmet_config_sem);
+ ret = -EEXIST;
+ list_for_each_entry(p, &port->subsystems, entry) {
+ if (p->subsys == subsys)
+ goto out_free_link;
+ }
+
+ if (list_empty(&port->subsystems)) {
+ ret = nvmet_enable_port(port);
+ if (ret)
+ goto out_free_link;
+ }
+
+ list_add_tail(&link->entry, &port->subsystems);
+ nvmet_genctr++;
+ up_write(&nvmet_config_sem);
+ return 0;
+
+out_free_link:
+ up_write(&nvmet_config_sem);
+ kfree(link);
+ return ret;
+}
+
+static int nvmet_port_subsys_drop_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
+ struct nvmet_subsys *subsys = to_subsys(target);
+ struct nvmet_subsys_link *p;
+
+ down_write(&nvmet_config_sem);
+ list_for_each_entry(p, &port->subsystems, entry) {
+ if (p->subsys == subsys)
+ goto found;
+ }
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+
+found:
+ list_del(&p->entry);
+ nvmet_genctr++;
+ if (list_empty(&port->subsystems))
+ nvmet_disable_port(port);
+ up_write(&nvmet_config_sem);
+ kfree(p);
+ return 0;
+}
+
+static struct configfs_item_operations nvmet_port_subsys_item_ops = {
+ .allow_link = nvmet_port_subsys_allow_link,
+ .drop_link = nvmet_port_subsys_drop_link,
+};
+
+static struct config_item_type nvmet_port_subsys_type = {
+ .ct_item_ops = &nvmet_port_subsys_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
+ struct nvmet_host *host;
+ struct nvmet_host_link *link, *p;
+ int ret;
+
+ if (target->ci_type != &nvmet_host_type) {
+ pr_err("can only link hosts into the allowed_hosts directory!\n");
+ return -EINVAL;
+ }
+
+ host = to_host(target);
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+ link->host = host;
+
+ down_write(&nvmet_config_sem);
+ ret = -EINVAL;
+ if (subsys->allow_any_host) {
+ pr_err("can't add hosts when allow_any_host is set!\n");
+ goto out_free_link;
+ }
+
+ ret = -EEXIST;
+ list_for_each_entry(p, &subsys->hosts, entry) {
+ if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+ goto out_free_link;
+ }
+ list_add_tail(&link->entry, &subsys->hosts);
+ nvmet_genctr++;
+ up_write(&nvmet_config_sem);
+ return 0;
+out_free_link:
+ up_write(&nvmet_config_sem);
+ kfree(link);
+ return ret;
+}
+
+static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
+ struct nvmet_host *host = to_host(target);
+ struct nvmet_host_link *p;
+
+ down_write(&nvmet_config_sem);
+ list_for_each_entry(p, &subsys->hosts, entry) {
+ if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+ goto found;
+ }
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+
+found:
+ list_del(&p->entry);
+ nvmet_genctr++;
+ up_write(&nvmet_config_sem);
+ kfree(p);
+ return 0;
+}
+
+static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
+ .allow_link = nvmet_allowed_hosts_allow_link,
+ .drop_link = nvmet_allowed_hosts_drop_link,
+};
+
+static struct config_item_type nvmet_allowed_hosts_type = {
+ .ct_item_ops = &nvmet_allowed_hosts_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ to_subsys(item)->allow_any_host);
+}
+
+static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ bool allow_any_host;
+ int ret = 0;
+
+ if (strtobool(page, &allow_any_host))
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (allow_any_host && !list_empty(&subsys->hosts)) {
+ pr_err("Can't set allow_any_host when explicit hosts are set!\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ subsys->allow_any_host = allow_any_host;
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
+
+static struct configfs_attribute *nvmet_subsys_attrs[] = {
+ &nvmet_subsys_attr_attr_allow_any_host,
+ NULL,
+};
+
+/*
+ * Subsystem structures & folder operation functions below
+ */
+static void nvmet_subsys_release(struct config_item *item)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ nvmet_subsys_put(subsys);
+}
+
+static struct configfs_item_operations nvmet_subsys_item_ops = {
+ .release = nvmet_subsys_release,
+};
+
+static struct config_item_type nvmet_subsys_type = {
+ .ct_item_ops = &nvmet_subsys_item_ops,
+ .ct_attrs = nvmet_subsys_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_subsys_make(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_subsys *subsys;
+
+ if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
+ pr_err("can't create discovery subsystem through configfs\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
+ if (!subsys)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
+
+ config_group_init_type_name(&subsys->namespaces_group,
+ "namespaces", &nvmet_namespaces_type);
+ configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
+
+ config_group_init_type_name(&subsys->allowed_hosts_group,
+ "allowed_hosts", &nvmet_allowed_hosts_type);
+ configfs_add_default_group(&subsys->allowed_hosts_group,
+ &subsys->group);
+
+ return &subsys->group;
+}
+
+static struct configfs_group_operations nvmet_subsystems_group_ops = {
+ .make_group = nvmet_subsys_make,
+};
+
+static struct config_item_type nvmet_subsystems_type = {
+ .ct_group_ops = &nvmet_subsystems_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static ssize_t nvmet_referral_enable_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
+}
+
+static ssize_t nvmet_referral_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
+ struct nvmet_port *port = to_nvmet_port(item);
+ bool enable;
+
+ if (strtobool(page, &enable))
+ goto inval;
+
+ if (enable)
+ nvmet_referral_enable(parent, port);
+ else
+ nvmet_referral_disable(port);
+
+ return count;
+inval:
+ pr_err("Invalid value '%s' for enable\n", page);
+ return -EINVAL;
+}
+
+CONFIGFS_ATTR(nvmet_referral_, enable);
+
+/*
+ * Discovery Service subsystem definitions
+ */
+static struct configfs_attribute *nvmet_referral_attrs[] = {
+ &nvmet_attr_addr_adrfam,
+ &nvmet_attr_addr_portid,
+ &nvmet_attr_addr_treq,
+ &nvmet_attr_addr_traddr,
+ &nvmet_attr_addr_trsvcid,
+ &nvmet_attr_addr_trtype,
+ &nvmet_referral_attr_enable,
+ NULL,
+};
+
+static void nvmet_referral_release(struct config_item *item)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ nvmet_referral_disable(port);
+ kfree(port);
+}
+
+static struct configfs_item_operations nvmet_referral_item_ops = {
+ .release = nvmet_referral_release,
+};
+
+static struct config_item_type nvmet_referral_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_attrs = nvmet_referral_attrs,
+ .ct_item_ops = &nvmet_referral_item_ops,
+};
+
+static struct config_group *nvmet_referral_make(
+ struct config_group *group, const char *name)
+{
+ struct nvmet_port *port;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&port->entry);
+ config_group_init_type_name(&port->group, name, &nvmet_referral_type);
+
+ return &port->group;
+}
+
+static struct configfs_group_operations nvmet_referral_group_ops = {
+ .make_group = nvmet_referral_make,
+};
+
+static struct config_item_type nvmet_referrals_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_group_ops = &nvmet_referral_group_ops,
+};
+
+/*
+ * Ports definitions.
+ */
+static void nvmet_port_release(struct config_item *item)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ kfree(port);
+}
+
+static struct configfs_attribute *nvmet_port_attrs[] = {
+ &nvmet_attr_addr_adrfam,
+ &nvmet_attr_addr_treq,
+ &nvmet_attr_addr_traddr,
+ &nvmet_attr_addr_trsvcid,
+ &nvmet_attr_addr_trtype,
+ NULL,
+};
+
+static struct configfs_item_operations nvmet_port_item_ops = {
+ .release = nvmet_port_release,
+};
+
+static struct config_item_type nvmet_port_type = {
+ .ct_attrs = nvmet_port_attrs,
+ .ct_item_ops = &nvmet_port_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
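+/*
+ * Port directories are created by the administrator with mkdir(2); the
+ * directory name must parse as a 16-bit port ID (e.g. "1"), which is
+ * stored as the port ID of the discovery address below.
+ */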
+static struct config_group *nvmet_ports_make(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_port *port;
+ u16 portid;
+
+ if (kstrtou16(name, 0, &portid))
+ return ERR_PTR(-EINVAL);
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&port->entry);
+ INIT_LIST_HEAD(&port->subsystems);
+ INIT_LIST_HEAD(&port->referrals);
+
+ port->disc_addr.portid = cpu_to_le16(portid);
+ config_group_init_type_name(&port->group, name, &nvmet_port_type);
+
+ config_group_init_type_name(&port->subsys_group,
+ "subsystems", &nvmet_port_subsys_type);
+ configfs_add_default_group(&port->subsys_group, &port->group);
+
+ config_group_init_type_name(&port->referrals_group,
+ "referrals", &nvmet_referrals_type);
+ configfs_add_default_group(&port->referrals_group, &port->group);
+
+ return &port->group;
+}
+
+static struct configfs_group_operations nvmet_ports_group_ops = {
+ .make_group = nvmet_ports_make,
+};
+
+static struct config_item_type nvmet_ports_type = {
+ .ct_group_ops = &nvmet_ports_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group nvmet_subsystems_group;
+static struct config_group nvmet_ports_group;
+
+static void nvmet_host_release(struct config_item *item)
+{
+ struct nvmet_host *host = to_host(item);
+
+ kfree(host);
+}
+
+static struct configfs_item_operations nvmet_host_item_ops = {
+ .release = nvmet_host_release,
+};
+
+static struct config_item_type nvmet_host_type = {
+ .ct_item_ops = &nvmet_host_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_hosts_make_group(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_host *host;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&host->group, name, &nvmet_host_type);
+
+ return &host->group;
+}
+
+static struct configfs_group_operations nvmet_hosts_group_ops = {
+ .make_group = nvmet_hosts_make_group,
+};
+
+static struct config_item_type nvmet_hosts_type = {
+ .ct_group_ops = &nvmet_hosts_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group nvmet_hosts_group;
+
+static struct config_item_type nvmet_root_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem nvmet_configfs_subsystem = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "nvmet",
+ .ci_type = &nvmet_root_type,
+ },
+ },
+};
+
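+/*
+ * Registering the subsystem below creates the following tree under the
+ * configfs mount point (typically /sys/kernel/config):
+ *
+ *	nvmet/
+ *	+-- hosts/
+ *	+-- ports/
+ *	+-- subsystems/
+ *
+ * into which hosts, ports and subsystems are then created with
+ * mkdir(2).
+ */
+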
+int __init nvmet_init_configfs(void)
+{
+ int ret;
+
+ config_group_init(&nvmet_configfs_subsystem.su_group);
+ mutex_init(&nvmet_configfs_subsystem.su_mutex);
+
+ config_group_init_type_name(&nvmet_subsystems_group,
+ "subsystems", &nvmet_subsystems_type);
+ configfs_add_default_group(&nvmet_subsystems_group,
+ &nvmet_configfs_subsystem.su_group);
+
+ config_group_init_type_name(&nvmet_ports_group,
+ "ports", &nvmet_ports_type);
+ configfs_add_default_group(&nvmet_ports_group,
+ &nvmet_configfs_subsystem.su_group);
+
+ config_group_init_type_name(&nvmet_hosts_group,
+ "hosts", &nvmet_hosts_type);
+ configfs_add_default_group(&nvmet_hosts_group,
+ &nvmet_configfs_subsystem.su_group);
+
+ ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
+ if (ret) {
+ pr_err("configfs_register_subsystem: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void __exit nvmet_exit_configfs(void)
+{
+ configfs_unregister_subsystem(&nvmet_configfs_subsystem);
+}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
new file mode 100644
index 000000000000..8a891ca53367
--- /dev/null
+++ b/drivers/nvme/target/core.c
@@ -0,0 +1,964 @@
+/*
+ * Common code for the NVMe target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include "nvmet.h"
+
+static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+
+/*
+ * This read/write semaphore is used to synchronize access to configuration
+ * information on a target system that will result in discovery log page
+ * information change for at least one host.
+ * The full list of resources protected by this semaphore is:
+ *
+ * - subsystems list
+ * - per-subsystem allowed hosts list
+ * - allow_any_host subsystem attribute
+ * - nvmet_genctr
+ * - the nvmet_transports array
+ *
+ * When updating any of those lists/structures the write lock must be
+ * held, while readers (populating the discovery log page or checking a
+ * host-subsystem link) take the read lock to allow concurrent access.
+ */
+DECLARE_RWSEM(nvmet_config_sem);
+
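+/*
+ * A minimal usage sketch: readers that only walk the configuration,
+ * like the discovery code later in this patch, take the read side:
+ *
+ *	down_read(&nvmet_config_sem);
+ *	list_for_each_entry(p, &port->subsystems, entry)
+ *		... build a log page entry ...
+ *	up_read(&nvmet_config_sem);
+ *
+ * while the configfs store and link handlers that mutate these lists
+ * take down_write()/up_write().
+ */
+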
+static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
+ const char *subsysnqn);
+
+u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
+ size_t len)
+{
+ if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+ return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return 0;
+}
+
+u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
+{
+ if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+ return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return 0;
+}
+
+static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
+{
+ return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
+}
+
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_req *req;
+
+ while (1) {
+ mutex_lock(&ctrl->lock);
+ if (!ctrl->nr_async_event_cmds) {
+ mutex_unlock(&ctrl->lock);
+ return;
+ }
+
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+ }
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, async_event_work);
+ struct nvmet_async_event *aen;
+ struct nvmet_req *req;
+
+ while (1) {
+ mutex_lock(&ctrl->lock);
+ aen = list_first_entry_or_null(&ctrl->async_events,
+ struct nvmet_async_event, entry);
+ if (!aen || !ctrl->nr_async_event_cmds) {
+ mutex_unlock(&ctrl->lock);
+ return;
+ }
+
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ nvmet_set_result(req, nvmet_async_event_result(aen));
+
+ list_del(&aen->entry);
+ kfree(aen);
+
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, 0);
+ }
+}
+
+static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+ u8 event_info, u8 log_page)
+{
+ struct nvmet_async_event *aen;
+
+ aen = kmalloc(sizeof(*aen), GFP_KERNEL);
+ if (!aen)
+ return;
+
+ aen->event_type = event_type;
+ aen->event_info = event_info;
+ aen->log_page = log_page;
+
+ mutex_lock(&ctrl->lock);
+ list_add_tail(&aen->entry, &ctrl->async_events);
+ mutex_unlock(&ctrl->lock);
+
+ schedule_work(&ctrl->async_event_work);
+}
+
+int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
+{
+ int ret = 0;
+
+ down_write(&nvmet_config_sem);
+ if (nvmet_transports[ops->type])
+ ret = -EINVAL;
+ else
+ nvmet_transports[ops->type] = ops;
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmet_register_transport);
+
+void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
+{
+ down_write(&nvmet_config_sem);
+ nvmet_transports[ops->type] = NULL;
+ up_write(&nvmet_config_sem);
+}
+EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
+
+int nvmet_enable_port(struct nvmet_port *port)
+{
+ struct nvmet_fabrics_ops *ops;
+ int ret;
+
+ lockdep_assert_held(&nvmet_config_sem);
+
+ ops = nvmet_transports[port->disc_addr.trtype];
+ if (!ops) {
+ up_write(&nvmet_config_sem);
+ request_module("nvmet-transport-%d", port->disc_addr.trtype);
+ down_write(&nvmet_config_sem);
+ ops = nvmet_transports[port->disc_addr.trtype];
+ if (!ops) {
+ pr_err("transport type %d not supported\n",
+ port->disc_addr.trtype);
+ return -EINVAL;
+ }
+ }
+
+ if (!try_module_get(ops->owner))
+ return -EINVAL;
+
+ ret = ops->add_port(port);
+ if (ret) {
+ module_put(ops->owner);
+ return ret;
+ }
+
+ port->enabled = true;
+ return 0;
+}
+
+void nvmet_disable_port(struct nvmet_port *port)
+{
+ struct nvmet_fabrics_ops *ops;
+
+ lockdep_assert_held(&nvmet_config_sem);
+
+ port->enabled = false;
+
+ ops = nvmet_transports[port->disc_addr.trtype];
+ ops->remove_port(port);
+ module_put(ops->owner);
+}
+
+static void nvmet_keep_alive_timer(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvmet_ctrl, ka_work);
+
+ pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
+ ctrl->cntlid, ctrl->kato);
+
+ ctrl->ops->delete_ctrl(ctrl);
+}
+
+static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+{
+ pr_debug("ctrl %d start keep-alive timer for %d secs\n",
+ ctrl->cntlid, ctrl->kato);
+
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+
+static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+{
+ pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
+
+ cancel_delayed_work_sync(&ctrl->ka_work);
+}
+
+static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
+ __le32 nsid)
+{
+ struct nvmet_ns *ns;
+
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ if (ns->nsid == le32_to_cpu(nsid))
+ return ns;
+ }
+
+ return NULL;
+}
+
+struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
+{
+ struct nvmet_ns *ns;
+
+ rcu_read_lock();
+ ns = __nvmet_find_namespace(ctrl, nsid);
+ if (ns)
+ percpu_ref_get(&ns->ref);
+ rcu_read_unlock();
+
+ return ns;
+}
+
+static void nvmet_destroy_namespace(struct percpu_ref *ref)
+{
+ struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
+
+ complete(&ns->disable_done);
+}
+
+void nvmet_put_namespace(struct nvmet_ns *ns)
+{
+ percpu_ref_put(&ns->ref);
+}
+
+int nvmet_ns_enable(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
+ int ret = 0;
+
+ mutex_lock(&subsys->lock);
+ if (!list_empty(&ns->dev_link))
+ goto out_unlock;
+
+ ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
+ NULL);
+ if (IS_ERR(ns->bdev)) {
+ pr_err("nvmet: failed to open block device %s: (%ld)\n",
+ ns->device_path, PTR_ERR(ns->bdev));
+ ret = PTR_ERR(ns->bdev);
+ ns->bdev = NULL;
+ goto out_unlock;
+ }
+
+ ns->size = i_size_read(ns->bdev->bd_inode);
+ ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+ ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
+ 0, GFP_KERNEL);
+ if (ret)
+ goto out_blkdev_put;
+
+ if (ns->nsid > subsys->max_nsid)
+ subsys->max_nsid = ns->nsid;
+
+ /*
+ * The namespaces list needs to be sorted to simplify the implementation
+ * of the Identify Namespace List subcommand.
+ */
+ if (list_empty(&subsys->namespaces)) {
+ list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
+ } else {
+ struct nvmet_ns *old;
+
+ list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
+ BUG_ON(ns->nsid == old->nsid);
+ if (ns->nsid < old->nsid)
+ break;
+ }
+
+ list_add_tail_rcu(&ns->dev_link, &old->dev_link);
+ }
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
+
+ ret = 0;
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+out_blkdev_put:
+ blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
+ ns->bdev = NULL;
+ goto out_unlock;
+}
+
+void nvmet_ns_disable(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ if (list_empty(&ns->dev_link)) {
+ mutex_unlock(&subsys->lock);
+ return;
+ }
+ list_del_init(&ns->dev_link);
+ mutex_unlock(&subsys->lock);
+
+ /*
+ * Now that we removed the namespaces from the lookup list, we
+ * can kill the per_cpu ref and wait for any remaining references
+ * to be dropped, as well as an RCU grace period for anyone only
+ * using the namespace under rcu_read_lock(). Note that we can't
+ * use call_rcu here as we need to ensure the namespaces have
+ * been fully destroyed before unloading the module.
+ */
+ percpu_ref_kill(&ns->ref);
+ synchronize_rcu();
+ wait_for_completion(&ns->disable_done);
+ percpu_ref_exit(&ns->ref);
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
+
+ if (ns->bdev)
+ blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_ns_free(struct nvmet_ns *ns)
+{
+ nvmet_ns_disable(ns);
+
+ kfree(ns->device_path);
+ kfree(ns);
+}
+
+struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
+{
+ struct nvmet_ns *ns;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
+ return NULL;
+
+ INIT_LIST_HEAD(&ns->dev_link);
+ init_completion(&ns->disable_done);
+
+ ns->nsid = nsid;
+ ns->subsys = subsys;
+
+ return ns;
+}
+
+static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+ if (status)
+ nvmet_set_status(req, status);
+
+ /* XXX: need to fill in something useful for sq_head */
+ req->rsp->sq_head = 0;
+ if (likely(req->sq)) /* may happen during early failure */
+ req->rsp->sq_id = cpu_to_le16(req->sq->qid);
+ req->rsp->command_id = req->cmd->common.command_id;
+
+ if (req->ns)
+ nvmet_put_namespace(req->ns);
+ req->ops->queue_response(req);
+}
+
+void nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+ __nvmet_req_complete(req, status);
+ percpu_ref_put(&req->sq->ref);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_complete);
+
+void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
+ u16 qid, u16 size)
+{
+ cq->qid = qid;
+ cq->size = size;
+
+ ctrl->cqs[qid] = cq;
+}
+
+void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
+ u16 qid, u16 size)
+{
+ sq->qid = qid;
+ sq->size = size;
+
+ ctrl->sqs[qid] = sq;
+}
+
+void nvmet_sq_destroy(struct nvmet_sq *sq)
+{
+ /*
+ * If this is the admin queue, complete all AERs so that our
+ * queue doesn't have outstanding requests on it.
+ */
+ if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
+ nvmet_async_events_free(sq->ctrl);
+ percpu_ref_kill(&sq->ref);
+ wait_for_completion(&sq->free_done);
+ percpu_ref_exit(&sq->ref);
+
+ if (sq->ctrl) {
+ nvmet_ctrl_put(sq->ctrl);
+ sq->ctrl = NULL; /* allows reusing the queue later */
+ }
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
+
+static void nvmet_sq_free(struct percpu_ref *ref)
+{
+ struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+ complete(&sq->free_done);
+}
+
+int nvmet_sq_init(struct nvmet_sq *sq)
+{
+ int ret;
+
+ ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
+ if (ret) {
+ pr_err("percpu_ref init failed!\n");
+ return ret;
+ }
+ init_completion(&sq->free_done);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_init);
+
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
+{
+ u8 flags = req->cmd->common.flags;
+ u16 status;
+
+ req->cq = cq;
+ req->sq = sq;
+ req->ops = ops;
+ req->sg = NULL;
+ req->sg_cnt = 0;
+ req->rsp->status = 0;
+
+ /* no support for fused commands yet */
+ if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto fail;
+ }
+
+ /* either variant of SGLs is fine, as we don't support metadata */
+ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
+ (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto fail;
+ }
+
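+ /*
+ * Dispatch in priority order: a queue without a controller only
+ * accepts Connect, non-admin queues get I/O commands, and the
+ * admin queue handles fabrics capsules, discovery commands (on
+ * discovery controllers) or regular admin commands.
+ */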
+ if (unlikely(!req->sq->ctrl))
+ /* will return an error for any non-Connect command: */
+ status = nvmet_parse_connect_cmd(req);
+ else if (likely(req->sq->qid != 0))
+ status = nvmet_parse_io_cmd(req);
+ else if (req->cmd->common.opcode == nvme_fabrics_command)
+ status = nvmet_parse_fabrics_cmd(req);
+ else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+ status = nvmet_parse_discovery_cmd(req);
+ else
+ status = nvmet_parse_admin_cmd(req);
+
+ if (status)
+ goto fail;
+
+ if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto fail;
+ }
+
+ return true;
+
+fail:
+ __nvmet_req_complete(req, status);
+ return false;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_init);
+
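+/*
+ * Field extractors for the Controller Configuration (CC) property; bit
+ * positions follow the NVMe specification: EN is bit 0, CSS bits 6:4,
+ * MPS bits 10:7, AMS bits 13:11, SHN bits 15:14, IOSQES bits 19:16 and
+ * IOCQES bits 23:20.
+ */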
+static inline bool nvmet_cc_en(u32 cc)
+{
+ return cc & 0x1;
+}
+
+static inline u8 nvmet_cc_css(u32 cc)
+{
+ return (cc >> 4) & 0x7;
+}
+
+static inline u8 nvmet_cc_mps(u32 cc)
+{
+ return (cc >> 7) & 0xf;
+}
+
+static inline u8 nvmet_cc_ams(u32 cc)
+{
+ return (cc >> 11) & 0x7;
+}
+
+static inline u8 nvmet_cc_shn(u32 cc)
+{
+ return (cc >> 14) & 0x3;
+}
+
+static inline u8 nvmet_cc_iosqes(u32 cc)
+{
+ return (cc >> 16) & 0xf;
+}
+
+static inline u8 nvmet_cc_iocqes(u32 cc)
+{
+ return (cc >> 20) & 0xf;
+}
+
+static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
+{
+ lockdep_assert_held(&ctrl->lock);
+
+ if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
+ nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
+ nvmet_cc_mps(ctrl->cc) != 0 ||
+ nvmet_cc_ams(ctrl->cc) != 0 ||
+ nvmet_cc_css(ctrl->cc) != 0) {
+ ctrl->csts = NVME_CSTS_CFS;
+ return;
+ }
+
+ ctrl->csts = NVME_CSTS_RDY;
+}
+
+static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
+{
+ lockdep_assert_held(&ctrl->lock);
+
+ /* XXX: tear down queues? */
+ ctrl->csts &= ~NVME_CSTS_RDY;
+ ctrl->cc = 0;
+}
+
+void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
+{
+ u32 old;
+
+ mutex_lock(&ctrl->lock);
+ old = ctrl->cc;
+ ctrl->cc = new;
+
+ if (nvmet_cc_en(new) && !nvmet_cc_en(old))
+ nvmet_start_ctrl(ctrl);
+ if (!nvmet_cc_en(new) && nvmet_cc_en(old))
+ nvmet_clear_ctrl(ctrl);
+ if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
+ nvmet_clear_ctrl(ctrl);
+ ctrl->csts |= NVME_CSTS_SHST_CMPLT;
+ }
+ if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
+ ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
+{
+ /* command sets supported: NVMe command set: */
+ ctrl->cap = (1ULL << 37);
+ /* CC.EN timeout in 500msec units: */
+ ctrl->cap |= (15ULL << 24);
+ /* maximum queue entries supported: */
+ ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+}
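+
+/*
+ * As a worked example, assuming NVMET_QUEUE_SIZE is 1024 the CAP value
+ * built above advertises MQES = 1023 (the field is zero's based),
+ * TO = 15 (7.5 seconds in 500 msec units) and, via bit 37 of CAP.CSS,
+ * support for the NVM command set.
+ */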
+
+u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
+ struct nvmet_req *req, struct nvmet_ctrl **ret)
+{
+ struct nvmet_subsys *subsys;
+ struct nvmet_ctrl *ctrl;
+ u16 status = 0;
+
+ subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+ if (!subsys) {
+ pr_warn("connect request for invalid subsystem %s!\n",
+ subsysnqn);
+ req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ }
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->cntlid == cntlid) {
+ if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
+ pr_warn("hostnqn mismatch.\n");
+ continue;
+ }
+ if (!kref_get_unless_zero(&ctrl->ref))
+ continue;
+
+ *ret = ctrl;
+ goto out;
+ }
+ }
+
+ pr_warn("could not find controller %d for subsys %s / host %s\n",
+ cntlid, subsysnqn, hostnqn);
+ req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+
+out:
+ mutex_unlock(&subsys->lock);
+ nvmet_subsys_put(subsys);
+ return status;
+}
+
+static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
+ const char *hostnqn)
+{
+ struct nvmet_host_link *p;
+
+ if (subsys->allow_any_host)
+ return true;
+
+ list_for_each_entry(p, &subsys->hosts, entry) {
+ if (!strcmp(nvmet_host_name(p->host), hostnqn))
+ return true;
+ }
+
+ return false;
+}
+
+static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
+ const char *hostnqn)
+{
+ struct nvmet_subsys_link *s;
+
+ list_for_each_entry(s, &req->port->subsystems, entry) {
+ if (__nvmet_host_allowed(s->subsys, hostnqn))
+ return true;
+ }
+
+ return false;
+}
+
+bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
+ const char *hostnqn)
+{
+ lockdep_assert_held(&nvmet_config_sem);
+
+ if (subsys->type == NVME_NQN_DISC)
+ return nvmet_host_discovery_allowed(req, hostnqn);
+ else
+ return __nvmet_host_allowed(subsys, hostnqn);
+}
+
+u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+ struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
+{
+ struct nvmet_subsys *subsys;
+ struct nvmet_ctrl *ctrl;
+ int ret;
+ u16 status;
+
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+ if (!subsys) {
+ pr_warn("connect request for invalid subsystem %s!\n",
+ subsysnqn);
+ req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ goto out;
+ }
+
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ down_read(&nvmet_config_sem);
+ if (!nvmet_host_allowed(req, subsys, hostnqn)) {
+ pr_info("connect by host %s for subsystem %s not allowed\n",
+ hostnqn, subsysnqn);
+ req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
+ up_read(&nvmet_config_sem);
+ goto out_put_subsystem;
+ }
+ up_read(&nvmet_config_sem);
+
+ status = NVME_SC_INTERNAL;
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ goto out_put_subsystem;
+ mutex_init(&ctrl->lock);
+
+ nvmet_init_cap(ctrl);
+
+ INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
+ INIT_LIST_HEAD(&ctrl->async_events);
+
+ memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
+ memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+
+ kref_init(&ctrl->ref);
+ ctrl->subsys = subsys;
+
+ ctrl->cqs = kcalloc(subsys->max_qid + 1,
+ sizeof(struct nvmet_cq *),
+ GFP_KERNEL);
+ if (!ctrl->cqs)
+ goto out_free_ctrl;
+
+ ctrl->sqs = kcalloc(subsys->max_qid + 1,
+ sizeof(struct nvmet_sq *),
+ GFP_KERNEL);
+ if (!ctrl->sqs)
+ goto out_free_cqs;
+
+ ret = ida_simple_get(&subsys->cntlid_ida,
+ NVME_CNTLID_MIN, NVME_CNTLID_MAX,
+ GFP_KERNEL);
+ if (ret < 0) {
+ status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ goto out_free_sqs;
+ }
+ ctrl->cntlid = ret;
+
+ ctrl->ops = req->ops;
+ if (ctrl->subsys->type == NVME_NQN_DISC) {
+ /* Don't accept keep-alive timeout for discovery controllers */
+ if (kato) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out_free_sqs;
+ }
+
+ /*
+ * Discovery controllers use some arbitrary high value in order
+ * to clean up stale discovery sessions
+ *
+ * From the latest base diff RC:
+ * "The Keep Alive command is not supported by
+ * Discovery controllers. A transport may specify a
+ * fixed Discovery controller activity timeout value
+ * (e.g., 2 minutes). If no commands are received
+ * by a Discovery controller within that time
+ * period, the controller may perform the
+ * actions for Keep Alive Timer expiration".
+ */
+ ctrl->kato = NVMET_DISC_KATO;
+ } else {
+ /* keep-alive timeout in seconds */
+ ctrl->kato = DIV_ROUND_UP(kato, 1000);
+ }
+ nvmet_start_keep_alive_timer(ctrl);
+
+ mutex_lock(&subsys->lock);
+ list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ mutex_unlock(&subsys->lock);
+
+ *ctrlp = ctrl;
+ return 0;
+
+out_free_sqs:
+ kfree(ctrl->sqs);
+out_free_cqs:
+ kfree(ctrl->cqs);
+out_free_ctrl:
+ kfree(ctrl);
+out_put_subsystem:
+ nvmet_subsys_put(subsys);
+out:
+ return status;
+}
+
+static void nvmet_ctrl_free(struct kref *ref)
+{
+ struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
+ struct nvmet_subsys *subsys = ctrl->subsys;
+
+ nvmet_stop_keep_alive_timer(ctrl);
+
+ mutex_lock(&subsys->lock);
+ list_del(&ctrl->subsys_entry);
+ mutex_unlock(&subsys->lock);
+
+ ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
+ nvmet_subsys_put(subsys);
+
+ kfree(ctrl->sqs);
+ kfree(ctrl->cqs);
+ kfree(ctrl);
+}
+
+void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
+{
+ kref_put(&ctrl->ref, nvmet_ctrl_free);
+}
+
+static void nvmet_fatal_error_handler(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, fatal_err_work);
+
+ pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
+ ctrl->ops->delete_ctrl(ctrl);
+}
+
+void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
+{
+ ctrl->csts |= NVME_CSTS_CFS;
+ INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+ schedule_work(&ctrl->fatal_err_work);
+}
+EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
+
+static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
+ const char *subsysnqn)
+{
+ struct nvmet_subsys_link *p;
+
+ if (!port)
+ return NULL;
+
+ if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
+ NVMF_NQN_SIZE)) {
+ if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
+ return NULL;
+ return nvmet_disc_subsys;
+ }
+
+ down_read(&nvmet_config_sem);
+ list_for_each_entry(p, &port->subsystems, entry) {
+ if (!strncmp(p->subsys->subsysnqn, subsysnqn,
+ NVMF_NQN_SIZE)) {
+ if (!kref_get_unless_zero(&p->subsys->ref))
+ break;
+ up_read(&nvmet_config_sem);
+ return p->subsys;
+ }
+ }
+ up_read(&nvmet_config_sem);
+ return NULL;
+}
+
+struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
+ enum nvme_subsys_type type)
+{
+ struct nvmet_subsys *subsys;
+
+ subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+ if (!subsys)
+ return NULL;
+
+ subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
+
+ switch (type) {
+ case NVME_NQN_NVME:
+ subsys->max_qid = NVMET_NR_QUEUES;
+ break;
+ case NVME_NQN_DISC:
+ subsys->max_qid = 0;
+ break;
+ default:
+ pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
+ kfree(subsys);
+ return NULL;
+ }
+ subsys->type = type;
+ subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
+ GFP_KERNEL);
+ if (!subsys->subsysnqn) {
+ kfree(subsys);
+ return NULL;
+ }
+
+ kref_init(&subsys->ref);
+
+ mutex_init(&subsys->lock);
+ INIT_LIST_HEAD(&subsys->namespaces);
+ INIT_LIST_HEAD(&subsys->ctrls);
+
+ ida_init(&subsys->cntlid_ida);
+
+ INIT_LIST_HEAD(&subsys->hosts);
+
+ return subsys;
+}
+
+static void nvmet_subsys_free(struct kref *ref)
+{
+ struct nvmet_subsys *subsys =
+ container_of(ref, struct nvmet_subsys, ref);
+
+ WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+
+ ida_destroy(&subsys->cntlid_ida);
+ kfree(subsys->subsysnqn);
+ kfree(subsys);
+}
+
+void nvmet_subsys_put(struct nvmet_subsys *subsys)
+{
+ kref_put(&subsys->ref, nvmet_subsys_free);
+}
+
+static int __init nvmet_init(void)
+{
+ int error;
+
+ error = nvmet_init_discovery();
+ if (error)
+ goto out;
+
+ error = nvmet_init_configfs();
+ if (error)
+ goto out_exit_discovery;
+ return 0;
+
+out_exit_discovery:
+ nvmet_exit_discovery();
+out:
+ return error;
+}
+
+static void __exit nvmet_exit(void)
+{
+ nvmet_exit_configfs();
+ nvmet_exit_discovery();
+
+ BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
+ BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
+}
+
+module_init(nvmet_init);
+module_exit(nvmet_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
new file mode 100644
index 000000000000..6f65646e89cf
--- /dev/null
+++ b/drivers/nvme/target/discovery.c
@@ -0,0 +1,221 @@
+/*
+ * Discovery service for the NVMe over Fabrics target.
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/slab.h>
+#include <generated/utsrelease.h>
+#include "nvmet.h"
+
+struct nvmet_subsys *nvmet_disc_subsys;
+
+u64 nvmet_genctr;
+
+void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
+{
+ down_write(&nvmet_config_sem);
+ if (list_empty(&port->entry)) {
+ list_add_tail(&port->entry, &parent->referrals);
+ port->enabled = true;
+ nvmet_genctr++;
+ }
+ up_write(&nvmet_config_sem);
+}
+
+void nvmet_referral_disable(struct nvmet_port *port)
+{
+ down_write(&nvmet_config_sem);
+ if (!list_empty(&port->entry)) {
+ port->enabled = false;
+ list_del_init(&port->entry);
+ nvmet_genctr++;
+ }
+ up_write(&nvmet_config_sem);
+}
+
+static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
+ struct nvmet_port *port, char *subsys_nqn, u8 type, u32 numrec)
+{
+ struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];
+
+ e->trtype = port->disc_addr.trtype;
+ e->adrfam = port->disc_addr.adrfam;
+ e->treq = port->disc_addr.treq;
+ e->portid = port->disc_addr.portid;
+ /* we support only dynamic controllers */
+ e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
+ e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
+ e->nqntype = type;
+ memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
+ memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+ memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
+ memcpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
+}
+
+static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
+{
+ const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_disc_rsp_page_hdr *hdr;
+ size_t data_len = nvmet_get_log_page_len(req->cmd);
+ size_t alloc_len = max(data_len, sizeof(*hdr));
+ int residual_len = data_len - sizeof(*hdr);
+ struct nvmet_subsys_link *p;
+ struct nvmet_port *r;
+ u32 numrec = 0;
+ u16 status = 0;
+
+ /*
+ * Make sure we allocate at least a buffer of response header size.
+ * If the host provided a data length smaller than the header, only
+ * that many bytes are copied back; entries that do not fit are still
+ * counted in numrec so the host can retry with a larger buffer.
+ */
+ hdr = kzalloc(alloc_len, GFP_KERNEL);
+ if (!hdr) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ down_read(&nvmet_config_sem);
+ list_for_each_entry(p, &req->port->subsystems, entry) {
+ if (!nvmet_host_allowed(req, p->subsys, ctrl->hostnqn))
+ continue;
+ if (residual_len >= entry_size) {
+ nvmet_format_discovery_entry(hdr, req->port,
+ p->subsys->subsysnqn,
+ NVME_NQN_NVME, numrec);
+ residual_len -= entry_size;
+ }
+ numrec++;
+ }
+
+ list_for_each_entry(r, &req->port->referrals, entry) {
+ if (residual_len >= entry_size) {
+ nvmet_format_discovery_entry(hdr, r,
+ NVME_DISC_SUBSYS_NAME,
+ NVME_NQN_DISC, numrec);
+ residual_len -= entry_size;
+ }
+ numrec++;
+ }
+
+ hdr->genctr = cpu_to_le64(nvmet_genctr);
+ hdr->numrec = cpu_to_le64(numrec);
+ hdr->recfmt = cpu_to_le16(0);
+
+ up_read(&nvmet_config_sem);
+
+ status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
+ kfree(hdr);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_id_ctrl *id;
+ u16 status = 0;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ memset(id->fr, ' ', sizeof(id->fr));
+ strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+
+ /* no limit on data transfer sizes for now */
+ id->mdts = 0;
+ id->cntlid = cpu_to_le16(ctrl->cntlid);
+ id->ver = cpu_to_le32(ctrl->subsys->ver);
+ id->lpa = (1 << 2);
+
+ /* no enforced soft-limit for maxcmd - pick an arbitrary high value */
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+ id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
+ if (ctrl->ops->has_keyed_sgls)
+ id->sgls |= cpu_to_le32(1 << 2);
+ if (ctrl->ops->sqe_inline_size)
+ id->sgls |= cpu_to_le32(1 << 20);
+
+ strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
+int nvmet_parse_discovery_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ req->ns = NULL;
+
+ if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+ pr_err("nvmet: got cmd %d while not ready\n",
+ cmd->common.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ req->data_len = nvmet_get_log_page_len(cmd);
+
+ switch (cmd->get_log_page.lid) {
+ case NVME_LOG_DISC:
+ req->execute = nvmet_execute_get_disc_log_page;
+ return 0;
+ default:
+ pr_err("nvmet: unsupported get_log_page lid %d\n",
+ cmd->get_log_page.lid);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ case nvme_admin_identify:
+ req->data_len = 4096;
+ switch (le32_to_cpu(cmd->identify.cns)) {
+ case 0x01:
+ req->execute = nvmet_execute_identify_disc_ctrl;
+ return 0;
+ default:
+ pr_err("nvmet: unsupported identify cns %d\n",
+ le32_to_cpu(cmd->identify.cns));
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ default:
+ pr_err("nvmet: unsupported cmd %d\n",
+ cmd->common.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
+
+int __init nvmet_init_discovery(void)
+{
+ nvmet_disc_subsys =
+ nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
+ if (!nvmet_disc_subsys)
+ return -ENOMEM;
+ return 0;
+}
+
+void nvmet_exit_discovery(void)
+{
+ nvmet_subsys_put(nvmet_disc_subsys);
+}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
new file mode 100644
index 000000000000..9a97ae67e656
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -0,0 +1,240 @@
+/*
+ * NVMe Fabrics command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include "nvmet.h"
+
+static void nvmet_execute_prop_set(struct nvmet_req *req)
+{
+ u16 status = 0;
+
+ if (!(req->cmd->prop_set.attrib & 1)) {
+ u64 val = le64_to_cpu(req->cmd->prop_set.value);
+
+ switch (le32_to_cpu(req->cmd->prop_set.offset)) {
+ case NVME_REG_CC:
+ nvmet_update_cc(req->sq->ctrl, val);
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+ } else {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_prop_get(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = 0;
+ u64 val = 0;
+
+ if (req->cmd->prop_get.attrib & 1) {
+ switch (le32_to_cpu(req->cmd->prop_get.offset)) {
+ case NVME_REG_CAP:
+ val = ctrl->cap;
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+ } else {
+ switch (le32_to_cpu(req->cmd->prop_get.offset)) {
+ case NVME_REG_VS:
+ val = ctrl->subsys->ver;
+ break;
+ case NVME_REG_CC:
+ val = ctrl->cc;
+ break;
+ case NVME_REG_CSTS:
+ val = ctrl->csts;
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+ }
+
+ req->rsp->result64 = cpu_to_le64(val);
+ nvmet_req_complete(req, status);
+}
+
+int nvmet_parse_fabrics_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ req->ns = NULL;
+
+ switch (cmd->fabrics.fctype) {
+ case nvme_fabrics_type_property_set:
+ req->data_len = 0;
+ req->execute = nvmet_execute_prop_set;
+ break;
+ case nvme_fabrics_type_property_get:
+ req->data_len = 0;
+ req->execute = nvmet_execute_prop_get;
+ break;
+ default:
+ pr_err("received unknown capsule type 0x%x\n",
+ cmd->fabrics.fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return 0;
+}
+
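+/*
+ * Bind the queue pair that carried the Connect command to the looked-up
+ * controller.  The cmpxchg() on sq->ctrl guards against two Connect
+ * commands racing on the same queue: only the first caller observes
+ * NULL and wins, the loser is told the controller is busy.
+ */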
+static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+{
+ struct nvmf_connect_command *c = &req->cmd->connect;
+ u16 qid = le16_to_cpu(c->qid);
+ u16 sqsize = le16_to_cpu(c->sqsize);
+ struct nvmet_ctrl *old;
+
+ old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
+ if (old) {
+ pr_warn("queue already connected!\n");
+ return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ }
+
+ nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
+ nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
+ return 0;
+}
+
+static void nvmet_execute_admin_connect(struct nvmet_req *req)
+{
+ struct nvmf_connect_command *c = &req->cmd->connect;
+ struct nvmf_connect_data *d;
+ struct nvmet_ctrl *ctrl = NULL;
+ u16 status = 0;
+
+ d = kmap(sg_page(req->sg)) + req->sg->offset;
+
+ /* zero out initial completion result, assign values as needed */
+ req->rsp->result = 0;
+
+ if (c->recfmt != 0) {
+ pr_warn("invalid connect version (%d).\n",
+ le16_to_cpu(c->recfmt));
+ status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
+ pr_warn("connect attempt for invalid controller ID %#x\n",
+ le16_to_cpu(d->cntlid));
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+ goto out;
+ }
+
+ status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
+ le32_to_cpu(c->kato), &ctrl);
+ if (status)
+ goto out;
+
+ status = nvmet_install_queue(ctrl, req);
+ if (status) {
+ nvmet_ctrl_put(ctrl);
+ goto out;
+ }
+
+ pr_info("creating controller %d for NQN %s.\n",
+ ctrl->cntlid, ctrl->hostnqn);
+ req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+
+out:
+ kunmap(sg_page(req->sg));
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_io_connect(struct nvmet_req *req)
+{
+ struct nvmf_connect_command *c = &req->cmd->connect;
+ struct nvmf_connect_data *d;
+ struct nvmet_ctrl *ctrl = NULL;
+ u16 qid = le16_to_cpu(c->qid);
+ u16 status = 0;
+
+ d = kmap(sg_page(req->sg)) + req->sg->offset;
+
+ /* zero out initial completion result, assign values as needed */
+ req->rsp->result = 0;
+
+ if (c->recfmt != 0) {
+ pr_warn("invalid connect version (%d).\n",
+ le16_to_cpu(c->recfmt));
+ status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+ goto out;
+ }
+
+ status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+ le16_to_cpu(d->cntlid),
+ req, &ctrl);
+ if (status)
+ goto out;
+
+ if (unlikely(qid > ctrl->subsys->max_qid)) {
+ pr_warn("invalid queue id (%d)\n", qid);
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ req->rsp->result = IPO_IATTR_CONNECT_SQE(qid);
+ goto out_ctrl_put;
+ }
+
+ status = nvmet_install_queue(ctrl, req);
+ if (status) {
+ /* pass back the cntlid of the controller that failed queue setup */
+ req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+ goto out_ctrl_put;
+ }
+
+ pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+
+out:
+ kunmap(sg_page(req->sg));
+ nvmet_req_complete(req, status);
+ return;
+
+out_ctrl_put:
+ nvmet_ctrl_put(ctrl);
+ goto out;
+}
+
+int nvmet_parse_connect_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ req->ns = NULL;
+
+ if (req->cmd->common.opcode != nvme_fabrics_command) {
+ pr_err("invalid command 0x%x on unconnected queue.\n",
+ cmd->fabrics.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+ pr_err("invalid capsule type 0x%x on unconnected queue.\n",
+ cmd->fabrics.fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ req->data_len = sizeof(struct nvmf_connect_data);
+ if (cmd->connect.qid == 0)
+ req->execute = nvmet_execute_admin_connect;
+ else
+ req->execute = nvmet_execute_io_connect;
+ return 0;
+}
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
new file mode 100644
index 000000000000..2cd069b691ae
--- /dev/null
+++ b/drivers/nvme/target/io-cmd.c
@@ -0,0 +1,215 @@
+/*
+ * NVMe I/O command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include "nvmet.h"
+
+static void nvmet_bio_done(struct bio *bio)
+{
+ struct nvmet_req *req = bio->bi_private;
+
+ nvmet_req_complete(req,
+ bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+
+ if (bio != &req->inline_bio)
+ bio_put(bio);
+}
+
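+/*
+ * rw.length is a 0's based block count per the NVMe spec, hence the + 1;
+ * shifting by the namespace's block size converts blocks to bytes.
+ */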
+static inline u32 nvmet_rw_len(struct nvmet_req *req)
+{
+ return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
+ req->ns->blksize_shift;
+}
+
+static void nvmet_inline_bio_init(struct nvmet_req *req)
+{
+ struct bio *bio = &req->inline_bio;
+
+ bio_init(bio);
+ bio->bi_max_vecs = NVMET_MAX_INLINE_BIOVEC;
+ bio->bi_io_vec = req->inline_bvec;
+}
+
+static void nvmet_execute_rw(struct nvmet_req *req)
+{
+ int sg_cnt = req->sg_cnt;
+ struct scatterlist *sg;
+ struct bio *bio;
+ sector_t sector;
+ blk_qc_t cookie;
+ int op, op_flags = 0, i;
+
+ if (!req->sg_cnt) {
+ nvmet_req_complete(req, 0);
+ return;
+ }
+
+ if (req->cmd->rw.opcode == nvme_cmd_write) {
+ op = REQ_OP_WRITE;
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+ op_flags |= REQ_FUA;
+ } else {
+ op = REQ_OP_READ;
+ }
+
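+	/* convert the namespace LBA into the block layer's 512-byte sectors */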
+ sector = le64_to_cpu(req->cmd->rw.slba);
+ sector <<= (req->ns->blksize_shift - 9);
+
+ nvmet_inline_bio_init(req);
+ bio = &req->inline_bio;
+ bio->bi_bdev = req->ns->bdev;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+ bio_set_op_attrs(bio, op, op_flags);
+
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+ != sg->length) {
+ struct bio *prev = bio;
+
+ bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ bio->bi_bdev = req->ns->bdev;
+ bio->bi_iter.bi_sector = sector;
+ bio_set_op_attrs(bio, op, op_flags);
+
+ bio_chain(bio, prev);
+ cookie = submit_bio(prev);
+ }
+
+ sector += sg->length >> 9;
+ sg_cnt--;
+ }
+
+ cookie = submit_bio(bio);
+
+ blk_poll(bdev_get_queue(req->ns->bdev), cookie);
+}
+
+static void nvmet_execute_flush(struct nvmet_req *req)
+{
+ struct bio *bio;
+
+ nvmet_inline_bio_init(req);
+ bio = &req->inline_bio;
+
+ bio->bi_bdev = req->ns->bdev;
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+
+ submit_bio(bio);
+}
+
+static u16 nvmet_discard_range(struct nvmet_ns *ns,
+ struct nvme_dsm_range *range, struct bio **bio)
+{
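+	/* slba and nlb are in namespace blocks; shift both to 512-byte sectors */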
+ if (__blkdev_issue_discard(ns->bdev,
+ le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+ le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
+ GFP_KERNEL, 0, bio))
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return 0;
+}
+
+static void nvmet_execute_discard(struct nvmet_req *req)
+{
+ struct nvme_dsm_range range;
+ struct bio *bio = NULL;
+ int i;
+ u16 status;
+
+ for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
+ status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+ sizeof(range));
+ if (status)
+ break;
+
+ status = nvmet_discard_range(req->ns, &range, &bio);
+ if (status)
+ break;
+ }
+
+ if (bio) {
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+ if (status) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else {
+ submit_bio(bio);
+ }
+ } else {
+ nvmet_req_complete(req, status);
+ }
+}
+
+static void nvmet_execute_dsm(struct nvmet_req *req)
+{
+ switch (le32_to_cpu(req->cmd->dsm.attributes)) {
+ case NVME_DSMGMT_AD:
+ nvmet_execute_discard(req);
+ return;
+ case NVME_DSMGMT_IDR:
+ case NVME_DSMGMT_IDW:
+ default:
+ /* Not supported yet */
+ nvmet_req_complete(req, 0);
+ return;
+ }
+}
+
+int nvmet_parse_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
+ pr_err("nvmet: got io cmd %d while CC.EN == 0\n",
+ cmd->common.opcode);
+ req->ns = NULL;
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+ pr_err("nvmet: got io cmd %d while CSTS.RDY == 0\n",
+ cmd->common.opcode);
+ req->ns = NULL;
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
+ if (!req->ns)
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+ switch (cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ req->execute = nvmet_execute_rw;
+ req->data_len = nvmet_rw_len(req);
+ return 0;
+ case nvme_cmd_flush:
+ req->execute = nvmet_execute_flush;
+ req->data_len = 0;
+ return 0;
+ case nvme_cmd_dsm:
+ req->execute = nvmet_execute_dsm;
+		/* dsm.nr is 0's based, so the payload carries nr + 1 ranges */
+		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
+			sizeof(struct nvme_dsm_range);
+ return 0;
+ default:
+ pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
new file mode 100644
index 000000000000..94e782987cc9
--- /dev/null
+++ b/drivers/nvme/target/loop.c
@@ -0,0 +1,754 @@
+/*
+ * NVMe over Fabrics loopback device.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/blk-mq.h>
+#include <linux/nvme.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <linux/t10-pi.h>
+#include "nvmet.h"
+#include "../host/nvme.h"
+#include "../host/fabrics.h"
+
+#define NVME_LOOP_AQ_DEPTH 256
+
+#define NVME_LOOP_MAX_SEGMENTS 256
+
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_LOOP_NR_AEN_COMMANDS 1
+#define NVME_LOOP_AQ_BLKMQ_DEPTH \
+ (NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
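+
+/*
+ * The AEN command uses command_id NVME_LOOP_AQ_BLKMQ_DEPTH, one past the
+ * tag space blk-mq hands out, so it can never collide with a request tag.
+ */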
+
+struct nvme_loop_iod {
+ struct nvme_command cmd;
+ struct nvme_completion rsp;
+ struct nvmet_req req;
+ struct nvme_loop_queue *queue;
+ struct work_struct work;
+ struct sg_table sg_table;
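+	/* must be last: the tag set's cmd_size allocates SG_CHUNK_SIZE entries inline */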
+ struct scatterlist first_sgl[];
+};
+
+struct nvme_loop_ctrl {
+ spinlock_t lock;
+ struct nvme_loop_queue *queues;
+ u32 queue_count;
+
+ struct blk_mq_tag_set admin_tag_set;
+
+ struct list_head list;
+ u64 cap;
+ struct blk_mq_tag_set tag_set;
+ struct nvme_loop_iod async_event_iod;
+ struct nvme_ctrl ctrl;
+
+ struct nvmet_ctrl *target_ctrl;
+ struct work_struct delete_work;
+ struct work_struct reset_work;
+};
+
+static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
+}
+
+struct nvme_loop_queue {
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+ struct nvme_loop_ctrl *ctrl;
+};
+
+static struct nvmet_port *nvmet_loop_port;
+
+static LIST_HEAD(nvme_loop_ctrl_list);
+static DEFINE_MUTEX(nvme_loop_ctrl_mutex);
+
+static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
+static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);
+
+static struct nvmet_fabrics_ops nvme_loop_ops;
+
+static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
+{
+ return queue - queue->ctrl->queues;
+}
+
+static void nvme_loop_complete_rq(struct request *req)
+{
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+ int error = 0;
+
+ nvme_cleanup_cmd(req);
+ sg_free_table_chained(&iod->sg_table, true);
+
+ if (unlikely(req->errors)) {
+ if (nvme_req_needs_retry(req, req->errors)) {
+ nvme_requeue_req(req);
+ return;
+ }
+
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ error = req->errors;
+ else
+ error = nvme_error_status(req->errors);
+ }
+
+ blk_mq_end_request(req, error);
+}
+
+static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
+{
+ struct nvme_loop_iod *iod =
+ container_of(nvme_req, struct nvme_loop_iod, req);
+ struct nvme_completion *cqe = &iod->rsp;
+
+	/*
+	 * AEN requests are special: they don't time out, they survive
+	 * any kind of queue freeze, and they often don't respond to
+	 * aborts. We don't even bother to allocate a struct request
+	 * for them but rather special case them here.
+	 */
+ if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
+ cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
+ nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
+ } else {
+ struct request *req = blk_mq_rq_from_pdu(iod);
+
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
+ memcpy(req->special, cqe, sizeof(*cqe));
+ blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
+ }
+}
+
+static void nvme_loop_execute_work(struct work_struct *work)
+{
+ struct nvme_loop_iod *iod =
+ container_of(work, struct nvme_loop_iod, work);
+
+ iod->req.execute(&iod->req);
+}
+
+static enum blk_eh_timer_return
+nvme_loop_timeout(struct request *rq, bool reserved)
+{
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
+
+ /* queue error recovery */
+ schedule_work(&iod->queue->ctrl->reset_work);
+
+ /* fail with DNR on admin cmd timeout */
+ rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+
+ return BLK_EH_HANDLED;
+}
+
+static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_loop_queue *queue = hctx->driver_data;
+ struct request *req = bd->rq;
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+ int ret;
+
+ ret = nvme_setup_cmd(ns, req, &iod->cmd);
+ if (ret)
+ return ret;
+
+ iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
+ iod->req.port = nvmet_loop_port;
+ if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
+ &queue->nvme_sq, &nvme_loop_ops)) {
+ nvme_cleanup_cmd(req);
+ blk_mq_start_request(req);
+ nvme_loop_queue_response(&iod->req);
+ return 0;
+ }
+
+ if (blk_rq_bytes(req)) {
+ iod->sg_table.sgl = iod->first_sgl;
+ ret = sg_alloc_table_chained(&iod->sg_table,
+ req->nr_phys_segments, iod->sg_table.sgl);
+ if (ret)
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ iod->req.sg = iod->sg_table.sgl;
+ iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
+ BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
+ }
+
+ iod->cmd.common.command_id = req->tag;
+ blk_mq_start_request(req);
+
+ schedule_work(&iod->work);
+ return 0;
+}
+
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
+ struct nvme_loop_queue *queue = &ctrl->queues[0];
+ struct nvme_loop_iod *iod = &ctrl->async_event_iod;
+
+ memset(&iod->cmd, 0, sizeof(iod->cmd));
+ iod->cmd.common.opcode = nvme_admin_async_event;
+ iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
+ iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
+ &nvme_loop_ops)) {
+ dev_err(ctrl->ctrl.device, "failed async event work\n");
+ return;
+ }
+
+ schedule_work(&iod->work);
+}
+
+static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
+ struct nvme_loop_iod *iod, unsigned int queue_idx)
+{
+ BUG_ON(queue_idx >= ctrl->queue_count);
+
+ iod->req.cmd = &iod->cmd;
+ iod->req.rsp = &iod->rsp;
+ iod->queue = &ctrl->queues[queue_idx];
+ INIT_WORK(&iod->work, nvme_loop_execute_work);
+ return 0;
+}
+
+static int nvme_loop_init_request(void *data, struct request *req,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
+}
+
+static int nvme_loop_init_admin_request(void *data, struct request *req,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
+}
+
+static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
+
+ BUG_ON(hctx_idx >= ctrl->queue_count);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_queue *queue = &ctrl->queues[0];
+
+ BUG_ON(hctx_idx != 0);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static struct blk_mq_ops nvme_loop_mq_ops = {
+ .queue_rq = nvme_loop_queue_rq,
+ .complete = nvme_loop_complete_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = nvme_loop_init_request,
+ .init_hctx = nvme_loop_init_hctx,
+ .timeout = nvme_loop_timeout,
+};
+
+static struct blk_mq_ops nvme_loop_admin_mq_ops = {
+ .queue_rq = nvme_loop_queue_rq,
+ .complete = nvme_loop_complete_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = nvme_loop_init_admin_request,
+ .init_hctx = nvme_loop_init_admin_hctx,
+ .timeout = nvme_loop_timeout,
+};
+
+static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
+{
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+}
+
+static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
+
+ if (list_empty(&ctrl->list))
+ goto free_ctrl;
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_del(&ctrl->list);
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+
+ if (nctrl->tagset) {
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ }
+ kfree(ctrl->queues);
+ nvmf_free_options(nctrl->opts);
+free_ctrl:
+ kfree(ctrl);
+}
+
+static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
+{
+ int error;
+
+ memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+ ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
+ ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
+ ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
+ SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ ctrl->admin_tag_set.driver_data = ctrl;
+ ctrl->admin_tag_set.nr_hw_queues = 1;
+ ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+
+ ctrl->queues[0].ctrl = ctrl;
+ error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
+ if (error)
+ return error;
+ ctrl->queue_count = 1;
+
+ error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ if (error)
+ goto out_free_sq;
+
+ ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+ if (IS_ERR(ctrl->ctrl.admin_q)) {
+ error = PTR_ERR(ctrl->ctrl.admin_q);
+ goto out_free_tagset;
+ }
+
+ error = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (error) {
+ dev_err(ctrl->ctrl.device,
+ "prop_get NVME_REG_CAP failed\n");
+ goto out_cleanup_queue;
+ }
+
+ ctrl->ctrl.sqsize =
+ min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+
+ error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ if (error)
+ goto out_cleanup_queue;
+
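+	/*
+	 * Cap transfers below the segment limit, converting whole pages
+	 * to the 512-byte sector units used by max_hw_sectors.
+	 */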
+ ctrl->ctrl.max_hw_sectors =
+ (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
+
+ error = nvme_init_identify(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_queue;
+
+ nvme_start_keep_alive(&ctrl->ctrl);
+
+ return 0;
+
+out_cleanup_queue:
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_free_tagset:
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_free_sq:
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+ return error;
+}
+
+static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
+{
+ int i;
+
+ nvme_stop_keep_alive(&ctrl->ctrl);
+
+ if (ctrl->queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ }
+
+ if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ nvme_shutdown_ctrl(&ctrl->ctrl);
+
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ nvme_loop_destroy_admin_queue(ctrl);
+}
+
+static void nvme_loop_del_ctrl_work(struct work_struct *work)
+{
+ struct nvme_loop_ctrl *ctrl = container_of(work,
+ struct nvme_loop_ctrl, delete_work);
+
+ nvme_remove_namespaces(&ctrl->ctrl);
+ nvme_loop_shutdown_ctrl(ctrl);
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+ return -EBUSY;
+
+ if (!schedule_work(&ctrl->delete_work))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
+ int ret;
+
+ ret = __nvme_loop_del_ctrl(ctrl);
+ if (ret)
+ return ret;
+
+ flush_work(&ctrl->delete_work);
+
+ return 0;
+}
+
+static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
+{
+ struct nvme_loop_ctrl *ctrl;
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
+ if (ctrl->ctrl.cntlid == nctrl->cntlid)
+ __nvme_loop_del_ctrl(ctrl);
+ }
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+}
+
+static void nvme_loop_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_loop_ctrl *ctrl = container_of(work,
+ struct nvme_loop_ctrl, reset_work);
+ bool changed;
+ int i, ret;
+
+ nvme_loop_shutdown_ctrl(ctrl);
+
+ ret = nvme_loop_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_disable;
+
+ for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+ ctrl->queues[i].ctrl = ctrl;
+ ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+ if (ret)
+ goto out_free_queues;
+
+ ctrl->queue_count++;
+ }
+
+ for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ goto out_free_queues;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+
+ nvme_start_queues(&ctrl->ctrl);
+
+ return;
+
+out_free_queues:
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ nvme_loop_destroy_admin_queue(ctrl);
+out_disable:
+ dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
+ nvme_remove_namespaces(&ctrl->ctrl);
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
+ return -EBUSY;
+
+ if (!schedule_work(&ctrl->reset_work))
+ return -EBUSY;
+
+ flush_work(&ctrl->reset_work);
+
+ return 0;
+}
+
+static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
+ .name = "loop",
+ .module = THIS_MODULE,
+ .is_fabrics = true,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .reset_ctrl = nvme_loop_reset_ctrl,
+ .free_ctrl = nvme_loop_free_ctrl,
+ .submit_async_event = nvme_loop_submit_async_event,
+ .delete_ctrl = nvme_loop_del_ctrl,
+ .get_subsysnqn = nvmf_get_subsysnqn,
+};
+
+static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ int ret, i;
+
+ ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ if (ret || !opts->nr_io_queues)
+ return ret;
+
+ dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
+ opts->nr_io_queues);
+
+ for (i = 1; i <= opts->nr_io_queues; i++) {
+ ctrl->queues[i].ctrl = ctrl;
+ ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+ if (ret)
+ goto out_destroy_queues;
+
+ ctrl->queue_count++;
+ }
+
+ memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+ ctrl->tag_set.ops = &nvme_loop_mq_ops;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+ ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+ ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
+ SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ ctrl->tag_set.driver_data = ctrl;
+ ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+ ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
+ ctrl->ctrl.tagset = &ctrl->tag_set;
+
+ ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ if (ret)
+ goto out_destroy_queues;
+
+ ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+ if (IS_ERR(ctrl->ctrl.connect_q)) {
+ ret = PTR_ERR(ctrl->ctrl.connect_q);
+ goto out_free_tagset;
+ }
+
+ for (i = 1; i <= opts->nr_io_queues; i++) {
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ goto out_cleanup_connect_q;
+ }
+
+ return 0;
+
+out_cleanup_connect_q:
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tagset:
+ blk_mq_free_tag_set(&ctrl->tag_set);
+out_destroy_queues:
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ return ret;
+}
+
+static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_loop_ctrl *ctrl;
+ bool changed;
+ int ret;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+ ctrl->ctrl.opts = opts;
+ INIT_LIST_HEAD(&ctrl->list);
+
+ INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
+ INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
+ 0 /* no quirks, we're perfect! */);
+ if (ret)
+ goto out_put_ctrl;
+
+ spin_lock_init(&ctrl->lock);
+
+ ret = -ENOMEM;
+
+ ctrl->ctrl.sqsize = opts->queue_size;
+ ctrl->ctrl.kato = opts->kato;
+
+ ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
+ GFP_KERNEL);
+ if (!ctrl->queues)
+ goto out_uninit_ctrl;
+
+ ret = nvme_loop_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_free_queues;
+
+ if (opts->queue_size > ctrl->ctrl.maxcmd) {
+ /* warn if maxcmd is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl maxcmd %u, clamping down\n",
+ opts->queue_size, ctrl->ctrl.maxcmd);
+ opts->queue_size = ctrl->ctrl.maxcmd;
+ }
+
+ if (opts->nr_io_queues) {
+ ret = nvme_loop_create_io_queues(ctrl);
+ if (ret)
+ goto out_remove_admin_queue;
+ }
+
+ nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);
+
+ dev_info(ctrl->ctrl.device,
+ "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
+
+ kref_get(&ctrl->ctrl.kref);
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+
+ if (opts->nr_io_queues) {
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ return &ctrl->ctrl;
+
+out_remove_admin_queue:
+ nvme_loop_destroy_admin_queue(ctrl);
+out_free_queues:
+ kfree(ctrl->queues);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
+ nvme_put_ctrl(&ctrl->ctrl);
+ if (ret > 0)
+ ret = -EIO;
+ return ERR_PTR(ret);
+}
+
+static int nvme_loop_add_port(struct nvmet_port *port)
+{
+	/*
+	 * XXX: disallow adding more than one port so
+	 * there are no connection rejections when a
+	 * subsystem is assigned to a port for which
+	 * loop doesn't have a pointer.
+	 * This scenario would be possible if we allowed
+	 * more than one port to be added and a subsystem
+	 * was assigned to a port other than nvmet_loop_port.
+	 */
+
+ if (nvmet_loop_port)
+ return -EPERM;
+
+ nvmet_loop_port = port;
+ return 0;
+}
+
+static void nvme_loop_remove_port(struct nvmet_port *port)
+{
+ if (port == nvmet_loop_port)
+ nvmet_loop_port = NULL;
+}
+
+static struct nvmet_fabrics_ops nvme_loop_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_LOOP,
+ .add_port = nvme_loop_add_port,
+ .remove_port = nvme_loop_remove_port,
+ .queue_response = nvme_loop_queue_response,
+ .delete_ctrl = nvme_loop_delete_ctrl,
+};
+
+static struct nvmf_transport_ops nvme_loop_transport = {
+ .name = "loop",
+ .create_ctrl = nvme_loop_create_ctrl,
+};
+
+static int __init nvme_loop_init_module(void)
+{
+ int ret;
+
+ ret = nvmet_register_transport(&nvme_loop_ops);
+ if (ret)
+ return ret;
+ nvmf_register_transport(&nvme_loop_transport);
+ return 0;
+}
+
+static void __exit nvme_loop_cleanup_module(void)
+{
+ struct nvme_loop_ctrl *ctrl, *next;
+
+ nvmf_unregister_transport(&nvme_loop_transport);
+ nvmet_unregister_transport(&nvme_loop_ops);
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
+ __nvme_loop_del_ctrl(ctrl);
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+
+ flush_scheduled_work();
+}
+
+module_init(nvme_loop_init_module);
+module_exit(nvme_loop_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
new file mode 100644
index 000000000000..57dd6d834c28
--- /dev/null
+++ b/drivers/nvme/target/nvmet.h
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _NVMET_H
+#define _NVMET_H
+
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/percpu-refcount.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/nvme.h>
+#include <linux/configfs.h>
+#include <linux/rcupdate.h>
+#include <linux/blkdev.h>
+
+#define NVMET_ASYNC_EVENTS 4
+#define NVMET_ERROR_LOG_SLOTS 128
+
+/*
+ * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
+ * The 16-bit shift sets the IATTR bit to 1, which means the offending
+ * offset starts in the data section of the connect command.
+ */
+#define IPO_IATTR_CONNECT_DATA(x) \
+ (cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
+#define IPO_IATTR_CONNECT_SQE(x) \
+ (cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
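+/*
+ * Example: a connect with a bad cntlid reports the faulting field back to
+ * the host via req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid).
+ */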
+
+struct nvmet_ns {
+ struct list_head dev_link;
+ struct percpu_ref ref;
+ struct block_device *bdev;
+ u32 nsid;
+ u32 blksize_shift;
+ loff_t size;
+ u8 nguid[16];
+
+ struct nvmet_subsys *subsys;
+ const char *device_path;
+
+ struct config_group device_group;
+ struct config_group group;
+
+ struct completion disable_done;
+};
+
+static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_ns, group);
+}
+
+static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
+{
+ return !list_empty_careful(&ns->dev_link);
+}
+
+struct nvmet_cq {
+ u16 qid;
+ u16 size;
+};
+
+struct nvmet_sq {
+ struct nvmet_ctrl *ctrl;
+ struct percpu_ref ref;
+ u16 qid;
+ u16 size;
+ struct completion free_done;
+};
+
+/**
+ * struct nvmet_port - Common structure to keep port
+ * information for the target.
+ * @entry: List head for holding a list of these elements.
+ * @disc_addr: Address information is stored in a format defined
+ * for a discovery log page entry.
+ * @group: ConfigFS group for this element's folder.
+ * @priv: Private data for the transport.
+ */
+struct nvmet_port {
+ struct list_head entry;
+ struct nvmf_disc_rsp_page_entry disc_addr;
+ struct config_group group;
+ struct config_group subsys_group;
+ struct list_head subsystems;
+ struct config_group referrals_group;
+ struct list_head referrals;
+ void *priv;
+ bool enabled;
+};
+
+static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_port,
+ group);
+}
+
+struct nvmet_ctrl {
+ struct nvmet_subsys *subsys;
+ struct nvmet_cq **cqs;
+ struct nvmet_sq **sqs;
+
+ struct mutex lock;
+ u64 cap;
+ u32 cc;
+ u32 csts;
+
+ u16 cntlid;
+ u32 kato;
+
+ struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
+ unsigned int nr_async_event_cmds;
+ struct list_head async_events;
+ struct work_struct async_event_work;
+
+ struct list_head subsys_entry;
+ struct kref ref;
+ struct delayed_work ka_work;
+ struct work_struct fatal_err_work;
+
+ struct nvmet_fabrics_ops *ops;
+
+ char subsysnqn[NVMF_NQN_FIELD_LEN];
+ char hostnqn[NVMF_NQN_FIELD_LEN];
+};
+
+struct nvmet_subsys {
+ enum nvme_subsys_type type;
+
+ struct mutex lock;
+ struct kref ref;
+
+ struct list_head namespaces;
+ unsigned int max_nsid;
+
+ struct list_head ctrls;
+ struct ida cntlid_ida;
+
+ struct list_head hosts;
+ bool allow_any_host;
+
+ u16 max_qid;
+
+ u64 ver;
+ char *subsysnqn;
+
+ struct config_group group;
+
+ struct config_group namespaces_group;
+ struct config_group allowed_hosts_group;
+};
+
+static inline struct nvmet_subsys *to_subsys(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_subsys, group);
+}
+
+static inline struct nvmet_subsys *namespaces_to_subsys(
+ struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_subsys,
+ namespaces_group);
+}
+
+struct nvmet_host {
+ struct config_group group;
+};
+
+static inline struct nvmet_host *to_host(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_host, group);
+}
+
+static inline char *nvmet_host_name(struct nvmet_host *host)
+{
+ return config_item_name(&host->group.cg_item);
+}
+
+struct nvmet_host_link {
+ struct list_head entry;
+ struct nvmet_host *host;
+};
+
+struct nvmet_subsys_link {
+ struct list_head entry;
+ struct nvmet_subsys *subsys;
+};
+
+struct nvmet_req;
+struct nvmet_fabrics_ops {
+ struct module *owner;
+ unsigned int type;
+ unsigned int sqe_inline_size;
+ unsigned int msdbd;
+ bool has_keyed_sgls : 1;
+ void (*queue_response)(struct nvmet_req *req);
+ int (*add_port)(struct nvmet_port *port);
+ void (*remove_port)(struct nvmet_port *port);
+ void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
+};
+
+#define NVMET_MAX_INLINE_BIOVEC 8
+
+struct nvmet_req {
+ struct nvme_command *cmd;
+ struct nvme_completion *rsp;
+ struct nvmet_sq *sq;
+ struct nvmet_cq *cq;
+ struct nvmet_ns *ns;
+ struct scatterlist *sg;
+ struct bio inline_bio;
+ struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
+ int sg_cnt;
+ size_t data_len;
+
+ struct nvmet_port *port;
+
+ void (*execute)(struct nvmet_req *req);
+ struct nvmet_fabrics_ops *ops;
+};
+
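+/* bit 0 of the CQE status word is the phase tag, hence the shift by 1 */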
+static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
+{
+ req->rsp->status = cpu_to_le16(status << 1);
+}
+
+static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
+{
+ req->rsp->result = cpu_to_le32(result);
+}
+
+/*
+ * NVMe command writes are actually DMA reads for us on the target side.
+ */
+static inline enum dma_data_direction
+nvmet_data_dir(struct nvmet_req *req)
+{
+ return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+}
+
+struct nvmet_async_event {
+ struct list_head entry;
+ u8 event_type;
+ u8 event_info;
+ u8 log_page;
+};
+
+int nvmet_parse_connect_cmd(struct nvmet_req *req);
+int nvmet_parse_io_cmd(struct nvmet_req *req);
+int nvmet_parse_admin_cmd(struct nvmet_req *req);
+int nvmet_parse_discovery_cmd(struct nvmet_req *req);
+int nvmet_parse_fabrics_cmd(struct nvmet_req *req);
+
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
+void nvmet_req_complete(struct nvmet_req *req, u16 status);
+
+void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
+ u16 size);
+void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
+ u16 size);
+void nvmet_sq_destroy(struct nvmet_sq *sq);
+int nvmet_sq_init(struct nvmet_sq *sq);
+
+void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
+
+void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
+u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+ struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
+u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
+ struct nvmet_req *req, struct nvmet_ctrl **ret);
+void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
+
+struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
+ enum nvme_subsys_type type);
+void nvmet_subsys_put(struct nvmet_subsys *subsys);
+
+struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
+void nvmet_put_namespace(struct nvmet_ns *ns);
+int nvmet_ns_enable(struct nvmet_ns *ns);
+void nvmet_ns_disable(struct nvmet_ns *ns);
+struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
+void nvmet_ns_free(struct nvmet_ns *ns);
+
+int nvmet_register_transport(struct nvmet_fabrics_ops *ops);
+void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops);
+
+int nvmet_enable_port(struct nvmet_port *port);
+void nvmet_disable_port(struct nvmet_port *port);
+
+void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
+void nvmet_referral_disable(struct nvmet_port *port);
+
+u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
+ size_t len);
+u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
+ size_t len);
+
+u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+
+#define NVMET_QUEUE_SIZE 1024
+#define NVMET_NR_QUEUES 64
+#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
+#define NVMET_KAS 10
+#define NVMET_DISC_KATO 120
+
+int __init nvmet_init_configfs(void);
+void __exit nvmet_exit_configfs(void);
+
+int __init nvmet_init_discovery(void);
+void nvmet_exit_discovery(void);
+
+extern struct nvmet_subsys *nvmet_disc_subsys;
+extern u64 nvmet_genctr;
+extern struct rw_semaphore nvmet_config_sem;
+
+bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
+ const char *hostnqn);
+
+#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
new file mode 100644
index 000000000000..e06d504bdf0c
--- /dev/null
+++ b/drivers/nvme/target/rdma.c
@@ -0,0 +1,1448 @@
+/*
+ * NVMe over Fabrics RDMA target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/atomic.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/nvme.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/inet.h>
+#include <asm/unaligned.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
+
+#include <linux/nvme-rdma.h>
+#include "nvmet.h"
+
+/*
+ * We allow up to a page of inline data to go with the SQE
+ */
+#define NVMET_RDMA_INLINE_DATA_SIZE PAGE_SIZE
+
+struct nvmet_rdma_cmd {
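+	/* sge[0] receives the command capsule, sge[1] the inline data page */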
+ struct ib_sge sge[2];
+ struct ib_cqe cqe;
+ struct ib_recv_wr wr;
+ struct scatterlist inline_sg;
+ struct page *inline_page;
+ struct nvme_command *nvme_cmd;
+ struct nvmet_rdma_queue *queue;
+};
+
+enum {
+ NVMET_RDMA_REQ_INLINE_DATA = (1 << 0),
+ NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1),
+};
+
+struct nvmet_rdma_rsp {
+ struct ib_sge send_sge;
+ struct ib_cqe send_cqe;
+ struct ib_send_wr send_wr;
+
+ struct nvmet_rdma_cmd *cmd;
+ struct nvmet_rdma_queue *queue;
+
+ struct ib_cqe read_cqe;
+ struct rdma_rw_ctx rw;
+
+ struct nvmet_req req;
+
+ u8 n_rdma;
+ u32 flags;
+ u32 invalidate_rkey;
+
+ struct list_head wait_list;
+ struct list_head free_list;
+};
+
+enum nvmet_rdma_queue_state {
+ NVMET_RDMA_Q_CONNECTING,
+ NVMET_RDMA_Q_LIVE,
+ NVMET_RDMA_Q_DISCONNECTING,
+};
+
+struct nvmet_rdma_queue {
+ struct rdma_cm_id *cm_id;
+ struct nvmet_port *port;
+ struct ib_cq *cq;
+ atomic_t sq_wr_avail;
+ struct nvmet_rdma_device *dev;
+ spinlock_t state_lock;
+ enum nvmet_rdma_queue_state state;
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+
+ struct nvmet_rdma_rsp *rsps;
+ struct list_head free_rsps;
+ spinlock_t rsps_lock;
+ struct nvmet_rdma_cmd *cmds;
+
+ struct work_struct release_work;
+ struct list_head rsp_wait_list;
+ struct list_head rsp_wr_wait_list;
+ spinlock_t rsp_wr_wait_lock;
+
+ int idx;
+ int host_qid;
+ int recv_queue_size;
+ int send_queue_size;
+
+ struct list_head queue_list;
+};
+
+struct nvmet_rdma_device {
+ struct ib_device *device;
+ struct ib_pd *pd;
+ struct ib_srq *srq;
+ struct nvmet_rdma_cmd *srq_cmds;
+ size_t srq_size;
+ struct kref ref;
+ struct list_head entry;
+};
+
+static bool nvmet_rdma_use_srq;
+module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
+MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
+
+static DEFINE_IDA(nvmet_rdma_queue_ida);
+static LIST_HEAD(nvmet_rdma_queue_list);
+static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_mutex);
+
+static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
+static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
+static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+
+static struct nvmet_fabrics_ops nvmet_rdma_ops;
+
+/* XXX: really should move to a generic header sooner or later... */
+static inline u32 get_unaligned_le24(const u8 *p)
+{
+ return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
+}
+
+static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
+{
+ return nvme_is_write(rsp->req.cmd) &&
+ rsp->req.data_len &&
+ !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
+}
+
+static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
+{
+ return !nvme_is_write(rsp->req.cmd) &&
+ rsp->req.data_len &&
+ !rsp->req.rsp->status &&
+ !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
+}
+
+static inline struct nvmet_rdma_rsp *
+nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
+{
+ struct nvmet_rdma_rsp *rsp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rsps_lock, flags);
+ rsp = list_first_entry(&queue->free_rsps,
+ struct nvmet_rdma_rsp, free_list);
+ list_del(&rsp->free_list);
+ spin_unlock_irqrestore(&queue->rsps_lock, flags);
+
+ return rsp;
+}
+
+static inline void
+nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
+ list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
+ spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
+}
+
+static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
+{
+ struct scatterlist *sg;
+ int count;
+
+ if (!sgl || !nents)
+ return;
+
+ for_each_sg(sgl, sg, nents, count)
+ __free_page(sg_page(sg));
+ kfree(sgl);
+}
+
+static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
+ u32 length)
+{
+ struct scatterlist *sg;
+ struct page *page;
+ unsigned int nent;
+ int i = 0;
+
+ nent = DIV_ROUND_UP(length, PAGE_SIZE);
+ sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+ if (!sg)
+ goto out;
+
+ sg_init_table(sg, nent);
+
+ while (length) {
+ u32 page_len = min_t(u32, length, PAGE_SIZE);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ goto out_free_pages;
+
+ sg_set_page(&sg[i], page, page_len, 0);
+ length -= page_len;
+ i++;
+ }
+ *sgl = sg;
+ *nents = nent;
+ return 0;
+
+out_free_pages:
+ while (i > 0) {
+ i--;
+ __free_page(sg_page(&sg[i]));
+ }
+ kfree(sg);
+out:
+ return NVME_SC_INTERNAL;
+}
+
+static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c, bool admin)
+{
+ /* NVMe command / RDMA RECV */
+ c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
+ if (!c->nvme_cmd)
+ goto out;
+
+ c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
+ sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
+ goto out_free_cmd;
+
+ c->sge[0].length = sizeof(*c->nvme_cmd);
+ c->sge[0].lkey = ndev->pd->local_dma_lkey;
+
+ if (!admin) {
+ c->inline_page = alloc_pages(GFP_KERNEL,
+ get_order(NVMET_RDMA_INLINE_DATA_SIZE));
+ if (!c->inline_page)
+ goto out_unmap_cmd;
+ c->sge[1].addr = ib_dma_map_page(ndev->device,
+ c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
+ DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
+ goto out_free_inline_page;
+ c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
+ c->sge[1].lkey = ndev->pd->local_dma_lkey;
+ }
+
+ c->cqe.done = nvmet_rdma_recv_done;
+
+ c->wr.wr_cqe = &c->cqe;
+ c->wr.sg_list = c->sge;
+ c->wr.num_sge = admin ? 1 : 2;
+
+ return 0;
+
+out_free_inline_page:
+ if (!admin) {
+ __free_pages(c->inline_page,
+ get_order(NVMET_RDMA_INLINE_DATA_SIZE));
+ }
+out_unmap_cmd:
+ ib_dma_unmap_single(ndev->device, c->sge[0].addr,
+ sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+out_free_cmd:
+ kfree(c->nvme_cmd);
+
+out:
+ return -ENOMEM;
+}
+
+static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c, bool admin)
+{
+ if (!admin) {
+ ib_dma_unmap_page(ndev->device, c->sge[1].addr,
+ NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
+ __free_pages(c->inline_page,
+ get_order(NVMET_RDMA_INLINE_DATA_SIZE));
+ }
+ ib_dma_unmap_single(ndev->device, c->sge[0].addr,
+ sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+ kfree(c->nvme_cmd);
+}
+
+static struct nvmet_rdma_cmd *
+nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
+ int nr_cmds, bool admin)
+{
+ struct nvmet_rdma_cmd *cmds;
+ int ret = -EINVAL, i;
+
+ cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
+ if (!cmds)
+ goto out;
+
+ for (i = 0; i < nr_cmds; i++) {
+ ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
+ if (ret)
+ goto out_free;
+ }
+
+ return cmds;
+
+out_free:
+ while (--i >= 0)
+ nvmet_rdma_free_cmd(ndev, cmds + i, admin);
+ kfree(cmds);
+out:
+ return ERR_PTR(ret);
+}
+
+static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
+{
+ int i;
+
+ for (i = 0; i < nr_cmds; i++)
+ nvmet_rdma_free_cmd(ndev, cmds + i, admin);
+ kfree(cmds);
+}
+
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_rsp *r)
+{
+ /* NVMe CQE / RDMA SEND */
+ r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
+ if (!r->req.rsp)
+ goto out;
+
+ r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
+ sizeof(*r->req.rsp), DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
+ goto out_free_rsp;
+
+ r->send_sge.length = sizeof(*r->req.rsp);
+ r->send_sge.lkey = ndev->pd->local_dma_lkey;
+
+ r->send_cqe.done = nvmet_rdma_send_done;
+
+ r->send_wr.wr_cqe = &r->send_cqe;
+ r->send_wr.sg_list = &r->send_sge;
+ r->send_wr.num_sge = 1;
+ r->send_wr.send_flags = IB_SEND_SIGNALED;
+
+ /* Data In / RDMA READ */
+ r->read_cqe.done = nvmet_rdma_read_data_done;
+ return 0;
+
+out_free_rsp:
+ kfree(r->req.rsp);
+out:
+ return -ENOMEM;
+}
+
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_rsp *r)
+{
+ ib_dma_unmap_single(ndev->device, r->send_sge.addr,
+ sizeof(*r->req.rsp), DMA_TO_DEVICE);
+ kfree(r->req.rsp);
+}
+
+static int
+nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
+{
+ struct nvmet_rdma_device *ndev = queue->dev;
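+	/* 2x recv depth: a recv buffer is reposted before its response completes */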
+ int nr_rsps = queue->recv_queue_size * 2;
+ int ret = -EINVAL, i;
+
+ queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
+ GFP_KERNEL);
+ if (!queue->rsps)
+ goto out;
+
+ for (i = 0; i < nr_rsps; i++) {
+ struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+ ret = nvmet_rdma_alloc_rsp(ndev, rsp);
+ if (ret)
+ goto out_free;
+
+ list_add_tail(&rsp->free_list, &queue->free_rsps);
+ }
+
+ return 0;
+
+out_free:
+ while (--i >= 0) {
+ struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+ list_del(&rsp->free_list);
+ nvmet_rdma_free_rsp(ndev, rsp);
+ }
+ kfree(queue->rsps);
+out:
+ return ret;
+}
+
+static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
+{
+ struct nvmet_rdma_device *ndev = queue->dev;
+ int i, nr_rsps = queue->recv_queue_size * 2;
+
+ for (i = 0; i < nr_rsps; i++) {
+ struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+ list_del(&rsp->free_list);
+ nvmet_rdma_free_rsp(ndev, rsp);
+ }
+ kfree(queue->rsps);
+}
+
+static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *cmd)
+{
+ struct ib_recv_wr *bad_wr;
+
+ if (ndev->srq)
+ return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
+ return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
+}
+
+static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
+{
+ spin_lock(&queue->rsp_wr_wait_lock);
+ while (!list_empty(&queue->rsp_wr_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+ bool ret;
+
+ rsp = list_entry(queue->rsp_wr_wait_list.next,
+ struct nvmet_rdma_rsp, wait_list);
+ list_del(&rsp->wait_list);
+
+ spin_unlock(&queue->rsp_wr_wait_lock);
+ ret = nvmet_rdma_execute_command(rsp);
+ spin_lock(&queue->rsp_wr_wait_lock);
+
+ if (!ret) {
+ list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
+ break;
+ }
+ }
+ spin_unlock(&queue->rsp_wr_wait_lock);
+}
+
+static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvmet_rdma_queue *queue = rsp->queue;
+
+ atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
+
+ if (rsp->n_rdma) {
+ rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+ queue->cm_id->port_num, rsp->req.sg,
+ rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
+ }
+
+ if (rsp->req.sg != &rsp->cmd->inline_sg)
+ nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
+
+ if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
+ nvmet_rdma_process_wr_wait_list(queue);
+
+ nvmet_rdma_put_rsp(rsp);
+}
+
+static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
+{
+ if (queue->nvme_sq.ctrl) {
+ nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
+ } else {
+		/*
+		 * We didn't set up the controller yet in case of an
+		 * admin connect error, so just disconnect and clean
+		 * up the queue.
+		 */
+ nvmet_rdma_queue_disconnect(queue);
+ }
+}
+
+static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+
+ nvmet_rdma_release_rsp(rsp);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR)) {
+ pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
+ wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
+ nvmet_rdma_error_comp(rsp->queue);
+ }
+}
+
+static void nvmet_rdma_queue_response(struct nvmet_req *req)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(req, struct nvmet_rdma_rsp, req);
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct ib_send_wr *first_wr, *bad_wr;
+
+ if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
+ rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
+ rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
+ } else {
+ rsp->send_wr.opcode = IB_WR_SEND;
+ }
+
+ if (nvmet_rdma_need_data_out(rsp))
+ first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+ cm_id->port_num, NULL, &rsp->send_wr);
+ else
+ first_wr = &rsp->send_wr;
+
+ nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+ if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
+ pr_err("sending cmd response failed\n");
+ nvmet_rdma_release_rsp(rsp);
+ }
+}
+
+static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
+ struct nvmet_rdma_queue *queue = cq->cq_context;
+
+ WARN_ON(rsp->n_rdma <= 0);
+ atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
+ rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+ queue->cm_id->port_num, rsp->req.sg,
+ rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
+ rsp->n_rdma = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_rdma_release_rsp(rsp);
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
+ wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+ return;
+ }
+
+ rsp->req.execute(&rsp->req);
+}
+
+static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
+ u64 off)
+{
+ sg_init_table(&rsp->cmd->inline_sg, 1);
+ sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
+ rsp->req.sg = &rsp->cmd->inline_sg;
+ rsp->req.sg_cnt = 1;
+}
+
+static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
+ u64 off = le64_to_cpu(sgl->addr);
+ u32 len = le32_to_cpu(sgl->length);
+
+ if (!nvme_is_write(rsp->req.cmd))
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+
+ if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
+ pr_err("invalid inline data offset!\n");
+ return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+ }
+
+ /* no data command? */
+ if (!len)
+ return 0;
+
+ nvmet_rdma_use_inline_sg(rsp, len, off);
+ rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
+ return 0;
+}
+
+static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
+ struct nvme_keyed_sgl_desc *sgl, bool invalidate)
+{
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ u64 addr = le64_to_cpu(sgl->addr);
+ u32 len = get_unaligned_le24(sgl->length);
+ u32 key = get_unaligned_le32(sgl->key);
+ int ret;
+ u16 status;
+
+ /* no data command? */
+ if (!len)
+ return 0;
+
+ /* use the already allocated data buffer if possible */
+ if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) {
+ nvmet_rdma_use_inline_sg(rsp, len, 0);
+ } else {
+ status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
+ len);
+ if (status)
+ return status;
+ }
+
+ ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
+ rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
+ nvmet_data_dir(&rsp->req));
+ if (ret < 0)
+ return NVME_SC_INTERNAL;
+ rsp->n_rdma += ret;
+
+ if (invalidate) {
+ rsp->invalidate_rkey = key;
+ rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
+ }
+
+ return 0;
+}
+
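+/* SGL descriptor type is in the high nibble, the subtype in the low nibble */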
+static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
+
+ switch (sgl->type >> 4) {
+ case NVME_SGL_FMT_DATA_DESC:
+ switch (sgl->type & 0xf) {
+ case NVME_SGL_FMT_OFFSET:
+ return nvmet_rdma_map_sgl_inline(rsp);
+ default:
+ pr_err("invalid SGL subtype: %#x\n", sgl->type);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+ case NVME_KEY_SGL_FMT_DATA_DESC:
+ switch (sgl->type & 0xf) {
+ case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
+ return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
+ case NVME_SGL_FMT_ADDRESS:
+ return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
+ default:
+ pr_err("invalid SGL subtype: %#x\n", sgl->type);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+ default:
+ pr_err("invalid SGL type: %#x\n", sgl->type);
+ return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
+ }
+}
+
+static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvmet_rdma_queue *queue = rsp->queue;
+
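+	/*
+	 * Claim one send WR for the response plus n_rdma WRs for the data
+	 * transfer; if the send queue lacks room, return the credits and
+	 * report failure so the caller can park the request on the
+	 * wr_wait list.
+	 */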
+ if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
+ &queue->sq_wr_avail) < 0)) {
+ pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
+ 1 + rsp->n_rdma, queue->idx,
+ queue->nvme_sq.ctrl->cntlid);
+ atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
+ return false;
+ }
+
+ if (nvmet_rdma_need_data_in(rsp)) {
+ if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
+ queue->cm_id->port_num, &rsp->read_cqe, NULL))
+ nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
+ } else {
+ rsp->req.execute(&rsp->req);
+ }
+
+ return true;
+}
+
+static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
+ struct nvmet_rdma_rsp *cmd)
+{
+ u16 status;
+
+ cmd->queue = queue;
+ cmd->n_rdma = 0;
+ cmd->req.port = queue->port;
+
+ if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
+ &queue->nvme_sq, &nvmet_rdma_ops))
+ return;
+
+ status = nvmet_rdma_map_sgl(cmd);
+ if (status)
+ goto out_err;
+
+ if (unlikely(!nvmet_rdma_execute_command(cmd))) {
+ spin_lock(&queue->rsp_wr_wait_lock);
+ list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
+ spin_unlock(&queue->rsp_wr_wait_lock);
+ }
+
+ return;
+
+out_err:
+ nvmet_req_complete(&cmd->req, status);
+}
+
+static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_cmd *cmd =
+ container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
+ struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_rsp *rsp;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
+ wc->wr_cqe, ib_wc_status_msg(wc->status),
+ wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+ return;
+ }
+
+ if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
+ pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
+ nvmet_rdma_error_comp(queue);
+ return;
+ }
+
+ cmd->queue = queue;
+ rsp = nvmet_rdma_get_rsp(queue);
+ rsp->cmd = cmd;
+ rsp->flags = 0;
+ rsp->req.cmd = cmd->nvme_cmd;
+
+ if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ if (queue->state == NVMET_RDMA_Q_CONNECTING)
+ list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
+ else
+ nvmet_rdma_put_rsp(rsp);
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ return;
+ }
+
+ nvmet_rdma_handle_command(queue, rsp);
+}
+
+static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
+{
+ if (!ndev->srq)
+ return;
+
+ nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
+ ib_destroy_srq(ndev->srq);
+}
+
+static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
+{
+ struct ib_srq_init_attr srq_attr = { NULL, };
+ struct ib_srq *srq;
+ size_t srq_size;
+ int ret, i;
+
+ srq_size = 4095; /* XXX: tune */
+
+ srq_attr.attr.max_wr = srq_size;
+ srq_attr.attr.max_sge = 2;
+ srq_attr.attr.srq_limit = 0;
+ srq_attr.srq_type = IB_SRQT_BASIC;
+ srq = ib_create_srq(ndev->pd, &srq_attr);
+ if (IS_ERR(srq)) {
+ /*
+		 * If SRQs aren't supported, we just go ahead and use normal
+ * non-shared receive queues.
+ */
+ pr_info("SRQ requested but not supported.\n");
+ return 0;
+ }
+
+ ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
+ if (IS_ERR(ndev->srq_cmds)) {
+ ret = PTR_ERR(ndev->srq_cmds);
+ goto out_destroy_srq;
+ }
+
+ ndev->srq = srq;
+ ndev->srq_size = srq_size;
+
+ for (i = 0; i < srq_size; i++)
+ nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+
+ return 0;
+
+out_destroy_srq:
+ ib_destroy_srq(srq);
+ return ret;
+}
+
+static void nvmet_rdma_free_dev(struct kref *ref)
+{
+ struct nvmet_rdma_device *ndev =
+ container_of(ref, struct nvmet_rdma_device, ref);
+
+ mutex_lock(&device_list_mutex);
+ list_del(&ndev->entry);
+ mutex_unlock(&device_list_mutex);
+
+ nvmet_rdma_destroy_srq(ndev);
+ ib_dealloc_pd(ndev->pd);
+
+ kfree(ndev);
+}
+
+static struct nvmet_rdma_device *
+nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
+{
+ struct nvmet_rdma_device *ndev;
+ int ret;
+
+ mutex_lock(&device_list_mutex);
+ list_for_each_entry(ndev, &device_list, entry) {
+ if (ndev->device->node_guid == cm_id->device->node_guid &&
+ kref_get_unless_zero(&ndev->ref))
+ goto out_unlock;
+ }
+
+ ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ goto out_err;
+
+ ndev->device = cm_id->device;
+ kref_init(&ndev->ref);
+
+ ndev->pd = ib_alloc_pd(ndev->device);
+ if (IS_ERR(ndev->pd))
+ goto out_free_dev;
+
+ if (nvmet_rdma_use_srq) {
+ ret = nvmet_rdma_init_srq(ndev);
+ if (ret)
+ goto out_free_pd;
+ }
+
+ list_add(&ndev->entry, &device_list);
+out_unlock:
+ mutex_unlock(&device_list_mutex);
+ pr_debug("added %s.\n", ndev->device->name);
+ return ndev;
+
+out_free_pd:
+ ib_dealloc_pd(ndev->pd);
+out_free_dev:
+ kfree(ndev);
+out_err:
+ mutex_unlock(&device_list_mutex);
+ return NULL;
+}
+
+static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
+{
+ struct ib_qp_init_attr qp_attr;
+ struct nvmet_rdma_device *ndev = queue->dev;
+ int comp_vector, nr_cqe, ret, i;
+
+ /*
+ * Spread the io queues across completion vectors,
+ * but still keep all admin queues on vector 0.
+ */
+ comp_vector = !queue->host_qid ? 0 :
+ queue->idx % ndev->device->num_comp_vectors;
+
+ /*
+ * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
+ */
+ nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
+
+ queue->cq = ib_alloc_cq(ndev->device, queue,
+ nr_cqe + 1, comp_vector,
+ IB_POLL_WORKQUEUE);
+ if (IS_ERR(queue->cq)) {
+ ret = PTR_ERR(queue->cq);
+ pr_err("failed to create CQ cqe= %d ret= %d\n",
+ nr_cqe + 1, ret);
+ goto out;
+ }
+
+ memset(&qp_attr, 0, sizeof(qp_attr));
+ qp_attr.qp_context = queue;
+ qp_attr.event_handler = nvmet_rdma_qp_event;
+ qp_attr.send_cq = queue->cq;
+ qp_attr.recv_cq = queue->cq;
+ qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ qp_attr.qp_type = IB_QPT_RC;
+ /* +1 for drain */
+ qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
+ qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
+ qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
+ ndev->device->attrs.max_sge);
+
+ if (ndev->srq) {
+ qp_attr.srq = ndev->srq;
+ } else {
+ /* +1 for drain */
+ qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
+ qp_attr.cap.max_recv_sge = 2;
+ }
+
+ ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
+ if (ret) {
+ pr_err("failed to create_qp ret= %d\n", ret);
+ goto err_destroy_cq;
+ }
+
+ atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
+
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+ __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
+ qp_attr.cap.max_send_wr, queue->cm_id);
+
+ if (!ndev->srq) {
+ for (i = 0; i < queue->recv_queue_size; i++) {
+ queue->cmds[i].queue = queue;
+ nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
+ }
+ }
+
+out:
+ return ret;
+
+err_destroy_cq:
+ ib_free_cq(queue->cq);
+ goto out;
+}
+
+static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
+{
+ rdma_destroy_qp(queue->cm_id);
+ ib_free_cq(queue->cq);
+}
+
+static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
+{
+ pr_info("freeing queue %d\n", queue->idx);
+
+ nvmet_sq_destroy(&queue->nvme_sq);
+
+ nvmet_rdma_destroy_queue_ib(queue);
+ if (!queue->dev->srq) {
+ nvmet_rdma_free_cmds(queue->dev, queue->cmds,
+ queue->recv_queue_size,
+ !queue->host_qid);
+ }
+ nvmet_rdma_free_rsps(queue);
+ ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+ kfree(queue);
+}
+
+static void nvmet_rdma_release_queue_work(struct work_struct *w)
+{
+ struct nvmet_rdma_queue *queue =
+ container_of(w, struct nvmet_rdma_queue, release_work);
+ struct rdma_cm_id *cm_id = queue->cm_id;
+ struct nvmet_rdma_device *dev = queue->dev;
+
+ nvmet_rdma_free_queue(queue);
+ rdma_destroy_id(cm_id);
+ kref_put(&dev->ref, nvmet_rdma_free_dev);
+}
+
+static int
+nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
+ struct nvmet_rdma_queue *queue)
+{
+ struct nvme_rdma_cm_req *req;
+
+ req = (struct nvme_rdma_cm_req *)conn->private_data;
+ if (!req || conn->private_data_len == 0)
+ return NVME_RDMA_CM_INVALID_LEN;
+
+ if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
+ return NVME_RDMA_CM_INVALID_RECFMT;
+
+ queue->host_qid = le16_to_cpu(req->qid);
+
+ /*
+ * req->hsqsize corresponds to our recv queue size
+ * req->hrqsize corresponds to our send queue size
+ */
+ queue->recv_queue_size = le16_to_cpu(req->hsqsize);
+ queue->send_queue_size = le16_to_cpu(req->hrqsize);
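+	/*
+	 * E.g. a host connecting with hsqsize = 32 and hrqsize = 32
+	 * gets a target queue sized to receive 32 in-flight commands
+	 * and to post up to 32 responses back.
+	 */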
+
+ if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
+ return NVME_RDMA_CM_INVALID_HSQSIZE;
+
+ /* XXX: Should we enforce some kind of max for IO queues? */
+
+ return 0;
+}
+
+static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
+ enum nvme_rdma_cm_status status)
+{
+ struct nvme_rdma_cm_rej rej;
+
+ rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+ rej.sts = cpu_to_le16(status);
+
+ return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
+}
+
+static struct nvmet_rdma_queue *
+nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
+ struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct nvmet_rdma_queue *queue;
+ int ret;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_reject;
+ }
+
+ ret = nvmet_sq_init(&queue->nvme_sq);
+	if (ret) {
+		ret = NVME_RDMA_CM_NO_RSC;
+		goto out_free_queue;
+	}
+
+ ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
+ if (ret)
+ goto out_destroy_sq;
+
+ /*
+ * Schedules the actual release because calling rdma_destroy_id from
+ * inside a CM callback would trigger a deadlock. (great API design..)
+ */
+ INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
+ queue->dev = ndev;
+ queue->cm_id = cm_id;
+
+ spin_lock_init(&queue->state_lock);
+ queue->state = NVMET_RDMA_Q_CONNECTING;
+ INIT_LIST_HEAD(&queue->rsp_wait_list);
+ INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
+ spin_lock_init(&queue->rsp_wr_wait_lock);
+ INIT_LIST_HEAD(&queue->free_rsps);
+ spin_lock_init(&queue->rsps_lock);
+
+ queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
+ if (queue->idx < 0) {
+ ret = NVME_RDMA_CM_NO_RSC;
+		goto out_destroy_sq;
+ }
+
+ ret = nvmet_rdma_alloc_rsps(queue);
+ if (ret) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_ida_remove;
+ }
+
+ if (!ndev->srq) {
+ queue->cmds = nvmet_rdma_alloc_cmds(ndev,
+ queue->recv_queue_size,
+ !queue->host_qid);
+ if (IS_ERR(queue->cmds)) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_free_responses;
+ }
+ }
+
+ ret = nvmet_rdma_create_queue_ib(queue);
+ if (ret) {
+ pr_err("%s: creating RDMA queue failed (%d).\n",
+ __func__, ret);
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_free_cmds;
+ }
+
+ return queue;
+
+out_free_cmds:
+ if (!ndev->srq) {
+ nvmet_rdma_free_cmds(queue->dev, queue->cmds,
+ queue->recv_queue_size,
+ !queue->host_qid);
+ }
+out_free_responses:
+ nvmet_rdma_free_rsps(queue);
+out_ida_remove:
+ ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+out_destroy_sq:
+ nvmet_sq_destroy(&queue->nvme_sq);
+out_free_queue:
+ kfree(queue);
+out_reject:
+ nvmet_rdma_cm_reject(cm_id, ret);
+ return NULL;
+}
+
+static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
+{
+ struct nvmet_rdma_queue *queue = priv;
+
+ switch (event->event) {
+ case IB_EVENT_COMM_EST:
+ rdma_notify(queue->cm_id, event->event);
+ break;
+ default:
+ pr_err("received unrecognized IB QP event %d\n", event->event);
+ break;
+ }
+}
+
+static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
+ struct nvmet_rdma_queue *queue,
+ struct rdma_conn_param *p)
+{
+ struct rdma_conn_param param = { };
+ struct nvme_rdma_cm_rep priv = { };
+	int ret;
+
+ param.rnr_retry_count = 7;
+ param.flow_control = 1;
+ param.initiator_depth = min_t(u8, p->initiator_depth,
+ queue->dev->device->attrs.max_qp_init_rd_atom);
+ param.private_data = &priv;
+ param.private_data_len = sizeof(priv);
+ priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+ priv.crqsize = cpu_to_le16(queue->recv_queue_size);
+
+ ret = rdma_accept(cm_id, &param);
+ if (ret)
+ pr_err("rdma_accept failed (error code = %d)\n", ret);
+
+ return ret;
+}
+
+static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct nvmet_rdma_device *ndev;
+ struct nvmet_rdma_queue *queue;
+ int ret = -EINVAL;
+
+ ndev = nvmet_rdma_find_get_device(cm_id);
+ if (!ndev) {
+		pr_err("could not find or allocate an RDMA device\n");
+ nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
+ return -ECONNREFUSED;
+ }
+
+ queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
+ if (!queue) {
+ ret = -ENOMEM;
+ goto put_device;
+ }
+ queue->port = cm_id->context;
+
+ ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
+ if (ret)
+ goto release_queue;
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ return 0;
+
+release_queue:
+ nvmet_rdma_free_queue(queue);
+put_device:
+ kref_put(&ndev->ref, nvmet_rdma_free_dev);
+
+ return ret;
+}
+
+static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ if (queue->state != NVMET_RDMA_Q_CONNECTING) {
+		pr_warn("trying to establish a queue that is not connecting\n");
+ goto out_unlock;
+ }
+ queue->state = NVMET_RDMA_Q_LIVE;
+
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *cmd;
+
+ cmd = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp, wait_list);
+ list_del(&cmd->wait_list);
+
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ nvmet_rdma_handle_command(queue, cmd);
+ spin_lock_irqsave(&queue->state_lock, flags);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+}
+
+static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
+{
+ bool disconnect = false;
+ unsigned long flags;
+
+ pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ switch (queue->state) {
+ case NVMET_RDMA_Q_CONNECTING:
+ case NVMET_RDMA_Q_LIVE:
+ disconnect = true;
+ queue->state = NVMET_RDMA_Q_DISCONNECTING;
+ break;
+ case NVMET_RDMA_Q_DISCONNECTING:
+ break;
+ }
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+
+ if (disconnect) {
+ rdma_disconnect(queue->cm_id);
+ ib_drain_qp(queue->cm_id->qp);
+ schedule_work(&queue->release_work);
+ }
+}
+
+static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
+{
+ bool disconnect = false;
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ if (!list_empty(&queue->queue_list)) {
+ list_del_init(&queue->queue_list);
+ disconnect = true;
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ if (disconnect)
+ __nvmet_rdma_queue_disconnect(queue);
+}
+
+static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
+ struct nvmet_rdma_queue *queue)
+{
+ WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
+
+ pr_err("failed to connect queue\n");
+ schedule_work(&queue->release_work);
+}
+
+static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct nvmet_rdma_queue *queue = NULL;
+ int ret = 0;
+
+ if (cm_id->qp)
+ queue = cm_id->qp->qp_context;
+
+ pr_debug("%s (%d): status %d id %p\n",
+ rdma_event_msg(event->event), event->event,
+ event->status, cm_id);
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ ret = nvmet_rdma_queue_connect(cm_id, event);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ nvmet_rdma_queue_established(queue);
+ break;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ /*
+ * We can get the device removal callback even for a
+ * CM ID that we aren't actually using. In that case
+ * the context pointer is NULL, so we shouldn't try
+ * to disconnect a non-existing queue. But we also
+	 * need to return 1 so that the core will destroy
+	 * its own ID. What a great API design..
+ */
+ if (queue)
+ nvmet_rdma_queue_disconnect(queue);
+ else
+ ret = 1;
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ nvmet_rdma_queue_connect_fail(cm_id, queue);
+ break;
+ default:
+ pr_err("received unrecognized RDMA CM event %d\n",
+ event->event);
+ break;
+ }
+
+ return ret;
+}
+
+static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_rdma_queue *queue;
+
+restart:
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
+ if (queue->nvme_sq.ctrl == ctrl) {
+ list_del_init(&queue->queue_list);
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ __nvmet_rdma_queue_disconnect(queue);
+ goto restart;
+ }
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+}
+
+static int nvmet_rdma_add_port(struct nvmet_port *port)
+{
+ struct rdma_cm_id *cm_id;
+ struct sockaddr_in addr_in;
+ u16 port_in;
+ int ret;
+
+ switch (port->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ break;
+ default:
+ pr_err("address family %d not supported\n",
+ port->disc_addr.adrfam);
+ return -EINVAL;
+ }
+
+ ret = kstrtou16(port->disc_addr.trsvcid, 0, &port_in);
+ if (ret)
+ return ret;
+
+ addr_in.sin_family = AF_INET;
+ addr_in.sin_addr.s_addr = in_aton(port->disc_addr.traddr);
+ addr_in.sin_port = htons(port_in);
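+	/*
+	 * E.g. traddr = "192.168.1.10" and trsvcid = "4420" (the
+	 * IANA-assigned NVMe/Fabrics port) produce a listener on
+	 * 192.168.1.10:4420.
+	 */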
+
+ cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ pr_err("CM ID creation failed\n");
+ return PTR_ERR(cm_id);
+ }
+
+ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr_in);
+ if (ret) {
+ pr_err("binding CM ID to %pISpc failed (%d)\n", &addr_in, ret);
+ goto out_destroy_id;
+ }
+
+ ret = rdma_listen(cm_id, 128);
+ if (ret) {
+ pr_err("listening to %pISpc failed (%d)\n", &addr_in, ret);
+ goto out_destroy_id;
+ }
+
+ pr_info("enabling port %d (%pISpc)\n",
+ le16_to_cpu(port->disc_addr.portid), &addr_in);
+ port->priv = cm_id;
+ return 0;
+
+out_destroy_id:
+ rdma_destroy_id(cm_id);
+ return ret;
+}
+
+static void nvmet_rdma_remove_port(struct nvmet_port *port)
+{
+ struct rdma_cm_id *cm_id = port->priv;
+
+ rdma_destroy_id(cm_id);
+}
+
+static struct nvmet_fabrics_ops nvmet_rdma_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_RDMA,
+ .sqe_inline_size = NVMET_RDMA_INLINE_DATA_SIZE,
+ .msdbd = 1,
+ .has_keyed_sgls = 1,
+ .add_port = nvmet_rdma_add_port,
+ .remove_port = nvmet_rdma_remove_port,
+ .queue_response = nvmet_rdma_queue_response,
+ .delete_ctrl = nvmet_rdma_delete_ctrl,
+};
+
+static int __init nvmet_rdma_init(void)
+{
+ return nvmet_register_transport(&nvmet_rdma_ops);
+}
+
+static void __exit nvmet_rdma_exit(void)
+{
+ struct nvmet_rdma_queue *queue;
+
+ nvmet_unregister_transport(&nvmet_rdma_ops);
+
+ flush_scheduled_work();
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
+ struct nvmet_rdma_queue, queue_list))) {
+ list_del_init(&queue->queue_list);
+
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+ __nvmet_rdma_queue_disconnect(queue);
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ flush_scheduled_work();
+ ida_destroy(&nvmet_rdma_queue_ida);
+}
+
+module_init(nvmet_rdma_init);
+module_exit(nvmet_rdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 3041d48e7155..f550c4596a7a 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -15,7 +15,8 @@ if NVMEM
config NVMEM_IMX_OCOTP
tristate "i.MX6 On-Chip OTP Controller support"
- depends on SOC_IMX6
+ depends on SOC_IMX6 || COMPILE_TEST
+ depends on HAS_IOMEM
help
This is a driver for the On-Chip OTP Controller (OCOTP) available on
i.MX6 SoCs, providing access to 4 Kbits of one-time programmable
@@ -50,7 +51,6 @@ config MTK_EFUSE
tristate "Mediatek SoCs EFUSE support"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on HAS_IOMEM
- select REGMAP_MMIO
help
This is a driver to access hardware related data like sensor
calibration, HDMI impedance etc.
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 75e66ef5b0ec..ac27b9bac3b9 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -15,6 +15,7 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -26,6 +27,7 @@
struct ocotp_priv {
struct device *dev;
+ struct clk *clk;
void __iomem *base;
unsigned int nregs;
};
@@ -36,7 +38,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
struct ocotp_priv *priv = context;
unsigned int count;
u32 *buf = val;
- int i;
+ int i, ret;
u32 index;
index = offset >> 2;
@@ -45,9 +47,16 @@ static int imx_ocotp_read(void *context, unsigned int offset,
if (count > (priv->nregs - index))
count = priv->nregs - index;
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
+ return ret;
+ }
for (i = index; i < (index + count); i++)
*buf++ = readl(priv->base + 0x400 + i * 0x10);
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -85,8 +94,12 @@ static int imx_ocotp_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
of_id = of_match_device(imx_ocotp_dt_ids, dev);
- priv->nregs = (unsigned int)of_id->data;
+ priv->nregs = (unsigned long)of_id->data;
imx_ocotp_nvmem_config.size = 4 * priv->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 9c49369beea5..32fd572e18c5 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -14,15 +14,35 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
-static struct regmap_config mtk_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
+static int mtk_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ void __iomem *base = context;
+ u32 *val = _val;
+ int i = 0, words = bytes / 4;
+
+ while (words--)
+ *val++ = readl(base + reg + (i++ * 4));
+
+ return 0;
+}
+
+static int mtk_reg_write(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ void __iomem *base = context;
+ u32 *val = _val;
+ int i = 0, words = bytes / 4;
+
+ while (words--)
+ writel(*val++, base + reg + (i++ * 4));
+
+ return 0;
+}
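+
+/*
+ * Example: an 8-byte read at offset 0x10 becomes two readl()s, from
+ * base + 0x10 and base + 0x14. Offsets and lengths arrive as multiples
+ * of 4 because of the stride and word_size set in the probe below.
+ */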
static int mtk_efuse_probe(struct platform_device *pdev)
{
@@ -30,7 +50,6 @@ static int mtk_efuse_probe(struct platform_device *pdev)
struct resource *res;
struct nvmem_device *nvmem;
struct nvmem_config *econfig;
- struct regmap *regmap;
void __iomem *base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -42,14 +61,12 @@ static int mtk_efuse_probe(struct platform_device *pdev)
if (!econfig)
return -ENOMEM;
- mtk_regmap_config.max_register = resource_size(res) - 1;
-
- regmap = devm_regmap_init_mmio(dev, base, &mtk_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
-
+ econfig->stride = 4;
+ econfig->word_size = 4;
+ econfig->reg_read = mtk_reg_read;
+ econfig->reg_write = mtk_reg_write;
+ econfig->size = resource_size(res);
+ econfig->priv = base;
econfig->dev = dev;
econfig->owner = THIS_MODULE;
nvmem = nvmem_register(econfig);
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 2bb3c5799ac4..d26dd03cec80 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -25,7 +25,6 @@
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/stmp_device.h>
@@ -66,11 +65,10 @@ static int mxs_ocotp_wait(struct mxs_ocotp *otp)
return 0;
}
-static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int mxs_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct mxs_ocotp *otp = context;
- unsigned int offset = *(u32 *)reg;
u32 *buf = val;
int ret;
@@ -94,17 +92,16 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
if (ret)
goto close_banks;
- while (val_size >= reg_size) {
+ while (bytes) {
if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
/* fill up non-data register */
- *buf = 0;
+ *buf++ = 0;
} else {
- *buf = readl(otp->base + offset);
+ *buf++ = readl(otp->base + offset);
}
- buf++;
- val_size -= reg_size;
- offset += reg_size;
+ bytes -= 4;
+ offset += 4;
}
close_banks:
@@ -117,57 +114,29 @@ disable_clk:
return ret;
}
-static int mxs_ocotp_write(void *context, const void *data, size_t count)
-{
- /* We don't want to support writing */
- return 0;
-}
-
-static bool mxs_ocotp_writeable_reg(struct device *dev, unsigned int reg)
-{
- return false;
-}
-
static struct nvmem_config ocotp_config = {
.name = "mxs-ocotp",
+ .stride = 16,
+ .word_size = 4,
.owner = THIS_MODULE,
+ .reg_read = mxs_ocotp_read,
};
-static const struct regmap_range imx23_ranges[] = {
- regmap_reg_range(OCOTP_DATA_OFFSET, 0x210),
-};
-
-static const struct regmap_access_table imx23_access = {
- .yes_ranges = imx23_ranges,
- .n_yes_ranges = ARRAY_SIZE(imx23_ranges),
-};
-
-static const struct regmap_range imx28_ranges[] = {
- regmap_reg_range(OCOTP_DATA_OFFSET, 0x290),
-};
-
-static const struct regmap_access_table imx28_access = {
- .yes_ranges = imx28_ranges,
- .n_yes_ranges = ARRAY_SIZE(imx28_ranges),
+struct mxs_data {
+ int size;
};
-static struct regmap_bus mxs_ocotp_bus = {
- .read = mxs_ocotp_read,
- .write = mxs_ocotp_write, /* make regmap_init() happy */
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+static const struct mxs_data imx23_data = {
+ .size = 0x220,
};
-static struct regmap_config mxs_ocotp_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 16,
- .writeable_reg = mxs_ocotp_writeable_reg,
+static const struct mxs_data imx28_data = {
+ .size = 0x2a0,
};
static const struct of_device_id mxs_ocotp_match[] = {
- { .compatible = "fsl,imx23-ocotp", .data = &imx23_access },
- { .compatible = "fsl,imx28-ocotp", .data = &imx28_access },
+ { .compatible = "fsl,imx23-ocotp", .data = &imx23_data },
+ { .compatible = "fsl,imx28-ocotp", .data = &imx28_data },
{ /* sentinel */},
};
MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
@@ -175,11 +144,10 @@ MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
static int mxs_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct mxs_data *data;
struct mxs_ocotp *otp;
struct resource *res;
const struct of_device_id *match;
- struct regmap *regmap;
- const struct regmap_access_table *access;
int ret;
match = of_match_device(dev->driver->of_match_table, dev);
@@ -205,17 +173,10 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
return ret;
}
- access = match->data;
- mxs_ocotp_config.rd_table = access;
- mxs_ocotp_config.max_register = access->yes_ranges[0].range_max;
-
- regmap = devm_regmap_init(dev, &mxs_ocotp_bus, otp, &mxs_ocotp_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- ret = PTR_ERR(regmap);
- goto err_clk;
- }
+ data = match->data;
+ ocotp_config.size = data->size;
+ ocotp_config.priv = otp;
ocotp_config.dev = dev;
otp->nvmem = nvmem_register(&ocotp_config);
if (IS_ERR(otp->nvmem)) {
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 0f2784bc1874..ed5a097f0801 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -91,8 +91,8 @@ static int __init of_numa_parse_memory_nodes(void)
pr_debug("NUMA: base = %llx len = %llx, node = %u\n",
rsrc.start, rsrc.end - rsrc.start + 1, nid);
- r = numa_add_memblk(nid, rsrc.start,
- rsrc.end - rsrc.start + 1);
+
+ r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1);
if (r)
break;
}
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 1fa6925733d3..8db5079f09a7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -51,6 +51,9 @@ obj-$(CONFIG_ACPI) += pci-acpi.o
# SMBIOS provided firmware instance and labels
obj-$(CONFIG_PCI_LABEL) += pci-label.o
+# Intel MID platform PM support
+obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
+
obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
new file mode 100644
index 000000000000..c878aa71173b
--- /dev/null
+++ b/drivers/pci/pci-mid.c
@@ -0,0 +1,77 @@
+/*
+ * Intel MID platform PM support
+ *
+ * Copyright (C) 2016, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/intel-mid.h>
+
+#include "pci.h"
+
+static bool mid_pci_power_manageable(struct pci_dev *dev)
+{
+ return true;
+}
+
+static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+ return intel_mid_pci_set_power_state(pdev, state);
+}
+
+static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
+{
+ return PCI_D3hot;
+}
+
+static int mid_pci_sleep_wake(struct pci_dev *dev, bool enable)
+{
+ return 0;
+}
+
+static int mid_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+ return 0;
+}
+
+static bool mid_pci_need_resume(struct pci_dev *dev)
+{
+ return false;
+}
+
+static struct pci_platform_pm_ops mid_pci_platform_pm = {
+ .is_manageable = mid_pci_power_manageable,
+ .set_state = mid_pci_set_power_state,
+ .choose_state = mid_pci_choose_state,
+ .sleep_wake = mid_pci_sleep_wake,
+ .run_wake = mid_pci_run_wake,
+ .need_resume = mid_pci_need_resume,
+};
+
+#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id lpss_cpu_ids[] = {
+ ICPU(INTEL_FAM6_ATOM_MERRIFIELD1),
+ {}
+};
+
+static int __init mid_pci_init(void)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(lpss_cpu_ids);
+ if (id)
+ pci_set_platform_pm(&mid_pci_platform_pm);
+ return 0;
+}
+arch_initcall(mid_pci_init);
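+
+/*
+ * arch_initcall() runs early in boot, which should install these
+ * platform PM ops before the PCI core starts binding drivers.
+ */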
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c8b4dbdd1bdd..badbddc683f0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -530,8 +530,8 @@ static const struct pci_platform_pm_ops *pci_platform_pm;
int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
- if (!ops->is_manageable || !ops->set_state || !ops->choose_state
- || !ops->sleep_wake)
+ if (!ops->is_manageable || !ops->set_state || !ops->choose_state ||
+ !ops->sleep_wake || !ops->run_wake || !ops->need_resume)
return -EINVAL;
pci_platform_pm = ops;
return 0;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index b869b98835f4..cc0b69545385 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -44,6 +44,16 @@ config ARMADA375_USBCLUSTER_PHY
depends on OF && HAS_IOMEM
select GENERIC_PHY
+config PHY_DA8XX_USB
+ tristate "TI DA8xx USB PHY Driver"
+ depends on ARCH_DAVINCI_DA8XX
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the USB PHY on DA8xx SoCs.
+
+ This driver controls both the USB 1.1 PHY and the USB 2.0 PHY.
+
config PHY_DM816X_USB
tristate "TI dm816x USB PHY driver"
depends on ARCH_OMAP2PLUS
@@ -176,6 +186,7 @@ config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
depends on USB_SUPPORT
+	depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select GENERIC_PHY
select USB_PHY
help
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 9c3e73ccabc4..fa8480e89724 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_GENERIC_PHY) += phy-core.o
obj-$(CONFIG_PHY_BCM_NS_USB2) += phy-bcm-ns-usb2.o
obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o
obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o
+obj-$(CONFIG_PHY_DA8XX_USB) += phy-da8xx-usb.o
obj-$(CONFIG_PHY_DM816X_USB) += phy-dm816x-usb.o
obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o
obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/phy-brcm-sata.c
index 6c4c5cb791ca..18d662610075 100644
--- a/drivers/phy/phy-brcm-sata.c
+++ b/drivers/phy/phy-brcm-sata.c
@@ -45,6 +45,7 @@ enum brcm_sata_phy_version {
BRCM_SATA_PHY_STB_28NM,
BRCM_SATA_PHY_STB_40NM,
BRCM_SATA_PHY_IPROC_NS2,
+ BRCM_SATA_PHY_IPROC_NSP,
};
struct brcm_sata_port {
@@ -73,6 +74,13 @@ enum sata_phy_regs {
PLL_REG_BANK_0 = 0x050,
PLL_REG_BANK_0_PLLCONTROL_0 = 0x81,
+ PLLCONTROL_0_FREQ_DET_RESTART = BIT(13),
+ PLLCONTROL_0_FREQ_MONITOR = BIT(12),
+ PLLCONTROL_0_SEQ_START = BIT(15),
+ PLL_CAP_CONTROL = 0x85,
+ PLL_ACTRL2 = 0x8b,
+ PLL_ACTRL2_SELDIV_MASK = 0x1f,
+ PLL_ACTRL2_SELDIV_SHIFT = 9,
PLL1_REG_BANK = 0x060,
PLL1_ACTRL2 = 0x82,
@@ -80,6 +88,7 @@ enum sata_phy_regs {
PLL1_ACTRL4 = 0x84,
OOB_REG_BANK = 0x150,
+ OOB1_REG_BANK = 0x160,
OOB_CTRL1 = 0x80,
OOB_CTRL1_BURST_MAX_MASK = 0xf,
OOB_CTRL1_BURST_MAX_SHIFT = 12,
@@ -271,6 +280,73 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port)
return 0;
}
+static int brcm_nsp_sata_init(struct brcm_sata_port *port)
+{
+ struct brcm_sata_phy *priv = port->phy_priv;
+ struct device *dev = port->phy_priv->dev;
+ void __iomem *base = priv->phy_base;
+ unsigned int oob_bank;
+ unsigned int val, try;
+
+ /* Configure OOB control */
+ if (port->portnum == 0)
+ oob_bank = OOB_REG_BANK;
+ else if (port->portnum == 1)
+ oob_bank = OOB1_REG_BANK;
+ else
+ return -EINVAL;
+
+ val = 0x0;
+ val |= (0x0f << OOB_CTRL1_BURST_MAX_SHIFT);
+ val |= (0x06 << OOB_CTRL1_BURST_MIN_SHIFT);
+ val |= (0x0f << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT);
+ val |= (0x06 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT);
+ brcm_sata_phy_wr(base, oob_bank, OOB_CTRL1, 0x0, val);
+
+ val = 0x0;
+ val |= (0x2e << OOB_CTRL2_RESET_IDLE_MAX_SHIFT);
+ val |= (0x02 << OOB_CTRL2_BURST_CNT_SHIFT);
+ val |= (0x16 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT);
+ brcm_sata_phy_wr(base, oob_bank, OOB_CTRL2, 0x0, val);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_ACTRL2,
+ ~(PLL_ACTRL2_SELDIV_MASK << PLL_ACTRL2_SELDIV_SHIFT),
+ 0x0c << PLL_ACTRL2_SELDIV_SHIFT);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CONTROL,
+ 0xff0, 0x4f0);
+
+ val = PLLCONTROL_0_FREQ_DET_RESTART | PLLCONTROL_0_FREQ_MONITOR;
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ ~val, val);
+ val = PLLCONTROL_0_SEQ_START;
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ ~val, 0);
+ mdelay(10);
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ ~val, val);
+
+ /* Wait for pll_seq_done bit */
+	try = 50;
+	while (try) {
+		val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+					BLOCK0_XGXSSTATUS);
+		if (val & BLOCK0_XGXSSTATUS_PLL_LOCK)
+			break;
+		msleep(20);
+		try--;
+	}
+	if (!try) {
+		/* PLL did not lock; give up */
+		dev_err(dev, "port%d PLL did not lock\n", port->portnum);
+		return -ETIMEDOUT;
+	}
+
+ dev_dbg(dev, "port%d initialized\n", port->portnum);
+
+ return 0;
+}
+
static int brcm_sata_phy_init(struct phy *phy)
{
int rc;
@@ -284,6 +360,9 @@ static int brcm_sata_phy_init(struct phy *phy)
case BRCM_SATA_PHY_IPROC_NS2:
rc = brcm_ns2_sata_init(port);
break;
+ case BRCM_SATA_PHY_IPROC_NSP:
+ rc = brcm_nsp_sata_init(port);
+ break;
default:
rc = -ENODEV;
	}
@@ -303,6 +382,8 @@ static const struct of_device_id brcm_sata_phy_of_match[] = {
.data = (void *)BRCM_SATA_PHY_STB_40NM },
{ .compatible = "brcm,iproc-ns2-sata-phy",
.data = (void *)BRCM_SATA_PHY_IPROC_NS2 },
+ { .compatible = "brcm,iproc-nsp-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_IPROC_NSP },
{},
};
MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match);
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b72e9a3b6429..8eca906b6e70 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -342,6 +342,21 @@ int phy_power_off(struct phy *phy)
}
EXPORT_SYMBOL_GPL(phy_power_off);
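+
+/**
+ * phy_set_mode() - set the operating mode of a PHY
+ * @phy: the phy to operate on; may be NULL
+ * @mode: the mode to switch the phy to
+ *
+ * Returns 0 if @phy is NULL or its driver provides no set_mode op,
+ * otherwise the return value of the driver's set_mode op.
+ */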
+int phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ int ret;
+
+ if (!phy || !phy->ops->set_mode)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->set_mode(phy, mode);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_set_mode);
+
/**
* _of_phy_get() - lookup and obtain a reference to a phy by phandle
* @np: device_node for which to get the phy
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
new file mode 100644
index 000000000000..b2e59b6170ac
--- /dev/null
+++ b/drivers/phy/phy-da8xx-usb.c
@@ -0,0 +1,245 @@
+/*
+ * phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct da8xx_usb_phy {
+ struct phy_provider *phy_provider;
+ struct phy *usb11_phy;
+ struct phy *usb20_phy;
+ struct clk *usb11_clk;
+ struct clk *usb20_clk;
+ struct regmap *regmap;
+};
+
+static int da8xx_usb11_phy_power_on(struct phy *phy)
+{
+ struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(d_phy->usb11_clk);
+ if (ret)
+ return ret;
+
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_USB1SUSPENDM,
+ CFGCHIP2_USB1SUSPENDM);
+
+ return 0;
+}
+
+static int da8xx_usb11_phy_power_off(struct phy *phy)
+{
+ struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_USB1SUSPENDM, 0);
+
+ clk_disable_unprepare(d_phy->usb11_clk);
+
+ return 0;
+}
+
+static const struct phy_ops da8xx_usb11_phy_ops = {
+ .power_on = da8xx_usb11_phy_power_on,
+ .power_off = da8xx_usb11_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int da8xx_usb20_phy_power_on(struct phy *phy)
+{
+ struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(d_phy->usb20_clk);
+ if (ret)
+ return ret;
+
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGPWRDN, 0);
+
+ return 0;
+}
+
+static int da8xx_usb20_phy_power_off(struct phy *phy)
+{
+ struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGPWRDN,
+ CFGCHIP2_OTGPWRDN);
+
+ clk_disable_unprepare(d_phy->usb20_clk);
+
+ return 0;
+}
+
+static int da8xx_usb20_phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+ u32 val;
+
+ switch (mode) {
+ case PHY_MODE_USB_HOST: /* Force VBUS valid, ID = 0 */
+ val = CFGCHIP2_OTGMODE_FORCE_HOST;
+ break;
+ case PHY_MODE_USB_DEVICE: /* Force VBUS valid, ID = 1 */
+ val = CFGCHIP2_OTGMODE_FORCE_DEVICE;
+ break;
+ case PHY_MODE_USB_OTG: /* Don't override the VBUS/ID comparators */
+ val = CFGCHIP2_OTGMODE_NO_OVERRIDE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGMODE_MASK,
+ val);
+
+ return 0;
+}
+
+static const struct phy_ops da8xx_usb20_phy_ops = {
+ .power_on = da8xx_usb20_phy_power_on,
+ .power_off = da8xx_usb20_phy_power_off,
+ .set_mode = da8xx_usb20_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *da8xx_usb_phy_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct da8xx_usb_phy *d_phy = dev_get_drvdata(dev);
+
+ if (!d_phy)
+ return ERR_PTR(-ENODEV);
+
+ switch (args->args[0]) {
+ case 0:
+ return d_phy->usb20_phy;
+ case 1:
+ return d_phy->usb11_phy;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
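+
+/*
+ * A hypothetical consumer node selects a phy by its cell index, e.g.
+ * (assuming #phy-cells = <1> in the provider node):
+ *
+ *	phys = <&usb_phy 0>;	(the USB 2.0 phy)
+ *	phys = <&usb_phy 1>;	(the USB 1.1 phy)
+ */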
+
+static int da8xx_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct da8xx_usb_phy *d_phy;
+
+ d_phy = devm_kzalloc(dev, sizeof(*d_phy), GFP_KERNEL);
+ if (!d_phy)
+ return -ENOMEM;
+
+ if (node)
+ d_phy->regmap = syscon_regmap_lookup_by_compatible(
+ "ti,da830-cfgchip");
+ else
+ d_phy->regmap = syscon_regmap_lookup_by_pdevname("syscon.0");
+ if (IS_ERR(d_phy->regmap)) {
+ dev_err(dev, "Failed to get syscon\n");
+ return PTR_ERR(d_phy->regmap);
+ }
+
+ d_phy->usb11_clk = devm_clk_get(dev, "usb11_phy");
+ if (IS_ERR(d_phy->usb11_clk)) {
+ dev_err(dev, "Failed to get usb11_phy clock\n");
+ return PTR_ERR(d_phy->usb11_clk);
+ }
+
+ d_phy->usb20_clk = devm_clk_get(dev, "usb20_phy");
+ if (IS_ERR(d_phy->usb20_clk)) {
+ dev_err(dev, "Failed to get usb20_phy clock\n");
+ return PTR_ERR(d_phy->usb20_clk);
+ }
+
+ d_phy->usb11_phy = devm_phy_create(dev, node, &da8xx_usb11_phy_ops);
+ if (IS_ERR(d_phy->usb11_phy)) {
+ dev_err(dev, "Failed to create usb11 phy\n");
+ return PTR_ERR(d_phy->usb11_phy);
+ }
+
+ d_phy->usb20_phy = devm_phy_create(dev, node, &da8xx_usb20_phy_ops);
+ if (IS_ERR(d_phy->usb20_phy)) {
+ dev_err(dev, "Failed to create usb20 phy\n");
+ return PTR_ERR(d_phy->usb20_phy);
+ }
+
+ platform_set_drvdata(pdev, d_phy);
+ phy_set_drvdata(d_phy->usb11_phy, d_phy);
+ phy_set_drvdata(d_phy->usb20_phy, d_phy);
+
+ if (node) {
+ d_phy->phy_provider = devm_of_phy_provider_register(dev,
+ da8xx_usb_phy_of_xlate);
+ if (IS_ERR(d_phy->phy_provider)) {
+ dev_err(dev, "Failed to create phy provider\n");
+ return PTR_ERR(d_phy->phy_provider);
+ }
+ } else {
+ int ret;
+
+ ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+ if (ret)
+ dev_warn(dev, "Failed to create usb11 phy lookup\n");
+ ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy",
+ "musb-da8xx");
+ if (ret)
+ dev_warn(dev, "Failed to create usb20 phy lookup\n");
+ }
+
+ return 0;
+}
+
+static int da8xx_usb_phy_remove(struct platform_device *pdev)
+{
+ struct da8xx_usb_phy *d_phy = platform_get_drvdata(pdev);
+
+ if (!pdev->dev.of_node) {
+ phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx");
+ phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_usb_phy_ids[] = {
+ { .compatible = "ti,da830-usb-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, da8xx_usb_phy_ids);
+
+static struct platform_driver da8xx_usb_phy_driver = {
+ .probe = da8xx_usb_phy_probe,
+ .remove = da8xx_usb_phy_remove,
+ .driver = {
+ .name = "da8xx-usb-phy",
+ .of_match_table = da8xx_usb_phy_ids,
+ },
+};
+
+module_platform_driver(da8xx_usb_phy_driver);
+
+MODULE_ALIAS("platform:da8xx-usb-phy");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_DESCRIPTION("TI DA8xx USB PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
index 56631e77c11d..6ee51490f786 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -140,7 +140,6 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
- dev_err(dev, "%s: failed to allocate phy\n", __func__);
err = -ENOMEM;
goto out;
}
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
index b16ea77d07b9..770087ab05e2 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -196,7 +196,6 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
- dev_err(dev, "%s: failed to allocate phy\n", __func__);
err = -ENOMEM;
goto out;
}
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
index 4be3f5dbbc9f..31156c9c4707 100644
--- a/drivers/phy/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/phy-rcar-gen3-usb2.c
@@ -21,6 +21,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
/******* USB2.0 Host registers (original offset is +0x200) *******/
#define USB2_INT_ENABLE 0x000
@@ -81,9 +82,25 @@ struct rcar_gen3_chan {
struct extcon_dev *extcon;
struct phy *phy;
struct regulator *vbus;
+ struct work_struct work;
+ bool extcon_host;
bool has_otg;
};
+static void rcar_gen3_phy_usb2_work(struct work_struct *work)
+{
+ struct rcar_gen3_chan *ch = container_of(work, struct rcar_gen3_chan,
+ work);
+
+ if (ch->extcon_host) {
+ extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, true);
+ extcon_set_cable_state_(ch->extcon, EXTCON_USB, false);
+ } else {
+ extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, false);
+ extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
+ }
+}
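+
+/*
+ * The extcon state is latched in ch->extcon_host and applied from this
+ * work item rather than in the callers below, presumably because those
+ * can run in interrupt context while the extcon calls may sleep.
+ */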
+
static void rcar_gen3_set_host_mode(struct rcar_gen3_chan *ch, int host)
{
void __iomem *usb2_base = ch->base;
@@ -130,8 +147,8 @@ static void rcar_gen3_init_for_host(struct rcar_gen3_chan *ch)
rcar_gen3_set_host_mode(ch, 1);
rcar_gen3_enable_vbus_ctrl(ch, 1);
- extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, true);
- extcon_set_cable_state_(ch->extcon, EXTCON_USB, false);
+ ch->extcon_host = true;
+ schedule_work(&ch->work);
}
static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
@@ -140,8 +157,8 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
rcar_gen3_set_host_mode(ch, 0);
rcar_gen3_enable_vbus_ctrl(ch, 0);
- extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, false);
- extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
+ ch->extcon_host = false;
+ schedule_work(&ch->work);
}
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
@@ -301,6 +318,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
if (irq >= 0) {
int ret;
+ INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
IRQF_SHARED, dev_name(dev), channel);
if (irq < 0)
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index d60b149cff0f..2a7381f4fe4c 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -236,9 +236,10 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
goto err_clk_prov;
}
- err = devm_add_action(base->dev, rockchip_usb_phy_action, rk_phy);
+ err = devm_add_action_or_reset(base->dev, rockchip_usb_phy_action,
+ rk_phy);
if (err)
- goto err_devm_action;
+ return err;
rk_phy->phy = devm_phy_create(base->dev, child, &ops);
if (IS_ERR(rk_phy->phy)) {
@@ -256,9 +257,6 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
else
return rockchip_usb_phy_power(rk_phy, 1);
-err_devm_action:
- if (!rk_phy->uart_enabled)
- of_clk_del_provider(child);
err_clk_prov:
if (!rk_phy->uart_enabled)
clk_unregister(rk_phy->clk480m);
@@ -397,8 +395,13 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
phy_base->pdata = match->data;
phy_base->dev = dev;
- phy_base->reg_base = syscon_regmap_lookup_by_phandle(dev->of_node,
- "rockchip,grf");
+ phy_base->reg_base = ERR_PTR(-ENODEV);
+ if (dev->parent && dev->parent->of_node)
+ phy_base->reg_base = syscon_node_to_regmap(
+ dev->parent->of_node);
+ if (IS_ERR(phy_base->reg_base))
+ phy_base->reg_base = syscon_regmap_lookup_by_phandle(
+ dev->of_node, "rockchip,grf");
if (IS_ERR(phy_base->reg_base)) {
dev_err(&pdev->dev, "Missing rockchip,grf property\n");
return PTR_ERR(phy_base->reg_base);
@@ -463,7 +466,11 @@ static int __init rockchip_init_usb_uart(void)
return -ENOTSUPP;
}
- grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ grf = ERR_PTR(-ENODEV);
+ if (np->parent)
+ grf = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(grf))
+ grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(grf)) {
pr_err("%s: Missing rockchip,grf property, %lu\n",
__func__, PTR_ERR(grf));
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index de3101fbbf40..0a45bc6088ae 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -94,6 +94,7 @@
enum sun4i_usb_phy_type {
sun4i_a10_phy,
+ sun6i_a31_phy,
sun8i_a33_phy,
sun8i_h3_phy,
};
@@ -122,7 +123,6 @@ struct sun4i_usb_phy_data {
/* phy0 / otg related variables */
struct extcon_dev *extcon;
bool phy0_init;
- bool phy0_poll;
struct gpio_desc *id_det_gpio;
struct gpio_desc *vbus_det_gpio;
struct power_supply *vbus_power_supply;
@@ -343,6 +343,24 @@ static bool sun4i_usb_phy0_have_vbus_det(struct sun4i_usb_phy_data *data)
return data->vbus_det_gpio || data->vbus_power_supply;
}
+static bool sun4i_usb_phy0_poll(struct sun4i_usb_phy_data *data)
+{
+ if ((data->id_det_gpio && data->id_det_irq <= 0) ||
+ (data->vbus_det_gpio && data->vbus_det_irq <= 0))
+ return true;
+
+ /*
+ * The A31 companion pmic (axp221) does not generate vbus change
+ * interrupts when the board is driving vbus, so we must poll
+ * when using the pmic for vbus-det _and_ we're driving vbus.
+ */
+ if (data->cfg->type == sun6i_a31_phy &&
+ data->vbus_power_supply && data->phys[0].regulator_on)
+ return true;
+
+ return false;
+}
+
static int sun4i_usb_phy_power_on(struct phy *_phy)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
@@ -364,7 +382,7 @@ static int sun4i_usb_phy_power_on(struct phy *_phy)
phy->regulator_on = true;
/* We must report Vbus high within OTG_TIME_A_WAIT_VRISE msec. */
- if (phy->index == 0 && data->vbus_det_gpio && data->phy0_poll)
+ if (phy->index == 0 && sun4i_usb_phy0_poll(data))
mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
return 0;
@@ -385,7 +403,7 @@ static int sun4i_usb_phy_power_off(struct phy *_phy)
* phy0 vbus typically slowly discharges, sometimes this causes the
* Vbus gpio to not trigger an edge irq on Vbus off, so force a rescan.
*/
- if (phy->index == 0 && data->vbus_det_gpio && !data->phy0_poll)
+ if (phy->index == 0 && !sun4i_usb_phy0_poll(data))
mod_delayed_work(system_wq, &data->detect, POLL_TIME);
return 0;
@@ -468,7 +486,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
if (vbus_notify)
extcon_set_cable_state_(data->extcon, EXTCON_USB, vbus_det);
- if (data->phy0_poll)
+ if (sun4i_usb_phy0_poll(data))
queue_delayed_work(system_wq, &data->detect, POLL_TIME);
}
@@ -644,11 +662,6 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
- data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
- if ((data->id_det_gpio && data->id_det_irq <= 0) ||
- (data->vbus_det_gpio && data->vbus_det_irq <= 0))
- data->phy0_poll = true;
-
if (data->id_det_irq > 0) {
ret = devm_request_irq(dev, data->id_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
@@ -660,6 +673,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
}
+ data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
if (data->vbus_det_irq > 0) {
ret = devm_request_irq(dev, data->vbus_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
@@ -711,7 +725,7 @@ static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = {
static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
.num_phys = 3,
- .type = sun4i_a10_phy,
+ .type = sun6i_a31_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 385362e5b2f6..ae266e0c8368 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -518,7 +518,7 @@ enum clk_type_t {
CLK_INT_SING = 2, /* Internal single ended */
};
-enum phy_mode {
+enum xgene_phy_mode {
MODE_SATA = 0, /* List them for simple reference */
MODE_SGMII = 1,
MODE_PCIE = 2,
@@ -542,7 +542,7 @@ struct xgene_sata_override_param {
struct xgene_phy_ctx {
struct device *dev;
struct phy *phy;
- enum phy_mode mode; /* Mode of operation */
+ enum xgene_phy_mode mode; /* Mode of operation */
enum clk_type_t clk_type; /* Input clock selection */
void __iomem *sds_base; /* PHY CSR base addr */
struct clk *clk; /* Optional clock */
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index 6d8ee3b15872..8abd80dbcbed 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -151,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
goto exit;
}
+ if (u_cmd.outsize != s_cmd->outsize ||
+ u_cmd.insize != s_cmd->insize) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
- if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
+ if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
ret = -EFAULT;
exit:
kfree(s_cmd);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 3ec0025d19e7..81b8dcca8891 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -603,6 +603,8 @@ config ASUS_WIRELESS
tristate "Asus Wireless Radio Control Driver"
depends on ACPI
depends on INPUT
+ select NEW_LEDS
+ select LEDS_CLASS
---help---
The Asus Wireless Radio Control handles the airplane mode hotkey
present on some Asus laptops.
@@ -668,6 +670,7 @@ config ACPI_TOSHIBA
depends on SERIO_I8042 || SERIO_I8042 = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL || RFKILL = n
+ depends on IIO
select INPUT_POLLDEV
select INPUT_SPARSEKMAP
---help---
@@ -770,6 +773,18 @@ config INTEL_HID_EVENT
To compile this driver as a module, choose M here: the module will
be called intel_hid.
+config INTEL_VBTN
+ tristate "INTEL VIRTUAL BUTTON"
+ depends on ACPI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ help
+ This driver provides support for the Intel Virtual Button interface.
+ Some laptops require this driver for power button support.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vbtn.
+
config INTEL_SCU_IPC
bool "Intel SCU IPC Support"
depends on X86_INTEL_MID
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 9b11b4073e03..2efa86d2a1a7 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
obj-$(CONFIG_TOSHIBA_HAPS) += toshiba_haps.o
obj-$(CONFIG_TOSHIBA_WMI) += toshiba-wmi.o
obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
+obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 091ca7ada8fc..adecc1c555f0 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -78,6 +78,15 @@ static struct quirk_entry quirk_asus_x200ca = {
.wapf = 2,
};
+static struct quirk_entry quirk_no_rfkill = {
+ .no_rfkill = true,
+};
+
+static struct quirk_entry quirk_no_rfkill_wapf4 = {
+ .wapf = 4,
+ .no_rfkill = true,
+};
+
static int dmi_matched(const struct dmi_system_id *dmi)
{
quirks = dmi->driver_data;
@@ -133,7 +142,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
},
- .driver_data = &quirk_asus_wapf4,
+ .driver_data = &quirk_no_rfkill_wapf4,
},
{
.callback = dmi_matched,
@@ -142,7 +151,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
},
- .driver_data = &quirk_asus_wapf4,
+ .driver_data = &quirk_no_rfkill_wapf4,
},
{
.callback = dmi_matched,
@@ -306,6 +315,42 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_x200ca,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X555UB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X555UB"),
+ },
+ .driver_data = &quirk_no_rfkill,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. N552VW",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N552VW"),
+ },
+ .driver_data = &quirk_no_rfkill,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. U303LB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U303LB"),
+ },
+ .driver_data = &quirk_no_rfkill,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. Z550MA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z550MA"),
+ },
+ .driver_data = &quirk_no_rfkill,
+ },
{},
};
@@ -356,6 +401,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
{ KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
{ KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
{ KE_KEY, 0x82, { KEY_CAMERA } },
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index 9ec721e26532..9f31bc1a47d0 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -15,11 +15,78 @@
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/pci_ids.h>
+#include <linux/leds.h>
+
+#define ASUS_WIRELESS_LED_STATUS 0x2
+#define ASUS_WIRELESS_LED_OFF 0x4
+#define ASUS_WIRELESS_LED_ON 0x5
struct asus_wireless_data {
struct input_dev *idev;
+ struct acpi_device *adev;
+ struct workqueue_struct *wq;
+ struct work_struct led_work;
+ struct led_classdev led;
+ int led_state;
};
+static u64 asus_wireless_method(acpi_handle handle, const char *method,
+ int param)
+{
+ struct acpi_object_list p;
+ union acpi_object obj;
+ acpi_status s;
+	u64 ret = 0;
+
+ acpi_handle_debug(handle, "Evaluating method %s, parameter %#x\n",
+ method, param);
+ obj.type = ACPI_TYPE_INTEGER;
+ obj.integer.value = param;
+ p.count = 1;
+ p.pointer = &obj;
+
+ s = acpi_evaluate_integer(handle, (acpi_string) method, &p, &ret);
+ if (ACPI_FAILURE(s))
+ acpi_handle_err(handle,
+ "Failed to eval method %s, param %#x (%d)\n",
+ method, param, s);
+ acpi_handle_debug(handle, "%s returned %#x\n", method, (uint) ret);
+ return ret;
+}
+
+static enum led_brightness led_state_get(struct led_classdev *led)
+{
+ struct asus_wireless_data *data;
+ int s;
+
+ data = container_of(led, struct asus_wireless_data, led);
+ s = asus_wireless_method(acpi_device_handle(data->adev), "HSWC",
+ ASUS_WIRELESS_LED_STATUS);
+ if (s == ASUS_WIRELESS_LED_ON)
+ return LED_FULL;
+ return LED_OFF;
+}
+
+static void led_state_update(struct work_struct *work)
+{
+ struct asus_wireless_data *data;
+
+ data = container_of(work, struct asus_wireless_data, led_work);
+ asus_wireless_method(acpi_device_handle(data->adev), "HSWC",
+ data->led_state);
+}
+
+static void led_state_set(struct led_classdev *led,
+ enum led_brightness value)
+{
+ struct asus_wireless_data *data;
+
+ data = container_of(led, struct asus_wireless_data, led);
+ data->led_state = value == LED_OFF ? ASUS_WIRELESS_LED_OFF :
+ ASUS_WIRELESS_LED_ON;
+ queue_work(data->wq, &data->led_work);
+}
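+
+/*
+ * led_state_set() only latches the target state and queues the work
+ * above, presumably because brightness_set may be called from atomic
+ * context while evaluating the "HSWC" ACPI method can sleep.
+ */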
+
static void asus_wireless_notify(struct acpi_device *adev, u32 event)
{
struct asus_wireless_data *data = acpi_driver_data(adev);
@@ -37,6 +104,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
static int asus_wireless_add(struct acpi_device *adev)
{
struct asus_wireless_data *data;
+ int err;
data = devm_kzalloc(&adev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -52,11 +120,32 @@ static int asus_wireless_add(struct acpi_device *adev)
data->idev->id.vendor = PCI_VENDOR_ID_ASUSTEK;
set_bit(EV_KEY, data->idev->evbit);
set_bit(KEY_RFKILL, data->idev->keybit);
- return input_register_device(data->idev);
+ err = input_register_device(data->idev);
+ if (err)
+ return err;
+
+ data->adev = adev;
+ data->wq = create_singlethread_workqueue("asus_wireless_workqueue");
+ if (!data->wq)
+ return -ENOMEM;
+ INIT_WORK(&data->led_work, led_state_update);
+ data->led.name = "asus-wireless::airplane";
+ data->led.brightness_set = led_state_set;
+ data->led.brightness_get = led_state_get;
+ data->led.flags = LED_CORE_SUSPENDRESUME;
+ data->led.max_brightness = 1;
+ err = devm_led_classdev_register(&adev->dev, &data->led);
+ if (err)
+ destroy_workqueue(data->wq);
+ return err;
}
static int asus_wireless_remove(struct acpi_device *adev)
{
+ struct asus_wireless_data *data = acpi_driver_data(adev);
+
+ if (data->wq)
+ destroy_workqueue(data->wq);
return 0;
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a26dca3640ea..7c093a0b78bb 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2069,9 +2069,11 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_leds;
- err = asus_wmi_rfkill_init(asus);
- if (err)
- goto fail_rfkill;
+ if (!asus->driver->quirks->no_rfkill) {
+ err = asus_wmi_rfkill_init(asus);
+ if (err)
+ goto fail_rfkill;
+ }
/* Some Asus desktop boards export an acpi-video backlight interface,
stop this from showing up */
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 4da4c8bafe70..5de1df510ebd 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -38,6 +38,7 @@ struct key_entry;
struct asus_wmi;
struct quirk_entry {
+ bool no_rfkill;
bool hotplug_wireless;
bool scalar_panel_brightness;
bool store_backlight_power;
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 15c6f1191aec..d2bc092defd7 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -80,66 +80,115 @@ static const struct dmi_system_id dell_wmi_smbios_list[] __initconst = {
};
/*
+ * Keymap for WMI events of type 0x0000
+ *
* Certain keys are flagged as KE_IGNORE. All of these are either
* notifications (rather than requests for change) or are also sent
* via the keyboard controller so should not be sent again.
*/
-
-static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
+static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
{ KE_IGNORE, 0x003a, { KEY_CAPSLOCK } },
- { KE_KEY, 0xe045, { KEY_PROG1 } },
- { KE_KEY, 0xe009, { KEY_EJECTCD } },
-
- /* These also contain the brightness level at offset 6 */
- { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
- { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
+ /* Key code is followed by brightness level */
+ { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
/* Battery health status button */
- { KE_KEY, 0xe007, { KEY_BATTERY } },
+ { KE_KEY, 0xe007, { KEY_BATTERY } },
- /* Radio devices state change */
+ /* Radio devices state change, key code is followed by other values */
{ KE_IGNORE, 0xe008, { KEY_RFKILL } },
- /* The next device is at offset 6, the active devices are at
- offset 8 and the attached devices at offset 10 */
- { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0xe009, { KEY_EJECTCD } },
+ /* Key code is followed by: next, active and attached devices */
+ { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
+
+ /* Key code is followed by keyboard illumination level */
{ KE_IGNORE, 0xe00c, { KEY_KBDILLUMTOGGLE } },
/* BIOS error detected */
{ KE_IGNORE, 0xe00d, { KEY_RESERVED } },
+ /* Unknown, defined in ACPI DSDT */
+ /* { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, */
+
/* Wifi Catcher */
- { KE_KEY, 0xe011, {KEY_PROG2 } },
+ { KE_KEY, 0xe011, { KEY_PROG2 } },
/* Ambient light sensor toggle */
{ KE_IGNORE, 0xe013, { KEY_RESERVED } },
{ KE_IGNORE, 0xe020, { KEY_MUTE } },
+ /* Unknown, defined in ACPI DSDT */
+ /* { KE_IGNORE, 0xe023, { KEY_RESERVED } }, */
+
+ /* Untested, Dell Instant Launch key on Inspiron 7520 */
+ /* { KE_IGNORE, 0xe024, { KEY_RESERVED } }, */
+
/* Dell Instant Launch key */
- { KE_KEY, 0xe025, { KEY_PROG4 } },
- { KE_KEY, 0xe029, { KEY_PROG4 } },
+ { KE_KEY, 0xe025, { KEY_PROG4 } },
/* Audio panel key */
{ KE_IGNORE, 0xe026, { KEY_RESERVED } },
+ /* LCD Display On/Off Control key */
+ { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },
+
+ /* Untested, Multimedia key on Dell Vostro 3560 */
+ /* { KE_IGNORE, 0xe028, { KEY_RESERVED } }, */
+
+ /* Dell Instant Launch key */
+ { KE_KEY, 0xe029, { KEY_PROG4 } },
+
+ /* Untested, Windows Mobility Center button on Inspiron 7520 */
+ /* { KE_IGNORE, 0xe02a, { KEY_RESERVED } }, */
+
+ /* Unknown, defined in ACPI DSDT */
+ /* { KE_IGNORE, 0xe02b, { KEY_RESERVED } }, */
+
+ /* Untested, Dell Audio With Preset Switch button on Inspiron 7520 */
+ /* { KE_IGNORE, 0xe02c, { KEY_RESERVED } }, */
+
{ KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } },
{ KE_IGNORE, 0xe030, { KEY_VOLUMEUP } },
{ KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } },
{ KE_IGNORE, 0xe034, { KEY_KBDILLUMDOWN } },
{ KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } },
+
+ /* NIC Link is Up */
+ { KE_IGNORE, 0xe043, { KEY_RESERVED } },
+
+ /* NIC Link is Down */
+ { KE_IGNORE, 0xe044, { KEY_RESERVED } },
+
+ /*
+ * This entry is very suspicious!
+ * Originally Matthew Garrett created this dell-wmi driver especially for
+ * the "button with a picture of a battery", which has event code 0xe045.
+ * Later Mario Limonciello from Dell told us that event code 0xe045 is
+ * reported by Num Lock and should be ignored because the key is also
+ * sent by the keyboard controller.
+ * So for now we ignore this event to prevent a potential double
+ * Num Lock key press.
+ */
{ KE_IGNORE, 0xe045, { KEY_NUMLOCK } },
+
+ /* Scroll Lock; also signals entering tablet mode on portable devices */
{ KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } },
+
+ /* Untested, signals leaving tablet mode on portable devices */
+ /* { KE_IGNORE, 0xe047, { KEY_RESERVED } }, */
+
+ /* Dell Support Center key */
+ { KE_IGNORE, 0xe06e, { KEY_RESERVED } },
+
{ KE_IGNORE, 0xe0f7, { KEY_MUTE } },
{ KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } },
{ KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } },
- { KE_END, 0 }
};
-static bool dell_new_hk_type;
-
struct dell_bios_keymap_entry {
u16 scancode;
u16 keycode;
@@ -153,6 +202,7 @@ struct dell_bios_hotkey_table {
struct dell_dmi_results {
int err;
+ int keymap_size;
struct key_entry *keymap;
};
@@ -201,10 +251,12 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
};
/*
+ * Keymap for WMI events of type 0x0010
+ *
* These are applied if the 0xB2 DMI hotkey table is present and doesn't
* override them.
*/
-static const struct key_entry dell_wmi_extra_keymap[] __initconst = {
+static const struct key_entry dell_wmi_keymap_type_0010[] __initconst = {
/* Fn-lock */
{ KE_IGNORE, 0x151, { KEY_RESERVED } },
@@ -224,21 +276,39 @@ static const struct key_entry dell_wmi_extra_keymap[] __initconst = {
{ KE_IGNORE, 0x155, { KEY_RESERVED } },
};
+/*
+ * Keymap for WMI events of type 0x0011
+ */
+static const struct key_entry dell_wmi_keymap_type_0011[] __initconst = {
+ /* Battery unplugged */
+ { KE_IGNORE, 0xfff0, { KEY_RESERVED } },
+
+ /* Battery inserted */
+ { KE_IGNORE, 0xfff1, { KEY_RESERVED } },
+
+ /* Keyboard backlight level changed */
+ { KE_IGNORE, 0x01e1, { KEY_RESERVED } },
+ { KE_IGNORE, 0x02ea, { KEY_RESERVED } },
+ { KE_IGNORE, 0x02eb, { KEY_RESERVED } },
+ { KE_IGNORE, 0x02ec, { KEY_RESERVED } },
+ { KE_IGNORE, 0x02f6, { KEY_RESERVED } },
+};
+
static struct input_dev *dell_wmi_input_dev;
-static void dell_wmi_process_key(int reported_key)
+static void dell_wmi_process_key(int type, int code)
{
const struct key_entry *key;
key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
- reported_key);
+ (type << 16) | code);
if (!key) {
- pr_info("Unknown key with scancode 0x%x pressed\n",
- reported_key);
+ pr_info("Unknown key with type 0x%04x and code 0x%04x pressed\n",
+ type, code);
return;
}
- pr_debug("Key %x pressed\n", reported_key);
+ pr_debug("Key with type 0x%04x and code 0x%04x pressed\n", type, code);
/* Don't report brightness notifications that will also come via ACPI */
if ((key->keycode == KEY_BRIGHTNESSUP ||
@@ -246,7 +316,7 @@ static void dell_wmi_process_key(int reported_key)
acpi_video_handles_brightness_key_presses())
return;
- if (reported_key == 0xe025 && !wmi_requires_smbios_request)
+ if (type == 0x0000 && code == 0xe025 && !wmi_requires_smbios_request)
return;
sparse_keymap_report_entry(dell_wmi_input_dev, key, 1, true);
@@ -284,18 +354,6 @@ static void dell_wmi_notify(u32 value, void *context)
buffer_entry = (u16 *)obj->buffer.pointer;
buffer_size = obj->buffer.length/2;
-
- if (!dell_new_hk_type) {
- if (buffer_size >= 3 && buffer_entry[1] == 0x0)
- dell_wmi_process_key(buffer_entry[2]);
- else if (buffer_size >= 2)
- dell_wmi_process_key(buffer_entry[1]);
- else
- pr_info("Received unknown WMI event\n");
- kfree(obj);
- return;
- }
-
buffer_end = buffer_entry + buffer_size;
/*
@@ -330,62 +388,18 @@ static void dell_wmi_notify(u32 value, void *context)
pr_debug("Process buffer (%*ph)\n", len*2, buffer_entry);
switch (buffer_entry[1]) {
- case 0x00:
- for (i = 2; i < len; ++i) {
- switch (buffer_entry[i]) {
- case 0xe043:
- /* NIC Link is Up */
- pr_debug("NIC Link is Up\n");
- break;
- case 0xe044:
- /* NIC Link is Down */
- pr_debug("NIC Link is Down\n");
- break;
- case 0xe045:
- /* Unknown event but defined in DSDT */
- default:
- /* Unknown event */
- pr_info("Unknown WMI event type 0x00: "
- "0x%x\n", (int)buffer_entry[i]);
- break;
- }
- }
+ case 0x0000: /* One key pressed or event occurred */
+ if (len > 2)
+ dell_wmi_process_key(0x0000, buffer_entry[2]);
+ /* Other entries could contain additional information */
break;
- case 0x10:
- /* Keys pressed */
+ case 0x0010: /* Sequence of keys pressed */
+ case 0x0011: /* Sequence of events occurred */
for (i = 2; i < len; ++i)
- dell_wmi_process_key(buffer_entry[i]);
- break;
- case 0x11:
- for (i = 2; i < len; ++i) {
- switch (buffer_entry[i]) {
- case 0xfff0:
- /* Battery unplugged */
- pr_debug("Battery unplugged\n");
- break;
- case 0xfff1:
- /* Battery inserted */
- pr_debug("Battery inserted\n");
- break;
- case 0x01e1:
- case 0x02ea:
- case 0x02eb:
- case 0x02ec:
- case 0x02f6:
- /* Keyboard backlight level changed */
- pr_debug("Keyboard backlight level "
- "changed\n");
- break;
- default:
- /* Unknown event */
- pr_info("Unknown WMI event type 0x11: "
- "0x%x\n", (int)buffer_entry[i]);
- break;
- }
- }
+ dell_wmi_process_key(buffer_entry[1],
+ buffer_entry[i]);
break;
- default:
- /* Unknown event */
+ default: /* Unknown event */
pr_info("Unknown WMI event type 0x%x\n",
(int)buffer_entry[1]);
break;
@@ -410,7 +424,6 @@ static bool have_scancode(u32 scancode, const struct key_entry *keymap, int len)
}
static void __init handle_dmi_entry(const struct dmi_header *dm,
-
void *opaque)
{
@@ -418,7 +431,6 @@ static void __init handle_dmi_entry(const struct dmi_header *dm,
struct dell_bios_hotkey_table *table;
int hotkey_num, i, pos = 0;
struct key_entry *keymap;
- int num_bios_keys;
if (results->err || results->keymap)
return; /* We already found the hotkey table. */
@@ -442,8 +454,7 @@ static void __init handle_dmi_entry(const struct dmi_header *dm,
return;
}
- keymap = kcalloc(hotkey_num + ARRAY_SIZE(dell_wmi_extra_keymap) + 1,
- sizeof(struct key_entry), GFP_KERNEL);
+ keymap = kcalloc(hotkey_num, sizeof(struct key_entry), GFP_KERNEL);
if (!keymap) {
results->err = -ENOMEM;
return;
@@ -480,31 +491,15 @@ static void __init handle_dmi_entry(const struct dmi_header *dm,
pos++;
}
- num_bios_keys = pos;
-
- for (i = 0; i < ARRAY_SIZE(dell_wmi_extra_keymap); i++) {
- const struct key_entry *entry = &dell_wmi_extra_keymap[i];
-
- /*
- * Check if we've already found this scancode. This takes
- * quadratic time, but it doesn't matter unless the list
- * of extra keys gets very long.
- */
- if (!have_scancode(entry->code, keymap, num_bios_keys)) {
- keymap[pos] = *entry;
- pos++;
- }
- }
-
- keymap[pos].type = KE_END;
-
results->keymap = keymap;
+ results->keymap_size = pos;
}
static int __init dell_wmi_input_setup(void)
{
struct dell_dmi_results dmi_results = {};
- int err;
+ struct key_entry *keymap;
+ int err, i, pos = 0;
dell_wmi_input_dev = input_allocate_device();
if (!dell_wmi_input_dev)
@@ -528,21 +523,71 @@ static int __init dell_wmi_input_setup(void)
goto err_free_dev;
}
- if (dmi_results.keymap) {
- dell_new_hk_type = true;
+ keymap = kcalloc(dmi_results.keymap_size +
+ ARRAY_SIZE(dell_wmi_keymap_type_0000) +
+ ARRAY_SIZE(dell_wmi_keymap_type_0010) +
+ ARRAY_SIZE(dell_wmi_keymap_type_0011) +
+ 1,
+ sizeof(struct key_entry), GFP_KERNEL);
+ if (!keymap) {
+ kfree(dmi_results.keymap);
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ /* Append table with events of type 0x0010 which comes from DMI */
+ for (i = 0; i < dmi_results.keymap_size; i++) {
+ keymap[pos] = dmi_results.keymap[i];
+ keymap[pos].code |= (0x0010 << 16);
+ pos++;
+ }
+
+ kfree(dmi_results.keymap);
- err = sparse_keymap_setup(dell_wmi_input_dev,
- dmi_results.keymap, NULL);
+ /* Append table with extra events of type 0x0010 which are not in DMI */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0010); i++) {
+ const struct key_entry *entry = &dell_wmi_keymap_type_0010[i];
/*
- * Sparse keymap library makes a copy of keymap so we
- * don't need the original one that was allocated.
+ * Check if we've already found this scancode. This takes
+ * quadratic time, but it doesn't matter unless the list
+ * of extra keys gets very long.
*/
- kfree(dmi_results.keymap);
- } else {
- err = sparse_keymap_setup(dell_wmi_input_dev,
- dell_wmi_legacy_keymap, NULL);
+ if (dmi_results.keymap_size &&
+ have_scancode(entry->code | (0x0010 << 16),
+ keymap, dmi_results.keymap_size)
+ )
+ continue;
+
+ keymap[pos] = *entry;
+ keymap[pos].code |= (0x0010 << 16);
+ pos++;
+ }
+
+ /* Append table with events of type 0x0011 */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0011); i++) {
+ keymap[pos] = dell_wmi_keymap_type_0011[i];
+ keymap[pos].code |= (0x0011 << 16);
+ pos++;
}
+
+ /*
+ * Now append also table with "legacy" events of type 0x0000. Some of
+ * them are reported also on laptops which have scancodes in DMI.
+ */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0000); i++) {
+ keymap[pos] = dell_wmi_keymap_type_0000[i];
+ pos++;
+ }
+
+ keymap[pos].type = KE_END;
+
+ err = sparse_keymap_setup(dell_wmi_input_dev, keymap, NULL);
+ /*
+ * Sparse keymap library makes a copy of keymap so we don't need the
+ * original one that was allocated.
+ */
+ kfree(keymap);
if (err)
goto err_free_dev;
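
Since all four tables above now feed a single sparse keymap, the WMI event type is packed into the upper 16 bits of each scancode, so that, for example, code 0xe025 of type 0x0000 cannot collide with the same code from another table. A small standalone demo of the packing used by dell_wmi_process_key() (plain C, not driver code):

    #include <stdio.h>
    #include <stdint.h>

    /* Pack as the driver does: type in the upper 16 bits, event code in
     * the lower 16 bits of the sparse-keymap scancode. */
    static uint32_t pack(uint16_t type, uint16_t code)
    {
        return ((uint32_t)type << 16) | code;
    }

    int main(void)
    {
        uint32_t sc = pack(0x0010, 0x0151);   /* Fn-lock from the 0x0010 table */

        printf("scancode=0x%08x type=0x%04x code=0x%04x\n",
               sc, sc >> 16, sc & 0xffff);
        return 0;   /* prints scancode=0x00100151 type=0x0010 code=0x0151 */
    }
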
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ce41bc34288d..61f39abf5dc8 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -88,9 +88,6 @@
#define ACPI_FUJITSU_NOTIFY_CODE1 0x80
-#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86
-#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
-
/* FUNC interface - command values */
#define FUNC_RFKILL 0x1000
#define FUNC_LEDS 0x1001
@@ -108,6 +105,8 @@
#define LOGOLAMP_POWERON 0x2000
#define LOGOLAMP_ALWAYS 0x4000
#define RADIO_LED_ON 0x20
+#define ECO_LED 0x10000
+#define ECO_LED_ON 0x80000
#endif
/* Hotkey details */
@@ -121,13 +120,6 @@
#define RINGBUFFERSIZE 40
/* Debugging */
-#define FUJLAPTOP_LOG ACPI_FUJITSU_HID ": "
-#define FUJLAPTOP_ERR KERN_ERR FUJLAPTOP_LOG
-#define FUJLAPTOP_NOTICE KERN_NOTICE FUJLAPTOP_LOG
-#define FUJLAPTOP_INFO KERN_INFO FUJLAPTOP_LOG
-#define FUJLAPTOP_DEBUG KERN_DEBUG FUJLAPTOP_LOG
-
-#define FUJLAPTOP_DBG_ALL 0xffff
#define FUJLAPTOP_DBG_ERROR 0x0001
#define FUJLAPTOP_DBG_WARN 0x0002
#define FUJLAPTOP_DBG_INFO 0x0004
@@ -136,7 +128,7 @@
#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
#define vdbg_printk(a_dbg_level, format, arg...) \
do { if (dbg_level & a_dbg_level) \
- printk(FUJLAPTOP_DEBUG "%s: " format, __func__ , ## arg); \
+ printk(KERN_DEBUG pr_fmt("%s: " format), __func__, ## arg); \
} while (0)
#else
#define vdbg_printk(a_dbg_level, format, arg...) \
@@ -176,6 +168,7 @@ struct fujitsu_hotkey_t {
int logolamp_registered;
int kblamps_registered;
int radio_led_registered;
+ int eco_led_registered;
};
static struct fujitsu_hotkey_t *fujitsu_hotkey;
@@ -212,6 +205,16 @@ static struct led_classdev radio_led = {
.brightness_get = radio_led_get,
.brightness_set = radio_led_set
};
+
+static enum led_brightness eco_led_get(struct led_classdev *cdev);
+static void eco_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness);
+
+static struct led_classdev eco_led = {
+ .name = "fujitsu::eco_led",
+ .brightness_get = eco_led_get,
+ .brightness_set = eco_led_set
+};
#endif
#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
@@ -296,6 +299,18 @@ static void radio_led_set(struct led_classdev *cdev,
call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
}
+static void eco_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ int curr;
+
+ curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0);
+ if (brightness >= LED_FULL)
+ call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON);
+ else
+ call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON);
+}
+
static enum led_brightness logolamp_get(struct led_classdev *cdev)
{
enum led_brightness brightness = LED_OFF;
@@ -330,6 +345,16 @@ static enum led_brightness radio_led_get(struct led_classdev *cdev)
return brightness;
}
+
+static enum led_brightness eco_led_get(struct led_classdev *cdev)
+{
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) & ECO_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
#endif
/* Hardware access for LCD brightness control */
@@ -856,6 +881,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
set_bit(fujitsu->keycode3, input->keybit);
set_bit(fujitsu->keycode4, input->keybit);
set_bit(fujitsu->keycode5, input->keybit);
+ set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit);
set_bit(KEY_UNKNOWN, input->keybit);
error = input_register_device(input);
@@ -943,6 +969,23 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
result);
}
}
+
+ /* Support for the eco LED is not always signaled in the bit
+ * corresponding to the bit used to control the LED. According to
+ * the DSDT table, bit 14 also seems to indicate the presence of
+ * this LED. Confirm by testing the status.
+ */
+ if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
+ (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
+ result = led_classdev_register(&fujitsu->pf_device->dev,
+ &eco_led);
+ if (result == 0) {
+ fujitsu_hotkey->eco_led_registered = 1;
+ } else {
+ pr_err("Could not register LED handler for eco LED, error %i\n",
+ result);
+ }
+ }
#endif
return result;
@@ -972,6 +1015,9 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
if (fujitsu_hotkey->radio_led_registered)
led_classdev_unregister(&radio_led);
+
+ if (fujitsu_hotkey->eco_led_registered)
+ led_classdev_unregister(&eco_led);
#endif
input_unregister_device(input);
@@ -1060,6 +1106,19 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
}
}
+ /* On some models (first seen on the Skylake-based Lifebook
+ * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is
+ * handled in software; its state is queried using FUNC_RFKILL
+ */
+ if ((fujitsu_hotkey->rfkill_supported & BIT(26)) &&
+ (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) {
+ keycode = KEY_TOUCHPAD_TOGGLE;
+ input_report_key(input, keycode, 1);
+ input_sync(input);
+ input_report_key(input, keycode, 0);
+ input_sync(input);
+ }
+
break;
default:
keycode = KEY_UNKNOWN;
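
eco_led_set() above performs a read-modify-write on the firmware's LED status word so that only the ECO_LED_ON bit changes. A standalone sketch of the same bit manipulation, with fext_read()/fext_write() as hypothetical stand-ins for call_fext_func():

    #include <stdio.h>

    #define ECO_LED_ON 0x80000

    static int status = 0x1;                 /* pretend current firmware status */
    static int fext_read(void) { return status; }
    static void fext_write(int v) { status = v; }

    /* set or clear only the ECO_LED_ON bit, preserving all other bits */
    static void eco_set(int on)
    {
        int curr = fext_read();

        fext_write(on ? curr | ECO_LED_ON : curr & ~ECO_LED_ON);
    }

    int main(void)
    {
        eco_set(1); printf("0x%x\n", status);  /* 0x80001 */
        eco_set(0); printf("0x%x\n", status);  /* 0x1 */
        return 0;
    }
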
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 6f145f2d004d..96ffda493266 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -718,6 +718,11 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device)
if (err)
return err;
+ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless,
+ sizeof(wireless), 0);
+ if (err)
+ return err;
+
if (wireless & 0x1) {
wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
RFKILL_TYPE_WLAN,
@@ -882,7 +887,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
wwan_rfkill = NULL;
rfkill2_count = 0;
- if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device))
+ if (hp_wmi_rfkill_setup(device))
hp_wmi_rfkill2_setup(device);
err = device_create_file(&device->dev, &dev_attr_display);
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index a818db6aa08f..ed5874217ee7 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -122,8 +122,8 @@ static int intel_hid_input_setup(struct platform_device *device)
return 0;
err_free_device:
- input_free_device(priv->input_dev);
- return ret;
+ input_free_device(priv->input_dev);
+ return ret;
}
static void intel_hid_input_destroy(struct platform_device *device)
@@ -224,7 +224,6 @@ static int intel_hid_remove(struct platform_device *device)
acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
intel_hid_input_destroy(device);
intel_hid_set_enable(&device->dev, 0);
- acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
/*
* Even if we failed to shut off the event stream, we can still
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
new file mode 100644
index 000000000000..146d02f8c9bc
--- /dev/null
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -0,0 +1,188 @@
+/*
+ * Intel Virtual Button driver for Windows 8.1+
+ *
+ * Copyright (C) 2016 AceLan Kao <acelan.kao@canonical.com>
+ * Copyright (C) 2016 Alex Hung <alex.hung@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("AceLan Kao");
+
+static const struct acpi_device_id intel_vbtn_ids[] = {
+ {"INT33D6", 0},
+ {"", 0},
+};
+
+/* In theory, these are HID usages. */
+static const struct key_entry intel_vbtn_keymap[] = {
+ { KE_IGNORE, 0xC0, { KEY_POWER } }, /* power key press */
+ { KE_KEY, 0xC1, { KEY_POWER } }, /* power key release */
+ { KE_END },
+};
+
+struct intel_vbtn_priv {
+ struct input_dev *input_dev;
+};
+
+static int intel_vbtn_input_setup(struct platform_device *device)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+ int ret;
+
+ priv->input_dev = input_allocate_device();
+ if (!priv->input_dev)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL);
+ if (ret)
+ goto err_free_device;
+
+ priv->input_dev->dev.parent = &device->dev;
+ priv->input_dev->name = "Intel Virtual Button driver";
+ priv->input_dev->id.bustype = BUS_HOST;
+
+ ret = input_register_device(priv->input_dev);
+ if (ret)
+ goto err_free_device;
+
+ return 0;
+
+err_free_device:
+ input_free_device(priv->input_dev);
+ return ret;
+}
+
+static void intel_vbtn_input_destroy(struct platform_device *device)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+
+ input_unregister_device(priv->input_dev);
+}
+
+static void notify_handler(acpi_handle handle, u32 event, void *context)
+{
+ struct platform_device *device = context;
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+
+ if (!sparse_keymap_report_event(priv->input_dev, event, 1, true))
+ dev_info(&device->dev, "unknown event index 0x%x\n",
+ event);
+}
+
+static int intel_vbtn_probe(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ struct intel_vbtn_priv *priv;
+ acpi_status status;
+ int err;
+
+ status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+ if (!ACPI_SUCCESS(status)) {
+ dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(&device->dev, priv);
+
+ err = intel_vbtn_input_setup(device);
+ if (err) {
+ pr_err("Failed to setup Intel Virtual Button\n");
+ return err;
+ }
+
+ status = acpi_install_notify_handler(handle,
+ ACPI_DEVICE_NOTIFY,
+ notify_handler,
+ device);
+ if (ACPI_FAILURE(status)) {
+ err = -EBUSY;
+ goto err_remove_input;
+ }
+
+ return 0;
+
+err_remove_input:
+ intel_vbtn_input_destroy(device);
+
+ return err;
+}
+
+static int intel_vbtn_remove(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+
+ intel_vbtn_input_destroy(device);
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
+
+ /*
+ * Even if we failed to shut off the event stream, we can still
+ * safely detach from the device.
+ */
+ return 0;
+}
+
+static struct platform_driver intel_vbtn_pl_driver = {
+ .driver = {
+ .name = "intel-vbtn",
+ .acpi_match_table = intel_vbtn_ids,
+ },
+ .probe = intel_vbtn_probe,
+ .remove = intel_vbtn_remove,
+};
+MODULE_DEVICE_TABLE(acpi, intel_vbtn_ids);
+
+static acpi_status __init
+check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ const struct acpi_device_id *ids = context;
+ struct acpi_device *dev;
+
+ if (acpi_bus_get_device(handle, &dev) != 0)
+ return AE_OK;
+
+ if (acpi_match_device_ids(dev, ids) == 0)
+ if (acpi_create_platform_device(dev))
+ dev_info(&dev->dev,
+ "intel-vbtn: created platform device\n");
+
+ return AE_OK;
+}
+
+static int __init intel_vbtn_init(void)
+{
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, check_acpi_dev, NULL,
+ (void *)intel_vbtn_ids, NULL);
+
+ return platform_driver_register(&intel_vbtn_pl_driver);
+}
+module_init(intel_vbtn_init);
+
+static void __exit intel_vbtn_exit(void)
+{
+ platform_driver_unregister(&intel_vbtn_pl_driver);
+}
+module_exit(intel_vbtn_exit);
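
The keymap above encodes the driver's reporting convention: the power-button press usage (0xC0) is ignored and only the release usage (0xC1) maps to KEY_POWER, which sparse_keymap_report_event(..., 1, true) then reports as a single autoreleased key event. A sketch of that convention (illustrative, not part of this patch):

    #include <linux/input.h>
    #include <linux/input/sparse-keymap.h>

    static const struct key_entry example_keymap[] = {
        { KE_IGNORE, 0xC0, { KEY_POWER } },  /* press usage: swallowed */
        { KE_KEY,    0xC1, { KEY_POWER } },  /* release usage: one key event */
        { KE_END },
    };
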
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 2776bec89c88..520b58a04daa 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -23,9 +23,9 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
-#include <linux/seq_file.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/pmc_core.h>
#include "intel_pmc_core.h"
@@ -77,30 +77,18 @@ int intel_pmc_slp_s0_counter_read(u32 *data)
}
EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read);
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static int pmc_core_dev_state_show(struct seq_file *s, void *unused)
+static int pmc_core_dev_state_get(void *data, u64 *val)
{
- struct pmc_dev *pmcdev = s->private;
- u32 counter_val;
+ struct pmc_dev *pmcdev = data;
+ u32 value;
- counter_val = pmc_core_reg_read(pmcdev,
- SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
- seq_printf(s, "%u\n", pmc_core_adjust_slp_s0_step(counter_val));
+ value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
+ *val = pmc_core_adjust_slp_s0_step(value);
return 0;
}
-static int pmc_core_dev_state_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmc_core_dev_state_show, inode->i_private);
-}
-
-static const struct file_operations pmc_core_dev_state_ops = {
- .open = pmc_core_dev_state_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
@@ -112,12 +100,12 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
struct dentry *dir, *file;
dir = debugfs_create_dir("pmc_core", NULL);
- if (!dir)
+ if (IS_ERR_OR_NULL(dir))
return -ENOMEM;
pmcdev->dbgfs_dir = dir;
file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
- dir, pmcdev, &pmc_core_dev_state_ops);
+ dir, pmcdev, &pmc_core_dev_state);
if (!file) {
pmc_core_dbgfs_unregister(pmcdev);
@@ -126,22 +114,12 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
return 0;
}
-#else
-static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
-{
- return 0;
-}
-
-static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- { X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT,
- (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
- { X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT,
- (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_MOBILE, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL},
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL},
{}
};
@@ -182,10 +160,8 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
err = pmc_core_dbgfs_register(pmcdev);
- if (err < 0) {
- dev_err(&dev->dev, "PMC Core: debugfs register failed.\n");
- return err;
- }
+ if (err < 0)
+ dev_warn(&dev->dev, "PMC Core: debugfs register failed.\n");
pmc.has_slp_s0_res = true;
return 0;
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index a9dadaf787c1..e3f671f4d122 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -23,6 +23,7 @@
/* Sunrise Point Power Management Controller PCI Device ID */
#define SPT_PMC_PCI_DEVICE_ID 0x9d21
+
#define SPT_PMC_BASE_ADDR_OFFSET 0x48
#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c
#define SPT_PMC_MMIO_REG_LEN 0x100
@@ -42,9 +43,7 @@
struct pmc_dev {
u32 base_addr;
void __iomem *regbase;
-#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
-#endif /* CONFIG_DEBUG_FS */
bool has_slp_s0_res;
};
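
The seq_file boilerplate removed above is replaced by DEFINE_DEBUGFS_ATTRIBUTE(), which builds the file_operations from a get/set pair. A hedged sketch of the macro's usage; my_counter_get and the file name are illustrative:

    #include <linux/debugfs.h>
    #include <linux/types.h>

    /* read-only attribute: NULL set callback, printed with the given format */
    static int my_counter_get(void *data, u64 *val)
    {
        *val = *(u32 *)data;    /* report whatever 'data' points at */
        return 0;
    }
    DEFINE_DEBUGFS_ATTRIBUTE(my_counter_fops, my_counter_get, NULL, "%llu\n");

    /* registered like:
     *   debugfs_create_file("my_counter", 0444, dir, &counter, &my_counter_fops);
     */
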
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index f5134acd6ff0..ef29f18b1951 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -32,6 +32,7 @@
#include <linux/suspend.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/intel_pmc_ipc.h>
#include <asm/intel_punit_ipc.h>
#include <asm/intel_telemetry.h>
@@ -78,7 +79,7 @@
#define TELEM_EVT_LEN(x) (sizeof(x)/sizeof((x)[0]))
#define TELEM_DEBUGFS_CPU(model, data) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&data}
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data}
#define TELEM_CHECK_AND_PARSE_EVTS(EVTID, EVTNUM, BUF, EVTLOG, EVTDAT, MASK) { \
if (evtlog[index].telem_evtid == (EVTID)) { \
@@ -331,7 +332,7 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
};
static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
- TELEM_DEBUGFS_CPU(0x5c, telem_apl_debugfs_conf),
+ TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf),
{}
};
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 09c84a2b1c2c..6ebdbd2b04fc 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -28,6 +28,7 @@
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/intel_pmc_ipc.h>
#include <asm/intel_punit_ipc.h>
#include <asm/intel_telemetry.h>
@@ -82,7 +83,7 @@
#define TELEM_SET_VERBOSITY_BITS(x, y) ((x) |= ((y) << 27))
#define TELEM_CPU(model, data) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&data }
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data }
enum telemetry_action {
TELEM_UPDATE = 0,
@@ -163,7 +164,7 @@ static struct telemetry_plt_config telem_apl_config = {
};
static const struct x86_cpu_id telemetry_cpu_ids[] = {
- TELEM_CPU(0x5c, telem_apl_config),
+ TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config),
{}
};
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 01e12d221a8b..9d60a40d8b3f 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -4,7 +4,7 @@
* Copyright (C) 2002-2004 John Belmonte
* Copyright (C) 2008 Philip Langdale
* Copyright (C) 2010 Pierre Ducroquet
- * Copyright (C) 2014-2015 Azael Avalos
+ * Copyright (C) 2014-2016 Azael Avalos
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,7 +31,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define TOSHIBA_ACPI_VERSION "0.23"
+#define TOSHIBA_ACPI_VERSION "0.24"
#define PROC_INTERFACE_VERSION 1
#include <linux/kernel.h>
@@ -53,6 +53,7 @@
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/rfkill.h>
+#include <linux/iio/iio.h>
#include <linux/toshiba.h>
#include <acpi/video.h>
@@ -134,6 +135,7 @@ MODULE_LICENSE("GPL");
/* Field definitions */
#define HCI_ACCEL_MASK 0x7fff
+#define HCI_ACCEL_DIRECTION_MASK 0x8000
#define HCI_HOTKEY_DISABLE 0x0b
#define HCI_HOTKEY_ENABLE 0x09
#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
@@ -178,6 +180,7 @@ struct toshiba_acpi_dev {
struct led_classdev eco_led;
struct miscdevice miscdev;
struct rfkill *wwan_rfk;
+ struct iio_dev *indio_dev;
int force_fan;
int last_key_event;
@@ -1958,28 +1961,6 @@ static ssize_t touchpad_show(struct device *dev,
}
static DEVICE_ATTR_RW(touchpad);
-static ssize_t position_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
- u32 xyval, zval, tmp;
- u16 x, y, z;
- int ret;
-
- xyval = zval = 0;
- ret = toshiba_accelerometer_get(toshiba, &xyval, &zval);
- if (ret < 0)
- return ret;
-
- x = xyval & HCI_ACCEL_MASK;
- tmp = xyval >> HCI_MISC_SHIFT;
- y = tmp & HCI_ACCEL_MASK;
- z = zval & HCI_ACCEL_MASK;
-
- return sprintf(buf, "%d %d %d\n", x, y, z);
-}
-static DEVICE_ATTR_RO(position);
-
static ssize_t usb_sleep_charge_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2350,7 +2331,6 @@ static struct attribute *toshiba_attributes[] = {
&dev_attr_available_kbd_modes.attr,
&dev_attr_kbd_backlight_timeout.attr,
&dev_attr_touchpad.attr,
- &dev_attr_position.attr,
&dev_attr_usb_sleep_charge.attr,
&dev_attr_sleep_functions_on_battery.attr,
&dev_attr_usb_rapid_charge.attr,
@@ -2377,8 +2357,6 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false;
else if (attr == &dev_attr_touchpad.attr)
exists = (drv->touchpad_supported) ? true : false;
- else if (attr == &dev_attr_position.attr)
- exists = (drv->accelerometer_supported) ? true : false;
else if (attr == &dev_attr_usb_sleep_charge.attr)
exists = (drv->usb_sleep_charge_supported) ? true : false;
else if (attr == &dev_attr_sleep_functions_on_battery.attr)
@@ -2420,6 +2398,81 @@ static void toshiba_acpi_kbd_bl_work(struct work_struct *work)
}
/*
+ * IIO device
+ */
+
+enum toshiba_iio_accel_chan {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z
+};
+
+static int toshiba_iio_accel_get_axis(enum toshiba_iio_accel_chan chan)
+{
+ u32 xyval, zval;
+ int ret;
+
+ ret = toshiba_accelerometer_get(toshiba_acpi, &xyval, &zval);
+ if (ret < 0)
+ return ret;
+
+ switch (chan) {
+ case AXIS_X:
+ return xyval & HCI_ACCEL_DIRECTION_MASK ?
+ -(xyval & HCI_ACCEL_MASK) : xyval & HCI_ACCEL_MASK;
+ case AXIS_Y:
+ return (xyval >> HCI_MISC_SHIFT) & HCI_ACCEL_DIRECTION_MASK ?
+ -((xyval >> HCI_MISC_SHIFT) & HCI_ACCEL_MASK) :
+ (xyval >> HCI_MISC_SHIFT) & HCI_ACCEL_MASK;
+ case AXIS_Z:
+ return zval & HCI_ACCEL_DIRECTION_MASK ?
+ -(zval & HCI_ACCEL_MASK) : zval & HCI_ACCEL_MASK;
+ }
+
+ return ret;
+}
+
+static int toshiba_iio_accel_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = toshiba_iio_accel_get_axis(chan->channel);
+ if (ret == -EIO || ret == -ENODEV)
+ return ret;
+
+ *val = ret;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+#define TOSHIBA_IIO_ACCEL_CHANNEL(axis, chan) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel = chan, \
+ .channel2 = IIO_MOD_##axis, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+static const struct iio_chan_spec toshiba_iio_accel_channels[] = {
+ TOSHIBA_IIO_ACCEL_CHANNEL(X, AXIS_X),
+ TOSHIBA_IIO_ACCEL_CHANNEL(Y, AXIS_Y),
+ TOSHIBA_IIO_ACCEL_CHANNEL(Z, AXIS_Z),
+};
+
+static const struct iio_info toshiba_iio_accel_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &toshiba_iio_accel_read_raw,
+};
+
+/*
* Misc device
*/
static int toshiba_acpi_smm_bridge(SMMRegisters *regs)
@@ -2904,6 +2957,11 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
remove_toshiba_proc_entries(dev);
+ if (dev->accelerometer_supported && dev->indio_dev) {
+ iio_device_unregister(dev->indio_dev);
+ iio_device_free(dev->indio_dev);
+ }
+
if (dev->sysfs_created)
sysfs_remove_group(&dev->acpi_dev->dev.kobj,
&toshiba_attr_group);
@@ -3051,6 +3109,30 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
dev->touchpad_supported = !ret;
toshiba_accelerometer_available(dev);
+ if (dev->accelerometer_supported) {
+ dev->indio_dev = iio_device_alloc(sizeof(*dev));
+ if (!dev->indio_dev) {
+ pr_err("Unable to allocate iio device\n");
+ goto iio_error;
+ }
+
+ pr_info("Registering Toshiba accelerometer iio device\n");
+
+ dev->indio_dev->info = &toshiba_iio_accel_info;
+ dev->indio_dev->name = "Toshiba accelerometer";
+ dev->indio_dev->dev.parent = &acpi_dev->dev;
+ dev->indio_dev->modes = INDIO_DIRECT_MODE;
+ dev->indio_dev->channels = toshiba_iio_accel_channels;
+ dev->indio_dev->num_channels =
+ ARRAY_SIZE(toshiba_iio_accel_channels);
+
+ ret = iio_device_register(dev->indio_dev);
+ if (ret < 0) {
+ pr_err("Unable to register iio device\n");
+ iio_device_free(dev->indio_dev);
+ }
+ }
+iio_error:
toshiba_usb_sleep_charge_available(dev);
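
toshiba_iio_accel_get_axis() above decodes a sign-and-magnitude format: bit 15 (HCI_ACCEL_DIRECTION_MASK) carries the direction and bits 0-14 (HCI_ACCEL_MASK) the magnitude. A standalone demo of the conversion:

    #include <stdio.h>
    #include <stdint.h>

    #define HCI_ACCEL_MASK           0x7fff
    #define HCI_ACCEL_DIRECTION_MASK 0x8000

    /* convert sign-and-magnitude to a signed value, one axis at a time */
    static int to_signed(uint16_t raw)
    {
        return (raw & HCI_ACCEL_DIRECTION_MASK) ? -(raw & HCI_ACCEL_MASK)
                                                : (raw & HCI_ACCEL_MASK);
    }

    int main(void)
    {
        printf("%d %d\n", to_signed(0x0123), to_signed(0x8123)); /* 291 -291 */
        return 0;
    }
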
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 5edee645d890..262285e48a09 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -21,7 +21,7 @@
#include <linux/isapnp.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
extern struct pnp_protocol isapnp_protocol;
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 81603d99082b..bedb361746a0 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -46,7 +46,6 @@
*/
#include <linux/types.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
@@ -587,6 +586,6 @@ static int __init pnpbios_thread_init(void)
}
/* Start the kernel thread later: */
-module_init(pnpbios_thread_init);
+device_initcall(pnpbios_thread_init);
EXPORT_SYMBOL(pnpbios_protocol);
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 421770ddafa3..0f11a0f4c369 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -309,6 +309,7 @@ config BATTERY_RX51
config CHARGER_ISP1704
tristate "ISP1704 USB Charger Detection"
depends on USB_PHY
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
help
Say Y to enable support for USB Charger Detection with
ISP1707/ISP1704 USB transceivers.
diff --git a/drivers/power/axp288_charger.c b/drivers/power/axp288_charger.c
index e4d569f57acc..4030eeb7cf65 100644
--- a/drivers/power/axp288_charger.c
+++ b/drivers/power/axp288_charger.c
@@ -129,10 +129,6 @@
#define AXP288_EXTCON_DEV_NAME "axp288_extcon"
-#define AXP288_EXTCON_SLOW_CHARGER "SLOW-CHARGER"
-#define AXP288_EXTCON_DOWNSTREAM_CHARGER "CHARGE-DOWNSTREAM"
-#define AXP288_EXTCON_FAST_CHARGER "FAST-CHARGER"
-
enum {
VBUS_OV_IRQ = 0,
CHARGE_DONE_IRQ,
@@ -158,7 +154,7 @@ struct axp288_chrg_info {
/* OTG/Host mode */
struct {
struct work_struct work;
- struct extcon_specific_cable_nb cable;
+ struct extcon_dev *cable;
struct notifier_block id_nb;
bool id_short;
} otg;
@@ -586,17 +582,15 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
bool old_connected = info->cable.connected;
/* Determine cable/charger type */
- if (extcon_get_cable_state(edev, AXP288_EXTCON_SLOW_CHARGER) > 0) {
+ if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_SDP) > 0) {
dev_dbg(&info->pdev->dev, "USB SDP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
- } else if (extcon_get_cable_state(edev,
- AXP288_EXTCON_DOWNSTREAM_CHARGER) > 0) {
+ } else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_CDP) > 0) {
dev_dbg(&info->pdev->dev, "USB CDP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB_CDP;
- } else if (extcon_get_cable_state(edev,
- AXP288_EXTCON_FAST_CHARGER) > 0) {
+ } else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_DCP) > 0) {
dev_dbg(&info->pdev->dev, "USB DCP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB_DCP;
@@ -692,8 +686,8 @@ static int axp288_charger_handle_otg_evt(struct notifier_block *nb,
{
struct axp288_chrg_info *info =
container_of(nb, struct axp288_chrg_info, otg.id_nb);
- struct extcon_dev *edev = param;
- int usb_host = extcon_get_cable_state(edev, "USB-Host");
+ struct extcon_dev *edev = info->otg.cable;
+ int usb_host = extcon_get_cable_state_(edev, EXTCON_USB_HOST);
dev_dbg(&info->pdev->dev, "external connector USB-Host is %s\n",
usb_host ? "attached" : "detached");
@@ -848,10 +842,33 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register for extcon notification */
INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
- ret = extcon_register_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
+ ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
+ &info->cable.nb);
+ if (ret) {
+ dev_err(&info->pdev->dev,
+ "failed to register extcon notifier for SDP %d\n", ret);
+ return ret;
+ }
+
+ ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
+ &info->cable.nb);
+ if (ret) {
+ dev_err(&info->pdev->dev,
+ "failed to register extcon notifier for CDP %d\n", ret);
+ extcon_unregister_notifier(info->cable.edev,
+ EXTCON_CHG_USB_SDP, &info->cable.nb);
+ return ret;
+ }
+
+ ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
+ &info->cable.nb);
if (ret) {
dev_err(&info->pdev->dev,
- "failed to register extcon notifier %d\n", ret);
+ "failed to register extcon notifier for DCP %d\n", ret);
+ extcon_unregister_notifier(info->cable.edev,
+ EXTCON_CHG_USB_SDP, &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev,
+ EXTCON_CHG_USB_CDP, &info->cable.nb);
return ret;
}
@@ -871,14 +888,14 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register for OTG notification */
INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
- ret = extcon_register_interest(&info->otg.cable, NULL, "USB-Host",
+ ret = extcon_register_notifier(info->otg.cable, EXTCON_USB_HOST,
&info->otg.id_nb);
if (ret)
dev_warn(&pdev->dev, "failed to register otg notifier\n");
- if (info->otg.cable.edev)
- info->otg.id_short = extcon_get_cable_state(
- info->otg.cable.edev, "USB-Host");
+ if (info->otg.cable)
+ info->otg.id_short = extcon_get_cable_state_(
+ info->otg.cable, EXTCON_USB_HOST);
/* Register charger interrupts */
for (i = 0; i < CHRG_INTR_END; i++) {
@@ -905,11 +922,17 @@ static int axp288_charger_probe(struct platform_device *pdev)
return 0;
intr_reg_failed:
- if (info->otg.cable.edev)
- extcon_unregister_interest(&info->otg.cable);
+ if (info->otg.cable)
+ extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
+ &info->otg.id_nb);
power_supply_unregister(info->psy_usb);
psy_reg_failed:
- extcon_unregister_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
+ &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
+ &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
+ &info->cable.nb);
return ret;
}
@@ -917,10 +940,16 @@ static int axp288_charger_remove(struct platform_device *pdev)
{
struct axp288_chrg_info *info = dev_get_drvdata(&pdev->dev);
- if (info->otg.cable.edev)
- extcon_unregister_interest(&info->otg.cable);
+ if (info->otg.cable)
+ extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
+ &info->otg.id_nb);
- extcon_unregister_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
+ &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
+ &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
+ &info->cable.nb);
power_supply_unregister(info->psy_usb);
return 0;
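
The probe/remove paths above register the same notifier block for three cable types and unwind the earlier registrations on failure. A sketch of the same shape using a table and goto-based cleanup instead of repeating the unregister calls, an alternative structure rather than what the patch does:

    #include <linux/extcon.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>

    static const unsigned int cable_ids[] = {
        EXTCON_CHG_USB_SDP, EXTCON_CHG_USB_CDP, EXTCON_CHG_USB_DCP,
    };

    static int register_cable_notifiers(struct extcon_dev *edev,
                                        struct notifier_block *nb)
    {
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(cable_ids); i++) {
            ret = extcon_register_notifier(edev, cable_ids[i], nb);
            if (ret)
                goto unwind;
        }
        return 0;

    unwind:
        while (--i >= 0)    /* unregister only what was registered */
            extcon_unregister_notifier(edev, cable_ids[i], nb);
        return ret;
    }
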
diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c
index 45f6ebf88df6..e90b3f307e0f 100644
--- a/drivers/power/bq27xxx_battery.c
+++ b/drivers/power/bq27xxx_battery.c
@@ -735,11 +735,8 @@ static void bq27xxx_battery_poll(struct work_struct *work)
bq27xxx_battery_update(di);
- if (poll_interval > 0) {
- /* The timer does not have to be accurate. */
- set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
+ if (poll_interval > 0)
schedule_delayed_work(&di->work, poll_interval * HZ);
- }
}
/*
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index b2766b867b0e..fbab29dfa793 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -33,6 +33,7 @@
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
/* Local defines */
#define MSR_PLATFORM_POWER_LIMIT 0x0000065C
@@ -335,14 +336,14 @@ static int release_zone(struct powercap_zone *power_zone)
static int find_nr_power_limit(struct rapl_domain *rd)
{
- int i;
+ int i, nr_pl = 0;
for (i = 0; i < NR_POWER_LIMITS; i++) {
- if (rd->rpl[i].name == NULL)
- break;
+ if (rd->rpl[i].name)
+ nr_pl++;
}
- return i;
+ return nr_pl;
}
static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
@@ -425,15 +426,38 @@ static const struct powercap_zone_ops zone_ops[] = {
},
};
-static int set_power_limit(struct powercap_zone *power_zone, int id,
+
+/*
+ * The constraint index used by powercap can differ from the power limit (PL)
+ * index because some PLs may be missing due to non-existent MSRs. So we
+ * need to convert here by finding only the valid PLs (those with a name).
+ */
+static int contraint_to_pl(struct rapl_domain *rd, int cid)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < NR_POWER_LIMITS; i++) {
+ if ((rd->rpl[i].name) && j++ == cid) {
+ pr_debug("%s: index %d\n", __func__, i);
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int set_power_limit(struct powercap_zone *power_zone, int cid,
u64 power_limit)
{
struct rapl_domain *rd;
struct rapl_package *rp;
int ret = 0;
+ int id;
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
+ id = contraint_to_pl(rd, cid);
+
rp = rd->rp;
if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
@@ -460,16 +484,18 @@ set_exit:
return ret;
}
-static int get_current_power_limit(struct powercap_zone *power_zone, int id,
+static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
u64 *data)
{
struct rapl_domain *rd;
u64 val;
int prim;
int ret = 0;
+ int id;
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
+ id = contraint_to_pl(rd, cid);
switch (rd->rpl[id].prim_id) {
case PL1_ENABLE:
prim = POWER_LIMIT1;
@@ -491,14 +517,17 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int id,
return ret;
}
-static int set_time_window(struct powercap_zone *power_zone, int id,
+static int set_time_window(struct powercap_zone *power_zone, int cid,
u64 window)
{
struct rapl_domain *rd;
int ret = 0;
+ int id;
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
+ id = contraint_to_pl(rd, cid);
+
switch (rd->rpl[id].prim_id) {
case PL1_ENABLE:
rapl_write_data_raw(rd, TIME_WINDOW1, window);
@@ -513,14 +542,17 @@ static int set_time_window(struct powercap_zone *power_zone, int id,
return ret;
}
-static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
+static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data)
{
struct rapl_domain *rd;
u64 val;
int ret = 0;
+ int id;
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
+ id = contraint_to_pl(rd, cid);
+
switch (rd->rpl[id].prim_id) {
case PL1_ENABLE:
ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val);
@@ -539,15 +571,17 @@ static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
return ret;
}
-static const char *get_constraint_name(struct powercap_zone *power_zone, int id)
+static const char *get_constraint_name(struct powercap_zone *power_zone, int cid)
{
- struct rapl_power_limit *rpl;
struct rapl_domain *rd;
+ int id;
rd = power_zone_to_rapl_domain(power_zone);
- rpl = (struct rapl_power_limit *) &rd->rpl[id];
+ id = contraint_to_pl(rd, cid);
+ if (id >= 0)
+ return rd->rpl[id].name;
- return rpl->name;
+ return NULL;
}
@@ -1096,27 +1130,36 @@ static const struct rapl_defaults rapl_defaults_cht = {
}
static const struct x86_cpu_id rapl_ids[] __initconst = {
- RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
- RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
- RAPL_CPU(0x37, rapl_defaults_byt),/* Valleyview */
- RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
- RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
- RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
- RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
- RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
- RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
- RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
- RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
- RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
- RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
- RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */
- RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
- RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */
- RAPL_CPU(0X5C, rapl_defaults_core),/* Broxton */
- RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
- RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
- RAPL_CPU(0x8E, rapl_defaults_core),/* Kabylake */
- RAPL_CPU(0x9E, rapl_defaults_core),/* Kabylake */
+ RAPL_CPU(INTEL_FAM6_SANDYBRIDGE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_SANDYBRIDGE_X, rapl_defaults_core),
+
+ RAPL_CPU(INTEL_FAM6_IVYBRIDGE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_IVYBRIDGE_X, rapl_defaults_core),
+
+ RAPL_CPU(INTEL_FAM6_HASWELL_CORE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_HASWELL_ULT, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_HASWELL_GT3E, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_HASWELL_X, rapl_defaults_hsw_server),
+
+ RAPL_CPU(INTEL_FAM6_BROADWELL_CORE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_BROADWELL_GT3E, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_BROADWELL_XEON_D, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_BROADWELL_X, rapl_defaults_hsw_server),
+
+ RAPL_CPU(INTEL_FAM6_SKYLAKE_DESKTOP, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_SKYLAKE_MOBILE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_SKYLAKE_X, rapl_defaults_hsw_server),
+ RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core),
+
+ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt),
+ RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht),
+ RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD1, rapl_defaults_tng),
+ RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD2, rapl_defaults_ann),
+ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
+
+ RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
{}
};
MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
@@ -1373,6 +1416,37 @@ static int rapl_check_domain(int cpu, int domain)
return 0;
}
+
+/*
+ * Check if power limits are available. Two cases when they are not available:
+ * 1. Locked by BIOS: in this case we still provide read-only access so that
+ * users can see what limit is set by the BIOS.
+ * 2. Some CPUs make some domains monitoring-only, which means the PLx MSRs may
+ * not exist at all. In this case, we do not show the constraints in powercap.
+ *
+ * Called after domains are detected and initialized.
+ */
+static void rapl_detect_powerlimit(struct rapl_domain *rd)
+{
+ u64 val64;
+ int i;
+
+ /* check if the domain is locked by BIOS, ignore if MSR doesn't exist */
+ if (!rapl_read_data_raw(rd, FW_LOCK, false, &val64)) {
+ if (val64) {
+ pr_info("RAPL package %d domain %s locked by BIOS\n",
+ rd->rp->id, rd->name);
+ rd->state |= DOMAIN_STATE_BIOS_LOCKED;
+ }
+ }
+ /* check if power limit MSRs exist, otherwise the domain is monitoring only */
+ for (i = 0; i < NR_POWER_LIMITS; i++) {
+ int prim = rd->rpl[i].prim_id;
+ if (rapl_read_data_raw(rd, prim, false, &val64))
+ rd->rpl[i].name = NULL;
+ }
+}
+
/* Detect active and valid domains for the given CPU, caller must
* ensure the CPU belongs to the targeted package and CPU hotplug is disabled.
*/
@@ -1381,7 +1455,6 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
int i;
int ret = 0;
struct rapl_domain *rd;
- u64 locked;
for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
/* use physical package id to read counters */
@@ -1392,7 +1465,7 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
}
rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
if (!rp->nr_domains) {
- pr_err("no valid rapl domains found in package %d\n", rp->id);
+ pr_debug("no valid rapl domains found in package %d\n", rp->id);
ret = -ENODEV;
goto done;
}
@@ -1406,17 +1479,9 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
}
rapl_init_domains(rp);
- for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
- /* check if the domain is locked by BIOS */
- ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked);
- if (ret)
- return ret;
- if (locked) {
- pr_info("RAPL package %d domain %s locked by BIOS\n",
- rp->id, rd->name);
- rd->state |= DOMAIN_STATE_BIOS_LOCKED;
- }
- }
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++)
+ rapl_detect_powerlimit(rd);
+
done:
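
contraint_to_pl() above maps a powercap constraint index onto the sparse array of valid power limits, i.e. those whose name is still populated after rapl_detect_powerlimit(). A standalone demo of the mapping:

    #include <stdio.h>

    #define NR_POWER_LIMITS 3

    /* pretend PL2's MSR was missing, so its name was cleared */
    static const char *names[NR_POWER_LIMITS] = { "long_term", NULL, "peak" };

    /* constraint ids count only the valid slots, as in the driver */
    static int constraint_to_pl(int cid)
    {
        int i, j = 0;

        for (i = 0; i < NR_POWER_LIMITS; i++)
            if (names[i] && j++ == cid)
                return i;
        return -1;   /* the driver returns -EINVAL here */
    }

    int main(void)
    {
        printf("%d %d %d\n", constraint_to_pl(0), constraint_to_pl(1),
               constraint_to_pl(2));   /* prints: 0 2 -1 */
        return 0;
    }
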
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index 38a8bbe74810..83797d89c30f 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -195,7 +195,7 @@ static void parport_detach(struct parport *port)
struct pps_client_pp *device;
/* FIXME: oooh, this is ugly! */
- if (strcmp(pardev->name, KBUILD_MODNAME))
+ if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
/* not our port */
return;
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 526bf23dcb49..6c7fe4778793 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -152,7 +152,6 @@ static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
.enable = rpm_reg_enable,
.disable = rpm_reg_disable,
.is_enabled = rpm_reg_is_enabled,
- .list_voltage = regulator_list_voltage_linear_range,
.get_voltage = rpm_reg_get_voltage,
.set_voltage = rpm_reg_set_voltage,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 42b34cd1f002..fd2eff440098 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -228,7 +228,7 @@ check_XRC (struct ccw1 *de_ccw,
data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
- rc = get_sync_clock(&data->ep_sys_time);
+ rc = get_phys_clock(&data->ep_sys_time);
/* Ignore return code if sync clock is switched off. */
if (rc == -EOPNOTSUPP || rc == -EACCES)
rc = 0;
@@ -339,7 +339,7 @@ static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
- rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
+ rc = get_phys_clock(&pfxdata->define_extent.ep_sys_time);
/* Ignore return code if sync clock is switched off. */
if (rc == -EOPNOTSUPP || rc == -EACCES)
rc = 0;
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 31d544a87ba9..e2fa759bf2ad 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -45,7 +45,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
gdp->major = DASD_MAJOR;
gdp->first_minor = base->devindex << DASD_PARTN_BITS;
gdp->fops = &dasd_device_operations;
- gdp->driverfs_dev = &base->cdev->dev;
/*
* Set device name.
@@ -76,7 +75,7 @@ int dasd_gendisk_alloc(struct dasd_block *block)
gdp->queue = block->request_queue;
block->gdp = gdp;
set_capacity(block->gdp, 0);
- add_disk(block->gdp);
+ device_add_disk(&base->cdev->dev, block->gdp);
return 0;
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index bed53c46dd90..fac1b51ea0de 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -615,9 +615,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
dev_info->gd->queue = dev_info->dcssblk_queue;
dev_info->gd->private_data = dev_info;
- dev_info->gd->driverfs_dev = &dev_info->dev;
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
@@ -655,7 +655,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
goto put_dev;
get_device(&dev_info->dev);
- add_disk(dev_info->gd);
+ device_add_disk(&dev_info->dev, dev_info->gd);
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index e6f54d3b8969..9f16ea6964ec 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -512,7 +512,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
goto out_queue;
rq->queuedata = scmdev;
- bdev->gendisk->driverfs_dev = &scmdev->dev;
bdev->gendisk->private_data = scmdev;
bdev->gendisk->fops = &scm_blk_devops;
bdev->gendisk->queue = rq;
@@ -531,7 +530,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
/* 512 byte sectors */
set_capacity(bdev->gendisk, scmdev->size >> 9);
- add_disk(bdev->gendisk);
+ device_add_disk(&scmdev->dev, bdev->gendisk);
return 0;
out_queue:
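
All three s390 block drivers above switch from setting gendisk->driverfs_dev and calling add_disk() to passing the parent device to device_add_disk() directly. A minimal sketch of the new call; the parent/gd names are assumptions:

    #include <linux/genhd.h>

    /* old pattern (removed above):
     *     gd->driverfs_dev = &parent->dev;
     *     add_disk(gd);
     * new pattern: */
    static void example_add(struct device *parent, struct gendisk *gd)
    {
        device_add_disk(parent, gd);    /* parent now passed explicitly */
    }
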
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index ef04a9f7a704..7b9c50aa4cc9 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -438,18 +438,9 @@ do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
return -EFAULT;
if (len > sizeof(u_kbs->kb_string))
return -EINVAL;
- p = kmalloc(len, GFP_KERNEL);
- if (!p)
- return -ENOMEM;
- if (copy_from_user(p, u_kbs->kb_string, len)) {
- kfree(p);
- return -EFAULT;
- }
- /*
- * Make sure the string is terminated by 0. User could have
- * modified it between us running strnlen_user() and copying it.
- */
- p[len - 1] = 0;
+ p = memdup_user_nul(u_kbs->kb_string, len);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
kfree(kbd->func_table[kb_func]);
kbd->func_table[kb_func] = p;
break;
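memdup_user_nul() collapses the open-coded allocate/copy/terminate sequence, and it terminates unconditionally, so a userspace race after strnlen_user() can no longer leave the buffer unterminated. Roughly equivalent open-coded behaviour, as a sketch (the real helper lives in mm/util.c):

    static void *example_memdup_user_nul(const void __user *src, size_t len)
    {
            char *p = kmalloc(len + 1, GFP_KERNEL);

            if (!p)
                    return ERR_PTR(-ENOMEM);
            if (copy_from_user(p, src, len)) {
                    kfree(p);
                    return ERR_PTR(-EFAULT);
            }
            p[len] = '\0';  /* always NUL-terminated */
            return p;
    }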
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 5880def98fc1..6037bc87e767 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -319,7 +319,8 @@ sclp_console_init(void)
int i;
int rc;
- if (!CONSOLE_IS_SCLP)
+ /* SCLP consoles are handled together */
+ if (!(CONSOLE_IS_SCLP || CONSOLE_IS_VT220))
return 0;
rc = sclp_rw_init();
if (rc)
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 2ced50ccca63..1406fb688a26 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -47,7 +47,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
int cpu;
struct device *dev;
- s390_adjust_jiffies();
+ s390_update_cpu_mhz();
pr_info("CPU capability may have changed\n");
get_online_cpus();
for_each_online_cpu(cpu) {
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 5043ecfa1fbc..16992e2a40ad 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -185,7 +185,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
{
if (ipl_block) {
diag308(DIAG308_SET, ipl_block);
- diag308(DIAG308_IPL, NULL);
+ diag308(DIAG308_LOAD_CLEAR, NULL);
}
return count;
}
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 50597f9522fe..e96aced58627 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -47,8 +47,6 @@ static DEFINE_MUTEX(info_lock);
/* Time after which channel-path status may be outdated. */
static unsigned long chp_info_expires;
-/* Workqueue to perform pending configure tasks. */
-static struct workqueue_struct *chp_wq;
static struct work_struct cfg_work;
/* Wait queue for configure completion events. */
@@ -428,11 +426,14 @@ int chp_update_desc(struct channel_path *chp)
if (rc)
return rc;
- rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
- if (rc)
- return rc;
+ /*
+ * Fetching the following data is optional. Not all machines or
+ * hypervisors implement the required chsc commands.
+ */
+ chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+ chsc_get_channel_measurement_chars(chp);
- return chsc_get_channel_measurement_chars(chp);
+ return 0;
}
/**
@@ -714,7 +715,7 @@ static void cfg_func(struct work_struct *work)
wake_up_interruptible(&cfg_wait_queue);
return;
}
- queue_work(chp_wq, &cfg_work);
+ schedule_work(&cfg_work);
}
/**
@@ -732,7 +733,7 @@ void chp_cfg_schedule(struct chp_id chpid, int configure)
cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
cfg_busy = 1;
mutex_unlock(&cfg_lock);
- queue_work(chp_wq, &cfg_work);
+ schedule_work(&cfg_work);
}
/**
@@ -766,11 +767,6 @@ static int __init chp_init(void)
ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
return ret;
- chp_wq = create_singlethread_workqueue("cio_chp");
- if (!chp_wq) {
- crw_unregister_handler(CRW_RSC_CPATH);
- return -ENOMEM;
- }
INIT_WORK(&cfg_work, cfg_func);
init_waitqueue_head(&cfg_wait_queue);
if (info_update())
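Dropping the dedicated cio_chp workqueue is safe because cfg_work is a single work item that serializes itself under cfg_lock; the shared system workqueue suffices:

    INIT_WORK(&cfg_work, cfg_func);
    schedule_work(&cfg_work);       /* was: queue_work(chp_wq, &cfg_work) */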
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index af0232290dc4..bb5a68226cda 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -4,7 +4,7 @@
*/
#ifndef S390_CHP_H
-#define S390_CHP_H S390_CHP_H
+#define S390_CHP_H
#include <linux/types.h>
#include <linux/device.h>
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c424c0c7367e..940e725bde1e 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -907,7 +907,8 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
struct chsc_scpd *scpd_area;
int ccode, ret;
- if ((rfmt == 1) && !css_general_characteristics.fcs)
+ if ((rfmt == 1 || rfmt == 0) && c == 1 &&
+ !css_general_characteristics.fcs)
return -EINVAL;
if ((rfmt == 2) && !css_general_characteristics.cib)
return -EINVAL;
@@ -939,7 +940,6 @@ EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc)
{
- struct chsc_response_struct *chsc_resp;
struct chsc_scpd *scpd_area;
unsigned long flags;
int ret;
@@ -949,8 +949,8 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
if (ret)
goto out;
- chsc_resp = (void *)&scpd_area->response;
- memcpy(desc, &chsc_resp->data, sizeof(*desc));
+
+ memcpy(desc, scpd_area->data, sizeof(*desc));
out:
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
@@ -959,18 +959,17 @@ out:
int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
struct channel_path_desc_fmt1 *desc)
{
- struct chsc_response_struct *chsc_resp;
struct chsc_scpd *scpd_area;
unsigned long flags;
int ret;
spin_lock_irqsave(&chsc_page_lock, flags);
scpd_area = chsc_page;
- ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
+ ret = chsc_determine_channel_path_desc(chpid, 0, 1, 1, 0, scpd_area);
if (ret)
goto out;
- chsc_resp = (void *)&scpd_area->response;
- memcpy(desc, &chsc_resp->data, sizeof(*desc));
+
+ memcpy(desc, scpd_area->data, sizeof(*desc));
out:
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
@@ -1020,7 +1019,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
chp->cmg = -1;
if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
- return 0;
+ return -EINVAL;
spin_lock_irq(&chsc_page_lock);
memset(chsc_page, 0, PAGE_SIZE);
@@ -1176,7 +1175,7 @@ exit:
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
-int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
{
struct {
struct chsc_header request;
@@ -1186,7 +1185,9 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
unsigned int ctrl : 16;
unsigned int rsvd2[5];
struct chsc_header response;
- unsigned int rsvd3[7];
+ unsigned int rsvd3[3];
+ u64 clock_delta;
+ unsigned int rsvd4[2];
} __attribute__ ((packed)) *rr;
int rc;
@@ -1200,6 +1201,8 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
if (rc)
return -EIO;
rc = (rr->response.code == 0x0001) ? 0 : -EIO;
+ if (clock_delta)
+ *clock_delta = rr->clock_delta;
return rc;
}
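The widened chsc_sstpc() response layout carves a clock_delta field out of formerly reserved words; callers that do not need it can pass NULL. A hedged caller sketch (the example_* name is illustrative):

    static int example_read_clock_delta(void *page, unsigned int op, u16 ctrl)
    {
            u64 delta = 0;
            int rc = chsc_sstpc(page, op, ctrl, &delta);

            if (!rc)
                    pr_debug("chsc clock delta: %llu\n",
                             (unsigned long long) delta);
            return rc;
    }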
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 0de134c3a204..67c87b6e63ec 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -112,8 +112,9 @@ struct chsc_scpd {
u32 last_chpid:8;
u32 zeroes1;
struct chsc_header response;
- u8 data[PAGE_SIZE - 20];
-} __attribute__ ((packed));
+ u32:32;
+ u8 data[0];
+} __packed;
struct chsc_sda_area {
struct chsc_header request;
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index b6f12c2bb114..735052ecd3e5 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -552,7 +552,7 @@ static int chsc_ioctl_info_cu(void __user *user_cd)
goto out_free;
}
scucd_area->request.length = 0x0010;
- scucd_area->request.code = 0x0028;
+ scucd_area->request.code = 0x0026;
scucd_area->m = cd->m;
scucd_area->fmt1 = cd->fmt;
scucd_area->cssid = cd->cssid;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index b2afad5a5682..268aa23afa01 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -164,6 +164,9 @@ static inline u64 time_to_avg_nsec(u32 value, u32 count)
return ret;
}
+#define CMF_OFF 0
+#define CMF_ON 2
+
/*
* Activate or deactivate the channel monitor. When area is NULL,
* the monitor is deactivated. The channel monitor needs to
@@ -176,7 +179,7 @@ static inline void cmf_activate(void *area, unsigned int onoff)
register long __gpr1 asm("1");
__gpr2 = area;
- __gpr1 = onoff ? 2 : 0;
+ __gpr1 = onoff;
/* activate channel measurement */
asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
@@ -587,7 +590,7 @@ static int alloc_cmb(struct ccw_device *cdev)
/* everything ok */
memset(mem, 0, size);
cmb_area.mem = mem;
- cmf_activate(cmb_area.mem, 1);
+ cmf_activate(cmb_area.mem, CMF_ON);
}
}
@@ -621,7 +624,7 @@ static void free_cmb(struct ccw_device *cdev)
if (list_empty(&cmb_area.list)) {
ssize_t size;
size = sizeof(struct cmb) * cmb_area.num_channels;
- cmf_activate(NULL, 0);
+ cmf_activate(NULL, CMF_OFF);
free_pages((unsigned long)cmb_area.mem, get_order(size));
cmb_area.mem = NULL;
}
@@ -753,6 +756,17 @@ static void reset_cmb(struct ccw_device *cdev)
cmf_generic_reset(cdev);
}
+static int cmf_enabled(struct ccw_device *cdev)
+{
+ int enabled;
+
+ spin_lock_irq(cdev->ccwlock);
+ enabled = !!cdev->private->cmb;
+ spin_unlock_irq(cdev->ccwlock);
+
+ return enabled;
+}
+
static struct attribute_group cmf_attr_group;
static struct cmb_operations cmbops_basic = {
@@ -830,7 +844,7 @@ static int alloc_cmbe(struct ccw_device *cdev)
/* activate global measurement if this is the first channel */
if (list_empty(&cmb_area.list))
- cmf_activate(NULL, 1);
+ cmf_activate(NULL, CMF_ON);
list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
spin_unlock_irq(cdev->ccwlock);
@@ -867,7 +881,7 @@ static void free_cmbe(struct ccw_device *cdev)
/* deactivate global measurement if this is the last channel */
list_del_init(&cdev->private->cmb_list);
if (list_empty(&cmb_area.list))
- cmf_activate(NULL, 0);
+ cmf_activate(NULL, CMF_OFF);
spin_unlock_irq(cdev->ccwlock);
spin_unlock(&cmb_area.lock);
}
@@ -1153,13 +1167,8 @@ static ssize_t cmb_enable_show(struct device *dev,
char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
- int enabled;
- spin_lock_irq(cdev->ccwlock);
- enabled = !!cdev->private->cmb;
- spin_unlock_irq(cdev->ccwlock);
-
- return sprintf(buf, "%d\n", enabled);
+ return sprintf(buf, "%d\n", cmf_enabled(cdev));
}
static ssize_t cmb_enable_store(struct device *dev,
@@ -1199,15 +1208,20 @@ int ccw_set_cmf(struct ccw_device *cdev, int enable)
* @cdev: The ccw device to be enabled
*
* Returns %0 for success or a negative error value.
- *
+ * Note: If this is called on a device for which channel measurement is already
+ * enabled, a reset of the measurement data is triggered.
* Context:
* non-atomic
*/
int enable_cmf(struct ccw_device *cdev)
{
- int ret;
+ int ret = 0;
device_lock(&cdev->dev);
+ if (cmf_enabled(cdev)) {
+ cmbops->reset(cdev);
+ goto out_unlock;
+ }
get_device(&cdev->dev);
ret = cmbops->alloc(cdev);
if (ret)
@@ -1226,7 +1240,7 @@ int enable_cmf(struct ccw_device *cdev)
out:
if (ret)
put_device(&cdev->dev);
-
+out_unlock:
device_unlock(&cdev->dev);
return ret;
}
@@ -1321,7 +1335,7 @@ void cmf_reactivate(void)
{
spin_lock(&cmb_area.lock);
if (!list_empty(&cmb_area.list))
- cmf_activate(cmb_area.mem, 1);
+ cmf_activate(cmb_area.mem, CMF_ON);
spin_unlock(&cmb_area.lock);
}
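The CMF_ON/CMF_OFF constants make explicit that the schm operand is the mode value itself (2 starts measurement, 0 stops it), replacing the implicit onoff ? 2 : 0 translation; enable_cmf() additionally now resets the counters when called on an already-enabled device instead of failing. Sketch of the call sites:

    static void example_toggle_cmf(void *measurement_area)
    {
            cmf_activate(measurement_area, CMF_ON);   /* gpr1 = 2: start */
            cmf_activate(NULL, CMF_OFF);              /* gpr1 = 0: stop  */
    }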
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index a69f702a2fcc..877d9f601e63 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -97,7 +97,7 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
}
/**
- * ccw_device_is_pathgroup - determine if paths to this device are grouped
+ * ccw_device_is_pathgroup() - determine if paths to this device are grouped
* @cdev: ccw device
*
* Return non-zero if there is a path group, zero otherwise.
@@ -109,7 +109,7 @@ int ccw_device_is_pathgroup(struct ccw_device *cdev)
EXPORT_SYMBOL(ccw_device_is_pathgroup);
/**
- * ccw_device_is_multipath - determine if device is operating in multipath mode
+ * ccw_device_is_multipath() - determine if device is operating in multipath mode
* @cdev: ccw device
*
* Return non-zero if device is operating in multipath mode, zero otherwise.
@@ -457,7 +457,7 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
}
/**
- * chp_get_chp_desc - return newly allocated channel-path descriptor
+ * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
* @cdev: device to obtain the descriptor for
* @chp_idx: index of the channel path
*
@@ -477,7 +477,7 @@ struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
}
/**
- * ccw_device_get_id - obtain a ccw device id
+ * ccw_device_get_id() - obtain a ccw device id
* @cdev: device to obtain the id for
* @dev_id: where to fill in the values
*/
@@ -488,7 +488,7 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
EXPORT_SYMBOL(ccw_device_get_id);
/**
- * ccw_device_tm_start_key - perform start function
+ * ccw_device_tm_start_key() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
@@ -533,7 +533,7 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
EXPORT_SYMBOL(ccw_device_tm_start_key);
/**
- * ccw_device_tm_start_timeout_key - perform start function
+ * ccw_device_tm_start_timeout_key() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
@@ -559,7 +559,7 @@ int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
/**
- * ccw_device_tm_start - perform start function
+ * ccw_device_tm_start() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
@@ -577,7 +577,7 @@ int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
EXPORT_SYMBOL(ccw_device_tm_start);
/**
- * ccw_device_tm_start_timeout - perform start function
+ * ccw_device_tm_start_timeout() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
@@ -596,7 +596,7 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
EXPORT_SYMBOL(ccw_device_tm_start_timeout);
/**
- * ccw_device_get_mdc - accumulate max data count
+ * ccw_device_get_mdc() - accumulate max data count
* @cdev: ccw device for which the max data count is accumulated
* @mask: mask of paths to use
*
@@ -642,7 +642,7 @@ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
EXPORT_SYMBOL(ccw_device_get_mdc);
/**
- * ccw_device_tm_intrg - perform interrogate function
+ * ccw_device_tm_intrg() - perform interrogate function
* @cdev: ccw device on which to perform the interrogate function
*
* Perform an interrogate function on the given ccw device. Return zero on
@@ -664,7 +664,7 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
EXPORT_SYMBOL(ccw_device_tm_intrg);
/**
- * ccw_device_get_schid - obtain a subchannel id
+ * ccw_device_get_schid() - obtain a subchannel id
* @cdev: device to obtain the id for
* @schid: where to fill in the values
*/
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 22b58104683b..89a787790888 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -4,7 +4,7 @@
*/
#ifndef S390_IDSET_H
-#define S390_IDSET_H S390_IDSET_H
+#define S390_IDSET_H
#include <asm/schid.h>
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 98984818618f..8225da619014 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -12,7 +12,7 @@
#include "orb.h"
#include "cio.h"
-int stsch(struct subchannel_id schid, struct schib *addr)
+static inline int __stsch(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode = -EIO;
@@ -26,13 +26,21 @@ int stsch(struct subchannel_id schid, struct schib *addr)
: "+d" (ccode), "=m" (*addr)
: "d" (reg1), "a" (addr)
: "cc");
+ return ccode;
+}
+
+int stsch(struct subchannel_id schid, struct schib *addr)
+{
+ int ccode;
+
+ ccode = __stsch(schid, addr);
trace_s390_cio_stsch(schid, addr, ccode);
return ccode;
}
EXPORT_SYMBOL(stsch);
-int msch(struct subchannel_id schid, struct schib *addr)
+static inline int __msch(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode = -EIO;
@@ -46,12 +54,20 @@ int msch(struct subchannel_id schid, struct schib *addr)
: "+d" (ccode)
: "d" (reg1), "a" (addr), "m" (*addr)
: "cc");
+ return ccode;
+}
+
+int msch(struct subchannel_id schid, struct schib *addr)
+{
+ int ccode;
+
+ ccode = __msch(schid, addr);
trace_s390_cio_msch(schid, addr, ccode);
return ccode;
}
-int tsch(struct subchannel_id schid, struct irb *addr)
+static inline int __tsch(struct subchannel_id schid, struct irb *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
@@ -63,12 +79,20 @@ int tsch(struct subchannel_id schid, struct irb *addr)
: "=d" (ccode), "=m" (*addr)
: "d" (reg1), "a" (addr)
: "cc");
+ return ccode;
+}
+
+int tsch(struct subchannel_id schid, struct irb *addr)
+{
+ int ccode;
+
+ ccode = __tsch(schid, addr);
trace_s390_cio_tsch(schid, addr, ccode);
return ccode;
}
-int ssch(struct subchannel_id schid, union orb *addr)
+static inline int __ssch(struct subchannel_id schid, union orb *addr)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode = -EIO;
@@ -82,13 +106,21 @@ int ssch(struct subchannel_id schid, union orb *addr)
: "+d" (ccode)
: "d" (reg1), "a" (addr), "m" (*addr)
: "cc", "memory");
+ return ccode;
+}
+
+int ssch(struct subchannel_id schid, union orb *addr)
+{
+ int ccode;
+
+ ccode = __ssch(schid, addr);
trace_s390_cio_ssch(schid, addr, ccode);
return ccode;
}
EXPORT_SYMBOL(ssch);
-int csch(struct subchannel_id schid)
+static inline int __csch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode;
@@ -100,6 +132,14 @@ int csch(struct subchannel_id schid)
: "=d" (ccode)
: "d" (reg1)
: "cc");
+ return ccode;
+}
+
+int csch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __csch(schid);
trace_s390_cio_csch(schid, ccode);
return ccode;
@@ -140,7 +180,7 @@ int chsc(void *chsc_area)
}
EXPORT_SYMBOL(chsc);
-int rchp(struct chp_id chpid)
+static inline int __rchp(struct chp_id chpid)
{
register struct chp_id reg1 asm ("1") = chpid;
int ccode;
@@ -151,12 +191,20 @@ int rchp(struct chp_id chpid)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode) : "d" (reg1) : "cc");
+ return ccode;
+}
+
+int rchp(struct chp_id chpid)
+{
+ int ccode;
+
+ ccode = __rchp(chpid);
trace_s390_cio_rchp(chpid, ccode);
return ccode;
}
-int rsch(struct subchannel_id schid)
+static inline int __rsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode;
@@ -168,12 +216,21 @@ int rsch(struct subchannel_id schid)
: "=d" (ccode)
: "d" (reg1)
: "cc", "memory");
+
+ return ccode;
+}
+
+int rsch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __rsch(schid);
trace_s390_cio_rsch(schid, ccode);
return ccode;
}
-int hsch(struct subchannel_id schid)
+static inline int __hsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode;
@@ -185,12 +242,20 @@ int hsch(struct subchannel_id schid)
: "=d" (ccode)
: "d" (reg1)
: "cc");
+ return ccode;
+}
+
+int hsch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __hsch(schid);
trace_s390_cio_hsch(schid, ccode);
return ccode;
}
-int xsch(struct subchannel_id schid)
+static inline int __xsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode;
@@ -202,6 +267,14 @@ int xsch(struct subchannel_id schid)
: "=d" (ccode)
: "d" (reg1)
: "cc");
+ return ccode;
+}
+
+int xsch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __xsch(schid);
trace_s390_cio_xsch(schid, ccode);
return ccode;
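Every I/O instruction in ioasm.c is reshaped the same way: the inline-asm core becomes a static inline __helper(), and the exported function only adds the tracepoint, keeping tracing out of the asm path. The general shape, hedged (the example_* names and the tracepoint are illustrative, not real symbols):

    static inline int __example_insn(struct subchannel_id schid)
    {
            int ccode = 0;

            /* issue the instruction via inline asm; derive ccode from CC */
            return ccode;
    }

    int example_insn(struct subchannel_id schid)
    {
            int ccode = __example_insn(schid);

            trace_s390_cio_example(schid, ccode);  /* hypothetical tracepoint */
            return ccode;
    }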
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 327255da115a..4feb27215ab6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -169,6 +169,19 @@ static int ap_configuration_available(void)
return test_facility(12);
}
+static inline struct ap_queue_status
+__pqap_tapq(ap_qid_t qid, unsigned long *info)
+{
+ register unsigned long reg0 asm ("0") = qid;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ *info = reg2;
+ return reg1;
+}
+
/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
@@ -179,17 +192,15 @@ static int ap_configuration_available(void)
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, unsigned long *info)
{
- register unsigned long reg0 asm ("0") = qid;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
+ struct ap_queue_status aqs;
+ unsigned long _info;
if (test_facility(15))
- reg0 |= 1UL << 23; /* set APFT T bit*/
- asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ qid |= 1UL << 23; /* set APFT T bit */
+ aqs = __pqap_tapq(qid, &_info);
if (info)
- *info = reg2;
- return reg1;
+ *info = _info;
+ return aqs;
}
/**
@@ -237,14 +248,12 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
*
* Returns 0 on success, or -EOPNOTSUPP.
*/
-static inline int ap_query_configuration(void)
+static inline int __ap_query_configuration(void)
{
register unsigned long reg0 asm ("0") = 0x04000000UL;
register unsigned long reg1 asm ("1") = -EINVAL;
register void *reg2 asm ("2") = (void *) ap_configuration;
- if (!ap_configuration)
- return -EOPNOTSUPP;
asm volatile(
".long 0xb2af0000\n" /* PQAP(QCI) */
"0: la %1,0\n"
@@ -257,6 +266,13 @@ static inline int ap_query_configuration(void)
return reg1;
}
+static inline int ap_query_configuration(void)
+{
+ if (!ap_configuration)
+ return -EOPNOTSUPP;
+ return __ap_query_configuration();
+}
+
/**
* ap_init_configuration(): Allocate and query configuration array.
*/
@@ -346,6 +362,26 @@ static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind)
}
}
+static inline struct ap_queue_status
+__nqap(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+{
+ typedef struct { char _[length]; } msgblock;
+ register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = (unsigned long) msg;
+ register unsigned long reg3 asm ("3") = (unsigned long) length;
+ register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+ register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+
+ asm volatile (
+ "0: .long 0xb2ad0042\n" /* NQAP */
+ " brc 2,0b"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+ : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
+ : "cc");
+ return reg1;
+}
+
/**
* __ap_send(): Send message to adjunct processor queue.
* @qid: The AP queue number
@@ -363,24 +399,9 @@ static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
unsigned int special)
{
- typedef struct { char _[length]; } msgblock;
- register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = (unsigned long) msg;
- register unsigned long reg3 asm ("3") = (unsigned long) length;
- register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
-
if (special == 1)
- reg0 |= 0x400000UL;
-
- asm volatile (
- "0: .long 0xb2ad0042\n" /* NQAP */
- " brc 2,0b"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
- : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
- : "cc" );
- return reg1;
+ qid |= 0x400000UL;
+ return __nqap(qid, psmid, msg, length);
}
int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 80b1979e8d95..df036b872b05 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ac544330daeb..709b52339ff9 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3226,6 +3226,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
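Both qeth hunks add the same missing teardown step: the NAPI context registered during setup is deleted before the net_device is unregistered, mirroring the earlier netif_napi_add(). The resulting remove-path order:

    if (card->dev) {
            netif_napi_del(&card->napi);
            unregister_netdev(card->dev);
            card->dev = NULL;
    }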
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d6a691e27d33..d6803a9e5ab8 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -10093,6 +10093,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg->intr_flag = IPR_USE_MSI;
else {
ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->clear_isr = 1;
ioa_cfg->nvectors = 1;
dev_info(&pdev->dev, "Cannot enable MSI.\n");
}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 935c43095109..497bc1558377 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -233,15 +233,8 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->task_state_flags = SAS_TASK_STATE_PENDING;
qc->lldd_task = task;
- switch (qc->tf.protocol) {
- case ATA_PROT_NCQ:
- task->ata_task.use_ncq = 1;
- /* fall through */
- case ATAPI_PROT_DMA:
- case ATA_PROT_DMA:
- task->ata_task.dma_xfer = 1;
- break;
- }
+ task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
+ task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, task);
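The removed switch is replaced by libata's protocol classification helpers. Their effective behaviour for the protocols the old code handled, as a simplified sketch (the real helpers in the libata headers are flag-based; this only mirrors the outcome):

    static inline bool example_is_ncq(u8 prot)
    {
            return prot == ATA_PROT_NCQ;
    }

    static inline bool example_is_dma(u8 prot)
    {
            /* NCQ is a DMA-class protocol, matching the old fall-through */
            return prot == ATA_PROT_DMA || prot == ATAPI_PROT_DMA ||
                   prot == ATA_PROT_NCQ;
    }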
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 3b11aad03752..2f2a9910e30e 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -726,7 +726,7 @@ static int _osd_req_list_objects(struct osd_request *or,
return PTR_ERR(bio);
}
- bio->bi_rw &= ~REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
or->in.bio = bio;
or->in.total_bytes = bio->bi_iter.bi_size;
return 0;
@@ -824,7 +824,7 @@ void osd_req_write(struct osd_request *or,
{
_osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
WARN_ON(or->out.bio || or->out.total_bytes);
- WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
+ WARN_ON(!op_is_write(bio_op(bio)));
or->out.bio = bio;
or->out.total_bytes = len;
}
@@ -839,7 +839,7 @@ int osd_req_write_kern(struct osd_request *or,
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
osd_req_write(or, obj, offset, bio, len);
return 0;
}
@@ -875,7 +875,7 @@ void osd_req_read(struct osd_request *or,
{
_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
WARN_ON(or->in.bio || or->in.total_bytes);
- WARN_ON(bio->bi_rw & REQ_WRITE);
+ WARN_ON(op_is_write(bio_op(bio)));
or->in.bio = bio;
or->in.total_bytes = len;
}
@@ -956,7 +956,7 @@ static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
/* integrity check the continuation before the bio is linked
* with the other data segments since the continuation
@@ -1077,7 +1077,7 @@ int osd_req_write_sg_kern(struct osd_request *or,
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
osd_req_write_sg(or, obj, bio, sglist, numentries);
return 0;
@@ -1558,18 +1558,25 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
static struct request *_make_request(struct request_queue *q, bool has_write,
struct _osd_io_info *oii, gfp_t flags)
{
- if (oii->bio)
- return blk_make_request(q, oii->bio, flags);
- else {
- struct request *req;
-
- req = blk_get_request(q, has_write ? WRITE : READ, flags);
- if (IS_ERR(req))
- return req;
+ struct request *req;
+ struct bio *bio = oii->bio;
+ int ret;
- blk_rq_set_block_pc(req);
+ req = blk_get_request(q, has_write ? WRITE : READ, flags);
+ if (IS_ERR(req))
return req;
+ blk_rq_set_block_pc(req);
+
+ for_each_bio(bio) {
+ struct bio *bounce_bio = bio;
+
+ blk_queue_bounce(req->q, &bounce_bio);
+ ret = blk_rq_append_bio(req, bounce_bio);
+ if (ret)
+ return ERR_PTR(ret);
}
+
+ return req;
}
static int _init_blk_request(struct osd_request *or,
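Throughout osd_initiator.c the direct bi_rw manipulation becomes bio_set_op_attrs(), because the operation is now an enumerated value rather than a flag bit and cannot be ORed in or masked out; _make_request() likewise loses blk_make_request() in favour of blk_get_request() plus a per-bio blk_rq_append_bio() loop. Sketch of the direction change:

    static void example_set_bio_dir(struct bio *bio, bool is_write)
    {
            /* was: bio->bi_rw |= REQ_WRITE or bio->bi_rw &= ~REQ_WRITE */
            bio_set_op_attrs(bio, is_write ? REQ_OP_WRITE : REQ_OP_READ, 0);
    }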
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5649c200d37c..a92a62dea793 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2548,7 +2548,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!vha->flags.online)
return;
- if (rsp->msix->cpuid != smp_processor_id()) {
+ if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
/* if kernel does not notify qla of IRQ's CPU change,
* then set it here.
*/
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index ff41c310c900..eaccd651ccda 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -429,7 +429,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* here, and we don't know what device it is
* trying to work with, leave it as-is.
*/
- vmax = 8; /* max length of vendor */
+ vmax = sizeof(devinfo->vendor);
vskip = vendor;
while (vmax > 0 && *vskip == ' ') {
vmax--;
@@ -439,7 +439,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
while (vmax > 0 && vskip[vmax - 1] == ' ')
--vmax;
- mmax = 16; /* max length of model */
+ mmax = sizeof(devinfo->model);
mskip = model;
while (mmax > 0 && *mskip == ' ') {
mmax--;
@@ -455,10 +455,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* Behave like the older version of get_device_flags.
*/
if (memcmp(devinfo->vendor, vskip, vmax) ||
- devinfo->vendor[vmax])
+ (vmax < sizeof(devinfo->vendor) &&
+ devinfo->vendor[vmax]))
continue;
if (memcmp(devinfo->model, mskip, mmax) ||
- devinfo->model[mmax])
+ (mmax < sizeof(devinfo->model) &&
+ devinfo->model[mmax]))
continue;
return devinfo;
} else {
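The comparison bounds are now derived from the field sizes, and the terminator tests are guarded: vendor[] is a fixed 8-byte array with no guaranteed NUL, so the old unconditional devinfo->vendor[vmax] check indexed past the field whenever the stripped prefix filled all eight bytes, rejecting valid 8-character vendors. The guarded form:

    if (memcmp(devinfo->vendor, vskip, vmax) ||
        (vmax < sizeof(devinfo->vendor) && devinfo->vendor[vmax]))
            continue;       /* no match */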
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 60bff78e9ead..d3e852ad5aa3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1012,7 +1012,8 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
} else {
- scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
+ scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n",
+ (unsigned long long) req_op(rq),
+ (unsigned long long) rq->cmd_flags);
goto out;
}
@@ -1137,21 +1138,26 @@ static int sd_init_command(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
- if (rq->cmd_flags & REQ_DISCARD)
+ switch (req_op(rq)) {
+ case REQ_OP_DISCARD:
return sd_setup_discard_cmnd(cmd);
- else if (rq->cmd_flags & REQ_WRITE_SAME)
+ case REQ_OP_WRITE_SAME:
return sd_setup_write_same_cmnd(cmd);
- else if (rq->cmd_flags & REQ_FLUSH)
+ case REQ_OP_FLUSH:
return sd_setup_flush_cmnd(cmd);
- else
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
+ default:
+ BUG();
+ }
}
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
- if (rq->cmd_flags & REQ_DISCARD)
+ if (req_op(rq) == REQ_OP_DISCARD)
__free_page(rq->completion_data);
if (SCpnt->cmnd != rq->cmd) {
@@ -1613,8 +1619,7 @@ static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
return -EOPNOTSUPP;
return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
old_key, new_key, 0,
- (1 << 0) /* APTPL */ |
- (1 << 2) /* ALL_TG_PT */);
+ (1 << 0) /* APTPL */);
}
static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
@@ -1774,7 +1779,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned char op = SCpnt->cmnd[0];
unsigned char unmap = SCpnt->cmnd[1] & 8;
- if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
+ if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) {
if (!result) {
good_bytes = blk_rq_bytes(req);
scsi_set_resid(SCpnt, 0);
@@ -2988,7 +2993,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sd_revalidate_disk(gd);
- gd->driverfs_dev = &sdp->sdev_gendev;
gd->flags = GENHD_FL_EXT_DEVT;
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
@@ -2996,7 +3000,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
}
blk_pm_runtime_init(sdp->request_queue, dev);
- add_disk(gd);
+ device_add_disk(dev, gd);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
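In sd.c the flag tests become a req_op() switch: the reworked block layer stores the operation as an enum value rather than as bits in cmd_flags, so equality comparisons replace flag masking. A minimal sketch:

    static bool example_is_discard(struct request *rq)
    {
            /* was: rq->cmd_flags & REQ_DISCARD */
            return req_op(rq) == REQ_OP_DISCARD;
    }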
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 64c867405ad4..ed179348de80 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -713,7 +713,6 @@ static int sr_probe(struct device *dev)
get_capabilities(cd);
sr_vendor_init(cd);
- disk->driverfs_dev = &sdev->sdev_gendev;
set_capacity(disk, cd->capacity);
disk->private_data = &cd->driver;
disk->queue = sdev->request_queue;
@@ -730,7 +729,7 @@ static int sr_probe(struct device *dev)
dev_set_drvdata(dev, cd);
disk->flags |= GENHD_FL_REMOVABLE;
- add_disk(disk);
+ device_add_disk(&sdev->sdev_gendev, disk);
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index a9bac3bf20de..c887ecdaf19b 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,15 +34,6 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
static int __init sh_pm_runtime_init(void)
{
- if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
- if (!of_find_compatible_node(NULL, NULL,
- "renesas,cpg-mstp-clocks"))
- return 0;
- if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS_OF) &&
- of_find_node_with_property(NULL, "#power-domain-cells"))
- return 0;
- }
-
pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 77e6e45951f4..7589c8af4368 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -622,6 +622,8 @@ void spi_unregister_device(struct spi_device *spi)
if (spi->dev.of_node)
of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
+ if (ACPI_COMPANION(&spi->dev))
+ acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
@@ -1646,18 +1648,15 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
return 1;
}
-static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
+static acpi_status acpi_register_spi_device(struct spi_master *master,
+ struct acpi_device *adev)
{
- struct spi_master *master = data;
struct list_head resource_list;
- struct acpi_device *adev;
struct spi_device *spi;
int ret;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
return AE_OK;
spi = spi_alloc_device(master);
@@ -1683,6 +1682,8 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
if (spi->irq < 0)
spi->irq = acpi_dev_gpio_irq_get(adev, 0);
+ acpi_device_set_enumerated(adev);
+
adev->power.flags.ignore_parent = true;
strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
if (spi_add_device(spi)) {
@@ -1695,6 +1696,18 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
return AE_OK;
}
+static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct spi_master *master = data;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ return acpi_register_spi_device(master, adev);
+}
+
static void acpi_register_spi_devices(struct spi_master *master)
{
acpi_status status;
@@ -3107,6 +3120,77 @@ static struct notifier_block spi_of_notifier = {
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+#if IS_ENABLED(CONFIG_ACPI)
+static int spi_acpi_master_match(struct device *dev, const void *data)
+{
+ return ACPI_COMPANION(dev->parent) == data;
+}
+
+static int spi_acpi_device_match(struct device *dev, void *data)
+{
+ return ACPI_COMPANION(dev) == data;
+}
+
+static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = class_find_device(&spi_master_class, NULL, adev,
+ spi_acpi_master_match);
+ if (!dev)
+ return NULL;
+
+ return container_of(dev, struct spi_master, dev);
+}
+
+static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
+
+ return dev ? to_spi_device(dev) : NULL;
+}
+
+static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
+ void *arg)
+{
+ struct acpi_device *adev = arg;
+ struct spi_master *master;
+ struct spi_device *spi;
+
+ switch (value) {
+ case ACPI_RECONFIG_DEVICE_ADD:
+ master = acpi_spi_find_master_by_adev(adev->parent);
+ if (!master)
+ break;
+
+ acpi_register_spi_device(master, adev);
+ put_device(&master->dev);
+ break;
+ case ACPI_RECONFIG_DEVICE_REMOVE:
+ if (!acpi_device_enumerated(adev))
+ break;
+
+ spi = acpi_spi_find_device_by_adev(adev);
+ if (!spi)
+ break;
+
+ spi_unregister_device(spi);
+ put_device(&spi->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block spi_acpi_notifier = {
+ .notifier_call = acpi_spi_notify,
+};
+#else
+extern struct notifier_block spi_acpi_notifier;
+#endif
+
static int __init spi_init(void)
{
int status;
@@ -3127,6 +3211,8 @@ static int __init spi_init(void)
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
+ if (IS_ENABLED(CONFIG_ACPI))
+ WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
return 0;
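The SPI ACPI notifier follows the usual lookup/reference discipline: class_find_device() and bus_find_device() each take a reference on the device they return, which the notifier drops with put_device() once done. Excerpt-style sketch of the add path:

    master = acpi_spi_find_master_by_adev(adev->parent);
    if (master) {
            acpi_register_spi_device(master, adev);
            put_device(&master->dev);  /* balances class_find_device() */
    }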
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7c197d1a1231..af9476460023 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -102,4 +102,6 @@ source "drivers/staging/most/Kconfig"
source "drivers/staging/i4l/Kconfig"
+source "drivers/staging/ks7010/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a470c7276142..9f6009dcafa8 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
obj-$(CONFIG_WILC1000) += wilc1000/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_ISDN_I4L) += i4l/
+obj-$(CONFIG_KS7010) += ks7010/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 6480f60ebf6c..06e41d24ec62 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -24,26 +24,19 @@ config ANDROID_LOW_MEMORY_KILLER
scripts (/init.rc), and it defines priority values with minimum free memory size
for each priority.
-config SYNC
- bool "Synchronization framework"
- default n
- select ANON_INODES
- select DMA_SHARED_BUFFER
- ---help---
- This option enables the framework for synchronization between multiple
- drivers. Sync implementations can take advantage of hardware
- synchronization built into devices like GPUs.
-
config SW_SYNC
- bool "Software synchronization objects"
+ bool "Software synchronization framework"
default n
- depends on SYNC
depends on SYNC_FILE
+ depends on DEBUG_FS
---help---
A sync object driver that uses a 32bit counter to coordinate
synchronization. Useful when there is no hardware primitive backing
the synchronization.
+ WARNING: improper use of this can result in deadlocking kernel
+ drivers from userspace. Intended for test and debug only.
+
source "drivers/staging/android/ion/Kconfig"
endif # if ANDROID
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 980d6dc4b265..7ca61b77a8d4 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -4,5 +4,4 @@ obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
-obj-$(CONFIG_SYNC) += sync.o sync_debug.o
-obj-$(CONFIG_SW_SYNC) += sw_sync.o
+obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
index af39ff58fa33..115c9174705f 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/staging/android/sw_sync.c
@@ -1,5 +1,5 @@
/*
- * drivers/base/sw_sync.c
+ * drivers/dma-buf/sw_sync.c
*
* Copyright (C) 2012 Google, Inc.
*
@@ -14,76 +14,331 @@
*
*/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
-#include "sw_sync.h"
+#include "sync_debug.h"
-struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
+#define CREATE_TRACE_POINTS
+#include "trace/sync.h"
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+static const struct fence_ops timeline_fence_ops;
+
+static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
+{
+ if (fence->ops != &timeline_fence_ops)
+ return NULL;
+ return container_of(fence, struct sync_pt, base);
+}
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @name: sync_timeline name
+ *
+ * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
+ * case of error.
+ */
+struct sync_timeline *sync_timeline_create(const char *name)
+{
+ struct sync_timeline *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ kref_init(&obj->kref);
+ obj->context = fence_context_alloc(1);
+ strlcpy(obj->name, name, sizeof(obj->name));
+
+ INIT_LIST_HEAD(&obj->child_list_head);
+ INIT_LIST_HEAD(&obj->active_list_head);
+ spin_lock_init(&obj->child_list_lock);
+
+ sync_timeline_debug_add(obj);
+
+ return obj;
+}
+
+static void sync_timeline_free(struct kref *kref)
+{
+ struct sync_timeline *obj =
+ container_of(kref, struct sync_timeline, kref);
+
+ sync_timeline_debug_remove(obj);
+
+ kfree(obj);
+}
+
+static void sync_timeline_get(struct sync_timeline *obj)
+{
+ kref_get(&obj->kref);
+}
+
+static void sync_timeline_put(struct sync_timeline *obj)
+{
+ kref_put(&obj->kref, sync_timeline_free);
+}
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj: sync_timeline to signal
+ * @inc: amount to add to timeline->value
+ *
+ * A sync implementation should call this any time one of its fences
+ * has signaled or has an error condition.
+ */
+static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+{
+ unsigned long flags;
+ struct sync_pt *pt, *next;
+
+ trace_sync_timeline(obj);
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+
+ obj->value += inc;
+
+ list_for_each_entry_safe(pt, next, &obj->active_list_head,
+ active_list) {
+ if (fence_is_signaled_locked(&pt->base))
+ list_del_init(&pt->active_list);
+ }
+
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @parent: fence's parent sync_timeline
+ * @size: size to allocate for this pt
+ * @inc: value of the fence
+ *
+ * Creates a new sync_pt as a child of @parent. @size bytes will be
+ * allocated allowing for implementation specific data to be kept after
+ * the generic sync_timeline struct. Returns the sync_pt object or
+ * NULL in case of error.
+ */
+static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
+ unsigned int value)
+{
+ unsigned long flags;
+ struct sync_pt *pt;
+
+ if (size < sizeof(*pt))
+ return NULL;
+
+ pt = kzalloc(size, GFP_KERNEL);
+ if (!pt)
+ return NULL;
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+ sync_timeline_get(obj);
+ fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+ obj->context, value);
+ list_add_tail(&pt->child_list, &obj->child_list_head);
+ INIT_LIST_HEAD(&pt->active_list);
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+ return pt;
+}
+
+static const char *timeline_fence_get_driver_name(struct fence *fence)
+{
+ return "sw_sync";
+}
+
+static const char *timeline_fence_get_timeline_name(struct fence *fence)
+{
+ struct sync_timeline *parent = fence_parent(fence);
+
+ return parent->name;
+}
+
+static void timeline_fence_release(struct fence *fence)
{
- struct sw_sync_pt *pt;
+ struct sync_pt *pt = fence_to_sync_pt(fence);
+ struct sync_timeline *parent = fence_parent(fence);
+ unsigned long flags;
- pt = (struct sw_sync_pt *)
- sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
+ spin_lock_irqsave(fence->lock, flags);
+ list_del(&pt->child_list);
+ if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
+ list_del(&pt->active_list);
+ spin_unlock_irqrestore(fence->lock, flags);
- pt->value = value;
+ sync_timeline_put(parent);
+ fence_free(fence);
+}
+
+static bool timeline_fence_signaled(struct fence *fence)
+{
+ struct sync_timeline *parent = fence_parent(fence);
- return (struct fence *)pt;
+ return fence->seqno <= parent->value;
}
-EXPORT_SYMBOL(sw_sync_pt_create);
-static int sw_sync_fence_has_signaled(struct fence *fence)
+static bool timeline_fence_enable_signaling(struct fence *fence)
{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)fence;
- struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)fence_parent(fence);
+ struct sync_pt *pt = fence_to_sync_pt(fence);
+ struct sync_timeline *parent = fence_parent(fence);
- return (pt->value > obj->value) ? 0 : 1;
+ if (timeline_fence_signaled(fence))
+ return false;
+
+ list_add_tail(&pt->active_list, &parent->active_list_head);
+ return true;
}
-static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
- char *str, int size)
+static void timeline_fence_value_str(struct fence *fence,
+ char *str, int size)
{
- struct sw_sync_timeline *timeline =
- (struct sw_sync_timeline *)sync_timeline;
- snprintf(str, size, "%d", timeline->value);
+ snprintf(str, size, "%d", fence->seqno);
}
-static void sw_sync_fence_value_str(struct fence *fence, char *str, int size)
+static void timeline_fence_timeline_value_str(struct fence *fence,
+ char *str, int size)
{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)fence;
+ struct sync_timeline *parent = fence_parent(fence);
- snprintf(str, size, "%d", pt->value);
+ snprintf(str, size, "%d", parent->value);
}
-static struct sync_timeline_ops sw_sync_timeline_ops = {
- .driver_name = "sw_sync",
- .has_signaled = sw_sync_fence_has_signaled,
- .timeline_value_str = sw_sync_timeline_value_str,
- .fence_value_str = sw_sync_fence_value_str,
+static const struct fence_ops timeline_fence_ops = {
+ .get_driver_name = timeline_fence_get_driver_name,
+ .get_timeline_name = timeline_fence_get_timeline_name,
+ .enable_signaling = timeline_fence_enable_signaling,
+ .signaled = timeline_fence_signaled,
+ .wait = fence_default_wait,
+ .release = timeline_fence_release,
+ .fence_value_str = timeline_fence_value_str,
+ .timeline_value_str = timeline_fence_timeline_value_str,
};
-struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+/*
+ * *WARNING*
+ *
+ * improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
{
- struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
- sync_timeline_create(&sw_sync_timeline_ops,
- sizeof(struct sw_sync_timeline),
- name);
+ struct sync_timeline *obj;
+ char task_comm[TASK_COMM_LEN];
- return obj;
+ get_task_comm(task_comm, current);
+
+ obj = sync_timeline_create(task_comm);
+ if (!obj)
+ return -ENOMEM;
+
+ file->private_data = obj;
+
+ return 0;
}
-EXPORT_SYMBOL(sw_sync_timeline_create);
-void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
{
- obj->value += inc;
+ struct sync_timeline *obj = file->private_data;
+
+ smp_wmb();
+
+ sync_timeline_put(obj);
+ return 0;
+}
+
+static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
+ unsigned long arg)
+{
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ int err;
+ struct sync_pt *pt;
+ struct sync_file *sync_file;
+ struct sw_sync_create_fence_data data;
+
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ pt = sync_pt_create(obj, sizeof(*pt), data.value);
+ if (!pt) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ sync_file = sync_file_create(&pt->base);
+ if (!sync_file) {
+ fence_put(&pt->base);
+ err = -ENOMEM;
+ goto err;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ fput(sync_file->file);
+ err = -EFAULT;
+ goto err;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ return 0;
+
+err:
+ put_unused_fd(fd);
+ return err;
+}
- sync_timeline_signal(&obj->obj);
+static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
+{
+ u32 value;
+
+ if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+ return -EFAULT;
+
+ sync_timeline_signal(obj, value);
+
+ return 0;
+}
+
+static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sync_timeline *obj = file->private_data;
+
+ switch (cmd) {
+ case SW_SYNC_IOC_CREATE_FENCE:
+ return sw_sync_ioctl_create_fence(obj, arg);
+
+ case SW_SYNC_IOC_INC:
+ return sw_sync_ioctl_inc(obj, arg);
+
+ default:
+ return -ENOTTY;
+ }
}
-EXPORT_SYMBOL(sw_sync_timeline_inc);
+
+const struct file_operations sw_sync_debugfs_fops = {
+ .open = sw_sync_debugfs_open,
+ .release = sw_sync_debugfs_release,
+ .unlocked_ioctl = sw_sync_ioctl,
+ .compat_ioctl = sw_sync_ioctl,
+};
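The rewritten sw_sync exposes its interface through a debugfs file rather than a misc device: opening the file creates a timeline, SW_SYNC_IOC_CREATE_FENCE mints a fence fd at a given value, and SW_SYNC_IOC_INC advances the timeline. A hedged userspace sketch using the ABI defined above (the debugfs path is an assumption and depends on where debugfs is mounted; test and debug use only):

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    struct sw_sync_create_fence_data {
            uint32_t value;
            char name[32];
            int32_t fence;          /* fd of new fence, filled in by kernel */
    };

    #define SW_SYNC_IOC_MAGIC        'W'
    #define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0, \
                                           struct sw_sync_create_fence_data)
    #define SW_SYNC_IOC_INC          _IOW(SW_SYNC_IOC_MAGIC, 1, uint32_t)

    int main(void)
    {
            struct sw_sync_create_fence_data data;
            uint32_t inc = 1;
            int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);

            if (tl < 0)
                    return 1;
            memset(&data, 0, sizeof(data));
            data.value = 1;
            strcpy(data.name, "example");
            if (ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data)) {
                    close(tl);
                    return 1;
            }
            ioctl(tl, SW_SYNC_IOC_INC, &inc); /* timeline hits 1: fence signals */
            close(data.fence);
            close(tl);                        /* drops the timeline */
            return 0;
    }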
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
deleted file mode 100644
index e18667bfb0ca..000000000000
--- a/drivers/staging/android/sw_sync.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * include/linux/sw_sync.h
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_SW_SYNC_H
-#define _LINUX_SW_SYNC_H
-
-#include <linux/types.h>
-#include <linux/kconfig.h>
-#include "sync.h"
-#include "uapi/sw_sync.h"
-
-struct sw_sync_timeline {
- struct sync_timeline obj;
-
- u32 value;
-};
-
-struct sw_sync_pt {
- struct fence pt;
-
- u32 value;
-};
-
-#if IS_ENABLED(CONFIG_SW_SYNC)
-struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
-void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
-
-struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
-#else
-static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
-{
- return NULL;
-}
-
-static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
-{
-}
-
-static inline struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj,
- u32 value)
-{
- return NULL;
-}
-#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
-
-#endif /* _LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
deleted file mode 100644
index 1d14c83c7f7c..000000000000
--- a/drivers/staging/android/sync.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * drivers/base/sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/anon_inodes.h>
-
-#include "sync.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace/sync.h"
-
-static const struct fence_ops android_fence_ops;
-
-struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
- int size, const char *name)
-{
- struct sync_timeline *obj;
-
- if (size < sizeof(struct sync_timeline))
- return NULL;
-
- obj = kzalloc(size, GFP_KERNEL);
- if (!obj)
- return NULL;
-
- kref_init(&obj->kref);
- obj->ops = ops;
- obj->context = fence_context_alloc(1);
- strlcpy(obj->name, name, sizeof(obj->name));
-
- INIT_LIST_HEAD(&obj->child_list_head);
- INIT_LIST_HEAD(&obj->active_list_head);
- spin_lock_init(&obj->child_list_lock);
-
- sync_timeline_debug_add(obj);
-
- return obj;
-}
-EXPORT_SYMBOL(sync_timeline_create);
-
-static void sync_timeline_free(struct kref *kref)
-{
- struct sync_timeline *obj =
- container_of(kref, struct sync_timeline, kref);
-
- sync_timeline_debug_remove(obj);
-
- kfree(obj);
-}
-
-static void sync_timeline_get(struct sync_timeline *obj)
-{
- kref_get(&obj->kref);
-}
-
-static void sync_timeline_put(struct sync_timeline *obj)
-{
- kref_put(&obj->kref, sync_timeline_free);
-}
-
-void sync_timeline_destroy(struct sync_timeline *obj)
-{
- obj->destroyed = true;
- /*
- * Ensure timeline is marked as destroyed before
- * changing timeline's fences status.
- */
- smp_wmb();
-
- sync_timeline_put(obj);
-}
-EXPORT_SYMBOL(sync_timeline_destroy);
-
-void sync_timeline_signal(struct sync_timeline *obj)
-{
- unsigned long flags;
- struct fence *fence, *next;
-
- trace_sync_timeline(obj);
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
-
- list_for_each_entry_safe(fence, next, &obj->active_list_head,
- active_list) {
- if (fence_is_signaled_locked(fence))
- list_del_init(&fence->active_list);
- }
-
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-EXPORT_SYMBOL(sync_timeline_signal);
-
-struct fence *sync_pt_create(struct sync_timeline *obj, int size)
-{
- unsigned long flags;
- struct fence *fence;
-
- if (size < sizeof(*fence))
- return NULL;
-
- fence = kzalloc(size, GFP_KERNEL);
- if (!fence)
- return NULL;
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- sync_timeline_get(obj);
- fence_init(fence, &android_fence_ops, &obj->child_list_lock,
- obj->context, ++obj->value);
- list_add_tail(&fence->child_list, &obj->child_list_head);
- INIT_LIST_HEAD(&fence->active_list);
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
- return fence;
-}
-EXPORT_SYMBOL(sync_pt_create);
-
-static const char *android_fence_get_driver_name(struct fence *fence)
-{
- struct sync_timeline *parent = fence_parent(fence);
-
- return parent->ops->driver_name;
-}
-
-static const char *android_fence_get_timeline_name(struct fence *fence)
-{
- struct sync_timeline *parent = fence_parent(fence);
-
- return parent->name;
-}
-
-static void android_fence_release(struct fence *fence)
-{
- struct sync_timeline *parent = fence_parent(fence);
- unsigned long flags;
-
- spin_lock_irqsave(fence->lock, flags);
- list_del(&fence->child_list);
- if (WARN_ON_ONCE(!list_empty(&fence->active_list)))
- list_del(&fence->active_list);
- spin_unlock_irqrestore(fence->lock, flags);
-
- sync_timeline_put(parent);
- fence_free(fence);
-}
-
-static bool android_fence_signaled(struct fence *fence)
-{
- struct sync_timeline *parent = fence_parent(fence);
- int ret;
-
- ret = parent->ops->has_signaled(fence);
- if (ret < 0)
- fence->status = ret;
- return ret;
-}
-
-static bool android_fence_enable_signaling(struct fence *fence)
-{
- struct sync_timeline *parent = fence_parent(fence);
-
- if (android_fence_signaled(fence))
- return false;
-
- list_add_tail(&fence->active_list, &parent->active_list_head);
- return true;
-}
-
-static void android_fence_value_str(struct fence *fence,
- char *str, int size)
-{
- struct sync_timeline *parent = fence_parent(fence);
-
- if (!parent->ops->fence_value_str) {
- if (size)
- *str = 0;
- return;
- }
- parent->ops->fence_value_str(fence, str, size);
-}
-
-static void android_fence_timeline_value_str(struct fence *fence,
- char *str, int size)
-{
- struct sync_timeline *parent = fence_parent(fence);
-
- if (!parent->ops->timeline_value_str) {
- if (size)
- *str = 0;
- return;
- }
- parent->ops->timeline_value_str(parent, str, size);
-}
-
-static const struct fence_ops android_fence_ops = {
- .get_driver_name = android_fence_get_driver_name,
- .get_timeline_name = android_fence_get_timeline_name,
- .enable_signaling = android_fence_enable_signaling,
- .signaled = android_fence_signaled,
- .wait = fence_default_wait,
- .release = android_fence_release,
- .fence_value_str = android_fence_value_str,
- .timeline_value_str = android_fence_timeline_value_str,
-};
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
deleted file mode 100644
index b56885c14839..000000000000
--- a/drivers/staging/android/sync.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * include/linux/sync.h
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_SYNC_H
-#define _LINUX_SYNC_H
-
-#include <linux/types.h>
-#include <linux/kref.h>
-#include <linux/ktime.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/fence.h>
-
-#include <linux/sync_file.h>
-#include <uapi/linux/sync_file.h>
-
-struct sync_timeline;
-
-/**
- * struct sync_timeline_ops - sync object implementation ops
- * @driver_name: name of the implementation
- * @has_signaled: returns:
- * 1 if pt has signaled
- * 0 if pt has not signaled
- * <0 on error
- * @timeline_value_str: fill str with the value of the sync_timeline's counter
- * @fence_value_str: fill str with the value of the fence
- */
-struct sync_timeline_ops {
- const char *driver_name;
-
- /* required */
- int (*has_signaled)(struct fence *fence);
-
- /* optional */
- void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
- int size);
-
- /* optional */
- void (*fence_value_str)(struct fence *fence, char *str, int size);
-};
-
-/**
- * struct sync_timeline - sync object
- * @kref: reference count on fence.
- * @ops: ops that define the implementation of the sync_timeline
- * @name: name of the sync_timeline. Useful for debugging
- * @destroyed: set when sync_timeline is destroyed
- * @child_list_head: list of children sync_pts for this sync_timeline
- * @child_list_lock: lock protecting @child_list_head, destroyed, and
- * fence.status
- * @active_list_head: list of active (unsignaled/errored) sync_pts
- * @sync_timeline_list: membership in global sync_timeline_list
- */
-struct sync_timeline {
- struct kref kref;
- const struct sync_timeline_ops *ops;
- char name[32];
-
- /* protected by child_list_lock */
- bool destroyed;
- int context, value;
-
- struct list_head child_list_head;
- spinlock_t child_list_lock;
-
- struct list_head active_list_head;
-
-#ifdef CONFIG_DEBUG_FS
- struct list_head sync_timeline_list;
-#endif
-};
-
-static inline struct sync_timeline *fence_parent(struct fence *fence)
-{
- return container_of(fence->lock, struct sync_timeline,
- child_list_lock);
-}
-
-/*
- * API for sync_timeline implementers
- */
-
-/**
- * sync_timeline_create() - creates a sync object
- * @ops: specifies the implementation ops for the object
- * @size: size to allocate for this obj
- * @name: sync_timeline name
- *
- * Creates a new sync_timeline which will use the implementation specified by
- * @ops. @size bytes will be allocated allowing for implementation specific
- * data to be kept after the generic sync_timeline struct. Returns the
- * sync_timeline object or NULL in case of error.
- */
-struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
- int size, const char *name);
-
-/**
- * sync_timeline_destroy() - destroys a sync object
- * @obj: sync_timeline to destroy
- *
- * A sync implementation should call this when the @obj is going away
- * (i.e. module unload.) @obj won't actually be freed until all its children
- * fences are freed.
- */
-void sync_timeline_destroy(struct sync_timeline *obj);
-
-/**
- * sync_timeline_signal() - signal a status change on a sync_timeline
- * @obj: sync_timeline to signal
- *
- * A sync implementation should call this any time one of its fences
- * has signaled or has an error condition.
- */
-void sync_timeline_signal(struct sync_timeline *obj);
-
-/**
- * sync_pt_create() - creates a sync pt
- * @parent: fence's parent sync_timeline
- * @size: size to allocate for this pt
- *
- * Creates a new fence as a child of @parent. @size bytes will be
- * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct. Returns the fence object or
- * NULL in case of error.
- */
-struct fence *sync_pt_create(struct sync_timeline *parent, int size);
-
-#ifdef CONFIG_DEBUG_FS
-
-void sync_timeline_debug_add(struct sync_timeline *obj);
-void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_file_debug_add(struct sync_file *fence);
-void sync_file_debug_remove(struct sync_file *fence);
-void sync_dump(void);
-
-#else
-# define sync_timeline_debug_add(obj)
-# define sync_timeline_debug_remove(obj)
-# define sync_file_debug_add(fence)
-# define sync_file_debug_remove(fence)
-# define sync_dump()
-#endif
-
-#endif /* _LINUX_SYNC_H */
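
Editorial aside, not part of this patch: the header deleted above defined the implementer-facing half of the old staging sync API. A minimal sketch of how a driver built a timeline on it, modeled on the sw_sync implementation this series reworks; all my_* names are hypothetical and error handling is omitted.

/*
 * Hypothetical example: a driver-private timeline built on the
 * sync_timeline API removed above. my_* names are made up.
 */
struct my_timeline {
	struct sync_timeline obj;	/* must be the first member */
	u32 value;			/* last signaled seqno */
};

static int my_has_signaled(struct fence *fence)
{
	struct my_timeline *tl =
		container_of(fence_parent(fence), struct my_timeline, obj);

	/* a fence is signaled once the timeline counter reaches its seqno */
	return fence->seqno <= tl->value;
}

static const struct sync_timeline_ops my_timeline_ops = {
	.driver_name	= "my_driver",
	.has_signaled	= my_has_signaled,
};

static struct my_timeline *my_timeline_create(void)
{
	/* bytes past sizeof(struct sync_timeline) hold the private data */
	return (struct my_timeline *)
		sync_timeline_create(&my_timeline_ops,
				     sizeof(struct my_timeline), "my_tl");
}

static void my_timeline_inc(struct my_timeline *tl)
{
	tl->value++;
	sync_timeline_signal(&tl->obj);	/* retire newly signaled fences */
}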
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index 5f57499c98bf..4c5a85595a85 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -15,21 +15,7 @@
*/
#include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/anon_inodes.h>
-#include <linux/time64.h>
-#include <linux/sync_file.h>
-#include "sw_sync.h"
-
-#ifdef CONFIG_DEBUG_FS
+#include "sync_debug.h"
static struct dentry *dbgfs;
@@ -105,7 +91,7 @@ static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
- if ((!fence || fence->ops->timeline_value_str) &&
+ if (fence->ops->timeline_value_str &&
fence->ops->fence_value_str) {
char value[64];
bool success;
@@ -113,10 +99,9 @@ static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
fence->ops->fence_value_str(fence, value, sizeof(value));
success = strlen(value);
- if (success)
+ if (success) {
seq_printf(s, ": %s", value);
- if (success && fence) {
fence->ops->timeline_value_str(fence, value,
sizeof(value));
@@ -133,22 +118,13 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
struct list_head *pos;
unsigned long flags;
- seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
-
- if (obj->ops->timeline_value_str) {
- char value[64];
-
- obj->ops->timeline_value_str(obj, value, sizeof(value));
- seq_printf(s, ": %s", value);
- }
-
- seq_puts(s, "\n");
+ seq_printf(s, "%s: %d\n", obj->name, obj->value);
spin_lock_irqsave(&obj->child_list_lock, flags);
list_for_each(pos, &obj->child_list_head) {
- struct fence *fence =
- container_of(pos, struct fence, child_list);
- sync_print_fence(s, fence, false);
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, child_list);
+ sync_print_fence(s, &pt->base, false);
}
spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
@@ -209,126 +185,19 @@ static const struct file_operations sync_info_debugfs_fops = {
.release = single_release,
};
-/*
- * *WARNING*
- *
- * improper use of this can result in deadlocking kernel drivers from userspace.
- */
-
-/* opening sw_sync create a new sync obj */
-static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj;
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, current);
-
- obj = sw_sync_timeline_create(task_comm);
- if (!obj)
- return -ENOMEM;
-
- file->private_data = obj;
-
- return 0;
-}
-
-static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- sync_timeline_destroy(&obj->obj);
- return 0;
-}
-
-static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
- unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct fence *fence;
- struct sync_file *sync_file;
- struct sw_sync_create_fence_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err;
- }
-
- fence = sw_sync_pt_create(obj, data.value);
- if (!fence) {
- err = -ENOMEM;
- goto err;
- }
-
- sync_file = sync_file_create(fence);
- if (!sync_file) {
- fence_put(fence);
- err = -ENOMEM;
- goto err;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- fput(sync_file->file);
- err = -EFAULT;
- goto err;
- }
-
- fd_install(fd, sync_file->file);
-
- return 0;
-
-err:
- put_unused_fd(fd);
- return err;
-}
-
-static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
-{
- u32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- sw_sync_timeline_inc(obj, value);
-
- return 0;
-}
-
-static long sw_sync_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- switch (cmd) {
- case SW_SYNC_IOC_CREATE_FENCE:
- return sw_sync_ioctl_create_fence(obj, arg);
-
- case SW_SYNC_IOC_INC:
- return sw_sync_ioctl_inc(obj, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sw_sync_debugfs_fops = {
- .open = sw_sync_debugfs_open,
- .release = sw_sync_debugfs_release,
- .unlocked_ioctl = sw_sync_ioctl,
- .compat_ioctl = sw_sync_ioctl,
-};
-
static __init int sync_debugfs_init(void)
{
dbgfs = debugfs_create_dir("sync", NULL);
- debugfs_create_file("info", 0444, dbgfs, NULL, &sync_info_debugfs_fops);
- debugfs_create_file("sw_sync", 0644, dbgfs, NULL,
- &sw_sync_debugfs_fops);
+ /*
+	 * The debugfs files won't ever get removed and thus, there is
+	 * no need to protect them against removal races. The use of
+ * debugfs_create_file_unsafe() is actually safe here.
+ */
+ debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
+ &sync_info_debugfs_fops);
+ debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
+ &sw_sync_debugfs_fops);
return 0;
}
@@ -359,5 +228,3 @@ void sync_dump(void)
}
}
}
-
-#endif
diff --git a/drivers/staging/android/sync_debug.h b/drivers/staging/android/sync_debug.h
new file mode 100644
index 000000000000..425ebc5c32aa
--- /dev/null
+++ b/drivers/staging/android/sync_debug.h
@@ -0,0 +1,83 @@
+/*
+ * include/linux/sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/fence.h>
+
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
+
+/**
+ * struct sync_timeline - sync object
+ * @kref: reference count on fence.
+ * @name: name of the sync_timeline. Useful for debugging
+ * @child_list_head: list of children sync_pts for this sync_timeline
+ * @child_list_lock: lock protecting @child_list_head and fence.status
+ * @active_list_head: list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list: membership in global sync_timeline_list
+ */
+struct sync_timeline {
+ struct kref kref;
+ char name[32];
+
+ /* protected by child_list_lock */
+ int context, value;
+
+ struct list_head child_list_head;
+ spinlock_t child_list_lock;
+
+ struct list_head active_list_head;
+
+ struct list_head sync_timeline_list;
+};
+
+static inline struct sync_timeline *fence_parent(struct fence *fence)
+{
+ return container_of(fence->lock, struct sync_timeline,
+ child_list_lock);
+}
+
+/**
+ * struct sync_pt - sync_pt object
+ * @base: base fence object
+ * @child_list: sync timeline child's list
+ * @active_list: sync timeline active child's list
+ */
+struct sync_pt {
+ struct fence base;
+ struct list_head child_list;
+ struct list_head active_list;
+};
+
+#ifdef CONFIG_SW_SYNC
+
+extern const struct file_operations sw_sync_debugfs_fops;
+
+void sync_timeline_debug_add(struct sync_timeline *obj);
+void sync_timeline_debug_remove(struct sync_timeline *obj);
+void sync_file_debug_add(struct sync_file *fence);
+void sync_file_debug_remove(struct sync_file *fence);
+void sync_dump(void);
+
+#else
+# define sync_timeline_debug_add(obj)
+# define sync_timeline_debug_remove(obj)
+# define sync_file_debug_add(fence)
+# define sync_file_debug_remove(fence)
+# define sync_dump()
+#endif
+
+#endif /* _LINUX_SYNC_H */
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
index a0f80f41677e..6b5ce9640ddd 100644
--- a/drivers/staging/android/trace/sync.h
+++ b/drivers/staging/android/trace/sync.h
@@ -5,7 +5,7 @@
#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SYNC_H
-#include "../sync.h"
+#include "../sync_debug.h"
#include <linux/tracepoint.h>
TRACE_EVENT(sync_timeline,
@@ -15,21 +15,15 @@ TRACE_EVENT(sync_timeline,
TP_STRUCT__entry(
__string(name, timeline->name)
- __array(char, value, 32)
+ __field(u32, value)
),
TP_fast_assign(
__assign_str(name, timeline->name);
- if (timeline->ops->timeline_value_str) {
- timeline->ops->timeline_value_str(timeline,
- __entry->value,
- sizeof(__entry->value));
- } else {
- __entry->value[0] = '\0';
- }
+ __entry->value = timeline->value;
),
- TP_printk("name=%s value=%s", __get_str(name), __entry->value)
+ TP_printk("name=%s value=%d", __get_str(name), __entry->value)
);
#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
deleted file mode 100644
index 9b5d4869505c..000000000000
--- a/drivers/staging/android/uapi/sw_sync.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_SW_SYNC_H
-#define _UAPI_LINUX_SW_SYNC_H
-
-#include <linux/types.h>
-
-struct sw_sync_create_fence_data {
- __u32 value;
- char name[32];
- __s32 fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC 'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
- struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
-#endif /* _UAPI_LINUX_SW_SYNC_H */
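
Editorial aside, not part of this patch: for context, roughly how userspace drove the sw_sync ioctls deleted above. A hedged sketch; the struct and ioctl numbers come from the removed header, the debugfs mount point is assumed to be /sys/kernel/debug, and error handling is omitted.

/*
 * Hypothetical userspace sketch exercising the removed sw_sync node.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

int sw_sync_demo(void)
{
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;
	int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);

	memset(&data, 0, sizeof(data));
	data.value = 1;			/* signal when timeline reaches 1 */
	strcpy(data.name, "demo");
	ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data);	/* fd in data.fence */

	ioctl(tl, SW_SYNC_IOC_INC, &inc);	/* advance timeline by 1 */

	close(tl);			/* also destroys the timeline */
	return data.fence;		/* now-signaled sync_file fd */
}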
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index ad5297f6d418..08fb26b51a5f 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -779,7 +779,7 @@ struct comedi_subdinfo {
unsigned int flags;
unsigned int range_type;
unsigned int settling_time_0;
- unsigned insn_bits_support;
+ unsigned int insn_bits_support;
unsigned int unused[8];
};
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 629080f39db0..1999eed4f4c5 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -312,8 +312,8 @@ static void comedi_file_reset(struct file *file)
}
cfp->last_attached = dev->attached;
cfp->last_detach_count = dev->detach_count;
- ACCESS_ONCE(cfp->read_subdev) = read_s;
- ACCESS_ONCE(cfp->write_subdev) = write_s;
+ WRITE_ONCE(cfp->read_subdev, read_s);
+ WRITE_ONCE(cfp->write_subdev, write_s);
}
static void comedi_file_check(struct file *file)
@@ -331,7 +331,7 @@ static struct comedi_subdevice *comedi_file_read_subdevice(struct file *file)
struct comedi_file *cfp = file->private_data;
comedi_file_check(file);
- return ACCESS_ONCE(cfp->read_subdev);
+ return READ_ONCE(cfp->read_subdev);
}
static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
@@ -339,7 +339,7 @@ static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
struct comedi_file *cfp = file->private_data;
comedi_file_check(file);
- return ACCESS_ONCE(cfp->write_subdev);
+ return READ_ONCE(cfp->write_subdev);
}
static int resize_async_buffer(struct comedi_device *dev,
@@ -1256,16 +1256,17 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
switch (insn->insn) {
case INSN_GTOD:
{
- struct timeval tv;
+ struct timespec64 tv;
if (insn->n != 2) {
ret = -EINVAL;
break;
}
- do_gettimeofday(&tv);
- data[0] = tv.tv_sec;
- data[1] = tv.tv_usec;
+ ktime_get_real_ts64(&tv);
+ /* unsigned data safe until 2106 */
+ data[0] = (unsigned int)tv.tv_sec;
+ data[1] = tv.tv_nsec / NSEC_PER_USEC;
ret = 2;
break;
@@ -1992,7 +1993,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
!(s_old->async->cmd.flags & CMDF_WRITE))
return -EBUSY;
- ACCESS_ONCE(cfp->read_subdev) = s_new;
+ WRITE_ONCE(cfp->read_subdev, s_new);
return 0;
}
@@ -2034,7 +2035,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
(s_old->async->cmd.flags & CMDF_WRITE))
return -EBUSY;
- ACCESS_ONCE(cfp->write_subdev) = s_new;
+ WRITE_ONCE(cfp->write_subdev, s_new);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
deleted file mode 100644
index f0c0d58383ca..000000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ /dev/null
@@ -1,187 +0,0 @@
-static int apci1564_timer_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int ctrl;
-
- devpriv->tsk_current = current;
-
- /* Stop the timer */
- ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_ENA);
- outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
-
- if (data[1] == 1) {
- /* Enable timer int & disable all the other int sources */
- outl(ADDI_TCW_CTRL_IRQ_ENA,
- devpriv->timer + ADDI_TCW_CTRL_REG);
- outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
- outl(0x0, dev->iobase + APCI1564_DO_IRQ_REG);
- outl(0x0, dev->iobase + APCI1564_WDOG_IRQ_REG);
- if (devpriv->counters) {
- unsigned long iobase;
-
- iobase = devpriv->counters + ADDI_TCW_IRQ_REG;
- outl(0x0, iobase + APCI1564_COUNTER(0));
- outl(0x0, iobase + APCI1564_COUNTER(1));
- outl(0x0, iobase + APCI1564_COUNTER(2));
- }
- } else {
- /* disable Timer interrupt */
- outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
- }
-
- /* Loading Timebase */
- outl(data[2], devpriv->timer + ADDI_TCW_TIMEBASE_REG);
-
- /* Loading the Reload value */
- outl(data[3], devpriv->timer + ADDI_TCW_RELOAD_REG);
-
- ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE_MASK |
- ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
- ADDI_TCW_CTRL_WARN_ENA | ADDI_TCW_CTRL_ENA);
- ctrl |= ADDI_TCW_CTRL_MODE(2) | ADDI_TCW_CTRL_TIMER_ENA;
- outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-static int apci1564_timer_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int ctrl;
-
- ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
- switch (data[1]) {
- case 0: /* Stop The Timer */
- ctrl &= ~ADDI_TCW_CTRL_ENA;
- break;
- case 1: /* Enable the Timer */
- ctrl |= ADDI_TCW_CTRL_ENA;
- break;
- }
- outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-static int apci1564_timer_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
-
- /* Stores the status of the Timer */
- data[0] = inl(devpriv->timer + ADDI_TCW_STATUS_REG) &
- ADDI_TCW_STATUS_OVERFLOW;
-
- /* Stores the Actual value of the Timer */
- data[1] = inl(devpriv->timer + ADDI_TCW_VAL_REG);
-
- return insn->n;
-}
-
-static int apci1564_counter_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
- unsigned int ctrl;
-
- devpriv->tsk_current = current;
-
- /* Stop The Timer */
- ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
- ADDI_TCW_CTRL_ENA);
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
- /* Set the reload value */
- outl(data[3], iobase + ADDI_TCW_RELOAD_REG);
-
- /* Set the mode */
- ctrl &= ~(ADDI_TCW_CTRL_EXT_CLK_MASK | ADDI_TCW_CTRL_MODE_MASK |
- ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
- ADDI_TCW_CTRL_WARN_ENA);
- ctrl |= ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE(data[4]);
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
- /* Enable or Disable Interrupt */
- if (data[1])
- ctrl |= ADDI_TCW_CTRL_IRQ_ENA;
- else
- ctrl &= ~ADDI_TCW_CTRL_IRQ_ENA;
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
- /* Set the Up/Down selection */
- if (data[6])
- ctrl |= ADDI_TCW_CTRL_CNT_UP;
- else
- ctrl &= ~ADDI_TCW_CTRL_CNT_UP;
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-static int apci1564_counter_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
- unsigned int ctrl;
-
- ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
- ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
- switch (data[1]) {
- case 0: /* Stops the Counter subdevice */
- ctrl = 0;
- break;
- case 1: /* Start the Counter subdevice */
- ctrl |= ADDI_TCW_CTRL_ENA;
- break;
- case 2: /* Clears the Counter subdevice */
- ctrl |= ADDI_TCW_CTRL_GATE;
- break;
- }
- outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
- return insn->n;
-}
-
-static int apci1564_counter_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct apci1564_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
- unsigned int status;
-
- /* Read the Counter Actual Value. */
- data[0] = inl(iobase + ADDI_TCW_VAL_REG);
-
- status = inl(iobase + ADDI_TCW_STATUS_REG);
- data[1] = (status & ADDI_TCW_STATUS_SOFT_TRIG) ? 1 : 0;
- data[2] = (status & ADDI_TCW_STATUS_HARDWARE_TRIG) ? 1 : 0;
- data[3] = (status & ADDI_TCW_STATUS_SOFT_CLR) ? 1 : 0;
- data[4] = (status & ADDI_TCW_STATUS_OVERFLOW) ? 1 : 0;
-
- return insn->n;
-}
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index f1ccfbd4c578..9bfb79c2e5c8 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -21,9 +21,62 @@
* details.
*/
+/*
+ * Driver: addi_apci_1564
+ * Description: ADDI-DATA APCI-1564 Digital I/O board
+ * Devices: [ADDI-DATA] APCI-1564 (addi_apci_1564)
+ * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Updated: Thu, 02 Jun 2016 13:12:46 -0700
+ * Status: untested
+ *
+ * Configuration Options: not applicable, uses comedi PCI auto config
+ *
+ * This board has the following features:
+ * - 32 optically isolated digital inputs (24V), 16 of which can
+ * generate change-of-state (COS) interrupts (channels 4 to 19)
+ * - 32 optically isolated digital outputs (10V to 36V)
+ * - 1 8-bit watchdog for resetting the outputs
+ * - 1 12-bit timer
+ * - 3 32-bit counters
+ * - 2 diagnostic inputs
+ *
+ * The COS, timer, and counter subdevices all use the dev->read_subdev to
+ * return the interrupt status. The sample data is updated and returned when
+ * any of these subdevices generates an interrupt. The sample data format is:
+ *
+ * Bit Description
+ * ----- ------------------------------------------
+ * 31 COS interrupt
+ * 30 timer interrupt
+ * 29 counter 2 interrupt
+ * 28 counter 1 interrupt
+ * 27 counter 0 interrupt
+ * 26:20 not used
+ * 19:4 COS digital input state (channels 19 to 4)
+ * 3:0 not used
+ *
+ * The COS interrupts must be configured using an INSN_CONFIG_DIGITAL_TRIG
+ * instruction before they can be enabled by an async command. The COS
+ * interrupts will stay active until canceled.
+ *
+ * The timer subdevice does not use an async command. All control is handled
+ * by the (*insn_config).
+ *
+ * FIXME: The format of the ADDI_TCW_TIMEBASE_REG is not described in the
+ * datasheet I have. The INSN_CONFIG_SET_CLOCK_SRC currently just writes
+ * the raw data[1] to this register along with the raw data[2] value to the
+ * ADDI_TCW_RELOAD_REG. If anyone tests this and can determine the actual
+ * timebase/reload operation, please let me know.
+ *
+ * The counter subdevice also does not use an async command. All control is
+ * handled by the (*insn_config).
+ *
+ * FIXME: The operation of the counters is not really described in the
+ * datasheet I have. The (*insn_config) needs more work.
+ */
+
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/sched.h>
#include "../comedi_pci.h"
#include "addi_tcw.h"
@@ -77,6 +130,7 @@
#define APCI1564_DI_REG 0x00
#define APCI1564_DI_INT_MODE1_REG 0x04
#define APCI1564_DI_INT_MODE2_REG 0x08
+#define APCI1564_DI_INT_MODE_MASK 0x000ffff0 /* chans [19:4] */
#define APCI1564_DI_INT_STATUS_REG 0x0c
#define APCI1564_DI_IRQ_REG 0x10
#define APCI1564_DI_IRQ_ENA BIT(2)
@@ -90,14 +144,7 @@
#define APCI1564_DO_INT_STATUS_VCC BIT(0)
#define APCI1564_DO_IRQ_REG 0x20
#define APCI1564_DO_IRQ_INTR BIT(0)
-#define APCI1564_WDOG_REG 0x24
-#define APCI1564_WDOG_RELOAD_REG 0x28
-#define APCI1564_WDOG_TIMEBASE_REG 0x2c
-#define APCI1564_WDOG_CTRL_REG 0x30
-#define APCI1564_WDOG_STATUS_REG 0x34
-#define APCI1564_WDOG_IRQ_REG 0x38
-#define APCI1564_WDOG_WARN_TIMEVAL_REG 0x3c
-#define APCI1564_WDOG_WARN_TIMEBASE_REG 0x40
+#define APCI1564_WDOG_IOBASE 0x24
/*
* devpriv->timer Register Map (see addi_tcw.h for register/bit defines)
@@ -111,18 +158,24 @@
*/
#define APCI1564_COUNTER(x) ((x) * 0x20)
+/*
+ * The dev->read_subdev is used to return the interrupt events along with
+ * the state of the interrupt capable inputs.
+ */
+#define APCI1564_EVENT_COS BIT(31)
+#define APCI1564_EVENT_TIMER BIT(30)
+#define APCI1564_EVENT_COUNTER(x) BIT(27 + (x)) /* counter 0-2 */
+#define APCI1564_EVENT_MASK 0xfff0000f /* all but [19:4] */
+
struct apci1564_private {
unsigned long eeprom; /* base address of EEPROM register */
unsigned long timer; /* base address of 12-bit timer */
unsigned long counters; /* base address of 32-bit counters */
- unsigned int mode1; /* riding-edge/high level channels */
+ unsigned int mode1; /* rising-edge/high level channels */
unsigned int mode2; /* falling-edge/low level channels */
unsigned int ctrl; /* interrupt mode OR (edge) . AND (level) */
- struct task_struct *tsk_current;
};
-#include "addi-data/hwdrv_apci1564.c"
-
static int apci1564_reset(struct comedi_device *dev)
{
struct apci1564_private *devpriv = dev->private;
@@ -138,7 +191,7 @@ static int apci1564_reset(struct comedi_device *dev)
outl(0x0, dev->iobase + APCI1564_DO_INT_CTRL_REG);
/* Reset the watchdog registers */
- addi_watchdog_reset(dev->iobase + APCI1564_WDOG_REG);
+ addi_watchdog_reset(dev->iobase + APCI1564_WDOG_IOBASE);
/* Reset the timer registers */
outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
@@ -165,55 +218,54 @@ static irqreturn_t apci1564_interrupt(int irq, void *d)
unsigned int ctrl;
unsigned int chan;
+ s->state &= ~APCI1564_EVENT_MASK;
+
status = inl(dev->iobase + APCI1564_DI_IRQ_REG);
if (status & APCI1564_DI_IRQ_ENA) {
- /* disable the interrupt */
+ /* get the COS interrupt state and set the event flag */
+ s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
+ s->state &= APCI1564_DI_INT_MODE_MASK;
+ s->state |= APCI1564_EVENT_COS;
+
+ /* clear the interrupt */
outl(status & ~APCI1564_DI_IRQ_ENA,
dev->iobase + APCI1564_DI_IRQ_REG);
-
- s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG) &
- 0xffff;
- comedi_buf_write_samples(s, &s->state, 1);
- comedi_handle_events(dev, s);
-
- /* enable the interrupt */
outl(status, dev->iobase + APCI1564_DI_IRQ_REG);
}
status = inl(devpriv->timer + ADDI_TCW_IRQ_REG);
- if (status & 0x01) {
- /* Disable Timer Interrupt */
+ if (status & ADDI_TCW_IRQ) {
+ s->state |= APCI1564_EVENT_TIMER;
+
+ /* clear the interrupt */
ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
-
- /* Send a signal to from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_current, 0);
-
- /* Enable Timer Interrupt */
outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
}
if (devpriv->counters) {
- for (chan = 0; chan < 4; chan++) {
+ for (chan = 0; chan < 3; chan++) {
unsigned long iobase;
iobase = devpriv->counters + APCI1564_COUNTER(chan);
status = inl(iobase + ADDI_TCW_IRQ_REG);
- if (status & 0x01) {
- /* Disable Counter Interrupt */
+ if (status & ADDI_TCW_IRQ) {
+ s->state |= APCI1564_EVENT_COUNTER(chan);
+
+ /* clear the interrupt */
ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
outl(0x0, iobase + ADDI_TCW_CTRL_REG);
-
- /* Send a signal to from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_current, 0);
-
- /* Enable Counter Interrupt */
outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
}
}
}
+ if (s->state & APCI1564_EVENT_MASK) {
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
+ }
+
return IRQ_HANDLED;
}
@@ -255,7 +307,7 @@ static int apci1564_diag_insn_bits(struct comedi_device *dev,
/*
* Change-Of-State (COS) interrupt configuration
*
- * Channels 0 to 15 are interruptible. These channels can be configured
+ * Channels 4 to 19 are interruptible. These channels can be configured
* to generate interrupts based on AND/OR logic for the desired channels.
*
* OR logic
@@ -343,6 +395,10 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
default:
return -EINVAL;
}
+
+ /* ensure the mode bits are in-range for channels [19:4] */
+ devpriv->mode1 &= APCI1564_DI_INT_MODE_MASK;
+ devpriv->mode2 &= APCI1564_DI_INT_MODE_MASK;
break;
default:
return -EINVAL;
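
Editorial aside, not part of this patch: as a usage illustration of the AND/OR COS configuration described above, a hedged comedilib sketch arming rising-edge interrupts on two channels before starting the async command. The data[] layout follows comedi's generic INSN_CONFIG_DIGITAL_TRIG convention and is an assumption here, not this driver's documented ABI.

/*
 * Hypothetical userspace sketch: arm rising-edge COS interrupts on
 * channels 4 and 5 via comedilib.
 */
#include <string.h>
#include <comedilib.h>

static int arm_cos_edges(comedi_t *dev, unsigned int subdev)
{
	comedi_insn insn;
	lsampl_t data[6] = {
		INSN_CONFIG_DIGITAL_TRIG,
		0,				/* trigger number */
		COMEDI_DIGITAL_TRIG_ENABLE_EDGES,
		0,				/* channel shift */
		(1 << 4) | (1 << 5),		/* rising-edge channels */
		0,				/* falling-edge channels */
	};

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 6;
	insn.data = data;
	insn.subdev = subdev;
	return comedi_do_insn(dev, &insn);
}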
@@ -409,7 +465,7 @@ static int apci1564_cos_cmd(struct comedi_device *dev,
{
struct apci1564_private *devpriv = dev->private;
- if (!devpriv->ctrl) {
+	if (!devpriv->ctrl && !(devpriv->mode1 || devpriv->mode2)) {
dev_warn(dev->class_dev,
"Interrupts disabled due to mode configuration!\n");
return -EINVAL;
@@ -433,6 +489,173 @@ static int apci1564_cos_cancel(struct comedi_device *dev,
return 0;
}
+static int apci1564_timer_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int val;
+
+ switch (data[0]) {
+ case INSN_CONFIG_ARM:
+ if (data[1] > s->maxdata)
+ return -EINVAL;
+ outl(data[1], devpriv->timer + ADDI_TCW_RELOAD_REG);
+ outl(ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_TIMER_ENA,
+ devpriv->timer + ADDI_TCW_CTRL_REG);
+ break;
+ case INSN_CONFIG_DISARM:
+ outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
+ break;
+ case INSN_CONFIG_GET_COUNTER_STATUS:
+ data[1] = 0;
+ val = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ if (val & ADDI_TCW_CTRL_IRQ_ENA)
+ data[1] |= COMEDI_COUNTER_ARMED;
+ if (val & ADDI_TCW_CTRL_TIMER_ENA)
+ data[1] |= COMEDI_COUNTER_COUNTING;
+ val = inl(devpriv->timer + ADDI_TCW_STATUS_REG);
+ if (val & ADDI_TCW_STATUS_OVERFLOW)
+ data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
+ data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
+ COMEDI_COUNTER_TERMINAL_COUNT;
+ break;
+ case INSN_CONFIG_SET_CLOCK_SRC:
+ if (data[2] > s->maxdata)
+ return -EINVAL;
+ outl(data[1], devpriv->timer + ADDI_TCW_TIMEBASE_REG);
+ outl(data[2], devpriv->timer + ADDI_TCW_RELOAD_REG);
+ break;
+ case INSN_CONFIG_GET_CLOCK_SRC:
+ data[1] = inl(devpriv->timer + ADDI_TCW_TIMEBASE_REG);
+ data[2] = inl(devpriv->timer + ADDI_TCW_RELOAD_REG);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return insn->n;
+}
+
+static int apci1564_timer_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+
+	/* just write the last value to the reload register */
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
+
+ outl(val, devpriv->timer + ADDI_TCW_RELOAD_REG);
+ }
+
+ return insn->n;
+}
+
+static int apci1564_timer_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ int i;
+
+ /* return the actual value of the timer */
+ for (i = 0; i < insn->n; i++)
+ data[i] = inl(devpriv->timer + ADDI_TCW_VAL_REG);
+
+ return insn->n;
+}
+
+static int apci1564_counter_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+ unsigned int val;
+
+ switch (data[0]) {
+ case INSN_CONFIG_ARM:
+ val = inl(iobase + ADDI_TCW_CTRL_REG);
+ val |= ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_CNTR_ENA;
+ outl(data[1], iobase + ADDI_TCW_RELOAD_REG);
+ outl(val, iobase + ADDI_TCW_CTRL_REG);
+ break;
+ case INSN_CONFIG_DISARM:
+ val = inl(iobase + ADDI_TCW_CTRL_REG);
+ val &= ~(ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_CNTR_ENA);
+ outl(val, iobase + ADDI_TCW_CTRL_REG);
+ break;
+ case INSN_CONFIG_SET_COUNTER_MODE:
+ /*
+ * FIXME: The counter operation is not described in the
+ * datasheet. For now just write the raw data[1] value to
+ * the control register.
+ */
+ outl(data[1], iobase + ADDI_TCW_CTRL_REG);
+ break;
+ case INSN_CONFIG_GET_COUNTER_STATUS:
+ data[1] = 0;
+ val = inl(iobase + ADDI_TCW_CTRL_REG);
+ if (val & ADDI_TCW_CTRL_IRQ_ENA)
+ data[1] |= COMEDI_COUNTER_ARMED;
+ if (val & ADDI_TCW_CTRL_CNTR_ENA)
+ data[1] |= COMEDI_COUNTER_COUNTING;
+ val = inl(iobase + ADDI_TCW_STATUS_REG);
+ if (val & ADDI_TCW_STATUS_OVERFLOW)
+ data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
+ data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
+ COMEDI_COUNTER_TERMINAL_COUNT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return insn->n;
+}
+
+static int apci1564_counter_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+
+	/* just write the last value to the reload register */
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
+
+ outl(val, iobase + ADDI_TCW_RELOAD_REG);
+ }
+
+ return insn->n;
+}
+
+static int apci1564_counter_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+ int i;
+
+ /* return the actual value of the counter */
+ for (i = 0; i < insn->n; i++)
+ data[i] = inl(iobase + ADDI_TCW_VAL_REG);
+
+ return insn->n;
+}
+
static int apci1564_auto_attach(struct comedi_device *dev,
unsigned long context_unused)
{
@@ -501,7 +724,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
if (dev->irq) {
dev->read_subdev = s;
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ | SDF_LSAMPL;
s->n_chan = 1;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -543,7 +766,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
/* Initialize the watchdog subdevice */
s = &dev->subdevices[5];
- ret = addi_watchdog_init(s, dev->iobase + APCI1564_WDOG_REG);
+ ret = addi_watchdog_init(s, dev->iobase + APCI1564_WDOG_IOBASE);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 4437ea3abe8d..be70bd333807 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -570,7 +570,7 @@ static int pci9118_ai_cancel(struct comedi_device *dev,
/* set default config (disable burst and triggers) */
devpriv->ai_cfg = PCI9118_AI_CFG_PDTRG | PCI9118_AI_CFG_PETRG;
outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
- /* reset acqusition control */
+ /* reset acquisition control */
devpriv->ai_ctrl = 0;
outl(devpriv->ai_ctrl, dev->iobase + PCI9118_AI_CTRL_REG);
outl(0, dev->iobase + PCI9118_AI_BURST_NUM_REG);
@@ -1022,12 +1022,12 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/*
* Configure analog input and load the chanlist.
- * The acqusition control bits are enabled later.
+ * The acquisition control bits are enabled later.
*/
pci9118_set_chanlist(dev, s, cmd->chanlist_len, cmd->chanlist,
devpriv->ai_add_front, devpriv->ai_add_back);
- /* Determine acqusition mode and calculate timing */
+ /* Determine acquisition mode and calculate timing */
devpriv->ai_do = 0;
if (cmd->scan_begin_src != TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) {
@@ -1097,7 +1097,7 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->ai_do == 0) {
dev_err(dev->class_dev,
- "Unable to determine acqusition mode! BUG in (*do_cmdtest)?\n");
+ "Unable to determine acquisition mode! BUG in (*do_cmdtest)?\n");
return -EINVAL;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index c773b8ca6599..1f9c08a845b6 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1238,7 +1238,7 @@ static void disable_plx_interrupts(struct comedi_device *dev)
devpriv->plx_intcsr_bits = 0;
writel(devpriv->plx_intcsr_bits,
- devpriv->plx9080_iobase + PLX_INTRCS_REG);
+ devpriv->plx9080_iobase + PLX_REG_INTCSR);
}
static void disable_ai_interrupts(struct comedi_device *dev)
@@ -1291,14 +1291,14 @@ static void init_plx9080(struct comedi_device *dev)
void __iomem *plx_iobase = devpriv->plx9080_iobase;
devpriv->plx_control_bits =
- readl(devpriv->plx9080_iobase + PLX_CONTROL_REG);
+ readl(devpriv->plx9080_iobase + PLX_REG_CNTRL);
#ifdef __BIG_ENDIAN
- bits = BIGEND_DMA0 | BIGEND_DMA1;
+ bits = PLX_BIGEND_DMA0 | PLX_BIGEND_DMA1;
#else
bits = 0;
#endif
- writel(bits, devpriv->plx9080_iobase + PLX_BIGEND_REG);
+ writel(bits, devpriv->plx9080_iobase + PLX_REG_BIGEND);
disable_plx_interrupts(dev);
@@ -1308,38 +1308,39 @@ static void init_plx9080(struct comedi_device *dev)
/* configure dma0 mode */
bits = 0;
/* enable ready input, not sure if this is necessary */
- bits |= PLX_DMA_EN_READYIN_BIT;
+ bits |= PLX_DMAMODE_READYIEN;
/* enable bterm, not sure if this is necessary */
- bits |= PLX_EN_BTERM_BIT;
+ bits |= PLX_DMAMODE_BTERMIEN;
/* enable dma chaining */
- bits |= PLX_EN_CHAIN_BIT;
+ bits |= PLX_DMAMODE_CHAINEN;
/* enable interrupt on dma done
* (probably don't need this, since chain never finishes) */
- bits |= PLX_EN_DMA_DONE_INTR_BIT;
+ bits |= PLX_DMAMODE_DONEIEN;
/* don't increment local address during transfers
* (we are transferring from a fixed fifo register) */
- bits |= PLX_LOCAL_ADDR_CONST_BIT;
+ bits |= PLX_DMAMODE_LACONST;
/* route dma interrupt to pci bus */
- bits |= PLX_DMA_INTR_PCI_BIT;
+ bits |= PLX_DMAMODE_INTRPCI;
/* enable demand mode */
- bits |= PLX_DEMAND_MODE_BIT;
+ bits |= PLX_DMAMODE_DEMAND;
/* enable local burst mode */
- bits |= PLX_DMA_LOCAL_BURST_EN_BIT;
+ bits |= PLX_DMAMODE_BURSTEN;
/* 4020 uses 32 bit dma */
if (board->layout == LAYOUT_4020)
- bits |= PLX_LOCAL_BUS_32_WIDE_BITS;
+ bits |= PLX_DMAMODE_WIDTH32;
else /* localspace0 bus is 16 bits wide */
- bits |= PLX_LOCAL_BUS_16_WIDE_BITS;
- writel(bits, plx_iobase + PLX_DMA1_MODE_REG);
+ bits |= PLX_DMAMODE_WIDTH16;
+ writel(bits, plx_iobase + PLX_REG_DMAMODE1);
if (ao_cmd_is_supported(board))
- writel(bits, plx_iobase + PLX_DMA0_MODE_REG);
+ writel(bits, plx_iobase + PLX_REG_DMAMODE0);
/* enable interrupts on plx 9080 */
devpriv->plx_intcsr_bits |=
- ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
- ICS_DMA0_E | ICS_DMA1_E;
+ PLX_INTCSR_LSEABORTEN | PLX_INTCSR_LSEPARITYEN | PLX_INTCSR_PIEN |
+ PLX_INTCSR_PLIEN | PLX_INTCSR_PABORTIEN | PLX_INTCSR_LIOEN |
+ PLX_INTCSR_DMA0IEN | PLX_INTCSR_DMA1IEN;
writel(devpriv->plx_intcsr_bits,
- devpriv->plx9080_iobase + PLX_INTRCS_REG);
+ devpriv->plx9080_iobase + PLX_REG_INTCSR);
}
static void disable_ai_pacing(struct comedi_device *dev)
@@ -1533,8 +1534,8 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
cpu_to_le32((devpriv->ai_dma_desc_bus_addr +
((i + 1) % ai_dma_ring_count(board)) *
sizeof(devpriv->ai_dma_desc[0])) |
- PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI);
+ PLX_DMADPR_DESCPCI | PLX_DMADPR_TCINTR |
+ PLX_DMADPR_XFERL2P);
}
if (ao_cmd_is_supported(board)) {
for (i = 0; i < AO_DMA_RING_COUNT; i++) {
@@ -1548,8 +1549,8 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
cpu_to_le32((devpriv->ao_dma_desc_bus_addr +
((i + 1) % (AO_DMA_RING_COUNT)) *
sizeof(devpriv->ao_dma_desc[0])) |
- PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT);
+ PLX_DMADPR_DESCPCI |
+ PLX_DMADPR_TCINTR);
}
}
return 0;
@@ -1613,9 +1614,9 @@ static const int i2c_low_udelay = 10;
static void i2c_set_sda(struct comedi_device *dev, int state)
{
struct pcidas64_private *devpriv = dev->private;
- static const int data_bit = CTL_EE_W;
+ static const int data_bit = PLX_CNTRL_EEWB;
void __iomem *plx_control_addr = devpriv->plx9080_iobase +
- PLX_CONTROL_REG;
+ PLX_REG_CNTRL;
if (state) {
/* set data line high */
@@ -1634,9 +1635,9 @@ static void i2c_set_sda(struct comedi_device *dev, int state)
static void i2c_set_scl(struct comedi_device *dev, int state)
{
struct pcidas64_private *devpriv = dev->private;
- static const int clock_bit = CTL_USERO;
+ static const int clock_bit = PLX_CNTRL_USERO;
void __iomem *plx_control_addr = devpriv->plx9080_iobase +
- PLX_CONTROL_REG;
+ PLX_REG_CNTRL;
if (state) {
/* set clock line high */
@@ -1707,7 +1708,7 @@ static void i2c_write(struct comedi_device *dev, unsigned int address,
*/
/* make sure we don't send anything to eeprom */
- devpriv->plx_control_bits &= ~CTL_EE_CS;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EECS;
i2c_stop(dev);
i2c_start(dev);
@@ -2367,14 +2368,8 @@ static inline void dma_start_sync(struct comedi_device *dev,
/* spinlock for plx dma control/status reg */
spin_lock_irqsave(&dev->spinlock, flags);
- if (channel)
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
- PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- else
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
- PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
+ writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_START | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_iobase + PLX_REG_DMACSR(channel));
spin_unlock_irqrestore(&dev->spinlock, flags);
}
@@ -2552,21 +2547,17 @@ static inline void load_first_dma_descriptor(struct comedi_device *dev,
* block. Initializing them to zero seems to fix the problem.
*/
if (dma_channel) {
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA1_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG);
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA1_LOCAL_ADDRESS_REG);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMASIZ1);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMAPADR1);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMALADR1);
writel(descriptor_bits,
- devpriv->plx9080_iobase + PLX_DMA1_DESCRIPTOR_REG);
+ devpriv->plx9080_iobase + PLX_REG_DMADPR1);
} else {
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA0_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA0_LOCAL_ADDRESS_REG);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMASIZ0);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMAPADR0);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMALADR0);
writel(descriptor_bits,
- devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
+ devpriv->plx9080_iobase + PLX_REG_DMADPR0);
}
}
@@ -2643,9 +2634,9 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* give location of first dma descriptor */
load_first_dma_descriptor(dev, 1,
devpriv->ai_dma_desc_bus_addr |
- PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI);
+ PLX_DMADPR_DESCPCI |
+ PLX_DMADPR_TCINTR |
+ PLX_DMADPR_XFERL2P);
dma_start_sync(dev, 1);
}
@@ -2803,12 +2794,7 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
int num_samples = 0;
void __iomem *pci_addr_reg;
- if (channel)
- pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG;
- else
- pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
+ pci_addr_reg = devpriv->plx9080_iobase + PLX_REG_DMAPADR(channel);
/* loop until we have read all the full buffers */
for (j = 0, next_transfer_addr = readl(pci_addr_reg);
@@ -2850,12 +2836,12 @@ static void handle_ai_interrupt(struct comedi_device *dev,
}
/* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
- dma1_status = readb(devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- if (plx_status & ICS_DMA1_A) { /* dma chan 1 interrupt */
- writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
+ dma1_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR1);
+ if (plx_status & PLX_INTCSR_DMA1IA) { /* dma chan 1 interrupt */
+ writeb((dma1_status & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_iobase + PLX_REG_DMACSR1);
- if (dma1_status & PLX_DMA_EN_BIT)
+ if (dma1_status & PLX_DMACSR_ENABLE)
drain_dma_buffers(dev, 1);
}
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -2902,12 +2888,12 @@ static int last_ao_dma_load_completed(struct comedi_device *dev)
unsigned short dma_status;
buffer_index = prev_ao_dma_index(dev);
- dma_status = readb(devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- if ((dma_status & PLX_DMA_DONE_BIT) == 0)
+ dma_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+ if ((dma_status & PLX_DMACSR_DONE) == 0)
return 0;
transfer_address =
- readl(devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
+ readl(devpriv->plx9080_iobase + PLX_REG_DMAPADR0);
if (transfer_address != devpriv->ao_buffer_bus_addr[buffer_index])
return 0;
@@ -2917,8 +2903,8 @@ static int last_ao_dma_load_completed(struct comedi_device *dev)
static inline int ao_dma_needs_restart(struct comedi_device *dev,
unsigned short dma_status)
{
- if ((dma_status & PLX_DMA_DONE_BIT) == 0 ||
- (dma_status & PLX_DMA_EN_BIT) == 0)
+ if ((dma_status & PLX_DMACSR_DONE) == 0 ||
+ (dma_status & PLX_DMACSR_ENABLE) == 0)
return 0;
if (last_ao_dma_load_completed(dev))
return 0;
@@ -2931,9 +2917,8 @@ static void restart_ao_dma(struct comedi_device *dev)
struct pcidas64_private *devpriv = dev->private;
unsigned int dma_desc_bits;
- dma_desc_bits =
- readl(devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
- dma_desc_bits &= ~PLX_END_OF_CHAIN_BIT;
+ dma_desc_bits = readl(devpriv->plx9080_iobase + PLX_REG_DMADPR0);
+ dma_desc_bits &= ~PLX_DMADPR_CHAINEND;
load_first_dma_descriptor(dev, 0, dma_desc_bits);
dma_start_sync(dev, 0);
@@ -2974,14 +2959,14 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
devpriv->ao_dma_desc[buffer_index].transfer_size = cpu_to_le32(nbytes);
/* set end of chain bit so we catch underruns */
next_bits = le32_to_cpu(devpriv->ao_dma_desc[buffer_index].next);
- next_bits |= PLX_END_OF_CHAIN_BIT;
+ next_bits |= PLX_DMADPR_CHAINEND;
devpriv->ao_dma_desc[buffer_index].next = cpu_to_le32(next_bits);
/*
* clear end of chain bit on previous buffer now that we have set it
* for the last buffer
*/
next_bits = le32_to_cpu(devpriv->ao_dma_desc[prev_buffer_index].next);
- next_bits &= ~PLX_END_OF_CHAIN_BIT;
+ next_bits &= ~PLX_DMADPR_CHAINEND;
devpriv->ao_dma_desc[prev_buffer_index].next = cpu_to_le32(next_bits);
devpriv->ao_dma_index = (buffer_index + 1) % AO_DMA_RING_COUNT;
@@ -2994,8 +2979,7 @@ static void load_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
struct pcidas64_private *devpriv = dev->private;
unsigned int num_bytes;
unsigned int next_transfer_addr;
- void __iomem *pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
+ void __iomem *pci_addr_reg = devpriv->plx9080_iobase + PLX_REG_DMAPADR0;
unsigned int buffer_index;
do {
@@ -3030,17 +3014,18 @@ static void handle_ao_interrupt(struct comedi_device *dev,
/* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
- dma0_status = readb(devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- if (plx_status & ICS_DMA0_A) { /* dma chan 0 interrupt */
- if ((dma0_status & PLX_DMA_EN_BIT) &&
- !(dma0_status & PLX_DMA_DONE_BIT))
- writeb(PLX_DMA_EN_BIT | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- else
- writeb(PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
+ dma0_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+ if (plx_status & PLX_INTCSR_DMA0IA) { /* dma chan 0 interrupt */
+ if ((dma0_status & PLX_DMACSR_ENABLE) &&
+ !(dma0_status & PLX_DMACSR_DONE)) {
+ writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+ } else {
+ writeb(PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+ }
spin_unlock_irqrestore(&dev->spinlock, flags);
- if (dma0_status & PLX_DMA_EN_BIT) {
+ if (dma0_status & PLX_DMACSR_ENABLE) {
load_ao_dma(dev, cmd);
/* try to recover from dma end-of-chain event */
if (ao_dma_needs_restart(dev, dma0_status))
@@ -3069,7 +3054,7 @@ static irqreturn_t handle_interrupt(int irq, void *d)
uint32_t plx_status;
uint32_t plx_bits;
- plx_status = readl(devpriv->plx9080_iobase + PLX_INTRCS_REG);
+ plx_status = readl(devpriv->plx9080_iobase + PLX_REG_INTCSR);
status = readw(devpriv->main_iobase + HW_STATUS_REG);
/*
@@ -3083,10 +3068,11 @@ static irqreturn_t handle_interrupt(int irq, void *d)
handle_ai_interrupt(dev, status, plx_status);
handle_ao_interrupt(dev, status, plx_status);
- /* clear possible plx9080 interrupt sources */
- if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
- plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
+ /* clear possible plx9080 interrupt sources */
+ if (plx_status & PLX_INTCSR_LDBIA) {
+ /* clear local doorbell interrupt */
+ plx_bits = readl(devpriv->plx9080_iobase + PLX_REG_L2PDBELL);
+ writel(plx_bits, devpriv->plx9080_iobase + PLX_REG_L2PDBELL);
}
return IRQ_HANDLED;
@@ -3324,7 +3310,7 @@ static int ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
set_dac_select_reg(dev, cmd);
set_dac_interval_regs(dev, cmd);
load_first_dma_descriptor(dev, 0, devpriv->ao_dma_desc_bus_addr |
- PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT);
+ PLX_DMADPR_DESCPCI | PLX_DMADPR_TCINTR);
set_dac_control1_reg(dev, cmd);
s->async->inttrig = ao_inttrig;
@@ -3725,19 +3711,19 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
unsigned int bitstream = (read_command << 8) | address;
unsigned int bit;
void __iomem * const plx_control_addr =
- devpriv->plx9080_iobase + PLX_CONTROL_REG;
+ devpriv->plx9080_iobase + PLX_REG_CNTRL;
uint16_t value;
static const int value_length = 16;
static const int eeprom_udelay = 1;
udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CLK & ~CTL_EE_CS;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EESK & ~PLX_CNTRL_EECS;
/* make sure we don't send anything to the i2c bus on 4020 */
- devpriv->plx_control_bits |= CTL_USERO;
+ devpriv->plx_control_bits |= PLX_CNTRL_USERO;
writel(devpriv->plx_control_bits, plx_control_addr);
/* activate serial eeprom */
udelay(eeprom_udelay);
- devpriv->plx_control_bits |= CTL_EE_CS;
+ devpriv->plx_control_bits |= PLX_CNTRL_EECS;
writel(devpriv->plx_control_bits, plx_control_addr);
/* write read command and desired memory address */
@@ -3745,16 +3731,16 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
/* set bit to be written */
udelay(eeprom_udelay);
if (bitstream & bit)
- devpriv->plx_control_bits |= CTL_EE_W;
+ devpriv->plx_control_bits |= PLX_CNTRL_EEWB;
else
- devpriv->plx_control_bits &= ~CTL_EE_W;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EEWB;
writel(devpriv->plx_control_bits, plx_control_addr);
/* clock in bit */
udelay(eeprom_udelay);
- devpriv->plx_control_bits |= CTL_EE_CLK;
+ devpriv->plx_control_bits |= PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CLK;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
}
/* read back value from eeprom memory location */
@@ -3762,19 +3748,19 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
for (bit = 1 << (value_length - 1); bit; bit >>= 1) {
/* clock out bit */
udelay(eeprom_udelay);
- devpriv->plx_control_bits |= CTL_EE_CLK;
+ devpriv->plx_control_bits |= PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CLK;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
udelay(eeprom_udelay);
- if (readl(plx_control_addr) & CTL_EE_R)
+ if (readl(plx_control_addr) & PLX_CNTRL_EERB)
value |= bit;
}
/* deactivate eeprom serial input */
udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CS;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EECS;
writel(devpriv->plx_control_bits, plx_control_addr);
return value;
@@ -3962,7 +3948,8 @@ static int setup_subdevices(struct comedi_device *dev)
/* serial EEPROM, if present */
s = &dev->subdevices[8];
- if (readl(devpriv->plx9080_iobase + PLX_CONTROL_REG) & CTL_EECHK) {
+ if (readl(devpriv->plx9080_iobase + PLX_REG_CNTRL) &
+ PLX_CNTRL_EEPRESENT) {
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
s->n_chan = 128;
@@ -4019,16 +4006,16 @@ static int auto_attach(struct comedi_device *dev,
}
/* figure out what local addresses are */
- local_range = readl(devpriv->plx9080_iobase + PLX_LAS0RNG_REG) &
- LRNG_MEM_MASK;
- local_decode = readl(devpriv->plx9080_iobase + PLX_LAS0MAP_REG) &
- local_range & LMAP_MEM_MASK;
+ local_range = readl(devpriv->plx9080_iobase + PLX_REG_LAS0RR) &
+ PLX_LASRR_MEM_MASK;
+ local_decode = readl(devpriv->plx9080_iobase + PLX_REG_LAS0BA) &
+ local_range & PLX_LASBA_MEM_MASK;
devpriv->local0_iobase = ((uint32_t)devpriv->main_phys_iobase &
~local_range) | local_decode;
- local_range = readl(devpriv->plx9080_iobase + PLX_LAS1RNG_REG) &
- LRNG_MEM_MASK;
- local_decode = readl(devpriv->plx9080_iobase + PLX_LAS1MAP_REG) &
- local_range & LMAP_MEM_MASK;
+ local_range = readl(devpriv->plx9080_iobase + PLX_REG_LAS1RR) &
+ PLX_LASRR_MEM_MASK;
+ local_decode = readl(devpriv->plx9080_iobase + PLX_REG_LAS1BA) &
+ local_range & PLX_LASBA_MEM_MASK;
devpriv->local1_iobase = ((uint32_t)devpriv->dio_counter_phys_iobase &
~local_range) | local_decode;
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 50b76eccb7d7..64a5ea3810d4 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -55,16 +55,16 @@
struct bonded_device {
struct comedi_device *dev;
- unsigned minor;
- unsigned subdev;
- unsigned nchans;
+ unsigned int minor;
+ unsigned int subdev;
+ unsigned int nchans;
};
struct comedi_bond_private {
char name[256];
struct bonded_device **devs;
- unsigned ndevs;
- unsigned nchans;
+ unsigned int ndevs;
+ unsigned int nchans;
};
static int bonding_dio_insn_bits(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index a536a15c1d30..65daef0c00d5 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -116,12 +116,12 @@
#define DAQBOARD2000_SUBSYSTEM_IDS4 0x0004 /* Daqboard/2000 - 4 Dacs */
/* Initialization bits for the Serial EEPROM Control Register */
-#define DAQBOARD2000_SECRProgPinHi 0x8001767e
-#define DAQBOARD2000_SECRProgPinLo 0x8000767e
-#define DAQBOARD2000_SECRLocalBusHi 0xc000767e
-#define DAQBOARD2000_SECRLocalBusLo 0x8000767e
-#define DAQBOARD2000_SECRReloadHi 0xa000767e
-#define DAQBOARD2000_SECRReloadLo 0x8000767e
+#define DB2K_SECR_PROG_PIN_HI 0x8001767e
+#define DB2K_SECR_PROG_PIN_LO 0x8000767e
+#define DB2K_SECR_LOCAL_BUS_HI 0xc000767e
+#define DB2K_SECR_LOCAL_BUS_LO 0x8000767e
+#define DB2K_SECR_RELOAD_HI 0xa000767e
+#define DB2K_SECR_RELOAD_LO 0x8000767e
/* SECR status bits */
#define DAQBOARD2000_EEPROM_PRESENT 0x10000000
@@ -151,119 +151,108 @@ static const struct comedi_lrange range_daqboard2000_ai = {
/*
* Register Memory Map
*/
-#define acqControl 0x00 /* u16 */
-#define acqScanListFIFO 0x02 /* u16 */
-#define acqPacerClockDivLow 0x04 /* u32 */
-#define acqScanCounter 0x08 /* u16 */
-#define acqPacerClockDivHigh 0x0a /* u16 */
-#define acqTriggerCount 0x0c /* u16 */
-#define acqResultsFIFO 0x10 /* u16 */
-#define acqResultsShadow 0x14 /* u16 */
-#define acqAdcResult 0x18 /* u16 */
-#define dacScanCounter 0x1c /* u16 */
-#define dacControl 0x20 /* u16 */
-#define dacFIFO 0x24 /* s16 */
-#define dacPacerClockDiv 0x2a /* u16 */
-#define refDacs 0x2c /* u16 */
-#define dioControl 0x30 /* u16 */
-#define dioP3hsioData 0x32 /* s16 */
-#define dioP3Control 0x34 /* u16 */
-#define calEepromControl 0x36 /* u16 */
-#define dacSetting(x) (0x38 + (x)*2) /* s16 */
-#define dioP2ExpansionIO8Bit 0x40 /* s16 */
-#define ctrTmrControl 0x80 /* u16 */
-#define ctrInput(x) (0x88 + (x)*2) /* s16 */
-#define timerDivisor(x) (0xa0 + (x)*2) /* u16 */
-#define dmaControl 0xb0 /* u16 */
-#define trigControl 0xb2 /* u16 */
-#define calEeprom 0xb8 /* u16 */
-#define acqDigitalMark 0xba /* u16 */
-#define trigDacs 0xbc /* u16 */
-#define dioP2ExpansionIO16Bit(x) (0xc0 + (x)*2) /* s16 */
+#define DB2K_REG_ACQ_CONTROL 0x00 /* u16 (w) */
+#define DB2K_REG_ACQ_STATUS 0x00 /* u16 (r) */
+#define DB2K_REG_ACQ_SCAN_LIST_FIFO 0x02 /* u16 */
+#define DB2K_REG_ACQ_PACER_CLOCK_DIV_LOW 0x04 /* u32 */
+#define DB2K_REG_ACQ_SCAN_COUNTER 0x08 /* u16 */
+#define DB2K_REG_ACQ_PACER_CLOCK_DIV_HIGH 0x0a /* u16 */
+#define DB2K_REG_ACQ_TRIGGER_COUNT 0x0c /* u16 */
+#define DB2K_REG_ACQ_RESULTS_FIFO 0x10 /* u16 */
+#define DB2K_REG_ACQ_RESULTS_SHADOW 0x14 /* u16 */
+#define DB2K_REG_ACQ_ADC_RESULT 0x18 /* u16 */
+#define DB2K_REG_DAC_SCAN_COUNTER 0x1c /* u16 */
+#define DB2K_REG_DAC_CONTROL 0x20 /* u16 (w) */
+#define DB2K_REG_DAC_STATUS 0x20 /* u16 (r) */
+#define DB2K_REG_DAC_FIFO 0x24 /* s16 */
+#define DB2K_REG_DAC_PACER_CLOCK_DIV 0x2a /* u16 */
+#define DB2K_REG_REF_DACS 0x2c /* u16 */
+#define DB2K_REG_DIO_CONTROL 0x30 /* u16 */
+#define DB2K_REG_P3_HSIO_DATA 0x32 /* s16 */
+#define DB2K_REG_P3_CONTROL 0x34 /* u16 */
+#define DB2K_REG_CAL_EEPROM_CONTROL 0x36 /* u16 */
+#define DB2K_REG_DAC_SETTING(x) (0x38 + (x) * 2) /* s16 */
+#define DB2K_REG_DIO_P2_EXP_IO_8_BIT 0x40 /* s16 */
+#define DB2K_REG_COUNTER_TIMER_CONTROL 0x80 /* u16 */
+#define DB2K_REG_COUNTER_INPUT(x) (0x88 + (x) * 2) /* s16 */
+#define DB2K_REG_TIMER_DIV(x) (0xa0 + (x) * 2) /* u16 */
+#define DB2K_REG_DMA_CONTROL 0xb0 /* u16 */
+#define DB2K_REG_TRIG_CONTROL 0xb2 /* u16 */
+#define DB2K_REG_CAL_EEPROM 0xb8 /* u16 */
+#define DB2K_REG_ACQ_DIGITAL_MARK 0xba /* u16 */
+#define DB2K_REG_TRIG_DACS 0xbc /* u16 */
+#define DB2K_REG_DIO_P2_EXP_IO_16_BIT(x) (0xc0 + (x) * 2) /* s16 */
/* Scan Sequencer programming */
-#define DAQBOARD2000_SeqStartScanList 0x0011
-#define DAQBOARD2000_SeqStopScanList 0x0010
+#define DB2K_ACQ_CONTROL_SEQ_START_SCAN_LIST 0x0011
+#define DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST 0x0010
/* Prepare for acquisition */
-#define DAQBOARD2000_AcqResetScanListFifo 0x0004
-#define DAQBOARD2000_AcqResetResultsFifo 0x0002
-#define DAQBOARD2000_AcqResetConfigPipe 0x0001
-
-/* Acqusition status bits */
-#define DAQBOARD2000_AcqResultsFIFOMore1Sample 0x0001
-#define DAQBOARD2000_AcqResultsFIFOHasValidData 0x0002
-#define DAQBOARD2000_AcqResultsFIFOOverrun 0x0004
-#define DAQBOARD2000_AcqLogicScanning 0x0008
-#define DAQBOARD2000_AcqConfigPipeFull 0x0010
-#define DAQBOARD2000_AcqScanListFIFOEmpty 0x0020
-#define DAQBOARD2000_AcqAdcNotReady 0x0040
-#define DAQBOARD2000_ArbitrationFailure 0x0080
-#define DAQBOARD2000_AcqPacerOverrun 0x0100
-#define DAQBOARD2000_DacPacerOverrun 0x0200
-#define DAQBOARD2000_AcqHardwareError 0x01c0
-
-/* Scan Sequencer programming */
-#define DAQBOARD2000_SeqStartScanList 0x0011
-#define DAQBOARD2000_SeqStopScanList 0x0010
+#define DB2K_ACQ_CONTROL_RESET_SCAN_LIST_FIFO 0x0004
+#define DB2K_ACQ_CONTROL_RESET_RESULTS_FIFO 0x0002
+#define DB2K_ACQ_CONTROL_RESET_CONFIG_PIPE 0x0001
/* Pacer Clock Control */
-#define DAQBOARD2000_AdcPacerInternal 0x0030
-#define DAQBOARD2000_AdcPacerExternal 0x0032
-#define DAQBOARD2000_AdcPacerEnable 0x0031
-#define DAQBOARD2000_AdcPacerEnableDacPacer 0x0034
-#define DAQBOARD2000_AdcPacerDisable 0x0030
-#define DAQBOARD2000_AdcPacerNormalMode 0x0060
-#define DAQBOARD2000_AdcPacerCompatibilityMode 0x0061
-#define DAQBOARD2000_AdcPacerInternalOutEnable 0x0008
-#define DAQBOARD2000_AdcPacerExternalRising 0x0100
+#define DB2K_ACQ_CONTROL_ADC_PACER_INTERNAL 0x0030
+#define DB2K_ACQ_CONTROL_ADC_PACER_EXTERNAL 0x0032
+#define DB2K_ACQ_CONTROL_ADC_PACER_ENABLE 0x0031
+#define DB2K_ACQ_CONTROL_ADC_PACER_ENABLE_DAC_PACER 0x0034
+#define DB2K_ACQ_CONTROL_ADC_PACER_DISABLE 0x0030
+#define DB2K_ACQ_CONTROL_ADC_PACER_NORMAL_MODE 0x0060
+#define DB2K_ACQ_CONTROL_ADC_PACER_COMPATIBILITY_MODE 0x0061
+#define DB2K_ACQ_CONTROL_ADC_PACER_INTERNAL_OUT_ENABLE 0x0008
+#define DB2K_ACQ_CONTROL_ADC_PACER_EXTERNAL_RISING 0x0100
+
+/* Acquisition status bits */
+#define DB2K_ACQ_STATUS_RESULTS_FIFO_MORE_1_SAMPLE 0x0001
+#define DB2K_ACQ_STATUS_RESULTS_FIFO_HAS_DATA 0x0002
+#define DB2K_ACQ_STATUS_RESULTS_FIFO_OVERRUN 0x0004
+#define DB2K_ACQ_STATUS_LOGIC_SCANNING 0x0008
+#define DB2K_ACQ_STATUS_CONFIG_PIPE_FULL 0x0010
+#define DB2K_ACQ_STATUS_SCAN_LIST_FIFO_EMPTY 0x0020
+#define DB2K_ACQ_STATUS_ADC_NOT_READY 0x0040
+#define DB2K_ACQ_STATUS_ARBITRATION_FAILURE 0x0080
+#define DB2K_ACQ_STATUS_ADC_PACER_OVERRUN 0x0100
+#define DB2K_ACQ_STATUS_DAC_PACER_OVERRUN 0x0200
/* DAC status */
-#define DAQBOARD2000_DacFull 0x0001
-#define DAQBOARD2000_RefBusy 0x0002
-#define DAQBOARD2000_TrgBusy 0x0004
-#define DAQBOARD2000_CalBusy 0x0008
-#define DAQBOARD2000_Dac0Busy 0x0010
-#define DAQBOARD2000_Dac1Busy 0x0020
-#define DAQBOARD2000_Dac2Busy 0x0040
-#define DAQBOARD2000_Dac3Busy 0x0080
+#define DB2K_DAC_STATUS_DAC_FULL 0x0001
+#define DB2K_DAC_STATUS_REF_BUSY 0x0002
+#define DB2K_DAC_STATUS_TRIG_BUSY 0x0004
+#define DB2K_DAC_STATUS_CAL_BUSY 0x0008
+#define DB2K_DAC_STATUS_DAC_BUSY(x) (0x0010 << (x))
/* DAC control */
-#define DAQBOARD2000_Dac0Enable 0x0021
-#define DAQBOARD2000_Dac1Enable 0x0031
-#define DAQBOARD2000_Dac2Enable 0x0041
-#define DAQBOARD2000_Dac3Enable 0x0051
-#define DAQBOARD2000_DacEnableBit 0x0001
-#define DAQBOARD2000_Dac0Disable 0x0020
-#define DAQBOARD2000_Dac1Disable 0x0030
-#define DAQBOARD2000_Dac2Disable 0x0040
-#define DAQBOARD2000_Dac3Disable 0x0050
-#define DAQBOARD2000_DacResetFifo 0x0004
-#define DAQBOARD2000_DacPatternDisable 0x0060
-#define DAQBOARD2000_DacPatternEnable 0x0061
-#define DAQBOARD2000_DacSelectSignedData 0x0002
-#define DAQBOARD2000_DacSelectUnsignedData 0x0000
+#define DB2K_DAC_CONTROL_ENABLE_BIT 0x0001
+#define DB2K_DAC_CONTROL_DATA_IS_SIGNED 0x0002
+#define DB2K_DAC_CONTROL_RESET_FIFO 0x0004
+#define DB2K_DAC_CONTROL_DAC_DISABLE(x) (0x0020 + ((x) << 4))
+#define DB2K_DAC_CONTROL_DAC_ENABLE(x) (0x0021 + ((x) << 4))
+#define DB2K_DAC_CONTROL_PATTERN_DISABLE 0x0060
+#define DB2K_DAC_CONTROL_PATTERN_ENABLE 0x0061
/* Trigger Control */
-#define DAQBOARD2000_TrigAnalog 0x0000
-#define DAQBOARD2000_TrigTTL 0x0010
-#define DAQBOARD2000_TrigTransHiLo 0x0004
-#define DAQBOARD2000_TrigTransLoHi 0x0000
-#define DAQBOARD2000_TrigAbove 0x0000
-#define DAQBOARD2000_TrigBelow 0x0004
-#define DAQBOARD2000_TrigLevelSense 0x0002
-#define DAQBOARD2000_TrigEdgeSense 0x0000
-#define DAQBOARD2000_TrigEnable 0x0001
-#define DAQBOARD2000_TrigDisable 0x0000
+#define DB2K_TRIG_CONTROL_TYPE_ANALOG 0x0000
+#define DB2K_TRIG_CONTROL_TYPE_TTL 0x0010
+#define DB2K_TRIG_CONTROL_EDGE_HI_LO 0x0004
+#define DB2K_TRIG_CONTROL_EDGE_LO_HI 0x0000
+#define DB2K_TRIG_CONTROL_LEVEL_ABOVE 0x0000
+#define DB2K_TRIG_CONTROL_LEVEL_BELOW 0x0004
+#define DB2K_TRIG_CONTROL_SENSE_LEVEL 0x0002
+#define DB2K_TRIG_CONTROL_SENSE_EDGE 0x0000
+#define DB2K_TRIG_CONTROL_ENABLE 0x0001
+#define DB2K_TRIG_CONTROL_DISABLE 0x0000
/* Reference Dac Selection */
-#define DAQBOARD2000_PosRefDacSelect 0x0100
-#define DAQBOARD2000_NegRefDacSelect 0x0000
+#define DB2K_REF_DACS_SET 0x0080
+#define DB2K_REF_DACS_SELECT_POS_REF 0x0100
+#define DB2K_REF_DACS_SELECT_NEG_REF 0x0000
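
The per-channel constants from the old header collapse into parameterized macros here; a quick compile-time check (illustrative only, not part of the patch) confirms they reproduce the removed values:

/* Illustrative check that the new parameterized macros reproduce the
 * old per-channel constants; not part of the patch itself. C11. */
#include <assert.h>

#define DB2K_DAC_STATUS_DAC_BUSY(x)	(0x0010 << (x))
#define DB2K_DAC_CONTROL_DAC_ENABLE(x)	(0x0021 + ((x) << 4))
#define DB2K_DAC_CONTROL_DAC_DISABLE(x)	(0x0020 + ((x) << 4))

static_assert(DB2K_DAC_STATUS_DAC_BUSY(0) == 0x0010, "old Dac0Busy");
static_assert(DB2K_DAC_STATUS_DAC_BUSY(3) == 0x0080, "old Dac3Busy");
static_assert(DB2K_DAC_CONTROL_DAC_ENABLE(0) == 0x0021, "old Dac0Enable");
static_assert(DB2K_DAC_CONTROL_DAC_ENABLE(3) == 0x0051, "old Dac3Enable");
static_assert(DB2K_DAC_CONTROL_DAC_DISABLE(2) == 0x0040, "old Dac2Disable");
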
struct daq200_boardtype {
const char *name;
int id;
};
+
static const struct daq200_boardtype boardtypes[] = {
{"ids2", DAQBOARD2000_SUBSYSTEM_IDS2},
{"ids4", DAQBOARD2000_SUBSYSTEM_IDS4},
@@ -276,15 +265,16 @@ struct daqboard2000_private {
void __iomem *plx;
};
-static void writeAcqScanListEntry(struct comedi_device *dev, u16 entry)
+static void daqboard2000_write_acq_scan_list_entry(struct comedi_device *dev,
+ u16 entry)
{
- /* udelay(4); */
- writew(entry & 0x00ff, dev->mmio + acqScanListFIFO);
- /* udelay(4); */
- writew((entry >> 8) & 0x00ff, dev->mmio + acqScanListFIFO);
+ writew(entry & 0x00ff, dev->mmio + DB2K_REG_ACQ_SCAN_LIST_FIFO);
+ writew((entry >> 8) & 0x00ff,
+ dev->mmio + DB2K_REG_ACQ_SCAN_LIST_FIFO);
}
-static void setup_sampling(struct comedi_device *dev, int chan, int gain)
+static void daqboard2000_setup_sampling(struct comedi_device *dev, int chan,
+ int gain)
{
u16 word0, word1, word2, word3;
@@ -315,17 +305,13 @@ static void setup_sampling(struct comedi_device *dev, int chan, int gain)
word3 = 0;
break;
}
-/*
- dev->eeprom.correctionDACSE[i][j][k].offset = 0x800;
- dev->eeprom.correctionDACSE[i][j][k].gain = 0xc00;
-*/
/* These should be read from EEPROM */
- word2 |= 0x0800;
- word3 |= 0xc000;
- writeAcqScanListEntry(dev, word0);
- writeAcqScanListEntry(dev, word1);
- writeAcqScanListEntry(dev, word2);
- writeAcqScanListEntry(dev, word3);
+ word2 |= 0x0800; /* offset */
+ word3 |= 0xc000; /* gain */
+ daqboard2000_write_acq_scan_list_entry(dev, word0);
+ daqboard2000_write_acq_scan_list_entry(dev, word1);
+ daqboard2000_write_acq_scan_list_entry(dev, word2);
+ daqboard2000_write_acq_scan_list_entry(dev, word3);
}
static int daqboard2000_ai_status(struct comedi_device *dev,
@@ -335,7 +321,7 @@ static int daqboard2000_ai_status(struct comedi_device *dev,
{
unsigned int status;
- status = readw(dev->mmio + acqControl);
+ status = readw(dev->mmio + DB2K_REG_ACQ_STATUS);
if (status & context)
return 0;
return -EBUSY;
@@ -350,50 +336,58 @@ static int daqboard2000_ai_insn_read(struct comedi_device *dev,
int ret;
int i;
- writew(DAQBOARD2000_AcqResetScanListFifo |
- DAQBOARD2000_AcqResetResultsFifo |
- DAQBOARD2000_AcqResetConfigPipe, dev->mmio + acqControl);
+ writew(DB2K_ACQ_CONTROL_RESET_SCAN_LIST_FIFO |
+ DB2K_ACQ_CONTROL_RESET_RESULTS_FIFO |
+ DB2K_ACQ_CONTROL_RESET_CONFIG_PIPE,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
/*
* If pacer clock is not set to some high value (> 10 us), we
* risk multiple samples being put into the result FIFO.
*/
/* 1 second, should be long enough */
- writel(1000000, dev->mmio + acqPacerClockDivLow);
- writew(0, dev->mmio + acqPacerClockDivHigh);
+ writel(1000000, dev->mmio + DB2K_REG_ACQ_PACER_CLOCK_DIV_LOW);
+ writew(0, dev->mmio + DB2K_REG_ACQ_PACER_CLOCK_DIV_HIGH);
gain = CR_RANGE(insn->chanspec);
chan = CR_CHAN(insn->chanspec);
- /* This doesn't look efficient. I decided to take the conservative
+ /*
+ * This doesn't look efficient. I decided to take the conservative
* approach when I did the insn conversion. Perhaps it would be
* better to have broken it completely, then someone would have been
- * forced to fix it. --ds */
+ * forced to fix it. --ds
+ */
for (i = 0; i < insn->n; i++) {
- setup_sampling(dev, chan, gain);
+ daqboard2000_setup_sampling(dev, chan, gain);
/* Enable reading from the scanlist FIFO */
- writew(DAQBOARD2000_SeqStartScanList, dev->mmio + acqControl);
+ writew(DB2K_ACQ_CONTROL_SEQ_START_SCAN_LIST,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
- DAQBOARD2000_AcqConfigPipeFull);
+ DB2K_ACQ_STATUS_CONFIG_PIPE_FULL);
if (ret)
return ret;
- writew(DAQBOARD2000_AdcPacerEnable, dev->mmio + acqControl);
+ writew(DB2K_ACQ_CONTROL_ADC_PACER_ENABLE,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
- DAQBOARD2000_AcqLogicScanning);
+ DB2K_ACQ_STATUS_LOGIC_SCANNING);
if (ret)
return ret;
- ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
- DAQBOARD2000_AcqResultsFIFOHasValidData);
+ ret =
+ comedi_timeout(dev, s, insn, daqboard2000_ai_status,
+ DB2K_ACQ_STATUS_RESULTS_FIFO_HAS_DATA);
if (ret)
return ret;
- data[i] = readw(dev->mmio + acqResultsFIFO);
- writew(DAQBOARD2000_AdcPacerDisable, dev->mmio + acqControl);
- writew(DAQBOARD2000_SeqStopScanList, dev->mmio + acqControl);
+ data[i] = readw(dev->mmio + DB2K_REG_ACQ_RESULTS_FIFO);
+ writew(DB2K_ACQ_CONTROL_ADC_PACER_DISABLE,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
+ writew(DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
}
return i;
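
Each conversion in the loop above leans on comedi_timeout(), which repeatedly invokes the status callback with the given context until the callback returns 0 or the core's deadline expires; daqboard2000_ai_status() treats the context as a mask of DB2K_ACQ_STATUS_* bits. A simplified userspace model of that polling contract, with a made-up status source:

/* A simplified model of the comedi_timeout() contract used above: poll
 * a status callback until it stops returning -EBUSY. The real kernel
 * helper also sleeps and enforces a deadline; fake_status() is a
 * made-up register for illustration.
 */
#include <errno.h>
#include <stdio.h>

#define CONFIG_PIPE_FULL 0x0010	/* mirrors DB2K_ACQ_STATUS_CONFIG_PIPE_FULL */

static unsigned int fake_status(void)
{
	static int polls;

	return (++polls >= 3) ? CONFIG_PIPE_FULL : 0;	/* ready on poll 3 */
}

/* mirrors daqboard2000_ai_status(): 0 once the awaited bit is up */
static int ai_status(unsigned int status, unsigned long context)
{
	return (status & context) ? 0 : -EBUSY;
}

int main(void)
{
	int polls, ret = -ETIMEDOUT;

	for (polls = 0; polls < 100; polls++) {
		if (ai_status(fake_status(), CONFIG_PIPE_FULL) == 0) {
			ret = 0;
			break;
		}
	}
	printf("ret=%d after %d polls\n", ret, polls);	/* ret=0 after 2 */
	return 0;
}
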
@@ -407,8 +401,8 @@ static int daqboard2000_ao_eoc(struct comedi_device *dev,
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int status;
- status = readw(dev->mmio + dacControl);
- if ((status & ((chan + 1) * 0x0010)) == 0)
+ status = readw(dev->mmio + DB2K_REG_DAC_STATUS);
+ if ((status & DB2K_DAC_STATUS_DAC_BUSY(chan)) == 0)
return 0;
return -EBUSY;
}
@@ -425,7 +419,7 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
unsigned int val = data[i];
int ret;
- writew(val, dev->mmio + dacSetting(chan));
+ writew(val, dev->mmio + DB2K_REG_DAC_SETTING(chan));
ret = comedi_timeout(dev, s, insn, daqboard2000_ao_eoc, 0);
if (ret)
@@ -437,39 +431,39 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
return insn->n;
}
-static void daqboard2000_resetLocalBus(struct comedi_device *dev)
+static void daqboard2000_reset_local_bus(struct comedi_device *dev)
{
struct daqboard2000_private *devpriv = dev->private;
- writel(DAQBOARD2000_SECRLocalBusHi, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_LOCAL_BUS_HI, devpriv->plx + 0x6c);
mdelay(10);
- writel(DAQBOARD2000_SECRLocalBusLo, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_LOCAL_BUS_LO, devpriv->plx + 0x6c);
mdelay(10);
}
-static void daqboard2000_reloadPLX(struct comedi_device *dev)
+static void daqboard2000_reload_plx(struct comedi_device *dev)
{
struct daqboard2000_private *devpriv = dev->private;
- writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_RELOAD_LO, devpriv->plx + 0x6c);
mdelay(10);
- writel(DAQBOARD2000_SECRReloadHi, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_RELOAD_HI, devpriv->plx + 0x6c);
mdelay(10);
- writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_RELOAD_LO, devpriv->plx + 0x6c);
mdelay(10);
}
-static void daqboard2000_pulseProgPin(struct comedi_device *dev)
+static void daqboard2000_pulse_prog_pin(struct comedi_device *dev)
{
struct daqboard2000_private *devpriv = dev->private;
- writel(DAQBOARD2000_SECRProgPinHi, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_PROG_PIN_HI, devpriv->plx + 0x6c);
mdelay(10);
- writel(DAQBOARD2000_SECRProgPinLo, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_PROG_PIN_LO, devpriv->plx + 0x6c);
mdelay(10); /* Not in the original code, but I like symmetry... */
}
-static int daqboard2000_pollCPLD(struct comedi_device *dev, int mask)
+static int daqboard2000_poll_cpld(struct comedi_device *dev, int mask)
{
int result = 0;
int i;
@@ -482,17 +476,17 @@ static int daqboard2000_pollCPLD(struct comedi_device *dev, int mask)
result = 1;
break;
}
- udelay(100);
+ usleep_range(100, 1000);
}
udelay(5);
return result;
}
-static int daqboard2000_writeCPLD(struct comedi_device *dev, int data)
+static int daqboard2000_write_cpld(struct comedi_device *dev, int data)
{
int result = 0;
- udelay(10);
+ usleep_range(10, 20);
writew(data, dev->mmio + 0x1000);
if ((readw(dev->mmio + 0x1000) & DAQBOARD2000_CPLD_INIT) ==
DAQBOARD2000_CPLD_INIT) {
@@ -501,9 +495,9 @@ static int daqboard2000_writeCPLD(struct comedi_device *dev, int data)
return result;
}
-static int initialize_daqboard2000(struct comedi_device *dev,
- const u8 *cpld_array, size_t len,
- unsigned long context)
+static int daqboard2000_load_firmware(struct comedi_device *dev,
+ const u8 *cpld_array, size_t len,
+ unsigned long context)
{
struct daqboard2000_private *devpriv = dev->private;
int result = -EIO;
@@ -518,10 +512,10 @@ static int initialize_daqboard2000(struct comedi_device *dev,
return -EIO;
for (retry = 0; retry < 3; retry++) {
- daqboard2000_resetLocalBus(dev);
- daqboard2000_reloadPLX(dev);
- daqboard2000_pulseProgPin(dev);
- if (daqboard2000_pollCPLD(dev, DAQBOARD2000_CPLD_INIT)) {
+ daqboard2000_reset_local_bus(dev);
+ daqboard2000_reload_plx(dev);
+ daqboard2000_pulse_prog_pin(dev);
+ if (daqboard2000_poll_cpld(dev, DAQBOARD2000_CPLD_INIT)) {
for (i = 0; i < len; i++) {
if (cpld_array[i] == 0xff &&
cpld_array[i + 1] == 0x20)
@@ -530,12 +524,12 @@ static int initialize_daqboard2000(struct comedi_device *dev,
for (; i < len; i += 2) {
int data =
(cpld_array[i] << 8) + cpld_array[i + 1];
- if (!daqboard2000_writeCPLD(dev, data))
+ if (!daqboard2000_write_cpld(dev, data))
break;
}
if (i >= len) {
- daqboard2000_resetLocalBus(dev);
- daqboard2000_reloadPLX(dev);
+ daqboard2000_reset_local_bus(dev);
+ daqboard2000_reload_plx(dev);
result = 0;
break;
}
@@ -544,79 +538,83 @@ static int initialize_daqboard2000(struct comedi_device *dev,
return result;
}
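
daqboard2000_load_firmware() scans the blob for the 0xff 0x20 sync marker, then streams the remainder to the CPLD as big-endian 16-bit words, aborting the retry if daqboard2000_write_cpld() reports that the INIT handshake dropped. A standalone sketch of the header-skip and word-packing step (cpld_write() is a stub, and unlike the original loop this sketch also bounds-checks the i + 1 lookahead):

/* Sketch of the sync-header skip and 16-bit word packing done by
 * daqboard2000_load_firmware(); cpld_write() is a hypothetical stub
 * for the hardware handshake. */
#include <stddef.h>
#include <stdint.h>

static int cpld_write(int data) { (void)data; return 1; }	/* stub: ok */

static int stream_firmware(const uint8_t *img, size_t len)
{
	size_t i;

	/* skip everything before the 0xff 0x20 sync marker */
	for (i = 0; i + 1 < len; i++)
		if (img[i] == 0xff && img[i + 1] == 0x20)
			break;

	/* stream the rest as big-endian 16-bit words */
	for (; i + 1 < len; i += 2)
		if (!cpld_write((img[i] << 8) + img[i + 1]))
			return -1;	/* CPLD refused a word */
	return 0;
}

int main(void)
{
	const uint8_t img[] = { 0x00, 0xff, 0x20, 0x12, 0x34 };

	return stream_firmware(img, sizeof(img));
}
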
-static void daqboard2000_adcStopDmaTransfer(struct comedi_device *dev)
+static void daqboard2000_adc_stop_dma_transfer(struct comedi_device *dev)
{
}
-static void daqboard2000_adcDisarm(struct comedi_device *dev)
+static void daqboard2000_adc_disarm(struct comedi_device *dev)
{
/* Disable hardware triggers */
udelay(2);
- writew(DAQBOARD2000_TrigAnalog | DAQBOARD2000_TrigDisable,
- dev->mmio + trigControl);
+ writew(DB2K_TRIG_CONTROL_TYPE_ANALOG | DB2K_TRIG_CONTROL_DISABLE,
+ dev->mmio + DB2K_REG_TRIG_CONTROL);
udelay(2);
- writew(DAQBOARD2000_TrigTTL | DAQBOARD2000_TrigDisable,
- dev->mmio + trigControl);
+ writew(DB2K_TRIG_CONTROL_TYPE_TTL | DB2K_TRIG_CONTROL_DISABLE,
+ dev->mmio + DB2K_REG_TRIG_CONTROL);
/* Stop the scan list FIFO from loading the configuration pipe */
udelay(2);
- writew(DAQBOARD2000_SeqStopScanList, dev->mmio + acqControl);
+ writew(DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
/* Stop the pacer clock */
udelay(2);
- writew(DAQBOARD2000_AdcPacerDisable, dev->mmio + acqControl);
+ writew(DB2K_ACQ_CONTROL_ADC_PACER_DISABLE,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
/* Stop the input dma (abort channel 1) */
- daqboard2000_adcStopDmaTransfer(dev);
+ daqboard2000_adc_stop_dma_transfer(dev);
}
-static void daqboard2000_activateReferenceDacs(struct comedi_device *dev)
+static void daqboard2000_activate_reference_dacs(struct comedi_device *dev)
{
unsigned int val;
int timeout;
/* Set the + reference dac value in the FPGA */
- writew(0x80 | DAQBOARD2000_PosRefDacSelect, dev->mmio + refDacs);
+ writew(DB2K_REF_DACS_SET | DB2K_REF_DACS_SELECT_POS_REF,
+ dev->mmio + DB2K_REG_REF_DACS);
for (timeout = 0; timeout < 20; timeout++) {
- val = readw(dev->mmio + dacControl);
- if ((val & DAQBOARD2000_RefBusy) == 0)
+ val = readw(dev->mmio + DB2K_REG_DAC_STATUS);
+ if ((val & DB2K_DAC_STATUS_REF_BUSY) == 0)
break;
udelay(2);
}
/* Set the - reference dac value in the FPGA */
- writew(0x80 | DAQBOARD2000_NegRefDacSelect, dev->mmio + refDacs);
+ writew(DB2K_REF_DACS_SET | DB2K_REF_DACS_SELECT_NEG_REF,
+ dev->mmio + DB2K_REG_REF_DACS);
for (timeout = 0; timeout < 20; timeout++) {
- val = readw(dev->mmio + dacControl);
- if ((val & DAQBOARD2000_RefBusy) == 0)
+ val = readw(dev->mmio + DB2K_REG_DAC_STATUS);
+ if ((val & DB2K_DAC_STATUS_REF_BUSY) == 0)
break;
udelay(2);
}
}
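
Both reference-DAC writes above use the same bounded busy-wait: poll DB2K_REG_DAC_STATUS up to 20 times for REF_BUSY to clear, pausing 2 us between polls, and fall through either way. A sketch of that pattern with stubbed I/O:

/* A sketch of the bounded busy-wait used after each reference-DAC
 * write; reg_read16() and sleep_us() are hypothetical stand-ins for
 * readw() and udelay(). The stub status is never busy here.
 */
#include <stdint.h>

#define REF_BUSY 0x0002	/* mirrors DB2K_DAC_STATUS_REF_BUSY */

static uint16_t reg_read16(void) { return 0; }		/* stub: idle */
static void sleep_us(unsigned int us) { (void)us; }	/* stub delay */

static int wait_ref_dac_idle(void)
{
	int timeout;

	for (timeout = 0; timeout < 20; timeout++) {
		if ((reg_read16() & REF_BUSY) == 0)
			return 0;	/* reference DAC has settled */
		sleep_us(2);
	}
	return -1;	/* still busy; the driver proceeds regardless */
}
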
-static void daqboard2000_initializeCtrs(struct comedi_device *dev)
+static void daqboard2000_initialize_ctrs(struct comedi_device *dev)
{
}
-static void daqboard2000_initializeTmrs(struct comedi_device *dev)
+static void daqboard2000_initialize_tmrs(struct comedi_device *dev)
{
}
-static void daqboard2000_dacDisarm(struct comedi_device *dev)
+static void daqboard2000_dac_disarm(struct comedi_device *dev)
{
}
-static void daqboard2000_initializeAdc(struct comedi_device *dev)
+static void daqboard2000_initialize_adc(struct comedi_device *dev)
{
- daqboard2000_adcDisarm(dev);
- daqboard2000_activateReferenceDacs(dev);
- daqboard2000_initializeCtrs(dev);
- daqboard2000_initializeTmrs(dev);
+ daqboard2000_adc_disarm(dev);
+ daqboard2000_activate_reference_dacs(dev);
+ daqboard2000_initialize_ctrs(dev);
+ daqboard2000_initialize_tmrs(dev);
}
-static void daqboard2000_initializeDac(struct comedi_device *dev)
+static void daqboard2000_initialize_dac(struct comedi_device *dev)
{
- daqboard2000_dacDisarm(dev);
+ daqboard2000_dac_disarm(dev);
}
static int daqboard2000_8255_cb(struct comedi_device *dev,
@@ -683,12 +681,12 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
result = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
DAQBOARD2000_FIRMWARE,
- initialize_daqboard2000, 0);
+ daqboard2000_load_firmware, 0);
if (result < 0)
return result;
- daqboard2000_initializeAdc(dev);
- daqboard2000_initializeDac(dev);
+ daqboard2000_initialize_adc(dev);
+ daqboard2000_initialize_dac(dev);
s = &dev->subdevices[0];
/* ai subdevice */
@@ -714,7 +712,7 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[2];
return subdev_8255_init(dev, s, daqboard2000_8255_cb,
- dioP2ExpansionIO8Bit);
+ DB2K_REG_DIO_P2_EXP_IO_8_BIT);
}
static void daqboard2000_detach(struct comedi_device *dev)
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index fd8e0b76f764..5d157951f63f 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -92,37 +92,37 @@
#define DAS16_AO_LSB_REG(x) ((x) ? 0x06 : 0x04)
#define DAS16_AO_MSB_REG(x) ((x) ? 0x07 : 0x05)
#define DAS16_STATUS_REG 0x08
-#define DAS16_STATUS_BUSY (1 << 7)
-#define DAS16_STATUS_UNIPOLAR (1 << 6)
-#define DAS16_STATUS_MUXBIT (1 << 5)
-#define DAS16_STATUS_INT (1 << 4)
+#define DAS16_STATUS_BUSY BIT(7)
+#define DAS16_STATUS_UNIPOLAR BIT(6)
+#define DAS16_STATUS_MUXBIT BIT(5)
+#define DAS16_STATUS_INT BIT(4)
#define DAS16_CTRL_REG 0x09
-#define DAS16_CTRL_INTE (1 << 7)
+#define DAS16_CTRL_INTE BIT(7)
#define DAS16_CTRL_IRQ(x) (((x) & 0x7) << 4)
-#define DAS16_CTRL_DMAE (1 << 2)
+#define DAS16_CTRL_DMAE BIT(2)
#define DAS16_CTRL_PACING_MASK (3 << 0)
#define DAS16_CTRL_INT_PACER (3 << 0)
#define DAS16_CTRL_EXT_PACER (2 << 0)
#define DAS16_CTRL_SOFT_PACER (0 << 0)
#define DAS16_PACER_REG 0x0a
#define DAS16_PACER_BURST_LEN(x) (((x) & 0xf) << 4)
-#define DAS16_PACER_CTR0 (1 << 1)
-#define DAS16_PACER_TRIG0 (1 << 0)
+#define DAS16_PACER_CTR0 BIT(1)
+#define DAS16_PACER_TRIG0 BIT(0)
#define DAS16_GAIN_REG 0x0b
#define DAS16_TIMER_BASE_REG 0x0c /* to 0x0f */
#define DAS1600_CONV_REG 0x404
-#define DAS1600_CONV_DISABLE (1 << 6)
+#define DAS1600_CONV_DISABLE BIT(6)
#define DAS1600_BURST_REG 0x405
-#define DAS1600_BURST_VAL (1 << 6)
+#define DAS1600_BURST_VAL BIT(6)
#define DAS1600_ENABLE_REG 0x406
-#define DAS1600_ENABLE_VAL (1 << 6)
+#define DAS1600_ENABLE_VAL BIT(6)
#define DAS1600_STATUS_REG 0x407
-#define DAS1600_STATUS_BME (1 << 6)
-#define DAS1600_STATUS_ME (1 << 5)
-#define DAS1600_STATUS_CD (1 << 4)
-#define DAS1600_STATUS_WS (1 << 1)
-#define DAS1600_STATUS_CLK_10MHZ (1 << 0)
+#define DAS1600_STATUS_BME BIT(6)
+#define DAS1600_STATUS_ME BIT(5)
+#define DAS1600_STATUS_CD BIT(4)
+#define DAS1600_STATUS_WS BIT(1)
+#define DAS1600_STATUS_CLK_10MHZ BIT(0)
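
BIT(n) from the kernel's bit helpers expands to (1UL << (n)), so these conversions are value-preserving for single-bit flags, while multi-bit fields such as DAS16_CTRL_PACING_MASK keep the explicit shift form. A quick illustrative check (BIT() redefined locally for a userspace build):

/* Illustrative check that BIT(n) matches the old (1 << n) constants;
 * BIT() is redefined here for userspace, the kernel provides it. C11. */
#include <assert.h>

#define BIT(n)	(1UL << (n))

static_assert(BIT(7) == (1 << 7), "DAS16_STATUS_BUSY unchanged");
static_assert(BIT(0) == (1 << 0), "DAS16_PACER_TRIG0 unchanged");
static_assert((3 << 0) == 3, "multi-bit PACING_MASK keeps shift form");
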
static const struct comedi_lrange range_das1x01_bip = {
4, {
@@ -198,6 +198,7 @@ enum {
das16_pg_1601,
das16_pg_1602,
};
+
static const int *const das16_gainlists[] = {
NULL,
das16jr_gainlist,
@@ -428,8 +429,10 @@ static const struct das16_board das16_boards[] = {
},
};
-/* Period for timer interrupt in jiffies. It's a function
- * to deal with possibility of dynamic HZ patches */
+/*
+ * Period for the timer interrupt in jiffies. It's a function
+ * to deal with the possibility of dynamic HZ patches.
+ */
static inline int timer_period(void)
{
return HZ / 20;
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index 3a37373fbb6f..bb8d6ec0632e 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -1,56 +1,52 @@
/*
- comedi/drivers/das16m1.c
- CIO-DAS16/M1 driver
- Author: Frank Mori Hess, based on code from the das16
- driver.
- Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Comedi driver for CIO-DAS16/M1
+ * Author: Frank Mori Hess, based on code from the das16 driver.
+ * Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: das16m1
-Description: CIO-DAS16/M1
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Measurement Computing] CIO-DAS16/M1 (das16m1)
-Status: works
-
-This driver supports a single board - the CIO-DAS16/M1.
-As far as I know, there are no other boards that have
-the same register layout. Even the CIO-DAS16/M1/16 is
-significantly different.
-
-I was _barely_ able to reach the full 1 MHz capability
-of this board, using a hard real-time interrupt
-(set the TRIG_RT flag in your struct comedi_cmd and use
-rtlinux or RTAI). The board can't do dma, so the bottleneck is
-pulling the data across the ISA bus. I timed the interrupt
-handler, and it took my computer ~470 microseconds to pull 512
-samples from the board. So at 1 Mhz sampling rate,
-expect your CPU to be spending almost all of its
-time in the interrupt handler.
-
-This board has some unusual restrictions for its channel/gain list. If the
-list has 2 or more channels in it, then two conditions must be satisfied:
-(1) - even/odd channels must appear at even/odd indices in the list
-(2) - the list must have an even number of entries.
-
-Options:
- [0] - base io address
- [1] - irq (optional, but you probably want it)
-
-irq can be omitted, although the cmd interface will not work without it.
-*/
+ * Driver: das16m1
+ * Description: CIO-DAS16/M1
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [Measurement Computing] CIO-DAS16/M1 (das16m1)
+ * Status: works
+ *
+ * This driver supports a single board - the CIO-DAS16/M1. As far as I know,
+ * there are no other boards that have the same register layout. Even the
+ * CIO-DAS16/M1/16 is significantly different.
+ *
+ * I was _barely_ able to reach the full 1 MHz capability of this board, using
+ * a hard real-time interrupt (set the TRIG_RT flag in your struct comedi_cmd
+ * and use rtlinux or RTAI). The board can't do DMA, so the bottleneck is
+ * pulling the data across the ISA bus. I timed the interrupt handler, and it
+ * took my computer ~470 microseconds to pull 512 samples from the board. So
+ * at 1 MHz sampling rate, expect your CPU to be spending almost all of its
+ * time in the interrupt handler.
+ *
+ * This board has some unusual restrictions for its channel/gain list. If the
+ * list has 2 or more channels in it, then two conditions must be satisfied:
+ * (1) - even/odd channels must appear at even/odd indices in the list
+ * (2) - the list must have an even number of entries.
+ *
+ * Configuration options:
+ * [0] - base io address
+ * [1] - irq (optional, but you probably want it)
+ *
+ * irq can be omitted, although the cmd interface will not work without it.
+ */
#include <linux/module.h>
#include <linux/slab.h>
@@ -60,52 +56,38 @@ irq can be omitted, although the cmd interface will not work without it.
#include "8255.h"
#include "comedi_8254.h"
-#define DAS16M1_SIZE2 8
-
-#define FIFO_SIZE 1024 /* 1024 sample fifo */
-
/*
- CIO-DAS16_M1.pdf
-
- "cio-das16/m1"
-
- 0 a/d bits 0-3, mux start 12 bit
- 1 a/d bits 4-11 unused
- 2 status control
- 3 di 4 bit do 4 bit
- 4 unused clear interrupt
- 5 interrupt, pacer
- 6 channel/gain queue address
- 7 channel/gain queue data
- 89ab 8254
- cdef 8254
- 400 8255
- 404-407 8254
-
-*/
-
-#define DAS16M1_AI 0 /* 16-bit wide register */
-#define AI_CHAN(x) ((x) & 0xf)
-#define DAS16M1_CS 2
-#define EXT_TRIG_BIT 0x1
-#define OVRUN 0x20
-#define IRQDATA 0x80
-#define DAS16M1_DIO 3
-#define DAS16M1_CLEAR_INTR 4
-#define DAS16M1_INTR_CONTROL 5
-#define EXT_PACER 0x2
-#define INT_PACER 0x3
-#define PACER_MASK 0x3
-#define INTE 0x80
-#define DAS16M1_QUEUE_ADDR 6
-#define DAS16M1_QUEUE_DATA 7
-#define Q_CHAN(x) ((x) & 0x7)
-#define Q_RANGE(x) (((x) & 0xf) << 4)
-#define UNIPOLAR 0x40
-#define DAS16M1_8254_FIRST 0x8
-#define DAS16M1_8254_SECOND 0xc
-#define DAS16M1_82C55 0x400
-#define DAS16M1_8254_THIRD 0x404
+ * Register map (dev->iobase)
+ */
+#define DAS16M1_AI_REG 0x00 /* 16-bit register */
+#define DAS16M1_AI_TO_CHAN(x) (((x) >> 0) & 0xf)
+#define DAS16M1_AI_TO_SAMPLE(x) (((x) >> 4) & 0xfff)
+#define DAS16M1_CS_REG 0x02
+#define DAS16M1_CS_EXT_TRIG BIT(0)
+#define DAS16M1_CS_OVRUN BIT(5)
+#define DAS16M1_CS_IRQDATA BIT(7)
+#define DAS16M1_DI_REG 0x03
+#define DAS16M1_DO_REG 0x03
+#define DAS16M1_CLR_INTR_REG 0x04
+#define DAS16M1_INTR_CTRL_REG 0x05
+#define DAS16M1_INTR_CTRL_PACER(x) (((x) & 0x3) << 0)
+#define DAS16M1_INTR_CTRL_PACER_EXT DAS16M1_INTR_CTRL_PACER(2)
+#define DAS16M1_INTR_CTRL_PACER_INT DAS16M1_INTR_CTRL_PACER(3)
+#define DAS16M1_INTR_CTRL_PACER_MASK DAS16M1_INTR_CTRL_PACER(3)
+#define DAS16M1_INTR_CTRL_IRQ(x) (((x) & 0x7) << 4)
+#define DAS16M1_INTR_CTRL_INTE BIT(7)
+#define DAS16M1_Q_ADDR_REG 0x06
+#define DAS16M1_Q_REG 0x07
+#define DAS16M1_Q_CHAN(x) (((x) & 0x7) << 0)
+#define DAS16M1_Q_RANGE(x) (((x) & 0xf) << 4)
+#define DAS16M1_8254_IOBASE1 0x08
+#define DAS16M1_8254_IOBASE2 0x0c
+#define DAS16M1_8255_IOBASE 0x400
+#define DAS16M1_8254_IOBASE3 0x404
+
+#define DAS16M1_SIZE2 0x08
+
+#define DAS16M1_AI_FIFO_SZ 1024 /* # samples */
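
In the new map, a queue entry packs the channel into bits 0-2 (DAS16M1_Q_CHAN) and the gain code into bits 4-7 (DAS16M1_Q_RANGE); das16m1_ai_set_queue() below writes one such byte per chanlist slot. A small illustration with assumed values:

/* Illustration of the queue-entry packing (channel/range values are
 * made up for the example). */
#include <stdio.h>

#define Q_CHAN(x)	(((x) & 0x7) << 0)
#define Q_RANGE(x)	(((x) & 0xf) << 4)

int main(void)
{
	unsigned int chan = 5, range = 2;

	printf("queue byte: 0x%02x\n", Q_CHAN(chan) | Q_RANGE(range));
	/* prints: queue byte: 0x25 */
	return 0;
}
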
static const struct comedi_lrange range_das16m1 = {
9, {
@@ -121,29 +103,46 @@ static const struct comedi_lrange range_das16m1 = {
}
};
-struct das16m1_private_struct {
+struct das16m1_private {
struct comedi_8254 *counter;
- unsigned int control_state;
- unsigned int adc_count; /* number of samples completed */
- /* initial value in lower half of hardware conversion counter,
- * needed to keep track of whether new count has been loaded into
- * counter yet (loaded by first sample conversion) */
+ unsigned int intr_ctrl;
+ unsigned int adc_count;
u16 initial_hw_count;
- unsigned short ai_buffer[FIFO_SIZE];
+ unsigned short ai_buffer[DAS16M1_AI_FIFO_SZ];
unsigned long extra_iobase;
};
-static inline unsigned short munge_sample(unsigned short data)
+static void das16m1_ai_set_queue(struct comedi_device *dev,
+ unsigned int *chanspec, unsigned int len)
{
- return (data >> 4) & 0xfff;
+ unsigned int i;
+
+ for (i = 0; i < len; i++) {
+ unsigned int chan = CR_CHAN(chanspec[i]);
+ unsigned int range = CR_RANGE(chanspec[i]);
+
+ outb(i, dev->iobase + DAS16M1_Q_ADDR_REG);
+ outb(DAS16M1_Q_CHAN(chan) | DAS16M1_Q_RANGE(range),
+ dev->iobase + DAS16M1_Q_REG);
+ }
}
-static void munge_sample_array(unsigned short *array, unsigned int num_elements)
+static void das16m1_ai_munge(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index)
{
+ unsigned short *array = data;
+ unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
unsigned int i;
- for (i = 0; i < num_elements; i++)
- array[i] = munge_sample(array[i]);
+ /*
+ * The fifo values have the channel number in the lower 4 bits and
+ * the sample in the upper 12 bits. This just shifts the values
+ * to remove the channel numbers.
+ */
+ for (i = 0; i < nsamples; i++)
+ array[i] = DAS16M1_AI_TO_SAMPLE(array[i]);
}
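
Each raw FIFO word carries the channel number in its low nibble and the 12-bit sample above it, which is why the munge hook can strip the channel field in bulk after insw(). A standalone demonstration of the field extraction (the raw value is made up):

/* Demonstration of the DAS16M1_AI_TO_CHAN/TO_SAMPLE field extraction;
 * the raw FIFO word below is invented for the example. */
#include <stdio.h>

#define AI_TO_CHAN(x)	(((x) >> 0) & 0xf)
#define AI_TO_SAMPLE(x)	(((x) >> 4) & 0xfff)

int main(void)
{
	unsigned short raw = 0x8a35;	/* pretend FIFO word */

	printf("chan %u sample 0x%03x\n", AI_TO_CHAN(raw), AI_TO_SAMPLE(raw));
	/* prints: chan 5 sample 0x8a3 */
	return 0;
}
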
static int das16m1_ai_check_chanlist(struct comedi_device *dev,
@@ -174,8 +173,9 @@ static int das16m1_ai_check_chanlist(struct comedi_device *dev,
return 0;
}
-static int das16m1_cmd_test(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
+static int das16m1_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
int err = 0;
@@ -245,17 +245,13 @@ static int das16m1_cmd_test(struct comedi_device *dev,
return 0;
}
-static int das16m1_cmd_exec(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int das16m1_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct das16m1_private_struct *devpriv = dev->private;
+ struct das16m1_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int byte, i;
-
- /* disable interrupts and internal pacer */
- devpriv->control_state &= ~INTE & ~PACER_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+ unsigned int byte;
/* set software count */
devpriv->adc_count = 0;
@@ -274,48 +270,47 @@ static int das16m1_cmd_exec(struct comedi_device *dev,
*/
devpriv->initial_hw_count = comedi_8254_read(devpriv->counter, 1);
- /* setup channel/gain queue */
- for (i = 0; i < cmd->chanlist_len; i++) {
- outb(i, dev->iobase + DAS16M1_QUEUE_ADDR);
- byte =
- Q_CHAN(CR_CHAN(cmd->chanlist[i])) |
- Q_RANGE(CR_RANGE(cmd->chanlist[i]));
- outb(byte, dev->iobase + DAS16M1_QUEUE_DATA);
- }
+ das16m1_ai_set_queue(dev, cmd->chanlist, cmd->chanlist_len);
/* enable interrupts and set internal pacer counter mode and counts */
- devpriv->control_state &= ~PACER_MASK;
+ devpriv->intr_ctrl &= ~DAS16M1_INTR_CTRL_PACER_MASK;
if (cmd->convert_src == TRIG_TIMER) {
comedi_8254_update_divisors(dev->pacer);
comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- devpriv->control_state |= INT_PACER;
+ devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_PACER_INT;
} else { /* TRIG_EXT */
- devpriv->control_state |= EXT_PACER;
+ devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_PACER_EXT;
}
/* set control & status register */
byte = 0;
- /* if we are using external start trigger (also board dislikes having
- * both start and conversion triggers external simultaneously) */
+ /*
+ * If we are using an external start trigger (the board also dislikes
+ * having both start and conversion triggers external simultaneously).
+ */
if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT)
- byte |= EXT_TRIG_BIT;
+ byte |= DAS16M1_CS_EXT_TRIG;
- outb(byte, dev->iobase + DAS16M1_CS);
- /* clear interrupt bit */
- outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
+ outb(byte, dev->iobase + DAS16M1_CS_REG);
+
+ /* clear interrupt */
+ outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
- devpriv->control_state |= INTE;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+ devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_INTE;
+ outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
return 0;
}
-static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
+static int das16m1_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct das16m1_private_struct *devpriv = dev->private;
+ struct das16m1_private *devpriv = dev->private;
- devpriv->control_state &= ~INTE & ~PACER_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+ /* disable interrupts and pacer */
+ devpriv->intr_ctrl &= ~(DAS16M1_INTR_CTRL_INTE |
+ DAS16M1_INTR_CTRL_PACER_MASK);
+ outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
return 0;
}
@@ -327,67 +322,58 @@ static int das16m1_ai_eoc(struct comedi_device *dev,
{
unsigned int status;
- status = inb(dev->iobase + DAS16M1_CS);
- if (status & IRQDATA)
+ status = inb(dev->iobase + DAS16M1_CS_REG);
+ if (status & DAS16M1_CS_IRQDATA)
return 0;
return -EBUSY;
}
-static int das16m1_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das16m1_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct das16m1_private_struct *devpriv = dev->private;
int ret;
- int n;
- int byte;
-
- /* disable interrupts and internal pacer */
- devpriv->control_state &= ~INTE & ~PACER_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
-
- /* setup channel/gain queue */
- outb(0, dev->iobase + DAS16M1_QUEUE_ADDR);
- byte =
- Q_CHAN(CR_CHAN(insn->chanspec)) | Q_RANGE(CR_RANGE(insn->chanspec));
- outb(byte, dev->iobase + DAS16M1_QUEUE_DATA);
-
- for (n = 0; n < insn->n; n++) {
- /* clear IRQDATA bit */
- outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
+ int i;
+
+ das16m1_ai_set_queue(dev, &insn->chanspec, 1);
+
+ for (i = 0; i < insn->n; i++) {
+ unsigned short val;
+
+ /* clear interrupt */
+ outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
/* trigger conversion */
- outb(0, dev->iobase);
+ outb(0, dev->iobase + DAS16M1_AI_REG);
ret = comedi_timeout(dev, s, insn, das16m1_ai_eoc, 0);
if (ret)
return ret;
- data[n] = munge_sample(inw(dev->iobase));
+ val = inw(dev->iobase + DAS16M1_AI_REG);
+ data[i] = DAS16M1_AI_TO_SAMPLE(val);
}
- return n;
+ return insn->n;
}
-static int das16m1_di_rbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das16m1_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int bits;
-
- bits = inb(dev->iobase + DAS16M1_DIO) & 0xf;
- data[1] = bits;
- data[0] = 0;
+ data[1] = inb(dev->iobase + DAS16M1_DI_REG) & 0xf;
return insn->n;
}
-static int das16m1_do_wbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int das16m1_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DAS16M1_DIO);
+ outb(s->state, dev->iobase + DAS16M1_DO_REG);
data[1] = s->state;
@@ -396,33 +382,33 @@ static int das16m1_do_wbits(struct comedi_device *dev,
static void das16m1_handler(struct comedi_device *dev, unsigned int status)
{
- struct das16m1_private_struct *devpriv = dev->private;
- struct comedi_subdevice *s;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
+ struct das16m1_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
u16 num_samples;
u16 hw_counter;
- s = dev->read_subdev;
- async = s->async;
- cmd = &async->cmd;
-
/* figure out how many samples are in fifo */
hw_counter = comedi_8254_read(devpriv->counter, 1);
- /* make sure hardware counter reading is not bogus due to initial value
- * not having been loaded yet */
+ /*
+ * Make sure hardware counter reading is not bogus due to initial
+ * value not having been loaded yet.
+ */
if (devpriv->adc_count == 0 &&
hw_counter == devpriv->initial_hw_count) {
num_samples = 0;
} else {
- /* The calculation of num_samples looks odd, but it uses the
+ /*
+ * The calculation of num_samples looks odd, but it uses the
* following facts. 16 bit hardware counter is initialized with
* value of zero (which really means 0x1000). The counter
* decrements by one on each conversion (when the counter
* decrements from zero it goes to 0xffff). num_samples is a
* 16 bit variable, so it will roll over in a similar fashion
* to the hardware counter. Work it out, and this is what you
- * get. */
+ * get.
+ */
num_samples = -hw_counter - devpriv->adc_count;
}
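
The negation trick works because the 8254 loads 0 (effectively 0x10000) and decrements once per conversion, so conversions-so-far equals (0 - hw_counter) mod 2^16; subtracting the samples already drained leaves the new FIFO contents. A worked example in 16-bit unsigned arithmetic:

/* Worked example of the 16-bit rollover arithmetic explained above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t hw_counter = 0xfffb;	/* counter after 5 conversions */
	uint16_t adc_count = 3;		/* samples already drained */
	uint16_t num_samples = -hw_counter - adc_count;

	printf("%u new samples in fifo\n", num_samples);	/* prints 2 */
	return 0;
}
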
/* check if we only need some of the points */
@@ -431,10 +417,9 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
num_samples = cmd->stop_arg * cmd->chanlist_len;
}
/* make sure we don't try to get too many points if fifo has overrun */
- if (num_samples > FIFO_SIZE)
- num_samples = FIFO_SIZE;
+ if (num_samples > DAS16M1_AI_FIFO_SZ)
+ num_samples = DAS16M1_AI_FIFO_SZ;
insw(dev->iobase, devpriv->ai_buffer, num_samples);
- munge_sample_array(devpriv->ai_buffer, num_samples);
comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
devpriv->adc_count += num_samples;
@@ -445,9 +430,11 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
}
}
- /* this probably won't catch overruns since the card doesn't generate
- * overrun interrupts, but we might as well try */
- if (status & OVRUN) {
+ /*
+ * This probably won't catch overruns since the card doesn't generate
+ * overrun interrupts, but we might as well try.
+ */
+ if (status & DAS16M1_CS_OVRUN) {
async->events |= COMEDI_CB_ERROR;
dev_err(dev->class_dev, "fifo overflow\n");
}
@@ -455,14 +442,15 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
comedi_handle_events(dev, s);
}
-static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s)
+static int das16m1_ai_poll(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
unsigned long flags;
unsigned int status;
/* prevent race with interrupt handler */
spin_lock_irqsave(&dev->spinlock, flags);
- status = inb(dev->iobase + DAS16M1_CS);
+ status = inb(dev->iobase + DAS16M1_CS_REG);
das16m1_handler(dev, status);
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -481,9 +469,9 @@ static irqreturn_t das16m1_interrupt(int irq, void *d)
/* prevent race with comedi_poll() */
spin_lock(&dev->spinlock);
- status = inb(dev->iobase + DAS16M1_CS);
+ status = inb(dev->iobase + DAS16M1_CS_REG);
- if ((status & (IRQDATA | OVRUN)) == 0) {
+ if ((status & (DAS16M1_CS_IRQDATA | DAS16M1_CS_OVRUN)) == 0) {
dev_err(dev->class_dev, "spurious interrupt\n");
spin_unlock(&dev->spinlock);
return IRQ_NONE;
@@ -492,7 +480,7 @@ static irqreturn_t das16m1_interrupt(int irq, void *d)
das16m1_handler(dev, status);
/* clear interrupt */
- outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
+ outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
spin_unlock(&dev->spinlock);
return IRQ_HANDLED;
@@ -522,15 +510,10 @@ static int das16m1_irq_bits(unsigned int irq)
}
}
-/*
- * Options list:
- * 0 I/O base
- * 1 IRQ
- */
static int das16m1_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
- struct das16m1_private_struct *devpriv;
+ struct das16m1_private *devpriv;
struct comedi_subdevice *s;
int ret;
@@ -541,12 +524,12 @@ static int das16m1_attach(struct comedi_device *dev,
ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
- /* Request an additional region for the 8255 */
- ret = __comedi_request_region(dev, dev->iobase + DAS16M1_82C55,
+ /* Request an additional region for the 8255 and 3rd 8254 */
+ ret = __comedi_request_region(dev, dev->iobase + DAS16M1_8255_IOBASE,
DAS16M1_SIZE2);
if (ret)
return ret;
- devpriv->extra_iobase = dev->iobase + DAS16M1_82C55;
+ devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE;
/* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
if ((1 << it->options[1]) & 0xdcfc) {
@@ -556,12 +539,12 @@ static int das16m1_attach(struct comedi_device *dev,
dev->irq = it->options[1];
}
- dev->pacer = comedi_8254_init(dev->iobase + DAS16M1_8254_SECOND,
+ dev->pacer = comedi_8254_init(dev->iobase + DAS16M1_8254_IOBASE2,
I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
if (!dev->pacer)
return -ENOMEM;
- devpriv->counter = comedi_8254_init(dev->iobase + DAS16M1_8254_FIRST,
+ devpriv->counter = comedi_8254_init(dev->iobase + DAS16M1_8254_IOBASE1,
0, I8254_IO8, 0);
if (!devpriv->counter)
return -ENOMEM;
@@ -570,61 +553,62 @@ static int das16m1_attach(struct comedi_device *dev,
if (ret)
return ret;
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- /* ai */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF;
- s->n_chan = 8;
- s->maxdata = (1 << 12) - 1;
- s->range_table = &range_das16m1;
- s->insn_read = das16m1_ai_rinsn;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF;
+ s->n_chan = 8;
+ s->maxdata = 0x0fff;
+ s->range_table = &range_das16m1;
+ s->insn_read = das16m1_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 256;
- s->do_cmdtest = das16m1_cmd_test;
- s->do_cmd = das16m1_cmd_exec;
- s->cancel = das16m1_cancel;
- s->poll = das16m1_poll;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 256;
+ s->do_cmdtest = das16m1_ai_cmdtest;
+ s->do_cmd = das16m1_ai_cmd;
+ s->cancel = das16m1_ai_cancel;
+ s->poll = das16m1_ai_poll;
+ s->munge = das16m1_ai_munge;
}
+ /* Digital Input subdevice */
s = &dev->subdevices[1];
- /* di */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das16m1_di_rbits;
-
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das16m1_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[2];
- /* do */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das16m1_do_wbits;
-
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das16m1_do_insn_bits;
+
+ /* Digital I/O subdevice (8255) */
s = &dev->subdevices[3];
- /* 8255 */
- ret = subdev_8255_init(dev, s, NULL, DAS16M1_82C55);
+ ret = subdev_8255_init(dev, s, NULL, DAS16M1_8255_IOBASE);
if (ret)
return ret;
/* initialize digital output lines */
- outb(0, dev->iobase + DAS16M1_DIO);
+ outb(0, dev->iobase + DAS16M1_DO_REG);
/* set the interrupt level */
- devpriv->control_state = das16m1_irq_bits(dev->irq) << 4;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+ devpriv->intr_ctrl = DAS16M1_INTR_CTRL_IRQ(das16m1_irq_bits(dev->irq));
+ outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
return 0;
}
static void das16m1_detach(struct comedi_device *dev)
{
- struct das16m1_private_struct *devpriv = dev->private;
+ struct das16m1_private *devpriv = dev->private;
if (devpriv) {
if (devpriv->extra_iobase)
@@ -643,5 +627,5 @@ static struct comedi_driver das16m1_driver = {
module_comedi_driver(das16m1_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for CIO-DAS16/M1 ISA cards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index 1701294b79cd..0fdf5e02182f 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -50,48 +50,50 @@
#define DAS6402_AO_LSB_REG(x) (0x04 + ((x) * 2))
#define DAS6402_AO_MSB_REG(x) (0x05 + ((x) * 2))
#define DAS6402_STATUS_REG 0x08
-#define DAS6402_STATUS_FFNE (1 << 0)
-#define DAS6402_STATUS_FHALF (1 << 1)
-#define DAS6402_STATUS_FFULL (1 << 2)
-#define DAS6402_STATUS_XINT (1 << 3)
-#define DAS6402_STATUS_INT (1 << 4)
-#define DAS6402_STATUS_XTRIG (1 << 5)
-#define DAS6402_STATUS_INDGT (1 << 6)
-#define DAS6402_STATUS_10MHZ (1 << 7)
-#define DAS6402_STATUS_W_CLRINT (1 << 0)
-#define DAS6402_STATUS_W_CLRXTR (1 << 1)
-#define DAS6402_STATUS_W_CLRXIN (1 << 2)
-#define DAS6402_STATUS_W_EXTEND (1 << 4)
-#define DAS6402_STATUS_W_ARMED (1 << 5)
-#define DAS6402_STATUS_W_POSTMODE (1 << 6)
-#define DAS6402_STATUS_W_10MHZ (1 << 7)
+#define DAS6402_STATUS_FFNE BIT(0)
+#define DAS6402_STATUS_FHALF BIT(1)
+#define DAS6402_STATUS_FFULL BIT(2)
+#define DAS6402_STATUS_XINT BIT(3)
+#define DAS6402_STATUS_INT BIT(4)
+#define DAS6402_STATUS_XTRIG BIT(5)
+#define DAS6402_STATUS_INDGT BIT(6)
+#define DAS6402_STATUS_10MHZ BIT(7)
+#define DAS6402_STATUS_W_CLRINT BIT(0)
+#define DAS6402_STATUS_W_CLRXTR BIT(1)
+#define DAS6402_STATUS_W_CLRXIN BIT(2)
+#define DAS6402_STATUS_W_EXTEND BIT(4)
+#define DAS6402_STATUS_W_ARMED BIT(5)
+#define DAS6402_STATUS_W_POSTMODE BIT(6)
+#define DAS6402_STATUS_W_10MHZ BIT(7)
#define DAS6402_CTRL_REG 0x09
-#define DAS6402_CTRL_SOFT_TRIG (0 << 0)
-#define DAS6402_CTRL_EXT_FALL_TRIG (1 << 0)
-#define DAS6402_CTRL_EXT_RISE_TRIG (2 << 0)
-#define DAS6402_CTRL_PACER_TRIG (3 << 0)
-#define DAS6402_CTRL_BURSTEN (1 << 2)
-#define DAS6402_CTRL_XINTE (1 << 3)
+#define DAS6402_CTRL_TRIG(x) ((x) << 0)
+#define DAS6402_CTRL_SOFT_TRIG DAS6402_CTRL_TRIG(0)
+#define DAS6402_CTRL_EXT_FALL_TRIG DAS6402_CTRL_TRIG(1)
+#define DAS6402_CTRL_EXT_RISE_TRIG DAS6402_CTRL_TRIG(2)
+#define DAS6402_CTRL_PACER_TRIG DAS6402_CTRL_TRIG(3)
+#define DAS6402_CTRL_BURSTEN BIT(2)
+#define DAS6402_CTRL_XINTE BIT(3)
#define DAS6402_CTRL_IRQ(x) ((x) << 4)
-#define DAS6402_CTRL_INTE (1 << 7)
+#define DAS6402_CTRL_INTE BIT(7)
#define DAS6402_TRIG_REG 0x0a
-#define DAS6402_TRIG_TGEN (1 << 0)
-#define DAS6402_TRIG_TGSEL (1 << 1)
-#define DAS6402_TRIG_TGPOL (1 << 2)
-#define DAS6402_TRIG_PRETRIG (1 << 3)
+#define DAS6402_TRIG_TGEN BIT(0)
+#define DAS6402_TRIG_TGSEL BIT(1)
+#define DAS6402_TRIG_TGPOL BIT(2)
+#define DAS6402_TRIG_PRETRIG BIT(3)
#define DAS6402_AO_RANGE(_chan, _range) ((_range) << ((_chan) ? 6 : 4))
#define DAS6402_AO_RANGE_MASK(_chan) (3 << ((_chan) ? 6 : 4))
#define DAS6402_MODE_REG 0x0b
-#define DAS6402_MODE_RANGE(x) ((x) << 0)
-#define DAS6402_MODE_POLLED (0 << 2)
-#define DAS6402_MODE_FIFONEPTY (1 << 2)
-#define DAS6402_MODE_FIFOHFULL (2 << 2)
-#define DAS6402_MODE_EOB (3 << 2)
-#define DAS6402_MODE_ENHANCED (1 << 4)
-#define DAS6402_MODE_SE (1 << 5)
-#define DAS6402_MODE_UNI (1 << 6)
-#define DAS6402_MODE_DMA1 (0 << 7)
-#define DAS6402_MODE_DMA3 (1 << 7)
+#define DAS6402_MODE_RANGE(x) ((x) << 2)
+#define DAS6402_MODE_POLLED DAS6402_MODE_RANGE(0)
+#define DAS6402_MODE_FIFONEPTY DAS6402_MODE_RANGE(1)
+#define DAS6402_MODE_FIFOHFULL DAS6402_MODE_RANGE(2)
+#define DAS6402_MODE_EOB DAS6402_MODE_RANGE(3)
+#define DAS6402_MODE_ENHANCED BIT(4)
+#define DAS6402_MODE_SE BIT(5)
+#define DAS6402_MODE_UNI BIT(6)
+#define DAS6402_MODE_DMA(x) ((x) << 7)
+#define DAS6402_MODE_DMA1 DAS6402_MODE_DMA(0)
+#define DAS6402_MODE_DMA3 DAS6402_MODE_DMA(1)
#define DAS6402_TIMER_BASE 0x0c
static const struct comedi_lrange das6402_ai_ranges = {
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index b02f12201cf7..fd4cb4911671 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -1,56 +1,56 @@
/*
- comedi/drivers/das800.c
- Driver for Keitley das800 series boards and compatibles
- Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/das800.c
+ * Driver for Keithley das800 series boards and compatibles
+ * Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-Driver: das800
-Description: Keithley Metrabyte DAS800 (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Keithley Metrabyte] DAS-800 (das-800), DAS-801 (das-801),
- DAS-802 (das-802),
- [Measurement Computing] CIO-DAS800 (cio-das800),
- CIO-DAS801 (cio-das801), CIO-DAS802 (cio-das802),
- CIO-DAS802/16 (cio-das802/16)
-Status: works, cio-das802/16 untested - email me if you have tested it
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (optional, required for timed or externally triggered conversions)
-
-Notes:
- IRQ can be omitted, although the cmd interface will not work without it.
-
- All entries in the channel/gain list must use the same gain and be
- consecutive channels counting upwards in channel number (these are
- hardware limitations.)
-
- I've never tested the gain setting stuff since I only have a
- DAS-800 board with fixed gain.
-
- The cio-das802/16 does not have a fifo-empty status bit! Therefore
- only fifo-half-full transfers are possible with this card.
-
-cmd triggers supported:
- start_src: TRIG_NOW | TRIG_EXT
- scan_begin_src: TRIG_FOLLOW
- scan_end_src: TRIG_COUNT
- convert_src: TRIG_TIMER | TRIG_EXT
- stop_src: TRIG_NONE | TRIG_COUNT
-*/
+ * Driver: das800
+ * Description: Keithley Metrabyte DAS800 (& compatibles)
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [Keithley Metrabyte] DAS-800 (das-800), DAS-801 (das-801),
+ * DAS-802 (das-802),
+ * [Measurement Computing] CIO-DAS800 (cio-das800),
+ * CIO-DAS801 (cio-das801), CIO-DAS802 (cio-das802),
+ * CIO-DAS802/16 (cio-das802/16)
+ * Status: works, cio-das802/16 untested - email me if you have tested it
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional, required for timed or externally triggered conversions)
+ *
+ * Notes:
+ * IRQ can be omitted, although the cmd interface will not work without it.
+ *
+ * All entries in the channel/gain list must use the same gain and be
+ * consecutive channels counting upwards in channel number (these are
+ * hardware limitations.)
+ *
+ * I've never tested the gain setting stuff since I only have a
+ * DAS-800 board with fixed gain.
+ *
+ * The cio-das802/16 does not have a fifo-empty status bit! Therefore
+ * only fifo-half-full transfers are possible with this card.
+ *
+ * cmd triggers supported:
+ * start_src: TRIG_NOW | TRIG_EXT
+ * scan_begin_src: TRIG_FOLLOW
+ * scan_end_src: TRIG_COUNT
+ * convert_src: TRIG_TIMER | TRIG_EXT
+ * stop_src: TRIG_NONE | TRIG_COUNT
+ */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -218,7 +218,7 @@ struct das800_private {
};
static void das800_ind_write(struct comedi_device *dev,
- unsigned val, unsigned reg)
+ unsigned int val, unsigned int reg)
{
/*
* Select dev->iobase + 2 to be desired register
@@ -228,7 +228,7 @@ static void das800_ind_write(struct comedi_device *dev,
outb(val, dev->iobase + 2);
}
-static unsigned das800_ind_read(struct comedi_device *dev, unsigned reg)
+static unsigned int das800_ind_read(struct comedi_device *dev, unsigned int reg)
{
/*
* Select dev->iobase + 7 to be desired register
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 958c0d4aae5c..b8606ded0623 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -46,73 +46,75 @@
#define DMM32AT_AI_START_CONV_REG 0x00
#define DMM32AT_AI_LSB_REG 0x00
#define DMM32AT_AUX_DOUT_REG 0x01
-#define DMM32AT_AUX_DOUT2 (1 << 2) /* J3.42 - OUT2 (OUT2EN) */
-#define DMM32AT_AUX_DOUT1 (1 << 1) /* J3.43 */
-#define DMM32AT_AUX_DOUT0 (1 << 0) /* J3.44 - OUT0 (OUT0EN) */
+#define DMM32AT_AUX_DOUT2 BIT(2) /* J3.42 - OUT2 (OUT2EN) */
+#define DMM32AT_AUX_DOUT1 BIT(1) /* J3.43 */
+#define DMM32AT_AUX_DOUT0 BIT(0) /* J3.44 - OUT0 (OUT0EN) */
#define DMM32AT_AI_MSB_REG 0x01
#define DMM32AT_AI_LO_CHAN_REG 0x02
#define DMM32AT_AI_HI_CHAN_REG 0x03
#define DMM32AT_AUX_DI_REG 0x04
-#define DMM32AT_AUX_DI_DACBUSY (1 << 7)
-#define DMM32AT_AUX_DI_CALBUSY (1 << 6)
-#define DMM32AT_AUX_DI3 (1 << 3) /* J3.45 - ADCLK (CLKSEL) */
-#define DMM32AT_AUX_DI2 (1 << 2) /* J3.46 - GATE12 (GT12EN) */
-#define DMM32AT_AUX_DI1 (1 << 1) /* J3.47 - GATE0 (GT0EN) */
-#define DMM32AT_AUX_DI0 (1 << 0) /* J3.48 - CLK0 (SRC0) */
+#define DMM32AT_AUX_DI_DACBUSY BIT(7)
+#define DMM32AT_AUX_DI_CALBUSY BIT(6)
+#define DMM32AT_AUX_DI3 BIT(3) /* J3.45 - ADCLK (CLKSEL) */
+#define DMM32AT_AUX_DI2 BIT(2) /* J3.46 - GATE12 (GT12EN) */
+#define DMM32AT_AUX_DI1 BIT(1) /* J3.47 - GATE0 (GT0EN) */
+#define DMM32AT_AUX_DI0 BIT(0) /* J3.48 - CLK0 (SRC0) */
#define DMM32AT_AO_LSB_REG 0x04
#define DMM32AT_AO_MSB_REG 0x05
#define DMM32AT_AO_MSB_DACH(x) ((x) << 6)
#define DMM32AT_FIFO_DEPTH_REG 0x06
#define DMM32AT_FIFO_CTRL_REG 0x07
-#define DMM32AT_FIFO_CTRL_FIFOEN (1 << 3)
-#define DMM32AT_FIFO_CTRL_SCANEN (1 << 2)
-#define DMM32AT_FIFO_CTRL_FIFORST (1 << 1)
+#define DMM32AT_FIFO_CTRL_FIFOEN BIT(3)
+#define DMM32AT_FIFO_CTRL_SCANEN BIT(2)
+#define DMM32AT_FIFO_CTRL_FIFORST BIT(1)
#define DMM32AT_FIFO_STATUS_REG 0x07
-#define DMM32AT_FIFO_STATUS_EF (1 << 7)
-#define DMM32AT_FIFO_STATUS_HF (1 << 6)
-#define DMM32AT_FIFO_STATUS_FF (1 << 5)
-#define DMM32AT_FIFO_STATUS_OVF (1 << 4)
-#define DMM32AT_FIFO_STATUS_FIFOEN (1 << 3)
-#define DMM32AT_FIFO_STATUS_SCANEN (1 << 2)
+#define DMM32AT_FIFO_STATUS_EF BIT(7)
+#define DMM32AT_FIFO_STATUS_HF BIT(6)
+#define DMM32AT_FIFO_STATUS_FF BIT(5)
+#define DMM32AT_FIFO_STATUS_OVF BIT(4)
+#define DMM32AT_FIFO_STATUS_FIFOEN BIT(3)
+#define DMM32AT_FIFO_STATUS_SCANEN BIT(2)
#define DMM32AT_FIFO_STATUS_PAGE_MASK (3 << 0)
#define DMM32AT_CTRL_REG 0x08
-#define DMM32AT_CTRL_RESETA (1 << 5)
-#define DMM32AT_CTRL_RESETD (1 << 4)
-#define DMM32AT_CTRL_INTRST (1 << 3)
-#define DMM32AT_CTRL_PAGE_8254 (0 << 0)
-#define DMM32AT_CTRL_PAGE_8255 (1 << 0)
-#define DMM32AT_CTRL_PAGE_CALIB (3 << 0)
+#define DMM32AT_CTRL_RESETA BIT(5)
+#define DMM32AT_CTRL_RESETD BIT(4)
+#define DMM32AT_CTRL_INTRST BIT(3)
+#define DMM32AT_CTRL_PAGE(x) ((x) << 0)
+#define DMM32AT_CTRL_PAGE_8254 DMM32AT_CTRL_PAGE(0)
+#define DMM32AT_CTRL_PAGE_8255 DMM32AT_CTRL_PAGE(1)
+#define DMM32AT_CTRL_PAGE_CALIB DMM32AT_CTRL_PAGE(3)
#define DMM32AT_AI_STATUS_REG 0x08
-#define DMM32AT_AI_STATUS_STS (1 << 7)
-#define DMM32AT_AI_STATUS_SD1 (1 << 6)
-#define DMM32AT_AI_STATUS_SD0 (1 << 5)
+#define DMM32AT_AI_STATUS_STS BIT(7)
+#define DMM32AT_AI_STATUS_SD1 BIT(6)
+#define DMM32AT_AI_STATUS_SD0 BIT(5)
#define DMM32AT_AI_STATUS_ADCH_MASK (0x1f << 0)
#define DMM32AT_INTCLK_REG 0x09
-#define DMM32AT_INTCLK_ADINT (1 << 7)
-#define DMM32AT_INTCLK_DINT (1 << 6)
-#define DMM32AT_INTCLK_TINT (1 << 5)
-#define DMM32AT_INTCLK_CLKEN (1 << 1) /* 1=see below 0=software */
-#define DMM32AT_INTCLK_CLKSEL (1 << 0) /* 1=OUT2 0=EXTCLK */
+#define DMM32AT_INTCLK_ADINT BIT(7)
+#define DMM32AT_INTCLK_DINT BIT(6)
+#define DMM32AT_INTCLK_TINT BIT(5)
+#define DMM32AT_INTCLK_CLKEN BIT(1) /* 1=see below 0=software */
+#define DMM32AT_INTCLK_CLKSEL BIT(0) /* 1=OUT2 0=EXTCLK */
#define DMM32AT_CTRDIO_CFG_REG 0x0a
-#define DMM32AT_CTRDIO_CFG_FREQ12 (1 << 7) /* CLK12 1=100KHz 0=10MHz */
-#define DMM32AT_CTRDIO_CFG_FREQ0 (1 << 6) /* CLK0 1=10KHz 0=10MHz */
-#define DMM32AT_CTRDIO_CFG_OUT2EN (1 << 5) /* J3.42 1=OUT2 is DOUT2 */
-#define DMM32AT_CTRDIO_CFG_OUT0EN (1 << 4) /* J3,44 1=OUT0 is DOUT0 */
-#define DMM32AT_CTRDIO_CFG_GT0EN (1 << 2) /* J3.47 1=DIN1 is GATE0 */
-#define DMM32AT_CTRDIO_CFG_SRC0 (1 << 1) /* CLK0 is 0=FREQ0 1=J3.48 */
-#define DMM32AT_CTRDIO_CFG_GT12EN (1 << 0) /* J3.46 1=DIN2 is GATE12 */
+#define DMM32AT_CTRDIO_CFG_FREQ12 BIT(7) /* CLK12 1=100KHz 0=10MHz */
+#define DMM32AT_CTRDIO_CFG_FREQ0 BIT(6) /* CLK0 1=10KHz 0=10MHz */
+#define DMM32AT_CTRDIO_CFG_OUT2EN BIT(5) /* J3.42 1=OUT2 is DOUT2 */
+#define DMM32AT_CTRDIO_CFG_OUT0EN BIT(4) /* J3.44 1=OUT0 is DOUT0 */
+#define DMM32AT_CTRDIO_CFG_GT0EN BIT(2) /* J3.47 1=DIN1 is GATE0 */
+#define DMM32AT_CTRDIO_CFG_SRC0 BIT(1) /* CLK0 is 0=FREQ0 1=J3.48 */
+#define DMM32AT_CTRDIO_CFG_GT12EN BIT(0) /* J3.46 1=DIN2 is GATE12 */
#define DMM32AT_AI_CFG_REG 0x0b
-#define DMM32AT_AI_CFG_SCINT_20US (0 << 4)
-#define DMM32AT_AI_CFG_SCINT_15US (1 << 4)
-#define DMM32AT_AI_CFG_SCINT_10US (2 << 4)
-#define DMM32AT_AI_CFG_SCINT_5US (3 << 4)
-#define DMM32AT_AI_CFG_RANGE (1 << 3) /* 0=5V 1=10V */
-#define DMM32AT_AI_CFG_ADBU (1 << 2) /* 0=bipolar 1=unipolar */
+#define DMM32AT_AI_CFG_SCINT(x) ((x) << 4)
+#define DMM32AT_AI_CFG_SCINT_20US DMM32AT_AI_CFG_SCINT(0)
+#define DMM32AT_AI_CFG_SCINT_15US DMM32AT_AI_CFG_SCINT(1)
+#define DMM32AT_AI_CFG_SCINT_10US DMM32AT_AI_CFG_SCINT(2)
+#define DMM32AT_AI_CFG_SCINT_5US DMM32AT_AI_CFG_SCINT(3)
+#define DMM32AT_AI_CFG_RANGE BIT(3) /* 0=5V 1=10V */
+#define DMM32AT_AI_CFG_ADBU BIT(2) /* 0=bipolar 1=unipolar */
#define DMM32AT_AI_CFG_GAIN(x) ((x) << 0)
#define DMM32AT_AI_READBACK_REG 0x0b
-#define DMM32AT_AI_READBACK_WAIT (1 << 7) /* DMM32AT_AI_STATUS_STS */
-#define DMM32AT_AI_READBACK_RANGE (1 << 3)
-#define DMM32AT_AI_READBACK_ADBU (1 << 2)
+#define DMM32AT_AI_READBACK_WAIT BIT(7) /* DMM32AT_AI_STATUS_STS */
+#define DMM32AT_AI_READBACK_RANGE BIT(3)
+#define DMM32AT_AI_READBACK_ADBU BIT(2)
#define DMM32AT_AI_READBACK_GAIN_MASK (3 << 0)
#define DMM32AT_CLK1 0x0d
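The DMM32AT_CTRL_PAGE() field above multiplexes the upper registers between the 8254, 8255 and calibration pages. A minimal sketch of the access pattern using only the defines above ("dev" as in the surrounding drivers; illustrative, not from this patch):

	/* select the 8255 page, access it, then restore the 8254 page */
	outb(DMM32AT_CTRL_PAGE_8255, dev->iobase + DMM32AT_CTRL_REG);
	/* ... paged 8255 register accesses ... */
	outb(DMM32AT_CTRL_PAGE_8254, dev->iobase + DMM32AT_CTRL_REG);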
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 6c7b4d27c27c..c2ce1eb87385 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -4,30 +4,30 @@
*
*/
/*
-Driver: dt2801
-Description: Data Translation DT2801 series and DT01-EZ
-Author: ds
-Status: works
-Devices: [Data Translation] DT2801 (dt2801), DT2801-A, DT2801/5716A,
- DT2805, DT2805/5716A, DT2808, DT2818, DT2809, DT01-EZ
-
-This driver can autoprobe the type of board.
-
-Configuration options:
- [0] - I/O port base address
- [1] - unused
- [2] - A/D reference 0=differential, 1=single-ended
- [3] - A/D range
- 0 = [-10, 10]
- 1 = [0,10]
- [4] - D/A 0 range
- 0 = [-10, 10]
- 1 = [-5,5]
- 2 = [-2.5,2.5]
- 3 = [0,10]
- 4 = [0,5]
- [5] - D/A 1 range (same choices)
-*/
+ * Driver: dt2801
+ * Description: Data Translation DT2801 series and DT01-EZ
+ * Author: ds
+ * Status: works
+ * Devices: [Data Translation] DT2801 (dt2801), DT2801-A, DT2801/5716A,
+ * DT2805, DT2805/5716A, DT2808, DT2818, DT2809, DT01-EZ
+ *
+ * This driver can autoprobe the type of board.
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - unused
+ * [2] - A/D reference 0=differential, 1=single-ended
+ * [3] - A/D range
+ * 0 = [-10, 10]
+ * 1 = [0,10]
+ * [4] - D/A 0 range
+ * 0 = [-10, 10]
+ * 1 = [-5,5]
+ * 2 = [-2.5,2.5]
+ * 3 = [0,10]
+ * 4 = [0,5]
+ * [5] - D/A 1 range (same choices)
+ */
#include <linux/module.h>
#include "../comedidev.h"
@@ -65,9 +65,10 @@ Configuration options:
#define DT_C_SET_AD 0xd
#define DT_C_READ_AD 0xe
-/* Command modifiers (only used with read/write), EXTTRIG can be
- used with some other commands.
-*/
+/*
+ * Command modifiers (only used with read/write), EXTTRIG can be
+ * used with some other commands.
+ */
#define DT_MOD_DMA BIT(4)
#define DT_MOD_CONT BIT(5)
#define DT_MOD_EXTCLK BIT(6)
@@ -135,9 +136,10 @@ struct dt2801_board {
int dabits;
};
-/* Typeid's for the different boards of the DT2801-series
- (taken from the test-software, that comes with the board)
- */
+/*
+ * Typeid's for the different boards of the DT2801-series
+ * (taken from the test-software, that comes with the board)
+ */
static const struct dt2801_board boardtypes[] = {
{
.name = "dt2801",
@@ -209,15 +211,18 @@ struct dt2801_private {
const struct comedi_lrange *dac_range_types[2];
};
-/* These are the low-level routines:
- writecommand: write a command to the board
- writedata: write data byte
- readdata: read data byte
+/*
+ * These are the low-level routines:
+ * writecommand: write a command to the board
+ * writedata: write data byte
+ * readdata: read data byte
*/
-/* Only checks DataOutReady-flag, not the Ready-flag as it is done
- in the examples of the manual. I don't see why this should be
- necessary. */
+/*
+ * Only checks DataOutReady-flag, not the Ready-flag as it is done
+ * in the examples of the manual. I don't see why this should be
+ * necessary.
+ */
static int dt2801_readdata(struct comedi_device *dev, int *data)
{
int stat = 0;
@@ -517,14 +522,14 @@ static int dt2801_dio_insn_config(struct comedi_device *dev,
}
/*
- options:
- [0] - i/o base
- [1] - unused
- [2] - a/d 0=differential, 1=single-ended
- [3] - a/d range 0=[-10,10], 1=[0,10]
- [4] - dac0 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
- [5] - dac1 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
-*/
+ * options:
+ * [0] - i/o base
+ * [1] - unused
+ * [2] - a/d 0=differential, 1=single-ended
+ * [3] - a/d range 0=[-10,10], 1=[0,10]
+ * [4] - dac0 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
+ * [5] - dac1 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
+ */
static int dt2801_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct dt2801_board *board;
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index a80773291fdc..904f637797b6 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -1,224 +1,469 @@
/*
- comedi/drivers/dt2811.c
- Hardware driver for Data Translation DT2811
-
- COMEDI - Linux Control and Measurement Device Interface
- History:
- Base Version - David A. Schleef <ds@schleef.org>
- December 1998 - Updated to work. David does not have a DT2811
- board any longer so this was suffering from bitrot.
- Updated performed by ...
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * Comedi driver for Data Translation DT2811
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
+
/*
-Driver: dt2811
-Description: Data Translation DT2811
-Author: ds
-Devices: [Data Translation] DT2811-PGL (dt2811-pgl), DT2811-PGH (dt2811-pgh)
-Status: works
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ, although this is currently unused
- [2] - A/D reference
- 0 = signle-ended
- 1 = differential
- 2 = pseudo-differential (common reference)
- [3] - A/D range
- 0 = [-5, 5]
- 1 = [-2.5, 2.5]
- 2 = [0, 5]
- [4] - D/A 0 range (same choices)
- [4] - D/A 1 range (same choices)
-*/
+ * Driver: dt2811
+ * Description: Data Translation DT2811
+ * Author: ds
+ * Devices: [Data Translation] DT2811-PGL (dt2811-pgl), DT2811-PGH (dt2811-pgh)
+ * Status: works
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional, needed for async command support)
+ * [2] - A/D reference (# of analog inputs)
+ * 0 = single-ended (16 channels)
+ * 1 = differential (8 channels)
+ * 2 = pseudo-differential (16 channels)
+ * [3] - A/D range (deprecated, see below)
+ * [4] - D/A 0 range (deprecated, see below)
+ * [5] - D/A 1 range (deprecated, see below)
+ *
+ * Notes:
+ * - A/D ranges are not programmable but the gain is. The AI subdevice has
+ * a range_table containing all the possible analog input range/gain
+ * options for the dt2811-pgh or dt2811-pgl. Use the range that matches
+ * your board configuration and the desired gain to correctly convert
+ * between data values and physical units and to set the correct output
+ * gain.
+ * - D/A ranges are not programmable. The AO subdevice has a range_table
+ * containing all the possible analog output ranges. Use the range
+ * that matches your board configuration to convert between data
+ * values and physical units.
+ */
#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
#include "../comedidev.h"
-static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- UNI_RANGE(0.625)
- }
+/*
+ * Register I/O map
+ */
+#define DT2811_ADCSR_REG 0x00 /* r/w A/D Control/Status */
+#define DT2811_ADCSR_ADDONE BIT(7) /* r 1=A/D conv done */
+#define DT2811_ADCSR_ADERROR BIT(6) /* r 1=A/D error */
+#define DT2811_ADCSR_ADBUSY BIT(5) /* r 1=A/D busy */
+#define DT2811_ADCSR_CLRERROR BIT(4)
+#define DT2811_ADCSR_DMAENB BIT(3) /* r/w 1=dma ena */
+#define DT2811_ADCSR_INTENB BIT(2) /* r/w 1=interrupts ena */
+#define DT2811_ADCSR_ADMODE(x) (((x) & 0x3) << 0)
+
+#define DT2811_ADGCR_REG 0x01 /* r/w A/D Gain/Channel */
+#define DT2811_ADGCR_GAIN(x) (((x) & 0x3) << 6)
+#define DT2811_ADGCR_CHAN(x) (((x) & 0xf) << 0)
+
+#define DT2811_ADDATA_LO_REG 0x02 /* r A/D Data low byte */
+#define DT2811_ADDATA_HI_REG 0x03 /* r A/D Data high byte */
+
+#define DT2811_DADATA_LO_REG(x) (0x02 + ((x) * 2)) /* w D/A Data low */
+#define DT2811_DADATA_HI_REG(x) (0x03 + ((x) * 2)) /* w D/A Data high */
+
+#define DT2811_DI_REG 0x06 /* r Digital Input Port 0 */
+#define DT2811_DO_REG 0x06 /* w Digital Output Port 1 */
+
+#define DT2811_TMRCTR_REG 0x07 /* r/w Timer/Counter */
+#define DT2811_TMRCTR_MANTISSA(x) (((x) & 0x7) << 3)
+#define DT2811_TMRCTR_EXPONENT(x) (((x) & 0x7) << 0)
+
+#define DT2811_OSC_BASE 1666 /* 600 kHz = 1666.6667ns */
+
+/*
+ * Timer frequency control:
+ * DT2811_TMRCTR_MANTISSA DT2811_TMRCTR_EXPONENT
+ * val divisor frequency val multiply divisor/divide frequency by
+ * 0 1 600 kHz 0 1
+ * 1 10 60 kHz 1 10
+ * 2 2 300 kHz 2 100
+ * 3 3 200 kHz 3 1000
+ * 4 4 150 kHz 4 10000
+ * 5 5 120 kHz 5 100000
+ * 6 6 100 kHz 6 1000000
+ * 7 12 50 kHz 7 10000000
+ */
+static const unsigned int dt2811_clk_dividers[] = {
+ 1, 10, 2, 3, 4, 5, 6, 12
};
-static const struct comedi_lrange range_dt2811_pgh_ai_2_5_bipolar = {
- 4, {
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125)
- }
+static const unsigned int dt2811_clk_multipliers[] = {
+ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000
};
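Together with DT2811_OSC_BASE, these tables fully determine the programmable sample periods. A sketch of the resulting period for a given mantissa/exponent pair (hypothetical helper, not part of this patch):

static unsigned long long dt2811_period_ns(unsigned int mantissa,
					    unsigned int exponent)
{
	/* 600 kHz base clock: one tick is DT2811_OSC_BASE nanoseconds */
	return (unsigned long long)DT2811_OSC_BASE *
	       dt2811_clk_dividers[mantissa & 0x7] *
	       dt2811_clk_multipliers[exponent & 0x7];
}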
-static const struct comedi_lrange range_dt2811_pgh_ai_5_bipolar = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
+/*
+ * The Analog Input range is set using jumpers on the board.
+ *
+ * Input Range W9 W10
+ * -5V to +5V In Out
+ * -2.5V to +2.5V In In
+ * 0V to +5V Out In
+ *
+ * The gain may be set to 1, 2, 4, or 8 (on the dt2811-pgh) or to
+ * 1, 10, 100, 500 (on the dt2811-pgl).
+ */
+static const struct comedi_lrange dt2811_pgh_ai_ranges = {
+ 12, {
+ BIP_RANGE(5), /* range 0: gain=1 */
+ BIP_RANGE(2.5), /* range 1: gain=2 */
+ BIP_RANGE(1.25), /* range 2: gain=4 */
+ BIP_RANGE(0.625), /* range 3: gain=8 */
+
+ BIP_RANGE(2.5), /* range 0+4: gain=1 */
+ BIP_RANGE(1.25), /* range 1+4: gain=2 */
+ BIP_RANGE(0.625), /* range 2+4: gain=4 */
+ BIP_RANGE(0.3125), /* range 3+4: gain=8 */
+
+ UNI_RANGE(5), /* range 0+8: gain=1 */
+ UNI_RANGE(2.5), /* range 1+8: gain=2 */
+ UNI_RANGE(1.25), /* range 2+8: gain=4 */
+ UNI_RANGE(0.625) /* range 3+8: gain=8 */
}
};
-static const struct comedi_lrange range_dt2811_pgl_ai_5_unipolar = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(0.5),
- UNI_RANGE(0.05),
- UNI_RANGE(0.01)
+static const struct comedi_lrange dt2811_pgl_ai_ranges = {
+ 12, {
+ BIP_RANGE(5), /* range 0: gain=1 */
+ BIP_RANGE(0.5), /* range 1: gain=10 */
+ BIP_RANGE(0.05), /* range 2: gain=100 */
+ BIP_RANGE(0.01), /* range 3: gain=500 */
+
+ BIP_RANGE(2.5), /* range 0+4: gain=1 */
+ BIP_RANGE(0.25), /* range 1+4: gain=10 */
+ BIP_RANGE(0.025), /* range 2+4: gain=100 */
+ BIP_RANGE(0.005), /* range 3+4: gain=500 */
+
+ UNI_RANGE(5), /* range 0+8: gain=1 */
+ UNI_RANGE(0.5), /* range 1+8: gain=10 */
+ UNI_RANGE(0.05), /* range 2+8: gain=100 */
+ UNI_RANGE(0.01) /* range 3+8: gain=500 */
}
};
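To convert raw samples against these tables from userspace, a comedilib sketch like the following applies (illustrative; "device", "chan" and "raw" are assumed to exist). The range index selects both the gain programmed into ADGCR and the conversion used by comedi_to_phys():

	/* range 1 of the pgh table above: -2.5V to +2.5V, gain=2 */
	comedi_range *rng = comedi_get_range(device, 0, chan, 1);
	lsampl_t maxdata = comedi_get_maxdata(device, 0, chan);
	double volts = comedi_to_phys(raw, rng, maxdata);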
-static const struct comedi_lrange range_dt2811_pgl_ai_2_5_bipolar = {
- 4, {
+/*
+ * The Analog Output range is set per-channel using jumpers on the board.
+ *
+ * DAC0 Jumpers DAC1 Jumpers
+ * Output Range W5 W6 W7 W8 W1 W2 W3 W4
+ * -5V to +5V In Out In Out In Out In Out
+ * -2.5V to +2.5V In Out Out In In Out Out In
+ * 0 to +5V Out In Out In Out In Out In
+ */
+static const struct comedi_lrange dt2811_ao_ranges = {
+ 3, {
+ BIP_RANGE(5), /* default setting from factory */
BIP_RANGE(2.5),
- BIP_RANGE(0.25),
- BIP_RANGE(0.025),
- BIP_RANGE(0.005)
+ UNI_RANGE(5)
}
};
-static const struct comedi_lrange range_dt2811_pgl_ai_5_bipolar = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01)
- }
+struct dt2811_board {
+ const char *name;
+ unsigned int is_pgh:1;
};
-/*
+static const struct dt2811_board dt2811_boards[] = {
+ {
+ .name = "dt2811-pgh",
+ .is_pgh = 1,
+ }, {
+ .name = "dt2811-pgl",
+ },
+};
- 0x00 ADCSR R/W A/D Control/Status Register
- bit 7 - (R) 1 indicates A/D conversion done
- reading ADDAT clears bit
- (W) ignored
- bit 6 - (R) 1 indicates A/D error
- (W) ignored
- bit 5 - (R) 1 indicates A/D busy, cleared at end
- of conversion
- (W) ignored
- bit 4 - (R) 0
- (W)
- bit 3 - (R) 0
- bit 2 - (R/W) 1 indicates interrupts enabled
- bits 1,0 - (R/W) mode bits
- 00 single conversion on ADGCR load
- 01 continuous conversion, internal clock,
- (clock enabled on ADGCR load)
- 10 continuous conversion, internal clock,
- external trigger
- 11 continuous conversion, external clock,
- external trigger
-
- 0x01 ADGCR R/W A/D Gain/Channel Register
- bit 6,7 - (R/W) gain select
- 00 gain=1, both PGH, PGL models
- 01 gain=2 PGH, 10 PGL
- 10 gain=4 PGH, 100 PGL
- 11 gain=8 PGH, 500 PGL
- bit 4,5 - reserved
- bit 3-0 - (R/W) channel select
- channel number from 0-15
-
- 0x02,0x03 (R) ADDAT A/D Data Register
- (W) DADAT0 D/A Data Register 0
- 0x02 low byte
- 0x03 high byte
-
- 0x04,0x05 (W) DADAT0 D/A Data Register 1
-
- 0x06 (R) DIO0 Digital Input Port 0
- (W) DIO1 Digital Output Port 1
-
- 0x07 TMRCTR (R/W) Timer/Counter Register
- bits 6,7 - reserved
- bits 5-3 - Timer frequency control (mantissa)
- 543 divisor freqency (kHz)
- 000 1 600
- 001 10 60
- 010 2 300
- 011 3 200
- 100 4 150
- 101 5 120
- 110 6 100
- 111 12 50
- bits 2-0 - Timer frequency control (exponent)
- 210 multiply divisor/divide frequency by
- 000 1
- 001 10
- 010 100
- 011 1000
- 100 10000
- 101 100000
- 110 1000000
- 111 10000000
+struct dt2811_private {
+ unsigned int ai_divisor;
+};
- */
+static unsigned int dt2811_ai_read_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int val;
-#define TIMEOUT 10000
+ val = inb(dev->iobase + DT2811_ADDATA_LO_REG) |
+ (inb(dev->iobase + DT2811_ADDATA_HI_REG) << 8);
-#define DT2811_ADCSR 0
-#define DT2811_ADGCR 1
-#define DT2811_ADDATLO 2
-#define DT2811_ADDATHI 3
-#define DT2811_DADAT0LO 2
-#define DT2811_DADAT0HI 3
-#define DT2811_DADAT1LO 4
-#define DT2811_DADAT1HI 5
-#define DT2811_DIO 6
-#define DT2811_TMRCTR 7
+ return val & s->maxdata;
+}
-/*
- * flags
- */
+static irqreturn_t dt2811_interrupt(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned int status;
-/* ADCSR */
+ if (!dev->attached)
+ return IRQ_NONE;
-#define DT2811_ADDONE 0x80
-#define DT2811_ADERROR 0x40
-#define DT2811_ADBUSY 0x20
-#define DT2811_CLRERROR 0x10
-#define DT2811_INTENB 0x04
-#define DT2811_ADMODE 0x03
+ status = inb(dev->iobase + DT2811_ADCSR_REG);
-struct dt2811_board {
- const char *name;
- const struct comedi_lrange *bip_5;
- const struct comedi_lrange *bip_2_5;
- const struct comedi_lrange *unip_5;
-};
+ if (status & DT2811_ADCSR_ADERROR) {
+ async->events |= COMEDI_CB_OVERFLOW;
-enum { card_2811_pgh, card_2811_pgl };
+ outb(status | DT2811_ADCSR_CLRERROR,
+ dev->iobase + DT2811_ADCSR_REG);
+ }
-struct dt2811_private {
- int ntrig;
- int curadchan;
- enum {
- adc_singleended, adc_diff, adc_pseudo_diff
- } adc_mux;
- enum {
- dac_bipolar_5, dac_bipolar_2_5, dac_unipolar_5
- } dac_range[2];
- const struct comedi_lrange *range_type_list[2];
-};
+ if (status & DT2811_ADCSR_ADDONE) {
+ unsigned short val;
-static const struct comedi_lrange *dac_range_types[] = {
- &range_bipolar5,
- &range_bipolar2_5,
- &range_unipolar5
-};
+ val = dt2811_ai_read_sample(dev, s);
+ comedi_buf_write_samples(s, &val, 1);
+ }
+
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+
+ comedi_handle_events(dev, s);
+
+ return IRQ_HANDLED;
+}
+
+static int dt2811_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ /*
+ * Mode 0
+ * Single conversion
+ *
+ * Loading a chanspec will trigger a conversion.
+ */
+ outb(DT2811_ADCSR_ADMODE(0), dev->iobase + DT2811_ADCSR_REG);
+
+ return 0;
+}
+
+static void dt2811_ai_set_chanspec(struct comedi_device *dev,
+ unsigned int chanspec)
+{
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+
+ outb(DT2811_ADGCR_CHAN(chan) | DT2811_ADGCR_GAIN(range),
+ dev->iobase + DT2811_ADGCR_REG);
+}
+
+static int dt2811_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct dt2811_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int mode;
+
+ if (cmd->start_src == TRIG_NOW) {
+ /*
+ * Mode 1
+ * Continuous conversion, internal trigger and clock
+ *
+ * This resets the trigger flip-flop, disabling A/D strobes.
+ * The timer/counter register is loaded with the division
+ * ratio which will give the required sample rate.
+ *
+ * Loading the first chanspec sets the trigger flip-flop,
+ * enabling the timer/counter. A/D strobes are then generated
+ * at the rate set by the internal clock/divider.
+ */
+ mode = DT2811_ADCSR_ADMODE(1);
+ } else { /* TRIG_EXT */
+ if (cmd->convert_src == TRIG_TIMER) {
+ /*
+ * Mode 2
+ * Continuous conversion, external trigger
+ *
+ * Similar to Mode 1, with the exception that the
+ * trigger flip-flop must be set by a negative edge
+ * on the external trigger input.
+ */
+ mode = DT2811_ADCSR_ADMODE(2);
+ } else { /* TRIG_EXT */
+ /*
+ * Mode 3
+ * Continuous conversion, external trigger and clock
+ *
+ * Similar to Mode 2, with the exception that the
+ * conversion rate is set by the frequency of the
+ * external clock/divider input.
+ */
+ mode = DT2811_ADCSR_ADMODE(3);
+ }
+ }
+ outb(mode | DT2811_ADCSR_INTENB, dev->iobase + DT2811_ADCSR_REG);
+
+ /* load timer */
+ outb(devpriv->ai_divisor, dev->iobase + DT2811_TMRCTR_REG);
+
+ /* load chanspec - enables timer */
+ dt2811_ai_set_chanspec(dev, cmd->chanlist[0]);
+
+ return 0;
+}
+
+static unsigned int dt2811_ns_to_timer(unsigned int *nanosec,
+ unsigned int flags)
+{
+ unsigned long long ns = *nanosec;
+ unsigned int ns_lo = COMEDI_MIN_SPEED;
+ unsigned int ns_hi = 0;
+ unsigned int divisor_hi = 0;
+ unsigned int divisor_lo = 0;
+ unsigned int _div;
+ unsigned int _mult;
+
+ /*
+ * Work through all the divider/multiplier values to find the two
+ * closest divisors to generate the requested nanosecond timing.
+ */
+ for (_div = 0; _div <= 7; _div++) {
+ for (_mult = 0; _mult <= 7; _mult++) {
+ unsigned int div = dt2811_clk_dividers[_div];
+ unsigned int mult = dt2811_clk_multipliers[_mult];
+ unsigned long long divider = div * mult;
+ unsigned int divisor = DT2811_TMRCTR_MANTISSA(_div) |
+ DT2811_TMRCTR_EXPONENT(_mult);
+
+ /*
+ * The slowest speed the timer can be configured for is
+ * 0.005 Hz (600 kHz / 120000000), which requires 37 bits
+ * to represent the nanosecond value. Limit the slowest
+ * timing to what comedi handles (32 bits).
+ */
+ ns = divider * DT2811_OSC_BASE;
+ if (ns > COMEDI_MIN_SPEED)
+ continue;
+
+ /* Check for fastest found timing */
+ if (ns <= *nanosec && ns > ns_hi) {
+ ns_hi = ns;
+ divisor_hi = divisor;
+ }
+ /* Check for slowest found timing */
+ if (ns >= *nanosec && ns < ns_lo) {
+ ns_lo = ns;
+ divisor_lo = divisor;
+ }
+ }
+ }
+
+ /*
+ * The slowest found timing will be invalid if the requested timing
+ * is faster than what can be generated by the timer. Fix it so that
+ * CMDF_ROUND_UP returns valid timing.
+ */
+ if (ns_lo == COMEDI_MIN_SPEED) {
+ ns_lo = ns_hi;
+ divisor_lo = divisor_hi;
+ }
+ /*
+ * The fastest found timing will be invalid if the requested timing
+ * is less than what can be generated by the timer. Fix it so that
+ * CMDF_ROUND_NEAREST and CMDF_ROUND_DOWN return valid timing.
+ */
+ if (ns_hi == 0) {
+ ns_hi = ns_lo;
+ divisor_hi = divisor_lo;
+ }
+
+ switch (flags & CMDF_ROUND_MASK) {
+ case CMDF_ROUND_NEAREST:
+ default:
+ if (ns_hi - *nanosec < *nanosec - ns_lo) {
+ *nanosec = ns_lo;
+ return divisor_lo;
+ }
+ *nanosec = ns_hi;
+ return divisor_hi;
+ case CMDF_ROUND_UP:
+ *nanosec = ns_lo;
+ return divisor_lo;
+ case CMDF_ROUND_DOWN:
+ *nanosec = ns_hi;
+ return divisor_hi;
+ }
+}
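For example (numbers derived from the tables above, illustrative only): requesting a 100 us period with CMDF_ROUND_NEAREST lands on 99960 ns, since divider 6 times multiplier 10 gives 60 ticks of 1666 ns, and the next slower achievable timing is 166600 ns:

	unsigned int ns = 100000;	/* requested period */
	unsigned int divisor = dt2811_ns_to_timer(&ns, CMDF_ROUND_NEAREST);

	/* ns is now 99960; divisor encodes mantissa=6, exponent=1 (0x31) */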
+
+static int dt2811_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ struct dt2811_private *devpriv = dev->private;
+ unsigned int arg;
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
+ err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
+ err |= comedi_check_trigger_src(&cmd->convert_src,
+ TRIG_TIMER | TRIG_EXT);
+ err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= comedi_check_trigger_is_unique(cmd->start_src);
+ err |= comedi_check_trigger_is_unique(cmd->convert_src);
+ err |= comedi_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (cmd->convert_src == TRIG_EXT && cmd->start_src != TRIG_EXT)
+ err |= -EINVAL;
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ if (cmd->convert_src == TRIG_TIMER)
+ err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 12500);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
+ cmd->chanlist_len);
+ if (cmd->stop_src == TRIG_COUNT)
+ err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
+ else /* TRIG_NONE */
+ err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* Step 4: fix up any arguments */
+
+ if (cmd->convert_src == TRIG_TIMER) {
+ arg = cmd->convert_arg;
+ devpriv->ai_divisor = dt2811_ns_to_timer(&arg, cmd->flags);
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+ } else { /* TRIG_EXT */
+ /* The convert_arg is used to set the divisor. */
+ devpriv->ai_divisor = cmd->convert_arg;
+ }
+
+ if (err)
+ return 4;
+
+ /* Step 5: check channel list if it exists */
+
+ return 0;
+}
static int dt2811_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -227,32 +472,33 @@ static int dt2811_ai_eoc(struct comedi_device *dev,
{
unsigned int status;
- status = inb(dev->iobase + DT2811_ADCSR);
- if ((status & DT2811_ADBUSY) == 0)
+ status = inb(dev->iobase + DT2811_ADCSR_REG);
+ if ((status & DT2811_ADCSR_ADBUSY) == 0)
return 0;
return -EBUSY;
}
-static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int dt2811_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec);
int ret;
int i;
+ /* We will already be in Mode 0 */
for (i = 0; i < insn->n; i++) {
- outb(chan, dev->iobase + DT2811_ADGCR);
+ /* load chanspec and trigger conversion */
+ dt2811_ai_set_chanspec(dev, insn->chanspec);
ret = comedi_timeout(dev, s, insn, dt2811_ai_eoc, 0);
if (ret)
return ret;
- data[i] = inb(dev->iobase + DT2811_ADDATLO);
- data[i] |= inb(dev->iobase + DT2811_ADDATHI) << 8;
- data[i] &= 0xfff;
+ data[i] = dt2811_ai_read_sample(dev, s);
}
- return i;
+ return insn->n;
}
static int dt2811_ao_insn_write(struct comedi_device *dev,
@@ -266,9 +512,9 @@ static int dt2811_ao_insn_write(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
val = data[i];
- outb(val & 0xff, dev->iobase + DT2811_DADAT0LO + 2 * chan);
+ outb(val & 0xff, dev->iobase + DT2811_DADATA_LO_REG(chan));
outb((val >> 8) & 0xff,
- dev->iobase + DT2811_DADAT0HI + 2 * chan);
+ dev->iobase + DT2811_DADATA_HI_REG(chan));
}
s->readback[chan] = val;
@@ -277,9 +523,10 @@ static int dt2811_ao_insn_write(struct comedi_device *dev,
static int dt2811_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- data[1] = inb(dev->iobase + DT2811_DIO);
+ data[1] = inb(dev->iobase + DT2811_DI_REG);
return insn->n;
}
@@ -290,185 +537,118 @@ static int dt2811_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DT2811_DIO);
+ outb(s->state, dev->iobase + DT2811_DO_REG);
data[1] = s->state;
return insn->n;
}
-/*
- options[0] Board base address
- options[1] IRQ
- options[2] Input configuration
- 0 == single-ended
- 1 == differential
- 2 == pseudo-differential
- options[3] Analog input range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
- options[4] Analog output 0 range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
- options[5] Analog output 1 range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
-*/
+static void dt2811_reset(struct comedi_device *dev)
+{
+ /* This is the initialization sequence from the users manual */
+ outb(DT2811_ADCSR_ADMODE(0), dev->iobase + DT2811_ADCSR_REG);
+ usleep_range(100, 1000);
+ inb(dev->iobase + DT2811_ADDATA_LO_REG);
+ inb(dev->iobase + DT2811_ADDATA_HI_REG);
+ outb(DT2811_ADCSR_ADMODE(0) | DT2811_ADCSR_CLRERROR,
+ dev->iobase + DT2811_ADCSR_REG);
+}
+
static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
- /* int i; */
const struct dt2811_board *board = dev->board_ptr;
struct dt2811_private *devpriv;
- int ret;
struct comedi_subdevice *s;
+ int ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
ret = comedi_request_region(dev, it->options[0], 0x8);
if (ret)
return ret;
-#if 0
- outb(0, dev->iobase + DT2811_ADCSR);
- udelay(100);
- i = inb(dev->iobase + DT2811_ADDATLO);
- i = inb(dev->iobase + DT2811_ADDATHI);
-#endif
+ dt2811_reset(dev);
+
+ /* IRQs 2, 3, 5, and 7 are valid for async command support */
+ if (it->options[1] <= 7 && (BIT(it->options[1]) & 0xac)) {
+ ret = request_irq(it->options[1], dt2811_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
+ }
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- switch (it->options[2]) {
- case 0:
- devpriv->adc_mux = adc_singleended;
- break;
- case 1:
- devpriv->adc_mux = adc_diff;
- break;
- case 2:
- devpriv->adc_mux = adc_pseudo_diff;
- break;
- default:
- devpriv->adc_mux = adc_singleended;
- break;
- }
- switch (it->options[4]) {
- case 0:
- devpriv->dac_range[0] = dac_bipolar_5;
- break;
- case 1:
- devpriv->dac_range[0] = dac_bipolar_2_5;
- break;
- case 2:
- devpriv->dac_range[0] = dac_unipolar_5;
- break;
- default:
- devpriv->dac_range[0] = dac_bipolar_5;
- break;
- }
- switch (it->options[5]) {
- case 0:
- devpriv->dac_range[1] = dac_bipolar_5;
- break;
- case 1:
- devpriv->dac_range[1] = dac_bipolar_2_5;
- break;
- case 2:
- devpriv->dac_range[1] = dac_unipolar_5;
- break;
- default:
- devpriv->dac_range[1] = dac_bipolar_5;
- break;
- }
-
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- /* initialize the ADC subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = devpriv->adc_mux == adc_diff ? 8 : 16;
- s->insn_read = dt2811_ai_insn;
- s->maxdata = 0xfff;
- switch (it->options[3]) {
- case 0:
- default:
- s->range_table = board->bip_5;
- break;
- case 1:
- s->range_table = board->bip_2_5;
- break;
- case 2:
- s->range_table = board->unip_5;
- break;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE |
+ ((it->options[2] == 1) ? SDF_DIFF :
+ (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND);
+ s->n_chan = (it->options[2] == 1) ? 8 : 16;
+ s->maxdata = 0x0fff;
+ s->range_table = board->is_pgh ? &dt2811_pgh_ai_ranges
+ : &dt2811_pgl_ai_ranges;
+ s->insn_read = dt2811_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 1;
+ s->do_cmdtest = dt2811_ai_cmdtest;
+ s->do_cmd = dt2811_ai_cmd;
+ s->cancel = dt2811_ai_cancel;
}
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0xfff;
- s->range_table_list = devpriv->range_type_list;
- devpriv->range_type_list[0] = dac_range_types[devpriv->dac_range[0]];
- devpriv->range_type_list[1] = dac_range_types[devpriv->dac_range[1]];
- s->insn_write = dt2811_ao_insn_write;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 2;
+ s->maxdata = 0x0fff;
+ s->range_table = &dt2811_ao_ranges;
+ s->insn_write = dt2811_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
+ /* Digital Input subdevice */
s = &dev->subdevices[2];
- /* di subdevice */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 8;
- s->insn_bits = dt2811_di_insn_bits;
- s->maxdata = 1;
- s->range_table = &range_digital;
-
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = dt2811_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[3];
- /* do subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->insn_bits = dt2811_do_insn_bits;
- s->maxdata = 1;
- s->state = 0;
- s->range_table = &range_digital;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = dt2811_do_insn_bits;
return 0;
}
-static const struct dt2811_board boardtypes[] = {
- {
- .name = "dt2811-pgh",
- .bip_5 = &range_dt2811_pgh_ai_5_bipolar,
- .bip_2_5 = &range_dt2811_pgh_ai_2_5_bipolar,
- .unip_5 = &range_dt2811_pgh_ai_5_unipolar,
- }, {
- .name = "dt2811-pgl",
- .bip_5 = &range_dt2811_pgl_ai_5_bipolar,
- .bip_2_5 = &range_dt2811_pgl_ai_2_5_bipolar,
- .unip_5 = &range_dt2811_pgl_ai_5_unipolar,
- },
-};
-
static struct comedi_driver dt2811_driver = {
.driver_name = "dt2811",
.module = THIS_MODULE,
.attach = dt2811_attach,
.detach = comedi_legacy_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
+ .board_name = &dt2811_boards[0].name,
+ .num_names = ARRAY_SIZE(dt2811_boards),
.offset = sizeof(struct dt2811_board),
};
module_comedi_driver(dt2811_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Data Translation DT2811 series boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index 66705f9a0621..2f903bedcefa 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -1,38 +1,38 @@
/*
- comedi/drivers/dt2814.c
- Hardware driver for Data Translation DT2814
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/dt2814.c
+ * Hardware driver for Data Translation DT2814
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-Driver: dt2814
-Description: Data Translation DT2814
-Author: ds
-Status: complete
-Devices: [Data Translation] DT2814 (dt2814)
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ
-
-This card has 16 analog inputs multiplexed onto a 12 bit ADC. There
-is a minimally useful onboard clock. The base frequency for the
-clock is selected by jumpers, and the clock divider can be selected
-via programmed I/O. Unfortunately, the clock divider can only be
-a power of 10, from 1 to 10^7, of which only 3 or 4 are useful. In
-addition, the clock does not seem to be very accurate.
-*/
+ * Driver: dt2814
+ * Description: Data Translation DT2814
+ * Author: ds
+ * Status: complete
+ * Devices: [Data Translation] DT2814 (dt2814)
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ
+ *
+ * This card has 16 analog inputs multiplexed onto a 12 bit ADC. There
+ * is a minimally useful onboard clock. The base frequency for the
+ * clock is selected by jumpers, and the clock divider can be selected
+ * via programmed I/O. Unfortunately, the clock divider can only be
+ * a power of 10, from 1 to 10^7, of which only 3 or 4 are useful. In
+ * addition, the clock does not seem to be very accurate.
+ */
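Since the divider is restricted to powers of ten, the achievable conversion periods are just the jumpered base period scaled by 10^n. A hypothetical helper (illustrative, not part of this patch; "base_ns" is the base clock period selected by the jumpers):

static unsigned long long dt2814_period_ns(unsigned long long base_ns,
					    unsigned int n)
{
	while (n--)
		base_ns *= 10;	/* divider is 10^n, n = 0..7 */
	return base_ns;
}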
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -215,8 +215,10 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
int i;
outb(0, dev->iobase + DT2814_CSR);
- /* note: turning off timed mode triggers another
- sample. */
+ /*
+ * note: turning off timed mode triggers another
+ * sample.
+ */
for (i = 0; i < DT2814_TIMEOUT; i++) {
if (inb(dev->iobase + DT2814_CSR) & DT2814_FINISH)
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index fb08569c1ac1..0be77cc40a79 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -1,55 +1,55 @@
/*
- comedi/drivers/dt2815.c
- Hardware driver for Data Translation DT2815
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * comedi/drivers/dt2815.c
+ * Hardware driver for Data Translation DT2815
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
/*
-Driver: dt2815
-Description: Data Translation DT2815
-Author: ds
-Status: mostly complete, untested
-Devices: [Data Translation] DT2815 (dt2815)
-
-I'm not sure anyone has ever tested this board. If you have information
-contrary, please update.
-
-Configuration options:
- [0] - I/O port base base address
- [1] - IRQ (unused)
- [2] - Voltage unipolar/bipolar configuration
- 0 == unipolar 5V (0V -- +5V)
- 1 == bipolar 5V (-5V -- +5V)
- [3] - Current offset configuration
- 0 == disabled (0mA -- +32mAV)
- 1 == enabled (+4mA -- +20mAV)
- [4] - Firmware program configuration
- 0 == program 1 (see manual table 5-4)
- 1 == program 2 (see manual table 5-4)
- 2 == program 3 (see manual table 5-4)
- 3 == program 4 (see manual table 5-4)
- [5] - Analog output 0 range configuration
- 0 == voltage
- 1 == current
- [6] - Analog output 1 range configuration (same options)
- [7] - Analog output 2 range configuration (same options)
- [8] - Analog output 3 range configuration (same options)
- [9] - Analog output 4 range configuration (same options)
- [10] - Analog output 5 range configuration (same options)
- [11] - Analog output 6 range configuration (same options)
- [12] - Analog output 7 range configuration (same options)
-*/
+ * Driver: dt2815
+ * Description: Data Translation DT2815
+ * Author: ds
+ * Status: mostly complete, untested
+ * Devices: [Data Translation] DT2815 (dt2815)
+ *
+ * I'm not sure anyone has ever tested this board. If you have
+ * information to the contrary, please update.
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (unused)
+ * [2] - Voltage unipolar/bipolar configuration
+ * 0 == unipolar 5V (0V -- +5V)
+ * 1 == bipolar 5V (-5V -- +5V)
+ * [3] - Current offset configuration
+ * 0 == disabled (0mA -- +32mA)
+ * 1 == enabled (+4mA -- +20mA)
+ * [4] - Firmware program configuration
+ * 0 == program 1 (see manual table 5-4)
+ * 1 == program 2 (see manual table 5-4)
+ * 2 == program 3 (see manual table 5-4)
+ * 3 == program 4 (see manual table 5-4)
+ * [5] - Analog output 0 range configuration
+ * 0 == voltage
+ * 1 == current
+ * [6] - Analog output 1 range configuration (same options)
+ * [7] - Analog output 2 range configuration (same options)
+ * [8] - Analog output 3 range configuration (same options)
+ * [9] - Analog output 4 range configuration (same options)
+ * [10] - Analog output 5 range configuration (same options)
+ * [11] - Analog output 6 range configuration (same options)
+ * [12] - Analog output 7 range configuration (same options)
+ */
#include <linux/module.h>
#include "../comedidev.h"
@@ -120,27 +120,27 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
}
/*
- options[0] Board base address
- options[1] IRQ (not applicable)
- options[2] Voltage unipolar/bipolar configuration
- 0 == unipolar 5V (0V -- +5V)
- 1 == bipolar 5V (-5V -- +5V)
- options[3] Current offset configuration
- 0 == disabled (0mA -- +32mAV)
- 1 == enabled (+4mA -- +20mAV)
- options[4] Firmware program configuration
- 0 == program 1 (see manual table 5-4)
- 1 == program 2 (see manual table 5-4)
- 2 == program 3 (see manual table 5-4)
- 3 == program 4 (see manual table 5-4)
- options[5] Analog output 0 range configuration
- 0 == voltage
- 1 == current
- options[6] Analog output 1 range configuration
- ...
- options[12] Analog output 7 range configuration
- 0 == voltage
- 1 == current
+ * options[0] Board base address
+ * options[1] IRQ (not applicable)
+ * options[2] Voltage unipolar/bipolar configuration
+ * 0 == unipolar 5V (0V -- +5V)
+ * 1 == bipolar 5V (-5V -- +5V)
+ * options[3] Current offset configuration
+ * 0 == disabled (0mA -- +32mAV)
+ * 1 == enabled (+4mA -- +20mAV)
+ * options[4] Firmware program configuration
+ * 0 == program 1 (see manual table 5-4)
+ * 1 == program 2 (see manual table 5-4)
+ * 2 == program 3 (see manual table 5-4)
+ * 3 == program 4 (see manual table 5-4)
+ * options[5] Analog output 0 range configuration
+ * 0 == voltage
+ * 1 == current
+ * options[6] Analog output 1 range configuration
+ * ...
+ * options[12] Analog output 7 range configuration
+ * 0 == voltage
+ * 1 == current
*/
static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it)
diff --git a/drivers/staging/comedi/drivers/dt2817.c b/drivers/staging/comedi/drivers/dt2817.c
index 5131deebf66f..39d2566e49bf 100644
--- a/drivers/staging/comedi/drivers/dt2817.c
+++ b/drivers/staging/comedi/drivers/dt2817.c
@@ -1,37 +1,37 @@
/*
- comedi/drivers/dt2817.c
- Hardware driver for Data Translation DT2817
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/dt2817.c
+ * Hardware driver for Data Translation DT2817
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-Driver: dt2817
-Description: Data Translation DT2817
-Author: ds
-Status: complete
-Devices: [Data Translation] DT2817 (dt2817)
-
-A very simple digital I/O card. Four banks of 8 lines, each bank
-is configurable for input or output. One wonders why it takes a
-50 page manual to describe this thing.
-
-The driver (which, btw, is much less than 50 pages) has 1 subdevice
-with 32 channels, configurable in groups of 8.
-
-Configuration options:
- [0] - I/O port base base address
-*/
+ * Driver: dt2817
+ * Description: Data Translation DT2817
+ * Author: ds
+ * Status: complete
+ * Devices: [Data Translation] DT2817 (dt2817)
+ *
+ * A very simple digital I/O card. Four banks of 8 lines, each bank
+ * is configurable for input or output. One wonders why it takes a
+ * 50 page manual to describe this thing.
+ *
+ * The driver (which, btw, is much less than 50 pages) has 1 subdevice
+ * with 32 channels, configurable in groups of 8.
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ */
#include <linux/module.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 63b5cbc44bda..af4b4175af4d 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -158,10 +158,7 @@ static void gsc_hpdi_drain_dma(struct comedi_device *dev, unsigned int channel)
unsigned int size;
unsigned int next;
- if (channel)
- next = readl(devpriv->plx9080_mmio + PLX_DMA1_PCI_ADDRESS_REG);
- else
- next = readl(devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
+ next = readl(devpriv->plx9080_mmio + PLX_REG_DMAPADR(channel));
idx = devpriv->dma_desc_index;
start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
@@ -201,8 +198,9 @@ static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
if (!dev->attached)
return IRQ_NONE;
- plx_status = readl(devpriv->plx9080_mmio + PLX_INTRCS_REG);
- if ((plx_status & (ICS_DMA0_A | ICS_DMA1_A | ICS_LIA)) == 0)
+ plx_status = readl(devpriv->plx9080_mmio + PLX_REG_INTCSR);
+ if ((plx_status &
+ (PLX_INTCSR_DMA0IA | PLX_INTCSR_DMA1IA | PLX_INTCSR_PLIA)) == 0)
return IRQ_NONE;
hpdi_intr_status = readl(dev->mmio + INTERRUPT_STATUS_REG);
@@ -213,32 +211,32 @@ static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
/* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
- dma0_status = readb(devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
- if (plx_status & ICS_DMA0_A) {
+ dma0_status = readb(devpriv->plx9080_mmio + PLX_REG_DMACSR0);
+ if (plx_status & PLX_INTCSR_DMA0IA) {
/* dma chan 0 interrupt */
- writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+ writeb((dma0_status & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_mmio + PLX_REG_DMACSR0);
- if (dma0_status & PLX_DMA_EN_BIT)
+ if (dma0_status & PLX_DMACSR_ENABLE)
gsc_hpdi_drain_dma(dev, 0);
}
spin_unlock_irqrestore(&dev->spinlock, flags);
/* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
- dma1_status = readb(devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
- if (plx_status & ICS_DMA1_A) {
+ dma1_status = readb(devpriv->plx9080_mmio + PLX_REG_DMACSR1);
+ if (plx_status & PLX_INTCSR_DMA1IA) {
/* XXX */ /* dma chan 1 interrupt */
- writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
+ writeb((dma1_status & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_mmio + PLX_REG_DMACSR1);
}
spin_unlock_irqrestore(&dev->spinlock, flags);
/* clear possible plx9080 interrupt sources */
- if (plx_status & ICS_LDIA) {
+ if (plx_status & PLX_INTCSR_LDBIA) {
/* clear local doorbell interrupt */
- plx_bits = readl(devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
- writel(plx_bits, devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
+ plx_bits = readl(devpriv->plx9080_mmio + PLX_REG_L2PDBELL);
+ writel(plx_bits, devpriv->plx9080_mmio + PLX_REG_L2PDBELL);
}
if (hpdi_board_status & RX_OVERRUN_BIT) {
@@ -307,19 +305,19 @@ static int gsc_hpdi_cmd(struct comedi_device *dev,
* occasionally cause problems with transfer of first dma
* block. Initializing them to zero seems to fix the problem.
*/
- writel(0, devpriv->plx9080_mmio + PLX_DMA0_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
- writel(0, devpriv->plx9080_mmio + PLX_DMA0_LOCAL_ADDRESS_REG);
+ writel(0, devpriv->plx9080_mmio + PLX_REG_DMASIZ0);
+ writel(0, devpriv->plx9080_mmio + PLX_REG_DMAPADR0);
+ writel(0, devpriv->plx9080_mmio + PLX_REG_DMALADR0);
/* give location of first dma descriptor */
- bits = devpriv->dma_desc_phys_addr | PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT | PLX_XFER_LOCAL_TO_PCI;
- writel(bits, devpriv->plx9080_mmio + PLX_DMA0_DESCRIPTOR_REG);
+ bits = devpriv->dma_desc_phys_addr | PLX_DMADPR_DESCPCI |
+ PLX_DMADPR_TCINTR | PLX_DMADPR_XFERL2P;
+ writel(bits, devpriv->plx9080_mmio + PLX_REG_DMADPR0);
/* enable dma transfer */
spin_lock_irqsave(&dev->spinlock, flags);
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+ writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_START | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_mmio + PLX_REG_DMACSR0);
spin_unlock_irqrestore(&dev->spinlock, flags);
if (cmd->stop_src == TRIG_COUNT)
@@ -424,8 +422,8 @@ static int gsc_hpdi_setup_dma_descriptors(struct comedi_device *dev,
{
struct hpdi_private *devpriv = dev->private;
dma_addr_t phys_addr = devpriv->dma_desc_phys_addr;
- u32 next_bits = PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI;
+ u32 next_bits = PLX_DMADPR_DESCPCI | PLX_DMADPR_TCINTR |
+ PLX_DMADPR_XFERL2P;
unsigned int offset = 0;
unsigned int idx = 0;
unsigned int i;
@@ -536,9 +534,10 @@ static int gsc_hpdi_init(struct comedi_device *dev)
/* enable interrupts */
plx_intcsr_bits =
- ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
- ICS_DMA0_E;
- writel(plx_intcsr_bits, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+ PLX_INTCSR_LSEABORTEN | PLX_INTCSR_LSEPARITYEN | PLX_INTCSR_PIEN |
+ PLX_INTCSR_PLIEN | PLX_INTCSR_PABORTIEN | PLX_INTCSR_LIOEN |
+ PLX_INTCSR_DMA0IEN;
+ writel(plx_intcsr_bits, devpriv->plx9080_mmio + PLX_REG_INTCSR);
return 0;
}
@@ -550,13 +549,13 @@ static void gsc_hpdi_init_plx9080(struct comedi_device *dev)
void __iomem *plx_iobase = devpriv->plx9080_mmio;
#ifdef __BIG_ENDIAN
- bits = BIGEND_DMA0 | BIGEND_DMA1;
+ bits = PLX_BIGEND_DMA0 | PLX_BIGEND_DMA1;
#else
bits = 0;
#endif
- writel(bits, devpriv->plx9080_mmio + PLX_BIGEND_REG);
+ writel(bits, devpriv->plx9080_mmio + PLX_REG_BIGEND);
- writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+ writel(0, devpriv->plx9080_mmio + PLX_REG_INTCSR);
gsc_hpdi_abort_dma(dev, 0);
gsc_hpdi_abort_dma(dev, 1);
@@ -564,27 +563,27 @@ static void gsc_hpdi_init_plx9080(struct comedi_device *dev)
/* configure dma0 mode */
bits = 0;
/* enable ready input */
- bits |= PLX_DMA_EN_READYIN_BIT;
+ bits |= PLX_DMAMODE_READYIEN;
/* enable dma chaining */
- bits |= PLX_EN_CHAIN_BIT;
+ bits |= PLX_DMAMODE_CHAINEN;
/*
* enable interrupt on dma done
* (probably don't need this, since chain never finishes)
*/
- bits |= PLX_EN_DMA_DONE_INTR_BIT;
+ bits |= PLX_DMAMODE_DONEIEN;
/*
* don't increment local address during transfers
* (we are transferring from a fixed fifo register)
*/
- bits |= PLX_LOCAL_ADDR_CONST_BIT;
+ bits |= PLX_DMAMODE_LACONST;
/* route dma interrupt to pci bus */
- bits |= PLX_DMA_INTR_PCI_BIT;
+ bits |= PLX_DMAMODE_INTRPCI;
/* enable demand mode */
- bits |= PLX_DEMAND_MODE_BIT;
+ bits |= PLX_DMAMODE_DEMAND;
/* enable local burst mode */
- bits |= PLX_DMA_LOCAL_BURST_EN_BIT;
- bits |= PLX_LOCAL_BUS_32_WIDE_BITS;
- writel(bits, plx_iobase + PLX_DMA0_MODE_REG);
+ bits |= PLX_DMAMODE_BURSTEN;
+ bits |= PLX_DMAMODE_WIDTH32;
+ writel(bits, plx_iobase + PLX_REG_DMAMODE0);
}
static int gsc_hpdi_auto_attach(struct comedi_device *dev,
@@ -680,7 +679,7 @@ static void gsc_hpdi_detach(struct comedi_device *dev)
free_irq(dev->irq, dev);
if (devpriv) {
if (devpriv->plx9080_mmio) {
- writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+ writel(0, devpriv->plx9080_mmio + PLX_REG_INTCSR);
iounmap(devpriv->plx9080_mmio);
}
if (dev->mmio)
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index b87192e0f9aa..6c4ff023717f 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -1,20 +1,20 @@
/*
- comedi/drivers/jr3_pci.c
- hardware driver for JR3/PCI force sensor board
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2007 Anders Blomdell <anders.blomdell@control.lth.se>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/jr3_pci.c
+ * hardware driver for JR3/PCI force sensor board
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2007 Anders Blomdell <anders.blomdell@control.lth.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Driver: jr3_pci
* Description: JR3/PCI force sensor board
@@ -231,7 +231,7 @@ static unsigned int jr3_pci_ai_read_chan(struct comedi_device *dev,
if (chan < 56) {
unsigned int axis = chan % 8;
- unsigned filter = chan / 8;
+ unsigned int filter = chan / 8;
switch (axis) {
case 0:
@@ -690,7 +690,7 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
if (sizeof(struct jr3_channel) != 0xc00) {
dev_err(dev->class_dev,
"sizeof(struct jr3_channel) = %x [expected %x]\n",
- (unsigned)sizeof(struct jr3_channel), 0xc00);
+ (unsigned int)sizeof(struct jr3_channel), 0xc00);
return -EINVAL;
}
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 3bf0caa18ab0..c0b7a300e428 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -150,7 +150,7 @@ struct me_private_data {
unsigned short dac_ctrl; /* Mirror of the DAC_CONTROL register */
};
-static inline void sleep(unsigned sec)
+static inline void sleep(unsigned int sec)
{
schedule_timeout_interruptible(sec * HZ);
}
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index 826e4399c87e..9bda761433c2 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -103,7 +103,7 @@ static const struct comedi_lrange range_mpc624_bipolar1 = {
/* BIP_RANGE(1.01) this is correct, */
/* but my MPC-624 actually seems to have a range of 2.02 */
BIP_RANGE(2.02)
- }
+ }
};
static const struct comedi_lrange range_mpc624_bipolar10 = {
@@ -112,7 +112,7 @@ static const struct comedi_lrange range_mpc624_bipolar10 = {
/* BIP_RANGE(10.1) this is correct, */
/* but my MPC-624 actually seems to have a range of 20.2 */
BIP_RANGE(20.2)
- }
+ }
};
static unsigned int mpc624_ai_get_sample(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 251117be1205..07f38e385469 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -151,10 +151,10 @@ enum ni_65xx_boardid {
struct ni_65xx_board {
const char *name;
- unsigned num_dio_ports;
- unsigned num_di_ports;
- unsigned num_do_ports;
- unsigned legacy_invert:1;
+ unsigned int num_dio_ports;
+ unsigned int num_di_ports;
+ unsigned int num_do_ports;
+ unsigned int legacy_invert:1;
};
static const struct ni_65xx_board ni_65xx_boards[] = {
@@ -360,7 +360,7 @@ static int ni_65xx_dio_insn_config(struct comedi_device *dev,
unsigned long base_port = (unsigned long)s->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int chan_mask = NI_65XX_CHAN_TO_MASK(chan);
- unsigned port = base_port + NI_65XX_CHAN_TO_PORT(chan);
+ unsigned int port = base_port + NI_65XX_CHAN_TO_PORT(chan);
unsigned int interval;
unsigned int val;
@@ -428,14 +428,14 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
unsigned long base_port = (unsigned long)s->private;
unsigned int base_chan = CR_CHAN(insn->chanspec);
int last_port_offset = NI_65XX_CHAN_TO_PORT(s->n_chan - 1);
- unsigned read_bits = 0;
+ unsigned int read_bits = 0;
int port_offset;
for (port_offset = NI_65XX_CHAN_TO_PORT(base_chan);
port_offset <= last_port_offset; port_offset++) {
- unsigned port = base_port + port_offset;
+ unsigned int port = base_port + port_offset;
int base_port_channel = NI_65XX_PORT_TO_CHAN(port_offset);
- unsigned port_mask, port_data, bits;
+ unsigned int port_mask, port_data, bits;
int bitshift = base_port_channel - base_chan;
if (bitshift >= 32)
@@ -640,7 +640,7 @@ static int ni_65xx_auto_attach(struct comedi_device *dev,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct ni_65xx_board *board = NULL;
struct comedi_subdevice *s;
- unsigned i;
+ unsigned int i;
int ret;
if (context < ARRAY_SIZE(ni_65xx_boards))
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 02a532990979..35ef1925703f 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -170,12 +170,12 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
#define DMA_Line_Control_Group1 76
#define DMA_Line_Control_Group2 108
/* channel zero is none */
-static inline unsigned primary_DMAChannel_bits(unsigned channel)
+static inline unsigned int primary_DMAChannel_bits(unsigned int channel)
{
return channel & 0x3;
}
-static inline unsigned secondary_DMAChannel_bits(unsigned channel)
+static inline unsigned int secondary_DMAChannel_bits(unsigned int channel)
{
return (channel << 2) & 0xc;
}
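The two helpers above pack a MITE DMA channel number into the low nibble of the DMA line control registers. A hypothetical usage sketch (the channel value is invented; the register offset comes from the definitions above):

	/*
	 * Route MITE DMA channel 2 to group 1: primary bits = 0x2,
	 * secondary bits = 0x8, so the combined value written is 0xa.
	 */
	unsigned int ch = 2;

	writeb(primary_DMAChannel_bits(ch) | secondary_DMAChannel_bits(ch),
	       dev->mmio + DMA_Line_Control_Group1);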
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 344aa343e5e1..d8917392b9f9 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1064,12 +1064,12 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
struct mite *mite = devpriv->mite;
resource_size_t daq_phys_addr;
static const int Start_Cal_EEPROM = 0x400;
- static const unsigned window_size = 10;
+ static const unsigned int window_size = 10;
static const int serial_number_eeprom_offset = 0x4;
static const int serial_number_eeprom_length = 0x4;
- unsigned old_iodwbsr_bits;
- unsigned old_iodwbsr1_bits;
- unsigned old_iodwcr1_bits;
+ unsigned int old_iodwbsr_bits;
+ unsigned int old_iodwbsr1_bits;
+ unsigned int old_iodwcr1_bits;
int i;
/* IO Window 1 needs to be temporarily mapped to read the eeprom */
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 10472e6dd002..70ad497dd20b 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -84,25 +84,25 @@
#define PCMMIO_AI_LSB_REG 0x00
#define PCMMIO_AI_MSB_REG 0x01
#define PCMMIO_AI_CMD_REG 0x02
-#define PCMMIO_AI_CMD_SE (1 << 7)
-#define PCMMIO_AI_CMD_ODD_CHAN (1 << 6)
+#define PCMMIO_AI_CMD_SE BIT(7)
+#define PCMMIO_AI_CMD_ODD_CHAN BIT(6)
#define PCMMIO_AI_CMD_CHAN_SEL(x) (((x) & 0x3) << 4)
#define PCMMIO_AI_CMD_RANGE(x) (((x) & 0x3) << 2)
#define PCMMIO_RESOURCE_REG 0x02
#define PCMMIO_RESOURCE_IRQ(x) (((x) & 0xf) << 0)
#define PCMMIO_AI_STATUS_REG 0x03
-#define PCMMIO_AI_STATUS_DATA_READY (1 << 7)
-#define PCMMIO_AI_STATUS_DATA_DMA_PEND (1 << 6)
-#define PCMMIO_AI_STATUS_CMD_DMA_PEND (1 << 5)
-#define PCMMIO_AI_STATUS_IRQ_PEND (1 << 4)
-#define PCMMIO_AI_STATUS_DATA_DRQ_ENA (1 << 2)
-#define PCMMIO_AI_STATUS_REG_SEL (1 << 3)
-#define PCMMIO_AI_STATUS_CMD_DRQ_ENA (1 << 1)
-#define PCMMIO_AI_STATUS_IRQ_ENA (1 << 0)
+#define PCMMIO_AI_STATUS_DATA_READY BIT(7)
+#define PCMMIO_AI_STATUS_DATA_DMA_PEND BIT(6)
+#define PCMMIO_AI_STATUS_CMD_DMA_PEND BIT(5)
+#define PCMMIO_AI_STATUS_IRQ_PEND BIT(4)
+#define PCMMIO_AI_STATUS_DATA_DRQ_ENA BIT(2)
+#define PCMMIO_AI_STATUS_REG_SEL BIT(3)
+#define PCMMIO_AI_STATUS_CMD_DRQ_ENA BIT(1)
+#define PCMMIO_AI_STATUS_IRQ_ENA BIT(0)
#define PCMMIO_AI_RES_ENA_REG 0x03
#define PCMMIO_AI_RES_ENA_CMD_REG_ACCESS (0 << 3)
-#define PCMMIO_AI_RES_ENA_AI_RES_ACCESS (1 << 3)
-#define PCMMIO_AI_RES_ENA_DIO_RES_ACCESS (1 << 4)
+#define PCMMIO_AI_RES_ENA_AI_RES_ACCESS BIT(3)
+#define PCMMIO_AI_RES_ENA_DIO_RES_ACCESS BIT(4)
#define PCMMIO_AI_2ND_ADC_OFFSET 0x04
#define PCMMIO_AO_LSB_REG 0x08
@@ -125,14 +125,14 @@
#define PCMMIO_AO_CMD_CHAN_SEL(x) (((x) & 0x03) << 1)
#define PCMMIO_AO_CMD_CHAN_SEL_ALL (0x0f << 0)
#define PCMMIO_AO_STATUS_REG 0x0b
-#define PCMMIO_AO_STATUS_DATA_READY (1 << 7)
-#define PCMMIO_AO_STATUS_DATA_DMA_PEND (1 << 6)
-#define PCMMIO_AO_STATUS_CMD_DMA_PEND (1 << 5)
-#define PCMMIO_AO_STATUS_IRQ_PEND (1 << 4)
-#define PCMMIO_AO_STATUS_DATA_DRQ_ENA (1 << 2)
-#define PCMMIO_AO_STATUS_REG_SEL (1 << 3)
-#define PCMMIO_AO_STATUS_CMD_DRQ_ENA (1 << 1)
-#define PCMMIO_AO_STATUS_IRQ_ENA (1 << 0)
+#define PCMMIO_AO_STATUS_DATA_READY BIT(7)
+#define PCMMIO_AO_STATUS_DATA_DMA_PEND BIT(6)
+#define PCMMIO_AO_STATUS_CMD_DMA_PEND BIT(5)
+#define PCMMIO_AO_STATUS_IRQ_PEND BIT(4)
+#define PCMMIO_AO_STATUS_DATA_DRQ_ENA BIT(2)
+#define PCMMIO_AO_STATUS_REG_SEL BIT(3)
+#define PCMMIO_AO_STATUS_CMD_DRQ_ENA BIT(1)
+#define PCMMIO_AO_STATUS_IRQ_ENA BIT(0)
#define PCMMIO_AO_RESOURCE_ENA_REG 0x0b
#define PCMMIO_AO_2ND_DAC_OFFSET 0x04
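BIT(n) from <linux/bitops.h> expands to (1UL << (n)), so these conversions are value-preserving (e.g. PCMMIO_AI_CMD_SE is still 0x80). A hedged sketch of how the AI command bits compose, loosely modelled on the driver's read path (chan and range are hypothetical inputs):

	unsigned int chan = 3, range = 1;
	unsigned char cmd = PCMMIO_AI_CMD_SE |		/* single-ended */
			    PCMMIO_AI_CMD_CHAN_SEL(chan / 2) |
			    PCMMIO_AI_CMD_RANGE(range);

	if (chan % 2)
		cmd |= PCMMIO_AI_CMD_ODD_CHAN;	/* odd channel of the pair */
	outb(cmd, dev->iobase + PCMMIO_AI_CMD_REG);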
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 7ea813022ff6..8ad64f2625fe 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -307,7 +307,7 @@ static void pcmuio_stop_intr(struct comedi_device *dev,
static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
struct comedi_subdevice *s,
- unsigned triggered)
+ unsigned int triggered)
{
struct pcmuio_private *devpriv = dev->private;
int asic = pcmuio_subdevice_to_asic(s);
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index 8d1aee00b19f..0e20cc5c9a69 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -3,15 +3,6 @@
*
* Copyright (C) 2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
*
- * I modified this file from the plx9060.h header for the
- * wanXL device driver in the linux kernel,
- * for the register offsets and bit definitions. Made minor modifications,
- * added plx9080 registers and
- * stripped out stuff that was specifically for the wanXL driver.
- * Note: I've only made sure the definitions are correct as far
- * as I make use of them. There are still various plx9060-isms
- * left in this header file.
- *
********************************************************************
*
* Copyright (C) 1999 RG Studio s.c.
@@ -28,392 +19,611 @@
#ifndef __COMEDI_PLX9080_H
#define __COMEDI_PLX9080_H
-/* descriptor block used for chained dma transfers */
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+/**
+ * struct plx_dma_desc - DMA descriptor format for PLX PCI 9080
+ * @pci_start_addr: PCI Bus address for transfer (DMAPADR).
+ * @local_start_addr: Local Bus address for transfer (DMALADR).
+ * @transfer_size: Transfer size in bytes (max 8 MiB) (DMASIZ).
+ * @next: Address of next descriptor + flags (DMADPR).
+ *
+ * Describes the format of a scatter-gather DMA descriptor for the PLX
+ * PCI 9080. All members are raw, little-endian register values that
+ * will be transferred by the DMA engine from local or PCI memory into
+ * corresponding registers for the DMA channel.
+ *
+ * The DMA descriptors must be aligned on a 16-byte boundary. Bits 3:0
+ * of @next contain flags describing the address space of the next
+ * descriptor (local or PCI), an "end of chain" marker, an "interrupt on
+ * terminal count" bit, and a data transfer direction.
+ */
struct plx_dma_desc {
__le32 pci_start_addr;
__le32 local_start_addr;
- /* transfer_size is in bytes, only first 23 bits of register are used */
__le32 transfer_size;
- /*
- * address of next descriptor (quad word aligned), plus some
- * additional bits (see PLX_DMA0_DESCRIPTOR_REG)
- */
__le32 next;
};
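As a concrete illustration of the layout documented above, here is a minimal sketch that links two descriptors into a ring, assuming a 32-bit DMA mask and coherent memory already obtained with dma_alloc_coherent(). The names desc, desc_bus, buf_bus and local_addr are hypothetical; the PLX_DMADPR_* flags are defined later in this header:

static void plx_fill_desc_ring(struct plx_dma_desc *desc, dma_addr_t desc_bus,
			       dma_addr_t buf_bus, u32 local_addr, u32 len)
{
	unsigned int i;

	for (i = 0; i < 2; i++) {
		/* 32-bit DMA mask assumed, hence the (u32) casts */
		desc[i].pci_start_addr = cpu_to_le32((u32)buf_bus + i * len);
		desc[i].local_start_addr = cpu_to_le32(local_addr);
		desc[i].transfer_size = cpu_to_le32(len);
		/* chain to the other descriptor, wrapping around */
		desc[i].next = cpu_to_le32(((u32)desc_bus +
					    ((i + 1) % 2) * sizeof(*desc)) |
					   PLX_DMADPR_DESCPCI |
					   PLX_DMADPR_TCINTR |
					   PLX_DMADPR_XFERL2P);
	}
}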
-/**********************************************************************
-** Register Offsets and Bit Definitions
-**
-** Note: All offsets zero relative. IE. Some standard base address
-** must be added to the Register Number to properly access the register.
-**
-**********************************************************************/
-
-/* L, Local Addr Space 0 Range Register */
-#define PLX_LAS0RNG_REG 0x0000
-/* L, Local Addr Space 1 Range Register */
-#define PLX_LAS1RNG_REG 0x00f0
-#define LRNG_IO 0x00000001 /* Map to: 1=I/O, 0=Mem */
-#define LRNG_ANY32 0x00000000 /* Locate anywhere in 32 bit */
-#define LRNG_LT1MB 0x00000002 /* Locate in 1st meg */
-#define LRNG_ANY64 0x00000004 /* Locate anywhere in 64 bit */
-/* bits that specify range for memory io */
-#define LRNG_MEM_MASK 0xfffffff0
-/* bits that specify range for normal io */
-#define LRNG_IO_MASK 0xfffffffa
-/* L, Local Addr Space 0 Remap Register */
-#define PLX_LAS0MAP_REG 0x0004
-/* L, Local Addr Space 1 Remap Register */
-#define PLX_LAS1MAP_REG 0x00f4
-#define LMAP_EN 0x00000001 /* Enable slave decode */
-/* bits that specify decode for memory io */
-#define LMAP_MEM_MASK 0xfffffff0
-/* bits that specify decode bits for normal io */
-#define LMAP_IO_MASK 0xfffffffa
-
/*
- * Mode/Arbitration Register.
+ * Register Offsets and Bit Definitions
*/
-#define PLX_MARB_REG 0x8 /* L, Local Arbitration Register */
-#define PLX_DMAARB_REG 0xac
-enum marb_bits {
- MARB_LLT_MASK = 0x000000ff, /* Local Bus Latency Timer */
- MARB_LPT_MASK = 0x0000ff00, /* Local Bus Pause Timer */
- MARB_LTEN = 0x00010000, /* Latency Timer Enable */
- MARB_LPEN = 0x00020000, /* Pause Timer Enable */
- MARB_BREQ = 0x00040000, /* Local Bus BREQ Enable */
- MARB_DMA_PRIORITY_MASK = 0x00180000,
- /* local bus direct slave give up bus mode */
- MARB_LBDS_GIVE_UP_BUS_MODE = 0x00200000,
- /* direct slave LLOCKo# enable */
- MARB_DS_LLOCK_ENABLE = 0x00400000,
- MARB_PCI_REQUEST_MODE = 0x00800000,
- MARB_PCIV21_MODE = 0x01000000, /* pci specification v2.1 mode */
- MARB_PCI_READ_NO_WRITE_MODE = 0x02000000,
- MARB_PCI_READ_WITH_WRITE_FLUSH_MODE = 0x04000000,
- /* gate local bus latency timer with BREQ */
- MARB_GATE_TIMER_WITH_BREQ = 0x08000000,
- MARB_PCI_READ_NO_FLUSH_MODE = 0x10000000,
- MARB_USE_SUBSYSTEM_IDS = 0x20000000,
-};
-
-#define PLX_BIGEND_REG 0xc
-enum bigend_bits {
- /* use big endian ordering for configuration register accesses */
- BIGEND_CONFIG = 0x1,
- BIGEND_DIRECT_MASTER = 0x2,
- BIGEND_DIRECT_SLAVE_LOCAL0 = 0x4,
- BIGEND_ROM = 0x8,
- /*
- * use byte lane consisting of most significant bits instead of
- * least significant
- */
- BIGEND_BYTE_LANE = 0x10,
- BIGEND_DIRECT_SLAVE_LOCAL1 = 0x20,
- BIGEND_DMA1 = 0x40,
- BIGEND_DMA0 = 0x80,
-};
+/* Local Address Space 0 Range Register */
+#define PLX_REG_LAS0RR 0x0000
+/* Local Address Space 1 Range Register */
+#define PLX_REG_LAS1RR 0x00f0
+
+#define PLX_LASRR_IO BIT(0) /* Map to: 1=I/O, 0=Mem */
+#define PLX_LASRR_ANY32 (BIT(1) * 0) /* Locate anywhere in 32 bit */
+#define PLX_LASRR_LT1MB (BIT(1) * 1) /* Locate in 1st meg */
+#define PLX_LASRR_ANY64 (BIT(1) * 2) /* Locate anywhere in 64 bit */
+#define PLX_LASRR_MLOC_MASK GENMASK(2, 1) /* Memory location bits */
+#define PLX_LASRR_PREFETCH BIT(3) /* Memory is prefetchable */
+/* bits that specify range for memory space decode bits */
+#define PLX_LASRR_MEM_MASK GENMASK(31, 4)
+/* bits that specify range for i/o space decode bits */
+#define PLX_LASRR_IO_MASK GENMASK(31, 2)
+
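Editor's note: the (BIT(n) * m) idiom used throughout the rewritten header places the field value m at bit position n, keeping multi-bit field encodings readable without open-coded shifts. For the 2-bit memory-location field above, a compile-time sanity sketch (placed inside any function):

	BUILD_BUG_ON(PLX_LASRR_ANY64 != 0x4);	/* BIT(1) * 2 */
	BUILD_BUG_ON(PLX_LASRR_ANY64 & ~PLX_LASRR_MLOC_MASK);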
+/* Local Address Space 0 Local Base Address (Remap) Register */
+#define PLX_REG_LAS0BA 0x0004
+/* Local Address Space 1 Local Base Address (Remap) Register */
+#define PLX_REG_LAS1BA 0x00f4
+
+#define PLX_LASBA_EN BIT(0) /* Enable slave decode */
+/* bits that specify local base address for memory space */
+#define PLX_LASBA_MEM_MASK GENMASK(31, 4)
+/* bits that specify local base address for i/o space */
+#define PLX_LASBA_IO_MASK GENMASK(31, 2)
+
+/* Mode/Arbitration Register */
+#define PLX_REG_MARBR 0x0008
+/* DMA Arbitration Register (alias of MARBR). */
+#define PLX_REG_DMAARB 0x00ac
+
+/* Local Bus Latency Timer */
+#define PLX_MARBR_LT(x) (BIT(0) * ((x) & 0xff))
+#define PLX_MARBR_LT_MASK GENMASK(7, 0)
+#define PLX_MARBR_LT_SHIFT 0
+/* Local Bus Pause Timer */
+#define PLX_MARBR_PT(x) (BIT(8) * ((x) & 0xff))
+#define PLX_MARBR_PT_MASK GENMASK(15, 8)
+#define PLX_MARBR_PT_SHIFT 8
+/* Local Bus Latency Timer Enable */
+#define PLX_MARBR_LTEN BIT(16)
+/* Local Bus Pause Timer Enable */
+#define PLX_MARBR_PTEN BIT(17)
+/* Local Bus BREQ Enable */
+#define PLX_MARBR_BREQEN BIT(18)
+/* DMA Channel Priority */
+#define PLX_MARBR_PRIO_ROT (BIT(19) * 0) /* Rotational priority */
+#define PLX_MARBR_PRIO_DMA0 (BIT(19) * 1) /* DMA channel 0 has priority */
+#define PLX_MARBR_PRIO_DMA1 (BIT(19) * 2) /* DMA channel 1 has priority */
+#define PLX_MARBR_PRIO_MASK GENMASK(20, 19)
+/* Local Bus Direct Slave Give Up Bus Mode */
+#define PLX_MARBR_DSGUBM BIT(21)
+/* Direct Slave LLOCKo# Enable */
+#define PLX_MARBR_DSLLOCKOEN BIT(22)
+/* PCI Request Mode */
+#define PLX_MARBR_PCIREQM BIT(23)
+/* PCI Specification v2.1 Mode */
+#define PLX_MARBR_PCIV21M BIT(24)
+/* PCI Read No Write Mode */
+#define PLX_MARBR_PCIRNWM BIT(25)
+/* PCI Read with Write Flush Mode */
+#define PLX_MARBR_PCIRWFM BIT(26)
+/* Gate Local Bus Latency Timer with BREQ */
+#define PLX_MARBR_GLTBREQ BIT(27)
+/* PCI Read No Flush Mode */
+#define PLX_MARBR_PCIRNFM BIT(28)
/*
-** Note: The Expansion ROM stuff is only relevant to the PC environment.
-** This expansion ROM code is executed by the host CPU at boot time.
-** For this reason no bit definitions are provided here.
+ * Make reads from PCI Configuration register 0 return Subsystem ID and
+ * Subsystem Vendor ID instead of Device ID and Vendor ID
*/
-#define PLX_ROMRNG_REG 0x0010 /* L, Expn ROM Space Range Register */
-/* L, Local Addr Space Range Register */
-#define PLX_ROMMAP_REG 0x0014
-
-#define PLX_REGION0_REG 0x0018 /* L, Local Bus Region 0 Descriptor */
-#define RGN_WIDTH 0x00000002 /* Local bus width bits */
-#define RGN_8BITS 0x00000000 /* 08 bit Local Bus */
-#define RGN_16BITS 0x00000001 /* 16 bit Local Bus */
-#define RGN_32BITS 0x00000002 /* 32 bit Local Bus */
-#define RGN_MWS 0x0000003C /* Memory Access Wait States */
-#define RGN_0MWS 0x00000000
-#define RGN_1MWS 0x00000004
-#define RGN_2MWS 0x00000008
-#define RGN_3MWS 0x0000000C
-#define RGN_4MWS 0x00000010
-#define RGN_6MWS 0x00000018
-#define RGN_8MWS 0x00000020
-#define RGN_MRE 0x00000040 /* Memory Space Ready Input Enable */
-#define RGN_MBE 0x00000080 /* Memory Space Bterm Input Enable */
-#define RGN_READ_PREFETCH_DISABLE 0x00000100
-#define RGN_ROM_PREFETCH_DISABLE 0x00000200
-#define RGN_READ_PREFETCH_COUNT_ENABLE 0x00000400
-#define RGN_RWS 0x003C0000 /* Expn ROM Wait States */
-#define RGN_RRE 0x00400000 /* ROM Space Ready Input Enable */
-#define RGN_RBE 0x00800000 /* ROM Space Bterm Input Enable */
-#define RGN_MBEN 0x01000000 /* Memory Space Burst Enable */
-#define RGN_RBEN 0x04000000 /* ROM Space Burst Enable */
-#define RGN_THROT 0x08000000 /* De-assert TRDY when FIFO full */
-#define RGN_TRD 0xF0000000 /* Target Ready Delay /8 */
-
-#define PLX_REGION1_REG 0x00f8 /* L, Local Bus Region 1 Descriptor */
-
-#define PLX_DMRNG_REG 0x001C /* L, Direct Master Range Register */
-
-#define PLX_LBAPMEM_REG 0x0020 /* L, Lcl Base Addr for PCI mem space */
-
-#define PLX_LBAPIO_REG 0x0024 /* L, Lcl Base Addr for PCI I/O space */
-
-#define PLX_DMMAP_REG 0x0028 /* L, Direct Master Remap Register */
-#define DMM_MAE 0x00000001 /* Direct Mstr Memory Acc Enable */
-#define DMM_IAE 0x00000002 /* Direct Mstr I/O Acc Enable */
-#define DMM_LCK 0x00000004 /* LOCK Input Enable */
-#define DMM_PF4 0x00000008 /* Prefetch 4 Mode Enable */
-#define DMM_THROT 0x00000010 /* Assert IRDY when read FIFO full */
-#define DMM_PAF0 0x00000000 /* Programmable Almost fill level */
-#define DMM_PAF1 0x00000020 /* Programmable Almost fill level */
-#define DMM_PAF2 0x00000040 /* Programmable Almost fill level */
-#define DMM_PAF3 0x00000060 /* Programmable Almost fill level */
-#define DMM_PAF4 0x00000080 /* Programmable Almost fill level */
-#define DMM_PAF5 0x000000A0 /* Programmable Almost fill level */
-#define DMM_PAF6 0x000000C0 /* Programmable Almost fill level */
-#define DMM_PAF7 0x000000D0 /* Programmable Almost fill level */
-#define DMM_MAP 0xFFFF0000 /* Remap Address Bits */
-
-#define PLX_CAR_REG 0x002C /* L, Configuration Address Register */
-#define CAR_CT0 0x00000000 /* Config Type 0 */
-#define CAR_CT1 0x00000001 /* Config Type 1 */
-#define CAR_REG 0x000000FC /* Register Number Bits */
-#define CAR_FUN 0x00000700 /* Function Number Bits */
-#define CAR_DEV 0x0000F800 /* Device Number Bits */
-#define CAR_BUS 0x00FF0000 /* Bus Number Bits */
-#define CAR_CFG 0x80000000 /* Config Spc Access Enable */
-
-#define PLX_DBR_IN_REG 0x0060 /* L, PCI to Local Doorbell Register */
-
-#define PLX_DBR_OUT_REG 0x0064 /* L, Local to PCI Doorbell Register */
-
-#define PLX_INTRCS_REG 0x0068 /* L, Interrupt Control/Status Reg */
-#define ICS_AERR 0x00000001 /* Assert LSERR on ABORT */
-#define ICS_PERR 0x00000002 /* Assert LSERR on Parity Error */
-#define ICS_SERR 0x00000004 /* Generate PCI SERR# */
-#define ICS_MBIE 0x00000008 /* mailbox interrupt enable */
-#define ICS_PIE 0x00000100 /* PCI Interrupt Enable */
-#define ICS_PDIE 0x00000200 /* PCI Doorbell Interrupt Enable */
-#define ICS_PAIE 0x00000400 /* PCI Abort Interrupt Enable */
-#define ICS_PLIE 0x00000800 /* PCI Local Int Enable */
-#define ICS_RAE 0x00001000 /* Retry Abort Enable */
-#define ICS_PDIA 0x00002000 /* PCI Doorbell Interrupt Active */
-#define ICS_PAIA 0x00004000 /* PCI Abort Interrupt Active */
-#define ICS_LIA 0x00008000 /* Local Interrupt Active */
-#define ICS_LIE 0x00010000 /* Local Interrupt Enable */
-#define ICS_LDIE 0x00020000 /* Local Doorbell Int Enable */
-#define ICS_DMA0_E 0x00040000 /* DMA #0 Interrupt Enable */
-#define ICS_DMA1_E 0x00080000 /* DMA #1 Interrupt Enable */
-#define ICS_LDIA 0x00100000 /* Local Doorbell Int Active */
-#define ICS_DMA0_A 0x00200000 /* DMA #0 Interrupt Active */
-#define ICS_DMA1_A 0x00400000 /* DMA #1 Interrupt Active */
-#define ICS_BIA 0x00800000 /* BIST Interrupt Active */
-#define ICS_TA_DM 0x01000000 /* Target Abort - Direct Master */
-#define ICS_TA_DMA0 0x02000000 /* Target Abort - DMA #0 */
-#define ICS_TA_DMA1 0x04000000 /* Target Abort - DMA #1 */
-#define ICS_TA_RA 0x08000000 /* Target Abort - Retry Timeout */
-/* mailbox x is active */
-#define ICS_MBIA(x) (0x10000000 << ((x) & 0x3))
-
-#define PLX_CONTROL_REG 0x006C /* L, EEPROM Cntl & PCI Cmd Codes */
-#define CTL_RDMA 0x0000000E /* DMA Read Command */
-#define CTL_WDMA 0x00000070 /* DMA Write Command */
-#define CTL_RMEM 0x00000600 /* Memory Read Command */
-#define CTL_WMEM 0x00007000 /* Memory Write Command */
-#define CTL_USERO 0x00010000 /* USERO output pin control bit */
-#define CTL_USERI 0x00020000 /* USERI input pin bit */
-#define CTL_EE_CLK 0x01000000 /* EEPROM Clock line */
-#define CTL_EE_CS 0x02000000 /* EEPROM Chip Select */
-#define CTL_EE_W 0x04000000 /* EEPROM Write bit */
-#define CTL_EE_R 0x08000000 /* EEPROM Read bit */
-#define CTL_EECHK 0x10000000 /* EEPROM Present bit */
-#define CTL_EERLD 0x20000000 /* EEPROM Reload Register */
-#define CTL_RESET 0x40000000 /* !! Adapter Reset !! */
-#define CTL_READY 0x80000000 /* Local Init Done */
-
-#define PLX_ID_REG 0x70 /* hard-coded plx vendor and device ids */
-
-#define PLX_REVISION_REG 0x74 /* silicon revision */
-
-#define PLX_DMA0_MODE_REG 0x80 /* dma channel 0 mode register */
-#define PLX_DMA1_MODE_REG 0x94 /* dma channel 0 mode register */
-#define PLX_LOCAL_BUS_16_WIDE_BITS 0x1
-#define PLX_LOCAL_BUS_32_WIDE_BITS 0x3
-#define PLX_LOCAL_BUS_WIDTH_MASK 0x3
-#define PLX_DMA_EN_READYIN_BIT 0x40 /* enable ready in input */
-#define PLX_EN_BTERM_BIT 0x80 /* enable BTERM# input */
-#define PLX_DMA_LOCAL_BURST_EN_BIT 0x100 /* enable local burst mode */
-#define PLX_EN_CHAIN_BIT 0x200 /* enables chaining */
-/* enables interrupt on dma done */
-#define PLX_EN_DMA_DONE_INTR_BIT 0x400
-/* hold local address constant (don't increment) */
-#define PLX_LOCAL_ADDR_CONST_BIT 0x800
-/* enables demand-mode for dma transfer */
-#define PLX_DEMAND_MODE_BIT 0x1000
-#define PLX_EOT_ENABLE_BIT 0x4000
-#define PLX_STOP_MODE_BIT 0x8000
-/* routes dma interrupt to pci bus (instead of local bus) */
-#define PLX_DMA_INTR_PCI_BIT 0x20000
-
-/* pci address that dma transfers start at */
-#define PLX_DMA0_PCI_ADDRESS_REG 0x84
-#define PLX_DMA1_PCI_ADDRESS_REG 0x98
-
-/* local address that dma transfers start at */
-#define PLX_DMA0_LOCAL_ADDRESS_REG 0x88
-#define PLX_DMA1_LOCAL_ADDRESS_REG 0x9c
-
-/* number of bytes to transfer (first 23 bits) */
-#define PLX_DMA0_TRANSFER_SIZE_REG 0x8c
-#define PLX_DMA1_TRANSFER_SIZE_REG 0xa0
-
-#define PLX_DMA0_DESCRIPTOR_REG 0x90 /* descriptor pointer register */
-#define PLX_DMA1_DESCRIPTOR_REG 0xa4
-/* descriptor is located in pci space (not local space) */
-#define PLX_DESC_IN_PCI_BIT 0x1
-#define PLX_END_OF_CHAIN_BIT 0x2 /* end of chain bit */
-/* interrupt when this descriptor's transfer is finished */
-#define PLX_INTR_TERM_COUNT 0x4
-/* transfer from local to pci bus (not pci to local) */
-#define PLX_XFER_LOCAL_TO_PCI 0x8
-
-#define PLX_DMA0_CS_REG 0xa8 /* command status register */
-#define PLX_DMA1_CS_REG 0xa9
-#define PLX_DMA_EN_BIT 0x1 /* enable dma channel */
-#define PLX_DMA_START_BIT 0x2 /* start dma transfer */
-#define PLX_DMA_ABORT_BIT 0x4 /* abort dma transfer */
-#define PLX_CLEAR_DMA_INTR_BIT 0x8 /* clear dma interrupt */
-#define PLX_DMA_DONE_BIT 0x10 /* transfer done status bit */
-
-#define PLX_DMA0_THRESHOLD_REG 0xb0 /* command status register */
+#define PLX_MARBR_SUBSYSIDS BIT(29)
+
+/* Big/Little Endian Descriptor Register */
+#define PLX_REG_BIGEND 0x000c
+
+/* Configuration Register Big Endian Mode */
+#define PLX_BIGEND_CONFIG BIT(0)
+/* Direct Master Big Endian Mode */
+#define PLX_BIGEND_DM BIT(1)
+/* Direct Slave Address Space 0 Big Endian Mode */
+#define PLX_BIGEND_DSAS0 BIT(2)
+/* Direct Slave Expansion ROM Big Endian Mode */
+#define PLX_BIGEND_EROM BIT(3)
+/* Big Endian Byte Lane Mode - use most significant byte lanes */
+#define PLX_BIGEND_BEBLM BIT(4)
+/* Direct Slave Address Space 1 Big Endian Mode */
+#define PLX_BIGEND_DSAS1 BIT(5)
+/* DMA Channel 1 Big Endian Mode */
+#define PLX_BIGEND_DMA1 BIT(6)
+/* DMA Channel 0 Big Endian Mode */
+#define PLX_BIGEND_DMA0 BIT(7)
+/* DMA Channel N Big Endian Mode (N <= 1) */
+#define PLX_BIGEND_DMA(n) ((n) ? PLX_BIGEND_DMA1 : PLX_BIGEND_DMA0)
/*
- * Accesses near the end of memory can cause the PLX chip
- * to pre-fetch data off of end-of-ram. Limit the size of
- * memory so host-side accesses cannot occur.
+ * Note: The Expansion ROM stuff is only relevant to the PC environment.
+ * This expansion ROM code is executed by the host CPU at boot time.
+ * For this reason no bit definitions are provided here.
*/
-#define PLX_PREFETCH 32
+/* Expansion ROM Range Register */
+#define PLX_REG_EROMRR 0x0010
+/* Expansion ROM Local Base Address (Remap) Register */
+#define PLX_REG_EROMBA 0x0014
+
+/* Local Address Space 0/Expansion ROM Bus Region Descriptor Register */
+#define PLX_REG_LBRD0 0x0018
+/* Local Address Space 1 Bus Region Descriptor Register */
+#define PLX_REG_LBRD1 0x00f8
+
+/* Memory Space Local Bus Width */
+#define PLX_LBRD_MSWIDTH8 (BIT(0) * 0) /* 8 bits wide */
+#define PLX_LBRD_MSWIDTH16 (BIT(0) * 1) /* 16 bits wide */
+#define PLX_LBRD_MSWIDTH32 (BIT(0) * 2) /* 32 bits wide */
+#define PLX_LBRD_MSWIDTH32A (BIT(0) * 3) /* 32 bits wide */
+#define PLX_LBRD_MSWIDTH_MASK GENMASK(1, 0)
+#define PLX_LBRD_MSWIDTH_SHIFT 0
+/* Memory Space Internal Wait States */
+#define PLX_LBRD_MSIWS(x) (BIT(2) * ((x) & 0xf))
+#define PLX_LBRD_MSIWS_MASK GENMASK(5, 2)
+#define PLX_LBRD_MSIWS_SHIFT 2
+/* Memory Space Ready Input Enable */
+#define PLX_LBRD_MSREADYIEN BIT(6)
+/* Memory Space BTERM# Input Enable */
+#define PLX_LBRD_MSBTERMIEN BIT(7)
+/* Memory Space 0 Prefetch Disable (LBRD0 only) */
+#define PLX_LBRD0_MSPREDIS BIT(8)
+/* Memory Space 1 Burst Enable (LBRD1 only) */
+#define PLX_LBRD1_MSBURSTEN BIT(8)
+/* Expansion ROM Space Prefetch Disable (LBRD0 only) */
+#define PLX_LBRD0_EROMPREDIS BIT(9)
+/* Memory Space 1 Prefetch Disable (LBRD1 only) */
+#define PLX_LBRD1_MSPREDIS BIT(9)
+/* Read Prefetch Count Enable */
+#define PLX_LBRD_RPFCOUNTEN BIT(10)
+/* Prefetch Counter */
+#define PLX_LBRD_PFCOUNT(x) (BIT(11) * ((x) & 0xf))
+#define PLX_LBRD_PFCOUNT_MASK GENMASK(14, 11)
+#define PLX_LBRD_PFCOUNT_SHIFT 11
+/* Expansion ROM Space Local Bus Width (LBRD0 only) */
+#define PLX_LBRD0_EROMWIDTH8 (BIT(16) * 0) /* 8 bits wide */
+#define PLX_LBRD0_EROMWIDTH16 (BIT(16) * 1) /* 16 bits wide */
+#define PLX_LBRD0_EROMWIDTH32 (BIT(16) * 2) /* 32 bits wide */
+#define PLX_LBRD0_EROMWIDTH32A (BIT(16) * 3) /* 32 bits wide */
+#define PLX_LBRD0_EROMWIDTH_MASK GENMASK(17, 16)
+#define PLX_LBRD0_EROMWIDTH_SHIFT 16
+/* Expansion ROM Space Internal Wait States (LBRD0 only) */
+#define PLX_LBRD0_EROMIWS(x) (BIT(18) * ((x) & 0xf))
+#define PLX_LBRD0_EROMIWS_MASK GENMASK(21, 18)
+#define PLX_LBRD0_EROMIWS_SHIFT 18
+/* Expansion ROM Space Ready Input Enable (LBDR0 only) */
+#define PLX_LBRD0_EROMREADYIEN BIT(22)
+/* Expansion ROM Space BTERM# Input Enable (LBRD0 only) */
+#define PLX_LBRD0_EROMBTERMIEN BIT(23)
+/* Memory Space 0 Burst Enable (LBRD0 only) */
+#define PLX_LBRD0_MSBURSTEN BIT(24)
+/* Extra Long Load From Serial EEPROM (LBRD0 only) */
+#define PLX_LBRD0_EELONGLOAD BIT(25)
+/* Expansion ROM Space Burst Enable (LBRD0 only) */
+#define PLX_LBRD0_EROMBURSTEN BIT(26)
+/* Direct Slave PCI Write Mode - assert TRDY# when FIFO full (LBRD0 only) */
+#define PLX_LBRD0_DSWMTRDY BIT(27)
+/* PCI Target Retry Delay Clocks / 8 (LBRD0 only) */
+#define PLX_LBRD0_TRDELAY(x) (BIT(28) * ((x) & 0xF))
+#define PLX_LBRD0_TRDELAY_MASK GENMASK(31, 28)
+#define PLX_LBRD0_TRDELAY_SHIFT 28
+
+/* Local Range Register for Direct Master to PCI */
+#define PLX_REG_DMRR 0x001c
+
+/* Local Bus Base Address Register for Direct Master to PCI Memory */
+#define PLX_REG_DMLBAM 0x0020
+
+/* Local Base Address Register for Direct Master to PCI IO/CFG */
+#define PLX_REG_DMLBAI 0x0024
+
+/* PCI Base Address (Remap) Register for Direct Master to PCI Memory */
+#define PLX_REG_DMPBAM 0x0028
+
+/* Direct Master Memory Access Enable */
+#define PLX_DMPBAM_MEMACCEN BIT(0)
+/* Direct Master I/O Access Enable */
+#define PLX_DMPBAM_IOACCEN BIT(1)
+/* LLOCK# Input Enable */
+#define PLX_DMPBAM_LLOCKIEN BIT(2)
+/* Direct Master Read Prefetch Size Control (bits 12, 3) */
+#define PLX_DMPBAM_RPSIZECONT ((BIT(12) * 0) | (BIT(3) * 0))
+#define PLX_DMPBAM_RPSIZE4 ((BIT(12) * 0) | (BIT(3) * 1))
+#define PLX_DMPBAM_RPSIZE8 ((BIT(12) * 1) | (BIT(3) * 0))
+#define PLX_DMPBAM_RPSIZE16 ((BIT(12) * 1) | (BIT(3) * 1))
+#define PLX_DMPBAM_RPSIZE_MASK (BIT(12) | BIT(3))
+/* Direct Master PCI Read Mode - deassert IRDY when FIFO full */
+#define PLX_DMPBAM_RMIRDY BIT(4)
+/* Programmable Almost Full Level (bits 10, 8:5) */
+#define PLX_DMPBAM_PAFL(x) ((BIT(10) * !!((x) & 0x10)) | \
+ (BIT(5) * ((x) & 0xf)))
+#define PLX_DMPBAM_TO_PAFL(v) ((((BIT(10) & (v)) >> 1) | \
+ (GENMASK(8, 5) & (v))) >> 5)
+#define PLX_DMPBAM_PAFL_MASK (BIT(10) | GENMASK(8, 5))
+/* Write And Invalidate Mode */
+#define PLX_DMPBAM_WIM BIT(9)
+/* Direct Master Prefetch Limit */
+#define PLX_DMPBAM_PFLIMIT BIT(11)
+/* I/O Remap Select */
+#define PLX_DMPBAM_IOREMAPSEL BIT(13)
+/* Direct Master Write Delay */
+#define PLX_DMPBAM_WDELAYNONE (BIT(14) * 0)
+#define PLX_DMPBAM_WDELAY4 (BIT(14) * 1)
+#define PLX_DMPBAM_WDELAY8 (BIT(14) * 2)
+#define PLX_DMPBAM_WDELAY16 (BIT(14) * 3)
+#define PLX_DMPBAM_WDELAY_MASK GENMASK(15, 14)
+/* Remap of Local-to-PCI Space Into PCI Address Space */
+#define PLX_DMPBAM_REMAP_MASK GENMASK(31, 16)
+
+/* PCI Configuration Address Register for Direct Master to PCI IO/CFG */
+#define PLX_REG_DMCFGA 0x002c
+
+/* Configuration Type */
+#define PLX_DMCFGA_TYPE0 (BIT(0) * 0)
+#define PLX_DMCFGA_TYPE1 (BIT(0) * 1)
+#define PLX_DMCFGA_TYPE_MASK GENMASK(1, 0)
+/* Register Number */
+#define PLX_DMCFGA_REGNUM(x) (BIT(2) * ((x) & 0x3f))
+#define PLX_DMCFGA_REGNUM_MASK GENMASK(7, 2)
+#define PLX_DMCFGA_REGNUM_SHIFT 2
+/* Function Number */
+#define PLX_DMCFGA_FUNCNUM(x) (BIT(8) * ((x) & 0x7))
+#define PLX_DMCFGA_FUNCNUM_MASK GENMASK(10, 8)
+#define PLX_DMCFGA_FUNCNUM_SHIFT 8
+/* Device Number */
+#define PLX_DMCFGA_DEVNUM(x) (BIT(11) * ((x) & 0x1f))
+#define PLX_DMCFGA_DEVNUM_MASK GENMASK(15, 11)
+#define PLX_DMCFGA_DEVNUM_SHIFT 11
+/* Bus Number */
+#define PLX_DMCFGA_BUSNUM(x) (BIT(16) * ((x) & 0xff))
+#define PLX_DMCFGA_BUSNUM_MASK GENMASK(23, 16)
+#define PLX_DMCFGA_BUSNUM_SHIFT 16
+/* Configuration Enable */
+#define PLX_DMCFGA_CONFIGEN BIT(31)
/*
- * The PCI Interface, via the PCI-9060 Chip, has up to eight (8) Mailbox
- * Registers. The PUTS (Power-Up Test Suite) handles the board-side
- * interface/interaction using the first 4 registers. Specifications for
- * the use of the full PUTS' command and status interface is contained
- * within a separate SBE PUTS Manual. The Host-Side Device Driver only
- * uses a subset of the full PUTS interface.
+ * Mailbox Register N (N <= 7)
+ *
+ * Note that if the I2O feature is enabled (QSR[0] is set), Mailbox Register 0
+ * is replaced by the Inbound Queue Port, and Mailbox Register 1 is replaced
+ * by the Outbound Queue Port. However, Mailbox Register 0 and 1 are always
+ * accessible at alternative offsets if the I2O feature is enabled.
*/
+#define PLX_REG_MBOX(n) (0x0040 + (n) * 4)
+#define PLX_REG_MBOX0 PLX_REG_MBOX(0)
+#define PLX_REG_MBOX1 PLX_REG_MBOX(1)
+#define PLX_REG_MBOX2 PLX_REG_MBOX(2)
+#define PLX_REG_MBOX3 PLX_REG_MBOX(3)
+#define PLX_REG_MBOX4 PLX_REG_MBOX(4)
+#define PLX_REG_MBOX5 PLX_REG_MBOX(5)
+#define PLX_REG_MBOX6 PLX_REG_MBOX(6)
+#define PLX_REG_MBOX7 PLX_REG_MBOX(7)
+
+/* Alternative offsets for Mailbox Registers 0 and 1 (in case I2O is enabled) */
+#define PLX_REG_ALT_MBOX(n) ((n) < 2 ? 0x0078 + (n) * 4 : PLX_REG_MBOX(n))
+#define PLX_REG_ALT_MBOX0 PLX_REG_ALT_MBOX(0)
+#define PLX_REG_ALT_MBOX1 PLX_REG_ALT_MBOX(1)
+
+/* PCI-to-Local Doorbell Register */
+#define PLX_REG_P2LDBELL 0x0060
+
+/* Local-to-PCI Doorbell Register */
+#define PLX_REG_L2PDBELL 0x0064
+
+/* Interrupt Control/Status Register */
+#define PLX_REG_INTCSR 0x0068
+
+/* Enable Local Bus LSERR# when PCI Bus Target Abort or Master Abort occurs */
+#define PLX_INTCSR_LSEABORTEN BIT(0)
+/* Enable Local Bus LSERR# when PCI parity error occurs */
+#define PLX_INTCSR_LSEPARITYEN BIT(1)
+/* Generate PCI Bus SERR# when set to 1 */
+#define PLX_INTCSR_GENSERR BIT(2)
+/* Mailbox Interrupt Enable (local bus interrupts on PCI write to MBOX0-3) */
+#define PLX_INTCSR_MBIEN BIT(3)
+/* PCI Interrupt Enable */
+#define PLX_INTCSR_PIEN BIT(8)
+/* PCI Doorbell Interrupt Enable */
+#define PLX_INTCSR_PDBIEN BIT(9)
+/* PCI Abort Interrupt Enable */
+#define PLX_INTCSR_PABORTIEN BIT(10)
+/* PCI Local Interrupt Enable */
+#define PLX_INTCSR_PLIEN BIT(11)
+/* Retry Abort Enable (for diagnostic purposes only) */
+#define PLX_INTCSR_RAEN BIT(12)
+/* PCI Doorbell Interrupt Active (read-only) */
+#define PLX_INTCSR_PDBIA BIT(13)
+/* PCI Abort Interrupt Active (read-only) */
+#define PLX_INTCSR_PABORTIA BIT(14)
+/* Local Interrupt (LINTi#) Active (read-only) */
+#define PLX_INTCSR_PLIA BIT(15)
+/* Local Interrupt Output (LINTo#) Enable */
+#define PLX_INTCSR_LIOEN BIT(16)
+/* Local Doorbell Interrupt Enable */
+#define PLX_INTCSR_LDBIEN BIT(17)
+/* DMA Channel 0 Interrupt Enable */
+#define PLX_INTCSR_DMA0IEN BIT(18)
+/* DMA Channel 1 Interrupt Enable */
+#define PLX_INTCSR_DMA1IEN BIT(19)
+/* DMA Channel N Interrupt Enable (N <= 1) */
+#define PLX_INTCSR_DMAIEN(n) ((n) ? PLX_INTCSR_DMA1IEN : PLX_INTCSR_DMA0IEN)
+/* Local Doorbell Interrupt Active (read-only) */
+#define PLX_INTCSR_LDBIA BIT(20)
+/* DMA Channel 0 Interrupt Active (read-only) */
+#define PLX_INTCSR_DMA0IA BIT(21)
+/* DMA Channel 1 Interrupt Active (read-only) */
+#define PLX_INTCSR_DMA1IA BIT(22)
+/* DMA Channel N Interrupt Active (N <= 1) (read-only) */
+#define PLX_INTCSR_DMAIA(n) ((n) ? PLX_INTCSR_DMA1IA : PLX_INTCSR_DMA0IA)
+/* BIST Interrupt Active (read-only) */
+#define PLX_INTCSR_BISTIA BIT(23)
+/* Direct Master Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDM BIT(24)
+/* DMA Channel 0 Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDMA0 BIT(25)
+/* DMA Channel 1 Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDMA1 BIT(26)
+/* DMA Channel N Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDMA(n) ((n) ? PLX_INTCSR_ABNOTDMA1 \
+ : PLX_INTCSR_ABNOTDMA0)
+/* Target Abort Not Generated After 256 Master Retries (read-only) */
+#define PLX_INTCSR_ABNOTRETRY BIT(27)
+/* PCI Wrote Mailbox 0 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB0IA BIT(28)
+/* PCI Wrote Mailbox 1 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB1IA BIT(29)
+/* PCI Wrote Mailbox 2 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB2IA BIT(30)
+/* PCI Wrote Mailbox 3 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB3IA BIT(31)
+/* PCI Wrote Mailbox N (N <= 3) (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MBIA(n) BIT(28 + (n))
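A hypothetical interrupt-handler fragment using the INTCSR definitions above, following the read-then-ack pattern the comedi drivers in this patch use. struct demo_private and its plx_iobase member are invented for illustration; PLX_REG_DMACSR() and the PLX_DMACSR_* bits appear later in this header:

static irqreturn_t plx_demo_isr(int irq, void *d)
{
	struct demo_private *devpriv = d;
	void __iomem *plx = devpriv->plx_iobase;
	u32 intcsr = readl(plx + PLX_REG_INTCSR);
	u8 csr;

	if (!(intcsr & PLX_INTCSR_DMAIA(0)))
		return IRQ_NONE;
	/* ack: preserve only the enable bit, set the clear-interrupt bit */
	csr = readb(plx + PLX_REG_DMACSR(0));
	writeb((csr & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
	       plx + PLX_REG_DMACSR(0));
	return IRQ_HANDLED;
}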
-/*****************************************/
-/*** MAILBOX #(-1) - MEM ACCESS STS ***/
-/*****************************************/
-
-#define MBX_STS_VALID 0x57584744 /* 'WXGD' */
-#define MBX_STS_DILAV 0x44475857 /* swapped = 'DGXW' */
-
-/*****************************************/
-/*** MAILBOX #0 - PUTS STATUS ***/
-/*****************************************/
-
-#define MBX_STS_MASK 0x000000ff /* PUTS Status Register bits */
-#define MBX_STS_TMASK 0x0000000f /* register bits for TEST number */
-
-#define MBX_STS_PCIRESET 0x00000100 /* Host issued PCI reset request */
-#define MBX_STS_BUSY 0x00000080 /* PUTS is in progress */
-#define MBX_STS_ERROR 0x00000040 /* PUTS has failed */
/*
- * Undefined -> status in transition. We are in process of changing bits;
- * we SET Error bit before RESET of Busy bit
+ * Serial EEPROM Control, PCI Command Codes, User I/O Control,
+ * Init Control Register
*/
-#define MBX_STS_RESERVED 0x000000c0
-
-#define MBX_RESERVED_5 0x00000020 /* FYI: reserved/unused bit */
-#define MBX_RESERVED_4 0x00000010 /* FYI: reserved/unused bit */
-
-/******************************************/
-/*** MAILBOX #1 - PUTS COMMANDS ***/
-/******************************************/
-
+#define PLX_REG_CNTRL 0x006c
+
+/* PCI Read Command Code For DMA */
+#define PLX_CNTRL_CCRDMA(x) (BIT(0) * ((x) & 0xf))
+#define PLX_CNTRL_CCRDMA_MASK GENMASK(3, 0)
+#define PLX_CNTRL_CCRDMA_SHIFT 0
+#define PLX_CNTRL_CCRDMA_NORMAL PLX_CNTRL_CCRDMA(14) /* value after reset */
+/* PCI Write Command Code For DMA 0 */
+#define PLX_CNTRL_CCWDMA(x) (BIT(4) * ((x) & 0xf))
+#define PLX_CNTRL_CCWDMA_MASK GENMASK(7, 4)
+#define PLX_CNTRL_CCWDMA_SHIFT 4
+#define PLX_CNTRL_CCWDMA_NORMAL PLX_CNTRL_CCWDMA(7) /* value after reset */
+/* PCI Memory Read Command Code For Direct Master */
+#define PLX_CNTRL_CCRDM(x) (BIT(8) * ((x) & 0xf))
+#define PLX_CNTRL_CCRDM_MASK GENMASK(11, 8)
+#define PLX_CNTRL_CCRDM_SHIFT 8
+#define PLX_CNTRL_CCRDM_NORMAL PLX_CNTRL_CCRDM(6) /* value after reset */
+/* PCI Memory Write Command Code For Direct Master */
+#define PLX_CNTRL_CCWDM(x) (BIT(12) * ((x) & 0xf))
+#define PLX_CNTRL_CCWDM_MASK GENMASK(15, 12)
+#define PLX_CNTRL_CCWDM_SHIFT 12
+#define PLX_CNTRL_CCWDM_NORMAL PLX_CNTRL_CCWDM(7) /* value after reset */
+/* General Purpose Output (USERO) */
+#define PLX_CNTRL_USERO BIT(16)
+/* General Purpose Input (USERI) (read-only) */
+#define PLX_CNTRL_USERI BIT(17)
+/* Serial EEPROM Clock Output (EESK) */
+#define PLX_CNTRL_EESK BIT(24)
+/* Serial EEPROM Chip Select Output (EECS) */
+#define PLX_CNTRL_EECS BIT(25)
+/* Serial EEPROM Data Write Bit (EEDI (sic)) */
+#define PLX_CNTRL_EEWB BIT(26)
+/* Serial EEPROM Data Read Bit (EEDO (sic)) (read-only) */
+#define PLX_CNTRL_EERB BIT(27)
+/* Serial EEPROM Present (read-only) */
+#define PLX_CNTRL_EEPRESENT BIT(28)
+/* Reload Configuration Registers from EEPROM */
+#define PLX_CNTRL_EERELOAD BIT(29)
+/* PCI Adapter Software Reset (asserts LRESETo#) */
+#define PLX_CNTRL_RESET BIT(30)
+/* Local Init Status (read-only) */
+#define PLX_CNTRL_INITDONE BIT(31)
/*
- * Any attempt to execute an unimplement command results in the PUTS
- * interface executing a NOOP and continuing as if the offending command
- * completed normally. Note: this supplies a simple method to interrogate
- * mailbox command processing functionality.
+ * Combined command code stuff for convenience.
*/
+#define PLX_CNTRL_CC_MASK \
+ (PLX_CNTRL_CCRDMA_MASK | PLX_CNTRL_CCWDMA_MASK | \
+ PLX_CNTRL_CCRDM_MASK | PLX_CNTRL_CCWDM_MASK)
+#define PLX_CNTRL_CC_NORMAL \
+ (PLX_CNTRL_CCRDMA_NORMAL | PLX_CNTRL_CCWDMA_NORMAL | \
+ PLX_CNTRL_CCRDM_NORMAL | PLX_CNTRL_CCWDM_NORMAL) /* val after reset */
+
+/* PCI Permanent Configuration ID Register (hard-coded PLX vendor and device) */
+#define PLX_REG_PCIHIDR 0x0070
+
+/* Hard-coded ID for PLX PCI 9080 */
+#define PLX_PCIHIDR_9080 0x908010b5
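Since PCIHIDR is hard-wired, it can serve as a cheap identity check; a hedged probe-time sketch (plx is an assumed __iomem base):

	if (readl(plx + PLX_REG_PCIHIDR) != PLX_PCIHIDR_9080)
		return -ENODEV;	/* not a PLX PCI 9080 */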
+
+/* PCI Permanent Revision ID Register (hard-coded silicon revision) (8-bit). */
+#define PLX_REG_PCIHREV 0x0074
+
+/* DMA Channel N Mode Register (N <= 1) */
+#define PLX_REG_DMAMODE(n) ((n) ? PLX_REG_DMAMODE1 : PLX_REG_DMAMODE0)
+#define PLX_REG_DMAMODE0 0x0080
+#define PLX_REG_DMAMODE1 0x0094
+
+/* Local Bus Width */
+#define PLX_DMAMODE_WIDTH8 (BIT(0) * 0) /* 8 bits wide */
+#define PLX_DMAMODE_WIDTH16 (BIT(0) * 1) /* 16 bits wide */
+#define PLX_DMAMODE_WIDTH32 (BIT(0) * 2) /* 32 bits wide */
+#define PLX_DMAMODE_WIDTH32A (BIT(0) * 3) /* 32 bits wide */
+#define PLX_DMAMODE_WIDTH_MASK GENMASK(1, 0)
+#define PLX_DMAMODE_WIDTH_SHIFT 0
+/* Internal Wait States */
+#define PLX_DMAMODE_IWS(x) (BIT(2) * ((x) & 0xf))
+#define PLX_DMAMODE_IWS_MASK GENMASK(5, 2)
+#define PLX_DMAMODE_IWS_SHIFT 2
+/* Ready Input Enable */
+#define PLX_DMAMODE_READYIEN BIT(6)
+/* BTERM# Input Enable */
+#define PLX_DMAMODE_BTERMIEN BIT(7)
+/* Local Burst Enable */
+#define PLX_DMAMODE_BURSTEN BIT(8)
+/* Chaining Enable */
+#define PLX_DMAMODE_CHAINEN BIT(9)
+/* Done Interrupt Enable */
+#define PLX_DMAMODE_DONEIEN BIT(10)
+/* Hold Local Address Constant */
+#define PLX_DMAMODE_LACONST BIT(11)
+/* Demand Mode */
+#define PLX_DMAMODE_DEMAND BIT(12)
+/* Write And Invalidate Mode */
+#define PLX_DMAMODE_WINVALIDATE BIT(13)
+/* DMA EOT Enable - enables EOT0# or EOT1# input pin */
+#define PLX_DMAMODE_EOTEN BIT(14)
+/* DMA Stop Data Transfer Mode - 0:BLAST; 1:EOT asserted or DREQ deasserted */
+#define PLX_DMAMODE_STOP BIT(15)
+/* DMA Clear Count Mode - count in descriptor cleared on completion */
+#define PLX_DMAMODE_CLRCOUNT BIT(16)
+/* DMA Channel Interrupt Select - 0:local bus interrupt; 1:PCI interrupt */
+#define PLX_DMAMODE_INTRPCI BIT(17)
+
+/* DMA Channel N PCI Address Register (N <= 1) */
+#define PLX_REG_DMAPADR(n) ((n) ? PLX_REG_DMAPADR1 : PLX_REG_DMAPADR0)
+#define PLX_REG_DMAPADR0 0x0084
+#define PLX_REG_DMAPADR1 0x0098
+
+/* DMA Channel N Local Address Register (N <= 1) */
+#define PLX_REG_DMALADR(n) ((n) ? PLX_REG_DMALADR1 : PLX_REG_DMALADR0)
+#define PLX_REG_DMALADR0 0x0088
+#define PLX_REG_DMALADR1 0x009c
+
+/* DMA Channel N Transfer Size (Bytes) Register (N <= 1) (first 23 bits) */
+#define PLX_REG_DMASIZ(n) ((n) ? PLX_REG_DMASIZ1 : PLX_REG_DMASIZ0)
+#define PLX_REG_DMASIZ0 0x008c
+#define PLX_REG_DMASIZ1 0x00a0
+
+/* DMA Channel N Descriptor Pointer Register (N <= 1) */
+#define PLX_REG_DMADPR(n) ((n) ? PLX_REG_DMADPR1 : PLX_REG_DMADPR0)
+#define PLX_REG_DMADPR0 0x0090
+#define PLX_REG_DMADPR1 0x00a4
+
+/* Descriptor Located In PCI Address Space (not local address space) */
+#define PLX_DMADPR_DESCPCI BIT(0)
+/* End Of Chain */
+#define PLX_DMADPR_CHAINEND BIT(1)
+/* Interrupt After Terminal Count */
+#define PLX_DMADPR_TCINTR BIT(2)
+/* Direction Of Transfer Local Bus To PCI (not PCI to local) */
+#define PLX_DMADPR_XFERL2P BIT(3)
+/* Next Descriptor Address Bits 31:4 (16 byte boundary) */
+#define PLX_DMADPR_NEXT_MASK GENMASK(31, 4)
+
+/* DMA Channel N Command/Status Register (N <= 1) (8-bit) */
+#define PLX_REG_DMACSR(n) ((n) ? PLX_REG_DMACSR1 : PLX_REG_DMACSR0)
+#define PLX_REG_DMACSR0 0x00a8
+#define PLX_REG_DMACSR1 0x00a9
+
+/* Channel Enable */
+#define PLX_DMACSR_ENABLE BIT(0)
+/* Channel Start - write 1 to start transfer (write-only) */
+#define PLX_DMACSR_START BIT(1)
+/* Channel Abort - write 1 to abort transfer (write-only) */
+#define PLX_DMACSR_ABORT BIT(2)
+/* Clear Interrupt - write 1 to clear DMA Channel Interrupt (write-only) */
+#define PLX_DMACSR_CLEARINTR BIT(3)
+/* Channel Done - transfer complete/inactive (read-only) */
+#define PLX_DMACSR_DONE BIT(4)
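Sketch of kicking off a transfer on channel 0 once the mode and descriptor-pointer registers are programmed; this mirrors the enable-then-start sequence used by the drivers above (plx is an assumed __iomem base):

	writeb(PLX_DMACSR_ENABLE, plx + PLX_REG_DMACSR(0));
	writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_START,
	       plx + PLX_REG_DMACSR(0));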
+
+/* DMA Threshold Register */
+#define PLX_REG_DMATHR 0x00b0
-#define MBX_CMD_MASK 0xffff0000 /* PUTS Command Register bits */
-
-#define MBX_CMD_ABORTJ 0x85000000 /* abort and jump */
-#define MBX_CMD_RESETP 0x86000000 /* reset and pause at start */
-#define MBX_CMD_PAUSE 0x87000000 /* pause immediately */
-#define MBX_CMD_PAUSEC 0x88000000 /* pause on completion */
-#define MBX_CMD_RESUME 0x89000000 /* resume operation */
-#define MBX_CMD_STEP 0x8a000000 /* single step tests */
-
-#define MBX_CMD_BSWAP 0x8c000000 /* identify byte swap scheme */
-#define MBX_CMD_BSWAP_0 0x8c000000 /* use scheme 0 */
-#define MBX_CMD_BSWAP_1 0x8c000001 /* use scheme 1 */
-
-/* setup host memory access window size */
-#define MBX_CMD_SETHMS 0x8d000000
-/* setup host memory access base address */
-#define MBX_CMD_SETHBA 0x8e000000
-/* perform memory setup and continue (IE. Done) */
-#define MBX_CMD_MGO 0x8f000000
-#define MBX_CMD_NOOP 0xFF000000 /* dummy, illegal command */
-
-/*****************************************/
-/*** MAILBOX #2 - MEMORY SIZE ***/
-/*****************************************/
-
-#define MBX_MEMSZ_MASK 0xffff0000 /* PUTS Memory Size Register bits */
-
-#define MBX_MEMSZ_128KB 0x00020000 /* 128 kilobyte board */
-#define MBX_MEMSZ_256KB 0x00040000 /* 256 kilobyte board */
-#define MBX_MEMSZ_512KB 0x00080000 /* 512 kilobyte board */
-#define MBX_MEMSZ_1MB 0x00100000 /* 1 megabyte board */
-#define MBX_MEMSZ_2MB 0x00200000 /* 2 megabyte board */
-#define MBX_MEMSZ_4MB 0x00400000 /* 4 megabyte board */
-#define MBX_MEMSZ_8MB 0x00800000 /* 8 megabyte board */
-#define MBX_MEMSZ_16MB 0x01000000 /* 16 megabyte board */
-
-/***************************************/
-/*** MAILBOX #2 - BOARD TYPE ***/
-/***************************************/
-
-#define MBX_BTYPE_MASK 0x0000ffff /* PUTS Board Type Register */
-/* PUTS Board Family Register */
-#define MBX_BTYPE_FAMILY_MASK 0x0000ff00
-#define MBX_BTYPE_SUBTYPE_MASK 0x000000ff /* PUTS Board Subtype */
-
-#define MBX_BTYPE_PLX9060 0x00000100 /* PLX family type */
-#define MBX_BTYPE_PLX9080 0x00000300 /* PLX wanXL100s family type */
-
-#define MBX_BTYPE_WANXL_4 0x00000104 /* wanXL400, 4-port */
-#define MBX_BTYPE_WANXL_2 0x00000102 /* wanXL200, 2-port */
-#define MBX_BTYPE_WANXL_1s 0x00000301 /* wanXL100s, 1-port */
-#define MBX_BTYPE_WANXL_1t 0x00000401 /* wanXL100T1, 1-port */
+/*
+ * DMA Threshold constraints:
+ * (C0PLAF + 1) + (C0PLAE + 1) <= 32
+ * (C0LPAF + 1) + (C0LPAE + 1) <= 32
+ * (C1PLAF + 1) + (C1PLAE + 1) <= 16
+ * (C1LPAF + 1) + (C1LPAE + 1) <= 16
+ */
-/*****************************************/
-/*** MAILBOX #3 - SHMQ MAILBOX ***/
-/*****************************************/
+/* DMA Channel 0 PCI-to-Local Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C0PLAF(x) (BIT(0) * ((x) & 0xf))
+#define PLX_DMATHR_C0PLAF_MASK GENMASK(3, 0)
+#define PLX_DMATHR_C0PLAF_SHIFT 0
+/* DMA Channel 0 Local-to-PCI Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C0LPAE(x) (BIT(4) * ((x) & 0xf))
+#define PLX_DMATHR_C0LPAE_MASK GENMASK(7, 4)
+#define PLX_DMATHR_C0LPAE_SHIFT 4
+/* DMA Channel 0 Local-to-PCI Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C0LPAF(x) (BIT(8) * ((x) & 0xf))
+#define PLX_DMATHR_C0LPAF_MASK GENMASK(11, 8)
+#define PLX_DMATHR_C0LPAF_SHIFT 8
+/* DMA Channel 0 PCI-to-Local Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C0PLAE(x) (BIT(12) * ((x) & 0xf))
+#define PLX_DMATHR_C0PLAE_MASK GENMASK(15, 12)
+#define PLX_DMATHR_C0PLAE_SHIFT 12
+/* DMA Channel 1 PCI-to-Local Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C1PLAF(x) (BIT(16) * ((x) & 0xf))
+#define PLX_DMATHR_C1PLAF_MASK GENMASK(19, 16)
+#define PLX_DMATHR_C1PLAF_SHIFT 16
+/* DMA Channel 1 Local-to-PCI Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C1LPAE(x) (BIT(20) * ((x) & 0xf))
+#define PLX_DMATHR_C1LPAE_MASK GENMASK(23, 20)
+#define PLX_DMATHR_C1LPAE_SHIFT 20
+/* DMA Channel 1 Local-to-PCI Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C1LPAF(x) (BIT(24) * ((x) & 0xf))
+#define PLX_DMATHR_C1LPAF_MASK GENMASK(27, 24)
+#define PLX_DMATHR_C1LPAF_SHIFT 24
+/* DMA Channel 1 PCI-to-Local Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C1PLAE(x) (BIT(28) * ((x) & 0xf))
+#define PLX_DMATHR_C1PLAE_MASK GENMASK(31, 28)
+#define PLX_DMATHR_C1PLAE_SHIFT 28
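Worked example (editor's sketch): each field encodes (entries / 2) - 1, so an encoded value of 7 means 16 FIFO entries, and (7 + 1) + (7 + 1) = 16 satisfies the <= 32 constraint for channel 0 (plx is an assumed __iomem base):

	u32 thr = PLX_DMATHR_C0PLAF(7) | PLX_DMATHR_C0PLAE(7) |
		  PLX_DMATHR_C0LPAF(7) | PLX_DMATHR_C0LPAE(7);

	writel(thr, plx + PLX_REG_DMATHR);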
-#define MBX_SMBX_MASK 0x000000ff /* PUTS SHMQ Mailbox bits */
+/*
+ * Messaging Queue Registers OPLFIS, OPLFIM, IQP, OQP, MQCR, QBAR, IFHPR,
+ * IFTPR, IPHPR, IPTPR, OFHPR, OFTPR, OPHPR, OPTPR, and QSR have been omitted.
+ * They are used by the I2O feature. (IQP and OQP occupy the usual offsets of
+ * the MBOX0 and MBOX1 registers if the I2O feature is enabled, but MBOX0 and
+ * MBOX1 are accessible via alternative offsets.)
+ */
-/***************************************/
-/*** GENERIC HOST-SIDE DRIVER ***/
-/***************************************/
+/* Queue Status/Control Register */
+#define PLX_REG_QSR 0x00e8
-#define MBX_ERR 0
-#define MBX_OK 1
+/* Value of QSR after reset - disables I2O feature completely. */
+#define PLX_QSR_VALUE_AFTER_RESET 0x00000050
-/* mailbox check routine - type of testing */
-#define MBXCHK_STS 0x00 /* check for PUTS status */
-#define MBXCHK_NOWAIT 0x01 /* dont care about PUTS status */
+/*
+ * Accesses near the end of memory can cause the PLX chip
+ * to pre-fetch data off of end-of-ram. Limit the size of
+ * memory so host-side accesses cannot occur.
+ */
-/* system allocates this many bytes for address mapping mailbox space */
-#define MBX_ADDR_SPACE_360 0x80 /* wanXL100s/200/400 */
-#define MBX_ADDR_MASK_360 (MBX_ADDR_SPACE_360 - 1)
+#define PLX_PREFETCH 32
+/**
+ * plx9080_abort_dma - Abort a PLX PCI 9080 DMA transfer
+ * @iobase: Remapped base address of configuration registers.
+ * @channel: DMA channel number (0 or 1).
+ *
+ * Aborts the DMA transfer on the channel, which must have been enabled
+ * and started beforehand.
+ *
+ * Return:
+ * %0 on success.
+ * -%ETIMEDOUT if timed out waiting for abort to complete.
+ */
static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
{
void __iomem *dma_cs_addr;
@@ -421,29 +631,26 @@ static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
const int timeout = 10000;
unsigned int i;
- if (channel)
- dma_cs_addr = iobase + PLX_DMA1_CS_REG;
- else
- dma_cs_addr = iobase + PLX_DMA0_CS_REG;
+ dma_cs_addr = iobase + PLX_REG_DMACSR(channel);
- /* abort dma transfer if necessary */
+ /* abort dma transfer if necessary */
dma_status = readb(dma_cs_addr);
- if ((dma_status & PLX_DMA_EN_BIT) == 0)
+ if ((dma_status & PLX_DMACSR_ENABLE) == 0)
return 0;
- /* wait to make sure done bit is zero */
- for (i = 0; (dma_status & PLX_DMA_DONE_BIT) && i < timeout; i++) {
+ /* wait to make sure done bit is zero */
+ for (i = 0; (dma_status & PLX_DMACSR_DONE) && i < timeout; i++) {
udelay(1);
dma_status = readb(dma_cs_addr);
}
if (i == timeout)
return -ETIMEDOUT;
- /* disable and abort channel */
- writeb(PLX_DMA_ABORT_BIT, dma_cs_addr);
- /* wait for dma done bit */
+ /* disable and abort channel */
+ writeb(PLX_DMACSR_ABORT, dma_cs_addr);
+ /* wait for dma done bit */
dma_status = readb(dma_cs_addr);
- for (i = 0; (dma_status & PLX_DMA_DONE_BIT) == 0 && i < timeout; i++) {
+ for (i = 0; (dma_status & PLX_DMACSR_DONE) == 0 && i < timeout; i++) {
udelay(1);
dma_status = readb(dma_cs_addr);
}
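A hypothetical caller, e.g. in a comedi driver's cancel or detach path, using the devpriv->plx9080_mmio pointer as the drivers in this patch do:

	if (plx9080_abort_dma(devpriv->plx9080_mmio, 0))
		dev_warn(dev->class_dev,
			 "timed out aborting DMA channel 0\n");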
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index e9e43139157d..802f51e46405 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -643,7 +643,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
outb(0, dev->iobase + DAQP_AUX_REG);
for (i = 0; i > insn->n; i++) {
- unsigned val = data[i];
+ unsigned int val = data[i];
int ret;
/* D/A transfer rate is about 8ms */
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 9b6c56773247..e00e9c6268ae 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -362,7 +362,7 @@ struct rtd_private {
long ai_count; /* total transfer size (samples) */
int xfer_count; /* # to transfer data. 0->1/2FIFO */
int flags; /* flag event modes */
- unsigned fifosz;
+ unsigned int fifosz;
/* 8254 Timer/Counter gate and clock sources */
unsigned char timer_gate_src[3];
@@ -491,9 +491,9 @@ static void rtd_load_channelgain_list(struct comedi_device *dev,
static int rtd520_probe_fifo_depth(struct comedi_device *dev)
{
unsigned int chanspec = CR_PACK(0, 0, AREF_GROUND);
- unsigned i;
- static const unsigned limit = 0x2000;
- unsigned fifo_size = 0;
+ unsigned int i;
+ static const unsigned int limit = 0x2000;
+ unsigned int fifo_size = 0;
writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
rtd_load_channelgain_list(dev, 1, &chanspec);
@@ -501,7 +501,7 @@ static int rtd520_probe_fifo_depth(struct comedi_device *dev)
writel(0, dev->mmio + LAS0_ADC_CONVERSION);
/* convert samples */
for (i = 0; i < limit; ++i) {
- unsigned fifo_status;
+ unsigned int fifo_status;
/* trigger conversion */
writew(0, dev->mmio + LAS0_ADC);
usleep_range(1, 1000);
@@ -1175,7 +1175,7 @@ static void rtd_reset(struct comedi_device *dev)
writel(0, dev->mmio + LAS0_BOARD_RESET);
usleep_range(100, 1000); /* needed? */
- writel(0, devpriv->lcfg + PLX_INTRCS_REG);
+ writel(0, devpriv->lcfg + PLX_REG_INTCSR);
writew(0, dev->mmio + LAS0_IT);
writew(~0, dev->mmio + LAS0_CLEAR);
readw(dev->mmio + LAS0_CLEAR);
@@ -1316,7 +1316,8 @@ static int rtd_auto_attach(struct comedi_device *dev,
devpriv->fifosz = ret;
if (dev->irq)
- writel(ICS_PIE | ICS_PLIE, devpriv->lcfg + PLX_INTRCS_REG);
+ writel(PLX_INTCSR_PIEN | PLX_INTCSR_PLIEN,
+ devpriv->lcfg + PLX_REG_INTCSR);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index c5e08635e01e..4a87b4b52400 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -708,7 +708,7 @@ static uint16_t s626_get_mode_a(struct comedi_device *dev,
uint16_t cra;
uint16_t crb;
uint16_t setup;
- unsigned cntsrc, clkmult, clkpol, encmode;
+ unsigned int cntsrc, clkmult, clkpol, encmode;
/* Fetch CRA and CRB register images. */
cra = s626_debi_read(dev, S626_LP_CRA(chan));
@@ -763,7 +763,7 @@ static uint16_t s626_get_mode_b(struct comedi_device *dev,
uint16_t cra;
uint16_t crb;
uint16_t setup;
- unsigned cntsrc, clkmult, clkpol, encmode;
+ unsigned int cntsrc, clkmult, clkpol, encmode;
/* Fetch CRA and CRB register images. */
cra = s626_debi_read(dev, S626_LP_CRA(chan));
@@ -838,7 +838,7 @@ static void s626_set_mode_a(struct comedi_device *dev,
struct s626_private *devpriv = dev->private;
uint16_t cra;
uint16_t crb;
- unsigned cntsrc, clkmult, clkpol;
+ unsigned int cntsrc, clkmult, clkpol;
/* Initialize CRA and CRB images. */
/* Preload trigger is passed through. */
@@ -916,7 +916,7 @@ static void s626_set_mode_b(struct comedi_device *dev,
struct s626_private *devpriv = dev->private;
uint16_t cra;
uint16_t crb;
- unsigned cntsrc, clkmult, clkpol;
+ unsigned int cntsrc, clkmult, clkpol;
/* Initialize CRA and CRB images. */
/* IndexSrc is passed through. */
diff --git a/drivers/staging/comedi/drivers/s626.h b/drivers/staging/comedi/drivers/s626.h
index b83424e7507b..6a00a64c6f3a 100644
--- a/drivers/staging/comedi/drivers/s626.h
+++ b/drivers/staging/comedi/drivers/s626.h
@@ -29,8 +29,10 @@
#define S626_ENCODER_CHANNELS 6
#define S626_DIO_CHANNELS 48
#define S626_DIO_BANKS 3 /* Number of DIO groups. */
-#define S626_DIO_EXTCHANS 40 /* Number of extended-capability
- * DIO channels. */
+#define S626_DIO_EXTCHANS 40 /*
+ * Number of extended-capability
+ * DIO channels.
+ */
#define S626_NUM_TRIMDACS 12 /* Number of valid TrimDAC channels. */
@@ -48,21 +50,29 @@
#define S626_GSEL_BIPOLAR10V 0x00A0 /* S626_LP_GSEL setting 10V bipolar. */
/* Error codes that must be visible to this base class. */
-#define S626_ERR_ILLEGAL_PARM 0x00010000 /* Illegal function parameter
- * value was specified. */
+#define S626_ERR_ILLEGAL_PARM 0x00010000 /*
+ * Illegal function parameter
+ * value was specified.
+ */
#define S626_ERR_I2C 0x00020000 /* I2C error. */
-#define S626_ERR_COUNTERSETUP 0x00200000 /* Illegal setup specified for
- * counter channel. */
+#define S626_ERR_COUNTERSETUP 0x00200000 /*
+ * Illegal setup specified for
+ * counter channel.
+ */
#define S626_ERR_DEBI_TIMEOUT 0x00400000 /* DEBI transfer timed out. */
/*
* Organization (physical order) and size (in DWORDs) of logical DMA buffers
* contained by ANA_DMABUF.
*/
-#define S626_ADC_DMABUF_DWORDS 40 /* ADC DMA buffer must hold 16 samples,
- * plus pre/post garbage samples. */
-#define S626_DAC_WDMABUF_DWORDS 1 /* DAC output DMA buffer holds a single
- * sample. */
+#define S626_ADC_DMABUF_DWORDS 40 /*
+ * ADC DMA buffer must hold 16 samples,
+ * plus pre/post garbage samples.
+ */
+#define S626_DAC_WDMABUF_DWORDS 1 /*
+ * DAC output DMA buffer holds a single
+ * sample.
+ */
/* All remaining space in 4KB DMA buffer is available for the RPS1 program. */
@@ -95,60 +105,90 @@
#define S626_RPS_IRQ 0x60000000 /* IRQ */
#define S626_RPS_LOGICAL_OR 0x08000000 /* Logical OR conditionals. */
-#define S626_RPS_INVERT 0x04000000 /* Test for negated
- * semaphores. */
+#define S626_RPS_INVERT 0x04000000 /*
+ * Test for negated
+ * semaphores.
+ */
#define S626_RPS_DEBI 0x00000002 /* DEBI done */
-#define S626_RPS_SIG0 0x00200000 /* RPS semaphore 0
- * (used by ADC). */
-#define S626_RPS_SIG1 0x00400000 /* RPS semaphore 1
- * (used by DAC). */
-#define S626_RPS_SIG2 0x00800000 /* RPS semaphore 2
- * (not used). */
+#define S626_RPS_SIG0 0x00200000 /*
+ * RPS semaphore 0
+ * (used by ADC).
+ */
+#define S626_RPS_SIG1 0x00400000 /*
+ * RPS semaphore 1
+ * (used by DAC).
+ */
+#define S626_RPS_SIG2 0x00800000 /*
+ * RPS semaphore 2
+ * (not used).
+ */
#define S626_RPS_GPIO2 0x00080000 /* RPS GPIO2 */
#define S626_RPS_GPIO3 0x00100000 /* RPS GPIO3 */
-#define S626_RPS_SIGADC S626_RPS_SIG0 /* Trigger/status for
- * ADC's RPS program. */
-#define S626_RPS_SIGDAC S626_RPS_SIG1 /* Trigger/status for
- * DAC's RPS program. */
+#define S626_RPS_SIGADC S626_RPS_SIG0 /*
+ * Trigger/status for
+ * ADC's RPS program.
+ */
+#define S626_RPS_SIGDAC S626_RPS_SIG1 /*
+ * Trigger/status for
+ * DAC's RPS program.
+ */
/* RPS clock parameters. */
-#define S626_RPSCLK_SCALAR 8 /* This is apparent ratio of
- * PCI/RPS clks (undocumented!!). */
+#define S626_RPSCLK_SCALAR 8 /*
+ * This is apparent ratio of
+ * PCI/RPS clks (undocumented!!).
+ */
#define S626_RPSCLK_PER_US (33 / S626_RPSCLK_SCALAR)
- /* Number of RPS clocks in one
- * microsecond. */
+ /*
+ * Number of RPS clocks in one
+ * microsecond.
+ */
/* Event counter source addresses. */
#define S626_SBA_RPS_A0 0x27 /* Time of RPS0 busy, in PCI clocks. */
/* GPIO constants. */
-#define S626_GPIO_BASE 0x10004000 /* GPIO 0,2,3 = inputs,
- * GPIO3 = IRQ; GPIO1 = out. */
+#define S626_GPIO_BASE 0x10004000 /*
+ * GPIO 0,2,3 = inputs,
+ * GPIO3 = IRQ; GPIO1 = out.
+ */
#define S626_GPIO1_LO 0x00000000 /* GPIO1 set to LOW. */
#define S626_GPIO1_HI 0x00001000 /* GPIO1 set to HIGH. */
/* Primary Status Register (PSR) constants. */
#define S626_PSR_DEBI_E 0x00040000 /* DEBI event flag. */
#define S626_PSR_DEBI_S 0x00080000 /* DEBI status flag. */
-#define S626_PSR_A2_IN 0x00008000 /* Audio output DMA2 protection
- * address reached. */
-#define S626_PSR_AFOU 0x00000800 /* Audio FIFO under/overflow
- * detected. */
-#define S626_PSR_GPIO2 0x00000020 /* GPIO2 input pin: 0=AdcBusy,
- * 1=AdcIdle. */
-#define S626_PSR_EC0S 0x00000001 /* Event counter 0 threshold
- * reached. */
+#define S626_PSR_A2_IN 0x00008000 /*
+ * Audio output DMA2 protection
+ * address reached.
+ */
+#define S626_PSR_AFOU 0x00000800 /*
+ * Audio FIFO under/overflow
+ * detected.
+ */
+#define S626_PSR_GPIO2 0x00000020 /*
+ * GPIO2 input pin: 0=AdcBusy,
+ * 1=AdcIdle.
+ */
+#define S626_PSR_EC0S 0x00000001 /*
+ * Event counter 0 threshold
+ * reached.
+ */
/* Secondary Status Register (SSR) constants. */
-#define S626_SSR_AF2_OUT 0x00000200 /* Audio 2 output FIFO
- * under/overflow detected. */
+#define S626_SSR_AF2_OUT 0x00000200 /*
+ * Audio 2 output FIFO
+ * under/overflow detected.
+ */
/* Master Control Register 1 (MC1) constants. */
#define S626_MC1_SOFT_RESET 0x80000000 /* Invoke 7146 soft reset. */
-#define S626_MC1_SHUTDOWN 0x3FFF0000 /* Shut down all MC1-controlled
- * enables. */
+#define S626_MC1_SHUTDOWN 0x3FFF0000 /*
+ * Shut down all MC1-controlled
+ * enables.
+ */
#define S626_MC1_ERPS1 0x2000 /* Enab/disable RPS task 1. */
#define S626_MC1_ERPS0 0x1000 /* Enab/disable RPS task 0. */
@@ -177,15 +217,23 @@
#define S626_P_DEBIAD 0x0088 /* DEBI target address. */
#define S626_P_I2CCTRL 0x008C /* I2C control. */
#define S626_P_I2CSTAT 0x0090 /* I2C status. */
-#define S626_P_BASEA2_IN 0x00AC /* Audio input 2 base physical DMAbuf
- * address. */
-#define S626_P_PROTA2_IN 0x00B0 /* Audio input 2 physical DMAbuf
- * protection address. */
+#define S626_P_BASEA2_IN 0x00AC /*
+ * Audio input 2 base physical DMAbuf
+ * address.
+ */
+#define S626_P_PROTA2_IN 0x00B0 /*
+ * Audio input 2 physical DMAbuf
+ * protection address.
+ */
#define S626_P_PAGEA2_IN 0x00B4 /* Audio input 2 paging attributes. */
-#define S626_P_BASEA2_OUT 0x00B8 /* Audio output 2 base physical DMAbuf
- * address. */
-#define S626_P_PROTA2_OUT 0x00BC /* Audio output 2 physical DMAbuf
- * protection address. */
+#define S626_P_BASEA2_OUT 0x00B8 /*
+ * Audio output 2 base physical DMAbuf
+ * address.
+ */
+#define S626_P_PROTA2_OUT 0x00BC /*
+ * Audio output 2 physical DMAbuf
+ * protection address.
+ */
#define S626_P_PAGEA2_OUT 0x00C0 /* Audio output 2 paging attributes. */
#define S626_P_RPSPAGE0 0x00C4 /* RPS0 page. */
#define S626_P_RPSPAGE1 0x00C8 /* RPS1 page. */
@@ -205,8 +253,10 @@
#define S626_P_PSR 0x0110 /* Primary status. */
#define S626_P_SSR 0x0114 /* Secondary status. */
#define S626_P_EC1R 0x0118 /* Event counter set 1. */
-#define S626_P_ADP4 0x0138 /* Logical audio DMA pointer of audio
- * input FIFO A2_IN. */
+#define S626_P_ADP4 0x0138 /*
+ * Logical audio DMA pointer of audio
+ * input FIFO A2_IN.
+ */
#define S626_P_FB_BUFFER1 0x0144 /* Audio feedback buffer 1. */
#define S626_P_FB_BUFFER2 0x0148 /* Audio feedback buffer 2. */
#define S626_P_TSL1 0x0180 /* Audio time slot list 1. */
@@ -243,13 +293,19 @@
#define S626_LP_RDMISC2 0x0082 /* Read Misc2. */
/* Bit masks for MISC1 register that are the same for reads and writes. */
-#define S626_MISC1_WENABLE 0x8000 /* enab writes to MISC2 (except Clear
- * Watchdog bit). */
+#define S626_MISC1_WENABLE 0x8000 /*
+ * enab writes to MISC2 (except Clear
+ * Watchdog bit).
+ */
#define S626_MISC1_WDISABLE 0x0000 /* Disable writes to MISC2. */
-#define S626_MISC1_EDCAP 0x1000 /* Enable edge capture on DIO chans
- * specified by S626_LP_WRCAPSELx. */
-#define S626_MISC1_NOEDCAP 0x0000 /* Disable edge capture on specified
- * DIO chans. */
+#define S626_MISC1_EDCAP 0x1000 /*
+ * Enable edge capture on DIO chans
+ * specified by S626_LP_WRCAPSELx.
+ */
+#define S626_MISC1_NOEDCAP 0x0000 /*
+ * Disable edge capture on specified
+ * DIO chans.
+ */
/* Bit masks for MISC1 register reads. */
#define S626_RDMISC1_WDTIMEOUT 0x4000 /* Watchdog timer timed out. */
@@ -268,35 +324,49 @@
#define S626_A1_RUN 0x20000000 /* Run A1 based on TSL1. */
#define S626_A1_SWAP 0x00200000 /* Use big-endian for A1. */
#define S626_A2_SWAP 0x00100000 /* Use big-endian for A2. */
-#define S626_WS_MODES 0x00019999 /* WS0 = TSL1 trigger input,
- * WS1-WS4 = CS* outputs. */
-
-#if S626_PLATFORM == S626_INTEL /* Base ACON1 config: always run
- * A1 based on TSL1. */
+#define S626_WS_MODES 0x00019999 /*
+ * WS0 = TSL1 trigger input,
+ * WS1-WS4 = CS* outputs.
+ */
+
+#if S626_PLATFORM == S626_INTEL /*
+ * Base ACON1 config: always run
+ * A1 based on TSL1.
+ */
#define S626_ACON1_BASE (S626_WS_MODES | S626_A1_RUN)
#elif S626_PLATFORM == S626_MOTOROLA
#define S626_ACON1_BASE \
(S626_WS_MODES | S626_A1_RUN | S626_A1_SWAP | S626_A2_SWAP)
#endif
-#define S626_ACON1_ADCSTART S626_ACON1_BASE /* Start ADC: run A1
- * based on TSL1. */
+#define S626_ACON1_ADCSTART S626_ACON1_BASE /*
+ * Start ADC: run A1
+ * based on TSL1.
+ */
#define S626_ACON1_DACSTART (S626_ACON1_BASE | S626_A2_RUN)
/* Start transmit to DAC: run A2 based on TSL2. */
#define S626_ACON1_DACSTOP S626_ACON1_BASE /* Halt A2. */
/* Bit masks for ACON2 register. */
#define S626_A1_CLKSRC_BCLK1 0x00000000 /* A1 bit rate = BCLK1 (ADC). */
-#define S626_A2_CLKSRC_X1 0x00800000 /* A2 bit rate = ACLK/1
- * (DACs). */
-#define S626_A2_CLKSRC_X2 0x00C00000 /* A2 bit rate = ACLK/2
- * (DACs). */
-#define S626_A2_CLKSRC_X4 0x01400000 /* A2 bit rate = ACLK/4
- * (DACs). */
+#define S626_A2_CLKSRC_X1 0x00800000 /*
+ * A2 bit rate = ACLK/1
+ * (DACs).
+ */
+#define S626_A2_CLKSRC_X2 0x00C00000 /*
+ * A2 bit rate = ACLK/2
+ * (DACs).
+ */
+#define S626_A2_CLKSRC_X4 0x01400000 /*
+ * A2 bit rate = ACLK/4
+ * (DACs).
+ */
#define S626_INVERT_BCLK2 0x00100000 /* Invert BCLK2 (DACs). */
#define S626_BCLK2_OE 0x00040000 /* Enable BCLK2 (DACs). */
-#define S626_ACON2_XORMASK 0x000C0000 /* XOR mask for ACON2
- * active-low bits. */
+#define S626_ACON2_XORMASK 0x000C0000 /*
+ * XOR mask for ACON2
+ * active-low bits.
+ */
#define S626_ACON2_INIT (S626_ACON2_XORMASK ^ \
(S626_A1_CLKSRC_BCLK1 | S626_A2_CLKSRC_X2 | \
@@ -308,12 +378,18 @@
#define S626_WS3 0x10000000
#define S626_WS4 0x08000000
#define S626_RSD1 0x01000000 /* Shift A1 data in on SD1. */
-#define S626_SDW_A1 0x00800000 /* Store rcv'd char at next char
- * slot of DWORD1 buffer. */
-#define S626_SIB_A1 0x00400000 /* Store rcv'd char at next
- * char slot of FB1 buffer. */
-#define S626_SF_A1 0x00200000 /* Write unsigned long
- * buffer to input FIFO. */
+#define S626_SDW_A1 0x00800000 /*
+ * Store rcv'd char at next char
+ * slot of DWORD1 buffer.
+ */
+#define S626_SIB_A1 0x00400000 /*
+ * Store rcv'd char at next
+ * char slot of FB1 buffer.
+ */
+#define S626_SF_A1 0x00200000 /*
+ * Write unsigned long
+ * buffer to input FIFO.
+ */
/* Select parallel-to-serial converter's data source: */
#define S626_XFIFO_0 0x00000000 /* Data fifo byte 0. */
@@ -324,31 +400,45 @@
#define S626_XFB1 0x00000050 /* FB_BUFFER byte 1. */
#define S626_XFB2 0x00000060 /* FB_BUFFER byte 2. */
#define S626_XFB3 0x00000070 /* FB_BUFFER byte 3. */
-#define S626_SIB_A2 0x00000200 /* Store next dword from A2's
+#define S626_SIB_A2 0x00000200 /*
+ * Store next dword from A2's
* input shifter to FB2
- * buffer. */
-#define S626_SF_A2 0x00000100 /* Store next dword from A2's
+ * buffer.
+ */
+#define S626_SF_A2 0x00000100 /*
+ * Store next dword from A2's
* input shifter to its input
- * fifo. */
-#define S626_LF_A2 0x00000080 /* Load next dword from A2's
+ * fifo.
+ */
+#define S626_LF_A2 0x00000080 /*
+ * Load next dword from A2's
* output fifo into its
- * output dword buffer. */
+ * output dword buffer.
+ */
#define S626_XSD2 0x00000008 /* Shift data out on SD2. */
#define S626_RSD3 0x00001800 /* Shift data in on SD3. */
#define S626_RSD2 0x00001000 /* Shift data in on SD2. */
-#define S626_LOW_A2 0x00000002 /* Drive last SD low for 7 clks,
- * then tri-state. */
+#define S626_LOW_A2 0x00000002 /*
+ * Drive last SD low for 7 clks,
+ * then tri-state.
+ */
#define S626_EOS 0x00000001 /* End of superframe. */
/* I2C configuration constants. */
-#define S626_I2C_CLKSEL 0x0400 /* I2C bit rate =
- * PCIclk/480 = 68.75 KHz. */
-#define S626_I2C_BITRATE 68.75 /* I2C bus data bit rate
+#define S626_I2C_CLKSEL 0x0400 /*
+ * I2C bit rate =
+ * PCIclk/480 = 68.75 KHz.
+ */
+#define S626_I2C_BITRATE 68.75 /*
+ * I2C bus data bit rate
* (determined by
- * S626_I2C_CLKSEL) in KHz. */
-#define S626_I2C_WRTIME 15.0 /* Worst case time, in msec,
+ * S626_I2C_CLKSEL) in KHz.
+ */
+#define S626_I2C_WRTIME 15.0 /*
+ * Worst case time, in msec,
* for EEPROM internal write
- * op. */
+ * op.
+ */
/* I2C manifest constants. */
@@ -368,8 +458,10 @@
#define S626_I2C_B0(ATTR, VAL) (((ATTR) << 2) | ((VAL) << 8))
/* DEBI command constants. */
-#define S626_DEBI_CMD_SIZE16 (2 << 17) /* Transfer size is always
- * 2 bytes. */
+#define S626_DEBI_CMD_SIZE16 (2 << 17) /*
+ * Transfer size is always
+ * 2 bytes.
+ */
#define S626_DEBI_CMD_READ 0x00010000 /* Read operation. */
#define S626_DEBI_CMD_WRITE 0x00000000 /* Write operation. */
@@ -380,42 +472,58 @@
#define S626_DEBI_CMD_WRWORD (S626_DEBI_CMD_WRITE | S626_DEBI_CMD_SIZE16)
/* DEBI configuration constants. */
-#define S626_DEBI_CFG_XIRQ_EN 0x80000000 /* Enable external interrupt
- * on GPIO3. */
+#define S626_DEBI_CFG_XIRQ_EN 0x80000000 /*
+ * Enable external interrupt
+ * on GPIO3.
+ */
#define S626_DEBI_CFG_XRESUME 0x40000000 /* Resume block */
- /* Transfer when XIRQ
- * deasserted. */
+ /*
+ * Transfer when XIRQ
+ * deasserted.
+ */
#define S626_DEBI_CFG_TOQ 0x03C00000 /* Timeout (15 PCI cycles). */
#define S626_DEBI_CFG_FAST 0x10000000 /* Fast mode enable. */
/* 4-bit field that specifies DEBI timeout value in PCI clock cycles: */
-#define S626_DEBI_CFG_TOUT_BIT 22 /* Finish DEBI cycle after this many
- * clocks. */
+#define S626_DEBI_CFG_TOUT_BIT 22 /*
+ * Finish DEBI cycle after this many
+ * clocks.
+ */
/* 2-bit field that specifies Endian byte lane steering: */
-#define S626_DEBI_CFG_SWAP_NONE 0x00000000 /* Straight - don't swap any
- * bytes (Intel). */
+#define S626_DEBI_CFG_SWAP_NONE 0x00000000 /*
+ * Straight - don't swap any
+ * bytes (Intel).
+ */
#define S626_DEBI_CFG_SWAP_2 0x00100000 /* 2-byte swap (Motorola). */
#define S626_DEBI_CFG_SWAP_4 0x00200000 /* 4-byte swap. */
-#define S626_DEBI_CFG_SLAVE16 0x00080000 /* Slave is able to serve
- * 16-bit cycles. */
-#define S626_DEBI_CFG_INC 0x00040000 /* Enable address increment
- * for block transfers. */
+#define S626_DEBI_CFG_SLAVE16 0x00080000 /*
+ * Slave is able to serve
+ * 16-bit cycles.
+ */
+#define S626_DEBI_CFG_INC 0x00040000 /*
+ * Enable address increment
+ * for block transfers.
+ */
#define S626_DEBI_CFG_INTEL 0x00020000 /* Intel style local bus. */
#define S626_DEBI_CFG_TIMEROFF 0x00010000 /* Disable timer. */
#if S626_PLATFORM == S626_INTEL
-#define S626_DEBI_TOUT 7 /* Wait 7 PCI clocks (212 ns) before
- * polling RDY. */
+#define S626_DEBI_TOUT 7 /*
+ * Wait 7 PCI clocks (212 ns) before
+ * polling RDY.
+ */
/* Intel byte lane steering (pass through all byte lanes). */
#define S626_DEBI_SWAP S626_DEBI_CFG_SWAP_NONE
#elif S626_PLATFORM == S626_MOTOROLA
-#define S626_DEBI_TOUT 15 /* Wait 15 PCI clocks (454 ns) maximum
- * before timing out. */
+#define S626_DEBI_TOUT 15 /*
+ * Wait 15 PCI clocks (454 ns) maximum
+ * before timing out.
+ */
/* Motorola byte lane steering. */
#define S626_DEBI_SWAP S626_DEBI_CFG_SWAP_2
@@ -429,10 +537,14 @@
/* LoadSrc values: */
#define S626_LOADSRC_INDX 0 /* Preload core in response to Index. */
-#define S626_LOADSRC_OVER 1 /* Preload core in response to
- * Overflow. */
-#define S626_LOADSRCB_OVERA 2 /* Preload B core in response to
- * A Overflow. */
+#define S626_LOADSRC_OVER 1 /*
+ * Preload core in response to
+ * Overflow.
+ */
+#define S626_LOADSRCB_OVERA 2 /*
+ * Preload B core in response to
+ * A Overflow.
+ */
#define S626_LOADSRC_NONE 3 /* Never preload core. */
/* IntSrc values: */
@@ -469,10 +581,14 @@
#define S626_CNTSRC_SYSCLK_DOWN 3 /* System clock down */
/* ClkPol values: */
-#define S626_CLKPOL_POS 0 /* Counter/Extender clock is
- * active high. */
-#define S626_CLKPOL_NEG 1 /* Counter/Extender clock is
- * active low. */
+#define S626_CLKPOL_POS 0 /*
+ * Counter/Extender clock is
+ * active high.
+ */
+#define S626_CLKPOL_NEG 1 /*
+ * Counter/Extender clock is
+ * active low.
+ */
#define S626_CNTDIR_UP 0 /* Timer counts up. */
#define S626_CNTDIR_DOWN 1 /* Timer counts down. */
@@ -488,8 +604,10 @@
/* Sanity-check limits for parameters. */
-#define S626_NUM_COUNTERS 6 /* Maximum valid counter
- * logical channel number. */
+#define S626_NUM_COUNTERS 6 /*
+ * Maximum valid counter
+ * logical channel number.
+ */
#define S626_NUM_INTSOURCES 4
#define S626_NUM_LATCHSOURCES 4
#define S626_NUM_CLKMULTS 4
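
Every s626.h hunk above applies the same transformation: trailing multi-line comments are reflowed into the kernel's preferred block style, with nothing after the opening sequence and the terminator on its own line. A compilable sketch of the two accepted forms, using made-up EXAMPLE_FLAG_* names:

/* Single-line comments keep the compact one-line form. */
#define EXAMPLE_FLAG_A	0x0001	/* Short description. */

/*
 * Multi-line comments open with a lone comment introducer and put the
 * terminator on its own line, which is what the s626.h hunks above
 * convert the old trailing-text style into.
 */
#define EXAMPLE_FLAG_B	0x0002	/*
				 * Longer description that would not
				 * fit on a single line.
				 */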
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index 7a1defcf2102..0d33e520f635 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -95,7 +95,7 @@ struct serial_data {
#define S2002_CFG_SIGN(x) (((x) >> 13) & 0x1)
#define S2002_CFG_BASE(x) (((x) >> 14) & 0xfffff)
-static long serial2002_tty_ioctl(struct file *f, unsigned op,
+static long serial2002_tty_ioctl(struct file *f, unsigned int op,
unsigned long param)
{
if (f->f_op->unlocked_ioctl)
@@ -379,7 +379,10 @@ static int serial2002_setup_subdevice(struct comedi_subdevice *s,
range_table_list[chan] =
(const struct comedi_lrange *)&range[j];
}
- maxdata_list[chan] = ((long long)1 << cfg[j].bits) - 1;
+ if (cfg[j].bits < 32)
+ maxdata_list[chan] = (1u << cfg[j].bits) - 1;
+ else
+ maxdata_list[chan] = 0xffffffff;
chan++;
}
}
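
The serial2002 hunk above replaces the 64-bit shift with a guarded 32-bit computation: with a 1u operand, a shift count of 32 or more is undefined behaviour in C, so the wide case is clamped to the full 32-bit range instead of relying on the shift. A standalone sketch of the same helper:

#include <stdint.h>
#include <stdio.h>

/*
 * Guarded maxdata computation, mirroring the serial2002 change above:
 * shifting a 32-bit value by 32 or more is undefined behaviour, so a
 * channel reporting 32 (or more) bits gets the full 32-bit range.
 */
static uint32_t maxdata_for_bits(unsigned int bits)
{
	if (bits < 32)
		return (1u << bits) - 1;
	return 0xffffffff;	/* clamp: full 32-bit range */
}

int main(void)
{
	printf("12 bits -> %#x\n", (unsigned)maxdata_for_bits(12));
	printf("32 bits -> %#x\n", (unsigned)maxdata_for_bits(32));
	return 0;
}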
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index c31fe1bca191..fe271fbd629b 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -57,12 +57,14 @@ int dpbp_open(struct fsl_mc_io *mc_io,
u16 *token)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_open *cmd_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
cmd_flags, 0);
- cmd.params[0] |= mc_enc(0, 32, dpbp_id);
+ cmd_params = (struct dpbp_cmd_open *)cmd.params;
+ cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -70,7 +72,7 @@ int dpbp_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return err;
}
@@ -143,7 +145,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
@@ -231,6 +233,7 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
int *en)
{
struct mc_command cmd = { 0 };
+ struct dpbp_rsp_is_enabled *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
@@ -242,7 +245,8 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *en = (int)mc_dec(cmd.params[0], 0, 1);
+ rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
+ *en = rsp_params->enabled & DPBP_ENABLE;
return 0;
}
@@ -286,14 +290,16 @@ int dpbp_set_irq(struct fsl_mc_io *mc_io,
struct dpbp_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_set_irq *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, irq_index);
- cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+ cmd_params = (struct dpbp_cmd_set_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -319,12 +325,15 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io,
struct dpbp_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_get_irq *cmd_params;
+ struct dpbp_rsp_get_irq *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -332,10 +341,12 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
- irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
- *type = (int)mc_dec(cmd.params[2], 32, 32);
+ rsp_params = (struct dpbp_rsp_get_irq *)cmd.params;
+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+ irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+ *type = le32_to_cpu(rsp_params->type);
+
return 0;
}
@@ -361,12 +372,14 @@ int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
u8 en)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_set_irq_enable *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, en);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en & DPBP_ENABLE;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -389,12 +402,15 @@ int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
u8 *en)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_get_irq_enable *cmd_params;
+ struct dpbp_rsp_get_irq_enable *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -402,7 +418,8 @@ int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *en = (u8)mc_dec(cmd.params[0], 0, 8);
+ rsp_params = (struct dpbp_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->enabled & DPBP_ENABLE;
return 0;
}
@@ -429,12 +446,14 @@ int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
u32 mask)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_set_irq_mask *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, mask);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -460,12 +479,15 @@ int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
u32 *mask)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_get_irq_mask *cmd_params;
+ struct dpbp_rsp_get_irq_mask *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -473,7 +495,9 @@ int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *mask = (u32)mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dpbp_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
return 0;
}
@@ -497,13 +521,16 @@ int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
u32 *status)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_get_irq_status *cmd_params;
+ struct dpbp_rsp_get_irq_status *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, *status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -511,7 +538,9 @@ int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *status = (u32)mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dpbp_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
return 0;
}
@@ -535,12 +564,14 @@ int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
u32 status)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_clear_irq_status *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -562,6 +593,7 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
struct dpbp_attr *attr)
{
struct mc_command cmd = { 0 };
+ struct dpbp_rsp_get_attributes *rsp_params;
int err;
/* prepare command */
@@ -574,10 +606,12 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- attr->bpid = (u16)mc_dec(cmd.params[0], 16, 16);
- attr->id = (int)mc_dec(cmd.params[0], 32, 32);
- attr->version.major = (u16)mc_dec(cmd.params[1], 0, 16);
- attr->version.minor = (u16)mc_dec(cmd.params[1], 16, 16);
+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+ attr->bpid = le16_to_cpu(rsp_params->bpid);
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->version.major = le16_to_cpu(rsp_params->version_major);
+ attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
return 0;
}
EXPORT_SYMBOL(dpbp_get_attributes);
@@ -597,19 +631,19 @@ int dpbp_set_notifications(struct fsl_mc_io *mc_io,
struct dpbp_notification_cfg *cfg)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_set_notifications *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
- cmd_flags,
- token);
-
- cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry);
- cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit);
- cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry);
- cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit);
- cmd.params[2] |= mc_enc(0, 16, cfg->options);
- cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx);
- cmd.params[4] |= mc_enc(0, 64, cfg->message_iova);
+ cmd_flags, token);
+ cmd_params = (struct dpbp_cmd_set_notifications *)cmd.params;
+ cmd_params->depletion_entry = cpu_to_le32(cfg->depletion_entry);
+ cmd_params->depletion_exit = cpu_to_le32(cfg->depletion_exit);
+ cmd_params->surplus_entry = cpu_to_le32(cfg->surplus_entry);
+ cmd_params->surplus_exit = cpu_to_le32(cfg->surplus_exit);
+ cmd_params->options = cpu_to_le16(cfg->options);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -630,6 +664,7 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io,
struct dpbp_notification_cfg *cfg)
{
struct mc_command cmd = { 0 };
+ struct dpbp_rsp_get_notifications *rsp_params;
int err;
/* prepare command */
@@ -643,13 +678,14 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- cfg->depletion_entry = (u32)mc_dec(cmd.params[0], 0, 32);
- cfg->depletion_exit = (u32)mc_dec(cmd.params[0], 32, 32);
- cfg->surplus_entry = (u32)mc_dec(cmd.params[1], 0, 32);
- cfg->surplus_exit = (u32)mc_dec(cmd.params[1], 32, 32);
- cfg->options = (u16)mc_dec(cmd.params[2], 0, 16);
- cfg->message_ctx = (u64)mc_dec(cmd.params[3], 0, 64);
- cfg->message_iova = (u64)mc_dec(cmd.params[4], 0, 64);
+ rsp_params = (struct dpbp_rsp_get_notifications *)cmd.params;
+ cfg->depletion_entry = le32_to_cpu(rsp_params->depletion_entry);
+ cfg->depletion_exit = le32_to_cpu(rsp_params->depletion_exit);
+ cfg->surplus_entry = le32_to_cpu(rsp_params->surplus_entry);
+ cfg->surplus_exit = le32_to_cpu(rsp_params->surplus_exit);
+ cfg->options = le16_to_cpu(rsp_params->options);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
return 0;
}
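
All of the dpbp.c conversions above follow one pattern: the raw mc_enc()/mc_dec() shift-and-mask packing is replaced by casting cmd.params to a purpose-built little-endian struct and converting each field with cpu_to_le32() and friends. A standalone sketch of the command side; the __le32 typedef and the identity cpu_to_le32() are stand-ins for the kernel helpers and assume a little-endian host.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t __le32;
#define cpu_to_le32(x) (x)	/* assumption: little-endian build */

/* Shape of the MC command block, as in the driver above. */
struct mc_command {
	uint64_t header;
	uint64_t params[7];
};

struct dpbp_cmd_open {
	__le32 dpbp_id;
};

int main(void)
{
	struct mc_command cmd = { 0 };
	struct dpbp_cmd_open *cmd_params;

	/* overlay the typed view on the raw command words */
	cmd_params = (struct dpbp_cmd_open *)cmd.params;
	cmd_params->dpbp_id = cpu_to_le32(42);

	printf("params[0] = %#llx\n", (unsigned long long)cmd.params[0]);
	return 0;
}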
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index c9b52dd7ba31..d098a6d8f6bc 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -1,4 +1,4 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -53,4 +53,88 @@
#define DPMCP_CMDID_GET_IRQ_MASK 0x015
#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
+struct dpmcp_cmd_open {
+ __le32 dpmcp_id;
+};
+
+struct dpmcp_cmd_create {
+ __le32 portal_id;
+};
+
+struct dpmcp_cmd_set_irq {
+ /* cmd word 0 */
+ u8 irq_index;
+ u8 pad[3];
+ __le32 irq_val;
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+struct dpmcp_cmd_get_irq {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* cmd word 1 */
+ __le64 irq_paddr;
+ /* cmd word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+#define DPMCP_ENABLE 0x1
+
+struct dpmcp_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpmcp_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq_enable {
+ u8 enabled;
+};
+
+struct dpmcp_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpmcp_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpmcp_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpmcp_rsp_get_attributes {
+ /* response word 0 */
+ __le32 pad;
+ __le32 id;
+ /* response word 1 */
+ __le16 version_major;
+ __le16 version_minor;
+};
+
#endif /* _FSL_DPMCP_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index fd6dd4e07b87..06440176243a 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -57,12 +57,14 @@ int dpmcp_open(struct fsl_mc_io *mc_io,
u16 *token)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_open *cmd_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
cmd_flags, 0);
- cmd.params[0] |= mc_enc(0, 32, dpmcp_id);
+ cmd_params = (struct dpmcp_cmd_open *)cmd.params;
+ cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -70,7 +72,7 @@ int dpmcp_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return err;
}
@@ -127,12 +129,15 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
u16 *token)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_create *cmd_params;
+
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
cmd_flags, 0);
- cmd.params[0] |= mc_enc(0, 32, cfg->portal_id);
+ cmd_params = (struct dpmcp_cmd_create *)cmd.params;
+ cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -140,7 +145,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
@@ -206,14 +211,16 @@ int dpmcp_set_irq(struct fsl_mc_io *mc_io,
struct dpmcp_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_set_irq *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, irq_index);
- cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+ cmd_params = (struct dpmcp_cmd_set_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -239,12 +246,15 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_io,
struct dpmcp_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_get_irq *cmd_params;
+ struct dpmcp_rsp_get_irq *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -252,10 +262,11 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
- irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
- *type = (int)mc_dec(cmd.params[2], 32, 32);
+ rsp_params = (struct dpmcp_rsp_get_irq *)cmd.params;
+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+ irq_cfg->paddr = le64_to_cpu(rsp_params->irq_paddr);
+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+ *type = le32_to_cpu(rsp_params->type);
return 0;
}
@@ -281,12 +292,14 @@ int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
u8 en)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_set_irq_enable *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, en);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en & DPMCP_ENABLE;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -309,12 +322,15 @@ int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
u8 *en)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_get_irq_enable *cmd_params;
+ struct dpmcp_rsp_get_irq_enable *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -322,7 +338,8 @@ int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *en = (u8)mc_dec(cmd.params[0], 0, 8);
+ rsp_params = (struct dpmcp_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->enabled & DPMCP_ENABLE;
return 0;
}
@@ -349,12 +366,15 @@ int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
u32 mask)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_set_irq_mask *cmd_params;
+
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, mask);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -380,12 +400,16 @@ int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
u32 *mask)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_get_irq_mask *cmd_params;
+ struct dpmcp_rsp_get_irq_mask *rsp_params;
+
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -393,7 +417,9 @@ int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *mask = (u32)mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dpmcp_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
return 0;
}
@@ -417,12 +443,16 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
u32 *status)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_get_irq_status *cmd_params;
+ struct dpmcp_rsp_get_irq_status *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -430,7 +460,9 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *status = (u32)mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dpmcp_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
return 0;
}
@@ -450,6 +482,7 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
struct dpmcp_attr *attr)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_rsp_get_attributes *rsp_params;
int err;
/* prepare command */
@@ -462,8 +495,10 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- attr->id = (int)mc_dec(cmd.params[0], 32, 32);
- attr->version.major = (u16)mc_dec(cmd.params[1], 0, 16);
- attr->version.minor = (u16)mc_dec(cmd.params[1], 16, 16);
+ rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->version.major = le16_to_cpu(rsp_params->version_major);
+ attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
return 0;
}
diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
index ba8cfa9635dd..779bf9c25bc0 100644
--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -44,4 +44,14 @@
#define DPMNG_CMDID_GET_CONT_ID 0x830
#define DPMNG_CMDID_GET_VERSION 0x831
+struct dpmng_rsp_get_container_id {
+ __le32 container_id;
+};
+
+struct dpmng_rsp_get_version {
+ __le32 revision;
+ __le32 version_major;
+ __le32 version_minor;
+};
+
#endif /* __FSL_DPMNG_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
index f633fcd86e51..660bbe7ea899 100644
--- a/drivers/staging/fsl-mc/bus/dpmng.c
+++ b/drivers/staging/fsl-mc/bus/dpmng.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -48,6 +48,7 @@ int mc_get_version(struct fsl_mc_io *mc_io,
struct mc_version *mc_ver_info)
{
struct mc_command cmd = { 0 };
+ struct dpmng_rsp_get_version *rsp_params;
int err;
/* prepare command */
@@ -61,12 +62,14 @@ int mc_get_version(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- mc_ver_info->revision = mc_dec(cmd.params[0], 0, 32);
- mc_ver_info->major = mc_dec(cmd.params[0], 32, 32);
- mc_ver_info->minor = mc_dec(cmd.params[1], 0, 32);
+ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
return 0;
}
+EXPORT_SYMBOL(mc_get_version);
/**
* dpmng_get_container_id() - Get container ID associated with a given portal.
@@ -81,6 +84,7 @@ int dpmng_get_container_id(struct fsl_mc_io *mc_io,
int *container_id)
{
struct mc_command cmd = { 0 };
+ struct dpmng_rsp_get_container_id *rsp_params;
int err;
/* prepare command */
@@ -94,7 +98,8 @@ int dpmng_get_container_id(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *container_id = mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
+ *container_id = le32_to_cpu(rsp_params->container_id);
return 0;
}
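
Response decoding is the mirror image of command marshalling: overlay a rsp struct on cmd.params and convert each field out with le32_to_cpu(). A standalone sketch modelled on the mc_get_version() decode above, again with stand-in types and an assumed little-endian host:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t __le32;
#define le32_to_cpu(x) (x)	/* assumption: little-endian build */

struct mc_command {
	uint64_t header;
	uint64_t params[7];
};

struct dpmng_rsp_get_version {
	__le32 revision;
	__le32 version_major;
	__le32 version_minor;
};

int main(void)
{
	struct mc_command cmd = { 0 };
	struct dpmng_rsp_get_version *rsp;

	/* pretend the MC answered: revision 99, major 10, minor 0 */
	cmd.params[0] = 0x0000000a00000063ULL;

	rsp = (struct dpmng_rsp_get_version *)cmd.params;
	printf("revision %u major %u minor %u\n",
	       (unsigned)le32_to_cpu(rsp->revision),
	       (unsigned)le32_to_cpu(rsp->version_major),
	       (unsigned)le32_to_cpu(rsp->version_minor));
	return 0;
}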
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index 9b854fa8e84d..bb127f4a3ae7 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -84,4 +84,381 @@
#define DPRC_CMDID_GET_CONNECTION 0x16C
+struct dprc_cmd_open {
+ __le32 container_id;
+};
+
+struct dprc_cmd_create_container {
+ /* cmd word 0 */
+ __le32 options;
+ __le16 icid;
+ __le16 pad0;
+ /* cmd word 1 */
+ __le32 pad1;
+ __le32 portal_id;
+ /* cmd words 2-3 */
+ u8 label[16];
+};
+
+struct dprc_rsp_create_container {
+ /* response word 0 */
+ __le64 pad0;
+ /* response word 1 */
+ __le32 child_container_id;
+ __le32 pad1;
+ /* response word 2 */
+ __le64 child_portal_addr;
+};
+
+struct dprc_cmd_destroy_container {
+ __le32 child_container_id;
+};
+
+struct dprc_cmd_reset_container {
+ __le32 child_container_id;
+};
+
+struct dprc_cmd_set_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+struct dprc_cmd_get_irq {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq {
+ /* response word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* response word 1 */
+ __le64 irq_addr;
+ /* response word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+#define DPRC_ENABLE 0x1
+
+struct dprc_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_enable {
+ u8 enabled;
+};
+
+struct dprc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dprc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_attributes {
+ /* response word 0 */
+ __le32 container_id;
+ __le16 icid;
+ __le16 pad;
+ /* response word 1 */
+ __le32 options;
+ __le32 portal_id;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+struct dprc_cmd_set_res_quota {
+ /* cmd word 0 */
+ __le32 child_container_id;
+ __le16 quota;
+ __le16 pad;
+ /* cmd words 1-2 */
+ u8 type[16];
+};
+
+struct dprc_cmd_get_res_quota {
+ /* cmd word 0 */
+ __le32 child_container_id;
+ __le32 pad;
+ /* cmd word 1-2 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_res_quota {
+ __le32 pad;
+ __le16 quota;
+};
+
+struct dprc_cmd_assign {
+ /* cmd word 0 */
+ __le32 container_id;
+ __le32 options;
+ /* cmd word 1 */
+ __le32 num;
+ __le32 id_base_align;
+ /* cmd word 2-3 */
+ u8 type[16];
+};
+
+struct dprc_cmd_unassign {
+ /* cmd word 0 */
+ __le32 child_container_id;
+ __le32 options;
+ /* cmd word 1 */
+ __le32 num;
+ __le32 id_base_align;
+ /* cmd word 2-3 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_pool_count {
+ __le32 pool_count;
+};
+
+struct dprc_cmd_get_pool {
+ __le32 pool_index;
+};
+
+struct dprc_rsp_get_pool {
+ /* response word 0 */
+ __le64 pad;
+ /* response word 1-2 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_obj_count {
+ __le32 pad;
+ __le32 obj_count;
+};
+
+struct dprc_cmd_get_obj {
+ __le32 obj_index;
+};
+
+struct dprc_rsp_get_obj {
+ /* response word 0 */
+ __le32 pad0;
+ __le32 id;
+ /* response word 1 */
+ __le16 vendor;
+ u8 irq_count;
+ u8 region_count;
+ __le32 state;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 flags;
+ __le16 pad1;
+ /* response word 3-4 */
+ u8 type[16];
+ /* response word 5-6 */
+ u8 label[16];
+};
+
+struct dprc_cmd_get_obj_desc {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le32 pad;
+ /* cmd word 1-2 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_obj_desc {
+ /* response word 0 */
+ __le32 pad0;
+ __le32 id;
+ /* response word 1 */
+ __le16 vendor;
+ u8 irq_count;
+ u8 region_count;
+ __le32 state;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 flags;
+ __le16 pad1;
+ /* response word 3-4 */
+ u8 type[16];
+ /* response word 5-6 */
+ u8 label[16];
+};
+
+struct dprc_cmd_get_res_count {
+ /* cmd word 0 */
+ __le64 pad;
+ /* cmd word 1-2 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_res_count {
+ __le32 res_count;
+};
+
+struct dprc_cmd_get_res_ids {
+ /* cmd word 0 */
+ u8 pad0[5];
+ u8 iter_status;
+ __le16 pad1;
+ /* cmd word 1 */
+ __le32 base_id;
+ __le32 last_id;
+ /* cmd word 2-3 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_res_ids {
+ /* response word 0 */
+ u8 pad0[5];
+ u8 iter_status;
+ __le16 pad1;
+ /* response word 1 */
+ __le32 base_id;
+ __le32 last_id;
+};
+
+struct dprc_cmd_get_obj_region {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le16 pad0;
+ u8 region_index;
+ u8 pad1;
+ /* cmd word 1-2 */
+ __le64 pad2[2];
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_region {
+ /* response word 0 */
+ __le64 pad;
+ /* response word 1 */
+ __le64 base_addr;
+ /* response word 2 */
+ __le32 size;
+};
+
+struct dprc_cmd_set_obj_label {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le32 pad;
+ /* cmd word 1-2 */
+ u8 label[16];
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_cmd_set_obj_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+ __le32 obj_id;
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_cmd_get_obj_irq {
+ /* cmd word 0 */
+ __le32 obj_id;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1-2 */
+ u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_irq {
+ /* response word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* response word 1 */
+ __le64 irq_addr;
+ /* response word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+struct dprc_cmd_connect {
+ /* cmd word 0 */
+ __le32 ep1_id;
+ __le32 ep1_interface_id;
+ /* cmd word 1 */
+ __le32 ep2_id;
+ __le32 ep2_interface_id;
+ /* cmd word 2-3 */
+ u8 ep1_type[16];
+ /* cmd word 4 */
+ __le32 max_rate;
+ __le32 committed_rate;
+ /* cmd word 5-6 */
+ u8 ep2_type[16];
+};
+
+struct dprc_cmd_disconnect {
+ /* cmd word 0 */
+ __le32 id;
+ __le32 interface_id;
+ /* cmd word 1-2 */
+ u8 type[16];
+};
+
+struct dprc_cmd_get_connection {
+ /* cmd word 0 */
+ __le32 ep1_id;
+ __le32 ep1_interface_id;
+ /* cmd word 1-2 */
+ u8 ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ /* response word 0-2 */
+ __le64 pad[3];
+ /* response word 3 */
+ __le32 ep2_id;
+ __le32 ep2_interface_id;
+ /* response word 4-5 */
+ u8 ep2_type[16];
+ /* response word 6 */
+ __le32 state;
+};
+
#endif /* _FSL_DPRC_CMD_H */
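
The pad members scattered through the dprc-cmd.h structs above are load-bearing: each field has to land at the byte offset the MC firmware expects inside the 64-bit command words, and explicit padding plus natural alignment is what guarantees that. A sketch that makes the layout self-checking with C11 static_assert, reusing the dprc_cmd_set_irq layout from above:

#include <stddef.h>
#include <stdint.h>
#include <assert.h>

typedef uint8_t u8;
typedef uint32_t __le32;
typedef uint64_t __le64;

struct dprc_cmd_set_irq {
	/* cmd word 0 */
	__le32 irq_val;
	u8 irq_index;
	u8 pad[3];	/* keeps cmd word 0 exactly 8 bytes */
	/* cmd word 1 */
	__le64 irq_addr;
	/* cmd word 2 */
	__le32 irq_num;
};

/* each command word is 8 bytes; fields must start on word boundaries */
static_assert(offsetof(struct dprc_cmd_set_irq, irq_addr) == 8,
	      "irq_addr must start at command word 1");
static_assert(offsetof(struct dprc_cmd_set_irq, irq_num) == 16,
	      "irq_num must start at command word 2");

int main(void) { return 0; }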
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index 7fc47173c164..d2a71f14bf72 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -760,7 +760,12 @@ error_cleanup_msi_domain:
*/
static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
{
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
(void)disable_dprc_irq(mc_dev);
+
+ devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+
fsl_mc_free_irqs(mc_dev);
}
@@ -791,21 +796,28 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
dprc_teardown_irq(mc_dev);
device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+
+ if (dev_get_msi_domain(&mc_dev->dev)) {
+ fsl_mc_cleanup_irq_pool(mc_bus);
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+ }
+
dprc_cleanup_all_resource_pools(mc_dev);
+
error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
if (error < 0)
dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
- if (dev_get_msi_domain(&mc_dev->dev)) {
- fsl_mc_cleanup_irq_pool(mc_bus);
- dev_set_msi_domain(&mc_dev->dev, NULL);
+ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
}
dev_info(&mc_dev->dev, "DPRC device unbound from driver");
return 0;
}
-static const struct fsl_mc_device_match_id match_id_table[] = {
+static const struct fsl_mc_device_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dprc"},
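
The dprc_remove()/dprc_teardown_irq() reordering above encodes an ownership rule: the IRQ handler is released (devm_free_irq()) before the MSI/IRQ pool it references is torn down, and the mc_io portal is destroyed last, only for non-root DPRCs, since the root DPRC's portal is owned elsewhere. A toy sketch of that ordering with stand-in helpers; none of these names are the driver's real functions.

#include <stdbool.h>
#include <stdio.h>

struct dev { bool root; };

static void free_irq_handler(struct dev *d)       { (void)d; puts("free handler"); }
static void remove_children(struct dev *d)        { (void)d; puts("remove children"); }
static void cleanup_irq_pool(struct dev *d)       { (void)d; puts("cleanup irq pool"); }
static void cleanup_resource_pools(struct dev *d) { (void)d; puts("cleanup pools"); }
static void close_portal(struct dev *d)           { (void)d; puts("close portal"); }
static void destroy_io(struct dev *d)             { (void)d; puts("destroy mc_io"); }
static bool is_root(struct dev *d)                { return d->root; }

static void teardown(struct dev *d)
{
	free_irq_handler(d);       /* before the pool backing it goes away */
	remove_children(d);
	cleanup_irq_pool(d);       /* safe now: no handler references it */
	cleanup_resource_pools(d);
	close_portal(d);
	if (!is_root(d))
		destroy_io(d);     /* root portal is owned by the bus driver */
}

int main(void)
{
	struct dev child = { .root = false };

	teardown(&child);
	return 0;
}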
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index a2c47377cc4e..c26054981333 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -51,12 +51,14 @@ int dprc_open(struct fsl_mc_io *mc_io,
u16 *token)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_open *cmd_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
0);
- cmd.params[0] |= mc_enc(0, 32, container_id);
+ cmd_params = (struct dprc_cmd_open *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -64,7 +66,7 @@ int dprc_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
@@ -115,28 +117,17 @@ int dprc_create_container(struct fsl_mc_io *mc_io,
u64 *child_portal_offset)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_create_container *cmd_params;
+ struct dprc_rsp_create_container *rsp_params;
int err;
/* prepare command */
- cmd.params[0] |= mc_enc(32, 16, cfg->icid);
- cmd.params[0] |= mc_enc(0, 32, cfg->options);
- cmd.params[1] |= mc_enc(32, 32, cfg->portal_id);
- cmd.params[2] |= mc_enc(0, 8, cfg->label[0]);
- cmd.params[2] |= mc_enc(8, 8, cfg->label[1]);
- cmd.params[2] |= mc_enc(16, 8, cfg->label[2]);
- cmd.params[2] |= mc_enc(24, 8, cfg->label[3]);
- cmd.params[2] |= mc_enc(32, 8, cfg->label[4]);
- cmd.params[2] |= mc_enc(40, 8, cfg->label[5]);
- cmd.params[2] |= mc_enc(48, 8, cfg->label[6]);
- cmd.params[2] |= mc_enc(56, 8, cfg->label[7]);
- cmd.params[3] |= mc_enc(0, 8, cfg->label[8]);
- cmd.params[3] |= mc_enc(8, 8, cfg->label[9]);
- cmd.params[3] |= mc_enc(16, 8, cfg->label[10]);
- cmd.params[3] |= mc_enc(24, 8, cfg->label[11]);
- cmd.params[3] |= mc_enc(32, 8, cfg->label[12]);
- cmd.params[3] |= mc_enc(40, 8, cfg->label[13]);
- cmd.params[3] |= mc_enc(48, 8, cfg->label[14]);
- cmd.params[3] |= mc_enc(56, 8, cfg->label[15]);
+ cmd_params = (struct dprc_cmd_create_container *)cmd.params;
+ cmd_params->options = cpu_to_le32(cfg->options);
+ cmd_params->icid = cpu_to_le16(cfg->icid);
+ cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
+ strncpy(cmd_params->label, cfg->label, 16);
+ cmd_params->label[15] = '\0';
cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT,
cmd_flags, token);
@@ -147,8 +138,9 @@ int dprc_create_container(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *child_container_id = mc_dec(cmd.params[1], 0, 32);
- *child_portal_offset = mc_dec(cmd.params[2], 0, 64);
+ rsp_params = (struct dprc_rsp_create_container *)cmd.params;
+ *child_container_id = le32_to_cpu(rsp_params->child_container_id);
+ *child_portal_offset = le64_to_cpu(rsp_params->child_portal_addr);
return 0;
}
@@ -181,11 +173,13 @@ int dprc_destroy_container(struct fsl_mc_io *mc_io,
int child_container_id)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_destroy_container *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
+ cmd_params = (struct dprc_cmd_destroy_container *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -219,11 +213,13 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
int child_container_id)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_reset_container *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
+ cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -249,13 +245,16 @@ int dprc_get_irq(struct fsl_mc_io *mc_io,
struct dprc_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq *cmd_params;
+ struct dprc_rsp_get_irq *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -263,10 +262,11 @@ int dprc_get_irq(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- irq_cfg->val = mc_dec(cmd.params[0], 0, 32);
- irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64);
- irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32);
- *type = mc_dec(cmd.params[2], 32, 32);
+ rsp_params = (struct dprc_rsp_get_irq *)cmd.params;
+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+ irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+ *type = le32_to_cpu(rsp_params->type);
return 0;
}
@@ -288,15 +288,17 @@ int dprc_set_irq(struct fsl_mc_io *mc_io,
struct dprc_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
- cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+ cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -319,12 +321,15 @@ int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
u8 *en)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq_enable *cmd_params;
+ struct dprc_rsp_get_irq_enable *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -332,7 +337,8 @@ int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *en = mc_dec(cmd.params[0], 0, 8);
+ rsp_params = (struct dprc_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->enabled & DPRC_ENABLE;
return 0;
}
@@ -359,12 +365,14 @@ int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
u8 en)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_enable *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, en);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en & DPRC_ENABLE;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -390,12 +398,15 @@ int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
u32 *mask)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq_mask *cmd_params;
+ struct dprc_rsp_get_irq_mask *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -403,7 +414,8 @@ int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *mask = mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dprc_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
return 0;
}
@@ -431,12 +443,14 @@ int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
u32 mask)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_mask *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, mask);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -461,13 +475,16 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
u32 *status)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq_status *cmd_params;
+ struct dprc_rsp_get_irq_status *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, *status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -475,7 +492,8 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *status = mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
return 0;
}
@@ -499,12 +517,14 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
u32 status)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_clear_irq_status *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -525,6 +545,7 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
struct dprc_attributes *attr)
{
struct mc_command cmd = { 0 };
+ struct dprc_rsp_get_attributes *rsp_params;
int err;
/* prepare command */
@@ -538,12 +559,13 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- attr->container_id = mc_dec(cmd.params[0], 0, 32);
- attr->icid = mc_dec(cmd.params[0], 32, 16);
- attr->options = mc_dec(cmd.params[1], 0, 32);
- attr->portal_id = mc_dec(cmd.params[1], 32, 32);
- attr->version.major = mc_dec(cmd.params[2], 0, 16);
- attr->version.minor = mc_dec(cmd.params[2], 16, 16);
+ rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
+ attr->container_id = le32_to_cpu(rsp_params->container_id);
+ attr->icid = le16_to_cpu(rsp_params->icid);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->portal_id = le32_to_cpu(rsp_params->portal_id);
+ attr->version.major = le16_to_cpu(rsp_params->version_major);
+ attr->version.minor = le16_to_cpu(rsp_params->version_minor);
return 0;
}
@@ -581,28 +603,16 @@ int dprc_set_res_quota(struct fsl_mc_io *mc_io,
u16 quota)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_res_quota *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
- cmd.params[0] |= mc_enc(32, 16, quota);
- cmd.params[1] |= mc_enc(0, 8, type[0]);
- cmd.params[1] |= mc_enc(8, 8, type[1]);
- cmd.params[1] |= mc_enc(16, 8, type[2]);
- cmd.params[1] |= mc_enc(24, 8, type[3]);
- cmd.params[1] |= mc_enc(32, 8, type[4]);
- cmd.params[1] |= mc_enc(40, 8, type[5]);
- cmd.params[1] |= mc_enc(48, 8, type[6]);
- cmd.params[1] |= mc_enc(56, 8, type[7]);
- cmd.params[2] |= mc_enc(0, 8, type[8]);
- cmd.params[2] |= mc_enc(8, 8, type[9]);
- cmd.params[2] |= mc_enc(16, 8, type[10]);
- cmd.params[2] |= mc_enc(24, 8, type[11]);
- cmd.params[2] |= mc_enc(32, 8, type[12]);
- cmd.params[2] |= mc_enc(40, 8, type[13]);
- cmd.params[2] |= mc_enc(48, 8, type[14]);
- cmd.params[2] |= mc_enc(56, 8, '\0');
+ cmd_params = (struct dprc_cmd_set_res_quota *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ cmd_params->quota = cpu_to_le16(quota);
+ strncpy(cmd_params->type, type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -631,28 +641,17 @@ int dprc_get_res_quota(struct fsl_mc_io *mc_io,
u16 *quota)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_res_quota *cmd_params;
+ struct dprc_rsp_get_res_quota *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
- cmd.params[1] |= mc_enc(0, 8, type[0]);
- cmd.params[1] |= mc_enc(8, 8, type[1]);
- cmd.params[1] |= mc_enc(16, 8, type[2]);
- cmd.params[1] |= mc_enc(24, 8, type[3]);
- cmd.params[1] |= mc_enc(32, 8, type[4]);
- cmd.params[1] |= mc_enc(40, 8, type[5]);
- cmd.params[1] |= mc_enc(48, 8, type[6]);
- cmd.params[1] |= mc_enc(56, 8, type[7]);
- cmd.params[2] |= mc_enc(0, 8, type[8]);
- cmd.params[2] |= mc_enc(8, 8, type[9]);
- cmd.params[2] |= mc_enc(16, 8, type[10]);
- cmd.params[2] |= mc_enc(24, 8, type[11]);
- cmd.params[2] |= mc_enc(32, 8, type[12]);
- cmd.params[2] |= mc_enc(40, 8, type[13]);
- cmd.params[2] |= mc_enc(48, 8, type[14]);
- cmd.params[2] |= mc_enc(56, 8, '\0');
+ cmd_params = (struct dprc_cmd_get_res_quota *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ strncpy(cmd_params->type, type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -660,7 +659,8 @@ int dprc_get_res_quota(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *quota = mc_dec(cmd.params[0], 32, 16);
+ rsp_params = (struct dprc_rsp_get_res_quota *)cmd.params;
+ *quota = le16_to_cpu(rsp_params->quota);
return 0;
}
@@ -704,30 +704,18 @@ int dprc_assign(struct fsl_mc_io *mc_io,
struct dprc_res_req *res_req)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_assign *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, container_id);
- cmd.params[0] |= mc_enc(32, 32, res_req->options);
- cmd.params[1] |= mc_enc(0, 32, res_req->num);
- cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align);
- cmd.params[2] |= mc_enc(0, 8, res_req->type[0]);
- cmd.params[2] |= mc_enc(8, 8, res_req->type[1]);
- cmd.params[2] |= mc_enc(16, 8, res_req->type[2]);
- cmd.params[2] |= mc_enc(24, 8, res_req->type[3]);
- cmd.params[2] |= mc_enc(32, 8, res_req->type[4]);
- cmd.params[2] |= mc_enc(40, 8, res_req->type[5]);
- cmd.params[2] |= mc_enc(48, 8, res_req->type[6]);
- cmd.params[2] |= mc_enc(56, 8, res_req->type[7]);
- cmd.params[3] |= mc_enc(0, 8, res_req->type[8]);
- cmd.params[3] |= mc_enc(8, 8, res_req->type[9]);
- cmd.params[3] |= mc_enc(16, 8, res_req->type[10]);
- cmd.params[3] |= mc_enc(24, 8, res_req->type[11]);
- cmd.params[3] |= mc_enc(32, 8, res_req->type[12]);
- cmd.params[3] |= mc_enc(40, 8, res_req->type[13]);
- cmd.params[3] |= mc_enc(48, 8, res_req->type[14]);
- cmd.params[3] |= mc_enc(56, 8, res_req->type[15]);
+ cmd_params = (struct dprc_cmd_assign *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
+ cmd_params->options = cpu_to_le32(res_req->options);
+ cmd_params->num = cpu_to_le32(res_req->num);
+ cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+ strncpy(cmd_params->type, res_req->type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -755,31 +743,19 @@ int dprc_unassign(struct fsl_mc_io *mc_io,
struct dprc_res_req *res_req)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_unassign *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
- cmd.params[0] |= mc_enc(32, 32, res_req->options);
- cmd.params[1] |= mc_enc(0, 32, res_req->num);
- cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align);
- cmd.params[2] |= mc_enc(0, 8, res_req->type[0]);
- cmd.params[2] |= mc_enc(8, 8, res_req->type[1]);
- cmd.params[2] |= mc_enc(16, 8, res_req->type[2]);
- cmd.params[2] |= mc_enc(24, 8, res_req->type[3]);
- cmd.params[2] |= mc_enc(32, 8, res_req->type[4]);
- cmd.params[2] |= mc_enc(40, 8, res_req->type[5]);
- cmd.params[2] |= mc_enc(48, 8, res_req->type[6]);
- cmd.params[2] |= mc_enc(56, 8, res_req->type[7]);
- cmd.params[3] |= mc_enc(0, 8, res_req->type[8]);
- cmd.params[3] |= mc_enc(8, 8, res_req->type[9]);
- cmd.params[3] |= mc_enc(16, 8, res_req->type[10]);
- cmd.params[3] |= mc_enc(24, 8, res_req->type[11]);
- cmd.params[3] |= mc_enc(32, 8, res_req->type[12]);
- cmd.params[3] |= mc_enc(40, 8, res_req->type[13]);
- cmd.params[3] |= mc_enc(48, 8, res_req->type[14]);
- cmd.params[3] |= mc_enc(56, 8, res_req->type[15]);
+ cmd_params = (struct dprc_cmd_unassign *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ cmd_params->options = cpu_to_le32(res_req->options);
+ cmd_params->num = cpu_to_le32(res_req->num);
+ cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+ strncpy(cmd_params->type, res_req->type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -800,6 +776,7 @@ int dprc_get_pool_count(struct fsl_mc_io *mc_io,
int *pool_count)
{
struct mc_command cmd = { 0 };
+ struct dprc_rsp_get_pool_count *rsp_params;
int err;
/* prepare command */
@@ -812,7 +789,8 @@ int dprc_get_pool_count(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *pool_count = mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dprc_rsp_get_pool_count *)cmd.params;
+ *pool_count = le32_to_cpu(rsp_params->pool_count);
return 0;
}
@@ -839,13 +817,16 @@ int dprc_get_pool(struct fsl_mc_io *mc_io,
char *type)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_pool *cmd_params;
+ struct dprc_rsp_get_pool *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, pool_index);
+ cmd_params = (struct dprc_cmd_get_pool *)cmd.params;
+ cmd_params->pool_index = cpu_to_le32(pool_index);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -853,21 +834,8 @@ int dprc_get_pool(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- type[0] = mc_dec(cmd.params[1], 0, 8);
- type[1] = mc_dec(cmd.params[1], 8, 8);
- type[2] = mc_dec(cmd.params[1], 16, 8);
- type[3] = mc_dec(cmd.params[1], 24, 8);
- type[4] = mc_dec(cmd.params[1], 32, 8);
- type[5] = mc_dec(cmd.params[1], 40, 8);
- type[6] = mc_dec(cmd.params[1], 48, 8);
- type[7] = mc_dec(cmd.params[1], 56, 8);
- type[8] = mc_dec(cmd.params[2], 0, 8);
- type[9] = mc_dec(cmd.params[2], 8, 8);
- type[10] = mc_dec(cmd.params[2], 16, 8);
- type[11] = mc_dec(cmd.params[2], 24, 8);
- type[12] = mc_dec(cmd.params[2], 32, 8);
- type[13] = mc_dec(cmd.params[2], 40, 8);
- type[14] = mc_dec(cmd.params[2], 48, 8);
+ rsp_params = (struct dprc_rsp_get_pool *)cmd.params;
+ strncpy(type, rsp_params->type, 16);
type[15] = '\0';
return 0;
@@ -888,6 +856,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
int *obj_count)
{
struct mc_command cmd = { 0 };
+ struct dprc_rsp_get_obj_count *rsp_params;
int err;
/* prepare command */
@@ -900,7 +869,8 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *obj_count = mc_dec(cmd.params[0], 32, 32);
+ rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
+ *obj_count = le32_to_cpu(rsp_params->obj_count);
return 0;
}
@@ -928,13 +898,16 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
struct dprc_obj_desc *obj_desc)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj *cmd_params;
+ struct dprc_rsp_get_obj *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, obj_index);
+ cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
+ cmd_params->obj_index = cpu_to_le32(obj_index);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -942,45 +915,18 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- obj_desc->id = mc_dec(cmd.params[0], 32, 32);
- obj_desc->vendor = mc_dec(cmd.params[1], 0, 16);
- obj_desc->irq_count = mc_dec(cmd.params[1], 16, 8);
- obj_desc->region_count = mc_dec(cmd.params[1], 24, 8);
- obj_desc->state = mc_dec(cmd.params[1], 32, 32);
- obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16);
- obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16);
- obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
- obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8);
- obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8);
- obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8);
- obj_desc->type[3] = mc_dec(cmd.params[3], 24, 8);
- obj_desc->type[4] = mc_dec(cmd.params[3], 32, 8);
- obj_desc->type[5] = mc_dec(cmd.params[3], 40, 8);
- obj_desc->type[6] = mc_dec(cmd.params[3], 48, 8);
- obj_desc->type[7] = mc_dec(cmd.params[3], 56, 8);
- obj_desc->type[8] = mc_dec(cmd.params[4], 0, 8);
- obj_desc->type[9] = mc_dec(cmd.params[4], 8, 8);
- obj_desc->type[10] = mc_dec(cmd.params[4], 16, 8);
- obj_desc->type[11] = mc_dec(cmd.params[4], 24, 8);
- obj_desc->type[12] = mc_dec(cmd.params[4], 32, 8);
- obj_desc->type[13] = mc_dec(cmd.params[4], 40, 8);
- obj_desc->type[14] = mc_dec(cmd.params[4], 48, 8);
+ rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
+ obj_desc->id = le32_to_cpu(rsp_params->id);
+ obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+ obj_desc->irq_count = rsp_params->irq_count;
+ obj_desc->region_count = rsp_params->region_count;
+ obj_desc->state = le32_to_cpu(rsp_params->state);
+ obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+ obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+ obj_desc->flags = le16_to_cpu(rsp_params->flags);
+ strncpy(obj_desc->type, rsp_params->type, 16);
obj_desc->type[15] = '\0';
- obj_desc->label[0] = mc_dec(cmd.params[5], 0, 8);
- obj_desc->label[1] = mc_dec(cmd.params[5], 8, 8);
- obj_desc->label[2] = mc_dec(cmd.params[5], 16, 8);
- obj_desc->label[3] = mc_dec(cmd.params[5], 24, 8);
- obj_desc->label[4] = mc_dec(cmd.params[5], 32, 8);
- obj_desc->label[5] = mc_dec(cmd.params[5], 40, 8);
- obj_desc->label[6] = mc_dec(cmd.params[5], 48, 8);
- obj_desc->label[7] = mc_dec(cmd.params[5], 56, 8);
- obj_desc->label[8] = mc_dec(cmd.params[6], 0, 8);
- obj_desc->label[9] = mc_dec(cmd.params[6], 8, 8);
- obj_desc->label[10] = mc_dec(cmd.params[6], 16, 8);
- obj_desc->label[11] = mc_dec(cmd.params[6], 24, 8);
- obj_desc->label[12] = mc_dec(cmd.params[6], 32, 8);
- obj_desc->label[13] = mc_dec(cmd.params[6], 40, 8);
- obj_desc->label[14] = mc_dec(cmd.params[6], 48, 8);
+ strncpy(obj_desc->label, rsp_params->label, 16);
obj_desc->label[15] = '\0';
return 0;
}
@@ -1007,29 +953,18 @@ int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
struct dprc_obj_desc *obj_desc)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj_desc *cmd_params;
+ struct dprc_rsp_get_obj_desc *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[1] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[1] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[1] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[1] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[1] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[1] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[1] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[1] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[2] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[2] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[2] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[2] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[2] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[2] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[2] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[2] |= mc_enc(56, 8, obj_type[15]);
+ cmd_params = (struct dprc_cmd_get_obj_desc *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ strncpy(cmd_params->type, obj_type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1037,46 +972,19 @@ int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- obj_desc->id = (int)mc_dec(cmd.params[0], 32, 32);
- obj_desc->vendor = (u16)mc_dec(cmd.params[1], 0, 16);
- obj_desc->vendor = (u8)mc_dec(cmd.params[1], 16, 8);
- obj_desc->region_count = (u8)mc_dec(cmd.params[1], 24, 8);
- obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32);
- obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16);
- obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16);
- obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
- obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8);
- obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8);
- obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8);
- obj_desc->type[3] = (char)mc_dec(cmd.params[3], 24, 8);
- obj_desc->type[4] = (char)mc_dec(cmd.params[3], 32, 8);
- obj_desc->type[5] = (char)mc_dec(cmd.params[3], 40, 8);
- obj_desc->type[6] = (char)mc_dec(cmd.params[3], 48, 8);
- obj_desc->type[7] = (char)mc_dec(cmd.params[3], 56, 8);
- obj_desc->type[8] = (char)mc_dec(cmd.params[4], 0, 8);
- obj_desc->type[9] = (char)mc_dec(cmd.params[4], 8, 8);
- obj_desc->type[10] = (char)mc_dec(cmd.params[4], 16, 8);
- obj_desc->type[11] = (char)mc_dec(cmd.params[4], 24, 8);
- obj_desc->type[12] = (char)mc_dec(cmd.params[4], 32, 8);
- obj_desc->type[13] = (char)mc_dec(cmd.params[4], 40, 8);
- obj_desc->type[14] = (char)mc_dec(cmd.params[4], 48, 8);
- obj_desc->type[15] = (char)mc_dec(cmd.params[4], 56, 8);
- obj_desc->label[0] = (char)mc_dec(cmd.params[5], 0, 8);
- obj_desc->label[1] = (char)mc_dec(cmd.params[5], 8, 8);
- obj_desc->label[2] = (char)mc_dec(cmd.params[5], 16, 8);
- obj_desc->label[3] = (char)mc_dec(cmd.params[5], 24, 8);
- obj_desc->label[4] = (char)mc_dec(cmd.params[5], 32, 8);
- obj_desc->label[5] = (char)mc_dec(cmd.params[5], 40, 8);
- obj_desc->label[6] = (char)mc_dec(cmd.params[5], 48, 8);
- obj_desc->label[7] = (char)mc_dec(cmd.params[5], 56, 8);
- obj_desc->label[8] = (char)mc_dec(cmd.params[6], 0, 8);
- obj_desc->label[9] = (char)mc_dec(cmd.params[6], 8, 8);
- obj_desc->label[10] = (char)mc_dec(cmd.params[6], 16, 8);
- obj_desc->label[11] = (char)mc_dec(cmd.params[6], 24, 8);
- obj_desc->label[12] = (char)mc_dec(cmd.params[6], 32, 8);
- obj_desc->label[13] = (char)mc_dec(cmd.params[6], 40, 8);
- obj_desc->label[14] = (char)mc_dec(cmd.params[6], 48, 8);
- obj_desc->label[15] = (char)mc_dec(cmd.params[6], 56, 8);
+ rsp_params = (struct dprc_rsp_get_obj_desc *)cmd.params;
+ obj_desc->id = le32_to_cpu(rsp_params->id);
+ obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+ obj_desc->irq_count = rsp_params->irq_count;
+ obj_desc->region_count = rsp_params->region_count;
+ obj_desc->state = le32_to_cpu(rsp_params->state);
+ obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+ obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+ obj_desc->flags = le16_to_cpu(rsp_params->flags);
+ strncpy(obj_desc->type, rsp_params->type, 16);
+ obj_desc->type[15] = '\0';
+ strncpy(obj_desc->label, rsp_params->label, 16);
+ obj_desc->label[15] = '\0';
return 0;
}
@@ -1103,32 +1011,20 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
struct dprc_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_obj_irq *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
- cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
- cmd.params[2] |= mc_enc(32, 32, obj_id);
- cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[4] |= mc_enc(56, 8, obj_type[15]);
+ cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1159,30 +1055,19 @@ int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
struct dprc_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj_irq *cmd_params;
+ struct dprc_rsp_get_obj_irq *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
- cmd.params[1] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[1] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[1] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[1] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[1] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[1] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[1] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[1] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[2] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[2] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[2] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[2] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[2] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[2] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[2] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[2] |= mc_enc(56, 8, obj_type[15]);
+ cmd_params = (struct dprc_cmd_get_obj_irq *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ cmd_params->irq_index = irq_index;
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1190,10 +1075,11 @@ int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
- irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
- *type = (int)mc_dec(cmd.params[2], 32, 32);
+ rsp_params = (struct dprc_rsp_get_obj_irq *)cmd.params;
+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+ irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+ *type = le32_to_cpu(rsp_params->type);
return 0;
}
@@ -1218,29 +1104,16 @@ int dprc_get_res_count(struct fsl_mc_io *mc_io,
int *res_count)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_res_count *cmd_params;
+ struct dprc_rsp_get_res_count *rsp_params;
int err;
- *res_count = 0;
-
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT,
cmd_flags, token);
- cmd.params[1] |= mc_enc(0, 8, type[0]);
- cmd.params[1] |= mc_enc(8, 8, type[1]);
- cmd.params[1] |= mc_enc(16, 8, type[2]);
- cmd.params[1] |= mc_enc(24, 8, type[3]);
- cmd.params[1] |= mc_enc(32, 8, type[4]);
- cmd.params[1] |= mc_enc(40, 8, type[5]);
- cmd.params[1] |= mc_enc(48, 8, type[6]);
- cmd.params[1] |= mc_enc(56, 8, type[7]);
- cmd.params[2] |= mc_enc(0, 8, type[8]);
- cmd.params[2] |= mc_enc(8, 8, type[9]);
- cmd.params[2] |= mc_enc(16, 8, type[10]);
- cmd.params[2] |= mc_enc(24, 8, type[11]);
- cmd.params[2] |= mc_enc(32, 8, type[12]);
- cmd.params[2] |= mc_enc(40, 8, type[13]);
- cmd.params[2] |= mc_enc(48, 8, type[14]);
- cmd.params[2] |= mc_enc(56, 8, '\0');
+ cmd_params = (struct dprc_cmd_get_res_count *)cmd.params;
+ strncpy(cmd_params->type, type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1248,7 +1121,8 @@ int dprc_get_res_count(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *res_count = mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dprc_rsp_get_res_count *)cmd.params;
+ *res_count = le32_to_cpu(rsp_params->res_count);
return 0;
}
@@ -1271,30 +1145,19 @@ int dprc_get_res_ids(struct fsl_mc_io *mc_io,
struct dprc_res_ids_range_desc *range_desc)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_res_ids *cmd_params;
+ struct dprc_rsp_get_res_ids *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(42, 7, range_desc->iter_status);
- cmd.params[1] |= mc_enc(0, 32, range_desc->base_id);
- cmd.params[1] |= mc_enc(32, 32, range_desc->last_id);
- cmd.params[2] |= mc_enc(0, 8, type[0]);
- cmd.params[2] |= mc_enc(8, 8, type[1]);
- cmd.params[2] |= mc_enc(16, 8, type[2]);
- cmd.params[2] |= mc_enc(24, 8, type[3]);
- cmd.params[2] |= mc_enc(32, 8, type[4]);
- cmd.params[2] |= mc_enc(40, 8, type[5]);
- cmd.params[2] |= mc_enc(48, 8, type[6]);
- cmd.params[2] |= mc_enc(56, 8, type[7]);
- cmd.params[3] |= mc_enc(0, 8, type[8]);
- cmd.params[3] |= mc_enc(8, 8, type[9]);
- cmd.params[3] |= mc_enc(16, 8, type[10]);
- cmd.params[3] |= mc_enc(24, 8, type[11]);
- cmd.params[3] |= mc_enc(32, 8, type[12]);
- cmd.params[3] |= mc_enc(40, 8, type[13]);
- cmd.params[3] |= mc_enc(48, 8, type[14]);
- cmd.params[3] |= mc_enc(56, 8, '\0');
+ cmd_params = (struct dprc_cmd_get_res_ids *)cmd.params;
+ cmd_params->iter_status = range_desc->iter_status;
+ cmd_params->base_id = cpu_to_le32(range_desc->base_id);
+ cmd_params->last_id = cpu_to_le32(range_desc->last_id);
+ strncpy(cmd_params->type, type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1302,9 +1165,10 @@ int dprc_get_res_ids(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- range_desc->iter_status = mc_dec(cmd.params[0], 42, 7);
- range_desc->base_id = mc_dec(cmd.params[1], 0, 32);
- range_desc->last_id = mc_dec(cmd.params[1], 32, 32);
+ rsp_params = (struct dprc_rsp_get_res_ids *)cmd.params;
+ range_desc->iter_status = rsp_params->iter_status;
+ range_desc->base_id = le32_to_cpu(rsp_params->base_id);
+ range_desc->last_id = le32_to_cpu(rsp_params->last_id);
return 0;
}
@@ -1331,29 +1195,18 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
struct dprc_region_desc *region_desc)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj_region *cmd_params;
+ struct dprc_rsp_get_obj_region *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[0] |= mc_enc(48, 8, region_index);
- cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[4] |= mc_enc(56, 8, '\0');
+ cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ cmd_params->region_index = region_index;
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1361,8 +1214,9 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- region_desc->base_offset = mc_dec(cmd.params[1], 0, 64);
- region_desc->size = mc_dec(cmd.params[2], 0, 32);
+ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
+ region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
+ region_desc->size = le32_to_cpu(rsp_params->size);
return 0;
}
@@ -1387,45 +1241,18 @@ int dprc_set_obj_label(struct fsl_mc_io *mc_io,
char *label)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_obj_label *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL,
cmd_flags,
token);
-
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[1] |= mc_enc(0, 8, label[0]);
- cmd.params[1] |= mc_enc(8, 8, label[1]);
- cmd.params[1] |= mc_enc(16, 8, label[2]);
- cmd.params[1] |= mc_enc(24, 8, label[3]);
- cmd.params[1] |= mc_enc(32, 8, label[4]);
- cmd.params[1] |= mc_enc(40, 8, label[5]);
- cmd.params[1] |= mc_enc(48, 8, label[6]);
- cmd.params[1] |= mc_enc(56, 8, label[7]);
- cmd.params[2] |= mc_enc(0, 8, label[8]);
- cmd.params[2] |= mc_enc(8, 8, label[9]);
- cmd.params[2] |= mc_enc(16, 8, label[10]);
- cmd.params[2] |= mc_enc(24, 8, label[11]);
- cmd.params[2] |= mc_enc(32, 8, label[12]);
- cmd.params[2] |= mc_enc(40, 8, label[13]);
- cmd.params[2] |= mc_enc(48, 8, label[14]);
- cmd.params[2] |= mc_enc(56, 8, label[15]);
- cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[4] |= mc_enc(56, 8, obj_type[15]);
+ cmd_params = (struct dprc_cmd_set_obj_label *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ strncpy(cmd_params->label, label, 16);
+ cmd_params->label[15] = '\0';
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1453,49 +1280,23 @@ int dprc_connect(struct fsl_mc_io *mc_io,
const struct dprc_connection_cfg *cfg)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_connect *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, endpoint1->id);
- cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id);
- cmd.params[1] |= mc_enc(0, 32, endpoint2->id);
- cmd.params[1] |= mc_enc(32, 32, endpoint2->if_id);
- cmd.params[2] |= mc_enc(0, 8, endpoint1->type[0]);
- cmd.params[2] |= mc_enc(8, 8, endpoint1->type[1]);
- cmd.params[2] |= mc_enc(16, 8, endpoint1->type[2]);
- cmd.params[2] |= mc_enc(24, 8, endpoint1->type[3]);
- cmd.params[2] |= mc_enc(32, 8, endpoint1->type[4]);
- cmd.params[2] |= mc_enc(40, 8, endpoint1->type[5]);
- cmd.params[2] |= mc_enc(48, 8, endpoint1->type[6]);
- cmd.params[2] |= mc_enc(56, 8, endpoint1->type[7]);
- cmd.params[3] |= mc_enc(0, 8, endpoint1->type[8]);
- cmd.params[3] |= mc_enc(8, 8, endpoint1->type[9]);
- cmd.params[3] |= mc_enc(16, 8, endpoint1->type[10]);
- cmd.params[3] |= mc_enc(24, 8, endpoint1->type[11]);
- cmd.params[3] |= mc_enc(32, 8, endpoint1->type[12]);
- cmd.params[3] |= mc_enc(40, 8, endpoint1->type[13]);
- cmd.params[3] |= mc_enc(48, 8, endpoint1->type[14]);
- cmd.params[3] |= mc_enc(56, 8, endpoint1->type[15]);
- cmd.params[4] |= mc_enc(0, 32, cfg->max_rate);
- cmd.params[4] |= mc_enc(32, 32, cfg->committed_rate);
- cmd.params[5] |= mc_enc(0, 8, endpoint2->type[0]);
- cmd.params[5] |= mc_enc(8, 8, endpoint2->type[1]);
- cmd.params[5] |= mc_enc(16, 8, endpoint2->type[2]);
- cmd.params[5] |= mc_enc(24, 8, endpoint2->type[3]);
- cmd.params[5] |= mc_enc(32, 8, endpoint2->type[4]);
- cmd.params[5] |= mc_enc(40, 8, endpoint2->type[5]);
- cmd.params[5] |= mc_enc(48, 8, endpoint2->type[6]);
- cmd.params[5] |= mc_enc(56, 8, endpoint2->type[7]);
- cmd.params[6] |= mc_enc(0, 8, endpoint2->type[8]);
- cmd.params[6] |= mc_enc(8, 8, endpoint2->type[9]);
- cmd.params[6] |= mc_enc(16, 8, endpoint2->type[10]);
- cmd.params[6] |= mc_enc(24, 8, endpoint2->type[11]);
- cmd.params[6] |= mc_enc(32, 8, endpoint2->type[12]);
- cmd.params[6] |= mc_enc(40, 8, endpoint2->type[13]);
- cmd.params[6] |= mc_enc(48, 8, endpoint2->type[14]);
- cmd.params[6] |= mc_enc(56, 8, endpoint2->type[15]);
+ cmd_params = (struct dprc_cmd_connect *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+ cmd_params->ep2_id = cpu_to_le32(endpoint2->id);
+ cmd_params->ep2_interface_id = cpu_to_le32(endpoint2->if_id);
+ strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+ cmd_params->ep1_type[15] = '\0';
+ cmd_params->max_rate = cpu_to_le32(cfg->max_rate);
+ cmd_params->committed_rate = cpu_to_le32(cfg->committed_rate);
+ strncpy(cmd_params->ep2_type, endpoint2->type, 16);
+ cmd_params->ep2_type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1516,29 +1317,17 @@ int dprc_disconnect(struct fsl_mc_io *mc_io,
const struct dprc_endpoint *endpoint)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_disconnect *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, endpoint->id);
- cmd.params[0] |= mc_enc(32, 32, endpoint->if_id);
- cmd.params[1] |= mc_enc(0, 8, endpoint->type[0]);
- cmd.params[1] |= mc_enc(8, 8, endpoint->type[1]);
- cmd.params[1] |= mc_enc(16, 8, endpoint->type[2]);
- cmd.params[1] |= mc_enc(24, 8, endpoint->type[3]);
- cmd.params[1] |= mc_enc(32, 8, endpoint->type[4]);
- cmd.params[1] |= mc_enc(40, 8, endpoint->type[5]);
- cmd.params[1] |= mc_enc(48, 8, endpoint->type[6]);
- cmd.params[1] |= mc_enc(56, 8, endpoint->type[7]);
- cmd.params[2] |= mc_enc(0, 8, endpoint->type[8]);
- cmd.params[2] |= mc_enc(8, 8, endpoint->type[9]);
- cmd.params[2] |= mc_enc(16, 8, endpoint->type[10]);
- cmd.params[2] |= mc_enc(24, 8, endpoint->type[11]);
- cmd.params[2] |= mc_enc(32, 8, endpoint->type[12]);
- cmd.params[2] |= mc_enc(40, 8, endpoint->type[13]);
- cmd.params[2] |= mc_enc(48, 8, endpoint->type[14]);
- cmd.params[2] |= mc_enc(56, 8, endpoint->type[15]);
+ cmd_params = (struct dprc_cmd_disconnect *)cmd.params;
+ cmd_params->id = cpu_to_le32(endpoint->id);
+ cmd_params->interface_id = cpu_to_le32(endpoint->if_id);
+ strncpy(cmd_params->type, endpoint->type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1567,30 +1356,19 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
int *state)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, endpoint1->id);
- cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id);
- cmd.params[1] |= mc_enc(0, 8, endpoint1->type[0]);
- cmd.params[1] |= mc_enc(8, 8, endpoint1->type[1]);
- cmd.params[1] |= mc_enc(16, 8, endpoint1->type[2]);
- cmd.params[1] |= mc_enc(24, 8, endpoint1->type[3]);
- cmd.params[1] |= mc_enc(32, 8, endpoint1->type[4]);
- cmd.params[1] |= mc_enc(40, 8, endpoint1->type[5]);
- cmd.params[1] |= mc_enc(48, 8, endpoint1->type[6]);
- cmd.params[1] |= mc_enc(56, 8, endpoint1->type[7]);
- cmd.params[2] |= mc_enc(0, 8, endpoint1->type[8]);
- cmd.params[2] |= mc_enc(8, 8, endpoint1->type[9]);
- cmd.params[2] |= mc_enc(16, 8, endpoint1->type[10]);
- cmd.params[2] |= mc_enc(24, 8, endpoint1->type[11]);
- cmd.params[2] |= mc_enc(32, 8, endpoint1->type[12]);
- cmd.params[2] |= mc_enc(40, 8, endpoint1->type[13]);
- cmd.params[2] |= mc_enc(48, 8, endpoint1->type[14]);
- cmd.params[2] |= mc_enc(56, 8, endpoint1->type[15]);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+ strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+ cmd_params->ep1_type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1598,25 +1376,12 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- endpoint2->id = mc_dec(cmd.params[3], 0, 32);
- endpoint2->if_id = mc_dec(cmd.params[3], 32, 32);
- endpoint2->type[0] = mc_dec(cmd.params[4], 0, 8);
- endpoint2->type[1] = mc_dec(cmd.params[4], 8, 8);
- endpoint2->type[2] = mc_dec(cmd.params[4], 16, 8);
- endpoint2->type[3] = mc_dec(cmd.params[4], 24, 8);
- endpoint2->type[4] = mc_dec(cmd.params[4], 32, 8);
- endpoint2->type[5] = mc_dec(cmd.params[4], 40, 8);
- endpoint2->type[6] = mc_dec(cmd.params[4], 48, 8);
- endpoint2->type[7] = mc_dec(cmd.params[4], 56, 8);
- endpoint2->type[8] = mc_dec(cmd.params[5], 0, 8);
- endpoint2->type[9] = mc_dec(cmd.params[5], 8, 8);
- endpoint2->type[10] = mc_dec(cmd.params[5], 16, 8);
- endpoint2->type[11] = mc_dec(cmd.params[5], 24, 8);
- endpoint2->type[12] = mc_dec(cmd.params[5], 32, 8);
- endpoint2->type[13] = mc_dec(cmd.params[5], 40, 8);
- endpoint2->type[14] = mc_dec(cmd.params[5], 48, 8);
- endpoint2->type[15] = mc_dec(cmd.params[5], 56, 8);
- *state = mc_dec(cmd.params[6], 0, 32);
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le32_to_cpu(rsp_params->ep2_interface_id);
+ strncpy(endpoint2->type, rsp_params->ep2_type, 16);
+ endpoint2->type[15] = '\0';
+ *state = le32_to_cpu(rsp_params->state);
return 0;
}
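
All of the dprc.c hunks above apply one mechanical conversion: an
mc_enc(lsoffset, width, val) into a 64-bit command word is equivalent to
writing val into the little-endian bytes starting at lsoffset/8 of that word,
so a packed little-endian struct overlaid on cmd.params reproduces the old
wire layout as long as multi-byte fields go through cpu_to_le*() and
le*_to_cpu(). A minimal sketch for the irq_index case; the struct layout is
assumed from dprc-cmd.h (not part of this diff), though it mirrors the
dpbp_cmd_get_irq definition shown further down:

/*
 * Old form: cmd.params[0] |= mc_enc(32, 8, irq_index);
 *           irq_index lands in bits 32-39, i.e. byte 4 of the
 *           little-endian command word.
 *
 * New form: overlay a struct whose fifth byte is irq_index.
 */
struct dprc_cmd_get_irq {	/* layout assumed from dprc-cmd.h */
	__le32 pad;		/* bytes 0-3 (bits 0-31), unused */
	u8 irq_index;		/* byte 4  (bits 32-39)          */
};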
diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c
index fb08f22a7f9c..e59d85060c7b 100644
--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
@@ -717,7 +717,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
return 0;
}
-static const struct fsl_mc_device_match_id match_id_table[] = {
+static const struct fsl_mc_device_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpbp",
diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c
index 405364307561..db3afdbdf4ae 100644
--- a/drivers/staging/fsl-mc/bus/mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/mc-bus.c
@@ -24,8 +24,6 @@
static struct kmem_cache *mc_dev_cache;
-static bool fsl_mc_is_root_dprc(struct device *dev);
-
/**
* fsl_mc_bus_match - device to driver matching callback
* @dev: the MC object device structure to match against
@@ -36,7 +34,7 @@ static bool fsl_mc_is_root_dprc(struct device *dev);
*/
static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
{
- const struct fsl_mc_device_match_id *id;
+ const struct fsl_mc_device_id *id;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
@@ -78,14 +76,45 @@ out:
*/
static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- pr_debug("%s invoked\n", __func__);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
+ mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type))
+ return -ENOMEM;
+
return 0;
}
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *fsl_mc_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+
+static const struct attribute_group fsl_mc_dev_group = {
+ .attrs = fsl_mc_dev_attrs,
+};
+
+static const struct attribute_group *fsl_mc_dev_groups[] = {
+ &fsl_mc_dev_group,
+ NULL,
+};
+
struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
+ .dev_groups = fsl_mc_dev_groups,
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
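
With the uevent callback and the modalias attribute above, an MC object
device now reports the same "fsl-mc:v<vendor>d<type>" string through both
uevents and sysfs, which is what userspace module autoloading keys on. A
hypothetical example, assuming FSL_MC_VENDOR_FREESCALE is 0x1957 and a
"dpni" object named dpni.1:

	$ cat /sys/bus/fsl-mc/devices/dpni.1/modalias
	fsl-mc:v00001957ddpni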
@@ -216,19 +245,6 @@ static void fsl_mc_get_root_dprc(struct device *dev,
}
}
-/**
- * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
- */
-static bool fsl_mc_is_root_dprc(struct device *dev)
-{
- struct device *root_dprc_dev;
-
- fsl_mc_get_root_dprc(dev, &root_dprc_dev);
- if (!root_dprc_dev)
- return false;
- return dev == root_dprc_dev;
-}
-
static int get_dprc_attr(struct fsl_mc_io *mc_io,
int container_id, struct dprc_attributes *attr)
{
@@ -393,6 +409,19 @@ error_cleanup_regions:
}
/**
+ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
+ */
+bool fsl_mc_is_root_dprc(struct device *dev)
+{
+ struct device *root_dprc_dev;
+
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ if (!root_dprc_dev)
+ return false;
+ return dev == root_dprc_dev;
+}
+
+/**
* Add a newly discovered MC object device to be visible in Linux
*/
int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
@@ -550,10 +579,6 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
mc_bus = to_fsl_mc_bus(mc_dev);
- if (mc_dev->mc_io) {
- fsl_destroy_mc_io(mc_dev->mc_io);
- mc_dev->mc_io = NULL;
- }
if (fsl_mc_is_root_dprc(&mc_dev->dev)) {
if (atomic_read(&root_dprc_count) > 0)
@@ -781,6 +806,10 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
return -EINVAL;
fsl_mc_device_remove(mc->root_mc_bus_dev);
+
+ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
+ mc->root_mc_bus_dev->mc_io = NULL;
+
dev_info(&pdev->dev, "Root MC bus device removed");
return 0;
}
diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/mc-msi.c
index e202b2b88c63..c7be156ae5e0 100644
--- a/drivers/staging/fsl-mc/bus/mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/mc-msi.c
@@ -20,11 +20,26 @@
#include "../include/mc-sys.h"
#include "dprc-cmd.h"
+/*
+ * Generate a unique ID identifying the interrupt (only used within the MSI
+ * irqdomain); combine the icid with the interrupt index.
+ */
+static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
+ struct msi_desc *desc)
+{
+ /*
+ * Make the base hwirq value ICID * 10000 so it is readable
+ * as a decimal value in /proc/interrupts.
+ */
+ return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+}
+
static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
struct msi_desc *desc)
{
arg->desc = desc;
- arg->hwirq = (irq_hw_number_t)desc->fsl_mc.msi_index;
+ arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
+ desc);
}
static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
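
As a worked example of the hwirq scheme above: a device with ICID 66 and MSI
index 3 (hypothetical values) gets hwirq 3 + 66 * 10000 = 660003, so both the
ICID and the index can be read straight off the decimal digits in
/proc/interrupts.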
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
index 810a611c1cb0..0c185abe665e 100644
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
@@ -53,8 +53,20 @@
#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
-#define MC_CMD_HDR_READ_CMDID(_hdr) \
- ((u16)mc_dec((_hdr), MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S))
+static enum mc_cmd_status mc_cmd_hdr_read_status(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+
+ return (enum mc_cmd_status)hdr->status;
+}
+
+static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 cmd_id = le16_to_cpu(hdr->cmd_id);
+
+ return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
+}
/**
* Creates an MC I/O object
@@ -261,10 +273,11 @@ static inline void mc_write_command(struct mc_command __iomem *portal,
/* copy command parameters into the portal */
for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
- writeq(cmd->params[i], &portal->params[i]);
+ __raw_writeq(cmd->params[i], &portal->params[i]);
+ __iowmb();
/* submit the command by writing the header */
- writeq(cmd->header, &portal->header);
+ __raw_writeq(cmd->header, &portal->header);
}
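
Two things change together in mc_write_command(): the parameter words are now
pre-converted with cpu_to_le*() by the struct-overlay callers, so the extra
byte swap that writeq() performs on big-endian hosts would corrupt them, and
__raw_writeq() copies them verbatim instead. The explicit __iowmb() preserves
what is presumably the one ordering guarantee the portal protocol needs:
every parameter word must be visible to the MC before the header write that
submits the command (and, symmetrically in mc_read_response() below, the
response header is read before the response parameters).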
/**
@@ -284,14 +297,17 @@ static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem *
enum mc_cmd_status status;
/* Copy command response header from MC portal: */
- resp->header = readq(&portal->header);
- status = MC_CMD_HDR_READ_STATUS(resp->header);
+ __iormb();
+ resp->header = __raw_readq(&portal->header);
+ __iormb();
+ status = mc_cmd_hdr_read_status(resp);
if (status != MC_CMD_STATUS_OK)
return status;
/* Copy command response data from MC portal: */
for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
- resp->params[i] = readq(&portal->params[i]);
+ resp->params[i] = __raw_readq(&portal->params[i]);
+ __iormb();
return status;
}
@@ -331,10 +347,8 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
dev_dbg(mc_io->dev,
"MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
mc_io->portal_phys_addr,
- (unsigned int)
- MC_CMD_HDR_READ_TOKEN(cmd->header),
- (unsigned int)
- MC_CMD_HDR_READ_CMDID(cmd->header));
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
return -ETIMEDOUT;
}
@@ -373,10 +387,8 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
dev_dbg(mc_io->dev,
"MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
mc_io->portal_phys_addr,
- (unsigned int)
- MC_CMD_HDR_READ_TOKEN(cmd->header),
- (unsigned int)
- MC_CMD_HDR_READ_CMDID(cmd->header));
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
return -ETIMEDOUT;
}
@@ -429,8 +441,8 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
dev_dbg(mc_io->dev,
"MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
mc_io->portal_phys_addr,
- (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header),
- (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header),
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
mc_status_to_string(status),
(unsigned int)status);
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
index c57b454a2912..4828ccd0cffd 100644
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -59,4 +59,127 @@
#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
+
+struct dpbp_cmd_open {
+ __le32 dpbp_id;
+};
+
+#define DPBP_ENABLE 0x1
+
+struct dpbp_rsp_is_enabled {
+ u8 enabled;
+};
+
+struct dpbp_cmd_set_irq {
+ /* cmd word 0 */
+ u8 irq_index;
+ u8 pad[3];
+ __le32 irq_val;
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+struct dpbp_cmd_get_irq {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq {
+ /* response word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* response word 1 */
+ __le64 irq_addr;
+ /* response word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+struct dpbp_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpbp_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_enable {
+ u8 enabled;
+};
+
+struct dpbp_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpbp_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpbp_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpbp_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpbp_rsp_get_attributes {
+ /* response word 0 */
+ __le16 pad;
+ __le16 bpid;
+ __le32 id;
+ /* response word 1 */
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+struct dpbp_cmd_set_notifications {
+ /* cmd word 0 */
+ __le32 depletion_entry;
+ __le32 depletion_exit;
+ /* cmd word 1 */
+ __le32 surplus_entry;
+ __le32 surplus_exit;
+ /* cmd word 2 */
+ __le16 options;
+ __le16 pad[3];
+ /* cmd word 3 */
+ __le64 message_ctx;
+ /* cmd word 4 */
+ __le64 message_iova;
+};
+
+struct dpbp_rsp_get_notifications {
+ /* response word 0 */
+ __le32 depletion_entry;
+ __le32 depletion_exit;
+ /* response word 1 */
+ __le32 surplus_entry;
+ __le32 surplus_exit;
+ /* response word 2 */
+ __le16 options;
+ __le16 pad[3];
+ /* response word 3 */
+ __le64 message_ctx;
+ /* response word 4 */
+ __le64 message_iova;
+};
+
#endif /* _FSL_DPBP_CMD_H */
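
Each of these structures is meant to be overlaid directly on the 64-bit
cmd.params[] words, so the field order and padding encode the wire format. A
hedged illustration for dpbp_cmd_set_irq (kernel headers assumed; this
compile-time layout check is only for illustration, not part of the driver):

static inline void dpbp_cmd_set_irq_layout_check(void)
{
	/*
	 * cmd word 0: irq_index (byte 0), pad[3], irq_val (bytes 4-7)
	 * cmd word 1: irq_addr
	 * cmd word 2: irq_num
	 * The struct must therefore fit in three command words.
	 */
	BUILD_BUG_ON(sizeof(struct dpbp_cmd_set_irq) > 3 * sizeof(u64));
}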
diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
index 65277e3de44d..5decb9890c31 100644
--- a/drivers/staging/fsl-mc/include/mc-cmd.h
+++ b/drivers/staging/fsl-mc/include/mc-cmd.h
@@ -34,18 +34,14 @@
#define MC_CMD_NUM_OF_PARAMS 7
-#define MAKE_UMASK64(_width) \
- ((u64)((_width) < 64 ? ((u64)1 << (_width)) - 1 : -1))
-
-static inline u64 mc_enc(int lsoffset, int width, u64 val)
-{
- return (u64)(((u64)val & MAKE_UMASK64(width)) << lsoffset);
-}
-
-static inline u64 mc_dec(u64 val, int lsoffset, int width)
-{
- return (u64)((val >> lsoffset) & MAKE_UMASK64(width));
-}
+struct mc_cmd_header {
+ u8 src_id;
+ u8 flags_hw;
+ u8 status;
+ u8 flags_sw;
+ __le16 token;
+ __le16 cmd_id;
+};
struct mc_command {
u64 header;
@@ -72,60 +68,41 @@ enum mc_cmd_status {
*/
/* High priority flag */
-#define MC_CMD_FLAG_PRI 0x00008000
+#define MC_CMD_FLAG_PRI 0x80
/* Command completion flag */
-#define MC_CMD_FLAG_INTR_DIS 0x01000000
-
-/*
- * TODO Remove following two defines after completion of flib 8.0.0
- * integration
- */
-#define MC_CMD_PRI_LOW 0 /*!< Low Priority command indication */
-#define MC_CMD_PRI_HIGH 1 /*!< High Priority command indication */
-
-#define MC_CMD_HDR_CMDID_O 52 /* Command ID field offset */
-#define MC_CMD_HDR_CMDID_S 12 /* Command ID field size */
-#define MC_CMD_HDR_TOKEN_O 38 /* Token field offset */
-#define MC_CMD_HDR_TOKEN_S 10 /* Token field size */
-#define MC_CMD_HDR_STATUS_O 16 /* Status field offset */
-#define MC_CMD_HDR_STATUS_S 8 /* Status field size*/
-#define MC_CMD_HDR_FLAGS_O 0 /* Flags field offset */
-#define MC_CMD_HDR_FLAGS_S 32 /* Flags field size*/
-#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00 /* Command flags mask */
-
-#define MC_CMD_HDR_READ_STATUS(_hdr) \
- ((enum mc_cmd_status)mc_dec((_hdr), \
- MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S))
-
-#define MC_CMD_HDR_READ_TOKEN(_hdr) \
- ((u16)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
-
-#define MC_CMD_HDR_READ_FLAGS(_hdr) \
- ((u32)mc_dec((_hdr), MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S))
+#define MC_CMD_FLAG_INTR_DIS 0x01
-#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \
- ((_ext)[_param] |= mc_enc((_offset), (_width), _arg))
-
-#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
- ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
-
-#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
- (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
+#define MC_CMD_HDR_CMDID_MASK 0xFFF0
+#define MC_CMD_HDR_CMDID_SHIFT 4
+#define MC_CMD_HDR_TOKEN_MASK 0xFFC0
+#define MC_CMD_HDR_TOKEN_SHIFT 6
static inline u64 mc_encode_cmd_header(u16 cmd_id,
u32 cmd_flags,
u16 token)
{
- u64 hdr;
+ u64 header = 0;
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+ hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) &
+ MC_CMD_HDR_CMDID_MASK);
+ hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
+ MC_CMD_HDR_TOKEN_MASK);
+ hdr->status = MC_CMD_STATUS_READY;
+ if (cmd_flags & MC_CMD_FLAG_PRI)
+ hdr->flags_hw = MC_CMD_FLAG_PRI;
+ if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
+ hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
+
+ return header;
+}
- hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id);
- hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S,
- (cmd_flags & MC_CMD_HDR_FLAGS_MASK));
- hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token);
- hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S,
- MC_CMD_STATUS_READY);
+static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 token = le16_to_cpu(hdr->token);
- return hdr;
+ return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT;
}
#endif /* __FSL_MC_CMD_H */
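
A worked example of the new header encoding, with hypothetical values
cmd_id = 0x123 and token = 0x21:

/*
 * hdr->cmd_id = cpu_to_le16((0x123 << 4) & 0xFFF0) = 0x1230
 * hdr->token  = cpu_to_le16((0x21  << 6) & 0xFFC0) = 0x0840
 *
 * cmd_id is the last __le16 of the header, i.e. bits 48-63 of the u64,
 * and the shift by 4 puts the 12-bit ID in bits 52-63 -- exactly where
 * the old mc_enc(52, 12, cmd_id) placed it. Likewise token occupies
 * bits 38-47, matching the old offset/size of 38/10, and
 * mc_cmd_hdr_read_cmdid() inverts the mask-and-shift:
 * (0x1230 & 0xFFF0) >> 4 == 0x123.
 */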
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index ac7c1ce68c03..853cbf38a400 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -39,7 +39,7 @@ struct fsl_mc_bus;
*/
struct fsl_mc_driver {
struct device_driver driver;
- const struct fsl_mc_device_match_id *match_id_table;
+ const struct fsl_mc_device_id *match_id_table;
int (*probe)(struct fsl_mc_device *dev);
int (*remove)(struct fsl_mc_device *dev);
void (*shutdown)(struct fsl_mc_device *dev);
@@ -51,23 +51,6 @@ struct fsl_mc_driver {
container_of(_drv, struct fsl_mc_driver, driver)
/**
- * struct fsl_mc_device_match_id - MC object device Id entry for driver matching
- * @vendor: vendor ID
- * @obj_type: MC object type
- * @ver_major: MC object version major number
- * @ver_minor: MC object version minor number
- *
- * Type of entries in the "device Id" table for MC object devices supported by
- * a MC object device driver. The last entry of the table has vendor set to 0x0
- */
-struct fsl_mc_device_match_id {
- u16 vendor;
- const char obj_type[16];
- u32 ver_major;
- u32 ver_minor;
-};
-
-/**
* enum fsl_mc_pool_type - Types of allocatable MC bus resources
*
* Entries in these enum are used as indices in the array of resource
@@ -224,6 +207,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+bool fsl_mc_is_root_dprc(struct device *dev);
+
extern struct bus_type fsl_mc_bus_type;
#endif /* _FSL_MC_H_ */
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index f066aa30f0ac..1c994b57c7d2 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -51,20 +51,6 @@ config ADIS16240
To compile this driver as a module, say M here: the module will be
called adis16240.
-config LIS3L02DQ
- tristate "ST Microelectronics LIS3L02DQ Accelerometer Driver"
- depends on SPI
- select IIO_TRIGGER if IIO_BUFFER
- depends on !IIO_BUFFER || IIO_KFIFO_BUF
- depends on GPIOLIB || COMPILE_TEST
- help
- Say Y here to build SPI support for the ST microelectronics
- accelerometer. The driver supplies direct access via sysfs files
- and an event interface via a character device.
-
- To compile this driver as a module, say M here: the module will be
- called lis3l02dq.
-
config SCA3000
depends on IIO_BUFFER
depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 415329c96f0c..1810a434a755 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -14,9 +14,5 @@ obj-$(CONFIG_ADIS16209) += adis16209.o
adis16240-y := adis16240_core.o
obj-$(CONFIG_ADIS16240) += adis16240.o
-lis3l02dq-y := lis3l02dq_core.o
-lis3l02dq-$(CONFIG_IIO_BUFFER) += lis3l02dq_ring.o
-obj-$(CONFIG_LIS3L02DQ) += lis3l02dq.o
-
sca3000-y := sca3000_core.o sca3000_ring.o
obj-$(CONFIG_SCA3000) += sca3000.o
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
deleted file mode 100644
index 6bd3d4d5bc9d..000000000000
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * LISL02DQ.h -- support STMicroelectronics LISD02DQ
- * 3d 2g Linear Accelerometers via SPI
- *
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * Loosely based upon tle62x0.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SPI_LIS3L02DQ_H_
-#define SPI_LIS3L02DQ_H_
-#define LIS3L02DQ_READ_REG(a) ((a) | 0x80)
-#define LIS3L02DQ_WRITE_REG(a) a
-
-/* Calibration parameters */
-#define LIS3L02DQ_REG_OFFSET_X_ADDR 0x16
-#define LIS3L02DQ_REG_OFFSET_Y_ADDR 0x17
-#define LIS3L02DQ_REG_OFFSET_Z_ADDR 0x18
-
-#define LIS3L02DQ_REG_GAIN_X_ADDR 0x19
-#define LIS3L02DQ_REG_GAIN_Y_ADDR 0x1A
-#define LIS3L02DQ_REG_GAIN_Z_ADDR 0x1B
-
-/* Control Register (1 of 2) */
-#define LIS3L02DQ_REG_CTRL_1_ADDR 0x20
-/* Power ctrl - either bit set corresponds to on*/
-#define LIS3L02DQ_REG_CTRL_1_PD_ON 0xC0
-
-/* Decimation Factor */
-#define LIS3L02DQ_DEC_MASK 0x30
-#define LIS3L02DQ_REG_CTRL_1_DF_128 0x00
-#define LIS3L02DQ_REG_CTRL_1_DF_64 0x10
-#define LIS3L02DQ_REG_CTRL_1_DF_32 0x20
-#define LIS3L02DQ_REG_CTRL_1_DF_8 (0x10 | 0x20)
-
-/* Self Test Enable */
-#define LIS3L02DQ_REG_CTRL_1_SELF_TEST_ON 0x08
-
-/* Axes enable ctrls */
-#define LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE 0x04
-#define LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE 0x02
-#define LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE 0x01
-
-/* Control Register (2 of 2) */
-#define LIS3L02DQ_REG_CTRL_2_ADDR 0x21
-
-/* Block Data Update only after MSB and LSB read */
-#define LIS3L02DQ_REG_CTRL_2_BLOCK_UPDATE 0x40
-
-/* Set to big endian output */
-#define LIS3L02DQ_REG_CTRL_2_BIG_ENDIAN 0x20
-
-/* Reboot memory content */
-#define LIS3L02DQ_REG_CTRL_2_REBOOT_MEMORY 0x10
-
-/* Interrupt Enable - applies data ready to the RDY pad */
-#define LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT 0x08
-
-/* Enable Data Ready Generation - relationship with previous unclear in docs */
-#define LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION 0x04
-
-/* SPI 3 wire mode */
-#define LIS3L02DQ_REG_CTRL_2_THREE_WIRE_SPI_MODE 0x02
-
-/* Data alignment, default is 12 bit right justified
- * - option for 16 bit left justified
- */
-#define LIS3L02DQ_REG_CTRL_2_DATA_ALIGNMENT_16_BIT_LEFT_JUSTIFIED 0x01
-
-/* Interrupt related stuff */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_ADDR 0x23
-
-/* Switch from or combination of conditions to and */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_BOOLEAN_AND 0x80
-
-/* Latch interrupt request,
- * if on ack must be given by reading the ack register
- */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC 0x40
-
-/* Z Interrupt on High (above threshold) */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_HIGH 0x20
-/* Z Interrupt on Low */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_LOW 0x10
-/* Y Interrupt on High */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_HIGH 0x08
-/* Y Interrupt on Low */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_LOW 0x04
-/* X Interrupt on High */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_HIGH 0x02
-/* X Interrupt on Low */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_LOW 0x01
-
-/* Register that gives description of what caused interrupt
- * - latched if so configured in the wake up CFG register
- */
-#define LIS3L02DQ_REG_WAKE_UP_SRC_ADDR 0x24
-/* top bit ignored */
-/* Interrupt Active */
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_ACTIVATED 0x40
-/* Interrupts that have been triggered */
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH 0x20
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW 0x10
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH 0x08
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW 0x04
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH 0x02
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW 0x01
-
-#define LIS3L02DQ_REG_WAKE_UP_ACK_ADDR 0x25
-
-/* Status register */
-#define LIS3L02DQ_REG_STATUS_ADDR 0x27
-/* XYZ axis data overrun - first is all overrun? */
-#define LIS3L02DQ_REG_STATUS_XYZ_OVERRUN 0x80
-#define LIS3L02DQ_REG_STATUS_Z_OVERRUN 0x40
-#define LIS3L02DQ_REG_STATUS_Y_OVERRUN 0x20
-#define LIS3L02DQ_REG_STATUS_X_OVERRUN 0x10
-/* XYZ new data available - first is all 3 available? */
-#define LIS3L02DQ_REG_STATUS_XYZ_NEW_DATA 0x08
-#define LIS3L02DQ_REG_STATUS_Z_NEW_DATA 0x04
-#define LIS3L02DQ_REG_STATUS_Y_NEW_DATA 0x02
-#define LIS3L02DQ_REG_STATUS_X_NEW_DATA 0x01
-
-/* The accelerometer readings - low and high bytes.
- * Form of high byte dependent on justification set in ctrl reg
- */
-#define LIS3L02DQ_REG_OUT_X_L_ADDR 0x28
-#define LIS3L02DQ_REG_OUT_X_H_ADDR 0x29
-#define LIS3L02DQ_REG_OUT_Y_L_ADDR 0x2A
-#define LIS3L02DQ_REG_OUT_Y_H_ADDR 0x2B
-#define LIS3L02DQ_REG_OUT_Z_L_ADDR 0x2C
-#define LIS3L02DQ_REG_OUT_Z_H_ADDR 0x2D
-
-/* Threshold values for all axes and both above and below thresholds
- * - i.e. there is only one value
- */
-#define LIS3L02DQ_REG_THS_L_ADDR 0x2E
-#define LIS3L02DQ_REG_THS_H_ADDR 0x2F
-
-#define LIS3L02DQ_DEFAULT_CTRL1 (LIS3L02DQ_REG_CTRL_1_PD_ON \
- | LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE \
- | LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE \
- | LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE \
- | LIS3L02DQ_REG_CTRL_1_DF_128)
-
-#define LIS3L02DQ_DEFAULT_CTRL2 0
-
-#define LIS3L02DQ_MAX_TX 12
-#define LIS3L02DQ_MAX_RX 12
-/**
- * struct lis3l02dq_state - device instance specific data
- * @us: actual spi_device
- * @trig: data ready trigger registered with iio
- * @buf_lock: mutex to protect tx and rx
- * @tx: transmit buffer
- * @rx: receive buffer
- **/
-struct lis3l02dq_state {
- struct spi_device *us;
- struct iio_trigger *trig;
- struct mutex buf_lock;
- int gpio;
- bool trigger_on;
-
- u8 tx[LIS3L02DQ_MAX_RX] ____cacheline_aligned;
- u8 rx[LIS3L02DQ_MAX_RX] ____cacheline_aligned;
-};
-
-int lis3l02dq_spi_read_reg_8(struct iio_dev *indio_dev,
- u8 reg_address,
- u8 *val);
-
-int lis3l02dq_spi_write_reg_8(struct iio_dev *indio_dev,
- u8 reg_address,
- u8 val);
-
-int lis3l02dq_disable_all_events(struct iio_dev *indio_dev);
-
-#ifdef CONFIG_IIO_BUFFER
-/* At the moment triggers are only used for buffer
- * filling. This may change!
- */
-void lis3l02dq_remove_trigger(struct iio_dev *indio_dev);
-int lis3l02dq_probe_trigger(struct iio_dev *indio_dev);
-
-int lis3l02dq_configure_buffer(struct iio_dev *indio_dev);
-void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
-
-irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private);
-#define lis3l02dq_th lis3l02dq_data_rdy_trig_poll
-
-#else /* CONFIG_IIO_BUFFER */
-#define lis3l02dq_th lis3l02dq_nobuffer
-
-static inline void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
-{
-}
-
-static inline int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
-{
-}
-#endif /* CONFIG_IIO_BUFFER */
-#endif /* SPI_LIS3L02DQ_H_ */
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
deleted file mode 100644
index 7a6fed3f2d3f..000000000000
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ /dev/null
@@ -1,814 +0,0 @@
-/*
- * lis3l02dq.c -- support STMicroelectronics LIS3L02DQ
- * 3d 2g Linear Accelerometers via SPI
- *
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Settings:
- * 16 bit left justified mode used.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include <linux/iio/buffer.h>
-
-#include "lis3l02dq.h"
-
-/* At the moment the spi framework doesn't allow global setting of cs_change.
- * It's in the likely to be added comment at the top of spi.h.
- * This means that use cannot be made of spi_write etc.
- */
-/* direct copy of the irq_default_primary_handler */
-#ifndef CONFIG_IIO_BUFFER
-static irqreturn_t lis3l02dq_nobuffer(int irq, void *private)
-{
- return IRQ_WAKE_THREAD;
-}
-#endif
-
-/**
- * lis3l02dq_spi_read_reg_8() - read single byte from a single register
- * @indio_dev: iio_dev for this actual device
- * @reg_address: the address of the register to be read
- * @val: pass back the resulting value
- **/
-int lis3l02dq_spi_read_reg_8(struct iio_dev *indio_dev,
- u8 reg_address, u8 *val)
-{
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- int ret;
- struct spi_transfer xfer = {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 2,
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = LIS3L02DQ_READ_REG(reg_address);
- st->tx[1] = 0;
-
- ret = spi_sync_transfer(st->us, &xfer, 1);
- *val = st->rx[1];
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-/**
- * lis3l02dq_spi_write_reg_8() - write single byte to a register
- * @indio_dev: iio_dev for this device
- * @reg_address: the address of the register to be written
- * @val: the value to write
- **/
-int lis3l02dq_spi_write_reg_8(struct iio_dev *indio_dev,
- u8 reg_address,
- u8 val)
-{
- int ret;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = LIS3L02DQ_WRITE_REG(reg_address);
- st->tx[1] = val;
- ret = spi_write(st->us, st->tx, 2);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-/**
- * lisl302dq_spi_write_reg_s16() - write 2 bytes to a pair of registers
- * @indio_dev: iio_dev for this device
- * @lower_reg_address: the address of the lower of the two registers.
- * Second register is assumed to have address one greater.
- * @value: value to be written
- **/
-static int lis3l02dq_spi_write_reg_s16(struct iio_dev *indio_dev,
- u8 lower_reg_address,
- s16 value)
-{
- int ret;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = { {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- }, {
- .tx_buf = st->tx + 2,
- .bits_per_word = 8,
- .len = 2,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = LIS3L02DQ_WRITE_REG(lower_reg_address);
- st->tx[1] = value & 0xFF;
- st->tx[2] = LIS3L02DQ_WRITE_REG(lower_reg_address + 1);
- st->tx[3] = (value >> 8) & 0xFF;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int lis3l02dq_read_reg_s16(struct iio_dev *indio_dev,
- u8 lower_reg_address,
- int *val)
-{
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- int ret;
- s16 tempval;
- struct spi_transfer xfers[] = { {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- }, {
- .tx_buf = st->tx + 2,
- .rx_buf = st->rx + 2,
- .bits_per_word = 8,
- .len = 2,
- },
- };
-
- mutex_lock(&st->buf_lock);
- st->tx[0] = LIS3L02DQ_READ_REG(lower_reg_address);
- st->tx[1] = 0;
- st->tx[2] = LIS3L02DQ_READ_REG(lower_reg_address + 1);
- st->tx[3] = 0;
-
- ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- dev_err(&st->us->dev, "problem when reading 16 bit register");
- goto error_ret;
- }
- tempval = (s16)(st->rx[1]) | ((s16)(st->rx[3]) << 8);
-
- *val = tempval;
-error_ret:
- mutex_unlock(&st->buf_lock);
- return ret;
-}
-
-enum lis3l02dq_rm_ind {
- LIS3L02DQ_ACCEL,
- LIS3L02DQ_GAIN,
- LIS3L02DQ_BIAS,
-};
-
-static u8 lis3l02dq_axis_map[3][3] = {
- [LIS3L02DQ_ACCEL] = { LIS3L02DQ_REG_OUT_X_L_ADDR,
- LIS3L02DQ_REG_OUT_Y_L_ADDR,
- LIS3L02DQ_REG_OUT_Z_L_ADDR },
- [LIS3L02DQ_GAIN] = { LIS3L02DQ_REG_GAIN_X_ADDR,
- LIS3L02DQ_REG_GAIN_Y_ADDR,
- LIS3L02DQ_REG_GAIN_Z_ADDR },
- [LIS3L02DQ_BIAS] = { LIS3L02DQ_REG_OFFSET_X_ADDR,
- LIS3L02DQ_REG_OFFSET_Y_ADDR,
- LIS3L02DQ_REG_OFFSET_Z_ADDR }
-};
-
-static int lis3l02dq_read_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int *val, int *val2)
-{
- int ret;
-
- ret = lis3l02dq_read_reg_s16(indio_dev, LIS3L02DQ_REG_THS_L_ADDR, val);
- if (ret)
- return ret;
- return IIO_VAL_INT;
-}
-
-static int lis3l02dq_write_thresh(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int val, int val2)
-{
- u16 value = val;
-
- return lis3l02dq_spi_write_reg_s16(indio_dev,
- LIS3L02DQ_REG_THS_L_ADDR,
- value);
-}
-
-static int lis3l02dq_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- int ret = -EINVAL, reg;
- u8 uval;
- s8 sval;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- if (val > 255 || val < -256)
- return -EINVAL;
- sval = val;
- reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
- ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, sval);
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
- if (val & ~0xFF)
- return -EINVAL;
- uval = val;
- reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
- ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, uval);
- break;
- }
- return ret;
-}
-
-static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- u8 utemp;
- s8 stemp;
- ssize_t ret = 0;
- u8 reg;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
- ret = -EBUSY;
- } else {
- reg = lis3l02dq_axis_map
- [LIS3L02DQ_ACCEL][chan->address];
- ret = lis3l02dq_read_reg_s16(indio_dev, reg, val);
- }
- mutex_unlock(&indio_dev->mlock);
- if (ret < 0)
- goto error_ret;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = 9580;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_CALIBSCALE:
- reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
- ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, &utemp);
- if (ret)
- goto error_ret;
- /* to match with what previous code does */
- *val = utemp;
- return IIO_VAL_INT;
-
- case IIO_CHAN_INFO_CALIBBIAS:
- reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
- ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, (u8 *)&stemp);
- /* to match with what previous code does */
- *val = stemp;
- return IIO_VAL_INT;
- }
-error_ret:
- return ret;
-}
-
-static ssize_t lis3l02dq_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- int ret, len = 0;
- s8 t;
-
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- (u8 *)&t);
- if (ret)
- return ret;
- t &= LIS3L02DQ_DEC_MASK;
- switch (t) {
- case LIS3L02DQ_REG_CTRL_1_DF_128:
- len = sprintf(buf, "280\n");
- break;
- case LIS3L02DQ_REG_CTRL_1_DF_64:
- len = sprintf(buf, "560\n");
- break;
- case LIS3L02DQ_REG_CTRL_1_DF_32:
- len = sprintf(buf, "1120\n");
- break;
- case LIS3L02DQ_REG_CTRL_1_DF_8:
- len = sprintf(buf, "4480\n");
- break;
- }
- return len;
-}
-
-static ssize_t lis3l02dq_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- unsigned long val;
- int ret;
- u8 t;
-
- ret = kstrtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- mutex_lock(&indio_dev->mlock);
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- &t);
- if (ret)
- goto error_ret_mutex;
- /* Wipe the bits clean */
- t &= ~LIS3L02DQ_DEC_MASK;
- switch (val) {
- case 280:
- t |= LIS3L02DQ_REG_CTRL_1_DF_128;
- break;
- case 560:
- t |= LIS3L02DQ_REG_CTRL_1_DF_64;
- break;
- case 1120:
- t |= LIS3L02DQ_REG_CTRL_1_DF_32;
- break;
- case 4480:
- t |= LIS3L02DQ_REG_CTRL_1_DF_8;
- break;
- default:
- ret = -EINVAL;
- goto error_ret_mutex;
- }
-
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- t);
-
-error_ret_mutex:
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-static int lis3l02dq_initial_setup(struct iio_dev *indio_dev)
-{
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- int ret;
- u8 val, valtest;
-
- st->us->mode = SPI_MODE_3;
-
- spi_setup(st->us);
-
- val = LIS3L02DQ_DEFAULT_CTRL1;
- /* Write suitable defaults to ctrl1 */
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- val);
- if (ret) {
- dev_err(&st->us->dev, "problem with setup control register 1");
- goto err_ret;
- }
- /* Repeat as sometimes doesn't work first time? */
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- val);
- if (ret) {
- dev_err(&st->us->dev, "problem with setup control register 1");
- goto err_ret;
- }
-
- /*
- * Read back to check this has worked acts as loose test of correct
- * chip
- */
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- &valtest);
- if (ret || (valtest != val)) {
- dev_err(&indio_dev->dev,
- "device not playing ball %d %d\n", valtest, val);
- ret = -EINVAL;
- goto err_ret;
- }
-
- val = LIS3L02DQ_DEFAULT_CTRL2;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- val);
- if (ret) {
- dev_err(&st->us->dev, "problem with setup control register 2");
- goto err_ret;
- }
-
- val = LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- val);
- if (ret)
- dev_err(&st->us->dev, "problem with interrupt cfg register");
-err_ret:
-
- return ret;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- lis3l02dq_read_frequency,
- lis3l02dq_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("280 560 1120 4480");
-
-static irqreturn_t lis3l02dq_event_handler(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- u8 t;
-
- s64 timestamp = iio_get_time_ns();
-
- lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
- &t);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- timestamp);
-
- if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- timestamp);
-
- /* Ack and allow for new interrupts */
- lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_ACK_ADDR,
- &t);
-
- return IRQ_HANDLED;
-}
-
-static const struct iio_event_spec lis3l02dq_event[] = {
- {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_RISING,
- .mask_separate = BIT(IIO_EV_INFO_ENABLE),
- .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
- }, {
- .type = IIO_EV_TYPE_THRESH,
- .dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_ENABLE),
- .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
- }
-};
-
-#define LIS3L02DQ_CHAN(index, mod) \
- { \
- .type = IIO_ACCEL, \
- .modified = 1, \
- .channel2 = mod, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_CALIBSCALE) | \
- BIT(IIO_CHAN_INFO_CALIBBIAS), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .address = index, \
- .scan_index = index, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 12, \
- .storagebits = 16, \
- }, \
- .event_spec = lis3l02dq_event, \
- .num_event_specs = ARRAY_SIZE(lis3l02dq_event), \
- }
-
-static const struct iio_chan_spec lis3l02dq_channels[] = {
- LIS3L02DQ_CHAN(0, IIO_MOD_X),
- LIS3L02DQ_CHAN(1, IIO_MOD_Y),
- LIS3L02DQ_CHAN(2, IIO_MOD_Z),
- IIO_CHAN_SOFT_TIMESTAMP(3)
-};
-
-static int lis3l02dq_read_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir)
-{
- u8 val;
- int ret;
- u8 mask = 1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING));
-
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- &val);
- if (ret < 0)
- return ret;
-
- return !!(val & mask);
-}
-
-int lis3l02dq_disable_all_events(struct iio_dev *indio_dev)
-{
- int ret;
- u8 control, val;
-
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- &control);
-
- control &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- control);
- if (ret)
- goto error_ret;
- /* Also for consistency clear the mask */
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- &val);
- if (ret)
- goto error_ret;
- val &= ~0x3f;
-
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- val);
- if (ret)
- goto error_ret;
-
- ret = control;
-error_ret:
- return ret;
-}
-
-static int lis3l02dq_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- int state)
-{
- int ret = 0;
- u8 val, control;
- u8 currentlyset;
- bool changed = false;
- u8 mask = 1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING));
-
- mutex_lock(&indio_dev->mlock);
- /* read current control */
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- &control);
- if (ret)
- goto error_ret;
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- &val);
- if (ret < 0)
- goto error_ret;
- currentlyset = val & mask;
-
- if (!currentlyset && state) {
- changed = true;
- val |= mask;
- } else if (currentlyset && !state) {
- changed = true;
- val &= ~mask;
- }
-
- if (changed) {
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
- val);
- if (ret)
- goto error_ret;
- control = val & 0x3f ?
- (control | LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT) :
- (control & ~LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT);
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- control);
- if (ret)
- goto error_ret;
- }
-
-error_ret:
- mutex_unlock(&indio_dev->mlock);
- return ret;
-}
-
-static struct attribute *lis3l02dq_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group lis3l02dq_attribute_group = {
- .attrs = lis3l02dq_attributes,
-};
-
-static const struct iio_info lis3l02dq_info = {
- .read_raw = &lis3l02dq_read_raw,
- .write_raw = &lis3l02dq_write_raw,
- .read_event_value = &lis3l02dq_read_thresh,
- .write_event_value = &lis3l02dq_write_thresh,
- .write_event_config = &lis3l02dq_write_event_config,
- .read_event_config = &lis3l02dq_read_event_config,
- .driver_module = THIS_MODULE,
- .attrs = &lis3l02dq_attribute_group,
-};
-
-static int lis3l02dq_probe(struct spi_device *spi)
-{
- int ret;
- struct lis3l02dq_state *st;
- struct iio_dev *indio_dev;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- st->us = spi;
- st->gpio = of_get_gpio(spi->dev.of_node, 0);
- mutex_init(&st->buf_lock);
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &lis3l02dq_info;
- indio_dev->channels = lis3l02dq_channels;
- indio_dev->num_channels = ARRAY_SIZE(lis3l02dq_channels);
-
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = lis3l02dq_configure_buffer(indio_dev);
- if (ret)
- return ret;
-
- if (spi->irq) {
- ret = request_threaded_irq(st->us->irq,
- &lis3l02dq_th,
- &lis3l02dq_event_handler,
- IRQF_TRIGGER_RISING,
- "lis3l02dq",
- indio_dev);
- if (ret)
- goto error_unreg_buffer_funcs;
-
- ret = lis3l02dq_probe_trigger(indio_dev);
- if (ret)
- goto error_free_interrupt;
- }
-
- /* Get the device into a sane initial state */
- ret = lis3l02dq_initial_setup(indio_dev);
- if (ret)
- goto error_remove_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_remove_trigger;
-
- return 0;
-
-error_remove_trigger:
- if (spi->irq)
- lis3l02dq_remove_trigger(indio_dev);
-error_free_interrupt:
- if (spi->irq)
- free_irq(st->us->irq, indio_dev);
-error_unreg_buffer_funcs:
- lis3l02dq_unconfigure_buffer(indio_dev);
- return ret;
-}
-
-/* Power down the device */
-static int lis3l02dq_stop_device(struct iio_dev *indio_dev)
-{
- int ret;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- u8 val = 0;
-
- mutex_lock(&indio_dev->mlock);
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- val);
- if (ret) {
- dev_err(&st->us->dev, "problem with turning device off: ctrl1");
- goto err_ret;
- }
-
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- val);
- if (ret)
- dev_err(&st->us->dev, "problem with turning device off: ctrl2");
-err_ret:
- mutex_unlock(&indio_dev->mlock);
- return ret;
-}
-
-/* fixme, confirm ordering in this function */
-static int lis3l02dq_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
-
- lis3l02dq_disable_all_events(indio_dev);
- lis3l02dq_stop_device(indio_dev);
-
- if (spi->irq)
- free_irq(st->us->irq, indio_dev);
-
- lis3l02dq_remove_trigger(indio_dev);
- lis3l02dq_unconfigure_buffer(indio_dev);
-
- return 0;
-}
-
-static struct spi_driver lis3l02dq_driver = {
- .driver = {
- .name = "lis3l02dq",
- },
- .probe = lis3l02dq_probe,
- .remove = lis3l02dq_remove,
-};
-module_spi_driver(lis3l02dq_driver);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("ST LIS3L02DQ Accelerometer SPI driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:lis3l02dq");
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
deleted file mode 100644
index 50c162e0c31f..000000000000
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ /dev/null
@@ -1,428 +0,0 @@
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/mutex.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/kfifo_buf.h>
-#include <linux/iio/trigger.h>
-#include <linux/iio/trigger_consumer.h>
-#include "lis3l02dq.h"
-
-/**
- * combine_8_to_16() utility function to munge two u8s into u16
- **/
-static inline u16 combine_8_to_16(u8 lower, u8 upper)
-{
- u16 _lower = lower;
- u16 _upper = upper;
-
- return _lower | (_upper << 8);
-}
-
-/**
- * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
- **/
-irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
-{
- struct iio_dev *indio_dev = private;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- if (st->trigger_on) {
- iio_trigger_poll(st->trig);
- return IRQ_HANDLED;
- }
-
- return IRQ_WAKE_THREAD;
-}
-
-static const u8 read_all_tx_array[] = {
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
- LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
-};
-
-/**
- * lis3l02dq_read_all() Reads all channels currently selected
- * @indio_dev: IIO device state
- * @rx_array: (dma capable) receive array, must be at least
- * 4*number of channels
- **/
-static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
-{
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- struct spi_transfer *xfers;
- struct spi_message msg;
- int ret, i, j = 0;
-
- xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength) * 2,
- sizeof(*xfers), GFP_KERNEL);
- if (!xfers)
- return -ENOMEM;
-
- mutex_lock(&st->buf_lock);
-
- for (i = 0; i < ARRAY_SIZE(read_all_tx_array) / 4; i++)
- if (test_bit(i, indio_dev->active_scan_mask)) {
- /* lower byte */
- xfers[j].tx_buf = st->tx + (2 * j);
- st->tx[2 * j] = read_all_tx_array[i * 4];
- st->tx[2 * j + 1] = 0;
- if (rx_array)
- xfers[j].rx_buf = rx_array + (j * 2);
- xfers[j].bits_per_word = 8;
- xfers[j].len = 2;
- xfers[j].cs_change = 1;
- j++;
-
- /* upper byte */
- xfers[j].tx_buf = st->tx + (2 * j);
- st->tx[2 * j] = read_all_tx_array[i * 4 + 2];
- st->tx[2 * j + 1] = 0;
- if (rx_array)
- xfers[j].rx_buf = rx_array + (j * 2);
- xfers[j].bits_per_word = 8;
- xfers[j].len = 2;
- xfers[j].cs_change = 1;
- j++;
- }
-
- /* After these are transmitted, the rx_buff should have
- * values in alternate bytes
- */
- spi_message_init(&msg);
- for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength) * 2; j++)
- spi_message_add_tail(&xfers[j], &msg);
-
- ret = spi_sync(st->us, &msg);
- mutex_unlock(&st->buf_lock);
- kfree(xfers);
-
- return ret;
-}
-
-static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
- u8 *buf)
-{
- int ret, i;
- u8 *rx_array;
- s16 *data = (s16 *)buf;
- int scan_count = bitmap_weight(indio_dev->active_scan_mask,
- indio_dev->masklength);
-
- rx_array = kcalloc(4, scan_count, GFP_KERNEL);
- if (!rx_array)
- return -ENOMEM;
- ret = lis3l02dq_read_all(indio_dev, rx_array);
- if (ret < 0) {
- kfree(rx_array);
- return ret;
- }
- for (i = 0; i < scan_count; i++)
- data[i] = combine_8_to_16(rx_array[i * 4 + 1],
- rx_array[i * 4 + 3]);
- kfree(rx_array);
-
- return i * sizeof(data[0]);
-}
-
-static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- int len = 0;
- char *data;
-
- data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
- if (!data)
- goto done;
-
- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
- len = lis3l02dq_get_buffer_element(indio_dev, data);
-
- iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
-
- kfree(data);
-done:
- iio_trigger_notify_done(indio_dev->trig);
- return IRQ_HANDLED;
-}
-
-/* Caller responsible for locking as necessary. */
-static int
-__lis3l02dq_write_data_ready_config(struct iio_dev *indio_dev, bool state)
-{
- int ret;
- u8 valold;
- bool currentlyset;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- /* Get the current event mask register */
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- &valold);
- if (ret)
- goto error_ret;
- /* Find out if data ready is already on */
- currentlyset
- = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
-
- /* Disable requested */
- if (!state && currentlyset) {
- /* Disable the data ready signal */
- valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
-
- /* The double write is to overcome a hardware bug? */
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- valold);
- if (ret)
- goto error_ret;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- valold);
- if (ret)
- goto error_ret;
- st->trigger_on = false;
- /* Enable requested */
- } else if (state && !currentlyset) {
- /* If not set, enable requested
- * first disable all events
- */
- ret = lis3l02dq_disable_all_events(indio_dev);
- if (ret < 0)
- goto error_ret;
-
- valold = ret |
- LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
-
- st->trigger_on = true;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_2_ADDR,
- valold);
- if (ret)
- goto error_ret;
- }
-
- return 0;
-error_ret:
- return ret;
-}
-
-/**
- * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
- *
- * If disabling the interrupt also does a final read to ensure it is clear.
- * This is only important in some cases where the scan enable elements are
- * switched before the buffer is reenabled.
- **/
-static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
- bool state)
-{
- struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
- int ret = 0;
- u8 t;
-
- __lis3l02dq_write_data_ready_config(indio_dev, state);
- if (!state) {
- /*
- * A possible quirk with the handler is currently worked around
- * by ensuring outstanding read events are cleared.
- */
- ret = lis3l02dq_read_all(indio_dev, NULL);
- }
- lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
- &t);
- return ret;
-}
-
-/**
- * lis3l02dq_trig_try_reen() try reenabling irq for data rdy trigger
- * @trig: the datardy trigger
- */
-static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
-{
- struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
- struct lis3l02dq_state *st = iio_priv(indio_dev);
- int i;
-
- /* If gpio still high (or high again)
- * In theory possible we will need to do this several times
- */
- for (i = 0; i < 5; i++)
- if (gpio_get_value(st->gpio))
- lis3l02dq_read_all(indio_dev, NULL);
- else
- break;
- if (i == 5)
- pr_info("Failed to clear the interrupt for lis3l02dq\n");
-
- /* irq reenabled so success! */
- return 0;
-}
-
-static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
- .owner = THIS_MODULE,
- .set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
- .try_reenable = &lis3l02dq_trig_try_reen,
-};
-
-int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
-{
- int ret;
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- st->trig = iio_trigger_alloc("lis3l02dq-dev%d", indio_dev->id);
- if (!st->trig) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- st->trig->dev.parent = &st->us->dev;
- st->trig->ops = &lis3l02dq_trigger_ops;
- iio_trigger_set_drvdata(st->trig, indio_dev);
- ret = iio_trigger_register(st->trig);
- if (ret)
- goto error_free_trig;
-
- return 0;
-
-error_free_trig:
- iio_trigger_free(st->trig);
-error_ret:
- return ret;
-}
-
-void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
-{
- struct lis3l02dq_state *st = iio_priv(indio_dev);
-
- iio_trigger_unregister(st->trig);
- iio_trigger_free(st->trig);
-}
-
-void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
-{
- iio_dealloc_pollfunc(indio_dev->pollfunc);
- iio_kfifo_free(indio_dev->buffer);
-}
-
-static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
-{
- /* Disable unwanted channels otherwise the interrupt will not clear */
- u8 t;
- int ret;
- bool oneenabled = false;
-
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- &t);
- if (ret)
- goto error_ret;
-
- if (test_bit(0, indio_dev->active_scan_mask)) {
- t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
- oneenabled = true;
- } else {
- t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
- }
- if (test_bit(1, indio_dev->active_scan_mask)) {
- t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
- oneenabled = true;
- } else {
- t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
- }
- if (test_bit(2, indio_dev->active_scan_mask)) {
- t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
- oneenabled = true;
- } else {
- t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
- }
- if (!oneenabled) /* what happens in this case is unknown */
- return -EINVAL;
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- t);
- if (ret)
- goto error_ret;
-
- return iio_triggered_buffer_postenable(indio_dev);
-error_ret:
- return ret;
-}
-
-/* Turn all channels on again */
-static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
-{
- u8 t;
- int ret;
-
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret)
- goto error_ret;
-
- ret = lis3l02dq_spi_read_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- &t);
- if (ret)
- goto error_ret;
- t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
- LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
- LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
-
- ret = lis3l02dq_spi_write_reg_8(indio_dev,
- LIS3L02DQ_REG_CTRL_1_ADDR,
- t);
-
-error_ret:
- return ret;
-}
-
-static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
- .postenable = &lis3l02dq_buffer_postenable,
- .predisable = &lis3l02dq_buffer_predisable,
-};
-
-int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
-{
- int ret;
- struct iio_buffer *buffer;
-
- buffer = iio_kfifo_allocate();
- if (!buffer)
- return -ENOMEM;
-
- iio_device_attach_buffer(indio_dev, buffer);
-
- buffer->scan_timestamp = true;
- indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
-
- /* Functions are NULL as we set handler below */
- indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
- &lis3l02dq_trigger_handler,
- 0,
- indio_dev,
- "lis3l02dq_consumer%d",
- indio_dev->id);
-
- if (!indio_dev->pollfunc) {
- ret = -ENOMEM;
- goto error_iio_sw_rb_free;
- }
-
- indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
- return 0;
-
-error_iio_sw_rb_free:
- iio_kfifo_free(indio_dev->buffer);
- return ret;
-}
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index ec12181822e6..b5625f5d5e0e 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -774,7 +774,7 @@ static irqreturn_t sca3000_event_handler(int irq, void *private)
struct iio_dev *indio_dev = private;
struct sca3000_state *st = iio_priv(indio_dev);
int ret, val;
- s64 last_timestamp = iio_get_time_ns();
+ s64 last_timestamp = iio_get_time_ns(indio_dev);
/*
* Could, if badly timed, lead to an extra read of the status reg,
@@ -1046,6 +1046,8 @@ static int sca3000_clean_setup(struct sca3000_state *st)
/* Disable ring buffer */
ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ goto error_ret;
ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
(ret & SCA3000_OUT_CTRL_PROT_MASK)
| SCA3000_OUT_CTRL_BUF_X_EN
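The added check matters because sca3000_read_ctrl_reg() returns either a negative errno or the register value in the same int; without it, an error code would be masked with SCA3000_OUT_CTRL_PROT_MASK and written back as data. A minimal sketch of the pattern, with hypothetical helper and register names:

	int val = read_ctrl_reg(st, REG_SEL);	/* negative errno or register value */

	if (val < 0)
		return val;			/* propagate; never reuse an errno as data */
	return write_ctrl_reg(st, REG_SEL,
			      (val & PROT_MASK) | ENABLE_BITS);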
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index a06b46cb81ca..2177f1dd2b5d 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -705,7 +705,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
IIO_EV_DIR_RISING,
IIO_EV_TYPE_THRESH,
0, 0, 0),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
else if (((channels[i] >> 11) & 0xFFF) <=
st->cell_threshlow)
iio_push_event(indio_dev,
@@ -715,7 +715,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
IIO_EV_DIR_FALLING,
IIO_EV_TYPE_THRESH,
0, 0, 0),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
} else {
if (((channels[i] >> 11) & 0xFFF) >= st->aux_threshhigh)
iio_push_event(indio_dev,
@@ -724,7 +724,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
else if (((channels[i] >> 11) & 0xFFF) <=
st->aux_threshlow)
iio_push_event(indio_dev,
@@ -733,7 +733,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
}
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index a6f8eb11242c..0572df9aad85 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -77,7 +77,8 @@ static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
goto done;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
done:
gpio_set_value(st->pdata->gpio_convst, 0);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index ac3735c7f4a9..5e8115b01011 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -253,7 +253,8 @@ static const struct attribute_group ad7816_attribute_group = {
static irqreturn_t ad7816_event_handler(int irq, void *private)
{
- iio_push_event(private, IIO_EVENT_CODE_AD7816_OTI, iio_get_time_ns());
+ iio_push_event(private, IIO_EVENT_CODE_AD7816_OTI,
+ iio_get_time_ns((struct iio_dev *)private));
return IRQ_HANDLED;
}
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index a10e7d8e6002..3faffe59c933 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -1752,7 +1752,7 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
stat1 &= 0x1F;
- time = iio_get_time_ns();
+ time = iio_get_time_ns(indio_dev);
if (stat1 & BIT(0))
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
@@ -1804,7 +1804,7 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
return IRQ_HANDLED;
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index f6b9a10326ea..5578a077fcfb 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -493,7 +493,7 @@ static irqreturn_t ad7150_event_handler(int irq, void *private)
struct iio_dev *indio_dev = private;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
u8 int_status;
- s64 timestamp = iio_get_time_ns();
+ s64 timestamp = iio_get_time_ns(indio_dev);
int ret;
ret = i2c_smbus_read_byte_data(chip->client, AD7150_STATUS);
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index d553c8e18fcc..ea15bc1c300c 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -1554,7 +1554,7 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- s64 timestamp = iio_get_time_ns();
+ s64 timestamp = iio_get_time_ns(indio_dev);
int ret;
u8 value;
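The staging IIO hunks above are all the same mechanical conversion: iio_get_time_ns() now takes the iio_dev whose timestamping clock should be used. A minimal event-handler sketch under the new signature (the device specifics are placeholders):

static irqreturn_t example_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	s64 timestamp = iio_get_time_ns(indio_dev);	/* per-device clock */

	iio_push_event(indio_dev,
		       IIO_UNMOD_EVENT_CODE(IIO_ACCEL, 0,
					    IIO_EV_TYPE_THRESH,
					    IIO_EV_DIR_RISING),
		       timestamp);
	return IRQ_HANDLED;
}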
diff --git a/drivers/staging/ks7010/Kconfig b/drivers/staging/ks7010/Kconfig
new file mode 100644
index 000000000000..0b9217674d5b
--- /dev/null
+++ b/drivers/staging/ks7010/Kconfig
@@ -0,0 +1,10 @@
+config KS7010
+ tristate "KeyStream KS7010 SDIO support"
+ depends on MMC && WIRELESS
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select FW_LOADER
+ help
+ This is a driver for KeyStream KS7010 based SDIO Wi-Fi cards. The chip
+ is found on at least later Spectec SDW-821 cards (FCC-ID "S2Y-WLAN-11G-K"
+ only, sadly not FCC-ID "S2Y-WLAN-11B-G") and on Spectec SDW-823 microSD
+ cards.
diff --git a/drivers/staging/ks7010/Makefile b/drivers/staging/ks7010/Makefile
new file mode 100644
index 000000000000..69fcf8d655c7
--- /dev/null
+++ b/drivers/staging/ks7010/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_KS7010) += ks7010.o
+
+ccflags-y += -DKS_WLAN_DEBUG=0
+ks7010-y := michael_mic.o ks_hostif.o ks_wlan_net.o ks7010_sdio.o
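The -DKS_WLAN_DEBUG=0 flag hardwires the verbosity level that the DPRINTK() calls throughout this driver compare against. The actual macro lives in ks_wlan.h, which is outside this excerpt; a plausible sketch of its shape, offered only as an assumption:

#define DPRINTK(n, fmt, args...)					\
	do {								\
		if (KS_WLAN_DEBUG > (n))				\
			printk(KERN_NOTICE "%s: " fmt, __func__, ## args); \
	} while (0)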
diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO
new file mode 100644
index 000000000000..2938d35be5bb
--- /dev/null
+++ b/drivers/staging/ks7010/TODO
@@ -0,0 +1,36 @@
+KS7010 Linux driver
+===================
+
+This driver is based on source code from the Ben Nanonote extra repository [1],
+which is in turn based on the original v007 release from Renesas [2]. Some more
+background info about the chipset can be found here [3] and here [4]. Thank
+you to all who have already participated in cleaning up the driver so far!
+
+[1] http://projects.qi-hardware.com/index.php/p/openwrt-packages/source/tree/master/ks7010/src
+[2] http://downloads.qi-hardware.com/software/ks7010_sdio_v007.tar.bz2
+[3] http://en.qi-hardware.com/wiki/Ben_NanoNote_Wi-Fi
+[4] https://wikidevi.com/wiki/Renesas
+
+TODO
+----
+
+First, a few words on what not to do (at least not blindly):
+
+- don't be overly strict with the 80 char limit; only enforce it where doing
+  so REALLY makes the code more readable
+- No '#if 0/1' removal unless the surrounding code is understood and removal is
+ really OK. There might be some hints hidden there.
+
+Now the TODOs:
+
+- fix codechecker warnings (checkpatch, sparse, smatch). But PLEASE make sure
+  that you are not only silencing the warning but really fixing the code. You
+  should understand the change you submit.
+- fix the 'card removal' event that occurs when the card is inserted while
+  booting
+- check what other upstream wireless mechanisms can be used instead of the
+ custom ones here
+
+Please send any patches to:
+Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Wolfram Sang <wsa@the-dreams.de>
+Linux Driver Project Developer List <driverdev-devel@linuxdriverproject.org>
diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h
new file mode 100644
index 000000000000..16a392abdaec
--- /dev/null
+++ b/drivers/staging/ks7010/eap_packet.h
@@ -0,0 +1,129 @@
+#ifndef EAP_PACKET_H
+#define EAP_PACKET_H
+
+#define WBIT(n) (1 << (n))
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
+
+struct ether_hdr {
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_source[ETH_ALEN]; /* source ether addr */
+ unsigned char h_dest_snap;
+ unsigned char h_source_snap;
+ unsigned char h_command;
+ unsigned char h_vendor_id[3];
+ unsigned short h_proto; /* packet type ID field */
+#define ETHER_PROTOCOL_TYPE_EAP 0x888e
+#define ETHER_PROTOCOL_TYPE_IP 0x0800
+#define ETHER_PROTOCOL_TYPE_ARP 0x0806
+ /* followed by length octets of data */
+} __attribute__ ((packed));
+
+struct ieee802_1x_hdr {
+ unsigned char version;
+ unsigned char type;
+ unsigned short length;
+ /* followed by length octets of data */
+} __attribute__ ((packed));
+
+#define EAPOL_VERSION 2
+
+enum { IEEE802_1X_TYPE_EAP_PACKET = 0,
+ IEEE802_1X_TYPE_EAPOL_START = 1,
+ IEEE802_1X_TYPE_EAPOL_LOGOFF = 2,
+ IEEE802_1X_TYPE_EAPOL_KEY = 3,
+ IEEE802_1X_TYPE_EAPOL_ENCAPSULATED_ASF_ALERT = 4
+};
+
+enum { EAPOL_KEY_TYPE_RC4 = 1, EAPOL_KEY_TYPE_RSN = 2,
+ EAPOL_KEY_TYPE_WPA = 254
+};
+
+#define IEEE8021X_REPLAY_COUNTER_LEN 8
+#define IEEE8021X_KEY_SIGN_LEN 16
+#define IEEE8021X_KEY_IV_LEN 16
+
+#define IEEE8021X_KEY_INDEX_FLAG 0x80
+#define IEEE8021X_KEY_INDEX_MASK 0x03
+
+struct ieee802_1x_eapol_key {
+ unsigned char type;
+ unsigned short key_length;
+ /* does not repeat within the life of the keying material used to
+ * encrypt the Key field; 64-bit NTP timestamp MAY be used here */
+ unsigned char replay_counter[IEEE8021X_REPLAY_COUNTER_LEN];
+ unsigned char key_iv[IEEE8021X_KEY_IV_LEN]; /* cryptographically random number */
+ unsigned char key_index; /* key flag in the most significant bit:
+ * 0 = broadcast (default key),
+ * 1 = unicast (key mapping key); key index is in the
+ * 7 least significant bits */
+ /* HMAC-MD5 message integrity check computed with MS-MPPE-Send-Key as
+ * the key */
+ unsigned char key_signature[IEEE8021X_KEY_SIGN_LEN];
+
+ /* followed by key: if packet body length = 44 + key length, then the
+ * key field (of key_length bytes) contains the key in encrypted form;
+ * if packet body length = 44, key field is absent and key_length
+ * represents the number of least significant octets from
+ * MS-MPPE-Send-Key attribute to be used as the keying material;
+ * RC4 key used in encryption = Key-IV + MS-MPPE-Recv-Key */
+} __attribute__ ((packed));
+
+#define WPA_NONCE_LEN 32
+#define WPA_REPLAY_COUNTER_LEN 8
+
+struct wpa_eapol_key {
+ unsigned char type;
+ unsigned short key_info;
+ unsigned short key_length;
+ unsigned char replay_counter[WPA_REPLAY_COUNTER_LEN];
+ unsigned char key_nonce[WPA_NONCE_LEN];
+ unsigned char key_iv[16];
+ unsigned char key_rsc[8];
+ unsigned char key_id[8]; /* Reserved in IEEE 802.11i/RSN */
+ unsigned char key_mic[16];
+ unsigned short key_data_length;
+ /* followed by key_data_length bytes of key_data */
+} __attribute__ ((packed));
+
+#define WPA_KEY_INFO_TYPE_MASK (WBIT(0) | WBIT(1) | WBIT(2))
+#define WPA_KEY_INFO_TYPE_HMAC_MD5_RC4 WBIT(0)
+#define WPA_KEY_INFO_TYPE_HMAC_SHA1_AES WBIT(1)
+#define WPA_KEY_INFO_KEY_TYPE WBIT(3) /* 1 = Pairwise, 0 = Group key */
+/* bit4..5 is used in WPA, but is reserved in IEEE 802.11i/RSN */
+#define WPA_KEY_INFO_KEY_INDEX_MASK (WBIT(4) | WBIT(5))
+#define WPA_KEY_INFO_KEY_INDEX_SHIFT 4
+#define WPA_KEY_INFO_INSTALL WBIT(6) /* pairwise */
+#define WPA_KEY_INFO_TXRX WBIT(6) /* group */
+#define WPA_KEY_INFO_ACK WBIT(7)
+#define WPA_KEY_INFO_MIC WBIT(8)
+#define WPA_KEY_INFO_SECURE WBIT(9)
+#define WPA_KEY_INFO_ERROR WBIT(10)
+#define WPA_KEY_INFO_REQUEST WBIT(11)
+#define WPA_KEY_INFO_ENCR_KEY_DATA WBIT(12) /* IEEE 802.11i/RSN only */
+
+#define WPA_CAPABILITY_PREAUTH WBIT(0)
+
+#define GENERIC_INFO_ELEM 0xdd
+#define RSN_INFO_ELEM 0x30
+
+enum {
+ REASON_UNSPECIFIED = 1,
+ REASON_DEAUTH_LEAVING = 3,
+ REASON_INVALID_IE = 13,
+ REASON_MICHAEL_MIC_FAILURE = 14,
+ REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
+ REASON_GROUP_KEY_UPDATE_TIMEOUT = 16,
+ REASON_IE_IN_4WAY_DIFFERS = 17,
+ REASON_GROUP_CIPHER_NOT_VALID = 18,
+ REASON_PAIRWISE_CIPHER_NOT_VALID = 19,
+ REASON_AKMP_NOT_VALID = 20,
+ REASON_UNSUPPORTED_RSN_IE_VERSION = 21,
+ REASON_INVALID_RSN_IE_CAPAB = 22,
+ REASON_IEEE_802_1X_AUTH_FAILED = 23,
+ REASON_CIPHER_SUITE_REJECTED = 24
+};
+
+#endif /* EAP_PACKET_H */
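As an illustration of the key_info bit definitions above, a hedged sketch of decoding the field from a received wpa_eapol_key; treating the raw unsigned short as big endian is an assumption here, since the header itself does no byte-order conversion:

	u16 info = get_unaligned_be16(&key->key_info);	/* assumed EAPOL byte order */
	bool pairwise = info & WPA_KEY_INFO_KEY_TYPE;	/* 1 = pairwise, 0 = group */
	int key_idx = (info & WPA_KEY_INFO_KEY_INDEX_MASK) >>
		      WPA_KEY_INFO_KEY_INDEX_SHIFT;
	bool has_mic = info & WPA_KEY_INFO_MIC;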
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
new file mode 100644
index 000000000000..b7337fd813d5
--- /dev/null
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -0,0 +1,1236 @@
+/*
+ * Driver for KeyStream, KS7010 based SDIO cards.
+ *
+ * Copyright (C) 2006-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/firmware.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+
+#include "ks_wlan.h"
+#include "ks_wlan_ioctl.h"
+#include "ks_hostif.h"
+#include "ks7010_sdio.h"
+
+#define KS7010_FUNC_NUM 1
+#define KS7010_IO_BLOCK_SIZE 512
+#define KS7010_MAX_CLOCK 25000000
+
+static const struct sdio_device_id ks7010_sdio_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_A, SDIO_DEVICE_ID_KS_7010)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_B, SDIO_DEVICE_ID_KS_7010)},
+ { /* all zero */ }
+};
+MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
+
+/* macro */
+
+#define inc_txqhead(priv) \
+ (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE)
+#define inc_txqtail(priv) \
+ (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE)
+#define cnt_txqbody(priv) \
+ (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE)
+
+#define inc_rxqhead(priv) \
+ (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE)
+#define inc_rxqtail(priv) \
+ (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE)
+#define cnt_rxqbody(priv) \
+ (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE)
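These macros implement a circular buffer in which adding the ring size before the subtraction keeps the C '%' result non-negative after qtail wraps. A worked example, assuming a ring of 8 slots purely for illustration:

	/* qhead = 6, qtail = 2 (qtail has wrapped past the end):
	 *   cnt_txqbody() -> ((2 + 8) - 6) % 8 = 4
	 * i.e. slots 6, 7, 0 and 1 are occupied. One slot is always kept
	 * free (see enqueue_txdev() below), so qhead == qtail
	 * unambiguously means "empty".
	 */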
+
+static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address,
+ unsigned char *buffer, int length)
+{
+ struct ks_sdio_card *card;
+ int rc;
+
+ card = priv->ks_wlan_hw.sdio_card;
+
+ if (length == 1) /* CMD52 */
+ *buffer = sdio_readb(card->func, address, &rc);
+ else /* CMD53 multi-block transfer */
+ rc = sdio_memcpy_fromio(card->func, buffer, address, length);
+
+ if (rc != 0)
+ DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
+
+ return rc;
+}
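Note that the length argument alone selects the SDIO transfer type: a single byte goes out as CMD52 via sdio_readb(), anything longer as a CMD53 block transfer. A hedged usage sketch, reusing registers that appear elsewhere in this file:

	unsigned char status;

	/* one register byte -> CMD52 */
	retval = ks7010_sdio_read(priv, INT_PENDING, &status, sizeof(status));

	/* bulk frame data -> CMD53 block transfer; hif_align_size() pads
	 * the length as ks_wlan_hw_rx() below does */
	retval = ks7010_sdio_read(priv, DATA_WINDOW, rx_buffer->data,
				  hif_align_size(size));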
+
+static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address,
+ unsigned char *buffer, int length)
+{
+ struct ks_sdio_card *card;
+ int rc;
+
+ card = priv->ks_wlan_hw.sdio_card;
+
+ if (length == 1) /* CMD52 */
+ sdio_writeb(card->func, *buffer, (unsigned int)address, &rc);
+ else /* CMD53 */
+ rc = sdio_memcpy_toio(card->func, (unsigned int)address, buffer,
+ length);
+
+ if (rc != 0)
+ DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
+
+ return rc;
+}
+
+void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
+{
+ unsigned char rw_data;
+ int retval;
+
+ DPRINTK(4, "\n");
+
+ /* clear request */
+ atomic_set(&priv->sleepstatus.doze_request, 0);
+
+ if (atomic_read(&priv->sleepstatus.status) == 0) {
+ rw_data = GCR_B_DOZE;
+ retval =
+ ks7010_sdio_write(priv, GCR_B, &rw_data, sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : GCR_B=%02X\n", rw_data);
+ goto out;
+ }
+ DPRINTK(4, "PMG SET!! : GCR_B=%02X\n", rw_data);
+ DPRINTK(3, "sleep_mode=SLP_SLEEP\n");
+ atomic_set(&priv->sleepstatus.status, 1);
+ priv->last_doze = jiffies;
+ } else {
+ DPRINTK(1, "sleep_mode=%d\n", priv->sleep_mode);
+ }
+
+ out:
+ priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
+}
+
+void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
+{
+ unsigned char rw_data;
+ int retval;
+
+ DPRINTK(4, "\n");
+
+ /* clear request */
+ atomic_set(&priv->sleepstatus.wakeup_request, 0);
+
+ if (atomic_read(&priv->sleepstatus.status) == 1) {
+ rw_data = WAKEUP_REQ;
+ retval =
+ ks7010_sdio_write(priv, WAKEUP, &rw_data, sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : WAKEUP=%02X\n", rw_data);
+ goto out;
+ }
+ DPRINTK(4, "wake up : WAKEUP=%02X\n", rw_data);
+ atomic_set(&priv->sleepstatus.status, 0);
+ priv->last_wakeup = jiffies;
+ ++priv->wakeup_count;
+ } else {
+ DPRINTK(1, "sleep_mode=%d\n", priv->sleep_mode);
+ }
+
+ out:
+ priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
+}
+
+void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
+{
+ unsigned char rw_data;
+ int retval;
+
+ DPRINTK(4, "\n");
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ rw_data = WAKEUP_REQ;
+ retval =
+ ks7010_sdio_write(priv, WAKEUP, &rw_data, sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : WAKEUP=%02X\n", rw_data);
+ }
+ DPRINTK(4, "wake up : WAKEUP=%02X\n", rw_data);
+ priv->last_wakeup = jiffies;
+ ++priv->wakeup_count;
+ } else {
+ DPRINTK(1, "psstatus=%d\n",
+ atomic_read(&priv->psstatus.status));
+ }
+}
+
+int _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
+{
+ int rc = 0;
+ unsigned char rw_data;
+ int retval;
+
+ if (priv->reg.powermgt == POWMGT_ACTIVE_MODE)
+ return rc;
+
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE &&
+ (priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+
+ //DPRINTK(1,"psstatus.status=%d\n",atomic_read(&priv->psstatus.status));
+ if (priv->dev_state == DEVICE_STATE_SLEEP) {
+ switch (atomic_read(&priv->psstatus.status)) {
+ case PS_SNOOZE: /* 4 */
+ break;
+ default:
+ DPRINTK(5, "\npsstatus.status=%d\npsstatus.confirm_wait=%d\npsstatus.snooze_guard=%d\ncnt_txqbody=%d\n",
+ atomic_read(&priv->psstatus.status),
+ atomic_read(&priv->psstatus.confirm_wait),
+ atomic_read(&priv->psstatus.snooze_guard),
+ cnt_txqbody(priv));
+
+ if (!atomic_read(&priv->psstatus.confirm_wait)
+ && !atomic_read(&priv->psstatus.snooze_guard)
+ && !cnt_txqbody(priv)) {
+ retval =
+ ks7010_sdio_read(priv, INT_PENDING,
+ &rw_data,
+ sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1,
+ " error : INT_PENDING=%02X\n",
+ rw_data);
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ break;
+ }
+ if (!rw_data) {
+ rw_data = GCR_B_DOZE;
+ retval =
+ ks7010_sdio_write(priv,
+ GCR_B,
+ &rw_data,
+ sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1,
+ " error : GCR_B=%02X\n",
+ rw_data);
+ queue_delayed_work
+ (priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ break;
+ }
+ DPRINTK(4,
+ "PMG SET!! : GCR_B=%02X\n",
+ rw_data);
+ atomic_set(&priv->psstatus.
+ status, PS_SNOOZE);
+ DPRINTK(3,
+ "psstatus.status=PS_SNOOZE\n");
+ } else {
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ }
+ } else {
+ queue_delayed_work(priv->ks_wlan_hw.
+ ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq,
+ 0);
+ }
+ break;
+ }
+ }
+
+ }
+
+ return rc;
+}
+
+int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
+{
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ return 0;
+}
+
+static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
+ unsigned long size,
+ void (*complete_handler) (void *arg1, void *arg2),
+ void *arg1, void *arg2)
+{
+ struct tx_device_buffer *sp;
+
+ if (priv->dev_state < DEVICE_STATE_BOOT) {
+ kfree(p);
+ if (complete_handler != NULL)
+ (*complete_handler) (arg1, arg2);
+ return 1;
+ }
+
+ if ((TX_DEVICE_BUFF_SIZE - 1) <= cnt_txqbody(priv)) {
+ /* in case of buffer overflow */
+ DPRINTK(1, "tx buffer overflow\n");
+ kfree(p);
+ if (complete_handler != NULL)
+ (*complete_handler) (arg1, arg2);
+ return 1;
+ }
+
+ sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qtail];
+ sp->sendp = p;
+ sp->size = size;
+ sp->complete_handler = complete_handler;
+ sp->arg1 = arg1;
+ sp->arg2 = arg2;
+ inc_txqtail(priv);
+
+ return 0;
+}
+
+/* write data */
+static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
+ unsigned long size)
+{
+ int retval;
+ unsigned char rw_data;
+ struct hostif_hdr *hdr;
+
+ hdr = (struct hostif_hdr *)buffer;
+
+ DPRINTK(4, "size=%d\n", hdr->size);
+ if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
+ DPRINTK(1, "unknown event=%04X\n", hdr->event);
+ return 0;
+ }
+
+ retval = ks7010_sdio_write(priv, DATA_WINDOW, buffer, size);
+ if (retval) {
+ DPRINTK(1, " write error : retval=%d\n", retval);
+ return -4;
+ }
+
+ rw_data = WRITE_STATUS_BUSY;
+ retval =
+ ks7010_sdio_write(priv, WRITE_STATUS, &rw_data, sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : WRITE_STATUS=%02X\n", rw_data);
+ return -3;
+ }
+
+ return 0;
+}
+
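+/*
+ * Note (illustrative): the write handshake is a two step protocol.  The
+ * frame is copied into the shared DATA_WINDOW, then WRITE_STATUS is set
+ * to WRITE_STATUS_BUSY to hand the window to the firmware; the firmware
+ * signals via INT_WRITE_STATUS / the WSTATUS_MASK bit when the window
+ * is free for the next frame.
+ */
+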
+static void tx_device_task(void *dev)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+ struct tx_device_buffer *sp;
+ int rc = 0;
+
+ DPRINTK(4, "\n");
+ if (cnt_txqbody(priv) > 0
+ && atomic_read(&priv->psstatus.status) != PS_SNOOZE) {
+ sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
+ if (priv->dev_state >= DEVICE_STATE_BOOT) {
+ rc = write_to_device(priv, sp->sendp, sp->size);
+ if (rc) {
+ DPRINTK(1, "write_to_device error !!(%d)\n",
+ rc);
+ queue_delayed_work(priv->ks_wlan_hw.
+ ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ return;
+ }
+
+ }
+ kfree(sp->sendp); /* allocated memory free */
+ if (sp->complete_handler != NULL) /* TX Complete */
+ (*sp->complete_handler) (sp->arg1, sp->arg2);
+ inc_txqhead(priv);
+
+ if (cnt_txqbody(priv) > 0) {
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 0);
+ }
+ }
+ return;
+}
+
+int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
+ void (*complete_handler) (void *arg1, void *arg2),
+ void *arg1, void *arg2)
+{
+ int result = 0;
+ struct hostif_hdr *hdr;
+ hdr = (struct hostif_hdr *)p;
+
+ if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
+ DPRINTK(1, "unknown event=%04X\n", hdr->event);
+ return 0;
+ }
+
+ /* add event to hostt buffer */
+ priv->hostt.buff[priv->hostt.qtail] = hdr->event;
+ priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
+
+ DPRINTK(4, "event=%04X\n", hdr->event);
+ spin_lock(&priv->tx_dev.tx_dev_lock);
+ result = enqueue_txdev(priv, p, size, complete_handler, arg1, arg2);
+ spin_unlock(&priv->tx_dev.tx_dev_lock);
+
+ if (cnt_txqbody(priv) > 0) {
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 0);
+ }
+ return result;
+}
+
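+/*
+ * Note (illustrative): priv->hostt appears to be a small trace ring that
+ * remembers the last SME_EVENT_BUFF_SIZE host interface events for
+ * debugging; entries are silently overwritten once the index wraps.
+ */
+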
+static void rx_event_task(unsigned long dev)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+ struct rx_device_buffer *rp;
+
+ DPRINTK(4, "\n");
+
+ if (cnt_rxqbody(priv) > 0 && priv->dev_state >= DEVICE_STATE_BOOT) {
+ rp = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qhead];
+ hostif_receive(priv, rp->data, rp->size);
+ inc_rxqhead(priv);
+
+ if (cnt_rxqbody(priv) > 0) {
+ tasklet_schedule(&priv->ks_wlan_hw.rx_bh_task);
+ }
+ }
+
+ return;
+}
+
+static void ks_wlan_hw_rx(void *dev, uint16_t size)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+ int retval;
+ struct rx_device_buffer *rx_buffer;
+ struct hostif_hdr *hdr;
+ unsigned char read_status;
+ unsigned short event = 0;
+
+ DPRINTK(4, "\n");
+
+ /* receive data */
+ if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
+ /* in case of buffer overflow */
+ DPRINTK(1, "rx buffer overflow \n");
+ goto error_out;
+ }
+ rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];
+
+ retval =
+ ks7010_sdio_read(priv, DATA_WINDOW, &rx_buffer->data[0],
+ hif_align_size(size));
+ if (retval) {
+ goto error_out;
+ }
+
+ /* length check */
+ if (size > 2046 || size == 0) {
+#ifdef KS_WLAN_DEBUG
+ if (KS_WLAN_DEBUG > 5)
+ print_hex_dump_bytes("INVALID DATA dump: ",
+ DUMP_PREFIX_OFFSET,
+ rx_buffer->data, 32);
+#endif
+ /* rx_status update */
+ read_status = READ_STATUS_IDLE;
+ retval =
+ ks7010_sdio_write(priv, READ_STATUS, &read_status,
+ sizeof(read_status));
+ if (retval) {
+ DPRINTK(1, " error : READ_STATUS=%02X\n", read_status);
+ }
+ goto error_out;
+ }
+
+ hdr = (struct hostif_hdr *)&rx_buffer->data[0];
+ rx_buffer->size = le16_to_cpu(hdr->size) + sizeof(hdr->size);
+ event = hdr->event;
+ inc_rxqtail(priv);
+
+ /* read status update */
+ read_status = READ_STATUS_IDLE;
+ retval =
+ ks7010_sdio_write(priv, READ_STATUS, &read_status,
+ sizeof(read_status));
+ if (retval) {
+ DPRINTK(1, " error : READ_STATUS=%02X\n", read_status);
+ }
+ DPRINTK(4, "READ_STATUS=%02X\n", read_status);
+
+ if (atomic_read(&priv->psstatus.confirm_wait)) {
+ if (IS_HIF_CONF(event)) {
+ DPRINTK(4, "IS_HIF_CONF true !!\n");
+ atomic_dec(&priv->psstatus.confirm_wait);
+ }
+ }
+
+ /* rx_event_task((void *)priv); */
+ tasklet_schedule(&priv->ks_wlan_hw.rx_bh_task);
+
+ error_out:
+ return;
+}
+
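+/*
+ * Note (illustrative): the rx handshake mirrors the tx side.  The frame
+ * is read out of DATA_WINDOW first, then READ_STATUS is written back to
+ * READ_STATUS_IDLE to return the window to the firmware, on both the
+ * bad-length path and the success path.
+ */
+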
+static void ks7010_rw_function(struct work_struct *work)
+{
+ struct hw_info_t *hw;
+ struct ks_wlan_private *priv;
+ unsigned char rw_data;
+ int retval;
+
+ hw = container_of(work, struct hw_info_t, rw_wq.work);
+ priv = container_of(hw, struct ks_wlan_private, ks_wlan_hw);
+
+ DPRINTK(4, "\n");
+
+ /* wait after DOZE */
+ if (time_after(priv->last_doze + ((30 * HZ) / 1000), jiffies)) {
+ DPRINTK(4, "wait after DOZE\n");
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ return;
+ }
+
+ /* wait after WAKEUP */
+ while (time_after(priv->last_wakeup + ((30 * HZ) / 1000), jiffies)) {
+ DPRINTK(4, "wait after WAKEUP\n");
+/* queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,&priv->ks_wlan_hw.rw_wq,
+ (priv->last_wakeup + ((30*HZ)/1000) - jiffies));*/
+ printk("wake: %lu %lu\n", priv->last_wakeup + (30 * HZ) / 1000,
+ jiffies);
+ msleep(30);
+ }
+
+ sdio_claim_host(priv->ks_wlan_hw.sdio_card->func);
+
+ /* power save wakeup */
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ if (cnt_txqbody(priv) > 0) {
+ ks_wlan_hw_wakeup_request(priv);
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ }
+ goto err_out;
+ }
+
+ /* sleep mode doze */
+ if (atomic_read(&priv->sleepstatus.doze_request) == 1) {
+ ks_wlan_hw_sleep_doze_request(priv);
+ goto err_out;
+ }
+ /* sleep mode wakeup */
+ if (atomic_read(&priv->sleepstatus.wakeup_request) == 1) {
+ ks_wlan_hw_sleep_wakeup_request(priv);
+ goto err_out;
+ }
+
+ /* read (WriteStatus/ReadDataSize FN1:00_0014) */
+ retval =
+ ks7010_sdio_read(priv, WSTATUS_RSIZE, &rw_data, sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : WSTATUS_RSIZE=%02X psstatus=%d\n", rw_data,
+ atomic_read(&priv->psstatus.status));
+ goto err_out;
+ }
+ DPRINTK(4, "WSTATUS_RSIZE=%02X\n", rw_data);
+
+ if (rw_data & RSIZE_MASK) { /* Read schedule */
+ ks_wlan_hw_rx((void *)priv,
+ (uint16_t) (((rw_data & RSIZE_MASK) << 4)));
+ }
+ if ((rw_data & WSTATUS_MASK)) {
+ tx_device_task((void *)priv);
+ }
+ _ks_wlan_hw_power_save(priv);
+
+ err_out:
+ sdio_release_host(priv->ks_wlan_hw.sdio_card->func);
+
+ return;
+}
+
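+/*
+ * Worked example (illustrative) of the WSTATUS_RSIZE decode used above:
+ * a register value of 0xB5 has bit 7 set (WSTATUS_MASK, the tx window
+ * is writable) and a size field of 0x35, so the pending rx frame is
+ * (0x35 << 4) = 848 bytes; the hardware reports read sizes in 16 byte
+ * units.
+ */
+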
+static void ks_sdio_interrupt(struct sdio_func *func)
+{
+ int retval;
+ struct ks_sdio_card *card;
+ struct ks_wlan_private *priv;
+ unsigned char status, rsize, rw_data;
+
+ card = sdio_get_drvdata(func);
+ priv = card->priv;
+ DPRINTK(4, "\n");
+
+ if (priv->dev_state >= DEVICE_STATE_BOOT) {
+ retval =
+ ks7010_sdio_read(priv, INT_PENDING, &status,
+ sizeof(status));
+ if (retval) {
+ DPRINTK(1, "read INT_PENDING Failed!!(%d)\n", retval);
+ goto intr_out;
+ }
+ DPRINTK(4, "INT_PENDING=%02X\n", rw_data);
+
+ /* schedule task for interrupt status */
+ /* bit7 -> Write General Communication B register */
+ /* read (General Communication B register) */
+ /* bit5 -> Write Status Idle */
+ /* bit2 -> Read Status Busy */
+ if (status & INT_GCR_B
+ || atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ retval =
+ ks7010_sdio_read(priv, GCR_B, &rw_data,
+ sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : GCR_B=%02X\n", rw_data);
+ goto intr_out;
+ }
+ /* DPRINTK(1, "GCR_B=%02X\n", rw_data); */
+ if (rw_data == GCR_B_ACTIVE) {
+ if (atomic_read(&priv->psstatus.status) ==
+ PS_SNOOZE) {
+ atomic_set(&priv->psstatus.status,
+ PS_WAKEUP);
+ priv->wakeup_count = 0;
+ }
+ complete(&priv->psstatus.wakeup_wait);
+ }
+
+ }
+
+ do {
+ /* read (WriteStatus/ReadDataSize FN1:00_0014) */
+ retval =
+ ks7010_sdio_read(priv, WSTATUS_RSIZE, &rw_data,
+ sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : WSTATUS_RSIZE=%02X\n",
+ rw_data);
+ goto intr_out;
+ }
+ DPRINTK(4, "WSTATUS_RSIZE=%02X\n", rw_data);
+ rsize = rw_data & RSIZE_MASK;
+ if (rsize) { /* Read schedule */
+ ks_wlan_hw_rx((void *)priv,
+ (uint16_t) (((rsize) << 4)));
+ }
+ if (rw_data & WSTATUS_MASK) {
+#if 0
+ if (status & INT_WRITE_STATUS
+ && !cnt_txqbody(priv)) {
+ /* dummy write for interrupt clear */
+ rw_data = 0;
+ retval =
+ ks7010_sdio_write(priv, DATA_WINDOW,
+ &rw_data,
+ sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1,
+ "write DATA_WINDOW Failed!!(%d)\n",
+ retval);
+ }
+ status &= ~INT_WRITE_STATUS;
+ } else {
+#endif
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ if (cnt_txqbody(priv)) {
+ ks_wlan_hw_wakeup_request(priv);
+ queue_delayed_work
+ (priv->ks_wlan_hw.
+ ks7010sdio_wq,
+ &priv->ks_wlan_hw.
+ rw_wq, 1);
+ return;
+ }
+ } else {
+ tx_device_task((void *)priv);
+ }
+#if 0
+ }
+#endif
+ }
+ } while (rsize);
+ }
+
+ intr_out:
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 0);
+ return;
+}
+
+static int trx_device_init(struct ks_wlan_private *priv)
+{
+ /* initialize values (tx) */
+ priv->tx_dev.qtail = priv->tx_dev.qhead = 0;
+
+ /* initialize values (rx) */
+ priv->rx_dev.qtail = priv->rx_dev.qhead = 0;
+
+ /* initialize spinLock (tx,rx) */
+ spin_lock_init(&priv->tx_dev.tx_dev_lock);
+ spin_lock_init(&priv->rx_dev.rx_dev_lock);
+
+ tasklet_init(&priv->ks_wlan_hw.rx_bh_task, rx_event_task,
+ (unsigned long)priv);
+
+ return 0;
+}
+
+static void trx_device_exit(struct ks_wlan_private *priv)
+{
+ struct tx_device_buffer *sp;
+
+ /* tx buffer clear */
+ while (cnt_txqbody(priv) > 0) {
+ sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
+ kfree(sp->sendp); /* allocated memory free */
+ if (sp->complete_handler != NULL) /* TX Complete */
+ (*sp->complete_handler) (sp->arg1, sp->arg2);
+ inc_txqhead(priv);
+ }
+
+ tasklet_kill(&priv->ks_wlan_hw.rx_bh_task);
+
+ return;
+}
+
+static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
+{
+ int rc = 0;
+ int retval;
+ unsigned char *data_buf;
+
+ data_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ if (!data_buf) {
+ rc = 1;
+ goto error_out;
+ }
+
+ memcpy(data_buf, &index, sizeof(index));
+ retval = ks7010_sdio_write(priv, WRITE_INDEX, data_buf, sizeof(index));
+ if (retval) {
+ rc = 2;
+ goto error_out;
+ }
+
+ retval = ks7010_sdio_write(priv, READ_INDEX, data_buf, sizeof(index));
+ if (retval) {
+ rc = 3;
+ goto error_out;
+ }
+ error_out:
+ kfree(data_buf);
+ return rc;
+}
+
+#define ROM_BUFF_SIZE (64*1024)
+static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
+ unsigned char *data, unsigned int size)
+{
+ int rc = 0;
+ int retval;
+ unsigned char *read_buf;
+ read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
+ if (!read_buf) {
+ rc = 1;
+ goto error_out;
+ }
+ retval = ks7010_sdio_read(priv, address, read_buf, size);
+ if (retval) {
+ rc = 2;
+ goto error_out;
+ }
+ retval = memcmp(data, read_buf, size);
+
+ if (retval) {
+ DPRINTK(0, "data compare error (%d) \n", retval);
+ rc = 3;
+ goto error_out;
+ }
+ error_out:
+ kfree(read_buf);
+ return rc;
+}
+
+static int ks7010_upload_firmware(struct ks_wlan_private *priv,
+ struct ks_sdio_card *card)
+{
+ unsigned int size, offset, n = 0;
+ unsigned char *rom_buf;
+ unsigned char rw_data = 0;
+ int retval, rc = 0;
+ int length;
+ const struct firmware *fw_entry = NULL;
+
+ /* buffer allocate */
+ rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
+ if (!rom_buf) {
+ rc = 3;
+ goto error_out0;
+ }
+
+ sdio_claim_host(card->func);
+
+ /* Firmware running ? */
+ retval = ks7010_sdio_read(priv, GCR_A, &rw_data, sizeof(rw_data));
+ if (!retval && rw_data == GCR_A_RUN) {
+ DPRINTK(0, "MAC firmware running ...\n");
+ rc = 0;
+ goto error_out0;
+ }
+
+ retval = request_firmware(&fw_entry, ROM_FILE,
+ &priv->ks_wlan_hw.sdio_card->func->dev);
+ if (retval) {
+ /* release the claimed host and rom_buf instead of leaking them */
+ rc = retval;
+ goto error_out0;
+ }
+
+ length = fw_entry->size;
+
+ /* Load Program */
+ n = 0;
+ do {
+ if (length >= ROM_BUFF_SIZE) {
+ size = ROM_BUFF_SIZE;
+ length = length - ROM_BUFF_SIZE;
+ } else {
+ size = length;
+ length = 0;
+ }
+ DPRINTK(4, "size = %d\n", size);
+ if (size == 0)
+ break;
+ memcpy(rom_buf, fw_entry->data + n, size);
+ /* Update write index */
+ offset = n;
+ retval =
+ ks7010_sdio_update_index(priv,
+ KS7010_IRAM_ADDRESS + offset);
+ if (retval) {
+ rc = 6;
+ goto error_out1;
+ }
+
+ /* Write data */
+ retval = ks7010_sdio_write(priv, DATA_WINDOW, rom_buf, size);
+ if (retval) {
+ rc = 8;
+ goto error_out1;
+ }
+
+ /* compare */
+ retval =
+ ks7010_sdio_data_compare(priv, DATA_WINDOW, rom_buf, size);
+ if (retval) {
+ rc = 9;
+ goto error_out1;
+ }
+ n += size;
+
+ } while (size);
+
+ /* Remap request */
+ rw_data = GCR_A_REMAP;
+ retval = ks7010_sdio_write(priv, GCR_A, &rw_data, sizeof(rw_data));
+ if (retval) {
+ rc = 11;
+ goto error_out1;
+ }
+ DPRINTK(4, " REMAP Request : GCR_A=%02X\n", rw_data);
+
+ /* Firmware running check */
+ for (n = 0; n < 50; ++n) {
+ mdelay(10); /* wait_ms(10); */
+ retval =
+ ks7010_sdio_read(priv, GCR_A, &rw_data, sizeof(rw_data));
+ if (retval) {
+ rc = 11;
+ goto error_out1;
+ }
+ if (rw_data == GCR_A_RUN)
+ break;
+ }
+ DPRINTK(4, "firmware wakeup (%d)!!!!\n", n);
+ if (n >= 50) {
+ DPRINTK(1, "firmware can't start\n");
+ rc = 12;
+ goto error_out1;
+ }
+
+ rc = 0;
+
+ error_out1:
+ release_firmware(fw_entry);
+ error_out0:
+ sdio_release_host(card->func);
+ kfree(rom_buf);
+ return rc;
+}
+
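+/*
+ * Note (illustrative): the upload loop above streams the image in
+ * ROM_BUFF_SIZE (64 KiB) chunks: point the write/read index registers
+ * at KS7010_IRAM_ADDRESS + offset, push the chunk through DATA_WINDOW,
+ * read it back for a byte compare, then advance.  GCR_A_REMAP starts
+ * execution and GCR_A is polled up to 50 times at 10 ms intervals for
+ * GCR_A_RUN.
+ */
+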
+static void ks7010_card_init(struct ks_wlan_private *priv)
+{
+ DPRINTK(5, "\ncard_init_task()\n");
+
+ /* init_waitqueue_head(&priv->confirm_wait); */
+ init_completion(&priv->confirm_wait);
+
+ DPRINTK(5, "init_completion()\n");
+
+ /* get mac address & firmware version */
+ hostif_sme_enqueue(priv, SME_START);
+
+ DPRINTK(5, "hostif_sme_enqueu()\n");
+
+ if (!wait_for_completion_interruptible_timeout
+ (&priv->confirm_wait, 5 * HZ)) {
+ DPRINTK(1, "wait time out!! SME_START\n");
+ }
+
+ if (priv->mac_address_valid && priv->version_size) {
+ priv->dev_state = DEVICE_STATE_PREINIT;
+ }
+
+ hostif_sme_enqueue(priv, SME_GET_EEPROM_CKSUM);
+
+ /* load initial wireless parameter */
+ hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST);
+ hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST);
+ hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST);
+ hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST);
+ hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST);
+ hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
+ hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST);
+ hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST);
+ hostif_sme_enqueue(priv, SME_START_REQUEST);
+
+ if (!wait_for_completion_interruptible_timeout
+ (&priv->confirm_wait, 5 * HZ)) {
+ DPRINTK(1, "wait time out!! wireless parameter set\n");
+ }
+
+ if (priv->dev_state >= DEVICE_STATE_PREINIT) {
+ DPRINTK(1, "DEVICE READY!!\n");
+ priv->dev_state = DEVICE_STATE_READY;
+ } else {
+ DPRINTK(1, "dev_state=%d\n", priv->dev_state);
+ }
+}
+
+static void ks7010_init_defaults(struct ks_wlan_private *priv)
+{
+ priv->reg.tx_rate = TX_RATE_AUTO;
+ priv->reg.preamble = LONG_PREAMBLE;
+ priv->reg.powermgt = POWMGT_ACTIVE_MODE;
+ priv->reg.scan_type = ACTIVE_SCAN;
+ priv->reg.beacon_lost_count = 20;
+ priv->reg.rts = 2347UL;
+ priv->reg.fragment = 2346UL;
+ priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
+ priv->reg.cts_mode = CTS_MODE_FALSE;
+ priv->reg.rate_set.body[11] = TX_RATE_54M;
+ priv->reg.rate_set.body[10] = TX_RATE_48M;
+ priv->reg.rate_set.body[9] = TX_RATE_36M;
+ priv->reg.rate_set.body[8] = TX_RATE_18M;
+ priv->reg.rate_set.body[7] = TX_RATE_9M;
+ priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
+ priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
+ priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
+ priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
+ priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
+ priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
+ priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
+ priv->reg.tx_rate = TX_RATE_FULL_AUTO;
+ priv->reg.rate_set.size = 12;
+}
+
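+/*
+ * Note (illustrative, assuming the standard 802.11 supported rates
+ * encoding): TX_RATE_* values are in 500 kbit/s units (TX_RATE_54M is
+ * 108) and BASIC_RATE is the top bit marking a mandatory rate, so the
+ * table above advertises 1/2/5.5/11 and 6/12/24 Mbit/s as basic rates
+ * and 9/18/36/48/54 Mbit/s as optional ones.
+ */
+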
+static int ks7010_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *device)
+{
+ struct ks_wlan_private *priv;
+ struct ks_sdio_card *card;
+ struct net_device *netdev;
+ unsigned char rw_data;
+ int ret;
+
+ DPRINTK(5, "ks7010_sdio_probe()\n");
+
+ priv = NULL;
+ netdev = NULL;
+
+ /* initialize ks_sdio_card */
+ card = kzalloc(sizeof(struct ks_sdio_card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->func = func;
+ spin_lock_init(&card->lock);
+
+ /*** Initialize SDIO ***/
+ sdio_claim_host(func);
+
+ /* bus setting */
+ /* Issue config request to override clock rate */
+
+ /* function blocksize set */
+ ret = sdio_set_block_size(func, KS7010_IO_BLOCK_SIZE);
+ DPRINTK(5, "multi_block=%d sdio_set_block_size()=%d %d\n",
+ func->card->cccr.multi_block, func->cur_blksize, ret);
+
+ /* Allocate the slot current */
+
+ /* function enable */
+ ret = sdio_enable_func(func);
+ DPRINTK(5, "sdio_enable_func() %d\n", ret);
+ if (ret)
+ goto error_free_card;
+
+ /* interrupt disable */
+ sdio_writeb(func, 0, INT_ENABLE, &ret);
+ if (ret)
+ goto error_free_card;
+ sdio_writeb(func, 0xff, INT_PENDING, &ret);
+ if (ret)
+ goto error_disable_func;
+
+ /* setup interrupt handler */
+ ret = sdio_claim_irq(func, ks_sdio_interrupt);
+ if (ret)
+ goto error_disable_func;
+
+ sdio_release_host(func);
+
+ sdio_set_drvdata(func, card);
+
+ DPRINTK(5, "class = 0x%X, vendor = 0x%X, "
+ "device = 0x%X\n", func->class, func->vendor, func->device);
+
+ /* private memory allocate */
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (netdev == NULL) {
+ printk(KERN_ERR "ks7010 : Unable to alloc new net device\n");
+ goto error_release_irq;
+ }
+ if (dev_alloc_name(netdev, "wlan%d") < 0) {
+ printk(KERN_ERR "ks7010 : Couldn't get name!\n");
+ goto error_free_netdev;
+ }
+
+ priv = netdev_priv(netdev);
+
+ card->priv = priv;
+ SET_NETDEV_DEV(netdev, &card->func->dev); /* for create sysfs symlinks */
+
+ /* private memory initialize */
+ priv->ks_wlan_hw.sdio_card = card;
+ init_completion(&priv->ks_wlan_hw.ks7010_sdio_wait);
+ priv->ks_wlan_hw.read_buf = NULL;
+ priv->ks_wlan_hw.read_buf = kmalloc(RX_DATA_SIZE, GFP_KERNEL);
+ if (!priv->ks_wlan_hw.read_buf) {
+ goto error_free_netdev;
+ }
+ priv->dev_state = DEVICE_STATE_PREBOOT;
+ priv->net_dev = netdev;
+ priv->firmware_version[0] = '\0';
+ priv->version_size = 0;
+ priv->last_doze = jiffies; /* set current jiffies */
+ priv->last_wakeup = jiffies;
+ memset(&priv->nstats, 0, sizeof(priv->nstats));
+ memset(&priv->wstats, 0, sizeof(priv->wstats));
+
+ /* sleep mode */
+ atomic_set(&priv->sleepstatus.doze_request, 0);
+ atomic_set(&priv->sleepstatus.wakeup_request, 0);
+
+ trx_device_init(priv);
+ hostif_init(priv);
+ ks_wlan_net_start(netdev);
+
+ ks7010_init_defaults(priv);
+
+ /* Upload firmware */
+ ret = ks7010_upload_firmware(priv, card); /* firmware load */
+ if (ret) {
+ printk(KERN_ERR
+ "ks7010: firmware load failed !! retern code = %d\n",
+ ret);
+ goto error_free_read_buf;
+ }
+
+ /* interrupt setting */
+ /* clear Interrupt status write (ARMtoSD_InterruptPending FN1:00_0024) */
+ rw_data = 0xff;
+ sdio_claim_host(func);
+ ret = ks7010_sdio_write(priv, INT_PENDING, &rw_data, sizeof(rw_data));
+ sdio_release_host(func);
+ if (ret) {
+ DPRINTK(1, " error : INT_PENDING=%02X\n", rw_data);
+ }
+ DPRINTK(4, " clear Interrupt : INT_PENDING=%02X\n", rw_data);
+
+ /* enable ks7010sdio interrupt (INT_GCR_B|INT_READ_STATUS|INT_WRITE_STATUS) */
+ rw_data = (INT_GCR_B | INT_READ_STATUS | INT_WRITE_STATUS);
+ sdio_claim_host(func);
+ ret = ks7010_sdio_write(priv, INT_ENABLE, &rw_data, sizeof(rw_data));
+ sdio_release_host(func);
+ if (ret) {
+ DPRINTK(1, " error : INT_ENABLE=%02X\n", rw_data);
+ }
+ DPRINTK(4, " enable Interrupt : INT_ENABLE=%02X\n", rw_data);
+ priv->dev_state = DEVICE_STATE_BOOT;
+
+ priv->ks_wlan_hw.ks7010sdio_wq = create_workqueue("ks7010sdio_wq");
+ if (!priv->ks_wlan_hw.ks7010sdio_wq) {
+ DPRINTK(1, "create_workqueue failed !!\n");
+ goto error_free_read_buf;
+ }
+
+ INIT_DELAYED_WORK(&priv->ks_wlan_hw.rw_wq, ks7010_rw_function);
+ ks7010_card_init(priv);
+
+ ret = register_netdev(priv->net_dev);
+ if (ret)
+ goto error_free_read_buf;
+
+ return 0;
+
+ error_free_read_buf:
+ kfree(priv->ks_wlan_hw.read_buf);
+ priv->ks_wlan_hw.read_buf = NULL;
+ error_free_netdev:
+ free_netdev(netdev);
+ card->priv = NULL;
+ error_release_irq:
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ error_disable_func:
+ sdio_disable_func(func);
+ error_free_card:
+ sdio_release_host(func);
+ sdio_set_drvdata(func, NULL);
+ kfree(card);
+
+ return -ENODEV;
+}
+
+static void ks7010_sdio_remove(struct sdio_func *func)
+{
+ int ret;
+ struct ks_sdio_card *card;
+ struct ks_wlan_private *priv;
+ struct net_device *netdev;
+ DPRINTK(1, "ks7010_sdio_remove()\n");
+
+ card = sdio_get_drvdata(func);
+
+ if (card == NULL)
+ return;
+
+ DPRINTK(1, "priv = card->priv\n");
+ priv = card->priv;
+ netdev = priv->net_dev;
+ if (priv) {
+ ks_wlan_net_stop(netdev);
+ DPRINTK(1, "ks_wlan_net_stop\n");
+
+ /* interrupt disable */
+ sdio_claim_host(func);
+ sdio_writeb(func, 0, INT_ENABLE, &ret);
+ sdio_writeb(func, 0xff, INT_PENDING, &ret);
+ sdio_release_host(func);
+ DPRINTK(1, "interrupt disable\n");
+
+ /* send stop request to MAC */
+ {
+ struct hostif_stop_request_t *pp;
+ pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return; /* TODO: rework this to use a goto */
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t)
+ (sizeof(*pp) -
+ sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_STOP_REQ);
+
+ sdio_claim_host(func);
+ write_to_device(priv, (unsigned char *)pp,
+ hif_align_size(sizeof(*pp)));
+ sdio_release_host(func);
+ kfree(pp);
+ }
+ DPRINTK(1, "STOP Req\n");
+
+ if (priv->ks_wlan_hw.ks7010sdio_wq) {
+ flush_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);
+ destroy_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);
+ }
+ DPRINTK(1,
+ "destroy_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);\n");
+
+ hostif_exit(priv);
+ DPRINTK(1, "hostif_exit\n");
+
+ unregister_netdev(netdev);
+
+ trx_device_exit(priv);
+ kfree(priv->ks_wlan_hw.read_buf);
+ free_netdev(priv->net_dev);
+ card->priv = NULL;
+ }
+
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ DPRINTK(1, "sdio_release_irq()\n");
+ sdio_disable_func(func);
+ DPRINTK(1, "sdio_disable_func()\n");
+ sdio_release_host(func);
+
+ sdio_set_drvdata(func, NULL);
+
+ kfree(card);
+ DPRINTK(1, "kfree()\n");
+
+ DPRINTK(5, " Bye !!\n");
+ return;
+}
+
+static struct sdio_driver ks7010_sdio_driver = {
+ .name = "ks7010_sdio",
+ .id_table = ks7010_sdio_ids,
+ .probe = ks7010_sdio_probe,
+ .remove = ks7010_sdio_remove,
+};
+
+module_driver(ks7010_sdio_driver, sdio_register_driver, sdio_unregister_driver);
+MODULE_AUTHOR("Sang Engineering, Qi-Hardware, KeyStream");
+MODULE_DESCRIPTION("Driver for KeyStream KS7010 based SDIO cards");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(ROM_FILE);
diff --git a/drivers/staging/ks7010/ks7010_sdio.h b/drivers/staging/ks7010/ks7010_sdio.h
new file mode 100644
index 000000000000..c72064b48bd8
--- /dev/null
+++ b/drivers/staging/ks7010/ks7010_sdio.h
@@ -0,0 +1,147 @@
+/*
+ * Driver for KeyStream, KS7010 based SDIO cards.
+ *
+ * Copyright (C) 2006-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _KS7010_SDIO_H
+#define _KS7010_SDIO_H
+
+#ifdef DEVICE_ALIGNMENT
+#undef DEVICE_ALIGNMENT
+#endif
+#define DEVICE_ALIGNMENT 32
+
+/* SDIO KeyStream vendor and device */
+#define SDIO_VENDOR_ID_KS_CODE_A 0x005b
+#define SDIO_VENDOR_ID_KS_CODE_B 0x0023
+/* Older sources suggest earlier versions were named 7910 or 79xx */
+#define SDIO_DEVICE_ID_KS_7010 0x7910
+
+/* Read Status Register */
+#define READ_STATUS 0x000000
+#define READ_STATUS_BUSY 0
+#define READ_STATUS_IDLE 1
+
+/* Read Index Register */
+#define READ_INDEX 0x000004
+
+/* Read Data Size Register */
+#define READ_DATA_SIZE 0x000008
+
+/* Write Status Register */
+#define WRITE_STATUS 0x00000C
+#define WRITE_STATUS_BUSY 0
+#define WRITE_STATUS_IDLE 1
+
+/* Write Index Register */
+#define WRITE_INDEX 0x000010
+
+/* Write Status/Read Data Size Register
+ * for network packet (less than 2048 bytes data)
+ */
+#define WSTATUS_RSIZE 0x000014
+#define WSTATUS_MASK 0x80 /* Write Status Register value */
+#define RSIZE_MASK 0x7F /* Read Data Size Register value [10:4] */
+
+/* ARM to SD interrupt Enable */
+#define INT_ENABLE 0x000020
+/* ARM to SD interrupt Pending */
+#define INT_PENDING 0x000024
+
+#define INT_GCR_B (1<<7)
+#define INT_GCR_A (1<<6)
+#define INT_WRITE_STATUS (1<<5)
+#define INT_WRITE_INDEX (1<<4)
+#define INT_WRITE_SIZE (1<<3)
+#define INT_READ_STATUS (1<<2)
+#define INT_READ_INDEX (1<<1)
+#define INT_READ_SIZE (1<<0)
+
+/* General Communication Register A */
+#define GCR_A 0x000028
+#define GCR_A_INIT 0
+#define GCR_A_REMAP 1
+#define GCR_A_RUN 2
+
+/* General Communication Register B */
+#define GCR_B 0x00002C
+#define GCR_B_ACTIVE 0
+#define GCR_B_DOZE 1
+
+/* Wakeup Register */
+/* #define WAKEUP 0x008104 */
+/* #define WAKEUP_REQ 0x00 */
+#define WAKEUP 0x008018
+#define WAKEUP_REQ 0x5a
+
+/* AHB Data Window 0x010000-0x01FFFF */
+#define DATA_WINDOW 0x010000
+#define WINDOW_SIZE (64 * 1024)
+
+#define KS7010_IRAM_ADDRESS 0x06000000
+
+/*
+ * struct define
+ */
+struct hw_info_t {
+ struct ks_sdio_card *sdio_card;
+ struct completion ks7010_sdio_wait;
+ struct workqueue_struct *ks7010sdio_wq;
+ struct delayed_work rw_wq;
+ unsigned char *read_buf;
+ struct tasklet_struct rx_bh_task;
+};
+
+struct ks_sdio_packet {
+ struct ks_sdio_packet *next;
+ u16 nb;
+ u8 buffer[0] __attribute__ ((aligned(4)));
+};
+
+struct ks_sdio_card {
+ struct sdio_func *func;
+ struct ks_wlan_private *priv;
+ spinlock_t lock;
+};
+
+/* Tx Device struct */
+#define TX_DEVICE_BUFF_SIZE 1024
+
+struct tx_device_buffer {
+ unsigned char *sendp; /* pointer of send req data */
+ unsigned int size;
+ void (*complete_handler) (void *arg1, void *arg2);
+ void *arg1;
+ void *arg2;
+};
+
+struct tx_device {
+ struct tx_device_buffer tx_dev_buff[TX_DEVICE_BUFF_SIZE];
+ unsigned int qhead; /* tx buffer queue first pointer */
+ unsigned int qtail; /* tx buffer queue last pointer */
+ spinlock_t tx_dev_lock;
+};
+
+/* Rx Device struct */
+#define RX_DATA_SIZE (2 + 2 + 2347 + 1)
+#define RX_DEVICE_BUFF_SIZE 32
+
+struct rx_device_buffer {
+ unsigned char data[RX_DATA_SIZE];
+ unsigned int size;
+};
+
+struct rx_device {
+ struct rx_device_buffer rx_dev_buff[RX_DEVICE_BUFF_SIZE];
+ unsigned int qhead; /* rx buffer queue first pointer */
+ unsigned int qtail; /* rx buffer queue last pointer */
+ spinlock_t rx_dev_lock;
+};
+#define ROM_FILE "ks7010sd.rom"
+
+#endif /* _KS7010_SDIO_H */
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
new file mode 100644
index 000000000000..a8822fe2bd60
--- /dev/null
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -0,0 +1,2760 @@
+/*
+ * Driver for KeyStream wireless LAN cards.
+ *
+ * Copyright (C) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ks_wlan.h"
+#include "ks_hostif.h"
+#include "eap_packet.h"
+#include "michael_mic.h"
+
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+
+/* Include Wireless Extension definition and check version */
+#include <net/iw_handler.h> /* New driver API */
+
+extern int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p,
+ unsigned long size,
+ void (*complete_handler) (void *arg1, void *arg2),
+ void *arg1, void *arg2);
+extern void send_packet_complete(void *, void *);
+
+extern void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv);
+extern int ks_wlan_hw_power_save(struct ks_wlan_private *priv);
+
+/* macro */
+#define inc_smeqhead(priv) \
+ (priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE)
+#define inc_smeqtail(priv) \
+ (priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE)
+#define cnt_smeqbody(priv) \
+ (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE)
+
+#define KS_WLAN_MEM_FLAG (GFP_ATOMIC)
+
+static
+inline u8 get_BYTE(struct ks_wlan_private *priv)
+{
+ u8 data;
+ data = *(priv->rxp)++;
+ /* length check in advance ! */
+ --(priv->rx_size);
+ return data;
+}
+
+static
+inline u16 get_WORD(struct ks_wlan_private *priv)
+{
+ u16 data;
+ data = (get_BYTE(priv) & 0xff);
+ data |= ((get_BYTE(priv) << 8) & 0xff00);
+ return data;
+}
+
+static
+inline u32 get_DWORD(struct ks_wlan_private *priv)
+{
+ u32 data;
+ data = (get_BYTE(priv) & 0xff);
+ data |= ((get_BYTE(priv) << 8) & 0x0000ff00);
+ data |= ((get_BYTE(priv) << 16) & 0x00ff0000);
+ data |= ((get_BYTE(priv) << 24) & 0xff000000);
+ return data;
+}
+
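+/*
+ * Example (illustrative): the get_* helpers consume the rx buffer in
+ * little endian order, so the byte sequence 0x78 0x56 0x34 0x12 read
+ * via get_DWORD() yields 0x12345678; rx_size is decremented per byte,
+ * letting callers bound-check what remains.
+ */
+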
+void ks_wlan_hw_wakeup_task(struct work_struct *work)
+{
+ struct ks_wlan_private *priv =
+ container_of(work, struct ks_wlan_private, ks_wlan_wakeup_task);
+ int ps_status = atomic_read(&priv->psstatus.status);
+
+ if (ps_status == PS_SNOOZE) {
+ ks_wlan_hw_wakeup_request(priv);
+ if (!wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait, HZ / 50)) { /* 20ms timeout */
+ DPRINTK(1, "wake up timeout !!!\n");
+ schedule_work(&priv->ks_wlan_wakeup_task);
+ return;
+ }
+ } else {
+ DPRINTK(1, "ps_status=%d\n", ps_status);
+ }
+
+ /* power save */
+ if (atomic_read(&priv->sme_task.count) > 0) {
+ DPRINTK(4, "sme task enable.\n");
+ tasklet_enable(&priv->sme_task);
+ }
+}
+
+static
+int ks_wlan_do_power_save(struct ks_wlan_private *priv)
+{
+ int rc = 0;
+
+ DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status));
+
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
+ } else {
+ priv->dev_state = DEVICE_STATE_READY;
+ }
+ return rc;
+}
+
+static
+int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
+{
+ struct local_ap_t *ap;
+ union iwreq_data wrqu;
+ struct net_device *netdev = priv->net_dev;
+ int rc = 0;
+
+ DPRINTK(3, "\n");
+ ap = &(priv->current_ap);
+
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS) {
+ memset(ap, 0, sizeof(struct local_ap_t));
+ return 1;
+ }
+
+ /* bssid */
+ memcpy(&(ap->bssid[0]), &(ap_info->bssid[0]), ETH_ALEN);
+ /* essid */
+ memcpy(&(ap->ssid.body[0]), &(priv->reg.ssid.body[0]),
+ priv->reg.ssid.size);
+ ap->ssid.size = priv->reg.ssid.size;
+ /* rate_set */
+ memcpy(&(ap->rate_set.body[0]), &(ap_info->rate_set.body[0]),
+ ap_info->rate_set.size);
+ ap->rate_set.size = ap_info->rate_set.size;
+ if (ap_info->ext_rate_set.size) {
+ /* rate_set */
+ memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+ &(ap_info->ext_rate_set.body[0]),
+ ap_info->ext_rate_set.size);
+ ap->rate_set.size += ap_info->ext_rate_set.size;
+ }
+ /* channel */
+ ap->channel = ap_info->ds_parameter.channel;
+ /* rssi */
+ ap->rssi = ap_info->rssi;
+ /* sq */
+ ap->sq = ap_info->sq;
+ /* noise */
+ ap->noise = ap_info->noise;
+ /* capability */
+ ap->capability = ap_info->capability;
+ /* rsn */
+ if ((ap_info->rsn_mode & RSN_MODE_WPA2)
+ && (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) {
+ ap->rsn_ie.id = 0x30;
+ if (ap_info->rsn.size <= RSN_IE_BODY_MAX) {
+ ap->rsn_ie.size = ap_info->rsn.size;
+ memcpy(&(ap->rsn_ie.body[0]), &(ap_info->rsn.body[0]),
+ ap_info->rsn.size);
+ } else {
+ ap->rsn_ie.size = RSN_IE_BODY_MAX;
+ memcpy(&(ap->rsn_ie.body[0]), &(ap_info->rsn.body[0]),
+ RSN_IE_BODY_MAX);
+ }
+ } else if ((ap_info->rsn_mode & RSN_MODE_WPA)
+ && (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA)) {
+ ap->wpa_ie.id = 0xdd;
+ if (ap_info->rsn.size <= RSN_IE_BODY_MAX) {
+ ap->wpa_ie.size = ap_info->rsn.size;
+ memcpy(&(ap->wpa_ie.body[0]), &(ap_info->rsn.body[0]),
+ ap_info->rsn.size);
+ } else {
+ ap->wpa_ie.size = RSN_IE_BODY_MAX;
+ memcpy(&(ap->wpa_ie.body[0]), &(ap_info->rsn.body[0]),
+ RSN_IE_BODY_MAX);
+ }
+ } else {
+ ap->rsn_ie.id = 0;
+ ap->rsn_ie.size = 0;
+ ap->wpa_ie.id = 0;
+ ap->wpa_ie.size = 0;
+ }
+
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ memcpy(wrqu.ap_addr.sa_data,
+ &(priv->current_ap.bssid[0]), ETH_ALEN);
+ DPRINTK(3,
+ "IWEVENT: connect bssid=%02x:%02x:%02x:%02x:%02x:%02x\n",
+ (unsigned char)wrqu.ap_addr.sa_data[0],
+ (unsigned char)wrqu.ap_addr.sa_data[1],
+ (unsigned char)wrqu.ap_addr.sa_data[2],
+ (unsigned char)wrqu.ap_addr.sa_data[3],
+ (unsigned char)wrqu.ap_addr.sa_data[4],
+ (unsigned char)wrqu.ap_addr.sa_data[5]);
+ wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
+ }
+ DPRINTK(4, "\n Link AP\n");
+ DPRINTK(4, " bssid=%02X:%02X:%02X:%02X:%02X:%02X\n \
+ essid=%s\n rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n channel=%d\n \
+ rssi=%d\n sq=%d\n capability=%04X\n", ap->bssid[0], ap->bssid[1], ap->bssid[2], ap->bssid[3], ap->bssid[4], ap->bssid[5], &(ap->ssid.body[0]), ap->rate_set.body[0], ap->rate_set.body[1], ap->rate_set.body[2], ap->rate_set.body[3], ap->rate_set.body[4], ap->rate_set.body[5], ap->rate_set.body[6], ap->rate_set.body[7], ap->channel, ap->rssi, ap->sq, ap->capability);
+ DPRINTK(4, "\n Link AP\n rsn.mode=%d\n rsn.size=%d\n",
+ ap_info->rsn_mode, ap_info->rsn.size);
+ DPRINTK(4, "\n ext_rate_set_size=%d\n rate_set_size=%d\n",
+ ap_info->ext_rate_set.size, ap_info->rate_set.size);
+
+ return rc;
+}
+
+static
+int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ struct local_ap_t *ap)
+{
+ unsigned char *bp;
+ int bsize, offset;
+ int rc = 0;
+
+ DPRINTK(3, "\n");
+ memset(ap, 0, sizeof(struct local_ap_t));
+
+ /* bssid */
+ memcpy(&(ap->bssid[0]), &(ap_info->bssid[0]), ETH_ALEN);
+ /* rssi */
+ ap->rssi = ap_info->rssi;
+ /* sq */
+ ap->sq = ap_info->sq;
+ /* noise */
+ ap->noise = ap_info->noise;
+ /* capability */
+ ap->capability = ap_info->capability;
+ /* channel */
+ ap->channel = ap_info->ch_info;
+
+ bp = &(ap_info->body[0]);
+ bsize = ap_info->body_size;
+ offset = 0;
+
+ while (bsize > offset) {
+ /* DPRINTK(4, "Element ID=%d \n",*bp); */
+ switch (*bp) {
+ case 0: /* ssid */
+ if (*(bp + 1) <= SSID_MAX_SIZE) {
+ ap->ssid.size = *(bp + 1);
+ } else {
+ DPRINTK(1, "size over :: ssid size=%d \n",
+ *(bp + 1));
+ ap->ssid.size = SSID_MAX_SIZE;
+ }
+ memcpy(&(ap->ssid.body[0]), bp + 2, ap->ssid.size);
+ break;
+ case 1: /* rate */
+ case 50: /* ext rate */
+ if ((*(bp + 1) + ap->rate_set.size) <=
+ RATE_SET_MAX_SIZE) {
+ memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+ bp + 2, *(bp + 1));
+ ap->rate_set.size += *(bp + 1);
+ } else {
+ DPRINTK(1, "size over :: rate size=%d \n",
+ (*(bp + 1) + ap->rate_set.size));
+ memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+ bp + 2,
+ RATE_SET_MAX_SIZE - ap->rate_set.size);
+ ap->rate_set.size +=
+ (RATE_SET_MAX_SIZE - ap->rate_set.size);
+ }
+ break;
+ case 3: /* DS parameter */
+ break;
+ case 48: /* RSN(WPA2) */
+ ap->rsn_ie.id = *bp;
+ if (*(bp + 1) <= RSN_IE_BODY_MAX) {
+ ap->rsn_ie.size = *(bp + 1);
+ } else {
+ DPRINTK(1, "size over :: rsn size=%d \n",
+ *(bp + 1));
+ ap->rsn_ie.size = RSN_IE_BODY_MAX;
+ }
+ memcpy(&(ap->rsn_ie.body[0]), bp + 2, ap->rsn_ie.size);
+ break;
+ case 221: /* WPA */
+ if (!memcmp(bp + 2, "\x00\x50\xf2\x01", 4)) { /* WPA OUI check */
+ ap->wpa_ie.id = *bp;
+ if (*(bp + 1) <= RSN_IE_BODY_MAX) {
+ ap->wpa_ie.size = *(bp + 1);
+ } else {
+ DPRINTK(1,
+ "size over :: wpa size=%d \n",
+ *(bp + 1));
+ ap->wpa_ie.size = RSN_IE_BODY_MAX;
+ }
+ memcpy(&(ap->wpa_ie.body[0]), bp + 2,
+ ap->wpa_ie.size);
+ }
+ break;
+
+ case 2: /* FH parameter */
+ case 4: /* CF parameter */
+ case 5: /* TIM */
+ case 6: /* IBSS parameter */
+ case 7: /* Country */
+ case 42: /* ERP information */
+ case 47: /* Reserve ID 47 Broadcom AP */
+ break;
+ default:
+ DPRINTK(4, "unknown Element ID=%d \n", *bp);
+ break;
+ }
+ offset += 2; /* id & size field */
+ offset += *(bp + 1); /* +size offset */
+ bp += (*(bp + 1) + 2); /* pointer update */
+ }
+
+ return rc;
+}
+
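+/*
+ * Note (illustrative): the while loop above walks standard 802.11
+ * information elements, each laid out as { id, length, body[length] }:
+ *
+ *	id  = bp[0];	element ID (0 = SSID, 1 = rates, 48 = RSN, ...)
+ *	len = bp[1];
+ *	bp += len + 2;	advance past header and body
+ *
+ * The parser trusts the firmware: a corrupt length byte would walk bp
+ * past body_size before the loop condition is rechecked.
+ */
+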
+static
+void hostif_data_indication(struct ks_wlan_private *priv)
+{
+ unsigned int rx_ind_size; /* indicate data size */
+ struct sk_buff *skb;
+ unsigned short auth_type;
+ unsigned char temp[256];
+
+ unsigned char RecvMIC[8];
+ char buf[128];
+ struct ether_hdr *eth_hdr;
+ unsigned short eth_proto;
+ unsigned long now;
+ struct mic_failure_t *mic_failure;
+ struct ieee802_1x_hdr *aa1x_hdr;
+ struct wpa_eapol_key *eap_key;
+ struct michel_mic_t michel_mic;
+ union iwreq_data wrqu;
+
+ DPRINTK(3, "\n");
+
+ /* min length check */
+ if (priv->rx_size <= ETH_HLEN) {
+ DPRINTK(3, "rx_size = %d\n", priv->rx_size);
+ priv->nstats.rx_errors++;
+ return;
+ }
+
+ auth_type = get_WORD(priv); /* AuthType */
+ get_WORD(priv); /* Reserve Area */
+
+ eth_hdr = (struct ether_hdr *)(priv->rxp);
+ eth_proto = ntohs(eth_hdr->h_proto);
+ DPRINTK(3, "ether protocol = %04X\n", eth_proto);
+
+ /* source address check */
+ if (!memcmp(&priv->eth_addr[0], eth_hdr->h_source, ETH_ALEN)) {
+ DPRINTK(1, "invalid : source is own mac address !!\n");
+ DPRINTK(1,
+ "eth_hdrernet->h_dest=%02X:%02X:%02X:%02X:%02X:%02X\n",
+ eth_hdr->h_source[0], eth_hdr->h_source[1],
+ eth_hdr->h_source[2], eth_hdr->h_source[3],
+ eth_hdr->h_source[4], eth_hdr->h_source[5]);
+ priv->nstats.rx_errors++;
+ return;
+ }
+
+ /* for WPA */
+ if (auth_type != TYPE_DATA && priv->wpa.rsn_enabled) {
+ if (memcmp(&eth_hdr->h_source[0], &priv->eth_addr[0], ETH_ALEN)) { /* source address check */
+ if (eth_hdr->h_dest_snap != eth_hdr->h_source_snap) {
+ DPRINTK(1, "invalid data format\n");
+ priv->nstats.rx_errors++;
+ return;
+ }
+ if (((auth_type == TYPE_PMK1
+ && priv->wpa.pairwise_suite ==
+ IW_AUTH_CIPHER_TKIP) || (auth_type == TYPE_GMK1
+ && priv->wpa.
+ group_suite ==
+ IW_AUTH_CIPHER_TKIP)
+ || (auth_type == TYPE_GMK2
+ && priv->wpa.group_suite ==
+ IW_AUTH_CIPHER_TKIP))
+ && priv->wpa.key[auth_type - 1].key_len) {
+ DPRINTK(4, "TKIP: protocol=%04X: size=%u\n",
+ eth_proto, priv->rx_size);
+ /* MIC save */
+ memcpy(&RecvMIC[0],
+ (priv->rxp) + ((priv->rx_size) - 8), 8);
+ priv->rx_size = priv->rx_size - 8;
+ if (auth_type > 0 && auth_type < 4) { /* auth_type check */
+ MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[auth_type - 1].rx_mic_key, (uint8_t *) priv->rxp, (int)priv->rx_size, (uint8_t) 0, /* priority */
+ (uint8_t *)
+ michel_mic.Result);
+ }
+ if (memcmp(michel_mic.Result, RecvMIC, 8)) {
+ now = jiffies;
+ mic_failure = &priv->wpa.mic_failure;
+ /* MIC FAILURE */
+ if (mic_failure->last_failure_time &&
+ (now -
+ mic_failure->last_failure_time) /
+ HZ >= 60) {
+ mic_failure->failure = 0;
+ }
+ DPRINTK(4, "MIC FAILURE \n");
+ if (mic_failure->failure == 0) {
+ mic_failure->failure = 1;
+ mic_failure->counter = 0;
+ } else if (mic_failure->failure == 1) {
+ mic_failure->failure = 2;
+ mic_failure->counter =
+ (uint16_t) ((now -
+ mic_failure->
+ last_failure_time)
+ / HZ);
+ if (!mic_failure->counter) /* mic_failure counter value range 1-60 */
+ mic_failure->counter =
+ 1;
+ }
+ priv->wpa.mic_failure.
+ last_failure_time = now;
+ /* needed parameters: count, keyid, key type, TSC */
+ sprintf(buf,
+ "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
+ "%02x:%02x:%02x:%02x:%02x:%02x)",
+ auth_type - 1,
+ eth_hdr->
+ h_dest[0] & 0x01 ? "broad" :
+ "uni", eth_hdr->h_source[0],
+ eth_hdr->h_source[1],
+ eth_hdr->h_source[2],
+ eth_hdr->h_source[3],
+ eth_hdr->h_source[4],
+ eth_hdr->h_source[5]);
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = strlen(buf);
+ DPRINTK(4,
+ "IWEVENT:MICHAELMICFAILURE\n");
+ wireless_send_event(priv->net_dev,
+ IWEVCUSTOM, &wrqu,
+ buf);
+ return;
+ }
+ }
+ }
+ }
+
+ if ((priv->connect_status & FORCE_DISCONNECT) ||
+ priv->wpa.mic_failure.failure == 2) {
+ return;
+ }
+
+ /* check 13th byte at rx data */
+ switch (*(priv->rxp + 12)) {
+ case 0xAA: /* SNAP */
+ rx_ind_size = priv->rx_size - 6;
+ skb = dev_alloc_skb(rx_ind_size);
+ DPRINTK(4, "SNAP, rx_ind_size = %d\n", rx_ind_size);
+
+ if (skb) {
+ memcpy(skb_put(skb, 12), priv->rxp, 12); /* 8802/FDDI MAC copy */
+ /* (SNAP+UI..) skip */
+ memcpy(skb_put(skb, rx_ind_size - 12), priv->rxp + 18, rx_ind_size - 12); /* copy after Type */
+
+ aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 20);
+ if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY
+ && priv->wpa.rsn_enabled) {
+ eap_key =
+ (struct wpa_eapol_key *)(aa1x_hdr + 1);
+ atomic_set(&priv->psstatus.snooze_guard, 1);
+ }
+
+ /* rx indication */
+ skb->dev = priv->net_dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ priv->nstats.rx_packets++;
+ priv->nstats.rx_bytes += rx_ind_size;
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+ } else {
+ printk(KERN_WARNING
+ "%s: Memory squeeze, dropping packet.\n",
+ priv->net_dev->name);
+ priv->nstats.rx_dropped++;
+ }
+ break;
+ case 0xF0: /* NETBEUI/NetBIOS */
+ rx_ind_size = (priv->rx_size + 2);
+ skb = dev_alloc_skb(rx_ind_size);
+ DPRINTK(3, "NETBEUI/NetBIOS rx_ind_size=%d\n", rx_ind_size);
+
+ if (skb) {
+ memcpy(skb_put(skb, 12), priv->rxp, 12); /* 8802/FDDI MAC copy */
+
+ temp[0] = (((rx_ind_size - 12) >> 8) & 0xff); /* NETBEUI size add */
+ temp[1] = ((rx_ind_size - 12) & 0xff);
+ memcpy(skb_put(skb, 2), temp, 2);
+
+ memcpy(skb_put(skb, rx_ind_size - 14), priv->rxp + 12, rx_ind_size - 14); /* copy after Type */
+
+ aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 14);
+ if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY
+ && priv->wpa.rsn_enabled) {
+ eap_key =
+ (struct wpa_eapol_key *)(aa1x_hdr + 1);
+ atomic_set(&priv->psstatus.snooze_guard, 1);
+ }
+
+ /* rx indication */
+ skb->dev = priv->net_dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ priv->nstats.rx_packets++;
+ priv->nstats.rx_bytes += rx_ind_size;
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+ } else {
+ printk(KERN_WARNING
+ "%s: Memory squeeze, dropping packet.\n",
+ priv->net_dev->name);
+ priv->nstats.rx_dropped++;
+ }
+ break;
+ default: /* other rx data */
+ DPRINTK(2, "invalid data format\n");
+ priv->nstats.rx_errors++;
+ }
+}
+
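+/*
+ * Note (illustrative): the switch above keys on byte 12 of the frame,
+ * the first byte after the two MAC addresses.  0xAA is the LLC/SNAP
+ * DSAP, so the six SNAP header bytes are dropped and the EtherType that
+ * follows is kept; 0xF0 marks NetBIOS, where a length field is
+ * synthesized instead because no EtherType is present.
+ */
+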
+static
+void hostif_mib_get_confirm(struct ks_wlan_private *priv)
+{
+ struct net_device *dev = priv->net_dev;
+ uint32_t mib_status;
+ uint32_t mib_attribute;
+ uint16_t mib_val_size;
+ uint16_t mib_val_type;
+
+ DPRINTK(3, "\n");
+
+ mib_status = get_DWORD(priv); /* MIB status */
+ mib_attribute = get_DWORD(priv); /* MIB attribute */
+ mib_val_size = get_WORD(priv); /* MIB value size */
+ mib_val_type = get_WORD(priv); /* MIB value type */
+
+ if (mib_status != 0) {
+ /* in case of error */
+ DPRINTK(1, "attribute=%08X, status=%08X\n", mib_attribute,
+ mib_status);
+ return;
+ }
+
+ switch (mib_attribute) {
+ case DOT11_MAC_ADDRESS:
+ /* MAC address */
+ DPRINTK(3, " mib_attribute=DOT11_MAC_ADDRESS\n");
+ hostif_sme_enqueue(priv, SME_GET_MAC_ADDRESS);
+ memcpy(priv->eth_addr, priv->rxp, ETH_ALEN);
+ priv->mac_address_valid = 1;
+ dev->dev_addr[0] = priv->eth_addr[0];
+ dev->dev_addr[1] = priv->eth_addr[1];
+ dev->dev_addr[2] = priv->eth_addr[2];
+ dev->dev_addr[3] = priv->eth_addr[3];
+ dev->dev_addr[4] = priv->eth_addr[4];
+ dev->dev_addr[5] = priv->eth_addr[5];
+ dev->dev_addr[6] = 0x00;
+ dev->dev_addr[7] = 0x00;
+ printk(KERN_INFO
+ "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
+ priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
+ break;
+ case DOT11_PRODUCT_VERSION:
+ /* firmware version */
+ DPRINTK(3, " mib_attribute=DOT11_PRODUCT_VERSION\n");
+ priv->version_size = priv->rx_size;
+ memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
+ priv->firmware_version[priv->rx_size] = '\0';
+ printk(KERN_INFO "ks_wlan: firmware ver. = %s\n",
+ priv->firmware_version);
+ hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION);
+ /* wake_up_interruptible_all(&priv->confirm_wait); */
+ complete(&priv->confirm_wait);
+ break;
+ case LOCAL_GAIN:
+ memcpy(&priv->gain, priv->rxp, sizeof(priv->gain));
+ DPRINTK(3, "TxMode=%d, RxMode=%d, TxGain=%d, RxGain=%d\n",
+ priv->gain.TxMode, priv->gain.RxMode, priv->gain.TxGain,
+ priv->gain.RxGain);
+ break;
+ case LOCAL_EEPROM_SUM:
+ memcpy(&priv->eeprom_sum, priv->rxp, sizeof(priv->eeprom_sum));
+ DPRINTK(1, "eeprom_sum.type=%x, eeprom_sum.result=%x\n",
+ priv->eeprom_sum.type, priv->eeprom_sum.result);
+ if (priv->eeprom_sum.type == 0) {
+ priv->eeprom_checksum = EEPROM_CHECKSUM_NONE;
+ } else if (priv->eeprom_sum.type == 1) {
+ if (priv->eeprom_sum.result == 0) {
+ priv->eeprom_checksum = EEPROM_NG;
+ printk("LOCAL_EEPROM_SUM NG\n");
+ } else if (priv->eeprom_sum.result == 1) {
+ priv->eeprom_checksum = EEPROM_OK;
+ }
+ } else {
+ printk("LOCAL_EEPROM_SUM error!\n");
+ }
+ break;
+ default:
+ DPRINTK(1, "mib_attribute=%08x\n", (unsigned int)mib_attribute);
+ break;
+ }
+}
+
+static
+void hostif_mib_set_confirm(struct ks_wlan_private *priv)
+{
+ uint32_t mib_status; /* +04 MIB Status */
+ uint32_t mib_attribute; /* +08 MIB attribute */
+
+ DPRINTK(3, "\n");
+
+ mib_status = get_DWORD(priv); /* MIB Status */
+ mib_attribute = get_DWORD(priv); /* MIB attribute */
+
+ if (mib_status != 0) {
+ /* in case of error */
+ DPRINTK(1, "error :: attribute=%08X, status=%08X\n",
+ mib_attribute, mib_status);
+ }
+
+ switch (mib_attribute) {
+ case DOT11_RTS_THRESHOLD:
+ hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_CONFIRM);
+ break;
+ case DOT11_FRAGMENTATION_THRESHOLD:
+ hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_CONFIRM);
+ break;
+ case DOT11_WEP_DEFAULT_KEY_ID:
+ if (!priv->wpa.wpa_enabled)
+ hostif_sme_enqueue(priv, SME_WEP_INDEX_CONFIRM);
+ break;
+ case DOT11_WEP_DEFAULT_KEY_VALUE1:
+ DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE1:mib_status=%d\n",
+ (int)mib_status);
+ if (priv->wpa.rsn_enabled)
+ hostif_sme_enqueue(priv, SME_SET_PMK_TSC);
+ else
+ hostif_sme_enqueue(priv, SME_WEP_KEY1_CONFIRM);
+ break;
+ case DOT11_WEP_DEFAULT_KEY_VALUE2:
+ DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE2:mib_status=%d\n",
+ (int)mib_status);
+ if (priv->wpa.rsn_enabled)
+ hostif_sme_enqueue(priv, SME_SET_GMK1_TSC);
+ else
+ hostif_sme_enqueue(priv, SME_WEP_KEY2_CONFIRM);
+ break;
+ case DOT11_WEP_DEFAULT_KEY_VALUE3:
+ DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE3:mib_status=%d\n",
+ (int)mib_status);
+ if (priv->wpa.rsn_enabled)
+ hostif_sme_enqueue(priv, SME_SET_GMK2_TSC);
+ else
+ hostif_sme_enqueue(priv, SME_WEP_KEY3_CONFIRM);
+ break;
+ case DOT11_WEP_DEFAULT_KEY_VALUE4:
+ DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE4:mib_status=%d\n",
+ (int)mib_status);
+ if (!priv->wpa.rsn_enabled)
+ hostif_sme_enqueue(priv, SME_WEP_KEY4_CONFIRM);
+ break;
+ case DOT11_PRIVACY_INVOKED:
+ if (!priv->wpa.rsn_enabled)
+ hostif_sme_enqueue(priv, SME_WEP_FLAG_CONFIRM);
+ break;
+ case DOT11_RSN_ENABLED:
+ DPRINTK(2, "DOT11_RSN_ENABLED:mib_status=%d\n",
+ (int)mib_status);
+ hostif_sme_enqueue(priv, SME_RSN_ENABLED_CONFIRM);
+ break;
+ case LOCAL_RSN_MODE:
+ hostif_sme_enqueue(priv, SME_RSN_MODE_CONFIRM);
+ break;
+ case LOCAL_MULTICAST_ADDRESS:
+ hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
+ break;
+ case LOCAL_MULTICAST_FILTER:
+ hostif_sme_enqueue(priv, SME_MULTICAST_CONFIRM);
+ break;
+ case LOCAL_CURRENTADDRESS:
+ priv->mac_address_valid = 1;
+ break;
+ case DOT11_RSN_CONFIG_MULTICAST_CIPHER:
+ DPRINTK(2, "DOT11_RSN_CONFIG_MULTICAST_CIPHER:mib_status=%d\n",
+ (int)mib_status);
+ hostif_sme_enqueue(priv, SME_RSN_MCAST_CONFIRM);
+ break;
+ case DOT11_RSN_CONFIG_UNICAST_CIPHER:
+ DPRINTK(2, "DOT11_RSN_CONFIG_UNICAST_CIPHER:mib_status=%d\n",
+ (int)mib_status);
+ hostif_sme_enqueue(priv, SME_RSN_UCAST_CONFIRM);
+ break;
+ case DOT11_RSN_CONFIG_AUTH_SUITE:
+ DPRINTK(2, "DOT11_RSN_CONFIG_AUTH_SUITE:mib_status=%d\n",
+ (int)mib_status);
+ hostif_sme_enqueue(priv, SME_RSN_AUTH_CONFIRM);
+ break;
+ case DOT11_PMK_TSC:
+ DPRINTK(2, "DOT11_PMK_TSC:mib_status=%d\n", (int)mib_status);
+ break;
+ case DOT11_GMK1_TSC:
+ DPRINTK(2, "DOT11_GMK1_TSC:mib_status=%d\n", (int)mib_status);
+ if (atomic_read(&priv->psstatus.snooze_guard)) {
+ atomic_set(&priv->psstatus.snooze_guard, 0);
+ }
+ break;
+ case DOT11_GMK2_TSC:
+ DPRINTK(2, "DOT11_GMK2_TSC:mib_status=%d\n", (int)mib_status);
+ if (atomic_read(&priv->psstatus.snooze_guard)) {
+ atomic_set(&priv->psstatus.snooze_guard, 0);
+ }
+ break;
+ case LOCAL_PMK:
+ DPRINTK(2, "LOCAL_PMK:mib_status=%d\n", (int)mib_status);
+ break;
+ case LOCAL_GAIN:
+ DPRINTK(2, "LOCAL_GAIN:mib_status=%d\n", (int)mib_status);
+ break;
+#ifdef WPS
+ case LOCAL_WPS_ENABLE:
+ DPRINTK(2, "LOCAL_WPS_ENABLE:mib_status=%d\n", (int)mib_status);
+ break;
+ case LOCAL_WPS_PROBE_REQ:
+ DPRINTK(2, "LOCAL_WPS_PROBE_REQ:mib_status=%d\n",
+ (int)mib_status);
+ break;
+#endif /* WPS */
+ case LOCAL_REGION:
+ DPRINTK(2, "LOCAL_REGION:mib_status=%d\n", (int)mib_status);
+ default:
+ break;
+ }
+}
+
+static
+void hostif_power_mngmt_confirm(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "\n");
+
+ if (priv->reg.powermgt > POWMGT_ACTIVE_MODE &&
+ priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+ atomic_set(&priv->psstatus.confirm_wait, 0);
+ priv->dev_state = DEVICE_STATE_SLEEP;
+ ks_wlan_hw_power_save(priv);
+ } else {
+ priv->dev_state = DEVICE_STATE_READY;
+ }
+
+}
+
+static
+void hostif_sleep_confirm(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "\n");
+
+ atomic_set(&priv->sleepstatus.doze_request, 1);
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+}
+
+static
+void hostif_start_confirm(struct ks_wlan_private *priv)
+{
+#ifdef WPS
+ union iwreq_data wrqu;
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
+ DPRINTK(3, "IWEVENT: disconnect\n");
+ wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+ }
+#endif
+ DPRINTK(3, " scan_ind_count=%d\n", priv->scan_ind_count);
+ hostif_sme_enqueue(priv, SME_START_CONFIRM);
+}
+
+static
+void hostif_connect_indication(struct ks_wlan_private *priv)
+{
+ unsigned short connect_code;
+ unsigned int tmp = 0;
+ unsigned int old_status = priv->connect_status;
+ struct net_device *netdev = priv->net_dev;
+ union iwreq_data wrqu0;
+ connect_code = get_WORD(priv);
+
+ switch (connect_code) {
+ case RESULT_CONNECT: /* connect */
+ DPRINTK(3, "connect :: scan_ind_count=%d\n",
+ priv->scan_ind_count);
+ if (!(priv->connect_status & FORCE_DISCONNECT))
+ netif_carrier_on(netdev);
+ tmp = FORCE_DISCONNECT & priv->connect_status;
+ priv->connect_status = tmp + CONNECT_STATUS;
+ break;
+ case RESULT_DISCONNECT: /* disconnect */
+ DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
+ priv->scan_ind_count);
+ netif_carrier_off(netdev);
+ tmp = FORCE_DISCONNECT & priv->connect_status;
+ priv->connect_status = tmp + DISCONNECT_STATUS;
+ break;
+ default:
+ DPRINTK(1, "unknown connect_code=%d :: scan_ind_count=%d\n",
+ connect_code, priv->scan_ind_count);
+ netif_carrier_off(netdev);
+ tmp = FORCE_DISCONNECT & priv->connect_status;
+ priv->connect_status = tmp + DISCONNECT_STATUS;
+ break;
+ }
+
+ get_current_ap(priv, (struct link_ap_info_t *)priv->rxp);
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS &&
+ (old_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS) {
+ /* for power save */
+ atomic_set(&priv->psstatus.snooze_guard, 0);
+ atomic_set(&priv->psstatus.confirm_wait, 0);
+ }
+ ks_wlan_do_power_save(priv);
+
+ wrqu0.data.length = 0;
+ wrqu0.data.flags = 0;
+ wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS &&
+ (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ memset(wrqu0.ap_addr.sa_data, '\0', ETH_ALEN);
+ DPRINTK(3, "IWEVENT: disconnect\n");
+ DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
+ priv->scan_ind_count);
+ wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
+ }
+ priv->scan_ind_count = 0;
+}
+
+static
+void hostif_scan_indication(struct ks_wlan_private *priv)
+{
+ int i;
+ struct ap_info_t *ap_info;
+
+ DPRINTK(3, "scan_ind_count = %d\n", priv->scan_ind_count);
+ ap_info = (struct ap_info_t *)(priv->rxp);
+
+ if (priv->scan_ind_count != 0) {
+ for (i = 0; i < priv->aplist.size; i++) { /* bssid check */
+ if (!memcmp
+ (&(ap_info->bssid[0]),
+ &(priv->aplist.ap[i].bssid[0]), ETH_ALEN)) {
+ if (ap_info->frame_type ==
+ FRAME_TYPE_PROBE_RESP)
+ get_ap_information(priv, ap_info,
+ &(priv->aplist.
+ ap[i]));
+ return;
+ }
+ }
+ }
+ priv->scan_ind_count++;
+ if (priv->scan_ind_count < LOCAL_APLIST_MAX + 1) {
+ DPRINTK(4, " scan_ind_count=%d :: aplist.size=%d\n",
+ priv->scan_ind_count, priv->aplist.size);
+ get_ap_information(priv, (struct ap_info_t *)(priv->rxp),
+ &(priv->aplist.
+ ap[priv->scan_ind_count - 1]));
+ priv->aplist.size = priv->scan_ind_count;
+ } else {
+ DPRINTK(4, " count over :: scan_ind_count=%d\n",
+ priv->scan_ind_count);
+ }
+
+}
+
+static
+void hostif_stop_confirm(struct ks_wlan_private *priv)
+{
+ unsigned int tmp = 0;
+ unsigned int old_status = priv->connect_status;
+ struct net_device *netdev = priv->net_dev;
+ union iwreq_data wrqu0;
+
+ DPRINTK(3, "\n");
+ if (priv->dev_state == DEVICE_STATE_SLEEP)
+ priv->dev_state = DEVICE_STATE_READY;
+
+ /* disconnect indication */
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ netif_carrier_off(netdev);
+ tmp = FORCE_DISCONNECT & priv->connect_status;
+ priv->connect_status = tmp | DISCONNECT_STATUS;
+ printk("IWEVENT: disconnect\n");
+
+ wrqu0.data.length = 0;
+ wrqu0.data.flags = 0;
+ wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
+ if ((priv->connect_status & CONNECT_STATUS_MASK) ==
+ DISCONNECT_STATUS
+ && (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ memset(wrqu0.ap_addr.sa_data, '\0', ETH_ALEN);
+ DPRINTK(3, "IWEVENT: disconnect\n");
+ printk("IWEVENT: disconnect\n");
+ DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
+ priv->scan_ind_count);
+ wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
+ }
+ priv->scan_ind_count = 0;
+ }
+
+ hostif_sme_enqueue(priv, SME_STOP_CONFIRM);
+}
+
+static
+void hostif_ps_adhoc_set_confirm(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "\n");
+ priv->infra_status = 0; /* infrastructure mode cancel */
+ hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
+
+}
+
+static
+void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
+{
+ uint16_t result_code;
+ DPRINTK(3, "\n");
+ result_code = get_WORD(priv);
+ DPRINTK(3, "result code = %d\n", result_code);
+ priv->infra_status = 1; /* infrastructure mode set */
+ hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
+}
+
+static
+void hostif_adhoc_set_confirm(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "\n");
+ priv->infra_status = 1; /* infrastructure mode set */
+ hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
+}
+
+static
+void hostif_associate_indication(struct ks_wlan_private *priv)
+{
+ struct association_request_t *assoc_req;
+ struct association_response_t *assoc_resp;
+ unsigned char *pb;
+ union iwreq_data wrqu;
+ char buf[IW_CUSTOM_MAX];
+ char *pbuf = &buf[0];
+ int i;
+
+ static const char associnfo_leader0[] = "ASSOCINFO(ReqIEs=";
+ static const char associnfo_leader1[] = " RespIEs=";
+
+ DPRINTK(3, "\n");
+ assoc_req = (struct association_request_t *)(priv->rxp);
+ assoc_resp = (struct association_response_t *)(assoc_req + 1);
+ pb = (unsigned char *)(assoc_resp + 1);
+
+ memset(&wrqu, 0, sizeof(wrqu));
+ memcpy(pbuf, associnfo_leader0, sizeof(associnfo_leader0) - 1);
+ wrqu.data.length += sizeof(associnfo_leader0) - 1;
+ pbuf += sizeof(associnfo_leader0) - 1;
+
+ for (i = 0; i < assoc_req->reqIEs_size; i++)
+ pbuf += sprintf(pbuf, "%02x", *(pb + i));
+ wrqu.data.length += (assoc_req->reqIEs_size) * 2;
+
+ memcpy(pbuf, associnfo_leader1, sizeof(associnfo_leader1) - 1);
+ wrqu.data.length += sizeof(associnfo_leader1) - 1;
+ pbuf += sizeof(associnfo_leader1) - 1;
+
+ pb += assoc_req->reqIEs_size;
+ for (i = 0; i < assoc_resp->respIEs_size; i++)
+ pbuf += sprintf(pbuf, "%02x", *(pb + i));
+ wrqu.data.length += (assoc_resp->respIEs_size) * 2;
+
+ pbuf += sprintf(pbuf, ")");
+ wrqu.data.length += 1;
+
+ DPRINTK(3, "IWEVENT:ASSOCINFO\n");
+ wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu, buf);
+}
+
+static
+void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
+{
+ unsigned int result_code;
+ struct net_device *dev = priv->net_dev;
+	union iwreq_data wrqu;
+
+	result_code = get_DWORD(priv);
+ DPRINTK(2, "result=%d :: scan_ind_count=%d\n", result_code,
+ priv->scan_ind_count);
+
+ priv->sme_i.sme_flag &= ~SME_AP_SCAN;
+ hostif_sme_enqueue(priv, SME_BSS_SCAN_CONFIRM);
+
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ DPRINTK(3, "IWEVENT: SCAN CONFIRM\n");
+ wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
+ priv->scan_ind_count = 0;
+}
+
+static
+void hostif_phy_information_confirm(struct ks_wlan_private *priv)
+{
+ struct iw_statistics *wstats = &priv->wstats;
+ unsigned char rssi, signal, noise;
+ unsigned char LinkSpeed;
+ unsigned int TransmittedFrameCount, ReceivedFragmentCount;
+ unsigned int FailedCount, FCSErrorCount;
+
+ DPRINTK(3, "\n");
+ rssi = get_BYTE(priv);
+ signal = get_BYTE(priv);
+ noise = get_BYTE(priv);
+ LinkSpeed = get_BYTE(priv);
+ TransmittedFrameCount = get_DWORD(priv);
+ ReceivedFragmentCount = get_DWORD(priv);
+ FailedCount = get_DWORD(priv);
+ FCSErrorCount = get_DWORD(priv);
+
+ DPRINTK(4, "phyinfo confirm rssi=%d signal=%d\n", rssi, signal);
+ priv->current_rate = (LinkSpeed & RATE_MASK);
+ wstats->qual.qual = signal;
+ wstats->qual.level = 256 - rssi;
+ wstats->qual.noise = 0; /* invalid noise value */
+ wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+
+ DPRINTK(3, "\n rssi=%u\n signal=%u\n LinkSpeed=%ux500Kbps\n \
+ TransmittedFrameCount=%u\n ReceivedFragmentCount=%u\n FailedCount=%u\n \
+ FCSErrorCount=%u\n", rssi, signal, LinkSpeed, TransmittedFrameCount, ReceivedFragmentCount, FailedCount, FCSErrorCount);
+
+	complete(&priv->confirm_wait);
+}
+
+static
+void hostif_mic_failure_confirm(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "mic_failure=%u\n", priv->wpa.mic_failure.failure);
+ hostif_sme_enqueue(priv, SME_MIC_FAILURE_CONFIRM);
+}
+
+static
+void hostif_event_check(struct ks_wlan_private *priv)
+{
+ unsigned short event;
+
+ DPRINTK(4, "\n");
+ event = get_WORD(priv); /* get event */
+ switch (event) {
+ case HIF_DATA_IND:
+ hostif_data_indication(priv);
+ break;
+ case HIF_MIB_GET_CONF:
+ hostif_mib_get_confirm(priv);
+ break;
+ case HIF_MIB_SET_CONF:
+ hostif_mib_set_confirm(priv);
+ break;
+ case HIF_POWERMGT_CONF:
+ hostif_power_mngmt_confirm(priv);
+ break;
+ case HIF_SLEEP_CONF:
+ hostif_sleep_confirm(priv);
+ break;
+ case HIF_START_CONF:
+ hostif_start_confirm(priv);
+ break;
+ case HIF_CONNECT_IND:
+ hostif_connect_indication(priv);
+ break;
+ case HIF_STOP_CONF:
+ hostif_stop_confirm(priv);
+ break;
+ case HIF_PS_ADH_SET_CONF:
+ hostif_ps_adhoc_set_confirm(priv);
+ break;
+ case HIF_INFRA_SET_CONF:
+ case HIF_INFRA_SET2_CONF:
+ hostif_infrastructure_set_confirm(priv);
+ break;
+ case HIF_ADH_SET_CONF:
+ case HIF_ADH_SET2_CONF:
+ hostif_adhoc_set_confirm(priv);
+ break;
+ case HIF_ASSOC_INFO_IND:
+ hostif_associate_indication(priv);
+ break;
+ case HIF_MIC_FAILURE_CONF:
+ hostif_mic_failure_confirm(priv);
+ break;
+ case HIF_SCAN_CONF:
+ hostif_bss_scan_confirm(priv);
+ break;
+ case HIF_PHY_INFO_CONF:
+ case HIF_PHY_INFO_IND:
+ hostif_phy_information_confirm(priv);
+ break;
+ case HIF_SCAN_IND:
+ hostif_scan_indication(priv);
+ break;
+ case HIF_AP_SET_CONF:
+ default:
+		printk(KERN_ERR "undefined event[%04X]\n", event);
+ /* wake_up_all(&priv->confirm_wait); */
+ complete(&priv->confirm_wait);
+ break;
+ }
+
+ /* add event to hostt buffer */
+ priv->hostt.buff[priv->hostt.qtail] = event;
+ priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
+}
+
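+/*
+ * Round size up to the next multiple of 4, since HIF frames are 32-bit
+ * aligned: e.g. CHECK_ALINE(5) == 8, CHECK_ALINE(8) == 8.
+ */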
+#define CHECK_ALINE(size) ((size) % 4 ? ((size) + (4 - ((size) % 4))) : (size))
+
+int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
+{
+ unsigned int packet_len = 0;
+
+ unsigned char *buffer = NULL;
+ unsigned int length = 0;
+ struct hostif_data_request_t *pp;
+ unsigned char *p;
+ int result = 0;
+ unsigned short eth_proto;
+ struct ether_hdr *eth_hdr;
+ struct michel_mic_t michel_mic;
+ unsigned short keyinfo = 0;
+ struct ieee802_1x_hdr *aa1x_hdr;
+ struct wpa_eapol_key *eap_key;
+ struct ethhdr *eth;
+
+ packet_len = packet->len;
+ if (packet_len > ETH_FRAME_LEN) {
+ DPRINTK(1, "bad length packet_len=%d \n", packet_len);
+ dev_kfree_skb(packet);
+ return -1;
+ }
+
+ if (((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS)
+ || (priv->connect_status & FORCE_DISCONNECT)
+ || priv->wpa.mic_failure.stop) {
+ DPRINTK(3, " DISCONNECT\n");
+ if (netif_queue_stopped(priv->net_dev))
+ netif_wake_queue(priv->net_dev);
+ if (packet)
+ dev_kfree_skb(packet);
+
+ return 0;
+ }
+
+ /* for PowerSave */
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) { /* power save wakeup */
+ if (!netif_queue_stopped(priv->net_dev))
+ netif_stop_queue(priv->net_dev);
+ }
+
+ DPRINTK(4, "skb_buff length=%d\n", packet_len);
+ pp = (struct hostif_data_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp) + 6 + packet_len + 8),
+ KS_WLAN_MEM_FLAG);
+
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ dev_kfree_skb(packet);
+ return -2;
+ }
+
+ p = (unsigned char *)pp->data;
+
+ buffer = packet->data;
+ length = packet->len;
+
+ /* packet check */
+	eth = (struct ethhdr *)packet->data;
+	if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN)) {
+		DPRINTK(1, "invalid mac address !!\n");
+		DPRINTK(1, "ethernet->h_source=%02X:%02X:%02X:%02X:%02X:%02X\n",
+			eth->h_source[0], eth->h_source[1], eth->h_source[2],
+			eth->h_source[3], eth->h_source[4], eth->h_source[5]);
+		/* free the skb and the request buffer; returning without
+		 * doing so would leak both on this error path */
+		dev_kfree_skb(packet);
+		kfree(pp);
+		return -3;
+	}
+
+ /* MAC address copy */
+ memcpy(p, buffer, 12); /* DST/SRC MAC address */
+ p += 12;
+ buffer += 12;
+ length -= 12;
+	/* EtherType/Length field: values > 1500 are EtherTypes, so the frame
+	 * gets an LLC/SNAP header; otherwise it is an 802.3 length field,
+	 * which is stripped */
+ if (*(buffer + 1) + (*buffer << 8) > 1500) {
+ /* ProtocolEAP = *(buffer+1) + (*buffer << 8); */
+ /* DPRINTK(2, "Send [SNAP]Type %x\n",ProtocolEAP); */
+ /* SAP/CTL/OUI(6 byte) add */
+ *p++ = 0xAA; /* DSAP */
+ *p++ = 0xAA; /* SSAP */
+ *p++ = 0x03; /* CTL */
+ *p++ = 0x00; /* OUI ("000000") */
+ *p++ = 0x00; /* OUI ("000000") */
+ *p++ = 0x00; /* OUI ("000000") */
+ packet_len += 6;
+ } else {
+ DPRINTK(4, "DIX\n");
+ /* Length(2 byte) delete */
+ buffer += 2;
+ length -= 2;
+ packet_len -= 2;
+ }
+
+ /* pp->data copy */
+ memcpy(p, buffer, length);
+
+ p += length;
+
+ /* for WPA */
+ eth_hdr = (struct ether_hdr *)&pp->data[0];
+ eth_proto = ntohs(eth_hdr->h_proto);
+
+	/* for MIC FAILURE REPORT check */
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
+ && priv->wpa.mic_failure.failure > 0) {
+ aa1x_hdr = (struct ieee802_1x_hdr *)(eth_hdr + 1);
+ if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY) {
+ eap_key = (struct wpa_eapol_key *)(aa1x_hdr + 1);
+ keyinfo = ntohs(eap_key->key_info);
+ }
+ }
+
+ if (priv->wpa.rsn_enabled && priv->wpa.key[0].key_len) {
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
+ && !(priv->wpa.key[1].key_len)
+ && !(priv->wpa.key[2].key_len)
+ && !(priv->wpa.key[3].key_len)) {
+ pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH); /* no encryption */
+ } else {
+			if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
+				MichaelMICFunction(&michel_mic,
+						   (uint8_t *)priv->wpa.key[0].tx_mic_key,
+						   (uint8_t *)&pp->data[0],
+						   (int)packet_len,
+						   (uint8_t)0,	/* priority */
+						   (uint8_t *)michel_mic.Result);
+				/* TKIP: append the 8-byte Michael MIC */
+				memcpy(p, michel_mic.Result, 8);
+				length += 8;
+				packet_len += 8;
+				p += 8;
+				pp->auth_type =
+				    cpu_to_le16((uint16_t)TYPE_DATA);
+			} else if (priv->wpa.pairwise_suite ==
+				   IW_AUTH_CIPHER_CCMP) {
+				pp->auth_type =
+				    cpu_to_le16((uint16_t)TYPE_DATA);
+			}
+ }
+ } else {
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP)
+ pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH);
+ else
+ pp->auth_type = cpu_to_le16((uint16_t) TYPE_DATA);
+ }
+
+ /* header value set */
+ pp->header.size =
+ cpu_to_le16((uint16_t)
+ (sizeof(*pp) - sizeof(pp->header.size) + packet_len));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_DATA_REQ);
+
+ /* tx request */
+ result =
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + packet_len),
+ (void *)send_packet_complete, (void *)priv,
+ (void *)packet);
+
+	/* MIC FAILURE REPORT check */
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
+ && priv->wpa.mic_failure.failure > 0) {
+ if (keyinfo & WPA_KEY_INFO_ERROR
+ && keyinfo & WPA_KEY_INFO_REQUEST) {
+ DPRINTK(3, " MIC ERROR Report SET : %04X\n", keyinfo);
+ hostif_sme_enqueue(priv, SME_MIC_FAILURE_REQUEST);
+ }
+ if (priv->wpa.mic_failure.failure == 2)
+ priv->wpa.mic_failure.stop = 1;
+ }
+
+ return result;
+}
+
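+/*
+ * While the chip is in a power-save state, count each request that still
+ * expects a confirm, so the wakeup path knows how many are outstanding.
+ */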
+#define ps_confirm_wait_inc(priv) do { \
+	if (atomic_read(&(priv)->psstatus.status) > PS_ACTIVE_SET) \
+		atomic_inc(&(priv)->psstatus.confirm_wait); \
+} while (0)
+
+static
+void hostif_mib_get_request(struct ks_wlan_private *priv,
+ unsigned long mib_attribute)
+{
+ struct hostif_mib_get_request_t *pp;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_mib_get_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_MIB_GET_REQ);
+ pp->mib_attribute = cpu_to_le32((uint32_t) mib_attribute);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_mib_set_request(struct ks_wlan_private *priv,
+ unsigned long mib_attribute, unsigned short size,
+ unsigned short type, void *vp)
+{
+ struct hostif_mib_set_request_t *pp;
+
+ DPRINTK(3, "\n");
+
+ if (priv->dev_state < DEVICE_STATE_BOOT) {
+ DPRINTK(3, "DeviceRemove\n");
+ return;
+ }
+
+ /* make primitive */
+ pp = (struct hostif_mib_set_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp) + size), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+
+ pp->header.size =
+ cpu_to_le16((uint16_t)
+ (sizeof(*pp) - sizeof(pp->header.size) + size));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_MIB_SET_REQ);
+ pp->mib_attribute = cpu_to_le32((uint32_t) mib_attribute);
+ pp->mib_value.size = cpu_to_le16((uint16_t) size);
+ pp->mib_value.type = cpu_to_le16((uint16_t) type);
+ memcpy(&pp->mib_value.body, vp, size);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + size), NULL, NULL,
+ NULL);
+}
+
+static
+void hostif_start_request(struct ks_wlan_private *priv, unsigned char mode)
+{
+ struct hostif_start_request_t *pp;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_start_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_START_REQ);
+ pp->mode = cpu_to_le16((uint16_t) mode);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+
+ priv->aplist.size = 0;
+ priv->scan_ind_count = 0;
+}
+
+static
+void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv)
+{
+ struct hostif_ps_adhoc_set_request_t *pp;
+ uint16_t capability;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_ps_adhoc_set_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ memset(pp, 0, sizeof(*pp));
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_PS_ADH_SET_REQ);
+ pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+ pp->channel = cpu_to_le16((uint16_t) (priv->reg.channel));
+ pp->rate_set.size = priv->reg.rate_set.size;
+ memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+ priv->reg.rate_set.size);
+
+	capability = 0x0000;
+	if (priv->reg.preamble == SHORT_PREAMBLE)
+		capability |= BSS_CAP_SHORT_PREAMBLE;	/* short preamble */
+	capability &= ~(BSS_CAP_PBCC);	/* PBCC not supported */
+	if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+		capability |= BSS_CAP_SHORT_SLOT_TIME;	/* short slot time supported */
+		capability &= ~(BSS_CAP_DSSS_OFDM);	/* DSSS-OFDM not supported */
+	}
+	pp->capability = cpu_to_le16((uint16_t)capability);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_infrastructure_set_request(struct ks_wlan_private *priv)
+{
+ struct hostif_infrastructure_set_request_t *pp;
+ uint16_t capability;
+
+ DPRINTK(3, "ssid.size=%d \n", priv->reg.ssid.size);
+
+ /* make primitive */
+ pp = (struct hostif_infrastructure_set_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_INFRA_SET_REQ);
+ pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+
+ pp->rate_set.size = priv->reg.rate_set.size;
+ memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+ priv->reg.rate_set.size);
+ pp->ssid.size = priv->reg.ssid.size;
+ memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+	capability = 0x0000;
+	if (priv->reg.preamble == SHORT_PREAMBLE)
+		capability |= BSS_CAP_SHORT_PREAMBLE;	/* short preamble */
+	capability &= ~(BSS_CAP_PBCC);	/* PBCC not supported */
+	if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+		capability |= BSS_CAP_SHORT_SLOT_TIME;	/* short slot time supported */
+		capability &= ~(BSS_CAP_DSSS_OFDM);	/* DSSS-OFDM not supported */
+	}
+	pp->capability = cpu_to_le16((uint16_t)capability);
+ pp->beacon_lost_count =
+ cpu_to_le16((uint16_t) (priv->reg.beacon_lost_count));
+ pp->auth_type = cpu_to_le16((uint16_t) (priv->reg.authenticate_type));
+
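+	/* channels are listed interleaved (1,8,2,9,...), apparently so that
+	 * consecutive hops land on non-overlapping frequencies */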
+ pp->channel_list.body[0] = 1;
+ pp->channel_list.body[1] = 8;
+ pp->channel_list.body[2] = 2;
+ pp->channel_list.body[3] = 9;
+ pp->channel_list.body[4] = 3;
+ pp->channel_list.body[5] = 10;
+ pp->channel_list.body[6] = 4;
+ pp->channel_list.body[7] = 11;
+ pp->channel_list.body[8] = 5;
+ pp->channel_list.body[9] = 12;
+ pp->channel_list.body[10] = 6;
+ pp->channel_list.body[11] = 13;
+ pp->channel_list.body[12] = 7;
+ if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+ pp->channel_list.size = 13;
+ } else {
+ pp->channel_list.body[13] = 14;
+ pp->channel_list.size = 14;
+ }
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
+{
+ struct hostif_infrastructure_set2_request_t *pp;
+ uint16_t capability;
+
+ DPRINTK(2, "ssid.size=%d \n", priv->reg.ssid.size);
+
+ /* make primitive */
+ pp = (struct hostif_infrastructure_set2_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_INFRA_SET2_REQ);
+ pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+
+ pp->rate_set.size = priv->reg.rate_set.size;
+ memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+ priv->reg.rate_set.size);
+ pp->ssid.size = priv->reg.ssid.size;
+ memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+	capability = 0x0000;
+	if (priv->reg.preamble == SHORT_PREAMBLE)
+		capability |= BSS_CAP_SHORT_PREAMBLE;	/* short preamble */
+	capability &= ~(BSS_CAP_PBCC);	/* PBCC not supported */
+	if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+		capability |= BSS_CAP_SHORT_SLOT_TIME;	/* short slot time supported */
+		capability &= ~(BSS_CAP_DSSS_OFDM);	/* DSSS-OFDM not supported */
+	}
+	pp->capability = cpu_to_le16((uint16_t)capability);
+ pp->beacon_lost_count =
+ cpu_to_le16((uint16_t) (priv->reg.beacon_lost_count));
+ pp->auth_type = cpu_to_le16((uint16_t) (priv->reg.authenticate_type));
+
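+	/* same interleaved channel order as hostif_infrastructure_set_request() */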
+ pp->channel_list.body[0] = 1;
+ pp->channel_list.body[1] = 8;
+ pp->channel_list.body[2] = 2;
+ pp->channel_list.body[3] = 9;
+ pp->channel_list.body[4] = 3;
+ pp->channel_list.body[5] = 10;
+ pp->channel_list.body[6] = 4;
+ pp->channel_list.body[7] = 11;
+ pp->channel_list.body[8] = 5;
+ pp->channel_list.body[9] = 12;
+ pp->channel_list.body[10] = 6;
+ pp->channel_list.body[11] = 13;
+ pp->channel_list.body[12] = 7;
+ if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+ pp->channel_list.size = 13;
+ } else {
+ pp->channel_list.body[13] = 14;
+ pp->channel_list.size = 14;
+ }
+
+ memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_adhoc_set_request(struct ks_wlan_private *priv)
+{
+ struct hostif_adhoc_set_request_t *pp;
+ uint16_t capability;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_adhoc_set_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ memset(pp, 0, sizeof(*pp));
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_ADH_SET_REQ);
+ pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+ pp->channel = cpu_to_le16((uint16_t) (priv->reg.channel));
+ pp->rate_set.size = priv->reg.rate_set.size;
+ memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+ priv->reg.rate_set.size);
+ pp->ssid.size = priv->reg.ssid.size;
+ memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+	capability = 0x0000;
+	if (priv->reg.preamble == SHORT_PREAMBLE)
+		capability |= BSS_CAP_SHORT_PREAMBLE;	/* short preamble */
+	capability &= ~(BSS_CAP_PBCC);	/* PBCC not supported */
+	if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+		capability |= BSS_CAP_SHORT_SLOT_TIME;	/* short slot time supported */
+		capability &= ~(BSS_CAP_DSSS_OFDM);	/* DSSS-OFDM not supported */
+	}
+	pp->capability = cpu_to_le16((uint16_t)capability);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
+{
+ struct hostif_adhoc_set2_request_t *pp;
+ uint16_t capability;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_adhoc_set2_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ memset(pp, 0, sizeof(*pp));
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_ADH_SET_REQ);
+ pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+ pp->rate_set.size = priv->reg.rate_set.size;
+ memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+ priv->reg.rate_set.size);
+ pp->ssid.size = priv->reg.ssid.size;
+ memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+	capability = 0x0000;
+	if (priv->reg.preamble == SHORT_PREAMBLE)
+		capability |= BSS_CAP_SHORT_PREAMBLE;	/* short preamble */
+	capability &= ~(BSS_CAP_PBCC);	/* PBCC not supported */
+	if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+		capability |= BSS_CAP_SHORT_SLOT_TIME;	/* short slot time supported */
+		capability &= ~(BSS_CAP_DSSS_OFDM);	/* DSSS-OFDM not supported */
+	}
+	pp->capability = cpu_to_le16((uint16_t)capability);
+
+ pp->channel_list.body[0] = priv->reg.channel;
+ pp->channel_list.size = 1;
+ memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_stop_request(struct ks_wlan_private *priv)
+{
+ struct hostif_stop_request_t *pp;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_stop_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_STOP_REQ);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_phy_information_request(struct ks_wlan_private *priv)
+{
+ struct hostif_phy_information_request_t *pp;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_phy_information_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_PHY_INFO_REQ);
+ if (priv->reg.phy_info_timer) {
+ pp->type = cpu_to_le16((uint16_t) TIME_TYPE);
+ pp->time = cpu_to_le16((uint16_t) (priv->reg.phy_info_timer));
+ } else {
+ pp->type = cpu_to_le16((uint16_t) NORMAL_TYPE);
+ pp->time = cpu_to_le16((uint16_t) 0);
+ }
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_power_mngmt_request(struct ks_wlan_private *priv,
+ unsigned long mode, unsigned long wake_up,
+ unsigned long receiveDTIMs)
+{
+ struct hostif_power_mngmt_request_t *pp;
+
+ DPRINTK(3, "mode=%lu wake_up=%lu receiveDTIMs=%lu\n", mode, wake_up,
+ receiveDTIMs);
+ /* make primitive */
+ pp = (struct hostif_power_mngmt_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_POWERMGT_REQ);
+ pp->mode = cpu_to_le32((uint32_t) mode);
+ pp->wake_up = cpu_to_le32((uint32_t) wake_up);
+ pp->receiveDTIMs = cpu_to_le32((uint32_t) receiveDTIMs);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_sleep_request(struct ks_wlan_private *priv, unsigned long mode)
+{
+ struct hostif_sleep_request_t *pp;
+
+ DPRINTK(3, "mode=%lu \n", mode);
+
+ if (mode == SLP_SLEEP) {
+ /* make primitive */
+ pp = (struct hostif_sleep_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t)
+ (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_SLEEP_REQ);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL,
+ NULL);
+ } else if (mode == SLP_ACTIVE) {
+ atomic_set(&priv->sleepstatus.wakeup_request, 1);
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ } else {
+ DPRINTK(3, "invalid mode %ld \n", mode);
+ return;
+ }
+}
+
+static
+void hostif_bss_scan_request(struct ks_wlan_private *priv,
+			     unsigned long scan_type, uint8_t *scan_ssid,
+			     uint8_t scan_ssid_len)
+{
+ struct hostif_bss_scan_request_t *pp;
+
+ DPRINTK(2, "\n");
+ /* make primitive */
+ pp = (struct hostif_bss_scan_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_SCAN_REQ);
+ pp->scan_type = scan_type;
+
+ pp->ch_time_min = cpu_to_le32((uint32_t) 110); /* default value */
+ pp->ch_time_max = cpu_to_le32((uint32_t) 130); /* default value */
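+	/* scan the channels in the same interleaved (1,8,2,9,...) order used
+	 * by the infrastructure-set requests */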
+ pp->channel_list.body[0] = 1;
+ pp->channel_list.body[1] = 8;
+ pp->channel_list.body[2] = 2;
+ pp->channel_list.body[3] = 9;
+ pp->channel_list.body[4] = 3;
+ pp->channel_list.body[5] = 10;
+ pp->channel_list.body[6] = 4;
+ pp->channel_list.body[7] = 11;
+ pp->channel_list.body[8] = 5;
+ pp->channel_list.body[9] = 12;
+ pp->channel_list.body[10] = 6;
+ pp->channel_list.body[11] = 13;
+ pp->channel_list.body[12] = 7;
+ if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+ pp->channel_list.size = 13;
+ } else {
+ pp->channel_list.body[13] = 14;
+ pp->channel_list.size = 14;
+ }
+ pp->ssid.size = 0;
+
+ /* specified SSID SCAN */
+ if (scan_ssid_len > 0 && scan_ssid_len <= 32) {
+ pp->ssid.size = scan_ssid_len;
+ memcpy(&pp->ssid.body[0], scan_ssid, scan_ssid_len);
+ }
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+
+ priv->aplist.size = 0;
+ priv->scan_ind_count = 0;
+}
+
+static
+void hostif_mic_failure_request(struct ks_wlan_private *priv,
+ unsigned short failure_count,
+ unsigned short timer)
+{
+ struct hostif_mic_failure_request_t *pp;
+
+ DPRINTK(3, "count=%d :: timer=%d\n", failure_count, timer);
+ /* make primitive */
+ pp = (struct hostif_mic_failure_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_MIC_FAILURE_REQ);
+ pp->failure_count = cpu_to_le16((uint16_t) failure_count);
+ pp->timer = cpu_to_le16((uint16_t) timer);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+/* Device I/O receive indication */
+static void devio_rec_ind(struct ks_wlan_private *priv, unsigned char *p,
+ unsigned int size)
+{
+ if (priv->device_open_status) {
+ spin_lock(&priv->dev_read_lock); /* request spin lock */
+ priv->dev_data[atomic_read(&priv->rec_count)] = p;
+ priv->dev_size[atomic_read(&priv->rec_count)] = size;
+
+ if (atomic_read(&priv->event_count) != DEVICE_STOCK_COUNT) {
+ /* rx event count inc */
+ atomic_inc(&priv->event_count);
+ }
+ atomic_inc(&priv->rec_count);
+ if (atomic_read(&priv->rec_count) == DEVICE_STOCK_COUNT)
+ atomic_set(&priv->rec_count, 0);
+
+ wake_up_interruptible_all(&priv->devread_wait);
+
+ /* release spin lock */
+ spin_unlock(&priv->dev_read_lock);
+ }
+}
+
+void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
+ unsigned int size)
+{
+ DPRINTK(4, "\n");
+
+ devio_rec_ind(priv, p, size);
+
+ priv->rxp = p;
+ priv->rx_size = size;
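+	/* the get_BYTE/get_WORD/get_DWORD helpers consume fields from rxp,
+	 * starting with the 16-bit frame length checked below */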
+
+ if (get_WORD(priv) == priv->rx_size) { /* length check !! */
+ hostif_event_check(priv); /* event check */
+ }
+}
+
+static
+void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
+{
+	uint32_t val;
+
+	switch (type) {
+ case SME_WEP_INDEX_REQUEST:
+ val = cpu_to_le32((uint32_t) (priv->reg.wep_index));
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_ID,
+ sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ break;
+ case SME_WEP_KEY1_REQUEST:
+ if (!priv->wpa.wpa_enabled)
+ hostif_mib_set_request(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE1,
+ priv->reg.wep_key[0].size,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->reg.wep_key[0].val[0]);
+ break;
+ case SME_WEP_KEY2_REQUEST:
+ if (!priv->wpa.wpa_enabled)
+ hostif_mib_set_request(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE2,
+ priv->reg.wep_key[1].size,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->reg.wep_key[1].val[0]);
+ break;
+ case SME_WEP_KEY3_REQUEST:
+ if (!priv->wpa.wpa_enabled)
+ hostif_mib_set_request(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE3,
+ priv->reg.wep_key[2].size,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->reg.wep_key[2].val[0]);
+ break;
+ case SME_WEP_KEY4_REQUEST:
+ if (!priv->wpa.wpa_enabled)
+ hostif_mib_set_request(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE4,
+ priv->reg.wep_key[3].size,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->reg.wep_key[3].val[0]);
+ break;
+ case SME_WEP_FLAG_REQUEST:
+ val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
+ hostif_mib_set_request(priv, DOT11_PRIVACY_INVOKED,
+ sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+ break;
+ }
+}
+
+struct wpa_suite_t {
+ unsigned short size;
+ unsigned char suite[4][CIPHER_ID_LEN];
+} __attribute__ ((packed));
+
+struct rsn_mode_t {
+ uint32_t rsn_mode;
+ uint16_t rsn_capability;
+} __attribute__ ((packed));
+
+static
+void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
+{
+ struct wpa_suite_t wpa_suite;
+ struct rsn_mode_t rsn_mode;
+ uint32_t val;
+
+ memset(&wpa_suite, 0, sizeof(wpa_suite));
+
+ switch (type) {
+ case SME_RSN_UCAST_REQUEST:
+ wpa_suite.size = cpu_to_le16((uint16_t) 1);
+ switch (priv->wpa.pairwise_suite) {
+ case IW_AUTH_CIPHER_NONE:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_NONE, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_NONE, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_WEP40:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_WEP40, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_WEP40, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_TKIP:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_TKIP, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_TKIP, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_CCMP:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_CCMP, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_CCMP, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_WEP104:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_WEP104, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_WEP104, CIPHER_ID_LEN);
+ break;
+ }
+
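+		/* FIXME: wpa_suite.size was stored with cpu_to_le16(), so the
+		 * byte-count arithmetic below is only correct on a
+		 * little-endian host */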
+ hostif_mib_set_request(priv, DOT11_RSN_CONFIG_UNICAST_CIPHER,
+ sizeof(wpa_suite.size) +
+ CIPHER_ID_LEN * wpa_suite.size,
+ MIB_VALUE_TYPE_OSTRING, &wpa_suite);
+ break;
+ case SME_RSN_MCAST_REQUEST:
+ switch (priv->wpa.group_suite) {
+ case IW_AUTH_CIPHER_NONE:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_NONE, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_NONE, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_WEP40:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_WEP40, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_WEP40, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_TKIP:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_TKIP, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_TKIP, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_CCMP:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_CCMP, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_CCMP, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_WEP104:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_WEP104, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_WEP104, CIPHER_ID_LEN);
+ break;
+ }
+
+ hostif_mib_set_request(priv, DOT11_RSN_CONFIG_MULTICAST_CIPHER,
+ CIPHER_ID_LEN, MIB_VALUE_TYPE_OSTRING,
+ &wpa_suite.suite[0][0]);
+ break;
+ case SME_RSN_AUTH_REQUEST:
+ wpa_suite.size = cpu_to_le16((uint16_t) 1);
+ switch (priv->wpa.key_mgmt_suite) {
+ case IW_AUTH_KEY_MGMT_802_1X:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA2_1X, KEY_MGMT_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA_1X, KEY_MGMT_ID_LEN);
+ break;
+ case IW_AUTH_KEY_MGMT_PSK:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA2_PSK, KEY_MGMT_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA_PSK, KEY_MGMT_ID_LEN);
+ break;
+ case 0:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA2_NONE, KEY_MGMT_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA_NONE, KEY_MGMT_ID_LEN);
+ break;
+ case 4:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA2_WPANONE,
+ KEY_MGMT_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA_WPANONE,
+ KEY_MGMT_ID_LEN);
+ break;
+ }
+
+ hostif_mib_set_request(priv, DOT11_RSN_CONFIG_AUTH_SUITE,
+ sizeof(wpa_suite.size) +
+ KEY_MGMT_ID_LEN * wpa_suite.size,
+ MIB_VALUE_TYPE_OSTRING, &wpa_suite);
+ break;
+ case SME_RSN_ENABLED_REQUEST:
+ val = cpu_to_le32((uint32_t) (priv->wpa.rsn_enabled));
+ hostif_mib_set_request(priv, DOT11_RSN_ENABLED,
+ sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+ break;
+ case SME_RSN_MODE_REQUEST:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) {
+ rsn_mode.rsn_mode =
+ cpu_to_le32((uint32_t) RSN_MODE_WPA2);
+ rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+ } else if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA) {
+ rsn_mode.rsn_mode =
+ cpu_to_le32((uint32_t) RSN_MODE_WPA);
+ rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+ } else {
+ rsn_mode.rsn_mode =
+ cpu_to_le32((uint32_t) RSN_MODE_NONE);
+ rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+ }
+ hostif_mib_set_request(priv, LOCAL_RSN_MODE, sizeof(rsn_mode),
+ MIB_VALUE_TYPE_OSTRING, &rsn_mode);
+ break;
+
+ }
+}
+
+static
+void hostif_sme_mode_setup(struct ks_wlan_private *priv)
+{
+ unsigned char rate_size;
+ unsigned char rate_octet[RATE_SET_MAX_SIZE];
+ int i = 0;
+
+	/* if the TX rate is full auto, rebuild the rate set to match the
+	 * phy_type (#94) */
+ if (priv->reg.tx_rate == TX_RATE_FULL_AUTO) {
+ if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+ priv->reg.rate_set.body[3] = TX_RATE_11M;
+ priv->reg.rate_set.body[2] = TX_RATE_5M;
+ priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
+ priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
+ priv->reg.rate_set.size = 4;
+ } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+ priv->reg.rate_set.body[11] = TX_RATE_54M;
+ priv->reg.rate_set.body[10] = TX_RATE_48M;
+ priv->reg.rate_set.body[9] = TX_RATE_36M;
+ priv->reg.rate_set.body[8] = TX_RATE_18M;
+ priv->reg.rate_set.body[7] = TX_RATE_9M;
+ priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
+ priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
+ priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
+ priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
+ priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
+ priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
+ priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
+ priv->reg.rate_set.size = 12;
+ }
+ }
+
+	/* rate mask by phy setting */
+	if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+		for (i = 0; i < priv->reg.rate_set.size; i++) {
+			if (!IS_11B_RATE(priv->reg.rate_set.body[i]))
+				break;
+			if ((priv->reg.rate_set.body[i] & RATE_MASK) >=
+			    TX_RATE_5M)
+				rate_octet[i] =
+				    priv->reg.rate_set.body[i] & RATE_MASK;
+			else
+				rate_octet[i] = priv->reg.rate_set.body[i];
+		}
+	} else {	/* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+		for (i = 0; i < priv->reg.rate_set.size; i++) {
+			if (!IS_11BG_RATE(priv->reg.rate_set.body[i]))
+				break;
+			if (IS_OFDM_EXT_RATE(priv->reg.rate_set.body[i]))
+				rate_octet[i] =
+				    priv->reg.rate_set.body[i] & RATE_MASK;
+			else
+				rate_octet[i] = priv->reg.rate_set.body[i];
+		}
+	}
+ rate_size = i;
+ if (rate_size == 0) {
+ if (priv->reg.phy_type == D_11G_ONLY_MODE)
+ rate_octet[0] = TX_RATE_6M | BASIC_RATE;
+ else
+ rate_octet[0] = TX_RATE_2M | BASIC_RATE;
+ rate_size = 1;
+ }
+
+ /* rate set update */
+ priv->reg.rate_set.size = rate_size;
+ memcpy(&priv->reg.rate_set.body[0], &rate_octet[0], rate_size);
+
+ switch (priv->reg.operation_mode) {
+ case MODE_PSEUDO_ADHOC:
+ /* Pseudo Ad-Hoc mode */
+ hostif_ps_adhoc_set_request(priv);
+ break;
+ case MODE_INFRASTRUCTURE:
+ /* Infrastructure mode */
+ if (!is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+ hostif_infrastructure_set_request(priv);
+ } else {
+ hostif_infrastructure_set2_request(priv);
+ DPRINTK(2,
+ "Infra bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ priv->reg.bssid[0], priv->reg.bssid[1],
+ priv->reg.bssid[2], priv->reg.bssid[3],
+ priv->reg.bssid[4], priv->reg.bssid[5]);
+ }
+ break;
+ case MODE_ADHOC:
+ /* IEEE802.11 Ad-Hoc mode */
+ if (!is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+ hostif_adhoc_set_request(priv);
+ } else {
+ hostif_adhoc_set2_request(priv);
+ DPRINTK(2,
+ "Adhoc bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ priv->reg.bssid[0], priv->reg.bssid[1],
+ priv->reg.bssid[2], priv->reg.bssid[3],
+ priv->reg.bssid[4], priv->reg.bssid[5]);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static
+void hostif_sme_multicast_set(struct ks_wlan_private *priv)
+{
+	struct net_device *dev = priv->net_dev;
+ int mc_count;
+ struct netdev_hw_addr *ha;
+ char set_address[NIC_MAX_MCAST_LIST * ETH_ALEN];
+ unsigned long filter_type;
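+	/* FIXME: filter_type is an unsigned long but is written with
+	 * cpu_to_le32() and sent with sizeof(filter_type), which is 8 bytes
+	 * on 64-bit hosts */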
+ int i = 0;
+
+ DPRINTK(3, "\n");
+
+ spin_lock(&priv->multicast_spin);
+
+ memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
+
+ if (dev->flags & IFF_PROMISC) {
+ filter_type = cpu_to_le32((uint32_t) MCAST_FILTER_PROMISC);
+ hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
+ sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
+ &filter_type);
+ } else if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST)
+ || (dev->flags & IFF_ALLMULTI)) {
+ filter_type = cpu_to_le32((uint32_t) MCAST_FILTER_MCASTALL);
+ hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
+ sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
+ &filter_type);
+ } else {
+ if (priv->sme_i.sme_flag & SME_MULTICAST) {
+ mc_count = netdev_mc_count(dev);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(&set_address[i * ETH_ALEN], ha->addr,
+ ETH_ALEN);
+ i++;
+ }
+ priv->sme_i.sme_flag &= ~SME_MULTICAST;
+ hostif_mib_set_request(priv, LOCAL_MULTICAST_ADDRESS,
+ (ETH_ALEN * mc_count),
+ MIB_VALUE_TYPE_OSTRING,
+ &set_address[0]);
+ } else {
+ filter_type =
+ cpu_to_le32((uint32_t) MCAST_FILTER_MCAST);
+ priv->sme_i.sme_flag |= SME_MULTICAST;
+ hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
+ sizeof(filter_type),
+ MIB_VALUE_TYPE_BOOL,
+ &filter_type);
+ }
+ }
+
+	spin_unlock(&priv->multicast_spin);
+}
+
+static
+void hostif_sme_powermgt_set(struct ks_wlan_private *priv)
+{
+ unsigned long mode, wake_up, receiveDTIMs;
+
+ DPRINTK(3, "\n");
+ switch (priv->reg.powermgt) {
+ case POWMGT_ACTIVE_MODE:
+ mode = POWER_ACTIVE;
+ wake_up = 0;
+ receiveDTIMs = 0;
+ break;
+ case POWMGT_SAVE1_MODE:
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+ mode = POWER_SAVE;
+ wake_up = 0;
+ receiveDTIMs = 0;
+ } else {
+ mode = POWER_ACTIVE;
+ wake_up = 0;
+ receiveDTIMs = 0;
+ }
+ break;
+ case POWMGT_SAVE2_MODE:
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+ mode = POWER_SAVE;
+ wake_up = 0;
+ receiveDTIMs = 1;
+ } else {
+ mode = POWER_ACTIVE;
+ wake_up = 0;
+ receiveDTIMs = 0;
+ }
+ break;
+ default:
+ mode = POWER_ACTIVE;
+ wake_up = 0;
+ receiveDTIMs = 0;
+ break;
+ }
+ hostif_power_mngmt_request(priv, mode, wake_up, receiveDTIMs);
+}
+
+static
+void hostif_sme_sleep_set(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "\n");
+ switch (priv->sleep_mode) {
+ case SLP_SLEEP:
+ hostif_sleep_request(priv, priv->sleep_mode);
+ break;
+ case SLP_ACTIVE:
+ hostif_sleep_request(priv, priv->sleep_mode);
+ break;
+ default:
+ break;
+ }
+}
+
+static
+void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
+{
+	uint32_t val;
+
+	switch (type) {
+ case SME_SET_FLAG:
+ val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
+ hostif_mib_set_request(priv, DOT11_PRIVACY_INVOKED,
+ sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+ break;
+ case SME_SET_TXKEY:
+ val = cpu_to_le32((uint32_t) (priv->wpa.txkey));
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_ID,
+ sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ break;
+ case SME_SET_KEY1:
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE1,
+ priv->wpa.key[0].key_len,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[0].key_val[0]);
+ break;
+ case SME_SET_KEY2:
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE2,
+ priv->wpa.key[1].key_len,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[1].key_val[0]);
+ break;
+ case SME_SET_KEY3:
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE3,
+ priv->wpa.key[2].key_len,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[2].key_val[0]);
+ break;
+ case SME_SET_KEY4:
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE4,
+ priv->wpa.key[3].key_len,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[3].key_val[0]);
+ break;
+ case SME_SET_PMK_TSC:
+ hostif_mib_set_request(priv, DOT11_PMK_TSC,
+ WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[0].rx_seq[0]);
+ break;
+ case SME_SET_GMK1_TSC:
+ hostif_mib_set_request(priv, DOT11_GMK1_TSC,
+ WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[1].rx_seq[0]);
+ break;
+ case SME_SET_GMK2_TSC:
+ hostif_mib_set_request(priv, DOT11_GMK2_TSC,
+ WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[2].rx_seq[0]);
+ break;
+ }
+}
+
+static
+void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
+{
+ struct pmk_cache_t {
+ uint16_t size;
+ struct {
+ uint8_t bssid[ETH_ALEN];
+ uint8_t pmkid[IW_PMKID_LEN];
+ } __attribute__ ((packed)) list[PMK_LIST_MAX];
+ } __attribute__ ((packed)) pmkcache;
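+	/* wire format: a 16-bit entry count followed by (bssid, pmkid) pairs */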
+ struct pmk_t *pmk;
+ struct list_head *ptr;
+ int i;
+
+ DPRINTK(4, "pmklist.size=%d\n", priv->pmklist.size);
+ i = 0;
+ list_for_each(ptr, &priv->pmklist.head) {
+ pmk = list_entry(ptr, struct pmk_t, list);
+ if (i < PMK_LIST_MAX) {
+ memcpy(pmkcache.list[i].bssid, pmk->bssid, ETH_ALEN);
+ memcpy(pmkcache.list[i].pmkid, pmk->pmkid,
+ IW_PMKID_LEN);
+ i++;
+ }
+ }
+ pmkcache.size = cpu_to_le16((uint16_t) (priv->pmklist.size));
+ hostif_mib_set_request(priv, LOCAL_PMK,
+ sizeof(priv->pmklist.size) + (ETH_ALEN +
+ IW_PMKID_LEN) *
+ (priv->pmklist.size), MIB_VALUE_TYPE_OSTRING,
+ &pmkcache);
+}
+
+/* execute sme */
+static
+void hostif_sme_execute(struct ks_wlan_private *priv, int event)
+{
+ uint32_t val;
+
+ DPRINTK(3, "event=%d\n", event);
+ switch (event) {
+ case SME_START:
+ if (priv->dev_state == DEVICE_STATE_BOOT) {
+ hostif_mib_get_request(priv, DOT11_MAC_ADDRESS);
+ }
+ break;
+ case SME_MULTICAST_REQUEST:
+ hostif_sme_multicast_set(priv);
+ break;
+ case SME_MACADDRESS_SET_REQUEST:
+ hostif_mib_set_request(priv, LOCAL_CURRENTADDRESS, ETH_ALEN,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->eth_addr[0]);
+ break;
+ case SME_BSS_SCAN_REQUEST:
+ hostif_bss_scan_request(priv, priv->reg.scan_type,
+ priv->scan_ssid, priv->scan_ssid_len);
+ break;
+ case SME_POW_MNGMT_REQUEST:
+ hostif_sme_powermgt_set(priv);
+ break;
+ case SME_PHY_INFO_REQUEST:
+ hostif_phy_information_request(priv);
+ break;
+ case SME_MIC_FAILURE_REQUEST:
+ if (priv->wpa.mic_failure.failure == 1) {
+ hostif_mic_failure_request(priv,
+ priv->wpa.mic_failure.
+ failure - 1, 0);
+ } else if (priv->wpa.mic_failure.failure == 2) {
+ hostif_mic_failure_request(priv,
+ priv->wpa.mic_failure.
+ failure - 1,
+ priv->wpa.mic_failure.
+ counter);
+ } else
+ DPRINTK(4,
+ "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n",
+ priv->wpa.mic_failure.failure);
+ break;
+ case SME_MIC_FAILURE_CONFIRM:
+ if (priv->wpa.mic_failure.failure == 2) {
+ if (priv->wpa.mic_failure.stop)
+ priv->wpa.mic_failure.stop = 0;
+ priv->wpa.mic_failure.failure = 0;
+ hostif_start_request(priv, priv->reg.operation_mode);
+ }
+ break;
+ case SME_GET_MAC_ADDRESS:
+ if (priv->dev_state == DEVICE_STATE_BOOT) {
+ hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION);
+ }
+ break;
+ case SME_GET_PRODUCT_VERSION:
+ if (priv->dev_state == DEVICE_STATE_BOOT) {
+ priv->dev_state = DEVICE_STATE_PREINIT;
+ }
+ break;
+ case SME_STOP_REQUEST:
+ hostif_stop_request(priv);
+ break;
+ case SME_RTS_THRESHOLD_REQUEST:
+ val = cpu_to_le32((uint32_t) (priv->reg.rts));
+ hostif_mib_set_request(priv, DOT11_RTS_THRESHOLD,
+ sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ break;
+ case SME_FRAGMENTATION_THRESHOLD_REQUEST:
+ val = cpu_to_le32((uint32_t) (priv->reg.fragment));
+ hostif_mib_set_request(priv, DOT11_FRAGMENTATION_THRESHOLD,
+ sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ break;
+ case SME_WEP_INDEX_REQUEST:
+ case SME_WEP_KEY1_REQUEST:
+ case SME_WEP_KEY2_REQUEST:
+ case SME_WEP_KEY3_REQUEST:
+ case SME_WEP_KEY4_REQUEST:
+ case SME_WEP_FLAG_REQUEST:
+ hostif_sme_set_wep(priv, event);
+ break;
+ case SME_RSN_UCAST_REQUEST:
+ case SME_RSN_MCAST_REQUEST:
+ case SME_RSN_AUTH_REQUEST:
+ case SME_RSN_ENABLED_REQUEST:
+ case SME_RSN_MODE_REQUEST:
+ hostif_sme_set_rsn(priv, event);
+ break;
+ case SME_SET_FLAG:
+ case SME_SET_TXKEY:
+ case SME_SET_KEY1:
+ case SME_SET_KEY2:
+ case SME_SET_KEY3:
+ case SME_SET_KEY4:
+ case SME_SET_PMK_TSC:
+ case SME_SET_GMK1_TSC:
+ case SME_SET_GMK2_TSC:
+ hostif_sme_set_key(priv, event);
+ break;
+ case SME_SET_PMKSA:
+ hostif_sme_set_pmksa(priv);
+ break;
+#ifdef WPS
+ case SME_WPS_ENABLE_REQUEST:
+ hostif_mib_set_request(priv, LOCAL_WPS_ENABLE,
+ sizeof(priv->wps.wps_enabled),
+ MIB_VALUE_TYPE_INT,
+ &priv->wps.wps_enabled);
+ break;
+ case SME_WPS_PROBE_REQUEST:
+ hostif_mib_set_request(priv, LOCAL_WPS_PROBE_REQ,
+ priv->wps.ielen,
+ MIB_VALUE_TYPE_OSTRING, priv->wps.ie);
+ break;
+#endif /* WPS */
+ case SME_MODE_SET_REQUEST:
+ hostif_sme_mode_setup(priv);
+ break;
+ case SME_SET_GAIN:
+ hostif_mib_set_request(priv, LOCAL_GAIN,
+ sizeof(priv->gain),
+ MIB_VALUE_TYPE_OSTRING, &priv->gain);
+ break;
+ case SME_GET_GAIN:
+ hostif_mib_get_request(priv, LOCAL_GAIN);
+ break;
+ case SME_GET_EEPROM_CKSUM:
+ priv->eeprom_checksum = EEPROM_FW_NOT_SUPPORT; /* initialize */
+ hostif_mib_get_request(priv, LOCAL_EEPROM_SUM);
+ break;
+ case SME_START_REQUEST:
+ hostif_start_request(priv, priv->reg.operation_mode);
+ break;
+ case SME_START_CONFIRM:
+ /* for power save */
+ atomic_set(&priv->psstatus.snooze_guard, 0);
+ atomic_set(&priv->psstatus.confirm_wait, 0);
+ if (priv->dev_state == DEVICE_STATE_PREINIT) {
+ priv->dev_state = DEVICE_STATE_INIT;
+ }
+		complete(&priv->confirm_wait);
+ break;
+ case SME_SLEEP_REQUEST:
+ hostif_sme_sleep_set(priv);
+ break;
+ case SME_SET_REGION:
+ val = cpu_to_le32((uint32_t) (priv->region));
+ hostif_mib_set_request(priv, LOCAL_REGION,
+ sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ break;
+ case SME_MULTICAST_CONFIRM:
+ case SME_BSS_SCAN_CONFIRM:
+ case SME_POW_MNGMT_CONFIRM:
+ case SME_PHY_INFO_CONFIRM:
+ case SME_STOP_CONFIRM:
+ case SME_RTS_THRESHOLD_CONFIRM:
+ case SME_FRAGMENTATION_THRESHOLD_CONFIRM:
+ case SME_WEP_INDEX_CONFIRM:
+ case SME_WEP_KEY1_CONFIRM:
+ case SME_WEP_KEY2_CONFIRM:
+ case SME_WEP_KEY3_CONFIRM:
+ case SME_WEP_KEY4_CONFIRM:
+ case SME_WEP_FLAG_CONFIRM:
+ case SME_RSN_UCAST_CONFIRM:
+ case SME_RSN_MCAST_CONFIRM:
+ case SME_RSN_AUTH_CONFIRM:
+ case SME_RSN_ENABLED_CONFIRM:
+ case SME_RSN_MODE_CONFIRM:
+ case SME_MODE_SET_CONFIRM:
+ break;
+ case SME_TERMINATE:
+ default:
+ break;
+ }
+}
+
+static
+void hostif_sme_task(unsigned long dev)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+
+ DPRINTK(3, "\n");
+
+	if (priv->dev_state < DEVICE_STATE_BOOT)
+		return;
+
+	if (cnt_smeqbody(priv) > 0) {
+		hostif_sme_execute(priv,
+				   priv->sme_i.event_buff[priv->sme_i.qhead]);
+		inc_smeqhead(priv);
+		/* keep draining: reschedule while events remain queued */
+		if (cnt_smeqbody(priv) > 0)
+			tasklet_schedule(&priv->sme_task);
+	}
+}
+
+/* send to Station Management Entity module */
+void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
+{
+ DPRINTK(3, "\n");
+
+	/* enqueue sme event */
+	if (cnt_smeqbody(priv) < (SME_EVENT_BUFF_SIZE - 1)) {
+		priv->sme_i.event_buff[priv->sme_i.qtail] = event;
+		inc_smeqtail(priv);
+#ifdef KS_WLAN_DEBUG
+		if (priv->sme_i.max_event_count < cnt_smeqbody(priv))
+			priv->sme_i.max_event_count = cnt_smeqbody(priv);
+#endif /* KS_WLAN_DEBUG */
+	} else {
+		/* queue full: the event is dropped */
+		printk(KERN_ERR "sme queue buffer overflow\n");
+	}
+
+	tasklet_schedule(&priv->sme_task);
+}
+
+int hostif_init(struct ks_wlan_private *priv)
+{
+ int rc = 0;
+ int i;
+
+ DPRINTK(3, "\n");
+
+ priv->aplist.size = 0;
+ for (i = 0; i < LOCAL_APLIST_MAX; i++)
+ memset(&(priv->aplist.ap[i]), 0, sizeof(struct local_ap_t));
+ priv->infra_status = 0;
+ priv->current_rate = 4;
+ priv->connect_status = DISCONNECT_STATUS;
+
+ spin_lock_init(&priv->multicast_spin);
+
+ spin_lock_init(&priv->dev_read_lock);
+ init_waitqueue_head(&priv->devread_wait);
+ priv->dev_count = 0;
+ atomic_set(&priv->event_count, 0);
+ atomic_set(&priv->rec_count, 0);
+
+ /* for power save */
+ atomic_set(&priv->psstatus.status, PS_NONE);
+ atomic_set(&priv->psstatus.confirm_wait, 0);
+ atomic_set(&priv->psstatus.snooze_guard, 0);
+	init_completion(&priv->psstatus.wakeup_wait);
+	INIT_WORK(&priv->ks_wlan_wakeup_task, ks_wlan_hw_wakeup_task);
+
+ /* WPA */
+ memset(&(priv->wpa), 0, sizeof(priv->wpa));
+ priv->wpa.rsn_enabled = 0;
+ priv->wpa.mic_failure.failure = 0;
+ priv->wpa.mic_failure.last_failure_time = 0;
+ priv->wpa.mic_failure.stop = 0;
+ memset(&(priv->pmklist), 0, sizeof(priv->pmklist));
+ INIT_LIST_HEAD(&priv->pmklist.head);
+ for (i = 0; i < PMK_LIST_MAX; i++)
+ INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
+
+ priv->sme_i.sme_status = SME_IDLE;
+ priv->sme_i.qhead = priv->sme_i.qtail = 0;
+#ifdef KS_WLAN_DEBUG
+ priv->sme_i.max_event_count = 0;
+#endif
+ spin_lock_init(&priv->sme_i.sme_spin);
+ priv->sme_i.sme_flag = 0;
+
+ tasklet_init(&priv->sme_task, hostif_sme_task, (unsigned long)priv);
+
+ return rc;
+}
+
+void hostif_exit(struct ks_wlan_private *priv)
+{
+ tasklet_kill(&priv->sme_task);
+}
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
new file mode 100644
index 000000000000..dc806b5b47be
--- /dev/null
+++ b/drivers/staging/ks7010/ks_hostif.h
@@ -0,0 +1,644 @@
+/*
+ * Driver for KeyStream wireless LAN
+ *
+ * Copyright (c) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KS_HOSTIF_H_
+#define _KS_HOSTIF_H_
+/*
+ * HOST-MAC I/F events
+ */
+#define HIF_DATA_REQ 0xE001
+#define HIF_DATA_IND 0xE801
+#define HIF_MIB_GET_REQ 0xE002
+#define HIF_MIB_GET_CONF 0xE802
+#define HIF_MIB_SET_REQ 0xE003
+#define HIF_MIB_SET_CONF 0xE803
+#define HIF_POWERMGT_REQ 0xE004
+#define HIF_POWERMGT_CONF 0xE804
+#define HIF_START_REQ 0xE005
+#define HIF_START_CONF 0xE805
+#define HIF_CONNECT_IND 0xE806
+#define HIF_STOP_REQ 0xE006
+#define HIF_STOP_CONF 0xE807
+#define HIF_PS_ADH_SET_REQ 0xE007
+#define HIF_PS_ADH_SET_CONF 0xE808
+#define HIF_INFRA_SET_REQ 0xE008
+#define HIF_INFRA_SET_CONF 0xE809
+#define HIF_ADH_SET_REQ 0xE009
+#define HIF_ADH_SET_CONF 0xE80A
+#define HIF_AP_SET_REQ 0xE00A
+#define HIF_AP_SET_CONF 0xE80B
+#define HIF_ASSOC_INFO_IND 0xE80C
+#define HIF_MIC_FAILURE_REQ 0xE00B
+#define HIF_MIC_FAILURE_CONF 0xE80D
+#define HIF_SCAN_REQ 0xE00C
+#define HIF_SCAN_CONF 0xE80E
+#define HIF_PHY_INFO_REQ 0xE00D
+#define HIF_PHY_INFO_CONF 0xE80F
+#define HIF_SLEEP_REQ 0xE00E
+#define HIF_SLEEP_CONF 0xE810
+#define HIF_PHY_INFO_IND 0xE811
+#define HIF_SCAN_IND 0xE812
+#define HIF_INFRA_SET2_REQ 0xE00F
+#define HIF_INFRA_SET2_CONF 0xE813
+#define HIF_ADH_SET2_REQ 0xE010
+#define HIF_ADH_SET2_CONF 0xE814
+
+#define HIF_REQ_MAX 0xE010
+
+/*
+ * HOST-MAC I/F data structure
+ * Byte alignment: little endian
+ */
+
+struct hostif_hdr {
+ uint16_t size;
+ uint16_t event;
+} __attribute__ ((packed));
+
+struct hostif_data_request_t {
+ struct hostif_hdr header;
+ uint16_t auth_type;
+#define TYPE_DATA 0x0000
+#define TYPE_AUTH 0x0001
+ uint16_t reserved;
+ uint8_t data[0];
+} __attribute__ ((packed));
+
+struct hostif_data_indication_t {
+ struct hostif_hdr header;
+ uint16_t auth_type;
+/* #define TYPE_DATA 0x0000 */
+#define TYPE_PMK1 0x0001
+#define TYPE_GMK1 0x0002
+#define TYPE_GMK2 0x0003
+ uint16_t reserved;
+ uint8_t data[0];
+} __attribute__ ((packed));
+
+#define CHANNEL_LIST_MAX_SIZE 14
+struct channel_list_t {
+ uint8_t size;
+ uint8_t body[CHANNEL_LIST_MAX_SIZE];
+ uint8_t pad;
+} __attribute__ ((packed));
+
+/* MIB Attribute */
+#define DOT11_MAC_ADDRESS 0x21010100 /* MAC Address (R) */
+#define DOT11_PRODUCT_VERSION 0x31024100 /* Firmware Version (R) */
+#define DOT11_RTS_THRESHOLD 0x21020100 /* RTS Threshold (R/W) */
+#define DOT11_FRAGMENTATION_THRESHOLD 0x21050100 /* Fragment Threshold (R/W) */
+#define DOT11_PRIVACY_INVOKED 0x15010100 /* WEP ON/OFF (W) */
+#define DOT11_WEP_DEFAULT_KEY_ID 0x15020100 /* WEP Index (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE1 0x13020101 /* WEP Key#1(TKIP AES: PairwiseTemporalKey) (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE2 0x13020102 /* WEP Key#2(TKIP AES: GroupKey1) (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE3 0x13020103 /* WEP Key#3(TKIP AES: GroupKey2) (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE4 0x13020104 /* WEP Key#4 (W) */
+#define DOT11_WEP_LIST 0x13020100 /* WEP LIST */
+#define DOT11_DESIRED_SSID 0x11090100 /* SSID */
+#define DOT11_CURRENT_CHANNEL 0x45010100 /* channel set */
+#define DOT11_OPERATION_RATE_SET 0x11110100 /* rate set */
+
+#define LOCAL_AP_SEARCH_INTEAVAL 0xF1010100 /* AP search interval (R/W) */
+#define LOCAL_CURRENTADDRESS 0xF1050100 /* MAC Address change (W) */
+#define LOCAL_MULTICAST_ADDRESS 0xF1060100 /* Multicast Address (W) */
+#define LOCAL_MULTICAST_FILTER 0xF1060200 /* Multicast Address Filter enable/disable (W) */
+#define LOCAL_SEARCHED_AP_LIST 0xF1030100 /* AP list (R) */
+#define LOCAL_LINK_AP_STATUS 0xF1040100 /* Link AP status (R) */
+#define LOCAL_PACKET_STATISTICS 0xF1020100 /* tx,rx packets statistics */
+#define LOCAL_AP_SCAN_LIST_TYPE_SET 0xF1030200 /* AP_SCAN_LIST_TYPE */
+
+#define DOT11_RSN_ENABLED 0x15070100 /* WPA enable/disable (W) */
+#define LOCAL_RSN_MODE 0x56010100 /* RSN mode WPA/WPA2 (W) */
+#define DOT11_RSN_CONFIG_MULTICAST_CIPHER 0x51040100 /* GroupKeyCipherSuite (W) */
+#define DOT11_RSN_CONFIG_UNICAST_CIPHER 0x52020100 /* PairwiseKeyCipherSuite (W) */
+#define DOT11_RSN_CONFIG_AUTH_SUITE 0x53020100 /* AuthenticationKeyManagementSuite (W) */
+#define DOT11_RSN_CONFIG_VERSION 0x51020100 /* RSN version (W) */
+#define LOCAL_RSN_CONFIG_ALL 0x5F010100 /* RSN CONFIG ALL (W) */
+#define DOT11_PMK_TSC 0x55010100 /* PMK_TSC (W) */
+#define DOT11_GMK1_TSC 0x55010101 /* GMK1_TSC (W) */
+#define DOT11_GMK2_TSC 0x55010102 /* GMK2_TSC (W) */
+#define DOT11_GMK3_TSC 0x55010103 /* GMK3_TSC */
+#define LOCAL_PMK 0x58010100 /* Pairwise Master Key cache (W) */
+
+#define LOCAL_REGION 0xF10A0100 /* Region setting */
+
+#ifdef WPS
+#define LOCAL_WPS_ENABLE 0xF10B0100 /* WiFi Protected Setup */
+#define LOCAL_WPS_PROBE_REQ 0xF10C0100 /* WPS Probe Request */
+#endif /* WPS */
+
+#define LOCAL_GAIN 0xF10D0100 /* Carrier sense threshold (for demo) */
+#define LOCAL_EEPROM_SUM 0xF10E0100 /* EEPROM checksum information */
+
+struct hostif_mib_get_request_t {
+ struct hostif_hdr header;
+ uint32_t mib_attribute;
+} __attribute__ ((packed));
+
+struct hostif_mib_value_t {
+ uint16_t size;
+ uint16_t type;
+#define MIB_VALUE_TYPE_NULL 0
+#define MIB_VALUE_TYPE_INT 1
+#define MIB_VALUE_TYPE_BOOL 2
+#define MIB_VALUE_TYPE_COUNT32 3
+#define MIB_VALUE_TYPE_OSTRING 4
+ uint8_t body[0];
+} __attribute__ ((packed));
+
+struct hostif_mib_get_confirm_t {
+ struct hostif_hdr header;
+ uint32_t mib_status;
+#define MIB_SUCCESS 0
+#define MIB_INVALID 1
+#define MIB_READ_ONLY 2
+#define MIB_WRITE_ONLY 3
+ uint32_t mib_attribute;
+ struct hostif_mib_value_t mib_value;
+} __attribute__ ((packed));
+
+struct hostif_mib_set_request_t {
+ struct hostif_hdr header;
+ uint32_t mib_attribute;
+ struct hostif_mib_value_t mib_value;
+} __attribute__ ((packed));
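+
+/*
+ * A MIB set request is a small TLV: the request carries the 32-bit
+ * attribute ID followed by a hostif_mib_value_t whose size/type header
+ * prefixes the raw value bytes. Illustrative sketch only; the buffer
+ * handling and on-the-wire byte order are host-side assumptions, not
+ * defined by this header:
+ *
+ * uint8_t buf[sizeof(struct hostif_mib_set_request_t) + sizeof(uint32_t)];
+ * struct hostif_mib_set_request_t *req = (void *)buf;
+ * uint32_t rts = 2347;
+ *
+ * req->mib_attribute = DOT11_RTS_THRESHOLD;
+ * req->mib_value.size = sizeof(rts);
+ * req->mib_value.type = MIB_VALUE_TYPE_INT;
+ * memcpy(req->mib_value.body, &rts, sizeof(rts));
+ */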
+
+struct hostif_mib_set_confirm_t {
+ struct hostif_hdr header;
+ uint32_t mib_status;
+ uint32_t mib_attribute;
+} __attribute__ ((packed));
+
+struct hostif_power_mngmt_request_t {
+ struct hostif_hdr header;
+ uint32_t mode;
+#define POWER_ACTIVE 1
+#define POWER_SAVE 2
+ uint32_t wake_up;
+#define SLEEP_FALSE 0
+#define SLEEP_TRUE 1 /* not used */
+ uint32_t receiveDTIMs;
+#define DTIM_FALSE 0
+#define DTIM_TRUE 1
+} __attribute__ ((packed));
+
+/* power management mode */
+enum {
+ POWMGT_ACTIVE_MODE = 0,
+ POWMGT_SAVE1_MODE,
+ POWMGT_SAVE2_MODE
+};
+
+#define RESULT_SUCCESS 0
+#define RESULT_INVALID_PARAMETERS 1
+#define RESULT_NOT_SUPPORTED 2
+/* #define RESULT_ALREADY_RUNNING 3 */
+#define RESULT_ALREADY_RUNNING 7
+
+struct hostif_power_mngmt_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_start_request_t {
+ struct hostif_hdr header;
+ uint16_t mode;
+#define MODE_PSEUDO_ADHOC 0
+#define MODE_INFRASTRUCTURE 1
+#define MODE_AP 2 /* not used */
+#define MODE_ADHOC 3
+} __attribute__ ((packed));
+
+struct hostif_start_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+#define SSID_MAX_SIZE 32
+struct ssid_t {
+ uint8_t size;
+ uint8_t body[SSID_MAX_SIZE];
+ uint8_t ssid_pad;
+} __attribute__ ((packed));
+
+#define RATE_SET_MAX_SIZE 16
+struct rate_set8_t {
+ uint8_t size;
+ uint8_t body[8];
+ uint8_t rate_pad;
+} __attribute__ ((packed));
+
+struct FhParms_t {
+ uint16_t dwellTime;
+ uint8_t hopSet;
+ uint8_t hopPattern;
+ uint8_t hopIndex;
+} __attribute__ ((packed));
+
+struct DsParms_t {
+ uint8_t channel;
+} __attribute__ ((packed));
+
+struct CfParms_t {
+ uint8_t count;
+ uint8_t period;
+ uint16_t maxDuration;
+ uint16_t durRemaining;
+} __attribute__ ((packed));
+
+struct IbssParms_t {
+ uint16_t atimWindow;
+} __attribute__ ((packed));
+
+struct rsn_t {
+ uint8_t size;
+#define RSN_BODY_SIZE 64
+ uint8_t body[RSN_BODY_SIZE];
+} __attribute__ ((packed));
+
+struct ErpParams_t {
+ uint8_t erp_info;
+} __attribute__ ((packed));
+
+struct rate_set16_t {
+ uint8_t size;
+ uint8_t body[16];
+ uint8_t rate_pad;
+} __attribute__ ((packed));
+
+struct ap_info_t {
+ uint8_t bssid[6]; /* +00 */
+ uint8_t rssi; /* +06 */
+ uint8_t sq; /* +07 */
+ uint8_t noise; /* +08 */
+ uint8_t pad0; /* +09 */
+ uint16_t beacon_period; /* +10 */
+ uint16_t capability; /* +12 */
+#define BSS_CAP_ESS (1<<0)
+#define BSS_CAP_IBSS (1<<1)
+#define BSS_CAP_CF_POLABLE (1<<2)
+#define BSS_CAP_CF_POLL_REQ (1<<3)
+#define BSS_CAP_PRIVACY (1<<4)
+#define BSS_CAP_SHORT_PREAMBLE (1<<5)
+#define BSS_CAP_PBCC (1<<6)
+#define BSS_CAP_CHANNEL_AGILITY (1<<7)
+#define BSS_CAP_SHORT_SLOT_TIME (1<<10)
+#define BSS_CAP_DSSS_OFDM (1<<13)
+ uint8_t frame_type; /* +14 */
+ uint8_t ch_info; /* +15 */
+#define FRAME_TYPE_BEACON 0x80
+#define FRAME_TYPE_PROBE_RESP 0x50
+ uint16_t body_size; /* +16 */
+ uint8_t body[1024]; /* +18 */
+ /* +1032 */
+} __attribute__ ((packed));
+
+struct link_ap_info_t {
+ uint8_t bssid[6]; /* +00 */
+ uint8_t rssi; /* +06 */
+ uint8_t sq; /* +07 */
+ uint8_t noise; /* +08 */
+ uint8_t pad0; /* +09 */
+ uint16_t beacon_period; /* +10 */
+ uint16_t capability; /* +12 */
+ struct rate_set8_t rate_set; /* +14 */
+ struct FhParms_t fh_parameter; /* +24 */
+ struct DsParms_t ds_parameter; /* +29 */
+ struct CfParms_t cf_parameter; /* +30 */
+ struct IbssParms_t ibss_parameter; /* +36 */
+ struct ErpParams_t erp_parameter; /* +38 */
+ uint8_t pad1; /* +39 */
+ struct rate_set8_t ext_rate_set; /* +40 */
+ uint8_t DTIM_period; /* +50 */
+ uint8_t rsn_mode; /* +51 */
+#define RSN_MODE_NONE 0
+#define RSN_MODE_WPA 1
+#define RSN_MODE_WPA2 2
+ struct {
+ uint8_t size; /* +52 */
+ uint8_t body[128]; /* +53 */
+ } __attribute__ ((packed)) rsn;
+} __attribute__ ((packed));
+
+struct hostif_connect_indication_t {
+ struct hostif_hdr header;
+ uint16_t connect_code;
+#define RESULT_CONNECT 0
+#define RESULT_DISCONNECT 1
+ struct link_ap_info_t link_ap_info;
+} __attribute__ ((packed));
+
+struct hostif_stop_request_t {
+ struct hostif_hdr header;
+} __attribute__ ((packed));
+
+struct hostif_stop_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_ps_adhoc_set_request_t {
+ struct hostif_hdr header;
+ uint16_t phy_type;
+#define D_11B_ONLY_MODE 0
+#define D_11G_ONLY_MODE 1
+#define D_11BG_COMPATIBLE_MODE 2
+#define D_11A_ONLY_MODE 3
+ uint16_t cts_mode;
+#define CTS_MODE_FALSE 0
+#define CTS_MODE_TRUE 1
+ uint16_t channel;
+ struct rate_set16_t rate_set;
+ uint16_t capability; /* bit5: short preamble, bit6: PBCC (not supported,
+ * always 0), bit10: short slot time,
+ * bit13: DSSS-OFDM (not supported, always 0) */
+ uint16_t scan_type;
+} __attribute__ ((packed));
+
+struct hostif_ps_adhoc_set_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_infrastructure_set_request_t {
+ struct hostif_hdr header;
+ uint16_t phy_type;
+ uint16_t cts_mode;
+ struct rate_set16_t rate_set;
+ struct ssid_t ssid;
+ uint16_t capability; /* bit5: short preamble, bit6: PBCC (not supported,
+ * always 0), bit10: short slot time,
+ * bit13: DSSS-OFDM (not supported, always 0) */
+ uint16_t beacon_lost_count;
+ uint16_t auth_type;
+#define AUTH_TYPE_OPEN_SYSTEM 0
+#define AUTH_TYPE_SHARED_KEY 1
+ struct channel_list_t channel_list;
+ uint16_t scan_type;
+} __attribute__ ((packed));
+
+struct hostif_infrastructure_set2_request_t {
+ struct hostif_hdr header;
+ uint16_t phy_type;
+ uint16_t cts_mode;
+ struct rate_set16_t rate_set;
+ struct ssid_t ssid;
+ uint16_t capability; /* bit5: short preamble, bit6: PBCC (not supported,
+ * always 0), bit10: short slot time,
+ * bit13: DSSS-OFDM (not supported, always 0) */
+ uint16_t beacon_lost_count;
+ uint16_t auth_type;
+#define AUTH_TYPE_OPEN_SYSTEM 0
+#define AUTH_TYPE_SHARED_KEY 1
+ struct channel_list_t channel_list;
+ uint16_t scan_type;
+ uint8_t bssid[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct hostif_infrastructure_set_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_adhoc_set_request_t {
+ struct hostif_hdr header;
+ uint16_t phy_type;
+ uint16_t cts_mode;
+ uint16_t channel;
+ struct rate_set16_t rate_set;
+ struct ssid_t ssid;
+ uint16_t capability; /* bit5: short preamble, bit6: PBCC (not supported,
+ * always 0), bit10: short slot time,
+ * bit13: DSSS-OFDM (not supported, always 0) */
+ uint16_t scan_type;
+} __attribute__ ((packed));
+
+struct hostif_adhoc_set2_request_t {
+ struct hostif_hdr header;
+ uint16_t phy_type;
+ uint16_t cts_mode;
+ uint16_t reserved;
+ struct rate_set16_t rate_set;
+ struct ssid_t ssid;
+ uint16_t capability; /* bit5: short preamble, bit6: PBCC (not supported,
+ * always 0), bit10: short slot time,
+ * bit13: DSSS-OFDM (not supported, always 0) */
+ uint16_t scan_type;
+ struct channel_list_t channel_list;
+ uint8_t bssid[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct hostif_adhoc_set_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct last_associate_t {
+ uint8_t type;
+ uint8_t status;
+} __attribute__ ((packed));
+
+struct association_request_t {
+ uint8_t type;
+#define FRAME_TYPE_ASSOC_REQ 0x00
+#define FRAME_TYPE_REASSOC_REQ 0x20
+ uint8_t pad;
+ uint16_t capability;
+ uint16_t listen_interval;
+ uint8_t ap_address[6];
+ uint16_t reqIEs_size;
+} __attribute__ ((packed));
+
+struct association_response_t {
+ uint8_t type;
+#define FRAME_TYPE_ASSOC_RESP 0x10
+#define FRAME_TYPE_REASSOC_RESP 0x30
+ uint8_t pad;
+ uint16_t capability;
+ uint16_t status;
+ uint16_t association_id;
+ uint16_t respIEs_size;
+} __attribute__ ((packed));
+
+struct hostif_associate_indication_t {
+ struct hostif_hdr header;
+ struct association_request_t assoc_req;
+ struct association_response_t assoc_resp;
+ /* followed by (reqIEs_size + respIEs_size) octets of data */
+ /* reqIEs data *//* respIEs data */
+} __attribute__ ((packed));
+
+struct hostif_bss_scan_request_t {
+ struct hostif_hdr header;
+ uint8_t scan_type;
+#define ACTIVE_SCAN 0
+#define PASSIVE_SCAN 1
+ uint8_t pad[3];
+ uint32_t ch_time_min;
+ uint32_t ch_time_max;
+ struct channel_list_t channel_list;
+ struct ssid_t ssid;
+} __attribute__ ((packed));
+
+struct hostif_bss_scan_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+ uint16_t reserved;
+} __attribute__ ((packed));
+
+struct hostif_phy_information_request_t {
+ struct hostif_hdr header;
+ uint16_t type;
+#define NORMAL_TYPE 0
+#define TIME_TYPE 1
+ uint16_t time; /* unit 100ms */
+} __attribute__ ((packed));
+
+struct hostif_phy_information_confirm_t {
+ struct hostif_hdr header;
+ uint8_t rssi;
+ uint8_t sq;
+ uint8_t noise;
+ uint8_t link_speed;
+ uint32_t tx_frame;
+ uint32_t rx_frame;
+ uint32_t tx_error;
+ uint32_t rx_error;
+} __attribute__ ((packed));
+
+/* sleep mode */
+#define SLP_ACTIVE 0
+#define SLP_SLEEP 1
+struct hostif_sleep_request_t {
+ struct hostif_hdr header;
+} __attribute__ ((packed));
+
+struct hostif_sleep_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_mic_failure_request_t {
+ struct hostif_hdr header;
+ uint16_t failure_count;
+ uint16_t timer;
+} __attribute__ ((packed));
+
+struct hostif_mic_failure_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+#define BASIC_RATE 0x80
+#define RATE_MASK 0x7F
+
+#define TX_RATE_AUTO 0xff
+#define TX_RATE_1M_FIXED 0
+#define TX_RATE_2M_FIXED 1
+#define TX_RATE_1_2M_AUTO 2
+#define TX_RATE_5M_FIXED 3
+#define TX_RATE_11M_FIXED 4
+
+#define TX_RATE_FULL_AUTO 0
+#define TX_RATE_11_AUTO 1
+#define TX_RATE_11B_AUTO 2
+#define TX_RATE_11BG_AUTO 3
+#define TX_RATE_MANUAL_AUTO 4
+#define TX_RATE_FIXED 5
+
+/* 11b rate */
+#define TX_RATE_1M (uint8_t)(10/5) /* 11b 11g basic rate */
+#define TX_RATE_2M (uint8_t)(20/5) /* 11b 11g basic rate */
+#define TX_RATE_5M (uint8_t)(55/5) /* 11g basic rate */
+#define TX_RATE_11M (uint8_t)(110/5) /* 11g basic rate */
+
+/* 11g rate */
+#define TX_RATE_6M (uint8_t)(60/5) /* 11g basic rate */
+#define TX_RATE_12M (uint8_t)(120/5) /* 11g basic rate */
+#define TX_RATE_24M (uint8_t)(240/5) /* 11g basic rate */
+#define TX_RATE_9M (uint8_t)(90/5)
+#define TX_RATE_18M (uint8_t)(180/5)
+#define TX_RATE_36M (uint8_t)(360/5)
+#define TX_RATE_48M (uint8_t)(480/5)
+#define TX_RATE_54M (uint8_t)(540/5)
+
+#define IS_11B_RATE(A) (((A&RATE_MASK)==TX_RATE_1M)||((A&RATE_MASK)==TX_RATE_2M)||\
+ ((A&RATE_MASK)==TX_RATE_5M)||((A&RATE_MASK)==TX_RATE_11M))
+
+#define IS_OFDM_RATE(A) (((A&RATE_MASK)==TX_RATE_6M)||((A&RATE_MASK)==TX_RATE_12M)||\
+ ((A&RATE_MASK)==TX_RATE_24M)||((A&RATE_MASK)==TX_RATE_9M)||\
+ ((A&RATE_MASK)==TX_RATE_18M)||((A&RATE_MASK)==TX_RATE_36M)||\
+ ((A&RATE_MASK)==TX_RATE_48M)||((A&RATE_MASK)==TX_RATE_54M))
+
+#define IS_11BG_RATE(A) (IS_11B_RATE(A)||IS_OFDM_RATE(A))
+
+#define IS_OFDM_EXT_RATE(A) (((A&RATE_MASK)==TX_RATE_9M)||((A&RATE_MASK)==TX_RATE_18M)||\
+ ((A&RATE_MASK)==TX_RATE_36M)||((A&RATE_MASK)==TX_RATE_48M)||\
+ ((A&RATE_MASK)==TX_RATE_54M))
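+
+/*
+ * The rates above are encoded in the 802.11 unit of 500 kbit/s, hence the
+ * <rate in 100 kbit/s>/5 spelling: TX_RATE_11M = 110/5 = 22, and
+ * 22 * 500 kbit/s = 11 Mbit/s. Bit 7 (BASIC_RATE) marks a mandatory rate,
+ * so the classification macros strip it with RATE_MASK first, e.g.:
+ *
+ * uint8_t r = TX_RATE_11M | BASIC_RATE; -> 0x96
+ * IS_11B_RATE(r) -> true ((r & RATE_MASK) == 22)
+ * IS_OFDM_RATE(r) -> false
+ */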
+
+enum {
+ CONNECT_STATUS = 0,
+ DISCONNECT_STATUS
+};
+
+/* preamble type */
+enum {
+ LONG_PREAMBLE = 0,
+ SHORT_PREAMBLE
+};
+
+/* multicast filter */
+#define MCAST_FILTER_MCAST 0
+#define MCAST_FILTER_MCASTALL 1
+#define MCAST_FILTER_PROMISC 2
+
+#define NIC_MAX_MCAST_LIST 32
+
+/* macro function */
+#define HIF_EVENT_MASK 0xE800
+#define IS_HIF_IND(_EVENT) ((_EVENT&HIF_EVENT_MASK)==0xE800 && \
+ ((_EVENT&~HIF_EVENT_MASK)==0x0001 || \
+ (_EVENT&~HIF_EVENT_MASK)==0x0006 || \
+ (_EVENT&~HIF_EVENT_MASK)==0x000C || \
+ (_EVENT&~HIF_EVENT_MASK)==0x0011 || \
+ (_EVENT&~HIF_EVENT_MASK)==0x0012))
+
+#define IS_HIF_CONF(_EVENT) ((_EVENT&HIF_EVENT_MASK)==0xE800 && \
+ (_EVENT&~HIF_EVENT_MASK)>0x0000 && \
+ (_EVENT&~HIF_EVENT_MASK)<0x0012 && \
+ !IS_HIF_IND(_EVENT) )
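+
+/*
+ * Event codes live in the 0xE8xx range; the macros split an event into the
+ * fixed high bits (HIF_EVENT_MASK) and a low offset. Worked example for
+ * _EVENT = 0xE811:
+ * (0xE811 & HIF_EVENT_MASK) == 0xE800 -> host interface event
+ * (0xE811 & ~HIF_EVENT_MASK) == 0x0011 -> in the indication list,
+ * so IS_HIF_IND(0xE811) is true and IS_HIF_CONF(0xE811) is false.
+ */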
+
+#ifdef __KERNEL__
+
+#include "ks_wlan.h"
+
+/* function prototype */
+extern int hostif_data_request(struct ks_wlan_private *priv,
+ struct sk_buff *packet);
+extern void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
+ unsigned int size);
+extern void hostif_sme_enqueue(struct ks_wlan_private *priv, uint16_t event);
+extern int hostif_init(struct ks_wlan_private *priv);
+extern void hostif_exit(struct ks_wlan_private *priv);
+
+static
+inline int hif_align_size(int size)
+{
+#ifdef KS_ATOM
+ if (size < 1024)
+ size = 1024;
+#endif
+#ifdef DEVICE_ALIGNMENT
+ return (size % DEVICE_ALIGNMENT) ? size + DEVICE_ALIGNMENT -
+ (size % DEVICE_ALIGNMENT) : size;
+#else
+ return size;
+#endif
+}
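+
+/*
+ * hif_align_size() rounds a frame length up to DEVICE_ALIGNMENT (when
+ * defined); e.g. with DEVICE_ALIGNMENT == 32: hif_align_size(33) == 64 and
+ * hif_align_size(64) == 64. With KS_ATOM defined, anything under 1024 is
+ * first padded up to 1024 before the alignment rounding.
+ */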
+
+#endif /* __KERNEL__ */
+
+#endif /* _KS_HOSTIF_H_ */
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
new file mode 100644
index 000000000000..f05dc0122fcb
--- /dev/null
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -0,0 +1,505 @@
+/*
+ * Driver for KeyStream IEEE802.11 b/g wireless LAN cards.
+ *
+ * Copyright (C) 2006-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KS_WLAN_H
+#define _KS_WLAN_H
+
+#define WPS
+
+#include <linux/version.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/spinlock.h> /* spinlock_t */
+#include <linux/sched.h> /* wait_queue_head_t */
+#include <linux/types.h> /* pid_t */
+#include <linux/netdevice.h> /* struct net_device_stats, struct sk_buff */
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <asm/atomic.h> /* atomic_t */
+#include <linux/timer.h> /* struct timer_list */
+#include <linux/string.h>
+#include <linux/completion.h> /* struct completion */
+#include <linux/workqueue.h>
+
+#include <asm/io.h>
+
+#include "ks7010_sdio.h"
+
+#ifdef KS_WLAN_DEBUG
+#define DPRINTK(n, fmt, args...) \
+ do { \
+ if (KS_WLAN_DEBUG > (n)) \
+ printk(KERN_NOTICE "%s: " fmt, __func__, ## args); \
+ } while (0)
+#else
+#define DPRINTK(n, fmt, args...) do { } while (0)
+#endif
+
+struct ks_wlan_parameter {
+ uint8_t operation_mode; /* Operation Mode */
+ uint8_t channel; /* Channel */
+ uint8_t tx_rate; /* Transmit Rate */
+ struct {
+ uint8_t size;
+ uint8_t body[16];
+ } rate_set;
+ uint8_t bssid[ETH_ALEN]; /* BSSID */
+ struct {
+ uint8_t size;
+ uint8_t body[32 + 1];
+ } ssid; /* SSID */
+ uint8_t preamble; /* Preamble */
+ uint8_t powermgt; /* PowerManagementMode */
+ uint32_t scan_type; /* AP List Scan Type */
+#define BEACON_LOST_COUNT_MIN 0
+#define BEACON_LOST_COUNT_MAX 65535
+ uint32_t beacon_lost_count; /* Beacon Lost Count */
+ uint32_t rts; /* RTS Threshold */
+ uint32_t fragment; /* Fragmentation Threshold */
+ uint32_t privacy_invoked;
+ uint32_t wep_index;
+ struct {
+ uint8_t size;
+ uint8_t val[13 * 2 + 1];
+ } wep_key[4];
+ uint16_t authenticate_type;
+ uint16_t phy_type; /* 11b/11g/11bg mode type */
+ uint16_t cts_mode; /* for 11g/11bg mode cts mode */
+ uint16_t phy_info_timer; /* phy information timer */
+};
+
+enum {
+ DEVICE_STATE_OFF = 0, /* this means hw_unavailable is != 0 */
+ DEVICE_STATE_PREBOOT, /* we are in a pre-boot state (empty RAM) */
+ DEVICE_STATE_BOOT, /* boot state (fw upload, run fw) */
+ DEVICE_STATE_PREINIT, /* pre-init state */
+ DEVICE_STATE_INIT, /* init state (restore MIB backup to device) */
+ DEVICE_STATE_READY, /* driver&device are in operational state */
+ DEVICE_STATE_SLEEP /* device in sleep mode */
+};
+
+/* SME flag */
+#define SME_MODE_SET (1<<0)
+#define SME_RTS (1<<1)
+#define SME_FRAG (1<<2)
+#define SME_WEP_FLAG (1<<3)
+#define SME_WEP_INDEX (1<<4)
+#define SME_WEP_VAL1 (1<<5)
+#define SME_WEP_VAL2 (1<<6)
+#define SME_WEP_VAL3 (1<<7)
+#define SME_WEP_VAL4 (1<<8)
+#define SME_WEP_VAL_MASK (SME_WEP_VAL1|SME_WEP_VAL2|SME_WEP_VAL3|SME_WEP_VAL4)
+#define SME_RSN (1<<9)
+#define SME_RSN_MULTICAST (1<<10)
+#define SME_RSN_UNICAST (1<<11)
+#define SME_RSN_AUTH (1<<12)
+
+#define SME_AP_SCAN (1<<13)
+#define SME_MULTICAST (1<<14)
+
+/* SME Event */
+enum {
+ SME_START,
+
+ SME_MULTICAST_REQUEST,
+ SME_MACADDRESS_SET_REQUEST,
+ SME_BSS_SCAN_REQUEST,
+ SME_SET_FLAG,
+ SME_SET_TXKEY,
+ SME_SET_KEY1,
+ SME_SET_KEY2,
+ SME_SET_KEY3,
+ SME_SET_KEY4,
+ SME_SET_PMK_TSC,
+ SME_SET_GMK1_TSC,
+ SME_SET_GMK2_TSC,
+ SME_SET_GMK3_TSC,
+ SME_SET_PMKSA,
+ SME_POW_MNGMT_REQUEST,
+ SME_PHY_INFO_REQUEST,
+ SME_MIC_FAILURE_REQUEST,
+ SME_GET_MAC_ADDRESS,
+ SME_GET_PRODUCT_VERSION,
+ SME_STOP_REQUEST,
+ SME_RTS_THRESHOLD_REQUEST,
+ SME_FRAGMENTATION_THRESHOLD_REQUEST,
+ SME_WEP_INDEX_REQUEST,
+ SME_WEP_KEY1_REQUEST,
+ SME_WEP_KEY2_REQUEST,
+ SME_WEP_KEY3_REQUEST,
+ SME_WEP_KEY4_REQUEST,
+ SME_WEP_FLAG_REQUEST,
+ SME_RSN_UCAST_REQUEST,
+ SME_RSN_MCAST_REQUEST,
+ SME_RSN_AUTH_REQUEST,
+ SME_RSN_ENABLED_REQUEST,
+ SME_RSN_MODE_REQUEST,
+#ifdef WPS
+ SME_WPS_ENABLE_REQUEST,
+ SME_WPS_PROBE_REQUEST,
+#endif
+ SME_SET_GAIN,
+ SME_GET_GAIN,
+ SME_SLEEP_REQUEST,
+ SME_SET_REGION,
+ SME_MODE_SET_REQUEST,
+ SME_START_REQUEST,
+ SME_GET_EEPROM_CKSUM,
+
+ SME_MIC_FAILURE_CONFIRM,
+ SME_START_CONFIRM,
+
+ SME_MULTICAST_CONFIRM,
+ SME_BSS_SCAN_CONFIRM,
+ SME_GET_CURRENT_AP,
+ SME_POW_MNGMT_CONFIRM,
+ SME_PHY_INFO_CONFIRM,
+ SME_STOP_CONFIRM,
+ SME_RTS_THRESHOLD_CONFIRM,
+ SME_FRAGMENTATION_THRESHOLD_CONFIRM,
+ SME_WEP_INDEX_CONFIRM,
+ SME_WEP_KEY1_CONFIRM,
+ SME_WEP_KEY2_CONFIRM,
+ SME_WEP_KEY3_CONFIRM,
+ SME_WEP_KEY4_CONFIRM,
+ SME_WEP_FLAG_CONFIRM,
+ SME_RSN_UCAST_CONFIRM,
+ SME_RSN_MCAST_CONFIRM,
+ SME_RSN_AUTH_CONFIRM,
+ SME_RSN_ENABLED_CONFIRM,
+ SME_RSN_MODE_CONFIRM,
+ SME_MODE_SET_CONFIRM,
+ SME_SLEEP_CONFIRM,
+
+ SME_RSN_SET_CONFIRM,
+ SME_WEP_SET_CONFIRM,
+ SME_TERMINATE,
+
+ SME_EVENT_SIZE /* end */
+};
+
+/* SME Status */
+enum {
+ SME_IDLE,
+ SME_SETUP,
+ SME_DISCONNECT,
+ SME_CONNECT
+};
+
+#define SME_EVENT_BUFF_SIZE 128
+
+struct sme_info {
+ int sme_status;
+ int event_buff[SME_EVENT_BUFF_SIZE];
+ unsigned int qhead;
+ unsigned int qtail;
+#ifdef KS_WLAN_DEBUG
+ /* for debug */
+ unsigned int max_event_count;
+#endif
+ spinlock_t sme_spin;
+ unsigned long sme_flag;
+};
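+
+/*
+ * event_buff/qhead/qtail form a fixed-size ring of pending SME events,
+ * guarded by sme_spin; hostif_sme_enqueue() (declared in ks_hostif.h) is
+ * the producer. A minimal enqueue sketch under that assumption, given a
+ * struct sme_info *sme and an int event:
+ *
+ * unsigned long flags;
+ *
+ * spin_lock_irqsave(&sme->sme_spin, flags);
+ * sme->event_buff[sme->qtail] = event;
+ * sme->qtail = (sme->qtail + 1) % SME_EVENT_BUFF_SIZE;
+ * spin_unlock_irqrestore(&sme->sme_spin, flags);
+ */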
+
+struct hostt_t {
+ int buff[SME_EVENT_BUFF_SIZE];
+ unsigned int qhead;
+ unsigned int qtail;
+};
+
+#define RSN_IE_BODY_MAX 64
+struct rsn_ie_t {
+ uint8_t id; /* 0xdd = WPA or 0x30 = RSN */
+ uint8_t size; /* max ? 255 ? */
+ uint8_t body[RSN_IE_BODY_MAX];
+} __attribute__ ((packed));
+
+#ifdef WPS
+#define WPS_IE_BODY_MAX 255
+struct wps_ie_t {
+ uint8_t id; /* 221 'dd <len> 00 50 F2 04' */
+ uint8_t size; /* max ? 255 ? */
+ uint8_t body[WPS_IE_BODY_MAX];
+} __attribute__ ((packed));
+#endif /* WPS */
+
+struct local_ap_t {
+ uint8_t bssid[6];
+ uint8_t rssi;
+ uint8_t sq;
+ struct {
+ uint8_t size;
+ uint8_t body[32];
+ uint8_t ssid_pad;
+ } ssid;
+ struct {
+ uint8_t size;
+ uint8_t body[16];
+ uint8_t rate_pad;
+ } rate_set;
+ uint16_t capability;
+ uint8_t channel;
+ uint8_t noise;
+ struct rsn_ie_t wpa_ie;
+ struct rsn_ie_t rsn_ie;
+#ifdef WPS
+ struct wps_ie_t wps_ie;
+#endif /* WPS */
+};
+
+#define LOCAL_APLIST_MAX 31
+#define LOCAL_CURRENT_AP LOCAL_APLIST_MAX
+struct local_aplist_t {
+ int size;
+ struct local_ap_t ap[LOCAL_APLIST_MAX + 1];
+};
+
+struct local_gain_t {
+ uint8_t TxMode;
+ uint8_t RxMode;
+ uint8_t TxGain;
+ uint8_t RxGain;
+};
+
+struct local_eeprom_sum_t {
+ uint8_t type;
+ uint8_t result;
+};
+
+enum {
+ EEPROM_OK,
+ EEPROM_CHECKSUM_NONE,
+ EEPROM_FW_NOT_SUPPORT,
+ EEPROM_NG,
+};
+
+/* Power Save Status */
+enum {
+ PS_NONE,
+ PS_ACTIVE_SET,
+ PS_SAVE_SET,
+ PS_CONF_WAIT,
+ PS_SNOOZE,
+ PS_WAKEUP
+};
+
+struct power_save_status_t {
+ atomic_t status; /* initial value 0 */
+ struct completion wakeup_wait;
+ atomic_t confirm_wait;
+ atomic_t snooze_guard;
+};
+
+struct sleep_status_t {
+ atomic_t status; /* initial value 0 */
+ atomic_t doze_request;
+ atomic_t wakeup_request;
+};
+
+/* WPA */
+struct scan_ext_t {
+ unsigned int flag;
+ char ssid[IW_ESSID_MAX_SIZE + 1];
+};
+
+enum {
+ CIPHER_NONE,
+ CIPHER_WEP40,
+ CIPHER_TKIP,
+ CIPHER_CCMP,
+ CIPHER_WEP104
+};
+
+#define CIPHER_ID_WPA_NONE "\x00\x50\xf2\x00"
+#define CIPHER_ID_WPA_WEP40 "\x00\x50\xf2\x01"
+#define CIPHER_ID_WPA_TKIP "\x00\x50\xf2\x02"
+#define CIPHER_ID_WPA_CCMP "\x00\x50\xf2\x04"
+#define CIPHER_ID_WPA_WEP104 "\x00\x50\xf2\x05"
+
+#define CIPHER_ID_WPA2_NONE "\x00\x0f\xac\x00"
+#define CIPHER_ID_WPA2_WEP40 "\x00\x0f\xac\x01"
+#define CIPHER_ID_WPA2_TKIP "\x00\x0f\xac\x02"
+#define CIPHER_ID_WPA2_CCMP "\x00\x0f\xac\x04"
+#define CIPHER_ID_WPA2_WEP104 "\x00\x0f\xac\x05"
+
+#define CIPHER_ID_LEN 4
+
+enum {
+ KEY_MGMT_802_1X,
+ KEY_MGMT_PSK,
+ KEY_MGMT_WPANONE,
+};
+
+#define KEY_MGMT_ID_WPA_NONE "\x00\x50\xf2\x00"
+#define KEY_MGMT_ID_WPA_1X "\x00\x50\xf2\x01"
+#define KEY_MGMT_ID_WPA_PSK "\x00\x50\xf2\x02"
+#define KEY_MGMT_ID_WPA_WPANONE "\x00\x50\xf2\xff"
+
+#define KEY_MGMT_ID_WPA2_NONE "\x00\x0f\xac\x00"
+#define KEY_MGMT_ID_WPA2_1X "\x00\x0f\xac\x01"
+#define KEY_MGMT_ID_WPA2_PSK "\x00\x0f\xac\x02"
+#define KEY_MGMT_ID_WPA2_WPANONE "\x00\x0f\xac\xff"
+
+#define KEY_MGMT_ID_LEN 4
+
+#define MIC_KEY_SIZE 8
+
+struct wpa_key_t {
+ uint32_t ext_flags; /* IW_ENCODE_EXT_xxx */
+ uint8_t tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+ uint8_t rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+ struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
+ * (group) keys or unicast address for
+ * individual keys */
+ uint16_t alg;
+ uint16_t key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
+ uint8_t key_val[IW_ENCODING_TOKEN_MAX];
+ uint8_t tx_mic_key[MIC_KEY_SIZE];
+ uint8_t rx_mic_key[MIC_KEY_SIZE];
+};
+#define WPA_KEY_INDEX_MAX 4
+#define WPA_RX_SEQ_LEN 6
+
+struct mic_failure_t {
+ uint16_t failure; /* MIC Failure counter 0 or 1 or 2 */
+ uint16_t counter; /* 1sec counter 0-60 */
+ uint32_t last_failure_time;
+ int stop; /* stop flag */
+};
+
+struct wpa_status_t {
+ int wpa_enabled;
+ unsigned int rsn_enabled;
+ int version;
+ int pairwise_suite; /* unicast cipher */
+ int group_suite; /* multicast cipher */
+ int key_mgmt_suite; /* authentication key management suite */
+ int auth_alg;
+ int txkey;
+ struct wpa_key_t key[WPA_KEY_INDEX_MAX];
+ struct scan_ext_t scan_ext;
+ struct mic_failure_t mic_failure;
+};
+
+#include <linux/list.h>
+#define PMK_LIST_MAX 8
+struct pmk_list_t {
+ uint16_t size;
+ struct list_head head;
+ struct pmk_t {
+ struct list_head list;
+ uint8_t bssid[ETH_ALEN];
+ uint8_t pmkid[IW_PMKID_LEN];
+ } pmk[PMK_LIST_MAX];
+};
+
+#ifdef WPS
+struct wps_status_t {
+ int wps_enabled;
+ int ielen;
+ uint8_t ie[255];
+};
+#endif /* WPS */
+
+struct ks_wlan_private {
+
+ struct hw_info_t ks_wlan_hw; /* hardware information */
+
+ struct net_device *net_dev;
+ int reg_net; /* register_netdev */
+ struct net_device_stats nstats;
+ struct iw_statistics wstats;
+
+ struct completion confirm_wait;
+
+ /* trx device & sme */
+ struct tx_device tx_dev;
+ struct rx_device rx_dev;
+ struct sme_info sme_i;
+ u8 *rxp;
+ unsigned int rx_size;
+ struct tasklet_struct sme_task;
+ struct work_struct ks_wlan_wakeup_task;
+ int scan_ind_count;
+
+ unsigned char eth_addr[ETH_ALEN];
+
+ struct local_aplist_t aplist;
+ struct local_ap_t current_ap;
+ struct power_save_status_t psstatus;
+ struct sleep_status_t sleepstatus;
+ struct wpa_status_t wpa;
+ struct pmk_list_t pmklist;
+ /* wireless parameter */
+ struct ks_wlan_parameter reg;
+ uint8_t current_rate;
+
+ char nick[IW_ESSID_MAX_SIZE + 1];
+
+ spinlock_t multicast_spin;
+
+ spinlock_t dev_read_lock;
+ wait_queue_head_t devread_wait;
+
+ unsigned int need_commit; /* for ioctl */
+
+ /* DeviceIoControl */
+ int device_open_status;
+ atomic_t event_count;
+ atomic_t rec_count;
+ int dev_count;
+#define DEVICE_STOCK_COUNT 20
+ unsigned char *dev_data[DEVICE_STOCK_COUNT];
+ int dev_size[DEVICE_STOCK_COUNT];
+
+ /* ioctl : IOCTL_FIRMWARE_VERSION */
+ unsigned char firmware_version[128 + 1];
+ int version_size;
+
+ int mac_address_valid; /* Mac Address Status */
+
+ int dev_state;
+
+ struct sk_buff *skb;
+ unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
+ /* spinlock_t lock; */
+#define FORCE_DISCONNECT 0x80000000
+#define CONNECT_STATUS_MASK 0x7FFFFFFF
+ uint32_t connect_status; /* connect status */
+ int infra_status; /* Infrastructure status */
+
+ uint8_t data_buff[0x1000];
+
+ uint8_t scan_ssid_len;
+ uint8_t scan_ssid[IW_ESSID_MAX_SIZE + 1];
+ struct local_gain_t gain;
+#ifdef WPS
+ struct net_device *l2_dev;
+ int l2_fd;
+ struct wps_status_t wps;
+#endif /* WPS */
+ uint8_t sleep_mode;
+
+ uint8_t region;
+ struct local_eeprom_sum_t eeprom_sum;
+ uint8_t eeprom_checksum;
+
+ struct hostt_t hostt;
+
+ unsigned long last_doze;
+ unsigned long last_wakeup;
+
+ uint wakeup_count; /* to detect wakeup loops */
+};
+
+extern int ks_wlan_net_start(struct net_device *dev);
+extern int ks_wlan_net_stop(struct net_device *dev);
+
+#endif /* _KS_WLAN_H */
diff --git a/drivers/staging/ks7010/ks_wlan_ioctl.h b/drivers/staging/ks7010/ks_wlan_ioctl.h
new file mode 100644
index 000000000000..49369e497808
--- /dev/null
+++ b/drivers/staging/ks7010/ks_wlan_ioctl.h
@@ -0,0 +1,67 @@
+/*
+ * Driver for KeyStream 11b/g wireless LAN
+ *
+ * Copyright (c) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KS_WLAN_IOCTL_H
+#define _KS_WLAN_IOCTL_H
+
+#include <linux/wireless.h>
+/* The low order bit identifies a SET (0) or a GET (1) ioctl. */
+
+/* SIOCIWFIRSTPRIV+0 */
+/* former KS_WLAN_GET_DRIVER_VERSION SIOCIWFIRSTPRIV+1 */
+/* SIOCIWFIRSTPRIV+2 */
+#define KS_WLAN_GET_FIRM_VERSION SIOCIWFIRSTPRIV+3
+#ifdef WPS
+#define KS_WLAN_SET_WPS_ENABLE SIOCIWFIRSTPRIV+4
+#define KS_WLAN_GET_WPS_ENABLE SIOCIWFIRSTPRIV+5
+#define KS_WLAN_SET_WPS_PROBE_REQ SIOCIWFIRSTPRIV+6
+#endif
+#define KS_WLAN_GET_EEPROM_CKSUM SIOCIWFIRSTPRIV+7
+#define KS_WLAN_SET_PREAMBLE SIOCIWFIRSTPRIV+8
+#define KS_WLAN_GET_PREAMBLE SIOCIWFIRSTPRIV+9
+#define KS_WLAN_SET_POWER_SAVE SIOCIWFIRSTPRIV+10
+#define KS_WLAN_GET_POWER_SAVE SIOCIWFIRSTPRIV+11
+#define KS_WLAN_SET_SCAN_TYPE SIOCIWFIRSTPRIV+12
+#define KS_WLAN_GET_SCAN_TYPE SIOCIWFIRSTPRIV+13
+#define KS_WLAN_SET_RX_GAIN SIOCIWFIRSTPRIV+14
+#define KS_WLAN_GET_RX_GAIN SIOCIWFIRSTPRIV+15
+#define KS_WLAN_HOSTT SIOCIWFIRSTPRIV+16 /* unused */
+/* #define KS_WLAN_SET_REGION SIOCIWFIRSTPRIV+17 */
+#define KS_WLAN_SET_BEACON_LOST SIOCIWFIRSTPRIV+18
+#define KS_WLAN_GET_BEACON_LOST SIOCIWFIRSTPRIV+19
+
+#define KS_WLAN_SET_TX_GAIN SIOCIWFIRSTPRIV+20
+#define KS_WLAN_GET_TX_GAIN SIOCIWFIRSTPRIV+21
+
+/* for KS7010 */
+#define KS_WLAN_SET_PHY_TYPE SIOCIWFIRSTPRIV+22
+#define KS_WLAN_GET_PHY_TYPE SIOCIWFIRSTPRIV+23
+#define KS_WLAN_SET_CTS_MODE SIOCIWFIRSTPRIV+24
+#define KS_WLAN_GET_CTS_MODE SIOCIWFIRSTPRIV+25
+/* SIOCIWFIRSTPRIV+26 */
+/* SIOCIWFIRSTPRIV+27 */
+#define KS_WLAN_SET_SLEEP_MODE SIOCIWFIRSTPRIV+28 /* sleep mode */
+#define KS_WLAN_GET_SLEEP_MODE SIOCIWFIRSTPRIV+29 /* sleep mode */
+/* SIOCIWFIRSTPRIV+30 */
+/* SIOCIWFIRSTPRIV+31 */
+
+#ifdef __KERNEL__
+
+#include "ks_wlan.h"
+#include <linux/netdevice.h>
+
+extern int ks_wlan_read_config_file(struct ks_wlan_private *priv);
+extern int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
+ unsigned int commit_flag);
+
+#endif /* __KERNEL__ */
+
+#endif /* _KS_WLAN_IOCTL_H */
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
new file mode 100644
index 000000000000..1e21eb1c4667
--- /dev/null
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -0,0 +1,3528 @@
+/*
+ * Driver for KeyStream 11b/g wireless LAN
+ *
+ * Copyright (C) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/mii.h>
+#include <linux/pci.h>
+#include <linux/ctype.h>
+#include <linux/timer.h>
+#include <asm/atomic.h>
+#include <linux/io.h>
+#include <asm/uaccess.h>
+
+static int wep_on_off;
+#define WEP_OFF 0
+#define WEP_ON_64BIT 1
+#define WEP_ON_128BIT 2
+
+#include "ks_wlan.h"
+#include "ks_hostif.h"
+#include "ks_wlan_ioctl.h"
+
+/* Include Wireless Extension definition and check version */
+#include <linux/wireless.h>
+#define WIRELESS_SPY /* enable iwspy support */
+#include <net/iw_handler.h> /* New driver API */
+
+/* Frequency list (map channels to frequencies) */
+static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484
+};
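+
+/*
+ * Index N-1 holds the centre frequency of channel N in MHz (channel 1 ->
+ * 2412 MHz ... channel 14 -> 2484 MHz). The handlers below exchange this
+ * with user space as an iw_freq of m = MHz * 100000 and e = 1, i.e.
+ * m * 10^e Hz.
+ */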
+
+/* A few details needed for WEP (Wired Equivalent Privacy) */
+#define MAX_KEY_SIZE 13 /* 104-bit key ("128-bit" WEP incl. 24-bit IV) */
+#define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */
+typedef struct wep_key_t {
+ u16 len;
+ u8 key[16]; /* 40-bit and 104-bit keys */
+} wep_key_t;
+
+/* Backward compatibility */
+#ifndef IW_ENCODE_NOKEY
+#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
+#define IW_ENCODE_MODE (IW_ENCODE_DISABLED | IW_ENCODE_RESTRICTED | IW_ENCODE_OPEN)
+#endif /* IW_ENCODE_NOKEY */
+
+/* List of Wireless Handlers (new API) */
+static const struct iw_handler_def ks_wlan_handler_def;
+
+#define KSC_OPNOTSUPP /* operation not supported: stubs below are compiled out */
+
+/*
+ * function prototypes
+ */
+extern int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p,
+ unsigned long size,
+ void (*complete_handler) (void *arg1, void *arg2),
+ void *arg1, void *arg2);
+static int ks_wlan_open(struct net_device *dev);
+static void ks_wlan_tx_timeout(struct net_device *dev);
+static int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int ks_wlan_close(struct net_device *dev);
+static void ks_wlan_set_multicast_list(struct net_device *dev);
+static struct net_device_stats *ks_wlan_get_stats(struct net_device *dev);
+static int ks_wlan_set_mac_address(struct net_device *dev, void *addr);
+static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd);
+
+static atomic_t update_phyinfo;
+static struct timer_list update_phyinfo_timer;
+static
+int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
+{
+ struct iw_statistics *wstats = &priv->wstats;
+
+ DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
+
+ if (priv->dev_state < DEVICE_STATE_READY) {
+ return -1; /* initialization not finished */
+ }
+ if (atomic_read(&update_phyinfo))
+ return 1;
+
+ /* The status */
+ wstats->status = priv->reg.operation_mode; /* Operation mode */
+
+ /* Signal quality and co. But where is the noise level ??? */
+ hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST);
+
+ /* interruptible_sleep_on_timeout(&priv->confirm_wait, HZ/2); */
+ if (!wait_for_completion_interruptible_timeout
+ (&priv->confirm_wait, HZ / 2)) {
+ DPRINTK(1, "wait time out!!\n");
+ }
+
+ atomic_inc(&update_phyinfo);
+ update_phyinfo_timer.expires = jiffies + HZ; /* 1sec */
+ add_timer(&update_phyinfo_timer);
+
+ return 0;
+}
+
+static
+void ks_wlan_update_phyinfo_timeout(unsigned long ptr)
+{
+ DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
+ atomic_set(&update_phyinfo, 0);
+}
+
+int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
+ unsigned int commit_flag)
+{
+ DPRINTK(2, "\n");
+
+ hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+
+ if (commit_flag & SME_RTS)
+ hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST);
+ if (commit_flag & SME_FRAG)
+ hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST);
+
+ if (commit_flag & SME_WEP_INDEX)
+ hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST);
+ if (commit_flag & SME_WEP_VAL1)
+ hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST);
+ if (commit_flag & SME_WEP_VAL2)
+ hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST);
+ if (commit_flag & SME_WEP_VAL3)
+ hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST);
+ if (commit_flag & SME_WEP_VAL4)
+ hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST);
+ if (commit_flag & SME_WEP_FLAG)
+ hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
+
+ if (commit_flag & SME_RSN) {
+ hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST);
+ hostif_sme_enqueue(priv, SME_RSN_MODE_REQUEST);
+ }
+ if (commit_flag & SME_RSN_MULTICAST)
+ hostif_sme_enqueue(priv, SME_RSN_MCAST_REQUEST);
+ if (commit_flag & SME_RSN_UNICAST)
+ hostif_sme_enqueue(priv, SME_RSN_UCAST_REQUEST);
+ if (commit_flag & SME_RSN_AUTH)
+ hostif_sme_enqueue(priv, SME_RSN_AUTH_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_START_REQUEST);
+
+ return 0;
+}
+
+/*
+ * Initial Wireless Extension code for Ks_Wlannet driver by :
+ * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
+ * Conversion to new driver API by :
+ * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 26 March 02
+ * Javier also did a good amount of work here, adding some new extensions
+ * and fixing my code. Let's just say that without him this code just
+ * would not work at all... - Jean II
+ */
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get protocol name */
+static int ks_wlan_get_name(struct net_device *dev,
+ struct iw_request_info *info, char *cwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (priv->dev_state < DEVICE_STATE_READY) {
+ strcpy(cwrq, "NOT READY!");
+ } else if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+ strcpy(cwrq, "IEEE 802.11b");
+ } else if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+ strcpy(cwrq, "IEEE 802.11g");
+ } else {
+ strcpy(cwrq, "IEEE 802.11b/g");
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set frequency */
+static int ks_wlan_set_freq(struct net_device *dev,
+ struct iw_request_info *info, struct iw_freq *fwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int rc = -EINPROGRESS; /* Call commit handler */
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* If setting by frequency, convert to a channel */
+ if ((fwrq->e == 1) &&
+ (fwrq->m >= (int)2.412e8) && (fwrq->m <= (int)2.487e8)) {
+ int f = fwrq->m / 100000;
+ int c = 0;
+ while ((c < 14) && (f != frequency_list[c]))
+ c++;
+ /* Hack to fall through... */
+ fwrq->e = 0;
+ fwrq->m = c + 1;
+ }
+ /* Setting by channel number */
+ if ((fwrq->m > 1000) || (fwrq->e > 0))
+ rc = -EOPNOTSUPP;
+ else {
+ int channel = fwrq->m;
+ /* We should do a better check than that,
+ * based on the card capability !!! */
+ if ((channel < 1) || (channel > 14)) {
+ printk(KERN_DEBUG
+ "%s: New channel value of %d is invalid!\n",
+ dev->name, fwrq->m);
+ rc = -EINVAL;
+ } else {
+ /* Yes ! We can set it !!! */
+ priv->reg.channel = (u8) (channel);
+ priv->need_commit |= SME_MODE_SET;
+ }
+ }
+
+ return rc;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get frequency */
+static int ks_wlan_get_freq(struct net_device *dev,
+ struct iw_request_info *info, struct iw_freq *fwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int f;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ f = (int)priv->current_ap.channel;
+ } else
+ f = (int)priv->reg.channel;
+ fwrq->m = frequency_list[f - 1] * 100000;
+ fwrq->e = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set ESSID */
+static int ks_wlan_set_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ size_t len;
+
+ DPRINTK(2, " %d\n", dwrq->flags);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* Check if we asked for `any' */
+ if (dwrq->flags == 0) {
+ /* Just send an empty SSID list */
+ memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
+ priv->reg.ssid.size = 0;
+ } else {
+ len = dwrq->length;
+ /* iwconfig uses nul termination in SSID.. */
+ if (len > 0 && extra[len - 1] == '\0')
+ len--;
+
+ /* Check the size of the string */
+ if (len > IW_ESSID_MAX_SIZE) {
+ return -EINVAL;
+ }
+
+ /* Set the SSID */
+ memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
+
+ memcpy(priv->reg.ssid.body, extra, len);
+ priv->reg.ssid.size = len;
+ }
+ /* Write it to the card */
+ priv->need_commit |= SME_MODE_SET;
+
+ /* Commit immediately rather than returning -EINPROGRESS. */
+ ks_wlan_setup_parameter(priv, priv->need_commit);
+ priv->need_commit = 0;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get ESSID */
+static int ks_wlan_get_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* Note : if dwrq->flags != 0, we should
+ * get the relevant SSID from the SSID list... */
+ if (priv->reg.ssid.size) {
+ /* Get the current SSID */
+ memcpy(extra, priv->reg.ssid.body, priv->reg.ssid.size);
+ /* If none, we may want to get the one that was set */
+
+ /* Push it out ! */
+ dwrq->length = priv->reg.ssid.size;
+ dwrq->flags = 1; /* active */
+ } else {
+ dwrq->length = 0;
+ dwrq->flags = 0; /* ANY */
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set AP address */
+static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (priv->reg.operation_mode == MODE_ADHOC ||
+ priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+ memcpy(priv->reg.bssid, (u8 *)&ap_addr->sa_data, ETH_ALEN);
+
+ if (is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+ priv->need_commit |= SME_MODE_SET;
+ }
+ } else {
+ memset(priv->reg.bssid, 0x0, ETH_ALEN);
+ return -EOPNOTSUPP;
+ }
+
+ DPRINTK(2, "bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ priv->reg.bssid[0], priv->reg.bssid[1], priv->reg.bssid[2],
+ priv->reg.bssid[3], priv->reg.bssid[4], priv->reg.bssid[5]);
+
+ /* Write it to the card */
+ if (priv->need_commit) {
+ priv->need_commit |= SME_MODE_SET;
+ return -EINPROGRESS; /* Call commit handler */
+ }
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get AP address */
+static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
+ struct sockaddr *awrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN);
+ } else {
+ memset(awrq->sa_data, 0, ETH_ALEN);
+ }
+
+ awrq->sa_family = ARPHRD_ETHER;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Nickname */
+static int ks_wlan_set_nick(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *dwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* Check the size of the string */
+ if (dwrq->length > 16 + 1) {
+ return -E2BIG;
+ }
+ memset(priv->nick, 0, sizeof(priv->nick));
+ memcpy(priv->nick, extra, dwrq->length);
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Nickname */
+static int ks_wlan_get_nick(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *dwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ strncpy(extra, priv->nick, 16);
+ extra[16] = '\0';
+ dwrq->length = strlen(extra) + 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Bit-Rate */
+static int ks_wlan_set_rate(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int i = 0;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+ if (vwrq->fixed == 1) {
+ switch (vwrq->value) {
+ case 11000000:
+ case 5500000:
+ priv->reg.rate_set.body[0] =
+ (uint8_t) (vwrq->value / 500000);
+ break;
+ case 2000000:
+ case 1000000:
+ priv->reg.rate_set.body[0] =
+ ((uint8_t) (vwrq->value / 500000)) |
+ BASIC_RATE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->reg.tx_rate = TX_RATE_FIXED;
+ priv->reg.rate_set.size = 1;
+ } else { /* vwrq->fixed == 0 */
+ if (vwrq->value > 0) {
+ switch (vwrq->value) {
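+ /* cases cascade (intentional fall-through): each
+ * selected rate also enables every lower rate */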
+ case 11000000:
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M;
+ i++;
+ case 5500000:
+ priv->reg.rate_set.body[2] = TX_RATE_5M;
+ i++;
+ case 2000000:
+ priv->reg.rate_set.body[1] =
+ TX_RATE_2M | BASIC_RATE;
+ i++;
+ case 1000000:
+ priv->reg.rate_set.body[0] =
+ TX_RATE_1M | BASIC_RATE;
+ i++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
+ priv->reg.rate_set.size = i;
+ } else {
+ priv->reg.rate_set.body[3] = TX_RATE_11M;
+ priv->reg.rate_set.body[2] = TX_RATE_5M;
+ priv->reg.rate_set.body[1] =
+ TX_RATE_2M | BASIC_RATE;
+ priv->reg.rate_set.body[0] =
+ TX_RATE_1M | BASIC_RATE;
+ priv->reg.tx_rate = TX_RATE_FULL_AUTO;
+ priv->reg.rate_set.size = 4;
+ }
+ }
+ } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+ if (vwrq->fixed == 1) {
+ switch (vwrq->value) {
+ case 54000000:
+ case 48000000:
+ case 36000000:
+ case 18000000:
+ case 9000000:
+ priv->reg.rate_set.body[0] =
+ (uint8_t) (vwrq->value / 500000);
+ break;
+ case 24000000:
+ case 12000000:
+ case 11000000:
+ case 6000000:
+ case 5500000:
+ case 2000000:
+ case 1000000:
+ priv->reg.rate_set.body[0] =
+ ((uint8_t) (vwrq->value / 500000)) |
+ BASIC_RATE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->reg.tx_rate = TX_RATE_FIXED;
+ priv->reg.rate_set.size = 1;
+ } else { /* vwrq->fixed == 0 */
+ if (vwrq->value > 0) {
+ switch (vwrq->value) {
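+ /* cases cascade (intentional fall-through): each
+ * selected rate also enables every lower rate */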
+ case 54000000:
+ priv->reg.rate_set.body[11] =
+ TX_RATE_54M;
+ i++;
+ case 48000000:
+ priv->reg.rate_set.body[10] =
+ TX_RATE_48M;
+ i++;
+ case 36000000:
+ priv->reg.rate_set.body[9] =
+ TX_RATE_36M;
+ i++;
+ case 24000000:
+ case 18000000:
+ case 12000000:
+ case 11000000:
+ case 9000000:
+ case 6000000:
+ if (vwrq->value == 24000000) {
+ priv->reg.rate_set.body[8] =
+ TX_RATE_18M;
+ i++;
+ priv->reg.rate_set.body[7] =
+ TX_RATE_9M;
+ i++;
+ priv->reg.rate_set.body[6] =
+ TX_RATE_24M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[5] =
+ TX_RATE_12M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[4] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M | BASIC_RATE;
+ i++;
+ } else if (vwrq->value == 18000000) {
+ priv->reg.rate_set.body[7] =
+ TX_RATE_18M;
+ i++;
+ priv->reg.rate_set.body[6] =
+ TX_RATE_9M;
+ i++;
+ priv->reg.rate_set.body[5] =
+ TX_RATE_12M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[4] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M | BASIC_RATE;
+ i++;
+ } else if (vwrq->value == 12000000) {
+ priv->reg.rate_set.body[6] =
+ TX_RATE_9M;
+ i++;
+ priv->reg.rate_set.body[5] =
+ TX_RATE_12M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[4] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M | BASIC_RATE;
+ i++;
+ } else if (vwrq->value == 11000000) {
+ priv->reg.rate_set.body[5] =
+ TX_RATE_9M;
+ i++;
+ priv->reg.rate_set.body[4] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M | BASIC_RATE;
+ i++;
+ } else if (vwrq->value == 9000000) {
+ priv->reg.rate_set.body[4] =
+ TX_RATE_9M;
+ i++;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ } else { /* vwrq->value == 6000000 */
+ priv->reg.rate_set.body[3] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ }
+ case 5500000:
+ priv->reg.rate_set.body[2] =
+ TX_RATE_5M | BASIC_RATE;
+ i++;
+ case 2000000:
+ priv->reg.rate_set.body[1] =
+ TX_RATE_2M | BASIC_RATE;
+ i++;
+ case 1000000:
+ priv->reg.rate_set.body[0] =
+ TX_RATE_1M | BASIC_RATE;
+ i++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
+ priv->reg.rate_set.size = i;
+ } else {
+ priv->reg.rate_set.body[11] = TX_RATE_54M;
+ priv->reg.rate_set.body[10] = TX_RATE_48M;
+ priv->reg.rate_set.body[9] = TX_RATE_36M;
+ priv->reg.rate_set.body[8] = TX_RATE_18M;
+ priv->reg.rate_set.body[7] = TX_RATE_9M;
+ priv->reg.rate_set.body[6] =
+ TX_RATE_24M | BASIC_RATE;
+ priv->reg.rate_set.body[5] =
+ TX_RATE_12M | BASIC_RATE;
+ priv->reg.rate_set.body[4] =
+ TX_RATE_6M | BASIC_RATE;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M | BASIC_RATE;
+ priv->reg.rate_set.body[2] =
+ TX_RATE_5M | BASIC_RATE;
+ priv->reg.rate_set.body[1] =
+ TX_RATE_2M | BASIC_RATE;
+ priv->reg.rate_set.body[0] =
+ TX_RATE_1M | BASIC_RATE;
+ priv->reg.tx_rate = TX_RATE_FULL_AUTO;
+ priv->reg.rate_set.size = 12;
+ }
+ }
+ }
+
+ priv->need_commit |= SME_MODE_SET;
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Bit-Rate */
+static int ks_wlan_get_rate(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "in_interrupt = %ld update_phyinfo = %d\n",
+ in_interrupt(), atomic_read(&update_phyinfo));
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (!atomic_read(&update_phyinfo)) {
+ ks_wlan_update_phy_information(priv);
+ }
+ vwrq->value = ((priv->current_rate) & RATE_MASK) * 500000;
+ if (priv->reg.tx_rate == TX_RATE_FIXED)
+ vwrq->fixed = 1;
+ else
+ vwrq->fixed = 0;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set RTS threshold */
+static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int rthr = vwrq->value;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (vwrq->disabled)
+ rthr = 2347;
+ if ((rthr < 0) || (rthr > 2347)) {
+ return -EINVAL;
+ }
+ priv->reg.rts = rthr;
+ priv->need_commit |= SME_RTS;
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get RTS threshold */
+static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ vwrq->value = priv->reg.rts;
+ vwrq->disabled = (vwrq->value >= 2347);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Fragmentation threshold */
+static int ks_wlan_set_frag(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int fthr = vwrq->value;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (vwrq->disabled)
+ fthr = 2346;
+ if ((fthr < 256) || (fthr > 2346)) {
+ return -EINVAL;
+ }
+ fthr &= ~0x1; /* Get an even value - is it really needed ??? */
+ priv->reg.fragment = fthr;
+ priv->need_commit |= SME_FRAG;
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Fragmentation threshold */
+static int ks_wlan_get_frag(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ vwrq->value = priv->reg.fragment;
+ vwrq->disabled = (vwrq->value >= 2346);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Mode of Operation */
+static int ks_wlan_set_mode(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "mode=%d\n", *uwrq);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ switch (*uwrq) {
+ case IW_MODE_ADHOC:
+ priv->reg.operation_mode = MODE_ADHOC;
+ priv->need_commit |= SME_MODE_SET;
+ break;
+ case IW_MODE_INFRA:
+ priv->reg.operation_mode = MODE_INFRASTRUCTURE;
+ priv->need_commit |= SME_MODE_SET;
+ break;
+ case IW_MODE_AUTO:
+ case IW_MODE_MASTER:
+ case IW_MODE_REPEAT:
+ case IW_MODE_SECOND:
+ case IW_MODE_MONITOR:
+ default:
+ return -EINVAL;
+ }
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Mode of Operation */
+static int ks_wlan_get_mode(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* If not managed, assume it's ad-hoc */
+ switch (priv->reg.operation_mode) {
+ case MODE_INFRASTRUCTURE:
+ *uwrq = IW_MODE_INFRA;
+ break;
+ case MODE_ADHOC:
+ *uwrq = IW_MODE_ADHOC;
+ break;
+ default:
+ *uwrq = IW_MODE_ADHOC;
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Encryption Key */
+static int ks_wlan_set_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ wep_key_t key;
+ int index = (dwrq->flags & IW_ENCODE_INDEX);
+ int current_index = priv->reg.wep_index;
+ int i;
+
+ DPRINTK(2, "flags=%04X\n", dwrq->flags);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* index check */
+ if ((index < 0) || (index > 4))
+ return -EINVAL;
+ else if (index == 0)
+ index = current_index;
+ else
+ index--;
+
+ /* Is WEP supported ? */
+ /* Basic checking: do we have a key to set ? */
+ if (dwrq->length > 0) {
+ if (dwrq->length > MAX_KEY_SIZE) { /* Check the size of the key */
+ return -EINVAL;
+ }
+ if (dwrq->length > MIN_KEY_SIZE) { /* Set the length */
+ key.len = MAX_KEY_SIZE;
+ priv->reg.privacy_invoked = 0x01;
+ priv->need_commit |= SME_WEP_FLAG;
+ wep_on_off = WEP_ON_128BIT;
+ } else {
+ if (dwrq->length > 0) {
+ key.len = MIN_KEY_SIZE;
+ priv->reg.privacy_invoked = 0x01;
+ priv->need_commit |= SME_WEP_FLAG;
+ wep_on_off = WEP_ON_64BIT;
+ } else { /* Disable the key */
+ key.len = 0;
+ }
+ }
+ /* Check if the key is not marked as invalid */
+ if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
+ /* Cleanup */
+ memset(key.key, 0, MAX_KEY_SIZE);
+ /* Copy the key in the driver */
+ if (copy_from_user
+ (key.key, dwrq->pointer, dwrq->length)) {
+ key.len = 0;
+ return -EFAULT;
+ }
+ /* Send the key to the card */
+ priv->reg.wep_key[index].size = key.len;
+ for (i = 0; i < (priv->reg.wep_key[index].size); i++) {
+ priv->reg.wep_key[index].val[i] = key.key[i];
+ }
+ priv->need_commit |= (SME_WEP_VAL1 << index);
+ priv->reg.wep_index = index;
+ priv->need_commit |= SME_WEP_INDEX;
+ }
+ } else {
+ if (dwrq->flags & IW_ENCODE_DISABLED) {
+ priv->reg.wep_key[0].size = 0;
+ priv->reg.wep_key[1].size = 0;
+ priv->reg.wep_key[2].size = 0;
+ priv->reg.wep_key[3].size = 0;
+ priv->reg.privacy_invoked = 0x00;
+ if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+ priv->need_commit |= SME_MODE_SET;
+ }
+ priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
+ wep_on_off = WEP_OFF;
+ priv->need_commit |= SME_WEP_FLAG;
+ } else {
+ /* Do we want to just set the transmit key index ? */
+ if ((index >= 0) && (index < 4)) {
+ /* set_wep_key(priv, index, 0, 0, 1); xxx */
+ if (priv->reg.wep_key[index].size) {
+ priv->reg.wep_index = index;
+ priv->need_commit |= SME_WEP_INDEX;
+ } else
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Commit the changes if needed */
+ if (dwrq->flags & IW_ENCODE_MODE)
+ priv->need_commit |= SME_WEP_FLAG;
+
+ if (dwrq->flags & IW_ENCODE_OPEN) {
+ if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+ priv->need_commit |= SME_MODE_SET;
+ }
+ priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
+ } else if (dwrq->flags & IW_ENCODE_RESTRICTED) {
+ if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM) {
+ priv->need_commit |= SME_MODE_SET;
+ }
+ priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
+ }
+ /* Commit immediately rather than returning -EINPROGRESS. */
+ if (priv->need_commit) {
+ ks_wlan_setup_parameter(priv, priv->need_commit);
+ priv->need_commit = 0;
+ }
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Encryption Key */
+static int ks_wlan_get_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ char zeros[16];
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ dwrq->flags = IW_ENCODE_DISABLED;
+
+ /* Check encryption mode */
+ switch (priv->reg.authenticate_type) {
+ case AUTH_TYPE_OPEN_SYSTEM:
+ dwrq->flags = IW_ENCODE_OPEN;
+ break;
+ case AUTH_TYPE_SHARED_KEY:
+ dwrq->flags = IW_ENCODE_RESTRICTED;
+ break;
+ }
+
+ memset(zeros, 0, sizeof(zeros));
+
+ /* Which key do we want ? -1 -> tx index */
+ if ((index < 0) || (index >= 4))
+ index = priv->reg.wep_index;
+ if (priv->reg.privacy_invoked) {
+ dwrq->flags &= ~IW_ENCODE_DISABLED;
+ /* dwrq->flags |= IW_ENCODE_NOKEY; */
+ }
+ dwrq->flags |= index + 1;
+ DPRINTK(2, "encoding flag = 0x%04X\n", dwrq->flags);
+ /* Copy the key to the user buffer */
+ if ((index >= 0) && (index < 4))
+ dwrq->length = priv->reg.wep_key[index].size;
+ if (dwrq->length > 16) {
+ dwrq->length = 0;
+ }
+ if (dwrq->length) {
+ if ((index >= 0) && (index < 4))
+ memcpy(extra, priv->reg.wep_key[index].val,
+ dwrq->length);
+ } else
+ memcpy(extra, zeros, dwrq->length);
+ return 0;
+}
+
+#ifndef KSC_OPNOTSUPP
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Tx-Power */
+static int ks_wlan_set_txpow(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ return -EOPNOTSUPP; /* Not Support */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Tx-Power */
+static int ks_wlan_get_txpow(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+	struct ks_wlan_private *priv =
+	    (struct ks_wlan_private *)netdev_priv(dev);
+
+	if (priv->sleep_mode == SLP_SLEEP) {
+		return -EPERM;
+	}
+
+ /* for SLEEP MODE */
+ /* Not Support */
+ vwrq->value = 0;
+ vwrq->disabled = (vwrq->value == 0);
+ vwrq->fixed = 1;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Retry limits */
+static int ks_wlan_set_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ return -EOPNOTSUPP; /* Not Support */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Retry limits */
+static int ks_wlan_get_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+	struct ks_wlan_private *priv =
+	    (struct ks_wlan_private *)netdev_priv(dev);
+
+	if (priv->sleep_mode == SLP_SLEEP) {
+		return -EPERM;
+	}
+
+ /* for SLEEP MODE */
+ /* Not Support */
+ vwrq->value = 0;
+ vwrq->disabled = (vwrq->value == 0);
+ vwrq->fixed = 1;
+ return 0;
+}
+#endif /* KSC_OPNOTSUPP */
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get range info */
+static int ks_wlan_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_range *range = (struct iw_range *)extra;
+ int i, k;
+
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ dwrq->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(*range));
+ range->min_nwid = 0x0000;
+ range->max_nwid = 0x0000;
+ range->num_channels = 14;
+ /* Should be based on cap_rid.country to give only
+	 * what the current card supports */
+ k = 0;
+ for (i = 0; i < 13; i++) { /* channel 1 -- 13 */
+ range->freq[k].i = i + 1; /* List index */
+ range->freq[k].m = frequency_list[i] * 100000;
+ range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
+ }
+ range->num_frequency = k;
+	if (priv->reg.phy_type == D_11B_ONLY_MODE ||
+	    priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) {	/* channel 14 */
+ range->freq[13].i = 14; /* List index */
+ range->freq[13].m = frequency_list[13] * 100000;
+ range->freq[13].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
+ range->num_frequency = 14;
+ }
+
+ /* Hum... Should put the right values there */
+ range->max_qual.qual = 100;
+ range->max_qual.level = 256 - 128; /* 0 dBm? */
+ range->max_qual.noise = 256 - 128;
+ range->sensitivity = 1;
+
+ if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+ range->bitrate[0] = 1e6;
+ range->bitrate[1] = 2e6;
+ range->bitrate[2] = 5.5e6;
+ range->bitrate[3] = 11e6;
+ range->num_bitrates = 4;
+ } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+ range->bitrate[0] = 1e6;
+ range->bitrate[1] = 2e6;
+ range->bitrate[2] = 5.5e6;
+ range->bitrate[3] = 11e6;
+
+ range->bitrate[4] = 6e6;
+ range->bitrate[5] = 9e6;
+ range->bitrate[6] = 12e6;
+ if (IW_MAX_BITRATES < 9) {
+ range->bitrate[7] = 54e6;
+ range->num_bitrates = 8;
+ } else {
+ range->bitrate[7] = 18e6;
+ range->bitrate[8] = 24e6;
+ range->bitrate[9] = 36e6;
+ range->bitrate[10] = 48e6;
+ range->bitrate[11] = 54e6;
+
+ range->num_bitrates = 12;
+ }
+ }
+
+ /* Set an indication of the max TCP throughput
+ * in bit/s that we can expect using this interface.
+	 * May be used for QoS stuff... Jean II */
+	if (i > 2)	/* i == 13 after the channel loop above */
+ range->throughput = 5000 * 1000;
+ else
+ range->throughput = 1500 * 1000;
+
+ range->min_rts = 0;
+ range->max_rts = 2347;
+ range->min_frag = 256;
+ range->max_frag = 2346;
+
+ range->encoding_size[0] = 5; /* WEP: RC4 40 bits */
+ range->encoding_size[1] = 13; /* WEP: RC4 ~128 bits */
+ range->num_encoding_sizes = 2;
+ range->max_encoding_tokens = 4;
+
+ /* power management not support */
+ range->pmp_flags = IW_POWER_ON;
+ range->pmt_flags = IW_POWER_ON;
+ range->pm_capa = 0;
+
+	/* Transmit Power - values are in dBm (or mW) */
+ range->txpower[0] = -256;
+ range->num_txpower = 1;
+ range->txpower_capa = IW_TXPOW_DBM;
+ /* range->txpower_capa = IW_TXPOW_MWATT; */
+
+ range->we_version_source = 21;
+ range->we_version_compiled = WIRELESS_EXT;
+
+ range->retry_capa = IW_RETRY_ON;
+ range->retry_flags = IW_RETRY_ON;
+ range->r_time_flags = IW_RETRY_ON;
+
+ /* Experimental measurements - boundary 11/5.5 Mb/s */
+ /* Note : with or without the (local->rssi), results
+ * are somewhat different. - Jean II */
+ range->avg_qual.qual = 50;
+ range->avg_qual.level = 186; /* -70 dBm */
+ range->avg_qual.noise = 0;
+
+ /* Event capability (kernel + driver) */
+ range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
+ IW_EVENT_CAPA_MASK(SIOCGIWAP) |
+ IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
+ range->event_capa[1] = IW_EVENT_CAPA_K_1;
+ range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
+ IW_EVENT_CAPA_MASK(IWEVMICHAELMICFAILURE));
+
+ /* encode extension (WPA) capability */
+ range->enc_capa = (IW_ENC_CAPA_WPA |
+ IW_ENC_CAPA_WPA2 |
+ IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP);
+ return 0;
+}
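+
+/*
+ * Worked example of the iw_freq encoding used above: wireless
+ * extensions express a frequency as m * 10^e Hz, so channel 1
+ * (2412 MHz) is reported as m = 2412 * 100000 with e = 1,
+ * i.e. 2412 * 10^6 Hz.
+ */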
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Power Management */
+static int ks_wlan_set_power(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ short enabled;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ enabled = vwrq->disabled ? 0 : 1;
+	if (enabled == 0) {	/* power management off */
+		priv->reg.powermgt = POWMGT_ACTIVE_MODE;
+	} else {	/* power save; SAVE2_MODE is only reachable via
+			 * the private SetPowerSave handler */
+		if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
+			priv->reg.powermgt = POWMGT_SAVE1_MODE;
+		else
+			return -EINVAL;
+	}
+
+ hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
+
+ return 0;
+}
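+
+/*
+ * Mapping sketch: `iwconfig <ifname> power off` arrives with
+ * vwrq->disabled set and selects POWMGT_ACTIVE_MODE; `power on`
+ * selects POWMGT_SAVE1_MODE, which the firmware only honours in
+ * infrastructure mode, hence the -EINVAL for ad-hoc operation.
+ */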
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Power Management */
+static int ks_wlan_get_power(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (priv->reg.powermgt > 0)
+ vwrq->disabled = 0;
+ else
+ vwrq->disabled = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get wireless statistics */
+static int ks_wlan_get_iwstats(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_quality *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ vwrq->qual = 0; /* not supported */
+ vwrq->level = priv->wstats.qual.level;
+ vwrq->noise = 0; /* not supported */
+ vwrq->updated = 0;
+
+ return 0;
+}
+
+#ifndef KSC_OPNOTSUPP
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Sensitivity */
+static int ks_wlan_set_sens(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ return -EOPNOTSUPP; /* Not Support */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Sensitivity */
+static int ks_wlan_get_sens(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ /* Not Support */
+ vwrq->value = 0;
+ vwrq->disabled = (vwrq->value == 0);
+ vwrq->fixed = 1;
+ return 0;
+}
+#endif /* KSC_OPNOTSUPP */
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get AP List */
+/* Note : this is deprecated in favor of IWSCAN */
+static int ks_wlan_get_aplist(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct sockaddr *address = (struct sockaddr *)extra;
+ struct iw_quality qual[LOCAL_APLIST_MAX];
+
+ int i;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ for (i = 0; i < priv->aplist.size; i++) {
+ memcpy(address[i].sa_data, &(priv->aplist.ap[i].bssid[0]),
+ ETH_ALEN);
+ address[i].sa_family = ARPHRD_ETHER;
+ qual[i].level = 256 - priv->aplist.ap[i].rssi;
+ qual[i].qual = priv->aplist.ap[i].sq;
+ qual[i].noise = 0; /* invalid noise value */
+ qual[i].updated = 7;
+ }
+ if (i) {
+ dwrq->flags = 1; /* Should be define'd */
+ memcpy(extra + sizeof(struct sockaddr) * i,
+ &qual, sizeof(struct iw_quality) * i);
+ }
+ dwrq->length = i;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : Initiate Scan */
+static int ks_wlan_set_scan(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_scan_req *req = NULL;
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* specified SSID SCAN */
+ if (wrqu->data.length == sizeof(struct iw_scan_req)
+ && wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ req = (struct iw_scan_req *)extra;
+ priv->scan_ssid_len = req->essid_len;
+ memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
+ } else {
+ priv->scan_ssid_len = 0;
+ }
+
+ priv->sme_i.sme_flag |= SME_AP_SCAN;
+ hostif_sme_enqueue(priv, SME_BSS_SCAN_REQUEST);
+
+ /* At this point, just return to the user. */
+
+ return 0;
+}
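+
+/*
+ * Scan flow sketch: `iwlist <ifname> scan` first hits this SIOCSIWSCAN
+ * handler, which merely queues SME_BSS_SCAN_REQUEST and returns;
+ * results are fetched later through ks_wlan_get_scan() below, which
+ * reports -EAGAIN while the SME_AP_SCAN flag is still pending.
+ */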
+
+/*------------------------------------------------------------------*/
+/*
+ * Translate scan data returned from the card to a card independent
+ * format that the Wireless Tools will understand - Jean II
+ */
+static inline char *ks_wlan_translate_scan(struct net_device *dev,
+ struct iw_request_info *info,
+ char *current_ev, char *end_buf,
+ struct local_ap_t *ap)
+{
+ /* struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; */
+ struct iw_event iwe; /* Temporary buffer */
+ u16 capabilities;
+ char *current_val; /* For rates */
+ int i;
+ static const char rsn_leader[] = "rsn_ie=";
+ static const char wpa_leader[] = "wpa_ie=";
+ char buf0[RSN_IE_BODY_MAX * 2 + 30];
+ char buf1[RSN_IE_BODY_MAX * 2 + 30];
+ char *pbuf;
+ /* First entry *MUST* be the AP MAC address */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, ap->bssid, ETH_ALEN);
+ current_ev =
+ iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+ IW_EV_ADDR_LEN);
+
+ /* Other entries will be displayed in the order we give them */
+
+ /* Add the ESSID */
+ iwe.u.data.length = ap->ssid.size;
+ if (iwe.u.data.length > 32)
+ iwe.u.data.length = 32;
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ current_ev =
+ iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+ &(ap->ssid.body[0]));
+
+ /* Add mode */
+ iwe.cmd = SIOCGIWMODE;
+ capabilities = le16_to_cpu(ap->capability);
+ if (capabilities & (BSS_CAP_ESS | BSS_CAP_IBSS)) {
+ if (capabilities & BSS_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ current_ev =
+ iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+ IW_EV_UINT_LEN);
+ }
+
+ /* Add frequency */
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = ap->channel;
+ iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
+ iwe.u.freq.e = 1;
+ current_ev =
+ iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+ IW_EV_FREQ_LEN);
+
+ /* Add quality statistics */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.level = 256 - ap->rssi;
+ iwe.u.qual.qual = ap->sq;
+ iwe.u.qual.noise = 0; /* invalid noise value */
+ current_ev =
+ iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+ IW_EV_QUAL_LEN);
+
+ /* Add encryption capability */
+ iwe.cmd = SIOCGIWENCODE;
+ if (capabilities & BSS_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ current_ev =
+ iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+ &(ap->ssid.body[0]));
+
+	/* Rate : stuffing multiple values in a single event requires a
+	 * bit more magic - Jean II */
+ current_val = current_ev + IW_EV_LCP_LEN;
+
+ iwe.cmd = SIOCGIWRATE;
+ /* Those two flags are ignored... */
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+
+ /* Max 16 values */
+ for (i = 0; i < 16; i++) {
+ /* NULL terminated */
+ if (i >= ap->rate_set.size)
+ break;
+ /* Bit rate given in 500 kb/s units (+ 0x80) */
+ iwe.u.bitrate.value = ((ap->rate_set.body[i] & 0x7f) * 500000);
+ /* Add new value to event */
+ current_val =
+ iwe_stream_add_value(info, current_ev, current_val, end_buf,
+ &iwe, IW_EV_PARAM_LEN);
+ }
+ /* Check if we added any event */
+ if ((current_val - current_ev) > IW_EV_LCP_LEN)
+ current_ev = current_val;
+
+#define GENERIC_INFO_ELEM_ID 0xdd
+#define RSN_INFO_ELEM_ID 0x30
+ if (ap->rsn_ie.id == RSN_INFO_ELEM_ID && ap->rsn_ie.size != 0) {
+ pbuf = &buf0[0];
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ memcpy(buf0, rsn_leader, sizeof(rsn_leader) - 1);
+ iwe.u.data.length += sizeof(rsn_leader) - 1;
+ pbuf += sizeof(rsn_leader) - 1;
+
+ pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.id);
+ pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.size);
+ iwe.u.data.length += 4;
+
+ for (i = 0; i < ap->rsn_ie.size; i++)
+ pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.body[i]);
+ iwe.u.data.length += (ap->rsn_ie.size) * 2;
+
+	DPRINTK(4, "ap->rsn_ie.size=%d\n", ap->rsn_ie.size);
+
+ current_ev =
+ iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+ &buf0[0]);
+ }
+ if (ap->wpa_ie.id == GENERIC_INFO_ELEM_ID && ap->wpa_ie.size != 0) {
+ pbuf = &buf1[0];
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ memcpy(buf1, wpa_leader, sizeof(wpa_leader) - 1);
+ iwe.u.data.length += sizeof(wpa_leader) - 1;
+ pbuf += sizeof(wpa_leader) - 1;
+
+ pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.id);
+ pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.size);
+ iwe.u.data.length += 4;
+
+ for (i = 0; i < ap->wpa_ie.size; i++)
+ pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.body[i]);
+ iwe.u.data.length += (ap->wpa_ie.size) * 2;
+
+		DPRINTK(4, "ap->wpa_ie.size=%d\n", ap->wpa_ie.size);
+ DPRINTK(4, "iwe.u.data.length=%d\n", iwe.u.data.length);
+
+ current_ev =
+ iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+ &buf1[0]);
+ }
+
+ /* The other data in the scan result are not really
+ * interesting, so for now drop it - Jean II */
+ return current_ev;
+}
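+
+/*
+ * Output sketch: for an AP carrying a 22-byte (0x16) WPA IE, the code
+ * above emits an IWEVCUSTOM event whose payload is the ASCII hex dump
+ *   "wpa_ie=dd16<44 hex digits>"
+ * which is the format WEXT-based tools such as wpa_supplicant parse.
+ */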
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : Read Scan Results */
+static int ks_wlan_get_scan(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *dwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int i;
+ char *current_ev = extra;
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (priv->sme_i.sme_flag & SME_AP_SCAN) {
+ DPRINTK(2, "flag AP_SCAN\n");
+ return -EAGAIN;
+ }
+
+ if (priv->aplist.size == 0) {
+ /* Client error, no scan results...
+		 * The caller needs to restart the scan. */
+ DPRINTK(2, "aplist 0\n");
+ return -ENODATA;
+ }
+#if 0
+ /* current connect ap */
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if ((extra + dwrq->length) - current_ev <= IW_EV_ADDR_LEN) {
+ dwrq->length = 0;
+ return -E2BIG;
+ }
+		current_ev = ks_wlan_translate_scan(dev, info, current_ev,
+//						    extra + IW_SCAN_MAX_DATA,
+						    extra + dwrq->length,
+						    &(priv->current_ap));
+ }
+#endif
+ /* Read and parse all entries */
+ for (i = 0; i < priv->aplist.size; i++) {
+ if ((extra + dwrq->length) - current_ev <= IW_EV_ADDR_LEN) {
+ dwrq->length = 0;
+ return -E2BIG;
+ }
+ /* Translate to WE format this entry */
+ current_ev = ks_wlan_translate_scan(dev, info, current_ev,
+// extra + IW_SCAN_MAX_DATA,
+ extra + dwrq->length,
+ &(priv->aplist.ap[i]));
+ }
+ /* Length of data */
+ dwrq->length = (current_ev - extra);
+ dwrq->flags = 0;
+
+ return 0;
+}
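+
+/*
+ * Buffer-size note: when the event stream would overflow the user
+ * buffer, the handler returns -E2BIG with dwrq->length zeroed; user
+ * space (e.g. iwlist) is then expected to retry SIOCGIWSCAN with a
+ * larger buffer.
+ */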
+
+/*------------------------------------------------------------------*/
+/* Commit handler : called after a bunch of SET operations */
+static int ks_wlan_config_commit(struct net_device *dev,
+ struct iw_request_info *info, void *zwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (!priv->need_commit)
+ return 0;
+
+ ks_wlan_setup_parameter(priv, priv->need_commit);
+ priv->need_commit = 0;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless handler : set association ie params */
+static int ks_wlan_set_genie(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ return 0;
+// return -EOPNOTSUPP;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless handler : set authentication mode params */
+static int ks_wlan_set_auth_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int index = (vwrq->flags & IW_AUTH_INDEX);
+ int value = vwrq->value;
+
+ DPRINTK(2, "index=%d:value=%08X\n", index, value);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ switch (index) {
+ case IW_AUTH_WPA_VERSION: /* 0 */
+ switch (value) {
+ case IW_AUTH_WPA_VERSION_DISABLED:
+ priv->wpa.version = value;
+ if (priv->wpa.rsn_enabled) {
+ priv->wpa.rsn_enabled = 0;
+ }
+ priv->need_commit |= SME_RSN;
+ break;
+ case IW_AUTH_WPA_VERSION_WPA:
+ case IW_AUTH_WPA_VERSION_WPA2:
+ priv->wpa.version = value;
+ if (!(priv->wpa.rsn_enabled)) {
+ priv->wpa.rsn_enabled = 1;
+ }
+ priv->need_commit |= SME_RSN;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE: /* 1 */
+ switch (value) {
+ case IW_AUTH_CIPHER_NONE:
+ if (priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x00;
+ priv->need_commit |= SME_WEP_FLAG;
+ }
+ break;
+ case IW_AUTH_CIPHER_WEP40:
+ case IW_AUTH_CIPHER_TKIP:
+ case IW_AUTH_CIPHER_CCMP:
+ case IW_AUTH_CIPHER_WEP104:
+ if (!priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x01;
+ priv->need_commit |= SME_WEP_FLAG;
+ }
+ priv->wpa.pairwise_suite = value;
+ priv->need_commit |= SME_RSN_UNICAST;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case IW_AUTH_CIPHER_GROUP: /* 2 */
+ switch (value) {
+ case IW_AUTH_CIPHER_NONE:
+ if (priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x00;
+ priv->need_commit |= SME_WEP_FLAG;
+ }
+ break;
+ case IW_AUTH_CIPHER_WEP40:
+ case IW_AUTH_CIPHER_TKIP:
+ case IW_AUTH_CIPHER_CCMP:
+ case IW_AUTH_CIPHER_WEP104:
+ if (!priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x01;
+ priv->need_commit |= SME_WEP_FLAG;
+ }
+ priv->wpa.group_suite = value;
+ priv->need_commit |= SME_RSN_MULTICAST;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case IW_AUTH_KEY_MGMT: /* 3 */
+ switch (value) {
+ case IW_AUTH_KEY_MGMT_802_1X:
+ case IW_AUTH_KEY_MGMT_PSK:
+ case 0: /* NONE or 802_1X_NO_WPA */
+ case 4: /* WPA_NONE */
+ priv->wpa.key_mgmt_suite = value;
+ priv->need_commit |= SME_RSN_AUTH;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case IW_AUTH_80211_AUTH_ALG: /* 6 */
+ switch (value) {
+ case IW_AUTH_ALG_OPEN_SYSTEM:
+ priv->wpa.auth_alg = value;
+ priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
+ break;
+ case IW_AUTH_ALG_SHARED_KEY:
+ priv->wpa.auth_alg = value;
+ priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
+ break;
+ case IW_AUTH_ALG_LEAP:
+ default:
+ return -EOPNOTSUPP;
+ }
+ priv->need_commit |= SME_MODE_SET;
+ break;
+ case IW_AUTH_WPA_ENABLED: /* 7 */
+ priv->wpa.wpa_enabled = value;
+ break;
+ case IW_AUTH_PRIVACY_INVOKED: /* 10 */
+ if ((value && !priv->reg.privacy_invoked) ||
+ (!value && priv->reg.privacy_invoked)) {
+ priv->reg.privacy_invoked = value ? 0x01 : 0x00;
+ priv->need_commit |= SME_WEP_FLAG;
+ }
+ break;
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* 4 */
+ case IW_AUTH_TKIP_COUNTERMEASURES: /* 5 */
+ case IW_AUTH_DROP_UNENCRYPTED: /* 8 */
+ case IW_AUTH_ROAMING_CONTROL: /* 9 */
+ default:
+ break;
+ }
+
+ /* return -EINPROGRESS; */
+ if (priv->need_commit) {
+ ks_wlan_setup_parameter(priv, priv->need_commit);
+ priv->need_commit = 0;
+ }
+ return 0;
+}
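+
+/*
+ * Call-sequence sketch: a WEXT supplicant typically programs WPA2-PSK
+ * with several SIOCSIWAUTH calls, e.g.
+ *   IW_AUTH_WPA_VERSION     = IW_AUTH_WPA_VERSION_WPA2
+ *   IW_AUTH_CIPHER_PAIRWISE = IW_AUTH_CIPHER_CCMP
+ *   IW_AUTH_CIPHER_GROUP    = IW_AUTH_CIPHER_CCMP
+ *   IW_AUTH_KEY_MGMT        = IW_AUTH_KEY_MGMT_PSK
+ * each call OR-ing the matching SME_RSN* bit into need_commit above.
+ */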
+
+/*------------------------------------------------------------------*/
+/* Wireless handler : get authentication mode params */
+static int ks_wlan_get_auth_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int index = (vwrq->flags & IW_AUTH_INDEX);
+ DPRINTK(2, "index=%d\n", index);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+	/* WPA (not used by wpa_supplicant?) */
+ switch (index) {
+ case IW_AUTH_WPA_VERSION:
+ vwrq->value = priv->wpa.version;
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+ vwrq->value = priv->wpa.pairwise_suite;
+ break;
+ case IW_AUTH_CIPHER_GROUP:
+ vwrq->value = priv->wpa.group_suite;
+ break;
+ case IW_AUTH_KEY_MGMT:
+ vwrq->value = priv->wpa.key_mgmt_suite;
+ break;
+ case IW_AUTH_80211_AUTH_ALG:
+ vwrq->value = priv->wpa.auth_alg;
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ vwrq->value = priv->wpa.rsn_enabled;
+ break;
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* OK??? */
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ case IW_AUTH_DROP_UNENCRYPTED:
+ default:
+ /* return -EOPNOTSUPP; */
+ break;
+ }
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set encoding token & mode (WPA) */
+static int ks_wlan_set_encode_ext(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_encode_ext *enc;
+ int index = dwrq->flags & IW_ENCODE_INDEX;
+ unsigned int commit = 0;
+
+	enc = (struct iw_encode_ext *)extra;
+	if (!enc)
+		return -EINVAL;
+
+	DPRINTK(2, "flags=%04X:: ext_flags=%08X\n", dwrq->flags,
+		enc->ext_flags);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+	if (index < 1 || index > 4)
+		return -EINVAL;
+	index--;
+
+ if (dwrq->flags & IW_ENCODE_DISABLED) {
+ priv->wpa.key[index].key_len = 0;
+ }
+
+ if (enc) {
+ priv->wpa.key[index].ext_flags = enc->ext_flags;
+ if (enc->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ priv->wpa.txkey = index;
+ commit |= SME_WEP_INDEX;
+ } else if (enc->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+ memcpy(&priv->wpa.key[index].rx_seq[0],
+ enc->rx_seq, IW_ENCODE_SEQ_MAX_SIZE);
+ }
+
+ memcpy(&priv->wpa.key[index].addr.sa_data[0],
+ &enc->addr.sa_data[0], ETH_ALEN);
+
+ switch (enc->alg) {
+ case IW_ENCODE_ALG_NONE:
+ if (priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x00;
+ commit |= SME_WEP_FLAG;
+ }
+ priv->wpa.key[index].key_len = 0;
+
+ break;
+ case IW_ENCODE_ALG_WEP:
+ case IW_ENCODE_ALG_CCMP:
+ if (!priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x01;
+ commit |= SME_WEP_FLAG;
+ }
+ if (enc->key_len) {
+ memcpy(&priv->wpa.key[index].key_val[0],
+ &enc->key[0], enc->key_len);
+ priv->wpa.key[index].key_len = enc->key_len;
+ commit |= (SME_WEP_VAL1 << index);
+ }
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ if (!priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x01;
+ commit |= SME_WEP_FLAG;
+ }
+ if (enc->key_len == 32) {
+ memcpy(&priv->wpa.key[index].key_val[0],
+ &enc->key[0], enc->key_len - 16);
+ priv->wpa.key[index].key_len =
+ enc->key_len - 16;
+ if (priv->wpa.key_mgmt_suite == 4) { /* WPA_NONE */
+ memcpy(&priv->wpa.key[index].
+ tx_mic_key[0], &enc->key[16], 8);
+ memcpy(&priv->wpa.key[index].
+ rx_mic_key[0], &enc->key[16], 8);
+ } else {
+ memcpy(&priv->wpa.key[index].
+ tx_mic_key[0], &enc->key[16], 8);
+ memcpy(&priv->wpa.key[index].
+ rx_mic_key[0], &enc->key[24], 8);
+ }
+ commit |= (SME_WEP_VAL1 << index);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->wpa.key[index].alg = enc->alg;
+ } else
+ return -EINVAL;
+
+ if (commit) {
+ if (commit & SME_WEP_INDEX)
+ hostif_sme_enqueue(priv, SME_SET_TXKEY);
+ if (commit & SME_WEP_VAL_MASK)
+ hostif_sme_enqueue(priv, SME_SET_KEY1 + index);
+ if (commit & SME_WEP_FLAG)
+ hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
+ }
+
+ return 0;
+}
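+
+/*
+ * Key-layout sketch: for TKIP the supplicant hands over one 32-byte
+ * blob: bytes 0..15 are the temporal key, 16..23 the TX MIC key and
+ * 24..31 the RX MIC key, which is exactly the split performed above
+ * (with TX == RX MIC in the WPA_NONE/ad-hoc case).
+ */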
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get encoding token & mode (WPA) */
+static int ks_wlan_get_encode_ext(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+	/* WPA key readback is not used by wpa_supplicant, so the
+	 * extended key material (struct iw_encode_ext at 'extra',
+	 * key index in dwrq->flags) is not reported back here. */
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : PMKSA cache operation (WPA2) */
+static int ks_wlan_set_pmksa(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_pmksa *pmksa;
+ int i;
+ struct pmk_t *pmk;
+ struct list_head *ptr;
+
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (!extra) {
+ return -EINVAL;
+ }
+ pmksa = (struct iw_pmksa *)extra;
+ DPRINTK(2, "cmd=%d\n", pmksa->cmd);
+
+ switch (pmksa->cmd) {
+ case IW_PMKSA_ADD:
+ if (list_empty(&priv->pmklist.head)) { /* new list */
+ for (i = 0; i < PMK_LIST_MAX; i++) {
+ pmk = &priv->pmklist.pmk[i];
+ if (!memcmp
+ ("\x00\x00\x00\x00\x00\x00", pmk->bssid,
+ ETH_ALEN))
+ break;
+ }
+ memcpy(pmk->bssid, pmksa->bssid.sa_data, ETH_ALEN);
+ memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
+ list_add(&pmk->list, &priv->pmklist.head);
+ priv->pmklist.size++;
+ } else { /* search cache data */
+ list_for_each(ptr, &priv->pmklist.head) {
+ pmk = list_entry(ptr, struct pmk_t, list);
+				if (!memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN)) {	/* match: move the entry to the list head */
+ memcpy(pmk->pmkid, pmksa->pmkid,
+ IW_PMKID_LEN);
+ list_move(&pmk->list,
+ &priv->pmklist.head);
+ break;
+ }
+ }
+			if (ptr == &priv->pmklist.head) {	/* address not found */
+ if (PMK_LIST_MAX > priv->pmklist.size) { /* new cache data */
+ for (i = 0; i < PMK_LIST_MAX; i++) {
+ pmk = &priv->pmklist.pmk[i];
+ if (!memcmp
+ ("\x00\x00\x00\x00\x00\x00",
+ pmk->bssid, ETH_ALEN))
+ break;
+ }
+ memcpy(pmk->bssid, pmksa->bssid.sa_data,
+ ETH_ALEN);
+ memcpy(pmk->pmkid, pmksa->pmkid,
+ IW_PMKID_LEN);
+ list_add(&pmk->list,
+ &priv->pmklist.head);
+ priv->pmklist.size++;
+ } else { /* overwrite old cache data */
+ pmk =
+ list_entry(priv->pmklist.head.prev,
+ struct pmk_t, list);
+ memcpy(pmk->bssid, pmksa->bssid.sa_data,
+ ETH_ALEN);
+ memcpy(pmk->pmkid, pmksa->pmkid,
+ IW_PMKID_LEN);
+ list_move(&pmk->list,
+ &priv->pmklist.head);
+ }
+ }
+ }
+ break;
+ case IW_PMKSA_REMOVE:
+ if (list_empty(&priv->pmklist.head)) { /* list empty */
+ return -EINVAL;
+ } else { /* search cache data */
+ list_for_each(ptr, &priv->pmklist.head) {
+ pmk = list_entry(ptr, struct pmk_t, list);
+				if (!memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN)) {	/* match: remove the entry */
+ memset(pmk->bssid, 0, ETH_ALEN);
+ memset(pmk->pmkid, 0, IW_PMKID_LEN);
+ list_del_init(&pmk->list);
+ break;
+ }
+ }
+			if (ptr == &priv->pmklist.head) {	/* address not found */
+ return 0;
+ }
+ }
+ break;
+ case IW_PMKSA_FLUSH:
+ memset(&(priv->pmklist), 0, sizeof(priv->pmklist));
+ INIT_LIST_HEAD(&priv->pmklist.head);
+ for (i = 0; i < PMK_LIST_MAX; i++)
+ INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hostif_sme_enqueue(priv, SME_SET_PMKSA);
+ return 0;
+}
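+
+/*
+ * Cache-behaviour sketch: IW_PMKSA_ADD keeps the list in MRU order
+ * (a re-added BSSID moves back to the head) and, once PMK_LIST_MAX
+ * entries exist, recycles the least recently used tail entry, so the
+ * cache never grows beyond PMK_LIST_MAX.
+ */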
+
+static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_statistics *wstats = &priv->wstats;
+
+ if (!atomic_read(&update_phyinfo)) {
+ if (priv->dev_state < DEVICE_STATE_READY)
+			return NULL;	/* initialization not finished */
+ else
+ return wstats;
+ }
+
+ /* Packets discarded in the wireless adapter due to wireless
+ * specific problems */
+ wstats->discard.nwid = 0; /* Rx invalid nwid */
+ wstats->discard.code = 0; /* Rx invalid crypt */
+ wstats->discard.fragment = 0; /* Rx invalid frag */
+ wstats->discard.retries = 0; /* Tx excessive retries */
+ wstats->discard.misc = 0; /* Invalid misc */
+ wstats->miss.beacon = 0; /* Missed beacon */
+
+ return wstats;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set stop request */
+static int ks_wlan_set_stop_request(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (!(*uwrq))
+ return -EINVAL;
+
+ hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set MLME */
+#include <linux/ieee80211.h>
+static int ks_wlan_set_mlme(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *dwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_mlme *mlme = (struct iw_mlme *)extra;
+ __u32 mode;
+
+ DPRINTK(2, ":%d :%d\n", mlme->cmd, mlme->reason_code);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ switch (mlme->cmd) {
+ case IW_MLME_DEAUTH:
+		if (mlme->reason_code == WLAN_REASON_MIC_FAILURE) {
+			return 0;
+		}
+		/* fall through - deauth is handled like disassoc */
+	case IW_MLME_DISASSOC:
+ mode = 1;
+ return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
+ default:
+ return -EOPNOTSUPP; /* Not Support */
+ }
+}
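+
+/*
+ * Semantics sketch: a deauth caused by a MIC failure is swallowed
+ * (TKIP countermeasures are left to the supplicant), while any other
+ * DEAUTH/DISASSOC request is mapped onto the stop request above,
+ * i.e. an SME_STOP_REQUEST to the firmware.
+ */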
+
+/*------------------------------------------------------------------*/
+/* Private handler : get firmware version */
+static int ks_wlan_get_firmware_version(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ strcpy(extra, &(priv->firmware_version[0]));
+ dwrq->length = priv->version_size + 1;
+ return 0;
+}
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : set force disconnect status */
+static int ks_wlan_set_detach(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == CONNECT_STATUS) { /* 0 */
+ priv->connect_status &= ~FORCE_DISCONNECT;
+ if ((priv->connect_status & CONNECT_STATUS_MASK) ==
+ CONNECT_STATUS)
+ netif_carrier_on(dev);
+ } else if (*uwrq == DISCONNECT_STATUS) { /* 1 */
+ priv->connect_status |= FORCE_DISCONNECT;
+ netif_carrier_off(dev);
+ } else
+ return -EINVAL;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get force disconnect status */
+static int ks_wlan_get_detach(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = ((priv->connect_status & FORCE_DISCONNECT) ? 1 : 0);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get connect status */
+static int ks_wlan_get_connect(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = (priv->connect_status & CONNECT_STATUS_MASK);
+ return 0;
+}
+#endif
+
+/*------------------------------------------------------------------*/
+/* Private handler : set preamble */
+static int ks_wlan_set_preamble(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == LONG_PREAMBLE) { /* 0 */
+ priv->reg.preamble = LONG_PREAMBLE;
+ } else if (*uwrq == SHORT_PREAMBLE) { /* 1 */
+ priv->reg.preamble = SHORT_PREAMBLE;
+ } else
+ return -EINVAL;
+
+ priv->need_commit |= SME_MODE_SET;
+ return -EINPROGRESS; /* Call commit handler */
+
+}
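+
+/*
+ * Usage sketch for this private ioctl (name taken from the
+ * ks_wlan_private_args table at the end of this file):
+ *   iwpriv <ifname> SetPreamble 1    # 1 = SHORT_PREAMBLE, 0 = LONG
+ * The -EINPROGRESS return defers the MIB update to the commit handler.
+ */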
+
+/*------------------------------------------------------------------*/
+/* Private handler : get preamble */
+static int ks_wlan_get_preamble(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.preamble;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set power save mode */
+static int ks_wlan_set_powermgt(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == POWMGT_ACTIVE_MODE) { /* 0 */
+ priv->reg.powermgt = POWMGT_ACTIVE_MODE;
+ } else if (*uwrq == POWMGT_SAVE1_MODE) { /* 1 */
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
+ priv->reg.powermgt = POWMGT_SAVE1_MODE;
+ else
+ return -EINVAL;
+ } else if (*uwrq == POWMGT_SAVE2_MODE) { /* 2 */
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
+ priv->reg.powermgt = POWMGT_SAVE2_MODE;
+ else
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get power save mode */
+static int ks_wlan_get_powermgt(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.powermgt;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set scan type */
+static int ks_wlan_set_scan_type(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == ACTIVE_SCAN) { /* 0 */
+ priv->reg.scan_type = ACTIVE_SCAN;
+ } else if (*uwrq == PASSIVE_SCAN) { /* 1 */
+ priv->reg.scan_type = PASSIVE_SCAN;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get scan type */
+static int ks_wlan_get_scan_type(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.scan_type;
+ return 0;
+}
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : write raw data to device */
+static int ks_wlan_data_write(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+ unsigned char *wbuff = NULL;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ wbuff = (unsigned char *)kmalloc(dwrq->length, GFP_ATOMIC);
+ if (!wbuff)
+ return -EFAULT;
+ memcpy(wbuff, extra, dwrq->length);
+
+ /* write to device */
+ ks_wlan_hw_tx(priv, wbuff, dwrq->length, NULL, NULL, NULL);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : read raw data form device */
+static int ks_wlan_data_read(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+ unsigned short read_length;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (!atomic_read(&priv->event_count)) {
+ if (priv->dev_state < DEVICE_STATE_BOOT) { /* Remove device */
+ read_length = 4;
+ memset(extra, 0xff, read_length);
+ dwrq->length = read_length;
+ return 0;
+ }
+ read_length = 0;
+ memset(extra, 0, 1);
+ dwrq->length = 0;
+ return 0;
+ }
+
+ if (atomic_read(&priv->event_count) > 0)
+ atomic_dec(&priv->event_count);
+
+ spin_lock(&priv->dev_read_lock); /* request spin lock */
+
+ /* Copy length max size 0x07ff */
+ if (priv->dev_size[priv->dev_count] > 2047)
+ read_length = 2047;
+ else
+ read_length = priv->dev_size[priv->dev_count];
+
+ /* Copy data */
+ memcpy(extra, &(priv->dev_data[priv->dev_count][0]), read_length);
+
+ spin_unlock(&priv->dev_read_lock); /* release spin lock */
+
+ /* Initialize */
+ priv->dev_data[priv->dev_count] = 0;
+ priv->dev_size[priv->dev_count] = 0;
+
+ priv->dev_count++;
+ if (priv->dev_count == DEVICE_STOCK_COUNT)
+ priv->dev_count = 0;
+
+ /* Set read size */
+ dwrq->length = read_length;
+
+ return 0;
+}
+#endif
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : get wep string */
+#define WEP_ASCII_BUFF_SIZE (17+64*4+1)
+static int ks_wlan_get_wep_ascii(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+ int i, j, len = 0;
+ char tmp[WEP_ASCII_BUFF_SIZE];
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ strcpy(tmp, " WEP keys ASCII \n");
+ len += strlen(" WEP keys ASCII \n");
+
+ for (i = 0; i < 4; i++) {
+ strcpy(tmp + len, "\t[");
+ len += strlen("\t[");
+ tmp[len] = '1' + i;
+ len++;
+ strcpy(tmp + len, "] ");
+ len += strlen("] ");
+ if (priv->reg.wep_key[i].size) {
+ strcpy(tmp + len,
+ (priv->reg.wep_key[i].size <
+ 6 ? "(40bits) [" : "(104bits) ["));
+ len +=
+ strlen((priv->reg.wep_key[i].size <
+ 6 ? "(40bits) [" : "(104bits) ["));
+ for (j = 0; j < priv->reg.wep_key[i].size; j++, len++)
+ tmp[len] =
+ (isprint(priv->reg.wep_key[i].val[j]) ?
+ priv->reg.wep_key[i].val[j] : ' ');
+
+ strcpy(tmp + len, "]\n");
+ len += strlen("]\n");
+ } else {
+ strcpy(tmp + len, "off\n");
+ len += strlen("off\n");
+ }
+ }
+
+ memcpy(extra, tmp, len);
+ dwrq->length = len + 1;
+ return 0;
+}
+#endif
+
+/*------------------------------------------------------------------*/
+/* Private handler : set beacon lost count */
+static int ks_wlan_set_beacon_lost(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX) {
+ priv->reg.beacon_lost_count = *uwrq;
+ } else
+ return -EINVAL;
+
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+ priv->need_commit |= SME_MODE_SET;
+ return -EINPROGRESS; /* Call commit handler */
+ } else
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get beacon lost count */
+static int ks_wlan_get_beacon_lost(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.beacon_lost_count;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set phy type */
+static int ks_wlan_set_phy_type(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == D_11B_ONLY_MODE) { /* 0 */
+ priv->reg.phy_type = D_11B_ONLY_MODE;
+ } else if (*uwrq == D_11G_ONLY_MODE) { /* 1 */
+ priv->reg.phy_type = D_11G_ONLY_MODE;
+ } else if (*uwrq == D_11BG_COMPATIBLE_MODE) { /* 2 */
+ priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
+ } else
+ return -EINVAL;
+
+ priv->need_commit |= SME_MODE_SET;
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get phy type */
+static int ks_wlan_get_phy_type(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.phy_type;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set cts mode */
+static int ks_wlan_set_cts_mode(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == CTS_MODE_FALSE) { /* 0 */
+ priv->reg.cts_mode = CTS_MODE_FALSE;
+ } else if (*uwrq == CTS_MODE_TRUE) { /* 1 */
+ if (priv->reg.phy_type == D_11G_ONLY_MODE ||
+ priv->reg.phy_type == D_11BG_COMPATIBLE_MODE)
+ priv->reg.cts_mode = CTS_MODE_TRUE;
+ else
+ priv->reg.cts_mode = CTS_MODE_FALSE;
+ } else
+ return -EINVAL;
+
+ priv->need_commit |= SME_MODE_SET;
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get cts mode */
+static int ks_wlan_get_cts_mode(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.cts_mode;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set sleep mode */
+static int ks_wlan_set_sleep_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "\n");
+
+ if (*uwrq == SLP_SLEEP) {
+ priv->sleep_mode = *uwrq;
+ printk("SET_SLEEP_MODE %d\n", priv->sleep_mode);
+
+ hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+ hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
+
+ } else if (*uwrq == SLP_ACTIVE) {
+ priv->sleep_mode = *uwrq;
+ printk("SET_SLEEP_MODE %d\n", priv->sleep_mode);
+ hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
+ } else {
+		printk("SET_SLEEP_MODE %d error\n", *uwrq);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get sleep mode */
+static int ks_wlan_get_sleep_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "GET_SLEEP_MODE %d\n", priv->sleep_mode);
+ *uwrq = priv->sleep_mode;
+
+ return 0;
+}
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : set phy information timer */
+static int ks_wlan_set_phy_information_timer(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq >= 0 && *uwrq <= 0xFFFF) /* 0-65535 */
+ priv->reg.phy_info_timer = (uint16_t) * uwrq;
+ else
+ return -EINVAL;
+
+ hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get phy information timer */
+static int ks_wlan_get_phy_information_timer(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.phy_info_timer;
+ return 0;
+}
+#endif
+
+#ifdef WPS
+/*------------------------------------------------------------------*/
+/* Private handler : set WPS enable */
+static int ks_wlan_set_wps_enable(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == 0 || *uwrq == 1)
+ priv->wps.wps_enabled = *uwrq;
+ else
+ return -EINVAL;
+
+ hostif_sme_enqueue(priv, SME_WPS_ENABLE_REQUEST);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get WPS enable */
+static int ks_wlan_get_wps_enable(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->wps.wps_enabled;
+ printk("return=%d\n", *uwrq);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set WPS probe req */
+static int ks_wlan_set_wps_probe_req(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ uint8_t *p = extra;
+ unsigned char len;
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ DPRINTK(2, "dwrq->length=%d\n", dwrq->length);
+
+ /* length check */
+ if (p[1] + 2 != dwrq->length || dwrq->length > 256) {
+ return -EINVAL;
+ }
+
+ priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */
+ len = p[1] + 2; /* IE header + IE */
+
+ memcpy(priv->wps.ie, &len, sizeof(len));
+ p = memcpy(priv->wps.ie + 1, p, len);
+
+ DPRINTK(2, "%d(%#x): %02X %02X %02X %02X ... %02X %02X %02X\n",
+ priv->wps.ielen, priv->wps.ielen, p[0], p[1], p[2], p[3],
+ p[priv->wps.ielen - 3], p[priv->wps.ielen - 2],
+ p[priv->wps.ielen - 1]);
+
+ hostif_sme_enqueue(priv, SME_WPS_PROBE_REQUEST);
+
+ return 0;
+}
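+
+/*
+ * Buffer-layout sketch: priv->wps.ie is stored as
+ *   [total length byte][IE id][IE len][IE body ...]
+ * i.e. the raw (id, len, body) element from user space prefixed with
+ * its own length, which is why ielen is p[1] + 2 + 1 above.
+ */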
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : get WPS probe req */
+static int ks_wlan_get_wps_probe_req(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ return 0;
+}
+#endif
+#endif /* WPS */
+
+/*------------------------------------------------------------------*/
+/* Private handler : set tx gain control value */
+static int ks_wlan_set_tx_gain(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+	if (*uwrq <= 0xFF)	/* 0-255; *uwrq is unsigned, >= 0 always holds */
+ priv->gain.TxGain = (uint8_t) * uwrq;
+ else
+ return -EINVAL;
+
+ if (priv->gain.TxGain < 0xFF)
+ priv->gain.TxMode = 1;
+ else
+ priv->gain.TxMode = 0;
+
+ hostif_sme_enqueue(priv, SME_SET_GAIN);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get tx gain control value */
+static int ks_wlan_get_tx_gain(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->gain.TxGain;
+ hostif_sme_enqueue(priv, SME_GET_GAIN);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set rx gain control value */
+static int ks_wlan_set_rx_gain(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+	if (*uwrq <= 0xFF)	/* 0-255; *uwrq is unsigned, >= 0 always holds */
+ priv->gain.RxGain = (uint8_t) * uwrq;
+ else
+ return -EINVAL;
+
+ if (priv->gain.RxGain < 0xFF)
+ priv->gain.RxMode = 1;
+ else
+ priv->gain.RxMode = 0;
+
+ hostif_sme_enqueue(priv, SME_SET_GAIN);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get rx gain control value */
+static int ks_wlan_get_rx_gain(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->gain.RxGain;
+ hostif_sme_enqueue(priv, SME_GET_GAIN);
+ return 0;
+}
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : set region value */
+static int ks_wlan_set_region(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq >= 0x9 && *uwrq <= 0xF) /* 0x9-0xf */
+ priv->region = (uint8_t) * uwrq;
+ else
+ return -EINVAL;
+
+ hostif_sme_enqueue(priv, SME_SET_REGION);
+ return 0;
+}
+#endif
+
+/*------------------------------------------------------------------*/
+/* Private handler : get eeprom checksum result */
+static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ *uwrq = priv->eeprom_checksum;
+ return 0;
+}
+
+static void print_hif_event(int event)
+{
+ switch (event) {
+ case HIF_DATA_REQ:
+ printk("HIF_DATA_REQ\n");
+ break;
+ case HIF_DATA_IND:
+ printk("HIF_DATA_IND\n");
+ break;
+ case HIF_MIB_GET_REQ:
+ printk("HIF_MIB_GET_REQ\n");
+ break;
+ case HIF_MIB_GET_CONF:
+ printk("HIF_MIB_GET_CONF\n");
+ break;
+ case HIF_MIB_SET_REQ:
+ printk("HIF_MIB_SET_REQ\n");
+ break;
+ case HIF_MIB_SET_CONF:
+ printk("HIF_MIB_SET_CONF\n");
+ break;
+ case HIF_POWERMGT_REQ:
+ printk("HIF_POWERMGT_REQ\n");
+ break;
+ case HIF_POWERMGT_CONF:
+ printk("HIF_POWERMGT_CONF\n");
+ break;
+ case HIF_START_REQ:
+ printk("HIF_START_REQ\n");
+ break;
+ case HIF_START_CONF:
+ printk("HIF_START_CONF\n");
+ break;
+ case HIF_CONNECT_IND:
+ printk("HIF_CONNECT_IND\n");
+ break;
+ case HIF_STOP_REQ:
+ printk("HIF_STOP_REQ\n");
+ break;
+ case HIF_STOP_CONF:
+ printk("HIF_STOP_CONF\n");
+ break;
+ case HIF_PS_ADH_SET_REQ:
+ printk("HIF_PS_ADH_SET_REQ\n");
+ break;
+ case HIF_PS_ADH_SET_CONF:
+ printk("HIF_PS_ADH_SET_CONF\n");
+ break;
+ case HIF_INFRA_SET_REQ:
+ printk("HIF_INFRA_SET_REQ\n");
+ break;
+ case HIF_INFRA_SET_CONF:
+ printk("HIF_INFRA_SET_CONF\n");
+ break;
+ case HIF_ADH_SET_REQ:
+ printk("HIF_ADH_SET_REQ\n");
+ break;
+ case HIF_ADH_SET_CONF:
+ printk("HIF_ADH_SET_CONF\n");
+ break;
+ case HIF_AP_SET_REQ:
+ printk("HIF_AP_SET_REQ\n");
+ break;
+ case HIF_AP_SET_CONF:
+ printk("HIF_AP_SET_CONF\n");
+ break;
+ case HIF_ASSOC_INFO_IND:
+ printk("HIF_ASSOC_INFO_IND\n");
+ break;
+ case HIF_MIC_FAILURE_REQ:
+ printk("HIF_MIC_FAILURE_REQ\n");
+ break;
+ case HIF_MIC_FAILURE_CONF:
+ printk("HIF_MIC_FAILURE_CONF\n");
+ break;
+ case HIF_SCAN_REQ:
+ printk("HIF_SCAN_REQ\n");
+ break;
+ case HIF_SCAN_CONF:
+ printk("HIF_SCAN_CONF\n");
+ break;
+ case HIF_PHY_INFO_REQ:
+ printk("HIF_PHY_INFO_REQ\n");
+ break;
+ case HIF_PHY_INFO_CONF:
+ printk("HIF_PHY_INFO_CONF\n");
+ break;
+ case HIF_SLEEP_REQ:
+ printk("HIF_SLEEP_REQ\n");
+ break;
+ case HIF_SLEEP_CONF:
+ printk("HIF_SLEEP_CONF\n");
+ break;
+ case HIF_PHY_INFO_IND:
+ printk("HIF_PHY_INFO_IND\n");
+ break;
+ case HIF_SCAN_IND:
+ printk("HIF_SCAN_IND\n");
+ break;
+ case HIF_INFRA_SET2_REQ:
+ printk("HIF_INFRA_SET2_REQ\n");
+ break;
+ case HIF_INFRA_SET2_CONF:
+ printk("HIF_INFRA_SET2_CONF\n");
+ break;
+ case HIF_ADH_SET2_REQ:
+ printk("HIF_ADH_SET2_REQ\n");
+ break;
+	case HIF_ADH_SET2_CONF:
+		printk("HIF_ADH_SET2_CONF\n");
+		break;
+	}
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get host command history */
+static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ int i, event;
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ for (i = 63; i >= 0; i--) {
+		/* bias by SME_EVENT_BUFF_SIZE so the index stays
+		 * non-negative before the modulo */
+		event =
+		    priv->hostt.buff[(priv->hostt.qtail +
+				      SME_EVENT_BUFF_SIZE - 1 - i) %
+				     SME_EVENT_BUFF_SIZE];
+ print_hif_event(event);
+ }
+ return 0;
+}
+
+/* Structures to export the Wireless Handlers */
+
+static const struct iw_priv_args ks_wlan_private_args[] = {
+/*{ cmd, set_args, get_args, name[16] } */
+ {KS_WLAN_GET_FIRM_VERSION, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_CHAR | (128 + 1), "GetFirmwareVer"},
+#ifdef WPS
+ {KS_WLAN_SET_WPS_ENABLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetWPSEnable"},
+ {KS_WLAN_GET_WPS_ENABLE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetW"},
+ {KS_WLAN_SET_WPS_PROBE_REQ, IW_PRIV_TYPE_BYTE | 2047, IW_PRIV_TYPE_NONE,
+ "SetWPSProbeReq"},
+#endif /* WPS */
+ {KS_WLAN_SET_PREAMBLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetPreamble"},
+ {KS_WLAN_GET_PREAMBLE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPreamble"},
+ {KS_WLAN_SET_POWER_SAVE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetPowerSave"},
+ {KS_WLAN_GET_POWER_SAVE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPowerSave"},
+ {KS_WLAN_SET_SCAN_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetScanType"},
+ {KS_WLAN_GET_SCAN_TYPE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetScanType"},
+ {KS_WLAN_SET_RX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetRxGain"},
+ {KS_WLAN_GET_RX_GAIN, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetRxGain"},
+ {KS_WLAN_HOSTT, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_CHAR | (128 + 1),
+ "hostt"},
+ {KS_WLAN_SET_BEACON_LOST, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetBeaconLost"},
+ {KS_WLAN_GET_BEACON_LOST, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetBeaconLost"},
+ {KS_WLAN_SET_SLEEP_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetSleepMode"},
+ {KS_WLAN_GET_SLEEP_MODE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetSleepMode"},
+ {KS_WLAN_SET_TX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetTxGain"},
+ {KS_WLAN_GET_TX_GAIN, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetTxGain"},
+ {KS_WLAN_SET_PHY_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetPhyType"},
+ {KS_WLAN_GET_PHY_TYPE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPhyType"},
+ {KS_WLAN_SET_CTS_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetCtsMode"},
+ {KS_WLAN_GET_CTS_MODE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetCtsMode"},
+ {KS_WLAN_GET_EEPROM_CKSUM, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetChecksum"},
+};
+
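+/* The wireless-extensions core dispatches standard handlers by position:
+ * entry N serves ioctl SIOCIWFIRST + N, so the NULL and "-- hole --" slots
+ * below keep later entries aligned with their ioctl numbers.
+ */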
+static const iw_handler ks_wlan_handler[] = {
+ (iw_handler) ks_wlan_config_commit, /* SIOCSIWCOMMIT */
+ (iw_handler) ks_wlan_get_name, /* SIOCGIWNAME */
+ (iw_handler) NULL, /* SIOCSIWNWID */
+ (iw_handler) NULL, /* SIOCGIWNWID */
+ (iw_handler) ks_wlan_set_freq, /* SIOCSIWFREQ */
+ (iw_handler) ks_wlan_get_freq, /* SIOCGIWFREQ */
+ (iw_handler) ks_wlan_set_mode, /* SIOCSIWMODE */
+ (iw_handler) ks_wlan_get_mode, /* SIOCGIWMODE */
+#ifndef KSC_OPNOTSUPP
+ (iw_handler) ks_wlan_set_sens, /* SIOCSIWSENS */
+ (iw_handler) ks_wlan_get_sens, /* SIOCGIWSENS */
+#else /* KSC_OPNOTSUPP */
+ (iw_handler) NULL, /* SIOCSIWSENS */
+ (iw_handler) NULL, /* SIOCGIWSENS */
+#endif /* KSC_OPNOTSUPP */
+ (iw_handler) NULL, /* SIOCSIWRANGE */
+ (iw_handler) ks_wlan_get_range, /* SIOCGIWRANGE */
+ (iw_handler) NULL, /* SIOCSIWPRIV */
+ (iw_handler) NULL, /* SIOCGIWPRIV */
+ (iw_handler) NULL, /* SIOCSIWSTATS */
+ (iw_handler) ks_wlan_get_iwstats, /* SIOCGIWSTATS */
+ (iw_handler) NULL, /* SIOCSIWSPY */
+ (iw_handler) NULL, /* SIOCGIWSPY */
+ (iw_handler) NULL, /* SIOCSIWTHRSPY */
+ (iw_handler) NULL, /* SIOCGIWTHRSPY */
+ (iw_handler) ks_wlan_set_wap, /* SIOCSIWAP */
+ (iw_handler) ks_wlan_get_wap, /* SIOCGIWAP */
+ (iw_handler) ks_wlan_set_mlme, /* SIOCSIWMLME */
+ (iw_handler) ks_wlan_get_aplist, /* SIOCGIWAPLIST */
+ (iw_handler) ks_wlan_set_scan, /* SIOCSIWSCAN */
+ (iw_handler) ks_wlan_get_scan, /* SIOCGIWSCAN */
+ (iw_handler) ks_wlan_set_essid, /* SIOCSIWESSID */
+ (iw_handler) ks_wlan_get_essid, /* SIOCGIWESSID */
+ (iw_handler) ks_wlan_set_nick, /* SIOCSIWNICKN */
+ (iw_handler) ks_wlan_get_nick, /* SIOCGIWNICKN */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) ks_wlan_set_rate, /* SIOCSIWRATE */
+ (iw_handler) ks_wlan_get_rate, /* SIOCGIWRATE */
+ (iw_handler) ks_wlan_set_rts, /* SIOCSIWRTS */
+ (iw_handler) ks_wlan_get_rts, /* SIOCGIWRTS */
+ (iw_handler) ks_wlan_set_frag, /* SIOCSIWFRAG */
+ (iw_handler) ks_wlan_get_frag, /* SIOCGIWFRAG */
+#ifndef KSC_OPNOTSUPP
+ (iw_handler) ks_wlan_set_txpow, /* SIOCSIWTXPOW */
+ (iw_handler) ks_wlan_get_txpow, /* SIOCGIWTXPOW */
+ (iw_handler) ks_wlan_set_retry, /* SIOCSIWRETRY */
+ (iw_handler) ks_wlan_get_retry, /* SIOCGIWRETRY */
+#else /* KSC_OPNOTSUPP */
+ (iw_handler) NULL, /* SIOCSIWTXPOW */
+ (iw_handler) NULL, /* SIOCGIWTXPOW */
+ (iw_handler) NULL, /* SIOCSIWRETRY */
+ (iw_handler) NULL, /* SIOCGIWRETRY */
+#endif /* KSC_OPNOTSUPP */
+ (iw_handler) ks_wlan_set_encode, /* SIOCSIWENCODE */
+ (iw_handler) ks_wlan_get_encode, /* SIOCGIWENCODE */
+ (iw_handler) ks_wlan_set_power, /* SIOCSIWPOWER */
+ (iw_handler) ks_wlan_get_power, /* SIOCGIWPOWER */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) ks_wlan_set_genie, /* SIOCSIWGENIE */
+ (iw_handler) NULL, /* SIOCGIWGENIE */
+ (iw_handler) ks_wlan_set_auth_mode, /* SIOCSIWAUTH */
+ (iw_handler) ks_wlan_get_auth_mode, /* SIOCGIWAUTH */
+ (iw_handler) ks_wlan_set_encode_ext, /* SIOCSIWENCODEEXT */
+ (iw_handler) ks_wlan_get_encode_ext, /* SIOCGIWENCODEEXT */
+ (iw_handler) ks_wlan_set_pmksa, /* SIOCSIWPMKSA */
+ (iw_handler) NULL, /* -- hole -- */
+};
+
+/* private_handler */
+static const iw_handler ks_wlan_private_handler[] = {
+ (iw_handler) NULL, /* 0 */
+ (iw_handler) NULL, /* 1, used to be: KS_WLAN_GET_DRIVER_VERSION */
+ (iw_handler) NULL, /* 2 */
+ (iw_handler) ks_wlan_get_firmware_version, /* 3 KS_WLAN_GET_FIRM_VERSION */
+#ifdef WPS
+ (iw_handler) ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */
+ (iw_handler) ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */
+ (iw_handler) ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */
+#else
+ (iw_handler) NULL, /* 4 */
+ (iw_handler) NULL, /* 5 */
+ (iw_handler) NULL, /* 6 */
+#endif /* WPS */
+
+ (iw_handler) ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_EEPROM_CKSUM */
+ (iw_handler) ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */
+ (iw_handler) ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */
+ (iw_handler) ks_wlan_set_powermgt, /* 10 KS_WLAN_SET_POWER_SAVE */
+ (iw_handler) ks_wlan_get_powermgt, /* 11 KS_WLAN_GET_POWER_SAVE */
+ (iw_handler) ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */
+ (iw_handler) ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */
+ (iw_handler) ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */
+ (iw_handler) ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */
+ (iw_handler) ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */
+ (iw_handler) NULL, /* 17 */
+ (iw_handler) ks_wlan_set_beacon_lost, /* 18 KS_WLAN_SET_BEACON_LOST */
+ (iw_handler) ks_wlan_get_beacon_lost, /* 19 KS_WLAN_GET_BEACON_LOST */
+ (iw_handler) ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */
+ (iw_handler) ks_wlan_get_tx_gain, /* 21 KS_WLAN_GET_TX_GAIN */
+ (iw_handler) ks_wlan_set_phy_type, /* 22 KS_WLAN_SET_PHY_TYPE */
+ (iw_handler) ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */
+ (iw_handler) ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */
+ (iw_handler) ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */
+ (iw_handler) NULL, /* 26 */
+ (iw_handler) NULL, /* 27 */
+ (iw_handler) ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */
+ (iw_handler) ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */
+ (iw_handler) NULL, /* 30 */
+ (iw_handler) NULL, /* 31 */
+};
+
+static const struct iw_handler_def ks_wlan_handler_def = {
+ .num_standard = ARRAY_SIZE(ks_wlan_handler),
+ .num_private = ARRAY_SIZE(ks_wlan_private_handler),
+ .num_private_args = ARRAY_SIZE(ks_wlan_private_args),
+ .standard = (iw_handler *)ks_wlan_handler,
+ .private = (iw_handler *)ks_wlan_private_handler,
+ .private_args = (struct iw_priv_args *)ks_wlan_private_args,
+ .get_wireless_stats = ks_get_wireless_stats,
+};
+
+static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd)
+{
+ int rc = 0;
+ struct iwreq *wrq = (struct iwreq *)rq;
+
+ switch (cmd) {
+ case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */
+ rc = ks_wlan_set_stop_request(dev, NULL, &wrq->u.mode, NULL);
+ break;
+ /* all other calls are currently unsupported */
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ DPRINTK(5, "return=%d\n", rc);
+ return rc;
+}
+
+static
+struct net_device_stats *ks_wlan_get_stats(struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
+ if (priv->dev_state < DEVICE_STATE_READY)
+ return NULL; /* initialization not finished yet */
+
+ return &priv->nstats;
+}
+
+static
+int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+ struct sockaddr *mac_addr = (struct sockaddr *)addr;
+
+ if (netif_running(dev))
+ return -EBUSY;
+ memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
+ memcpy(priv->eth_addr, mac_addr->sa_data, ETH_ALEN);
+
+ priv->mac_address_valid = 0;
+ hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
+ printk(KERN_INFO "ks_wlan: MAC ADDRESS = %pM\n", priv->eth_addr);
+ return 0;
+}
+
+static
+void ks_wlan_tx_timeout(struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
+ DPRINTK(1, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead,
+ priv->tx_dev.qtail);
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+ priv->nstats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static
+int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+ int rc = 0;
+
+ DPRINTK(3, "in_interrupt()=%ld\n", in_interrupt());
+
+ if (!skb) {
+ printk(KERN_ERR "ks_wlan: skb == NULL!!!\n");
+ return 0;
+ }
+ if (priv->dev_state < DEVICE_STATE_READY) {
+ dev_kfree_skb(skb);
+ return 0; /* initialization not finished yet */
+ }
+
+ if (netif_running(dev))
+ netif_stop_queue(dev);
+
+ rc = hostif_data_request(priv, skb);
+ netif_trans_update(dev);
+
+ DPRINTK(4, "rc=%d\n", rc);
+ if (rc)
+ rc = 0; /* always report success to the networking stack */
+
+ return rc;
+}
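+
+/*
+ * Context for the unconditional success above: .ndo_start_xmit() status is
+ * a netdev_tx_t, where 0 is NETDEV_TX_OK ("skb consumed") and
+ * NETDEV_TX_BUSY asks the core to requeue the skb. A sketch of the
+ * convention (example_xmit and hw_queue_full are hypothetical):
+ *
+ * static netdev_tx_t example_xmit(struct sk_buff *skb,
+ * struct net_device *dev)
+ * {
+ * if (hw_queue_full) // hypothetical condition
+ * return NETDEV_TX_BUSY; // core requeues; keep the skb
+ * // hand the skb to the hardware layer, which now owns it
+ * return NETDEV_TX_OK; // == 0
+ * }
+ */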
+
+void send_packet_complete(void *arg1, void *arg2)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)arg1;
+ struct sk_buff *packet = (struct sk_buff *)arg2;
+
+ DPRINTK(3, "\n");
+
+ /* only dereference the skb after the NULL check */
+ if (packet) {
+ priv->nstats.tx_bytes += packet->len;
+ priv->nstats.tx_packets++;
+ dev_kfree_skb(packet);
+ }
+
+ if (netif_queue_stopped(priv->net_dev))
+ netif_wake_queue(priv->net_dev);
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * This routine is not state sensitive and need not be SMP locked.
+ */
+static
+void ks_wlan_set_multicast_list(struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
+ DPRINTK(4, "\n");
+ if (priv->dev_state < DEVICE_STATE_READY)
+ return; /* initialization not finished yet */
+ hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
+}
+
+static
+int ks_wlan_open(struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
+ priv->cur_rx = 0;
+
+ if (!priv->mac_address_valid) {
+ printk(KERN_ERR "ks_wlan : %s Not READY !!\n", dev->name);
+ return -EBUSY;
+ }
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static
+int ks_wlan_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+
+ DPRINTK(4, "%s: Shutting down ethercard, status was 0x%4.4x.\n",
+ dev->name, 0x00);
+
+ return 0;
+}
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (3 * HZ)
+static const unsigned char dummy_addr[] =
+ { 0x00, 0x0b, 0xe3, 0x00, 0x00, 0x00 };
+
+static const struct net_device_ops ks_wlan_netdev_ops = {
+ .ndo_start_xmit = ks_wlan_start_xmit,
+ .ndo_open = ks_wlan_open,
+ .ndo_stop = ks_wlan_close,
+ .ndo_do_ioctl = ks_wlan_netdev_ioctl,
+ .ndo_set_mac_address = ks_wlan_set_mac_address,
+ .ndo_get_stats = ks_wlan_get_stats,
+ .ndo_tx_timeout = ks_wlan_tx_timeout,
+ .ndo_set_rx_mode = ks_wlan_set_multicast_list,
+};
+
+int ks_wlan_net_start(struct net_device *dev)
+{
+ struct ks_wlan_private *priv;
+
+ priv = netdev_priv(dev);
+ priv->mac_address_valid = 0;
+ priv->need_commit = 0;
+ priv->device_open_status = 1;
+
+ /* phy information update timer */
+ atomic_set(&update_phyinfo, 0);
+ init_timer(&update_phyinfo_timer);
+ update_phyinfo_timer.function = ks_wlan_update_phyinfo_timeout;
+ update_phyinfo_timer.data = (unsigned long)priv;
+
+ /* dummy address set */
+ memcpy(priv->eth_addr, dummy_addr, ETH_ALEN);
+ memcpy(dev->dev_addr, priv->eth_addr, ETH_ALEN);
+
+ /* The ks_wlan-specific entries in the device structure. */
+ dev->netdev_ops = &ks_wlan_netdev_ops;
+ dev->wireless_handlers = &ks_wlan_handler_def;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ netif_carrier_off(dev);
+
+ return 0;
+}
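+
+/*
+ * Aside: kernels of this era also provide setup_timer(), which folds the
+ * init_timer()/function/data triple above into one call; an equivalent
+ * sketch:
+ *
+ * setup_timer(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout,
+ * (unsigned long)priv);
+ */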
+
+int ks_wlan_net_stop(struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
+ priv->device_open_status = 0;
+ del_timer_sync(&update_phyinfo_timer);
+
+ if (netif_running(dev))
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+int ks_wlan_reset(struct net_device *dev)
+{
+ return 0;
+}
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
new file mode 100644
index 000000000000..e14c109b3cab
--- /dev/null
+++ b/drivers/staging/ks7010/michael_mic.c
@@ -0,0 +1,139 @@
+/*
+ * Driver for KeyStream wireless LAN
+ *
+ * Copyright (C) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include "michael_mic.h"
+
+// Rotation functions on 32 bit values
+#define ROL32( A, n ) ( ((A) << (n)) | ( ((A)>>(32-(n))) & ( (1UL << (n)) - 1 ) ) )
+#define ROR32( A, n ) ROL32( (A), 32-(n) )
+
+// Convert from Byte[] to UInt32 in a portable (little-endian) way.
+// Fully parenthesized, with each byte widened before shifting, so the
+// macro expands safely inside larger expressions and for bytes >= 0x80.
+#define getUInt32( A, B ) ((uint32_t)(A[B+0]) + ((uint32_t)(A[B+1]) << 8) + \
+ ((uint32_t)(A[B+2]) << 16) + ((uint32_t)(A[B+3]) << 24))
+
+// Convert from UInt32 to Byte[] in a portable (little-endian) way
+#define putUInt32( A, B, C ) do { A[B+0] = (uint8_t) (C & 0xff); \
+ A[B+1] = (uint8_t) ((C>>8) & 0xff); \
+ A[B+2] = (uint8_t) ((C>>16) & 0xff); \
+ A[B+3] = (uint8_t) ((C>>24) & 0xff); } while (0)
+
+// Reset the state to the empty message.
+#define MichaelClear( A ) do { A->L = A->K0; \
+ A->R = A->K1; \
+ A->nBytesInM = 0; } while (0)
+
+static
+void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t *key)
+{
+ // Set the key
+ Mic->K0 = getUInt32(key, 0);
+ Mic->K1 = getUInt32(key, 4);
+
+ // Reset the message state
+ MichaelClear(Mic);
+}
+
+#define MichaelBlockFunction(L, R) \
+do{ \
+ R ^= ROL32( L, 17 ); \
+ L += R; \
+ R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8); \
+ L += R; \
+ R ^= ROL32( L, 3 ); \
+ L += R; \
+ R ^= ROR32( L, 2 ); \
+ L += R; \
+}while(0)
+
+static
+void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
+{
+ int addlen;
+
+ if (Mic->nBytesInM) {
+ addlen = 4 - Mic->nBytesInM;
+ if (addlen > nBytes)
+ addlen = nBytes;
+ memcpy(&Mic->M[Mic->nBytesInM], src, addlen);
+ Mic->nBytesInM += addlen;
+ src += addlen;
+ nBytes -= addlen;
+
+ if (Mic->nBytesInM < 4)
+ return;
+
+ Mic->L ^= getUInt32(Mic->M, 0);
+ MichaelBlockFunction(Mic->L, Mic->R);
+ Mic->nBytesInM = 0;
+ }
+
+ while (nBytes >= 4) {
+ Mic->L ^= getUInt32(src, 0);
+ MichaelBlockFunction(Mic->L, Mic->R);
+ src += 4;
+ nBytes -= 4;
+ }
+
+ if (nBytes > 0) {
+ Mic->nBytesInM = nBytes;
+ memcpy(Mic->M, src, nBytes);
+ }
+}
+
+static
+void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
+{
+ uint8_t *data = Mic->M;
+
+ switch (Mic->nBytesInM) {
+ case 0:
+ Mic->L ^= 0x5a;
+ break;
+ case 1:
+ Mic->L ^= data[0] | 0x5a00;
+ break;
+ case 2:
+ Mic->L ^= data[0] | (data[1] << 8) | 0x5a0000;
+ break;
+ case 3:
+ Mic->L ^= data[0] | (data[1] << 8) | (data[2] << 16) |
+ 0x5a000000;
+ break;
+ }
+ MichaelBlockFunction(Mic->L, Mic->R);
+ MichaelBlockFunction(Mic->L, Mic->R);
+ // Two extra block passes flush the padded final word; the MIC is now in L and R.
+ putUInt32(dst, 0, Mic->L);
+ putUInt32(dst, 4, Mic->R);
+
+ // Reset to the empty message.
+ MichaelClear(Mic);
+}
+
+void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
+ uint8_t *Data, int Len, uint8_t priority,
+ uint8_t *Result)
+{
+ uint8_t pad_data[4] = { priority, 0, 0, 0 };
+ // Compute the MIC value
+ /*
+ * IEEE802.11i page 47
+ * Figure 43g TKIP MIC processing format
+ * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
+ * |6 |6 |1 |3 |M |1 |1 |1 |1 |1 |1 |1 |1 | Octet
+ * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
+ * |DA|SA|Priority|0 |Data|M0|M1|M2|M3|M4|M5|M6|M7|
+ * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
+ */
+ MichaelInitializeFunction(Mic, Key);
+ MichaelAppend(Mic, Data, 12); /* |DA|SA| */
+ MichaelAppend(Mic, pad_data, 4); /* |Priority|0|0|0| */
+ MichaelAppend(Mic, Data + 12, Len - 12); /* |Data| */
+ MichaelGetMIC(Mic, Result);
+}
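+
+/*
+ * Usage sketch (hypothetical caller; key, frame, and sizes are
+ * illustrative, not driver data):
+ *
+ * struct michel_mic_t mic;
+ * uint8_t key[8] = { 0 }; // 64-bit TKIP Michael key
+ * uint8_t frame[20] = { 0 }; // 12-byte DA|SA header + payload
+ * uint8_t mic_out[8];
+ *
+ * // priority 0; Len covers DA|SA plus payload, so payload is Len - 12
+ * MichaelMICFunction(&mic, key, frame, sizeof(frame), 0, mic_out);
+ * // mic_out[] now holds the 8-byte MIC carried with the MSDU
+ */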
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
new file mode 100644
index 000000000000..c7e4eb280961
--- /dev/null
+++ b/drivers/staging/ks7010/michael_mic.h
@@ -0,0 +1,26 @@
+/*
+ * Driver for KeyStream wireless LAN
+ *
+ * Copyright (C) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Michael MIC definitions */
+struct michel_mic_t {
+ uint32_t K0; // Key
+ uint32_t K1; // Key
+ uint32_t L; // Current state
+ uint32_t R; // Current state
+ uint8_t M[4]; // Message accumulator (single word)
+ int nBytesInM; // # bytes in M
+ uint8_t Result[8];
+};
+
+extern
+void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
+ uint8_t *Data, int Len, uint8_t priority,
+ uint8_t *Result);
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index 1edfca58c1c6..be0675d8ff5e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 4141afb101bb..3f6447c65042 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 455c54d0d17c..25adab19fd86 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index 2e008bffc89a..d3f9a6020ee3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -16,10 +16,6 @@
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see http://www.gnu.org/licenses
*
- * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores,
- * CA 94065 USA or visit www.oracle.com if you need additional information or
- * have any questions.
- *
* GPL HEADER END
*/
/*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 119986bc7961..6949a1846635 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 4b9102bd95d5..cce6b58e3682 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index ac4e8cfe6c8c..8c75d5075590 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 2fd2a9690a34..4daa3823f60a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index e02cde5aeca1..0ee60ff336f2 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
index 2c7ec2d28f38..008da4497bda 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index f9b20c5accbf..a7e1340e69a1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index a268ef7aa19d..e8695e4a39d1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index 7656b09b8752..b646acd1f7e7 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
index 6ce9accb91ad..dfff17088403 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
@@ -35,7 +35,7 @@
#define MAX_NUM_SHOW_ENTRIES 32
#define LNET_MAX_STR_LEN 128
#define LNET_MAX_SHOW_NUM_CPT 128
-#define LNET_UNDEFINED_HOPS ((__u32) -1)
+#define LNET_UNDEFINED_HOPS ((__u32)(-1))
struct lnet_ioctl_config_lnd_cmn_tunables {
__u32 lct_version;
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 24c4a08e6dc6..7967b013cbae 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -38,6 +38,7 @@
#include <linux/kthread.h>
#include <linux/uio.h>
#include <linux/types.h>
+#include <linux/completion.h>
#include "types.h"
#include "lnetctl.h"
@@ -610,7 +611,7 @@ typedef struct {
/* rcd ready for free */
struct list_head ln_rcd_zombie;
/* serialise startup/shutdown */
- struct semaphore ln_rc_signal;
+ struct completion ln_rc_signal;
struct mutex ln_api_mutex;
struct mutex ln_lnd_mutex;
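The ln_rc_signal switch from semaphore to completion above implies matching call-site updates in the router-checker code elsewhere in this series; the completion pattern, as a sketch (not the exact lnet call sites; the_lnet is LNet's global state):

	init_completion(&the_lnet.ln_rc_signal);	/* creator, before kthread_run() */
	complete(&the_lnet.ln_rc_signal);	/* router checker signals startup (and exit) */
	wait_for_completion(&the_lnet.ln_rc_signal);	/* creator/terminator blocks here */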
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 1c679cb72785..e098b6c086e1 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -68,9 +68,9 @@ typedef __u64 lnet_nid_t;
typedef __u32 lnet_pid_t;
/** wildcard NID that matches any end-point address */
-#define LNET_NID_ANY ((lnet_nid_t) -1)
+#define LNET_NID_ANY ((lnet_nid_t)(-1))
/** wildcard PID that matches any lnet_pid_t */
-#define LNET_PID_ANY ((lnet_pid_t) -1)
+#define LNET_PID_ANY ((lnet_pid_t)(-1))
#define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */
#define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 6c59f2ff2220..4f5978b3767b 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -44,7 +40,7 @@
static lnd_t the_o2iblnd;
-kib_data_t kiblnd_data;
+struct kib_data kiblnd_data;
static __u32 kiblnd_cksum(void *ptr, int nob)
{
@@ -98,40 +94,40 @@ static char *kiblnd_msgtype2str(int type)
static int kiblnd_msgtype2size(int type)
{
- const int hdr_size = offsetof(kib_msg_t, ibm_u);
+ const int hdr_size = offsetof(struct kib_msg, ibm_u);
switch (type) {
case IBLND_MSG_CONNREQ:
case IBLND_MSG_CONNACK:
- return hdr_size + sizeof(kib_connparams_t);
+ return hdr_size + sizeof(struct kib_connparams);
case IBLND_MSG_NOOP:
return hdr_size;
case IBLND_MSG_IMMEDIATE:
- return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
+ return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]);
case IBLND_MSG_PUT_REQ:
- return hdr_size + sizeof(kib_putreq_msg_t);
+ return hdr_size + sizeof(struct kib_putreq_msg);
case IBLND_MSG_PUT_ACK:
- return hdr_size + sizeof(kib_putack_msg_t);
+ return hdr_size + sizeof(struct kib_putack_msg);
case IBLND_MSG_GET_REQ:
- return hdr_size + sizeof(kib_get_msg_t);
+ return hdr_size + sizeof(struct kib_get_msg);
case IBLND_MSG_PUT_NAK:
case IBLND_MSG_PUT_DONE:
case IBLND_MSG_GET_DONE:
- return hdr_size + sizeof(kib_completion_msg_t);
+ return hdr_size + sizeof(struct kib_completion_msg);
default:
return -1;
}
}
-static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
+static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
{
- kib_rdma_desc_t *rd;
+ struct kib_rdma_desc *rd;
int nob;
int n;
int i;
@@ -156,7 +152,7 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
return 1;
}
- nob = offsetof(kib_msg_t, ibm_u) +
+ nob = offsetof(struct kib_msg, ibm_u) +
kiblnd_rd_msg_size(rd, msg->ibm_type, n);
if (msg->ibm_nob < nob) {
@@ -176,10 +172,10 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
return 0;
}
-void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
+void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version,
int credits, lnet_nid_t dstnid, __u64 dststamp)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
/*
* CAVEAT EMPTOR! all message fields not set here should have been
@@ -202,9 +198,9 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
}
}
-int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
+int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
{
- const int hdr_size = offsetof(kib_msg_t, ibm_u);
+ const int hdr_size = offsetof(struct kib_msg, ibm_u);
__u32 msg_cksum;
__u16 version;
int msg_nob;
@@ -315,10 +311,10 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
return 0;
}
-int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
+int kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid)
{
- kib_peer_t *peer;
- kib_net_t *net = ni->ni_data;
+ struct kib_peer *peer;
+ struct kib_net *net = ni->ni_data;
int cpt = lnet_cpt_of_nid(nid);
unsigned long flags;
@@ -357,9 +353,9 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
return 0;
}
-void kiblnd_destroy_peer(kib_peer_t *peer)
+void kiblnd_destroy_peer(struct kib_peer *peer)
{
- kib_net_t *net = peer->ibp_ni->ni_data;
+ struct kib_net *net = peer->ibp_ni->ni_data;
LASSERT(net);
LASSERT(!atomic_read(&peer->ibp_refcount));
@@ -378,7 +374,7 @@ void kiblnd_destroy_peer(kib_peer_t *peer)
atomic_dec(&net->ibn_npeers);
}
-kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
+struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid)
{
/*
* the caller is responsible for accounting the additional reference
@@ -386,10 +382,10 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
*/
struct list_head *peer_list = kiblnd_nid2peerlist(nid);
struct list_head *tmp;
- kib_peer_t *peer;
+ struct kib_peer *peer;
list_for_each(tmp, peer_list) {
- peer = list_entry(tmp, kib_peer_t, ibp_list);
+ peer = list_entry(tmp, struct kib_peer, ibp_list);
LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_nid != nid)
@@ -404,7 +400,7 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
return NULL;
}
-void kiblnd_unlink_peer_locked(kib_peer_t *peer)
+void kiblnd_unlink_peer_locked(struct kib_peer *peer)
{
LASSERT(list_empty(&peer->ibp_conns));
@@ -417,7 +413,7 @@ void kiblnd_unlink_peer_locked(kib_peer_t *peer)
static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
lnet_nid_t *nidp, int *count)
{
- kib_peer_t *peer;
+ struct kib_peer *peer;
struct list_head *ptmp;
int i;
unsigned long flags;
@@ -426,7 +422,7 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = list_entry(ptmp, struct kib_peer, ibp_list);
LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
@@ -448,17 +444,17 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
return -ENOENT;
}
-static void kiblnd_del_peer_locked(kib_peer_t *peer)
+static void kiblnd_del_peer_locked(struct kib_peer *peer)
{
struct list_head *ctmp;
struct list_head *cnxt;
- kib_conn_t *conn;
+ struct kib_conn *conn;
if (list_empty(&peer->ibp_conns)) {
kiblnd_unlink_peer_locked(peer);
} else {
list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
kiblnd_close_conn_locked(conn, 0);
}
@@ -475,7 +471,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
LIST_HEAD(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
- kib_peer_t *peer;
+ struct kib_peer *peer;
int lo;
int hi;
int i;
@@ -494,7 +490,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = list_entry(ptmp, struct kib_peer, ibp_list);
LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
@@ -522,11 +518,11 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
return rc;
}
-static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
+static struct kib_conn *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
{
- kib_peer_t *peer;
+ struct kib_peer *peer;
struct list_head *ptmp;
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
int i;
unsigned long flags;
@@ -535,7 +531,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = list_entry(ptmp, struct kib_peer, ibp_list);
LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
@@ -545,7 +541,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
if (index-- > 0)
continue;
- conn = list_entry(ctmp, kib_conn_t,
+ conn = list_entry(ctmp, struct kib_conn,
ibc_list);
kiblnd_conn_addref(conn);
read_unlock_irqrestore(
@@ -594,7 +590,7 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
cmid->route.path_rec->mtu = mtu;
}
-static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
+static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
{
cpumask_t *mask;
int vectors;
@@ -621,7 +617,7 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
return 1;
}
-kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid,
int state, int version)
{
/*
@@ -634,12 +630,12 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
* its ref on 'cmid').
*/
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_net_t *net = peer->ibp_ni->ni_data;
- kib_dev_t *dev;
+ struct kib_net *net = peer->ibp_ni->ni_data;
+ struct kib_dev *dev;
struct ib_qp_init_attr *init_qp_attr;
struct kib_sched_info *sched;
struct ib_cq_init_attr cq_attr = {};
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct ib_cq *cq;
unsigned long flags;
int cpt;
@@ -723,7 +719,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
write_unlock_irqrestore(glock, flags);
LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
- IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
+ IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
if (!conn->ibc_rxs) {
CERROR("Cannot allocate RX buffers\n");
goto failed_2;
@@ -833,10 +829,10 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
return NULL;
}
-void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
+void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
- kib_peer_t *peer = conn->ibc_peer;
+ struct kib_peer *peer = conn->ibc_peer;
int rc;
LASSERT(!in_interrupt());
@@ -879,7 +875,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
if (conn->ibc_rxs) {
LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
+ IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
}
if (conn->ibc_connvars)
@@ -890,7 +886,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
/* See CAVEAT EMPTOR above in kiblnd_create_conn */
if (conn->ibc_state != IBLND_CONN_INIT) {
- kib_net_t *net = peer->ibp_ni->ni_data;
+ struct kib_net *net = peer->ibp_ni->ni_data;
kiblnd_peer_decref(peer);
rdma_destroy_id(cmid);
@@ -900,15 +896,15 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
LIBCFS_FREE(conn, sizeof(*conn));
}
-int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
+int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
{
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
libcfs_nid2str(peer->ibp_nid),
@@ -921,16 +917,16 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
return count;
}
-int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+int kiblnd_close_stale_conns_locked(struct kib_peer *peer,
int version, __u64 incarnation)
{
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
if (conn->ibc_version == version &&
conn->ibc_incarnation == incarnation)
@@ -951,7 +947,7 @@ int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
{
- kib_peer_t *peer;
+ struct kib_peer *peer;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
@@ -972,7 +968,7 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = list_entry(ptmp, struct kib_peer, ibp_list);
LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
@@ -1016,7 +1012,7 @@ static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
break;
}
case IOC_LIBCFS_GET_CONN: {
- kib_conn_t *conn;
+ struct kib_conn *conn;
rc = 0;
conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
@@ -1052,7 +1048,7 @@ static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_t *peer;
+ struct kib_peer *peer;
unsigned long flags;
read_lock_irqsave(glock, flags);
@@ -1078,7 +1074,7 @@ static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
last_alive ? cfs_duration_sec(now - last_alive) : -1);
}
-static void kiblnd_free_pages(kib_pages_t *p)
+static void kiblnd_free_pages(struct kib_pages *p)
{
int npages = p->ibp_npages;
int i;
@@ -1088,22 +1084,22 @@ static void kiblnd_free_pages(kib_pages_t *p)
__free_page(p->ibp_pages[i]);
}
- LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
+ LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
}
-int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
+int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
{
- kib_pages_t *p;
+ struct kib_pages *p;
int i;
LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
- offsetof(kib_pages_t, ibp_pages[npages]));
+ offsetof(struct kib_pages, ibp_pages[npages]));
if (!p) {
CERROR("Can't allocate descriptor for %d pages\n", npages);
return -ENOMEM;
}
- memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
+ memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
p->ibp_npages = npages;
for (i = 0; i < npages; i++) {
@@ -1121,9 +1117,9 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
return 0;
}
-void kiblnd_unmap_rx_descs(kib_conn_t *conn)
+void kiblnd_unmap_rx_descs(struct kib_conn *conn)
{
- kib_rx_t *rx;
+ struct kib_rx *rx;
int i;
LASSERT(conn->ibc_rxs);
@@ -1145,9 +1141,9 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn)
conn->ibc_rx_pages = NULL;
}
-void kiblnd_map_rx_descs(kib_conn_t *conn)
+void kiblnd_map_rx_descs(struct kib_conn *conn)
{
- kib_rx_t *rx;
+ struct kib_rx *rx;
struct page *pg;
int pg_off;
int ipg;
@@ -1158,7 +1154,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
rx = &conn->ibc_rxs[i];
rx->rx_conn = conn;
- rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
+ rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off);
rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
rx->rx_msg,
@@ -1183,10 +1179,10 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
}
}
-static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
+static void kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo)
{
- kib_hca_dev_t *hdev = tpo->tpo_hdev;
- kib_tx_t *tx;
+ struct kib_hca_dev *hdev = tpo->tpo_hdev;
+ struct kib_tx *tx;
int i;
LASSERT(!tpo->tpo_pool.po_allocated);
@@ -1206,9 +1202,9 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
tpo->tpo_hdev = NULL;
}
-static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
+static struct kib_hca_dev *kiblnd_current_hdev(struct kib_dev *dev)
{
- kib_hca_dev_t *hdev;
+ struct kib_hca_dev *hdev;
unsigned long flags;
int i = 0;
@@ -1232,14 +1228,14 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
return hdev;
}
-static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
+static void kiblnd_map_tx_pool(struct kib_tx_pool *tpo)
{
- kib_pages_t *txpgs = tpo->tpo_tx_pages;
- kib_pool_t *pool = &tpo->tpo_pool;
- kib_net_t *net = pool->po_owner->ps_net;
- kib_dev_t *dev;
+ struct kib_pages *txpgs = tpo->tpo_tx_pages;
+ struct kib_pool *pool = &tpo->tpo_pool;
+ struct kib_net *net = pool->po_owner->ps_net;
+ struct kib_dev *dev;
struct page *page;
- kib_tx_t *tx;
+ struct kib_tx *tx;
int page_offset;
int ipage;
int i;
@@ -1260,7 +1256,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
page = txpgs->ibp_pages[ipage];
tx = &tpo->tpo_tx_descs[i];
- tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
+ tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) +
page_offset);
tx->tx_msgaddr = kiblnd_dma_map_single(
@@ -1283,11 +1279,11 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
}
}
-struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd,
int negotiated_nfrags)
{
- kib_net_t *net = ni->ni_data;
- kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct kib_net *net = ni->ni_data;
+ struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
__u16 nfrags;
int mod;
@@ -1304,7 +1300,7 @@ struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
return hdev->ibh_mrs;
}
-static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
+static void kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
{
LASSERT(!fpo->fpo_map_count);
@@ -1335,7 +1331,7 @@ static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
- kib_fmr_pool_t *fpo, *tmp;
+ struct kib_fmr_pool *fpo, *tmp;
list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
list_del(&fpo->fpo_list);
@@ -1361,7 +1357,7 @@ kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
return max(IBLND_FMR_POOL_FLUSH, size);
}
-static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo)
{
struct ib_fmr_pool_param param = {
.max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
@@ -1388,7 +1384,7 @@ static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
return rc;
}
-static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo)
{
struct kib_fast_reg_descriptor *frd, *tmp;
int i, rc;
@@ -1438,12 +1434,12 @@ out:
return rc;
}
-static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
- kib_fmr_pool_t **pp_fpo)
+static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
+ struct kib_fmr_pool **pp_fpo)
{
- kib_dev_t *dev = fps->fps_net->ibn_dev;
+ struct kib_dev *dev = fps->fps_net->ibn_dev;
struct ib_device_attr *dev_attr;
- kib_fmr_pool_t *fpo;
+ struct kib_fmr_pool *fpo;
int rc;
LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
@@ -1488,7 +1484,7 @@ out_fpo:
return rc;
}
-static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
+static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps,
struct list_head *zombies)
{
if (!fps->fps_net) /* initialized? */
@@ -1497,8 +1493,8 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
spin_lock(&fps->fps_lock);
while (!list_empty(&fps->fps_pool_list)) {
- kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
- kib_fmr_pool_t, fpo_list);
+ struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next,
+ struct kib_fmr_pool, fpo_list);
fpo->fpo_failed = 1;
list_del(&fpo->fpo_list);
if (!fpo->fpo_map_count)
@@ -1510,7 +1506,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
spin_unlock(&fps->fps_lock);
}
-static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
+static void kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps)
{
if (fps->fps_net) { /* initialized? */
kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
@@ -1519,11 +1515,11 @@ static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
}
static int
-kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
- kib_net_t *net,
+kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts,
+ struct kib_net *net,
struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
- kib_fmr_pool_t *fpo;
+ struct kib_fmr_pool *fpo;
int rc;
memset(fps, 0, sizeof(*fps));
@@ -1546,7 +1542,7 @@ kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
return rc;
}
-static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
+static int kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, unsigned long now)
{
if (fpo->fpo_map_count) /* still in use */
return 0;
@@ -1556,10 +1552,10 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
}
static int
-kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
+kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
{
__u64 *pages = tx->tx_pages;
- kib_hca_dev_t *hdev;
+ struct kib_hca_dev *hdev;
int npages;
int size;
int i;
@@ -1577,13 +1573,13 @@ kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
return npages;
}
-void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
+void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
{
LIST_HEAD(zombies);
- kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps;
+ struct kib_fmr_pool *fpo = fmr->fmr_pool;
+ struct kib_fmr_poolset *fps;
unsigned long now = cfs_time_current();
- kib_fmr_pool_t *tmp;
+ struct kib_fmr_pool *tmp;
int rc;
if (!fpo)
@@ -1633,14 +1629,14 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
kiblnd_destroy_fmr_pool_list(&zombies);
}
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
- kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
- kib_fmr_t *fmr)
+int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, __u32 nob, __u64 iov,
+ struct kib_fmr *fmr)
{
__u64 *pages = tx->tx_pages;
bool is_rx = (rd != tx->tx_rd);
bool tx_pages_mapped = 0;
- kib_fmr_pool_t *fpo;
+ struct kib_fmr_pool *fpo;
int npages = 0;
__u64 version;
int rc;
@@ -1780,7 +1776,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
goto again;
}
-static void kiblnd_fini_pool(kib_pool_t *pool)
+static void kiblnd_fini_pool(struct kib_pool *pool)
{
LASSERT(list_empty(&pool->po_free_list));
LASSERT(!pool->po_allocated);
@@ -1788,7 +1784,7 @@ static void kiblnd_fini_pool(kib_pool_t *pool)
CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
}
-static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
+static void kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size)
{
CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
@@ -1801,10 +1797,10 @@ static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
static void kiblnd_destroy_pool_list(struct list_head *head)
{
- kib_pool_t *pool;
+ struct kib_pool *pool;
while (!list_empty(head)) {
- pool = list_entry(head->next, kib_pool_t, po_list);
+ pool = list_entry(head->next, struct kib_pool, po_list);
list_del(&pool->po_list);
LASSERT(pool->po_owner);
@@ -1812,15 +1808,15 @@ static void kiblnd_destroy_pool_list(struct list_head *head)
}
}
-static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
+static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
{
if (!ps->ps_net) /* initialized? */
return;
spin_lock(&ps->ps_lock);
while (!list_empty(&ps->ps_pool_list)) {
- kib_pool_t *po = list_entry(ps->ps_pool_list.next,
- kib_pool_t, po_list);
+ struct kib_pool *po = list_entry(ps->ps_pool_list.next,
+ struct kib_pool, po_list);
po->po_failed = 1;
list_del(&po->po_list);
if (!po->po_allocated)
@@ -1831,7 +1827,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
spin_unlock(&ps->ps_lock);
}
-static void kiblnd_fini_poolset(kib_poolset_t *ps)
+static void kiblnd_fini_poolset(struct kib_poolset *ps)
{
if (ps->ps_net) { /* initialized? */
kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
@@ -1839,14 +1835,14 @@ static void kiblnd_fini_poolset(kib_poolset_t *ps)
}
}
-static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
- kib_net_t *net, char *name, int size,
+static int kiblnd_init_poolset(struct kib_poolset *ps, int cpt,
+ struct kib_net *net, char *name, int size,
kib_ps_pool_create_t po_create,
kib_ps_pool_destroy_t po_destroy,
kib_ps_node_init_t nd_init,
kib_ps_node_fini_t nd_fini)
{
- kib_pool_t *pool;
+ struct kib_pool *pool;
int rc;
memset(ps, 0, sizeof(*ps));
@@ -1874,7 +1870,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
return rc;
}
-static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
+static int kiblnd_pool_is_idle(struct kib_pool *pool, unsigned long now)
{
if (pool->po_allocated) /* still in use */
return 0;
@@ -1883,11 +1879,11 @@ static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
return cfs_time_aftereq(now, pool->po_deadline);
}
-void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
+void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
{
LIST_HEAD(zombies);
- kib_poolset_t *ps = pool->po_owner;
- kib_pool_t *tmp;
+ struct kib_poolset *ps = pool->po_owner;
+ struct kib_pool *tmp;
unsigned long now = cfs_time_current();
spin_lock(&ps->ps_lock);
@@ -1913,10 +1909,10 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
kiblnd_destroy_pool_list(&zombies);
}
-struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
+struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps)
{
struct list_head *node;
- kib_pool_t *pool;
+ struct kib_pool *pool;
unsigned int interval = 1;
unsigned long time_before;
unsigned int trips = 0;
@@ -1986,9 +1982,9 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
goto again;
}
-static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
+static void kiblnd_destroy_tx_pool(struct kib_pool *pool)
{
- kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
+ struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool, tpo_pool);
int i;
LASSERT(!pool->po_allocated);
@@ -2002,7 +1998,7 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
goto out;
for (i = 0; i < pool->po_size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ struct kib_tx *tx = &tpo->tpo_tx_descs[i];
list_del(&tx->tx_list);
if (tx->tx_pages)
@@ -2011,8 +2007,8 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
sizeof(*tx->tx_pages));
if (tx->tx_frags)
LIBCFS_FREE(tx->tx_frags,
- IBLND_MAX_RDMA_FRAGS *
- sizeof(*tx->tx_frags));
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags));
if (tx->tx_wrq)
LIBCFS_FREE(tx->tx_wrq,
(1 + IBLND_MAX_RDMA_FRAGS) *
@@ -2023,12 +2019,12 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
sizeof(*tx->tx_sge));
if (tx->tx_rd)
LIBCFS_FREE(tx->tx_rd,
- offsetof(kib_rdma_desc_t,
+ offsetof(struct kib_rdma_desc,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
}
LIBCFS_FREE(tpo->tpo_tx_descs,
- pool->po_size * sizeof(kib_tx_t));
+ pool->po_size * sizeof(struct kib_tx));
out:
kiblnd_fini_pool(pool);
LIBCFS_FREE(tpo, sizeof(*tpo));
@@ -2041,13 +2037,13 @@ static int kiblnd_tx_pool_size(int ncpts)
return max(IBLND_TX_POOL, ntx);
}
-static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
- kib_pool_t **pp_po)
+static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
+ struct kib_pool **pp_po)
{
int i;
int npg;
- kib_pool_t *pool;
- kib_tx_pool_t *tpo;
+ struct kib_pool *pool;
+ struct kib_tx_pool *tpo;
LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
if (!tpo) {
@@ -2068,17 +2064,17 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
}
LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
- size * sizeof(kib_tx_t));
+ size * sizeof(struct kib_tx));
if (!tpo->tpo_tx_descs) {
CERROR("Can't allocate %d tx descriptors\n", size);
ps->ps_pool_destroy(pool);
return -ENOMEM;
}
- memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
+ memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx));
for (i = 0; i < size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ struct kib_tx *tx = &tpo->tpo_tx_descs[i];
tx->tx_pool = tpo;
if (ps->ps_net->ibn_fmr_ps) {
@@ -2090,11 +2086,12 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
}
LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
- IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags));
if (!tx->tx_frags)
break;
- sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
+ sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
(1 + IBLND_MAX_RDMA_FRAGS) *
@@ -2109,7 +2106,7 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
break;
LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
- offsetof(kib_rdma_desc_t,
+ offsetof(struct kib_rdma_desc,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
if (!tx->tx_rd)
break;
@@ -2125,22 +2122,23 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
return -ENOMEM;
}
-static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
+static void kiblnd_tx_init(struct kib_pool *pool, struct list_head *node)
{
- kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
- tps_poolset);
- kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
+ struct kib_tx_poolset *tps = container_of(pool->po_owner,
+ struct kib_tx_poolset,
+ tps_poolset);
+ struct kib_tx *tx = list_entry(node, struct kib_tx, tx_list);
tx->tx_cookie = tps->tps_next_tx_cookie++;
}
-static void kiblnd_net_fini_pools(kib_net_t *net)
+static void kiblnd_net_fini_pools(struct kib_net *net)
{
int i;
cfs_cpt_for_each(i, lnet_cpt_table()) {
- kib_tx_poolset_t *tps;
- kib_fmr_poolset_t *fps;
+ struct kib_tx_poolset *tps;
+ struct kib_fmr_poolset *fps;
if (net->ibn_tx_ps) {
tps = net->ibn_tx_ps[i];
@@ -2164,7 +2162,7 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
}
}
-static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
+static int kiblnd_net_init_pools(struct kib_net *net, lnet_ni_t *ni, __u32 *cpts,
int ncpts)
{
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
@@ -2206,7 +2204,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
* number of CPTs that exist, i.e. net->ibn_fmr_ps[cpt].
*/
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_fmr_poolset_t));
+ sizeof(struct kib_fmr_poolset));
if (!net->ibn_fmr_ps) {
CERROR("Failed to allocate FMR pool array\n");
rc = -ENOMEM;
@@ -2234,7 +2232,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
* number of CPTs that exist, i.e. net->ibn_tx_ps[cpt].
*/
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_tx_poolset_t));
+ sizeof(struct kib_tx_poolset));
if (!net->ibn_tx_ps) {
CERROR("Failed to allocate tx pool array\n");
rc = -ENOMEM;
@@ -2263,7 +2261,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
return rc;
}
-static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
+static int kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
{
/*
* It's safe to assume an HCA can handle a page size
@@ -2283,7 +2281,7 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
return -EINVAL;
}
-static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
+static void kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev)
{
if (!hdev->ibh_mrs)
return;
@@ -2293,7 +2291,7 @@ static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
hdev->ibh_mrs = NULL;
}
-void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
+void kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
{
kiblnd_hdev_cleanup_mrs(hdev);
@@ -2306,7 +2304,7 @@ void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
LIBCFS_FREE(hdev, sizeof(*hdev));
}
-static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
+static int kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev)
{
struct ib_mr *mr;
int rc;
@@ -2335,7 +2333,7 @@ static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
return 0;
}
-static int kiblnd_dev_need_failover(kib_dev_t *dev)
+static int kiblnd_dev_need_failover(struct kib_dev *dev)
{
struct rdma_cm_id *cmid;
struct sockaddr_in srcaddr;
@@ -2389,15 +2387,15 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
return rc;
}
-int kiblnd_dev_failover(kib_dev_t *dev)
+int kiblnd_dev_failover(struct kib_dev *dev)
{
LIST_HEAD(zombie_tpo);
LIST_HEAD(zombie_ppo);
LIST_HEAD(zombie_fpo);
struct rdma_cm_id *cmid = NULL;
- kib_hca_dev_t *hdev = NULL;
+ struct kib_hca_dev *hdev = NULL;
struct ib_pd *pd;
- kib_net_t *net;
+ struct kib_net *net;
struct sockaddr_in addr;
unsigned long flags;
int rc = 0;
@@ -2522,7 +2520,7 @@ int kiblnd_dev_failover(kib_dev_t *dev)
return rc;
}
-void kiblnd_destroy_dev(kib_dev_t *dev)
+void kiblnd_destroy_dev(struct kib_dev *dev)
{
LASSERT(!dev->ibd_nnets);
LASSERT(list_empty(&dev->ibd_nets));
@@ -2536,10 +2534,10 @@ void kiblnd_destroy_dev(kib_dev_t *dev)
LIBCFS_FREE(dev, sizeof(*dev));
}
-static kib_dev_t *kiblnd_create_dev(char *ifname)
+static struct kib_dev *kiblnd_create_dev(char *ifname)
{
struct net_device *netdev;
- kib_dev_t *dev;
+ struct kib_dev *dev;
__u32 netmask;
__u32 ip;
int up;
@@ -2654,7 +2652,7 @@ static void kiblnd_base_shutdown(void)
static void kiblnd_shutdown(lnet_ni_t *ni)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
int i;
unsigned long flags;
@@ -2851,7 +2849,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
return rc;
}
-static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
+static int kiblnd_dev_start_threads(struct kib_dev *dev, int newdev, __u32 *cpts,
int ncpts)
{
int cpt;
@@ -2877,10 +2875,10 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
return 0;
}
-static kib_dev_t *kiblnd_dev_search(char *ifname)
+static struct kib_dev *kiblnd_dev_search(char *ifname)
{
- kib_dev_t *alias = NULL;
- kib_dev_t *dev;
+ struct kib_dev *alias = NULL;
+ struct kib_dev *dev;
char *colon;
char *colon2;
@@ -2912,8 +2910,8 @@ static kib_dev_t *kiblnd_dev_search(char *ifname)
static int kiblnd_startup(lnet_ni_t *ni)
{
char *ifname;
- kib_dev_t *ibdev = NULL;
- kib_net_t *net;
+ struct kib_dev *ibdev = NULL;
+ struct kib_net *net;
struct timespec64 tv;
unsigned long flags;
int rc;
@@ -3020,11 +3018,11 @@ static void __exit ko2iblnd_exit(void)
static int __init ko2iblnd_init(void)
{
- CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
- CLASSERT(offsetof(kib_msg_t,
+ CLASSERT(sizeof(struct kib_msg) <= IBLND_MSG_SIZE);
+ CLASSERT(offsetof(struct kib_msg,
ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
<= IBLND_MSG_SIZE);
- CLASSERT(offsetof(kib_msg_t,
+ CLASSERT(offsetof(struct kib_msg,
ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
<= IBLND_MSG_SIZE);
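
The hunks above are almost entirely mechanical: each private typedef (kib_tx_pool_t, kib_poolset_t, kib_hca_dev_t, ...) becomes a bare struct tag, and every container_of(), sizeof() and offsetof() call site is updated to match. The one behavioural change is that tx_frags is now allocated and sg_init_table()'d for 1 + IBLND_MAX_RDMA_FRAGS entries, in line with the existing tx_wrq/tx_sge sizing. For readers unfamiliar with the embedded-pool idiom those call sites rely on, here is a minimal userspace sketch -- illustrative only; container_of() is a simplified stub rather than the kernel's type-checked macro, and destroy_pool()/tpo_private are hypothetical stand-ins that merely mirror the patch:

    #include <stddef.h>
    #include <stdio.h>

    /* simplified stand-in for the kernel's container_of() */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kib_pool {
            int po_size;                    /* # of pre-allocated elements */
    };

    struct kib_tx_pool {
            struct kib_pool tpo_pool;       /* embedded generic pool */
            int tpo_private;                /* hypothetical type-specific state */
    };

    static void destroy_pool(struct kib_pool *pool)
    {
            /* Same pattern as kiblnd_destroy_tx_pool() after this patch:
             * the bare struct tag replaces the old kib_tx_pool_t typedef
             * in the container_of() invocation.
             */
            struct kib_tx_pool *tpo =
                    container_of(pool, struct kib_tx_pool, tpo_pool);

            printf("pool size %d, private %d\n",
                   pool->po_size, tpo->tpo_private);
    }

    int main(void)
    {
            struct kib_tx_pool tpo = {
                    .tpo_pool = { .po_size = 8 },
                    .tpo_private = 42,
            };

            destroy_pool(&tpo.tpo_pool);
            return 0;
    }

The point is only that the outer struct kib_tx_pool is recovered from a pointer to its embedded struct kib_pool by subtracting the member offset; nothing about that idiom changes here, which is why the conversion can be verified hunk by hunk.
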
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index b22984fd9ad3..078a0c3e8845 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -78,12 +74,12 @@
#define IBLND_N_SCHED 2
#define IBLND_N_SCHED_HIGH 4
-typedef struct {
+struct kib_tunables {
int *kib_dev_failover; /* HCA failover */
unsigned int *kib_service; /* IB service number */
int *kib_min_reconnect_interval; /* first failed connection retry... */
int *kib_max_reconnect_interval; /* exponentially increasing to this */
- int *kib_cksum; /* checksum kib_msg_t? */
+ int *kib_cksum; /* checksum struct kib_msg? */
int *kib_timeout; /* comms timeout (seconds) */
int *kib_keepalive; /* keepalive timeout (seconds) */
int *kib_ntx; /* # tx descs */
@@ -94,22 +90,22 @@ typedef struct {
int *kib_require_priv_port; /* accept only privileged ports */
int *kib_use_priv_port; /* use privileged port for active connect */
int *kib_nscheds; /* # threads on each CPT */
-} kib_tunables_t;
+};
-extern kib_tunables_t kiblnd_tunables;
+extern struct kib_tunables kiblnd_tunables;
#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when to eagerly return credits */
#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
-#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */
+#define IBLND_CREDITS_MAX ((typeof(((struct kib_msg *)0)->ibm_credits)) - 1) /* Max # of peer credits */
/* when to eagerly return credits */
#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
IBLND_CREDIT_HIGHWATER_V1 : \
t->lnd_peercredits_hiw)
-#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(&init_net, \
+#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(current->nsproxy->net_ns, \
cb, dev, \
ps, qpt)
@@ -150,7 +146,7 @@ struct kib_hca_dev;
#define KIB_IFNAME_SIZE 256
#endif
-typedef struct {
+struct kib_dev {
struct list_head ibd_list; /* chain on kib_devs */
struct list_head ibd_fail_list; /* chain on kib_failed_devs */
__u32 ibd_ifip; /* IPoIB interface IP */
@@ -165,9 +161,9 @@ typedef struct {
unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
struct list_head ibd_nets;
struct kib_hca_dev *ibd_hdev;
-} kib_dev_t;
+};
-typedef struct kib_hca_dev {
+struct kib_hca_dev {
struct rdma_cm_id *ibh_cmid; /* listener cmid */
struct ib_device *ibh_ibdev; /* IB device */
int ibh_page_shift; /* page shift of current HCA */
@@ -177,19 +173,19 @@ typedef struct kib_hca_dev {
__u64 ibh_mr_size; /* size of MR */
struct ib_mr *ibh_mrs; /* global MR */
struct ib_pd *ibh_pd; /* PD */
- kib_dev_t *ibh_dev; /* owner */
+ struct kib_dev *ibh_dev; /* owner */
atomic_t ibh_ref; /* refcount */
-} kib_hca_dev_t;
+};
/** # of seconds to keep pool alive */
#define IBLND_POOL_DEADLINE 300
/** # of seconds to retry if allocation failed */
#define IBLND_POOL_RETRY 1
-typedef struct {
+struct kib_pages {
int ibp_npages; /* # pages */
struct page *ibp_pages[0]; /* page array */
-} kib_pages_t;
+};
struct kib_pool;
struct kib_poolset;
@@ -204,7 +200,7 @@ struct kib_net;
#define IBLND_POOL_NAME_LEN 32
-typedef struct kib_poolset {
+struct kib_poolset {
spinlock_t ps_lock; /* serialize */
struct kib_net *ps_net; /* network it belongs to */
char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
@@ -220,31 +216,31 @@ typedef struct kib_poolset {
kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
kib_ps_node_fini_t ps_node_fini; /* finalize node */
-} kib_poolset_t;
+};
-typedef struct kib_pool {
+struct kib_pool {
struct list_head po_list; /* chain on pool list */
struct list_head po_free_list; /* pre-allocated node */
- kib_poolset_t *po_owner; /* pool_set of this pool */
+ struct kib_poolset *po_owner; /* pool_set of this pool */
unsigned long po_deadline; /* deadline of this pool */
int po_allocated; /* # of elements in use */
int po_failed; /* pool is created on failed HCA */
int po_size; /* # of pre-allocated elements */
-} kib_pool_t;
+};
-typedef struct {
- kib_poolset_t tps_poolset; /* pool-set */
+struct kib_tx_poolset {
+ struct kib_poolset tps_poolset; /* pool-set */
__u64 tps_next_tx_cookie; /* cookie of TX */
-} kib_tx_poolset_t;
+};
-typedef struct {
- kib_pool_t tpo_pool; /* pool */
- struct kib_hca_dev *tpo_hdev; /* device for this pool */
- struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
- kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */
-} kib_tx_pool_t;
+struct kib_tx_pool {
+ struct kib_pool tpo_pool; /* pool */
+ struct kib_hca_dev *tpo_hdev; /* device for this pool */
+ struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
+ struct kib_pages *tpo_tx_pages; /* premapped tx msg pages */
+};
-typedef struct {
+struct kib_fmr_poolset {
spinlock_t fps_lock; /* serialize */
struct kib_net *fps_net; /* IB network */
struct list_head fps_pool_list; /* FMR pool list */
@@ -257,7 +253,7 @@ typedef struct {
int fps_increasing; /* is allocating new pool */
unsigned long fps_next_retry; /* time stamp for retry if*/
/* failed to allocate */
-} kib_fmr_poolset_t;
+};
struct kib_fast_reg_descriptor { /* For fast registration */
struct list_head frd_list;
@@ -267,10 +263,10 @@ struct kib_fast_reg_descriptor { /* For fast registration */
bool frd_valid;
};
-typedef struct {
- struct list_head fpo_list; /* chain on pool list */
- struct kib_hca_dev *fpo_hdev; /* device for this pool */
- kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
+struct kib_fmr_pool {
+ struct list_head fpo_list; /* chain on pool list */
+ struct kib_hca_dev *fpo_hdev; /* device for this pool */
+ struct kib_fmr_poolset *fpo_owner; /* owner of this pool */
union {
struct {
struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
@@ -284,17 +280,17 @@ typedef struct {
int fpo_failed; /* fmr pool has failed */
int fpo_map_count; /* # of mapped FMR */
int fpo_is_fmr;
-} kib_fmr_pool_t;
+};
-typedef struct {
- kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+struct kib_fmr {
+ struct kib_fmr_pool *fmr_pool; /* pool of FMR */
struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
struct kib_fast_reg_descriptor *fmr_frd;
u32 fmr_key;
-} kib_fmr_t;
+};
-typedef struct kib_net {
- struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */
+struct kib_net {
+ struct list_head ibn_list; /* chain on struct kib_dev::ibd_nets */
__u64 ibn_incarnation;/* my epoch */
int ibn_init; /* initialisation state */
int ibn_shutdown; /* shutting down? */
@@ -302,11 +298,11 @@ typedef struct kib_net {
atomic_t ibn_npeers; /* # peers extant */
atomic_t ibn_nconns; /* # connections extant */
- kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */
- kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
+ struct kib_tx_poolset **ibn_tx_ps; /* tx pool-set */
+ struct kib_fmr_poolset **ibn_fmr_ps; /* fmr pool-set */
- kib_dev_t *ibn_dev; /* underlying IB device */
-} kib_net_t;
+ struct kib_dev *ibn_dev; /* underlying IB device */
+};
#define KIB_THREAD_SHIFT 16
#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid))
@@ -322,7 +318,7 @@ struct kib_sched_info {
int ibs_cpt; /* CPT id */
};
-typedef struct {
+struct kib_data {
int kib_init; /* initialisation state */
int kib_shutdown; /* shut down? */
struct list_head kib_devs; /* IB devices extant */
@@ -349,7 +345,7 @@ typedef struct {
spinlock_t kib_connd_lock; /* serialise */
struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
struct kib_sched_info **kib_scheds; /* percpt data for schedulers */
-} kib_data_t;
+};
#define IBLND_INIT_NOTHING 0
#define IBLND_INIT_DATA 1
@@ -360,51 +356,51 @@ typedef struct {
* These are sent in sender's byte order (i.e. receiver flips).
*/
-typedef struct kib_connparams {
+struct kib_connparams {
__u16 ibcp_queue_depth;
__u16 ibcp_max_frags;
__u32 ibcp_max_msg_size;
-} WIRE_ATTR kib_connparams_t;
+} WIRE_ATTR;
-typedef struct {
+struct kib_immediate_msg {
lnet_hdr_t ibim_hdr; /* portals header */
char ibim_payload[0]; /* piggy-backed payload */
-} WIRE_ATTR kib_immediate_msg_t;
+} WIRE_ATTR;
-typedef struct {
+struct kib_rdma_frag {
__u32 rf_nob; /* # bytes this frag */
__u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */
-} WIRE_ATTR kib_rdma_frag_t;
+} WIRE_ATTR;
-typedef struct {
+struct kib_rdma_desc {
__u32 rd_key; /* local/remote key */
__u32 rd_nfrags; /* # fragments */
- kib_rdma_frag_t rd_frags[0]; /* buffer frags */
-} WIRE_ATTR kib_rdma_desc_t;
+ struct kib_rdma_frag rd_frags[0]; /* buffer frags */
+} WIRE_ATTR;
-typedef struct {
+struct kib_putreq_msg {
lnet_hdr_t ibprm_hdr; /* portals header */
__u64 ibprm_cookie; /* opaque completion cookie */
-} WIRE_ATTR kib_putreq_msg_t;
+} WIRE_ATTR;
-typedef struct {
+struct kib_putack_msg {
__u64 ibpam_src_cookie; /* reflected completion cookie */
__u64 ibpam_dst_cookie; /* opaque completion cookie */
- kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */
-} WIRE_ATTR kib_putack_msg_t;
+ struct kib_rdma_desc ibpam_rd; /* sender's sink buffer */
+} WIRE_ATTR;
-typedef struct {
+struct kib_get_msg {
lnet_hdr_t ibgm_hdr; /* portals header */
__u64 ibgm_cookie; /* opaque completion cookie */
- kib_rdma_desc_t ibgm_rd; /* rdma descriptor */
-} WIRE_ATTR kib_get_msg_t;
+ struct kib_rdma_desc ibgm_rd; /* rdma descriptor */
+} WIRE_ATTR;
-typedef struct {
+struct kib_completion_msg {
__u64 ibcm_cookie; /* opaque completion cookie */
__s32 ibcm_status; /* < 0 failure: >= 0 length */
-} WIRE_ATTR kib_completion_msg_t;
+} WIRE_ATTR;
-typedef struct {
+struct kib_msg {
/* First 2 fields fixed FOR ALL TIME */
__u32 ibm_magic; /* I'm an ibnal message */
__u16 ibm_version; /* this is my version number */
@@ -419,14 +415,14 @@ typedef struct {
__u64 ibm_dststamp; /* destination's incarnation */
union {
- kib_connparams_t connparams;
- kib_immediate_msg_t immediate;
- kib_putreq_msg_t putreq;
- kib_putack_msg_t putack;
- kib_get_msg_t get;
- kib_completion_msg_t completion;
+ struct kib_connparams connparams;
+ struct kib_immediate_msg immediate;
+ struct kib_putreq_msg putreq;
+ struct kib_putack_msg putack;
+ struct kib_get_msg get;
+ struct kib_completion_msg completion;
} WIRE_ATTR ibm_u;
-} WIRE_ATTR kib_msg_t;
+} WIRE_ATTR;
#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */
@@ -445,14 +441,14 @@ typedef struct {
#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */
#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */
-typedef struct {
+struct kib_rej {
__u32 ibr_magic; /* sender's magic */
__u16 ibr_version; /* sender's version */
__u8 ibr_why; /* reject reason */
__u8 ibr_padding; /* padding */
__u64 ibr_incarnation; /* incarnation of peer */
- kib_connparams_t ibr_cp; /* connection parameters */
-} WIRE_ATTR kib_rej_t;
+ struct kib_connparams ibr_cp; /* connection parameters */
+} WIRE_ATTR;
/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */
@@ -467,28 +463,26 @@ typedef struct {
/***********************************************************************/
-typedef struct kib_rx /* receive message */
-{
+struct kib_rx { /* receive message */
struct list_head rx_list; /* queue for attention */
struct kib_conn *rx_conn; /* owning conn */
int rx_nob; /* # bytes received (-1 while posted) */
enum ib_wc_status rx_status; /* completion status */
- kib_msg_t *rx_msg; /* message buffer (host vaddr) */
+ struct kib_msg *rx_msg; /* message buffer (host vaddr) */
__u64 rx_msgaddr; /* message buffer (I/O addr) */
DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */
struct ib_recv_wr rx_wrq; /* receive work item... */
struct ib_sge rx_sge; /* ...and its memory */
-} kib_rx_t;
+};
#define IBLND_POSTRX_DONT_POST 0 /* don't post */
#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give self back 1 reserved credit */
-typedef struct kib_tx /* transmit message */
-{
+struct kib_tx { /* transmit message */
struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
- kib_tx_pool_t *tx_pool; /* pool I'm from */
+ struct kib_tx_pool *tx_pool; /* pool I'm from */
struct kib_conn *tx_conn; /* owning conn */
short tx_sending; /* # tx callbacks outstanding */
short tx_queued; /* queued for sending */
@@ -497,28 +491,28 @@ typedef struct kib_tx /* transmit message */
unsigned long tx_deadline; /* completion deadline */
__u64 tx_cookie; /* completion cookie */
lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
- kib_msg_t *tx_msg; /* message buffer (host vaddr) */
+ struct kib_msg *tx_msg; /* message buffer (host vaddr) */
__u64 tx_msgaddr; /* message buffer (I/O addr) */
DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
int tx_nwrq; /* # send work items */
struct ib_rdma_wr *tx_wrq; /* send work items... */
struct ib_sge *tx_sge; /* ...and their memory */
- kib_rdma_desc_t *tx_rd; /* rdma descriptor */
+ struct kib_rdma_desc *tx_rd; /* rdma descriptor */
int tx_nfrags; /* # entries in... */
struct scatterlist *tx_frags; /* dma_map_sg descriptor */
__u64 *tx_pages; /* rdma phys page addrs */
- kib_fmr_t fmr; /* FMR */
+ struct kib_fmr fmr; /* FMR */
int tx_dmadir; /* dma direction */
-} kib_tx_t;
+};
-typedef struct kib_connvars {
- kib_msg_t cv_msg; /* connection-in-progress variables */
-} kib_connvars_t;
+struct kib_connvars {
+ struct kib_msg cv_msg; /* connection-in-progress variables */
+};
-typedef struct kib_conn {
+struct kib_conn {
struct kib_sched_info *ibc_sched; /* scheduler information */
struct kib_peer *ibc_peer; /* owning peer */
- kib_hca_dev_t *ibc_hdev; /* HCA bound on */
+ struct kib_hca_dev *ibc_hdev; /* HCA bound on */
struct list_head ibc_list; /* stash on peer's conn list */
struct list_head ibc_sched_list; /* schedule for attention */
__u16 ibc_version; /* version of connection */
@@ -553,14 +547,14 @@ typedef struct kib_conn {
/* reserve an ACK/DONE msg */
struct list_head ibc_active_txs; /* active tx awaiting completion */
spinlock_t ibc_lock; /* serialise */
- kib_rx_t *ibc_rxs; /* the rx descs */
- kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
+ struct kib_rx *ibc_rxs; /* the rx descs */
+ struct kib_pages *ibc_rx_pages; /* premapped rx msg pages */
struct rdma_cm_id *ibc_cmid; /* CM id */
struct ib_cq *ibc_cq; /* completion queue */
- kib_connvars_t *ibc_connvars; /* in-progress connection state */
-} kib_conn_t;
+ struct kib_connvars *ibc_connvars; /* in-progress connection state */
+};
#define IBLND_CONN_INIT 0 /* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
@@ -569,7 +563,7 @@ typedef struct kib_conn {
#define IBLND_CONN_CLOSING 4 /* being closed */
#define IBLND_CONN_DISCONNECTED 5 /* disconnected */
-typedef struct kib_peer {
+struct kib_peer {
struct list_head ibp_list; /* stash on global peer list */
lnet_nid_t ibp_nid; /* who's on the other end(s) */
lnet_ni_t *ibp_ni; /* LNet interface */
@@ -596,11 +590,11 @@ typedef struct kib_peer {
__u16 ibp_max_frags;
/* max_peer_credits */
__u16 ibp_queue_depth;
-} kib_peer_t;
+};
-extern kib_data_t kiblnd_data;
+extern struct kib_data kiblnd_data;
-void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+void kiblnd_hdev_destroy(struct kib_hca_dev *hdev);
int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
@@ -645,14 +639,14 @@ kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
}
static inline void
-kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
+kiblnd_hdev_addref_locked(struct kib_hca_dev *hdev)
{
LASSERT(atomic_read(&hdev->ibh_ref) > 0);
atomic_inc(&hdev->ibh_ref);
}
static inline void
-kiblnd_hdev_decref(kib_hca_dev_t *hdev)
+kiblnd_hdev_decref(struct kib_hca_dev *hdev)
{
LASSERT(atomic_read(&hdev->ibh_ref) > 0);
if (atomic_dec_and_test(&hdev->ibh_ref))
@@ -660,7 +654,7 @@ kiblnd_hdev_decref(kib_hca_dev_t *hdev)
}
static inline int
-kiblnd_dev_can_failover(kib_dev_t *dev)
+kiblnd_dev_can_failover(struct kib_dev *dev)
{
if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
return 0;
@@ -716,7 +710,7 @@ do { \
} while (0)
static inline bool
-kiblnd_peer_connecting(kib_peer_t *peer)
+kiblnd_peer_connecting(struct kib_peer *peer)
{
return peer->ibp_connecting ||
peer->ibp_reconnecting ||
@@ -724,7 +718,7 @@ kiblnd_peer_connecting(kib_peer_t *peer)
}
static inline bool
-kiblnd_peer_idle(kib_peer_t *peer)
+kiblnd_peer_idle(struct kib_peer *peer)
{
return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns);
}
@@ -739,23 +733,23 @@ kiblnd_nid2peerlist(lnet_nid_t nid)
}
static inline int
-kiblnd_peer_active(kib_peer_t *peer)
+kiblnd_peer_active(struct kib_peer *peer)
{
/* Am I in the peer hash table? */
return !list_empty(&peer->ibp_list);
}
-static inline kib_conn_t *
-kiblnd_get_conn_locked(kib_peer_t *peer)
+static inline struct kib_conn *
+kiblnd_get_conn_locked(struct kib_peer *peer)
{
LASSERT(!list_empty(&peer->ibp_conns));
/* just return the first connection */
- return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
+ return list_entry(peer->ibp_conns.next, struct kib_conn, ibc_list);
}
static inline int
-kiblnd_send_keepalive(kib_conn_t *conn)
+kiblnd_send_keepalive(struct kib_conn *conn)
{
return (*kiblnd_tunables.kib_keepalive > 0) &&
cfs_time_after(jiffies, conn->ibc_last_send +
@@ -764,7 +758,7 @@ kiblnd_send_keepalive(kib_conn_t *conn)
}
static inline int
-kiblnd_need_noop(kib_conn_t *conn)
+kiblnd_need_noop(struct kib_conn *conn)
{
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
@@ -800,14 +794,14 @@ kiblnd_need_noop(kib_conn_t *conn)
}
static inline void
-kiblnd_abort_receives(kib_conn_t *conn)
+kiblnd_abort_receives(struct kib_conn *conn)
{
ib_modify_qp(conn->ibc_cmid->qp,
&kiblnd_data.kib_error_qpa, IB_QP_STATE);
}
static inline const char *
-kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
+kiblnd_queue2str(struct kib_conn *conn, struct list_head *q)
{
if (q == &conn->ibc_tx_queue)
return "tx_queue";
@@ -858,21 +852,21 @@ kiblnd_wreqid2type(__u64 wreqid)
}
static inline void
-kiblnd_set_conn_state(kib_conn_t *conn, int state)
+kiblnd_set_conn_state(struct kib_conn *conn, int state)
{
conn->ibc_state = state;
mb();
}
static inline void
-kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
+kiblnd_init_msg(struct kib_msg *msg, int type, int body_nob)
{
msg->ibm_type = type;
- msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+ msg->ibm_nob = offsetof(struct kib_msg, ibm_u) + body_nob;
}
static inline int
-kiblnd_rd_size(kib_rdma_desc_t *rd)
+kiblnd_rd_size(struct kib_rdma_desc *rd)
{
int i;
int size;
@@ -884,25 +878,25 @@ kiblnd_rd_size(kib_rdma_desc_t *rd)
}
static inline __u64
-kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
+kiblnd_rd_frag_addr(struct kib_rdma_desc *rd, int index)
{
return rd->rd_frags[index].rf_addr;
}
static inline __u32
-kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
+kiblnd_rd_frag_size(struct kib_rdma_desc *rd, int index)
{
return rd->rd_frags[index].rf_nob;
}
static inline __u32
-kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
+kiblnd_rd_frag_key(struct kib_rdma_desc *rd, int index)
{
return rd->rd_key;
}
static inline int
-kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
+kiblnd_rd_consume_frag(struct kib_rdma_desc *rd, int index, __u32 nob)
{
if (nob < rd->rd_frags[index].rf_nob) {
rd->rd_frags[index].rf_addr += nob;
@@ -915,14 +909,14 @@ kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
}
static inline int
-kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
+kiblnd_rd_msg_size(struct kib_rdma_desc *rd, int msgtype, int n)
{
LASSERT(msgtype == IBLND_MSG_GET_REQ ||
msgtype == IBLND_MSG_PUT_ACK);
return msgtype == IBLND_MSG_GET_REQ ?
- offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
- offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
+ offsetof(struct kib_get_msg, ibgm_rd.rd_frags[n]) :
+ offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[n]);
}
static inline __u64
@@ -981,17 +975,17 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd,
int negotiated_nfrags);
-void kiblnd_map_rx_descs(kib_conn_t *conn);
-void kiblnd_unmap_rx_descs(kib_conn_t *conn);
-void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
-struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
+void kiblnd_map_rx_descs(struct kib_conn *conn);
+void kiblnd_unmap_rx_descs(struct kib_conn *conn);
+void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node);
+struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps);
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
- kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
- kib_fmr_t *fmr);
-void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
+int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, __u32 nob, __u64 iov,
+ struct kib_fmr *fmr);
+void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status);
int kiblnd_tunables_setup(struct lnet_ni *ni);
void kiblnd_tunables_init(void);
@@ -1001,30 +995,31 @@ int kiblnd_scheduler(void *arg);
int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int kiblnd_failover_thread(void *arg);
-int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
+int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages);
int kiblnd_cm_callback(struct rdma_cm_id *cmid,
struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);
-int kiblnd_dev_failover(kib_dev_t *dev);
-int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
-void kiblnd_destroy_peer(kib_peer_t *peer);
-bool kiblnd_reconnect_peer(kib_peer_t *peer);
-void kiblnd_destroy_dev(kib_dev_t *dev);
-void kiblnd_unlink_peer_locked(kib_peer_t *peer);
-kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
-int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+int kiblnd_dev_failover(struct kib_dev *dev);
+int kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid);
+void kiblnd_destroy_peer(struct kib_peer *peer);
+bool kiblnd_reconnect_peer(struct kib_peer *peer);
+void kiblnd_destroy_dev(struct kib_dev *dev);
+void kiblnd_unlink_peer_locked(struct kib_peer *peer);
+struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid);
+int kiblnd_close_stale_conns_locked(struct kib_peer *peer,
int version, __u64 incarnation);
-int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
+int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);
-kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state, int version);
-void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn);
-void kiblnd_close_conn(kib_conn_t *conn, int error);
-void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
+struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
+ struct rdma_cm_id *cmid,
+ int state, int version);
+void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn);
+void kiblnd_close_conn(struct kib_conn *conn, int error);
+void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
-void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
+void kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid);
void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
int status);
@@ -1032,10 +1027,10 @@ void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
-void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
+void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version,
int credits, lnet_nid_t dstnid, __u64 dststamp);
-int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
-int kiblnd_post_rx(kib_rx_t *rx, int credit);
+int kiblnd_unpack_msg(struct kib_msg *msg, int nob);
+int kiblnd_post_rx(struct kib_rx *rx, int credit);
int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
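
The header conversion above keeps every wire structure bit-identical: WIRE_ATTR (packed) stays on the tagged structs, and the rd_frags[0] trailing-array idiom is untouched, so descriptors are still sized with offsetof(struct kib_rdma_desc, rd_frags[n]) exactly as kiblnd_rd_msg_size() does. A self-contained sketch of that sizing trick -- an illustration, not driver code; MAX_FRAGS is a hypothetical stand-in for IBLND_MAX_RDMA_FRAGS, and zero-length arrays plus the packed attribute assume GCC/Clang, which this driver already requires:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define WIRE_ATTR __attribute__((packed))
    #define MAX_FRAGS 16    /* stand-in for IBLND_MAX_RDMA_FRAGS */

    struct kib_rdma_frag {
            unsigned int rf_nob;            /* # bytes this frag */
            unsigned long long rf_addr;     /* misaligned when packed */
    } WIRE_ATTR;

    struct kib_rdma_desc {
            unsigned int rd_key;            /* local/remote key */
            unsigned int rd_nfrags;         /* # fragments */
            struct kib_rdma_frag rd_frags[0];       /* buffer frags */
    } WIRE_ATTR;

    int main(void)
    {
            /* offsetof(..., rd_frags[n]) is exactly the byte count for
             * a descriptor carrying n fragments: header plus n frags.
             */
            size_t nob = offsetof(struct kib_rdma_desc,
                                  rd_frags[MAX_FRAGS]);
            struct kib_rdma_desc *rd = calloc(1, nob);

            if (!rd)
                    return 1;
            rd->rd_nfrags = MAX_FRAGS;
            printf("descriptor with %u frags needs %zu bytes\n",
                   rd->rd_nfrags, nob);
            free(rd);
            return 0;
    }

The same offsetof() bound is what the connection path in o2iblnd_cb.c below uses to validate priv_nob on incoming requests before touching any message fields.
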
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 845e49a52430..596a697b9d39 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -40,22 +36,22 @@
#include "o2iblnd.h"
-static void kiblnd_peer_alive(kib_peer_t *peer);
-static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
-static void kiblnd_check_sends(kib_conn_t *conn);
-static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx,
+static void kiblnd_peer_alive(struct kib_peer *peer);
+static void kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error);
+static void kiblnd_check_sends(struct kib_conn *conn);
+static void kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx,
int type, int body_nob);
-static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
-static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
-static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
+static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+ int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie);
+static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
+static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
+static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx);
static void
-kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
+kiblnd_tx_done(lnet_ni_t *ni, struct kib_tx *tx)
{
lnet_msg_t *lntmsg[2];
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
int rc;
int i;
@@ -97,10 +93,10 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
void
kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
{
- kib_tx_t *tx;
+ struct kib_tx *tx;
while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, kib_tx_t, tx_list);
+ tx = list_entry(txlist->next, struct kib_tx, tx_list);
list_del(&tx->tx_list);
/* complete now */
@@ -110,19 +106,19 @@ kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
}
}
-static kib_tx_t *
+static struct kib_tx *
kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
{
- kib_net_t *net = (kib_net_t *)ni->ni_data;
+ struct kib_net *net = (struct kib_net *)ni->ni_data;
struct list_head *node;
- kib_tx_t *tx;
- kib_tx_poolset_t *tps;
+ struct kib_tx *tx;
+ struct kib_tx_poolset *tps;
tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
node = kiblnd_pool_alloc_node(&tps->tps_poolset);
if (!node)
return NULL;
- tx = list_entry(node, kib_tx_t, tx_list);
+ tx = list_entry(node, struct kib_tx, tx_list);
LASSERT(!tx->tx_nwrq);
LASSERT(!tx->tx_queued);
@@ -138,9 +134,9 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
}
static void
-kiblnd_drop_rx(kib_rx_t *rx)
+kiblnd_drop_rx(struct kib_rx *rx)
{
- kib_conn_t *conn = rx->rx_conn;
+ struct kib_conn *conn = rx->rx_conn;
struct kib_sched_info *sched = conn->ibc_sched;
unsigned long flags;
@@ -153,10 +149,10 @@ kiblnd_drop_rx(kib_rx_t *rx)
}
int
-kiblnd_post_rx(kib_rx_t *rx, int credit)
+kiblnd_post_rx(struct kib_rx *rx, int credit)
{
- kib_conn_t *conn = rx->rx_conn;
- kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
+ struct kib_conn *conn = rx->rx_conn;
+ struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
struct ib_recv_wr *bad_wrq = NULL;
struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
int rc;
@@ -223,13 +219,13 @@ out:
return rc;
}
-static kib_tx_t *
-kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
+static struct kib_tx *
+kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, __u64 cookie)
{
struct list_head *tmp;
list_for_each(tmp, &conn->ibc_active_txs) {
- kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
+ struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
LASSERT(!tx->tx_queued);
LASSERT(tx->tx_sending || tx->tx_waiting);
@@ -249,9 +245,9 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
}
static void
-kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
+kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, __u64 cookie)
{
- kib_tx_t *tx;
+ struct kib_tx *tx;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
int idle;
@@ -287,10 +283,10 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
}
static void
-kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
+kiblnd_send_completion(struct kib_conn *conn, int type, int status, __u64 cookie)
{
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+ struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
if (!tx) {
CERROR("Can't get tx for completion %x for %s\n",
@@ -300,19 +296,19 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
tx->tx_msg->ibm_u.completion.ibcm_status = status;
tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
- kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));
+ kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));
kiblnd_queue_tx(tx, conn);
}
static void
-kiblnd_handle_rx(kib_rx_t *rx)
+kiblnd_handle_rx(struct kib_rx *rx)
{
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
+ struct kib_msg *msg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
int credits = msg->ibm_credits;
- kib_tx_t *tx;
+ struct kib_tx *tx;
int rc = 0;
int rc2;
int post_credit;
@@ -467,12 +463,12 @@ kiblnd_handle_rx(kib_rx_t *rx)
}
static void
-kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
+kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
{
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
+ struct kib_msg *msg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
int rc;
int err = -EIO;
@@ -561,10 +557,10 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
}
static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
+kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc *rd, __u32 nob)
{
- kib_hca_dev_t *hdev;
- kib_fmr_poolset_t *fps;
+ struct kib_hca_dev *hdev;
+ struct kib_fmr_poolset *fps;
int cpt;
int rc;
@@ -593,9 +589,9 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
return 0;
}
-static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
+static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
LASSERT(net);
@@ -609,11 +605,11 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
}
}
-static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+static int kiblnd_map_tx(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
int nfrags)
{
- kib_net_t *net = ni->ni_data;
- kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct kib_net *net = ni->ni_data;
+ struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
struct ib_mr *mr = NULL;
__u32 nob;
int i;
@@ -651,10 +647,10 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
}
static int
-kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+kiblnd_setup_rd_iov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
unsigned int niov, struct kvec *iov, int offset, int nob)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
struct page *page;
struct scatterlist *sg;
unsigned long vaddr;
@@ -689,6 +685,10 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
sg_set_page(sg, page, fragnob, page_offset);
sg = sg_next(sg);
+ if (!sg) {
+ CERROR("lacking enough sg entries to map tx\n");
+ return -EFAULT;
+ }
if (offset + fragnob < iov->iov_len) {
offset += fragnob;
@@ -704,10 +704,10 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
}
static int
-kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+kiblnd_setup_rd_kiov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
struct scatterlist *sg;
int fragnob;
@@ -733,6 +733,10 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
sg_set_page(sg, kiov->kiov_page, fragnob,
kiov->kiov_offset + offset);
sg = sg_next(sg);
+ if (!sg) {
+ CERROR("lacking enough sg entries to map tx\n");
+ return -EFAULT;
+ }
offset = 0;
kiov++;
@@ -744,11 +748,11 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
}
static int
-kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
+kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
__must_hold(&conn->ibc_lock)
{
- kib_msg_t *msg = tx->tx_msg;
- kib_peer_t *peer = conn->ibc_peer;
+ struct kib_msg *msg = tx->tx_msg;
+ struct kib_peer *peer = conn->ibc_peer;
struct lnet_ni *ni = peer->ibp_ni;
int ver = conn->ibc_version;
int rc;
@@ -901,11 +905,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
}
static void
-kiblnd_check_sends(kib_conn_t *conn)
+kiblnd_check_sends(struct kib_conn *conn)
{
int ver = conn->ibc_version;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx;
+ struct kib_tx *tx;
/* Don't send anything until after the connection is established */
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
@@ -924,7 +928,7 @@ kiblnd_check_sends(kib_conn_t *conn)
while (conn->ibc_reserved_credits > 0 &&
!list_empty(&conn->ibc_tx_queue_rsrvd)) {
tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
list_del(&tx->tx_list);
list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
@@ -948,16 +952,16 @@ kiblnd_check_sends(kib_conn_t *conn)
if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
tx = list_entry(conn->ibc_tx_queue_nocred.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT(!IBLND_OOB_CAPABLE(ver));
credit = 1;
tx = list_entry(conn->ibc_tx_noops.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
tx = list_entry(conn->ibc_tx_queue.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
} else {
break;
}
@@ -970,10 +974,10 @@ kiblnd_check_sends(kib_conn_t *conn)
}
static void
-kiblnd_tx_complete(kib_tx_t *tx, int status)
+kiblnd_tx_complete(struct kib_tx *tx, int status)
{
int failed = (status != IB_WC_SUCCESS);
- kib_conn_t *conn = tx->tx_conn;
+ struct kib_conn *conn = tx->tx_conn;
int idle;
LASSERT(tx->tx_sending > 0);
@@ -1025,12 +1029,12 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
}
static void
-kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
+kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx, int type, int body_nob)
{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
- int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+ int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
struct ib_mr *mr = hdev->ibh_mrs;
LASSERT(tx->tx_nwrq >= 0);
@@ -1057,11 +1061,11 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
}
static int
-kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+ int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie)
{
- kib_msg_t *ibmsg = tx->tx_msg;
- kib_rdma_desc_t *srcrd = tx->tx_rd;
+ struct kib_msg *ibmsg = tx->tx_msg;
+ struct kib_rdma_desc *srcrd = tx->tx_rd;
struct ib_sge *sge = &tx->tx_sge[0];
struct ib_rdma_wr *wrq, *next;
int rc = resid;
@@ -1099,7 +1103,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
wrknob = min(min(kiblnd_rd_frag_size(srcrd, srcidx),
kiblnd_rd_frag_size(dstrd, dstidx)),
- (__u32) resid);
+ (__u32)resid);
sge = &tx->tx_sge[tx->tx_nwrq];
sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
@@ -1135,13 +1139,13 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
ibmsg->ibm_u.completion.ibcm_status = rc;
ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
- type, sizeof(kib_completion_msg_t));
+ type, sizeof(struct kib_completion_msg));
return rc;
}
static void
-kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
+kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
{
struct list_head *q;
@@ -1196,7 +1200,7 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
}
static void
-kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn)
+kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
{
spin_lock(&conn->ibc_lock);
kiblnd_queue_tx_locked(tx, conn);
@@ -1243,11 +1247,11 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
}
static void
-kiblnd_connect_peer(kib_peer_t *peer)
+kiblnd_connect_peer(struct kib_peer *peer)
{
struct rdma_cm_id *cmid;
- kib_dev_t *dev;
- kib_net_t *net = peer->ibp_ni->ni_data;
+ struct kib_dev *dev;
+ struct kib_net *net = peer->ibp_ni->ni_data;
struct sockaddr_in srcaddr;
struct sockaddr_in dstaddr;
int rc;
@@ -1311,7 +1315,7 @@ kiblnd_connect_peer(kib_peer_t *peer)
}
bool
-kiblnd_reconnect_peer(kib_peer_t *peer)
+kiblnd_reconnect_peer(struct kib_peer *peer)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
char *reason = NULL;
@@ -1361,11 +1365,11 @@ no_reconnect:
}
void
-kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
+kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid)
{
- kib_peer_t *peer;
- kib_peer_t *peer2;
- kib_conn_t *conn;
+ struct kib_peer *peer;
+ struct kib_peer *peer2;
+ struct kib_conn *conn;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
int rc;
@@ -1468,7 +1472,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
peer->ibp_connecting = 1;
/* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown);
+ LASSERT(!((struct kib_net *)ni->ni_data)->ibn_shutdown);
if (tx)
list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
@@ -1495,9 +1499,9 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
- kib_msg_t *ibmsg;
- kib_rdma_desc_t *rd;
- kib_tx_t *tx;
+ struct kib_msg *ibmsg;
+ struct kib_rdma_desc *rd;
+ struct kib_tx *tx;
int nob;
int rc;
@@ -1528,7 +1532,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
break; /* send IMMEDIATE */
/* is the REPLY message too small for RDMA? */
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
+ nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
if (nob <= IBLND_MSG_SIZE)
break; /* send IMMEDIATE */
@@ -1558,7 +1562,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
return -EIO;
}
- nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
+ nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
ibmsg->ibm_u.get.ibgm_hdr = *hdr;
@@ -1580,7 +1584,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
case LNET_MSG_REPLY:
case LNET_MSG_PUT:
/* Is the payload small enough not to need RDMA? */
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
+ nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
if (nob <= IBLND_MSG_SIZE)
break; /* send IMMEDIATE */
@@ -1610,7 +1614,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
ibmsg = tx->tx_msg;
ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(struct kib_putreq_msg));
tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
@@ -1620,7 +1624,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
/* send IMMEDIATE */
- LASSERT(offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
+ LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
<= IBLND_MSG_SIZE);
tx = kiblnd_get_idle_tx(ni, target.nid);
@@ -1635,16 +1639,16 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
if (payload_kiov)
lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
payload_niov, payload_kiov,
payload_offset, payload_nob);
else
lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
payload_niov, payload_iov,
payload_offset, payload_nob);
- nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
+ nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
@@ -1653,7 +1657,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
}
static void
-kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
+kiblnd_reply(lnet_ni_t *ni, struct kib_rx *rx, lnet_msg_t *lntmsg)
{
lnet_process_id_t target = lntmsg->msg_target;
unsigned int niov = lntmsg->msg_niov;
@@ -1661,7 +1665,7 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
lnet_kiov_t *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
- kib_tx_t *tx;
+ struct kib_tx *tx;
int rc;
tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
@@ -1718,10 +1722,10 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
- kib_rx_t *rx = private;
- kib_msg_t *rxmsg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- kib_tx_t *tx;
+ struct kib_rx *rx = private;
+ struct kib_msg *rxmsg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
+ struct kib_tx *tx;
int nob;
int post_credit = IBLND_POSTRX_PEER_CREDIT;
int rc = 0;
@@ -1736,7 +1740,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
LBUG();
case IBLND_MSG_IMMEDIATE:
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
+ nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
if (nob > rx->rx_nob) {
CERROR("Immediate message from %s too big: %d(%d)\n",
libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
@@ -1748,19 +1752,19 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
if (kiov)
lnet_copy_flat2kiov(niov, kiov, offset,
IBLND_MSG_SIZE, rxmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
mlen);
else
lnet_copy_flat2iov(niov, iov, offset,
IBLND_MSG_SIZE, rxmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
mlen);
lnet_finalize(ni, lntmsg, 0);
break;
case IBLND_MSG_PUT_REQ: {
- kib_msg_t *txmsg;
- kib_rdma_desc_t *rd;
+ struct kib_msg *txmsg;
+ struct kib_rdma_desc *rd;
if (!mlen) {
lnet_finalize(ni, lntmsg, 0);
@@ -1796,7 +1800,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
break;
}
- nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
+ nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
@@ -1847,7 +1851,7 @@ kiblnd_thread_fini(void)
}
static void
-kiblnd_peer_alive(kib_peer_t *peer)
+kiblnd_peer_alive(struct kib_peer *peer)
{
/* This is racy, but everyone's only writing cfs_time_current() */
peer->ibp_last_alive = cfs_time_current();
@@ -1855,7 +1859,7 @@ kiblnd_peer_alive(kib_peer_t *peer)
}
static void
-kiblnd_peer_notify(kib_peer_t *peer)
+kiblnd_peer_notify(struct kib_peer *peer)
{
int error = 0;
unsigned long last_alive = 0;
@@ -1878,7 +1882,7 @@ kiblnd_peer_notify(kib_peer_t *peer)
}
void
-kiblnd_close_conn_locked(kib_conn_t *conn, int error)
+kiblnd_close_conn_locked(struct kib_conn *conn, int error)
{
/*
* This just does the immediate housekeeping. 'error' is zero for a
@@ -1888,8 +1892,8 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
* already dealing with it (either to set it up or tear it down).
* Caller holds kib_global_lock exclusively in irq context
*/
- kib_peer_t *peer = conn->ibc_peer;
- kib_dev_t *dev;
+ struct kib_peer *peer = conn->ibc_peer;
+ struct kib_dev *dev;
unsigned long flags;
LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
@@ -1918,7 +1922,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
- dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
+ dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev;
list_del(&conn->ibc_list);
/* connd (see below) takes over ibc_list's ref */
@@ -1948,7 +1952,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
}
void
-kiblnd_close_conn(kib_conn_t *conn, int error)
+kiblnd_close_conn(struct kib_conn *conn, int error)
{
unsigned long flags;
@@ -1960,11 +1964,11 @@ kiblnd_close_conn(kib_conn_t *conn, int error)
}
static void
-kiblnd_handle_early_rxs(kib_conn_t *conn)
+kiblnd_handle_early_rxs(struct kib_conn *conn)
{
unsigned long flags;
- kib_rx_t *rx;
- kib_rx_t *tmp;
+ struct kib_rx *rx;
+ struct kib_rx *tmp;
LASSERT(!in_interrupt());
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
@@ -1982,17 +1986,17 @@ kiblnd_handle_early_rxs(kib_conn_t *conn)
}
static void
-kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
+kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
{
LIST_HEAD(zombies);
struct list_head *tmp;
struct list_head *nxt;
- kib_tx_t *tx;
+ struct kib_tx *tx;
spin_lock(&conn->ibc_lock);
list_for_each_safe(tmp, nxt, txs) {
- tx = list_entry(tmp, kib_tx_t, tx_list);
+ tx = list_entry(tmp, struct kib_tx, tx_list);
if (txs == &conn->ibc_active_txs) {
LASSERT(!tx->tx_queued);
@@ -2017,7 +2021,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
}
static void
-kiblnd_finalise_conn(kib_conn_t *conn)
+kiblnd_finalise_conn(struct kib_conn *conn)
{
LASSERT(!in_interrupt());
LASSERT(conn->ibc_state > IBLND_CONN_INIT);
@@ -2045,7 +2049,7 @@ kiblnd_finalise_conn(kib_conn_t *conn)
}
static void
-kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
+kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error)
{
LIST_HEAD(zombies);
unsigned long flags;
@@ -2099,11 +2103,11 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
}
static void
-kiblnd_connreq_done(kib_conn_t *conn, int status)
+kiblnd_connreq_done(struct kib_conn *conn, int status)
{
- kib_peer_t *peer = conn->ibc_peer;
- kib_tx_t *tx;
- kib_tx_t *tmp;
+ struct kib_peer *peer = conn->ibc_peer;
+ struct kib_tx *tx;
+ struct kib_tx *tmp;
struct list_head txs;
unsigned long flags;
int active;
@@ -2209,7 +2213,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
}
static void
-kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
+kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
{
int rc;
@@ -2223,17 +2227,17 @@ static int
kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- kib_msg_t *reqmsg = priv;
- kib_msg_t *ackmsg;
- kib_dev_t *ibdev;
- kib_peer_t *peer;
- kib_peer_t *peer2;
- kib_conn_t *conn;
+ struct kib_msg *reqmsg = priv;
+ struct kib_msg *ackmsg;
+ struct kib_dev *ibdev;
+ struct kib_peer *peer;
+ struct kib_peer *peer2;
+ struct kib_conn *conn;
lnet_ni_t *ni = NULL;
- kib_net_t *net = NULL;
+ struct kib_net *net = NULL;
lnet_nid_t nid;
struct rdma_conn_param cp;
- kib_rej_t rej;
+ struct kib_rej rej;
int version = IBLND_MSG_VERSION;
unsigned long flags;
int rc;
@@ -2242,7 +2246,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
LASSERT(!in_interrupt());
/* cmid inherits 'context' from the corresponding listener id */
- ibdev = (kib_dev_t *)cmid->context;
+ ibdev = (struct kib_dev *)cmid->context;
LASSERT(ibdev);
memset(&rej, 0, sizeof(rej));
@@ -2260,7 +2264,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
- if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
+ if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
CERROR("Short connection request\n");
goto failed;
}
@@ -2295,7 +2299,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
if (ni) {
- net = (kib_net_t *)ni->ni_data;
+ net = (struct kib_net *)ni->ni_data;
rej.ibr_incarnation = net->ibn_incarnation;
}
@@ -2534,11 +2538,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
static void
-kiblnd_check_reconnect(kib_conn_t *conn, int version,
- __u64 incarnation, int why, kib_connparams_t *cp)
+kiblnd_check_reconnect(struct kib_conn *conn, int version,
+ __u64 incarnation, int why, struct kib_connparams *cp)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_t *peer = conn->ibc_peer;
+ struct kib_peer *peer = conn->ibc_peer;
char *reason;
int msg_size = IBLND_MSG_SIZE;
int frag_num = -1;
@@ -2647,9 +2651,9 @@ out:
}
static void
-kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
+kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
{
- kib_peer_t *peer = conn->ibc_peer;
+ struct kib_peer *peer = conn->ibc_peer;
LASSERT(!in_interrupt());
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
@@ -2667,9 +2671,9 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
break;
case IB_CM_REJ_CONSUMER_DEFINED:
- if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
- kib_rej_t *rej = priv;
- kib_connparams_t *cp = NULL;
+ if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
+ struct kib_rej *rej = priv;
+ struct kib_connparams *cp = NULL;
int flip = 0;
__u64 incarnation = -1;
@@ -2692,7 +2696,7 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
flip = 1;
}
- if (priv_nob >= sizeof(kib_rej_t) &&
+ if (priv_nob >= sizeof(struct kib_rej) &&
rej->ibr_version > IBLND_MSG_VERSION_1) {
/*
* priv_nob is always 148 in current version
@@ -2775,12 +2779,12 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
}
static void
-kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
+kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
{
- kib_peer_t *peer = conn->ibc_peer;
+ struct kib_peer *peer = conn->ibc_peer;
lnet_ni_t *ni = peer->ibp_ni;
- kib_net_t *net = ni->ni_data;
- kib_msg_t *msg = priv;
+ struct kib_net *net = ni->ni_data;
+ struct kib_msg *msg = priv;
int ver = conn->ibc_version;
int rc = kiblnd_unpack_msg(msg, priv_nob);
unsigned long flags;
@@ -2877,9 +2881,9 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
static int
kiblnd_active_connect(struct rdma_cm_id *cmid)
{
- kib_peer_t *peer = (kib_peer_t *)cmid->context;
- kib_conn_t *conn;
- kib_msg_t *msg;
+ struct kib_peer *peer = (struct kib_peer *)cmid->context;
+ struct kib_conn *conn;
+ struct kib_msg *msg;
struct rdma_conn_param cp;
int version;
__u64 incarnation;
@@ -2944,8 +2948,8 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
int
kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
{
- kib_peer_t *peer;
- kib_conn_t *conn;
+ struct kib_peer *peer;
+ struct kib_conn *conn;
int rc;
switch (event->event) {
@@ -2963,7 +2967,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return rc;
case RDMA_CM_EVENT_ADDR_ERROR:
- peer = (kib_peer_t *)cmid->context;
+ peer = (struct kib_peer *)cmid->context;
CNETERR("%s: ADDR ERROR %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
@@ -2971,7 +2975,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return -EHOSTUNREACH; /* rc destroys cmid */
case RDMA_CM_EVENT_ADDR_RESOLVED:
- peer = (kib_peer_t *)cmid->context;
+ peer = (struct kib_peer *)cmid->context;
CDEBUG(D_NET, "%s Addr resolved: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
@@ -2994,7 +2998,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return rc; /* rc destroys cmid */
case RDMA_CM_EVENT_ROUTE_ERROR:
- peer = (kib_peer_t *)cmid->context;
+ peer = (struct kib_peer *)cmid->context;
CNETERR("%s: ROUTE ERROR %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
@@ -3002,7 +3006,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return -EHOSTUNREACH; /* rc destroys cmid */
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- peer = (kib_peer_t *)cmid->context;
+ peer = (struct kib_peer *)cmid->context;
CDEBUG(D_NET, "%s Route resolved: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
@@ -3016,7 +3020,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return event->status; /* rc destroys cmid */
case RDMA_CM_EVENT_UNREACHABLE:
- conn = (kib_conn_t *)cmid->context;
+ conn = (struct kib_conn *)cmid->context;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
CNETERR("%s: UNREACHABLE %d\n",
@@ -3026,7 +3030,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return 0;
case RDMA_CM_EVENT_CONNECT_ERROR:
- conn = (kib_conn_t *)cmid->context;
+ conn = (struct kib_conn *)cmid->context;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
CNETERR("%s: CONNECT ERROR %d\n",
@@ -3036,7 +3040,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return 0;
case RDMA_CM_EVENT_REJECTED:
- conn = (kib_conn_t *)cmid->context;
+ conn = (struct kib_conn *)cmid->context;
switch (conn->ibc_state) {
default:
LBUG();
@@ -3058,7 +3062,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return 0;
case RDMA_CM_EVENT_ESTABLISHED:
- conn = (kib_conn_t *)cmid->context;
+ conn = (struct kib_conn *)cmid->context;
switch (conn->ibc_state) {
default:
LBUG();
@@ -3084,7 +3088,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
return 0;
case RDMA_CM_EVENT_DISCONNECTED:
- conn = (kib_conn_t *)cmid->context;
+ conn = (struct kib_conn *)cmid->context;
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
CERROR("%s DISCONNECTED\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
@@ -3113,13 +3117,13 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
}
static int
-kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
+kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
{
- kib_tx_t *tx;
+ struct kib_tx *tx;
struct list_head *ttmp;
list_for_each(ttmp, txs) {
- tx = list_entry(ttmp, kib_tx_t, tx_list);
+ tx = list_entry(ttmp, struct kib_tx, tx_list);
if (txs != &conn->ibc_active_txs) {
LASSERT(tx->tx_queued);
@@ -3140,7 +3144,7 @@ kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
}
static int
-kiblnd_conn_timed_out_locked(kib_conn_t *conn)
+kiblnd_conn_timed_out_locked(struct kib_conn *conn)
{
return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
@@ -3156,10 +3160,10 @@ kiblnd_check_conns(int idx)
LIST_HEAD(checksends);
struct list_head *peers = &kiblnd_data.kib_peers[idx];
struct list_head *ptmp;
- kib_peer_t *peer;
- kib_conn_t *conn;
- kib_conn_t *temp;
- kib_conn_t *tmp;
+ struct kib_peer *peer;
+ struct kib_conn *conn;
+ struct kib_conn *temp;
+ struct kib_conn *tmp;
struct list_head *ctmp;
unsigned long flags;
@@ -3171,13 +3175,13 @@ kiblnd_check_conns(int idx)
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
list_for_each(ptmp, peers) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = list_entry(ptmp, struct kib_peer, ibp_list);
list_for_each(ctmp, &peer->ibp_conns) {
int timedout;
int sendnoop;
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
@@ -3235,7 +3239,7 @@ kiblnd_check_conns(int idx)
}
static void
-kiblnd_disconnect_conn(kib_conn_t *conn)
+kiblnd_disconnect_conn(struct kib_conn *conn)
{
LASSERT(!in_interrupt());
LASSERT(current == kiblnd_data.kib_connd);
@@ -3264,7 +3268,7 @@ kiblnd_connd(void *arg)
spinlock_t *lock = &kiblnd_data.kib_connd_lock;
wait_queue_t wait;
unsigned long flags;
- kib_conn_t *conn;
+ struct kib_conn *conn;
int timeout;
int i;
int dropped_lock;
@@ -3284,10 +3288,10 @@ kiblnd_connd(void *arg)
dropped_lock = 0;
if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
- kib_peer_t *peer = NULL;
+ struct kib_peer *peer = NULL;
conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- kib_conn_t, ibc_list);
+ struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
if (conn->ibc_reconnect) {
peer = conn->ibc_peer;
@@ -3314,7 +3318,7 @@ kiblnd_connd(void *arg)
if (!list_empty(&kiblnd_data.kib_connd_conns)) {
conn = list_entry(kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
+ struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
@@ -3338,7 +3342,7 @@ kiblnd_connd(void *arg)
break;
conn = list_entry(kiblnd_data.kib_reconn_list.next,
- kib_conn_t, ibc_list);
+ struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
@@ -3409,7 +3413,7 @@ kiblnd_connd(void *arg)
void
kiblnd_qp_event(struct ib_event *event, void *arg)
{
- kib_conn_t *conn = arg;
+ struct kib_conn *conn = arg;
switch (event->event) {
case IB_EVENT_COMM_EST:
@@ -3471,7 +3475,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
* occurred. But in this case, !ibc_nrx && !ibc_nsends_posted
* and this CQ is about to be destroyed so I NOOP.
*/
- kib_conn_t *conn = arg;
+ struct kib_conn *conn = arg;
struct kib_sched_info *sched = conn->ibc_sched;
unsigned long flags;
@@ -3498,7 +3502,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
void
kiblnd_cq_event(struct ib_event *event, void *arg)
{
- kib_conn_t *conn = arg;
+ struct kib_conn *conn = arg;
CERROR("%s: async CQ event type %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
@@ -3509,7 +3513,7 @@ kiblnd_scheduler(void *arg)
{
long id = (long)arg;
struct kib_sched_info *sched;
- kib_conn_t *conn;
+ struct kib_conn *conn;
wait_queue_t wait;
unsigned long flags;
struct ib_wc wc;
@@ -3544,7 +3548,7 @@ kiblnd_scheduler(void *arg)
did_something = 0;
if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next, kib_conn_t,
+ conn = list_entry(sched->ibs_conns.next, struct kib_conn,
ibc_sched_list);
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
@@ -3644,7 +3648,7 @@ int
kiblnd_failover_thread(void *arg)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_dev_t *dev;
+ struct kib_dev *dev;
wait_queue_t wait;
unsigned long flags;
int rc;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index f8fdd4ae3dbf..44e960f60833 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -145,7 +141,7 @@ static int use_privileged_port = 1;
module_param(use_privileged_port, int, 0644);
MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
-kib_tunables_t kiblnd_tunables = {
+struct kib_tunables kiblnd_tunables = {
.kib_dev_failover = &dev_failover,
.kib_service = &service,
.kib_cksum = &cksum,
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 406c0e7a57b9..07ec540946cd 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -44,14 +40,14 @@
#include "socklnd.h"
static lnd_t the_ksocklnd;
-ksock_nal_data_t ksocknal_data;
+struct ksock_nal_data ksocknal_data;
-static ksock_interface_t *
+static struct ksock_interface *
ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
int i;
- ksock_interface_t *iface;
+ struct ksock_interface *iface;
for (i = 0; i < net->ksnn_ninterfaces; i++) {
LASSERT(i < LNET_MAX_INTERFACES);
@@ -64,10 +60,10 @@ ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
return NULL;
}
-static ksock_route_t *
+static struct ksock_route *
ksocknal_create_route(__u32 ipaddr, int port)
{
- ksock_route_t *route;
+ struct ksock_route *route;
LIBCFS_ALLOC(route, sizeof(*route));
if (!route)
@@ -89,7 +85,7 @@ ksocknal_create_route(__u32 ipaddr, int port)
}
void
-ksocknal_destroy_route(ksock_route_t *route)
+ksocknal_destroy_route(struct ksock_route *route)
{
LASSERT(!atomic_read(&route->ksnr_refcount));
@@ -100,11 +96,11 @@ ksocknal_destroy_route(ksock_route_t *route)
}
static int
-ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
{
int cpt = lnet_cpt_of_nid(id.nid);
- ksock_net_t *net = ni->ni_data;
- ksock_peer_t *peer;
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_peer *peer;
LASSERT(id.nid != LNET_NID_ANY);
LASSERT(id.pid != LNET_PID_ANY);
@@ -148,9 +144,9 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
}
void
-ksocknal_destroy_peer(ksock_peer_t *peer)
+ksocknal_destroy_peer(struct ksock_peer *peer)
{
- ksock_net_t *net = peer->ksnp_ni->ni_data;
+ struct ksock_net *net = peer->ksnp_ni->ni_data;
CDEBUG(D_NET, "peer %s %p deleted\n",
libcfs_id2str(peer->ksnp_id), peer);
@@ -175,15 +171,15 @@ ksocknal_destroy_peer(ksock_peer_t *peer)
spin_unlock_bh(&net->ksnn_lock);
}
-ksock_peer_t *
+struct ksock_peer *
ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
{
struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
struct list_head *tmp;
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
list_for_each(tmp, peer_list) {
- peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(tmp, struct ksock_peer, ksnp_list);
LASSERT(!peer->ksnp_closing);
@@ -202,10 +198,10 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
return NULL;
}
-ksock_peer_t *
+struct ksock_peer *
ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
{
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
read_lock(&ksocknal_data.ksnd_global_lock);
peer = ksocknal_find_peer_locked(ni, id);
@@ -217,11 +213,11 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
}
static void
-ksocknal_unlink_peer_locked(ksock_peer_t *peer)
+ksocknal_unlink_peer_locked(struct ksock_peer *peer)
{
int i;
__u32 ip;
- ksock_interface_t *iface;
+ struct ksock_interface *iface;
for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
LASSERT(i < LNET_MAX_INTERFACES);
@@ -253,9 +249,9 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
int *port, int *conn_count, int *share_count)
{
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
struct list_head *ptmp;
- ksock_route_t *route;
+ struct ksock_route *route;
struct list_head *rtmp;
int i;
int j;
@@ -265,7 +261,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
@@ -303,7 +299,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
if (index-- > 0)
continue;
- route = list_entry(rtmp, ksock_route_t,
+ route = list_entry(rtmp, struct ksock_route,
ksnr_list);
*id = peer->ksnp_id;
@@ -323,11 +319,11 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
}
static void
-ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
{
- ksock_peer_t *peer = route->ksnr_peer;
+ struct ksock_peer *peer = route->ksnr_peer;
int type = conn->ksnc_type;
- ksock_interface_t *iface;
+ struct ksock_interface *iface;
conn->ksnc_route = route;
ksocknal_route_addref(route);
@@ -369,11 +365,11 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
}
static void
-ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
+ksocknal_add_route_locked(struct ksock_peer *peer, struct ksock_route *route)
{
struct list_head *tmp;
- ksock_conn_t *conn;
- ksock_route_t *route2;
+ struct ksock_conn *conn;
+ struct ksock_route *route2;
LASSERT(!peer->ksnp_closing);
LASSERT(!route->ksnr_peer);
@@ -383,7 +379,7 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
/* LASSERT(unique) */
list_for_each(tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+ route2 = list_entry(tmp, struct ksock_route, ksnr_list);
if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
CERROR("Duplicate route %s %pI4h\n",
@@ -399,7 +395,7 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
continue;
@@ -410,11 +406,11 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
}
static void
-ksocknal_del_route_locked(ksock_route_t *route)
+ksocknal_del_route_locked(struct ksock_route *route)
{
- ksock_peer_t *peer = route->ksnr_peer;
- ksock_interface_t *iface;
- ksock_conn_t *conn;
+ struct ksock_peer *peer = route->ksnr_peer;
+ struct ksock_interface *iface;
+ struct ksock_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
@@ -422,7 +418,7 @@ ksocknal_del_route_locked(ksock_route_t *route)
/* Close associated conns */
list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_route != route)
continue;
@@ -455,10 +451,10 @@ int
ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
{
struct list_head *tmp;
- ksock_peer_t *peer;
- ksock_peer_t *peer2;
- ksock_route_t *route;
- ksock_route_t *route2;
+ struct ksock_peer *peer;
+ struct ksock_peer *peer2;
+ struct ksock_route *route;
+ struct ksock_route *route2;
int rc;
if (id.nid == LNET_NID_ANY ||
@@ -479,7 +475,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
- LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
+ LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
peer2 = ksocknal_find_peer_locked(ni, id);
if (peer2) {
@@ -493,7 +489,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
route2 = NULL;
list_for_each(tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+ route2 = list_entry(tmp, struct ksock_route, ksnr_list);
if (route2->ksnr_ipaddr == ipaddr)
break;
@@ -514,10 +510,10 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
}
static void
-ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
+ksocknal_del_peer_locked(struct ksock_peer *peer, __u32 ip)
{
- ksock_conn_t *conn;
- ksock_route_t *route;
+ struct ksock_conn *conn;
+ struct ksock_route *route;
struct list_head *tmp;
struct list_head *nxt;
int nshared;
@@ -528,7 +524,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
ksocknal_peer_addref(peer);
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
/* no match */
if (!(!ip || route->ksnr_ipaddr == ip))
@@ -541,7 +537,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
nshared = 0;
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
nshared += route->ksnr_share_count;
}
@@ -551,7 +547,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
* left
*/
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
/* we should only be removing auto-entries */
LASSERT(!route->ksnr_share_count);
@@ -559,7 +555,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
}
list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
ksocknal_close_conn_locked(conn, 0);
}
@@ -575,7 +571,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
LIST_HEAD(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
int lo;
int hi;
int i;
@@ -593,7 +589,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
@@ -628,12 +624,12 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
return rc;
}
-static ksock_conn_t *
+static struct ksock_conn *
ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
{
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
struct list_head *ptmp;
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
struct list_head *ctmp;
int i;
@@ -641,7 +637,7 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
LASSERT(!peer->ksnp_closing);
@@ -652,7 +648,7 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
if (index-- > 0)
continue;
- conn = list_entry(ctmp, ksock_conn_t,
+ conn = list_entry(ctmp, struct ksock_conn,
ksnc_list);
ksocknal_conn_addref(conn);
read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -665,11 +661,11 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
return NULL;
}
-static ksock_sched_t *
+static struct ksock_sched *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
int i;
LASSERT(info->ksi_nthreads > 0);
@@ -691,7 +687,7 @@ ksocknal_choose_scheduler_locked(unsigned int cpt)
static int
ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
int i;
int nip;
@@ -719,7 +715,7 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
}
static int
-ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
+ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
{
int best_netmatch = 0;
int best_xor = 0;
@@ -751,12 +747,12 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
}
static int
-ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
+ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- ksock_net_t *net = peer->ksnp_ni->ni_data;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
+ struct ksock_net *net = peer->ksnp_ni->ni_data;
+ struct ksock_interface *iface;
+ struct ksock_interface *best_iface;
int n_ips;
int i;
int j;
@@ -862,17 +858,17 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
}
static void
-ksocknal_create_routes(ksock_peer_t *peer, int port,
+ksocknal_create_routes(struct ksock_peer *peer, int port,
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
- ksock_route_t *newroute = NULL;
+ struct ksock_route *newroute = NULL;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
lnet_ni_t *ni = peer->ksnp_ni;
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
struct list_head *rtmp;
- ksock_route_t *route;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
+ struct ksock_route *route;
+ struct ksock_interface *iface;
+ struct ksock_interface *best_iface;
int best_netmatch;
int this_netmatch;
int best_nroutes;
@@ -919,7 +915,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
/* Already got a route? */
route = NULL;
list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t, ksnr_list);
+ route = list_entry(rtmp, struct ksock_route, ksnr_list);
if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
break;
@@ -941,7 +937,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
/* Using this interface already? */
list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t,
+ route = list_entry(rtmp, struct ksock_route,
ksnr_list);
if (route->ksnr_myipaddr == iface->ksni_ipaddr)
@@ -985,7 +981,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
int
ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
{
- ksock_connreq_t *cr;
+ struct ksock_connreq *cr;
int rc;
__u32 peer_ip;
int peer_port;
@@ -1014,9 +1010,9 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
}
static int
-ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
+ksocknal_connecting(struct ksock_peer *peer, __u32 ipaddr)
{
- ksock_route_t *route;
+ struct ksock_route *route;
list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
if (route->ksnr_ipaddr == ipaddr)
@@ -1026,7 +1022,7 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
}
int
-ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
+ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
struct socket *sock, int type)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
@@ -1034,15 +1030,15 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
lnet_process_id_t peerid;
struct list_head *tmp;
__u64 incarnation;
- ksock_conn_t *conn;
- ksock_conn_t *conn2;
- ksock_peer_t *peer = NULL;
- ksock_peer_t *peer2;
- ksock_sched_t *sched;
+ struct ksock_conn *conn;
+ struct ksock_conn *conn2;
+ struct ksock_peer *peer = NULL;
+ struct ksock_peer *peer2;
+ struct ksock_sched *sched;
ksock_hello_msg_t *hello;
int cpt;
- ksock_tx_t *tx;
- ksock_tx_t *txtmp;
+ struct ksock_tx *tx;
+ struct ksock_tx *txtmp;
int rc;
int active;
char *warn = NULL;
@@ -1150,7 +1146,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_lock_bh(global_lock);
/* called with a ref on ni, so shutdown can't have started */
- LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
+ LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
peer2 = ksocknal_find_peer_locked(ni, peerid);
if (!peer2) {
@@ -1233,7 +1229,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
*/
if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
@@ -1273,7 +1269,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
* continually create duplicate routes.
*/
list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
continue;
@@ -1432,16 +1428,16 @@ failed_0:
}
void
-ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
+ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
{
/*
* This just does the immediate housekeeping, and queues the
* connection for the reaper to terminate.
* Caller holds ksnd_global_lock exclusively in irq context
*/
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_route_t *route;
- ksock_conn_t *conn2;
+ struct ksock_peer *peer = conn->ksnc_peer;
+ struct ksock_route *route;
+ struct ksock_conn *conn2;
struct list_head *tmp;
LASSERT(!peer->ksnp_error);
@@ -1459,7 +1455,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
conn2 = NULL;
list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn2->ksnc_route == route &&
conn2->ksnc_type == conn->ksnc_type)
@@ -1484,7 +1480,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
/* No more connections to this peer */
if (!list_empty(&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
@@ -1524,7 +1520,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
}
void
-ksocknal_peer_failed(ksock_peer_t *peer)
+ksocknal_peer_failed(struct ksock_peer *peer)
{
int notify = 0;
unsigned long last_alive = 0;
@@ -1552,12 +1548,12 @@ ksocknal_peer_failed(ksock_peer_t *peer)
}
void
-ksocknal_finalize_zcreq(ksock_conn_t *conn)
+ksocknal_finalize_zcreq(struct ksock_conn *conn)
{
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_tx_t *tx;
- ksock_tx_t *temp;
- ksock_tx_t *tmp;
+ struct ksock_peer *peer = conn->ksnc_peer;
+ struct ksock_tx *tx;
+ struct ksock_tx *temp;
+ struct ksock_tx *tmp;
LIST_HEAD(zlist);
/*
@@ -1589,7 +1585,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
}
void
-ksocknal_terminate_conn(ksock_conn_t *conn)
+ksocknal_terminate_conn(struct ksock_conn *conn)
{
/*
* This gets called by the reaper (guaranteed thread context) to
@@ -1597,8 +1593,8 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
* ksnc_refcount will eventually hit zero, and then the reaper will
* destroy it.
*/
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_peer *peer = conn->ksnc_peer;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
int failed = 0;
LASSERT(conn->ksnc_closing);
@@ -1656,7 +1652,7 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
}
void
-ksocknal_queue_zombie_conn(ksock_conn_t *conn)
+ksocknal_queue_zombie_conn(struct ksock_conn *conn)
{
/* Queue the conn for the reaper to destroy */
@@ -1670,7 +1666,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn)
}
void
-ksocknal_destroy_conn(ksock_conn_t *conn)
+ksocknal_destroy_conn(struct ksock_conn *conn)
{
unsigned long last_rcv;
@@ -1730,15 +1726,15 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
}
int
-ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
+ksocknal_close_peer_conns_locked(struct ksock_peer *peer, __u32 ipaddr, int why)
{
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
if (!ipaddr || conn->ksnc_ipaddr == ipaddr) {
count++;
@@ -1750,9 +1746,9 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
}
int
-ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
+ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
{
- ksock_peer_t *peer = conn->ksnc_peer;
+ struct ksock_peer *peer = conn->ksnc_peer;
__u32 ipaddr = conn->ksnc_ipaddr;
int count;
@@ -1768,7 +1764,7 @@ ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
int
ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
{
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
@@ -1789,7 +1785,7 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt,
&ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
(id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
@@ -1844,7 +1840,7 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
int connect = 1;
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
- ksock_peer_t *peer = NULL;
+ struct ksock_peer *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
lnet_process_id_t id = {
.nid = nid,
@@ -1856,11 +1852,11 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
peer = ksocknal_find_peer_locked(ni, id);
if (peer) {
struct list_head *tmp;
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
int bufnob;
list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
if (bufnob < conn->ksnc_tx_bufnob) {
@@ -1902,12 +1898,12 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
}
static void
-ksocknal_push_peer(ksock_peer_t *peer)
+ksocknal_push_peer(struct ksock_peer *peer)
{
int index;
int i;
struct list_head *tmp;
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
for (index = 0; ; index++) {
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -1917,7 +1913,7 @@ ksocknal_push_peer(ksock_peer_t *peer)
list_for_each(tmp, &peer->ksnp_conns) {
if (i++ == index) {
- conn = list_entry(tmp, ksock_conn_t,
+ conn = list_entry(tmp, struct ksock_conn,
ksnc_list);
ksocknal_conn_addref(conn);
break;
@@ -1954,7 +1950,7 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
int peer_off; /* searching offset in peer hash table */
for (peer_off = 0; ; peer_off++) {
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
int i = 0;
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -1986,15 +1982,15 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
static int
ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
{
- ksock_net_t *net = ni->ni_data;
- ksock_interface_t *iface;
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_interface *iface;
int rc;
int i;
int j;
struct list_head *ptmp;
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
struct list_head *rtmp;
- ksock_route_t *route;
+ struct ksock_route *route;
if (!ipaddress || !netmask)
return -EINVAL;
@@ -2017,7 +2013,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t,
+ peer = list_entry(ptmp, struct ksock_peer,
ksnp_list);
for (j = 0; j < peer->ksnp_n_passive_ips; j++)
@@ -2025,7 +2021,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
iface->ksni_npeers++;
list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t,
+ route = list_entry(rtmp, struct ksock_route,
ksnr_list);
if (route->ksnr_myipaddr == ipaddress)
@@ -2044,12 +2040,12 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
}
static void
-ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
+ksocknal_peer_del_interface_locked(struct ksock_peer *peer, __u32 ipaddr)
{
struct list_head *tmp;
struct list_head *nxt;
- ksock_route_t *route;
- ksock_conn_t *conn;
+ struct ksock_route *route;
+ struct ksock_conn *conn;
int i;
int j;
@@ -2063,7 +2059,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
}
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
if (route->ksnr_myipaddr != ipaddr)
continue;
@@ -2077,7 +2073,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
}
list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_myipaddr == ipaddr)
ksocknal_close_conn_locked(conn, 0);
@@ -2087,11 +2083,11 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
static int
ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
int rc = -ENOENT;
struct list_head *tmp;
struct list_head *nxt;
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
__u32 this_ip;
int i;
int j;
@@ -2115,7 +2111,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
list_for_each_safe(tmp, nxt,
&ksocknal_data.ksnd_peers[j]) {
- peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(tmp, struct ksock_peer, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
@@ -2139,8 +2135,8 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
switch (cmd) {
case IOC_LIBCFS_GET_INTERFACE: {
- ksock_net_t *net = ni->ni_data;
- ksock_interface_t *iface;
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_interface *iface;
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -2209,7 +2205,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
int txmem;
int rxmem;
int nagle;
- ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
+ struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
if (!conn)
return -ENOENT;
@@ -2284,8 +2280,8 @@ ksocknal_free_buffers(void)
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
struct list_head zlist;
- ksock_tx_t *tx;
- ksock_tx_t *temp;
+ struct ksock_tx *tx;
+ struct ksock_tx *temp;
list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
@@ -2304,7 +2300,7 @@ static void
ksocknal_base_shutdown(void)
{
struct ksock_sched_info *info;
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
int i;
int j;
@@ -2446,7 +2442,7 @@ ksocknal_base_startup(void)
goto failed;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
int nthrs;
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
@@ -2534,7 +2530,7 @@ ksocknal_base_startup(void)
static void
ksocknal_debug_peerhash(lnet_ni_t *ni)
{
- ksock_peer_t *peer = NULL;
+ struct ksock_peer *peer = NULL;
struct list_head *tmp;
int i;
@@ -2542,7 +2538,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(tmp, struct ksock_peer, ksnp_list);
if (peer->ksnp_ni == ni)
break;
@@ -2552,8 +2548,8 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
}
if (peer) {
- ksock_route_t *route;
- ksock_conn_t *conn;
+ struct ksock_route *route;
+ struct ksock_conn *conn;
CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
libcfs_id2str(peer->ksnp_id),
@@ -2565,7 +2561,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
!list_empty(&peer->ksnp_zc_req_list));
list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
atomic_read(&route->ksnr_refcount),
route->ksnr_scheduled, route->ksnr_connecting,
@@ -2573,7 +2569,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
}
list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
atomic_read(&conn->ksnc_conn_refcount),
atomic_read(&conn->ksnc_sock_refcount),
@@ -2587,7 +2583,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
void
ksocknal_shutdown(lnet_ni_t *ni)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
int i;
lnet_process_id_t anyid = {0};
@@ -2637,7 +2633,7 @@ ksocknal_shutdown(lnet_ni_t *ni)
}
static int
-ksocknal_enumerate_interfaces(ksock_net_t *net)
+ksocknal_enumerate_interfaces(struct ksock_net *net)
{
char **names;
int i;
@@ -2694,7 +2690,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net)
}
static int
-ksocknal_search_new_ipif(ksock_net_t *net)
+ksocknal_search_new_ipif(struct ksock_net *net)
{
int new_ipif = 0;
int i;
@@ -2703,7 +2699,7 @@ ksocknal_search_new_ipif(ksock_net_t *net)
char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
char *colon = strchr(ifnam, ':');
int found = 0;
- ksock_net_t *tmp;
+ struct ksock_net *tmp;
int j;
if (colon) /* ignore alias device */
@@ -2760,7 +2756,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
for (i = 0; i < nthrs; i++) {
long id;
char name[20];
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
@@ -2782,7 +2778,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
}
static int
-ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
+ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
{
int newif = ksocknal_search_new_ipif(net);
int rc;
@@ -2810,7 +2806,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
int
ksocknal_startup(lnet_ni_t *ni)
{
- ksock_net_t *net;
+ struct ksock_net *net;
int rc;
int i;
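A side note on an idiom these socklnd.c hunks keep touching: the generic lnet_ni_t carries LND-private state in its untyped ni_data pointer, and the LND recovers its own type from it (struct ksock_net here, struct kib_net in o2iblnd). A hedged sketch of that private-data pattern with hypothetical my_* names; plain assignment from void * needs no cast in C, which is why the explicit cast appears only where the pointer is dereferenced in place, as in the LASSERT() lines above:

struct my_net {
	int shutting_down;	/* guards against use after teardown */
};

struct my_ni {
	void *ni_data;		/* LND-private state, opaque to LNet */
};

static void my_startup(struct my_ni *ni, struct my_net *net)
{
	ni->ni_data = net;	/* stash typed state behind void * */
}

static void my_use(struct my_ni *ni)
{
	struct my_net *net = ni->ni_data;	/* implicit conversion */

	if (net->shutting_down)
		return;
	/* ... proceed with per-network state ... */
}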
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index a60d72f9432f..a56632b4ee37 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -77,8 +77,7 @@
struct ksock_sched_info;
-typedef struct /* per scheduler state */
-{
+struct ksock_sched { /* per scheduler state */
spinlock_t kss_lock; /* serialise */
struct list_head kss_rx_conns; /* conn waiting to be read */
struct list_head kss_tx_conns; /* conn waiting to be written */
@@ -89,13 +88,13 @@ typedef struct /* per scheduler state */
struct ksock_sched_info *kss_info; /* owner of it */
struct page *kss_rx_scratch_pgs[LNET_MAX_IOV];
struct kvec kss_scratch_iov[LNET_MAX_IOV];
-} ksock_sched_t;
+};
struct ksock_sched_info {
int ksi_nthreads_max; /* max allowed threads */
int ksi_nthreads; /* number of threads */
int ksi_cpt; /* CPT id */
- ksock_sched_t *ksi_scheds; /* array of schedulers */
+ struct ksock_sched *ksi_scheds; /* array of schedulers */
};
#define KSOCK_CPT_SHIFT 16
@@ -103,16 +102,15 @@ struct ksock_sched_info {
#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT)
#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
-typedef struct /* in-use interface */
-{
+struct ksock_interface { /* in-use interface */
__u32 ksni_ipaddr; /* interface's IP address */
__u32 ksni_netmask; /* interface's network mask */
int ksni_nroutes; /* # routes using (active) */
int ksni_npeers; /* # peers using (passive) */
char ksni_name[IFNAMSIZ]; /* interface name */
-} ksock_interface_t;
+};
-typedef struct {
+struct ksock_tunables {
int *ksnd_timeout; /* "stuck" socket timeout
* (seconds) */
int *ksnd_nscheds; /* # scheduler threads in each
@@ -155,24 +153,24 @@ typedef struct {
* Chelsio TOE) */
int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to
* enable ZC receive */
-} ksock_tunables_t;
+};
-typedef struct {
+struct ksock_net {
__u64 ksnn_incarnation; /* my epoch */
spinlock_t ksnn_lock; /* serialise */
struct list_head ksnn_list; /* chain on global list */
int ksnn_npeers; /* # peers */
int ksnn_shutdown; /* shutting down? */
int ksnn_ninterfaces; /* IP interfaces */
- ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
-} ksock_net_t;
+ struct ksock_interface ksnn_interfaces[LNET_MAX_INTERFACES];
+};
/** connd timeout */
#define SOCKNAL_CONND_TIMEOUT 120
/** reserved thread for accepting & creating new connd */
#define SOCKNAL_CONND_RESV 1
-typedef struct {
+struct ksock_nal_data {
int ksnd_init; /* initialisation state
*/
int ksnd_nnets; /* # networks set up */
@@ -229,7 +227,7 @@ typedef struct {
spinlock_t ksnd_tx_lock; /* serialise, g_lock
* unsafe */
-} ksock_nal_data_t;
+};
#define SOCKNAL_INIT_NOTHING 0
#define SOCKNAL_INIT_DATA 1
@@ -250,8 +248,7 @@ struct ksock_peer; /* forward ref */
struct ksock_route; /* forward ref */
struct ksock_proto; /* forward ref */
-typedef struct /* transmit packet */
-{
+struct ksock_tx { /* transmit packet */
struct list_head tx_list; /* queue on conn for transmission etc
*/
struct list_head tx_zc_list; /* queue on peer for ZC request */
@@ -281,20 +278,20 @@ typedef struct /* transmit packet */
struct kvec iov[1]; /* virt hdr + payload */
} virt;
} tx_frags;
-} ksock_tx_t;
+};
-#define KSOCK_NOOP_TX_SIZE (offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
+#define KSOCK_NOOP_TX_SIZE (offsetof(struct ksock_tx, tx_frags.paged.kiov[0]))
-/* network zero copy callback descriptor embedded in ksock_tx_t */
+/* network zero copy callback descriptor embedded in struct ksock_tx */
/*
* space for the rx frag descriptors; we either read a single contiguous
* header, or up to LNET_MAX_IOV frags of payload of either type.
*/
-typedef union {
+union ksock_rxiovspace {
struct kvec iov[LNET_MAX_IOV];
lnet_kiov_t kiov[LNET_MAX_IOV];
-} ksock_rxiovspace_t;
+};
#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */
@@ -303,7 +300,7 @@ typedef union {
#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
#define SOCKNAL_RX_SLOP 6 /* skipping body */
-typedef struct ksock_conn {
+struct ksock_conn {
struct ksock_peer *ksnc_peer; /* owning peer */
struct ksock_route *ksnc_route; /* owning route */
struct list_head ksnc_list; /* stash on peer's conn list */
@@ -314,8 +311,8 @@ typedef struct ksock_conn {
* write_space() callback */
atomic_t ksnc_conn_refcount;/* conn refcount */
atomic_t ksnc_sock_refcount;/* sock refcount */
- ksock_sched_t *ksnc_scheduler; /* who schedules this connection
- */
+ struct ksock_sched *ksnc_scheduler; /* who schedules this connection
+ */
__u32 ksnc_myipaddr; /* my IP */
__u32 ksnc_ipaddr; /* peer's IP */
int ksnc_port; /* peer's port */
@@ -341,7 +338,7 @@ typedef struct ksock_conn {
struct kvec *ksnc_rx_iov; /* the iovec frags */
int ksnc_rx_nkiov; /* # page frags */
lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
- ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */
+ union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
__u32 ksnc_rx_csum; /* partial checksum for incoming
* data */
void *ksnc_cookie; /* rx lnet_finalize passthru arg
@@ -357,7 +354,7 @@ typedef struct ksock_conn {
struct list_head ksnc_tx_list; /* where I enq waiting for output
* space */
struct list_head ksnc_tx_queue; /* packets waiting to be sent */
- ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet
+ struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet
* message or ZC-ACK */
unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out
*/
@@ -367,9 +364,9 @@ typedef struct ksock_conn {
int ksnc_tx_scheduled; /* being progressed */
unsigned long ksnc_tx_last_post; /* time stamp of the last posted
* TX */
-} ksock_conn_t;
+};
-typedef struct ksock_route {
+struct ksock_route {
struct list_head ksnr_list; /* chain on peer route list */
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
struct ksock_peer *ksnr_peer; /* owning peer */
@@ -389,11 +386,11 @@ typedef struct ksock_route {
unsigned int ksnr_share_count; /* created explicitly? */
int ksnr_conn_count; /* # conns established by this
* route */
-} ksock_route_t;
+};
#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
-typedef struct ksock_peer {
+struct ksock_peer {
struct list_head ksnp_list; /* stash on global peer list */
unsigned long ksnp_last_alive; /* when (in jiffies) I was last
* alive */
@@ -420,49 +417,49 @@ typedef struct ksock_peer {
/* preferred local interfaces */
__u32 ksnp_passive_ips[LNET_MAX_INTERFACES];
-} ksock_peer_t;
+};
-typedef struct ksock_connreq {
+struct ksock_connreq {
struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */
lnet_ni_t *ksncr_ni; /* chosen NI */
struct socket *ksncr_sock; /* accepted socket */
-} ksock_connreq_t;
+};
-extern ksock_nal_data_t ksocknal_data;
-extern ksock_tunables_t ksocknal_tunables;
+extern struct ksock_nal_data ksocknal_data;
+extern struct ksock_tunables ksocknal_tunables;
#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not
* preferred */
-typedef struct ksock_proto {
+struct ksock_proto {
/* version number of protocol */
int pro_version;
/* handshake function */
- int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *);
+ int (*pro_send_hello)(struct ksock_conn *, ksock_hello_msg_t *);
/* handshake function */
- int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);
+ int (*pro_recv_hello)(struct ksock_conn *, ksock_hello_msg_t *, int);
/* message pack */
- void (*pro_pack)(ksock_tx_t *);
+ void (*pro_pack)(struct ksock_tx *);
/* message unpack */
void (*pro_unpack)(ksock_msg_t *);
/* queue tx on the connection */
- ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *);
+ struct ksock_tx *(*pro_queue_tx_msg)(struct ksock_conn *, struct ksock_tx *);
/* queue ZC ack on the connection */
- int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64);
+ int (*pro_queue_tx_zcack)(struct ksock_conn *, struct ksock_tx *, __u64);
/* handle ZC request */
- int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int);
+ int (*pro_handle_zcreq)(struct ksock_conn *, __u64, int);
/* handle ZC ACK */
- int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64);
+ int (*pro_handle_zcack)(struct ksock_conn *, __u64, __u64);
/*
* msg type matches the connection type:
@@ -471,12 +468,12 @@ typedef struct ksock_proto {
* return MATCH_YES : matching type
* return MATCH_MAY : can be backup
*/
- int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int);
-} ksock_proto_t;
+ int (*pro_match_tx)(struct ksock_conn *, struct ksock_tx *, int);
+};
-extern ksock_proto_t ksocknal_protocol_v1x;
-extern ksock_proto_t ksocknal_protocol_v2x;
-extern ksock_proto_t ksocknal_protocol_v3x;
+extern struct ksock_proto ksocknal_protocol_v1x;
+extern struct ksock_proto ksocknal_protocol_v2x;
+extern struct ksock_proto ksocknal_protocol_v3x;
#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
@@ -517,17 +514,17 @@ ksocknal_nid2peerlist(lnet_nid_t nid)
}
static inline void
-ksocknal_conn_addref(ksock_conn_t *conn)
+ksocknal_conn_addref(struct ksock_conn *conn)
{
LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
atomic_inc(&conn->ksnc_conn_refcount);
}
-void ksocknal_queue_zombie_conn(ksock_conn_t *conn);
-void ksocknal_finalize_zcreq(ksock_conn_t *conn);
+void ksocknal_queue_zombie_conn(struct ksock_conn *conn);
+void ksocknal_finalize_zcreq(struct ksock_conn *conn);
static inline void
-ksocknal_conn_decref(ksock_conn_t *conn)
+ksocknal_conn_decref(struct ksock_conn *conn)
{
LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
@@ -535,7 +532,7 @@ ksocknal_conn_decref(ksock_conn_t *conn)
}
static inline int
-ksocknal_connsock_addref(ksock_conn_t *conn)
+ksocknal_connsock_addref(struct ksock_conn *conn)
{
int rc = -ESHUTDOWN;
@@ -551,7 +548,7 @@ ksocknal_connsock_addref(ksock_conn_t *conn)
}
static inline void
-ksocknal_connsock_decref(ksock_conn_t *conn)
+ksocknal_connsock_decref(struct ksock_conn *conn)
{
LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
@@ -563,17 +560,17 @@ ksocknal_connsock_decref(ksock_conn_t *conn)
}
static inline void
-ksocknal_tx_addref(ksock_tx_t *tx)
+ksocknal_tx_addref(struct ksock_tx *tx)
{
LASSERT(atomic_read(&tx->tx_refcount) > 0);
atomic_inc(&tx->tx_refcount);
}
-void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx);
-void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx);
+void ksocknal_tx_prep(struct ksock_conn *, struct ksock_tx *tx);
+void ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx);
static inline void
-ksocknal_tx_decref(ksock_tx_t *tx)
+ksocknal_tx_decref(struct ksock_tx *tx)
{
LASSERT(atomic_read(&tx->tx_refcount) > 0);
if (atomic_dec_and_test(&tx->tx_refcount))
@@ -581,16 +578,16 @@ ksocknal_tx_decref(ksock_tx_t *tx)
}
static inline void
-ksocknal_route_addref(ksock_route_t *route)
+ksocknal_route_addref(struct ksock_route *route)
{
LASSERT(atomic_read(&route->ksnr_refcount) > 0);
atomic_inc(&route->ksnr_refcount);
}
-void ksocknal_destroy_route(ksock_route_t *route);
+void ksocknal_destroy_route(struct ksock_route *route);
static inline void
-ksocknal_route_decref(ksock_route_t *route)
+ksocknal_route_decref(struct ksock_route *route)
{
LASSERT(atomic_read(&route->ksnr_refcount) > 0);
if (atomic_dec_and_test(&route->ksnr_refcount))
@@ -598,16 +595,16 @@ ksocknal_route_decref(ksock_route_t *route)
}
static inline void
-ksocknal_peer_addref(ksock_peer_t *peer)
+ksocknal_peer_addref(struct ksock_peer *peer)
{
LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
atomic_inc(&peer->ksnp_refcount);
}
-void ksocknal_destroy_peer(ksock_peer_t *peer);
+void ksocknal_destroy_peer(struct ksock_peer *peer);
static inline void
-ksocknal_peer_decref(ksock_peer_t *peer)
+ksocknal_peer_decref(struct ksock_peer *peer)
{
LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
if (atomic_dec_and_test(&peer->ksnp_refcount))
@@ -625,71 +622,71 @@ int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
-ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
-ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
-void ksocknal_peer_failed(ksock_peer_t *peer);
-int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
+struct ksock_peer *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
+struct ksock_peer *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
+void ksocknal_peer_failed(struct ksock_peer *peer);
+int ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
struct socket *sock, int type);
-void ksocknal_close_conn_locked(ksock_conn_t *conn, int why);
-void ksocknal_terminate_conn(ksock_conn_t *conn);
-void ksocknal_destroy_conn(ksock_conn_t *conn);
-int ksocknal_close_peer_conns_locked(ksock_peer_t *peer,
+void ksocknal_close_conn_locked(struct ksock_conn *conn, int why);
+void ksocknal_terminate_conn(struct ksock_conn *conn);
+void ksocknal_destroy_conn(struct ksock_conn *conn);
+int ksocknal_close_peer_conns_locked(struct ksock_peer *peer,
__u32 ipaddr, int why);
-int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why);
+int ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why);
int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
-ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
- ksock_tx_t *tx, int nonblk);
+struct ksock_conn *ksocknal_find_conn_locked(struct ksock_peer *peer,
+ struct ksock_tx *tx, int nonblk);
-int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
+int ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx,
lnet_process_id_t id);
-ksock_tx_t *ksocknal_alloc_tx(int type, int size);
-void ksocknal_free_tx(ksock_tx_t *tx);
-ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
-void ksocknal_next_tx_carrier(ksock_conn_t *conn);
-void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
+struct ksock_tx *ksocknal_alloc_tx(int type, int size);
+void ksocknal_free_tx(struct ksock_tx *tx);
+struct ksock_tx *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
+void ksocknal_next_tx_carrier(struct ksock_conn *conn);
+void ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn);
void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error);
void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
void ksocknal_thread_fini(void);
-void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
-ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
-ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
-int ksocknal_new_packet(ksock_conn_t *conn, int skip);
+void ksocknal_launch_all_connections_locked(struct ksock_peer *peer);
+struct ksock_route *ksocknal_find_connectable_route_locked(struct ksock_peer *peer);
+struct ksock_route *ksocknal_find_connecting_route_locked(struct ksock_peer *peer);
+int ksocknal_new_packet(struct ksock_conn *conn, int skip);
int ksocknal_scheduler(void *arg);
int ksocknal_connd(void *arg);
int ksocknal_reaper(void *arg);
-int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+int ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn,
lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
-int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+int ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
ksock_hello_msg_t *hello, lnet_process_id_t *id,
__u64 *incarnation);
-void ksocknal_read_callback(ksock_conn_t *conn);
-void ksocknal_write_callback(ksock_conn_t *conn);
-
-int ksocknal_lib_zc_capable(ksock_conn_t *conn);
-void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_push_conn(ksock_conn_t *conn);
-int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
+void ksocknal_read_callback(struct ksock_conn *conn);
+void ksocknal_write_callback(struct ksock_conn *conn);
+
+int ksocknal_lib_zc_capable(struct ksock_conn *conn);
+void ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn);
+void ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn);
+void ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn);
+void ksocknal_lib_push_conn(struct ksock_conn *conn);
+int ksocknal_lib_get_conn_addrs(struct ksock_conn *conn);
int ksocknal_lib_setup_sock(struct socket *so);
-int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx);
-int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx);
-void ksocknal_lib_eager_ack(ksock_conn_t *conn);
-int ksocknal_lib_recv_iov(ksock_conn_t *conn);
-int ksocknal_lib_recv_kiov(ksock_conn_t *conn);
-int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem,
+int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx);
+int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx);
+void ksocknal_lib_eager_ack(struct ksock_conn *conn);
+int ksocknal_lib_recv_iov(struct ksock_conn *conn);
+int ksocknal_lib_recv_kiov(struct ksock_conn *conn);
+int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
int *rxmem, int *nagle);
-void ksocknal_read_callback(ksock_conn_t *conn);
-void ksocknal_write_callback(ksock_conn_t *conn);
+void ksocknal_read_callback(struct ksock_conn *conn);
+void ksocknal_write_callback(struct ksock_conn *conn);
int ksocknal_tunables_init(void);
-void ksocknal_lib_csum_tx(ksock_tx_t *tx);
+void ksocknal_lib_csum_tx(struct ksock_tx *tx);
-int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
+int ksocknal_lib_memory_pressure(struct ksock_conn *conn);
int ksocknal_lib_bind_thread_to_cpu(int id);
#endif /* _SOCKLND_SOCKLND_H_ */
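[Editor's note: the bulk of this patch mechanically replaces the ksock_*_t typedefs with explicit struct ksock_* references. Kernel coding style (and the checkpatch.pl "do not add new typedefs" warning) discourages typedefs for structures: they hide what the type is and force the defining header to be included even where a forward declaration of the struct would do. A minimal illustration, with a hypothetical "foo" type that is not from this patch:

    /* typedef style (discouraged): the name only exists once the
     * header defining it has been included
     */
    typedef struct foo {
            int bar;
    } foo_t;

    void foo_consume(foo_t *f);

    /* plain struct style (preferred): a forward declaration is
     * enough for pointer use, so headers stay decoupled
     */
    struct foo;
    void foo_consume(struct foo *f);
]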
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 976fd78926e0..303576d815c6 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -23,10 +23,10 @@
#include "socklnd.h"
-ksock_tx_t *
+struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
{
- ksock_tx_t *tx = NULL;
+ struct ksock_tx *tx = NULL;
if (type == KSOCK_MSG_NOOP) {
LASSERT(size == KSOCK_NOOP_TX_SIZE);
@@ -36,7 +36,7 @@ ksocknal_alloc_tx(int type, int size)
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
- next, ksock_tx_t, tx_list);
+ next, struct ksock_tx, tx_list);
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
}
@@ -61,10 +61,10 @@ ksocknal_alloc_tx(int type, int size)
return tx;
}
-ksock_tx_t *
+struct ksock_tx *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
if (!tx) {
@@ -87,7 +87,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
}
void
-ksocknal_free_tx(ksock_tx_t *tx)
+ksocknal_free_tx(struct ksock_tx *tx)
{
atomic_dec(&ksocknal_data.ksnd_nactive_txs);
@@ -104,7 +104,7 @@ ksocknal_free_tx(ksock_tx_t *tx)
}
static int
-ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct kvec *iov = tx->tx_iov;
int nob;
@@ -126,7 +126,7 @@ ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
do {
LASSERT(tx->tx_niov > 0);
- if (nob < (int) iov->iov_len) {
+ if (nob < (int)iov->iov_len) {
iov->iov_base = (void *)((char *)iov->iov_base + nob);
iov->iov_len -= nob;
return rc;
@@ -141,7 +141,7 @@ ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
}
static int
-ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
lnet_kiov_t *kiov = tx->tx_kiov;
int nob;
@@ -179,7 +179,7 @@ ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
}
static int
-ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
int rc;
int bufnob;
@@ -247,7 +247,7 @@ ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
}
static int
-ksocknal_recv_iov(ksock_conn_t *conn)
+ksocknal_recv_iov(struct ksock_conn *conn)
{
struct kvec *iov = conn->ksnc_rx_iov;
int nob;
@@ -294,7 +294,7 @@ ksocknal_recv_iov(ksock_conn_t *conn)
}
static int
-ksocknal_recv_kiov(ksock_conn_t *conn)
+ksocknal_recv_kiov(struct ksock_conn *conn)
{
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
int nob;
@@ -326,7 +326,7 @@ ksocknal_recv_kiov(ksock_conn_t *conn)
do {
LASSERT(conn->ksnc_rx_nkiov > 0);
- if (nob < (int) kiov->kiov_len) {
+ if (nob < (int)kiov->kiov_len) {
kiov->kiov_offset += nob;
kiov->kiov_len -= nob;
return -EAGAIN;
@@ -341,7 +341,7 @@ ksocknal_recv_kiov(ksock_conn_t *conn)
}
static int
-ksocknal_receive(ksock_conn_t *conn)
+ksocknal_receive(struct ksock_conn *conn)
{
/*
* Return 1 on success, 0 on EOF, < 0 on error.
@@ -391,7 +391,7 @@ ksocknal_receive(ksock_conn_t *conn)
}
void
-ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
+ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx)
{
lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;
@@ -412,10 +412,10 @@ ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
void
ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
{
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, ksock_tx_t, tx_list);
+ tx = list_entry(txlist->next, struct ksock_tx, tx_list);
if (error && tx->tx_lnetmsg) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
@@ -435,10 +435,10 @@ ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
}
static void
-ksocknal_check_zc_req(ksock_tx_t *tx)
+ksocknal_check_zc_req(struct ksock_tx *tx)
{
- ksock_conn_t *conn = tx->tx_conn;
- ksock_peer_t *peer = conn->ksnc_peer;
+ struct ksock_conn *conn = tx->tx_conn;
+ struct ksock_peer *peer = conn->ksnc_peer;
/*
* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
@@ -482,9 +482,9 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
}
static void
-ksocknal_uncheck_zc_req(ksock_tx_t *tx)
+ksocknal_uncheck_zc_req(struct ksock_tx *tx)
{
- ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
+ struct ksock_peer *peer = tx->tx_conn->ksnc_peer;
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
@@ -508,7 +508,7 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
}
static int
-ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
int rc;
@@ -583,7 +583,7 @@ ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
}
static void
-ksocknal_launch_connection_locked(ksock_route_t *route)
+ksocknal_launch_connection_locked(struct ksock_route *route)
{
/* called holding write lock on ksnd_global_lock */
@@ -604,9 +604,9 @@ ksocknal_launch_connection_locked(ksock_route_t *route)
}
void
-ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
{
- ksock_route_t *route;
+ struct ksock_route *route;
/* called holding write lock on ksnd_global_lock */
for (;;) {
@@ -619,18 +619,18 @@ ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
}
}
-ksock_conn_t *
-ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
+struct ksock_conn *
+ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk)
{
struct list_head *tmp;
- ksock_conn_t *conn;
- ksock_conn_t *typed = NULL;
- ksock_conn_t *fallback = NULL;
+ struct ksock_conn *conn;
+ struct ksock_conn *typed = NULL;
+ struct ksock_conn *fallback = NULL;
int tnob = 0;
int fnob = 0;
list_for_each(tmp, &peer->ksnp_conns) {
- ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
+ struct ksock_conn *c = list_entry(tmp, struct ksock_conn, ksnc_list);
int nob = atomic_read(&c->ksnc_tx_nob) +
c->ksnc_sock->sk->sk_wmem_queued;
int rc;
@@ -677,7 +677,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
}
void
-ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
conn->ksnc_proto->pro_pack(tx);
@@ -687,11 +687,11 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
}
void
-ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
ksock_msg_t *msg = &tx->tx_msg;
- ksock_tx_t *ztx = NULL;
+ struct ksock_tx *ztx = NULL;
int bufnob = 0;
/*
@@ -784,15 +784,15 @@ ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
spin_unlock_bh(&sched->kss_lock);
}
-ksock_route_t *
-ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
+struct ksock_route *
+ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
{
unsigned long now = cfs_time_current();
struct list_head *tmp;
- ksock_route_t *route;
+ struct ksock_route *route;
list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
@@ -820,14 +820,14 @@ ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
return NULL;
}
-ksock_route_t *
-ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
+struct ksock_route *
+ksocknal_find_connecting_route_locked(struct ksock_peer *peer)
{
struct list_head *tmp;
- ksock_route_t *route;
+ struct ksock_route *route;
list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
@@ -839,10 +839,10 @@ ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
}
int
-ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx, lnet_process_id_t id)
{
- ksock_peer_t *peer;
- ksock_conn_t *conn;
+ struct ksock_peer *peer;
+ struct ksock_conn *conn;
rwlock_t *g_lock;
int retry;
int rc;
@@ -942,7 +942,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
int desc_size;
int rc;
@@ -960,10 +960,10 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
LASSERT(!in_interrupt());
if (payload_iov)
- desc_size = offsetof(ksock_tx_t,
+ desc_size = offsetof(struct ksock_tx,
tx_frags.virt.iov[1 + payload_niov]);
else
- desc_size = offsetof(ksock_tx_t,
+ desc_size = offsetof(struct ksock_tx,
tx_frags.paged.kiov[payload_niov]);
if (lntmsg->msg_vmflush)
@@ -1037,7 +1037,7 @@ ksocknal_thread_fini(void)
}
int
-ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
static char ksocknal_slop_buffer[4096];
@@ -1120,7 +1120,7 @@ ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
}
static int
-ksocknal_process_receive(ksock_conn_t *conn)
+ksocknal_process_receive(struct ksock_conn *conn)
{
lnet_hdr_t *lhdr;
lnet_process_id_t *id;
@@ -1328,8 +1328,8 @@ ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
- ksock_conn_t *conn = private;
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_conn *conn = private;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
LASSERT(mlen <= rlen);
LASSERT(niov <= LNET_MAX_IOV);
@@ -1382,7 +1382,7 @@ ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
}
static inline int
-ksocknal_sched_cansleep(ksock_sched_t *sched)
+ksocknal_sched_cansleep(struct ksock_sched *sched)
{
int rc;
@@ -1399,9 +1399,9 @@ ksocknal_sched_cansleep(ksock_sched_t *sched)
int ksocknal_scheduler(void *arg)
{
struct ksock_sched_info *info;
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct ksock_sched *sched;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
int rc;
int nloops = 0;
long id = (long)arg;
@@ -1426,7 +1426,7 @@ int ksocknal_scheduler(void *arg)
if (!list_empty(&sched->kss_rx_conns)) {
conn = list_entry(sched->kss_rx_conns.next,
- ksock_conn_t, ksnc_rx_list);
+ struct ksock_conn, ksnc_rx_list);
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
@@ -1481,7 +1481,7 @@ int ksocknal_scheduler(void *arg)
}
conn = list_entry(sched->kss_tx_conns.next,
- ksock_conn_t, ksnc_tx_list);
+ struct ksock_conn, ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
@@ -1489,7 +1489,7 @@ int ksocknal_scheduler(void *arg)
LASSERT(!list_empty(&conn->ksnc_tx_queue));
tx = list_entry(conn->ksnc_tx_queue.next,
- ksock_tx_t, tx_list);
+ struct ksock_tx, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
@@ -1575,9 +1575,9 @@ int ksocknal_scheduler(void *arg)
* Add connection to kss_rx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_read_callback(ksock_conn_t *conn)
+void ksocknal_read_callback(struct ksock_conn *conn)
{
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
sched = conn->ksnc_scheduler;
@@ -1600,9 +1600,9 @@ void ksocknal_read_callback(ksock_conn_t *conn)
* Add connection to kss_tx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_write_callback(ksock_conn_t *conn)
+void ksocknal_write_callback(struct ksock_conn *conn)
{
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
sched = conn->ksnc_scheduler;
@@ -1623,7 +1623,7 @@ void ksocknal_write_callback(ksock_conn_t *conn)
spin_unlock_bh(&sched->kss_lock);
}
-static ksock_proto_t *
+static struct ksock_proto *
ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
{
__u32 version = 0;
@@ -1666,11 +1666,11 @@ ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
}
int
-ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn,
lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
{
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
- ksock_net_t *net = (ksock_net_t *)ni->ni_data;
+ struct ksock_net *net = (struct ksock_net *)ni->ni_data;
LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
@@ -1704,7 +1704,7 @@ ksocknal_invert_type(int type)
}
int
-ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
__u64 *incarnation)
{
@@ -1718,7 +1718,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
int timeout;
int proto_match;
int rc;
- ksock_proto_t *proto;
+ struct ksock_proto *proto;
lnet_process_id_t recv_id;
/* socket type set on active connections - not set on passive */
@@ -1847,10 +1847,10 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
}
static int
-ksocknal_connect(ksock_route_t *route)
+ksocknal_connect(struct ksock_route *route)
{
LIST_HEAD(zombies);
- ksock_peer_t *peer = route->ksnr_peer;
+ struct ksock_peer *peer = route->ksnr_peer;
int type;
int wanted;
struct socket *sock;
@@ -1989,7 +1989,7 @@ ksocknal_connect(ksock_route_t *route)
if (!list_empty(&peer->ksnp_tx_queue) &&
!peer->ksnp_accepting &&
!ksocknal_find_connecting_route_locked(peer)) {
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
/*
* ksnp_tx_queue is queued on a conn on successful
@@ -1997,7 +1997,7 @@ ksocknal_connect(ksock_route_t *route)
*/
if (!list_empty(&peer->ksnp_conns)) {
conn = list_entry(peer->ksnp_conns.next,
- ksock_conn_t, ksnc_list);
+ struct ksock_conn, ksnc_list);
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
}
@@ -2131,10 +2131,10 @@ ksocknal_connd_check_stop(time64_t sec, long *timeout)
* Go through connd_routes queue looking for a route that we can process
* right now, @timeout_p can be updated if we need to come back later
*/
-static ksock_route_t *
+static struct ksock_route *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
- ksock_route_t *route;
+ struct ksock_route *route;
unsigned long now;
now = cfs_time_current();
@@ -2158,7 +2158,7 @@ int
ksocknal_connd(void *arg)
{
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
- ksock_connreq_t *cr;
+ struct ksock_connreq *cr;
wait_queue_t wait;
int nloops = 0;
int cons_retry = 0;
@@ -2174,7 +2174,7 @@ ksocknal_connd(void *arg)
ksocknal_data.ksnd_connd_running++;
while (!ksocknal_data.ksnd_shuttingdown) {
- ksock_route_t *route = NULL;
+ struct ksock_route *route = NULL;
time64_t sec = ktime_get_real_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
int dropped_lock = 0;
@@ -2192,8 +2192,8 @@ ksocknal_connd(void *arg)
if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
/* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
- next, ksock_connreq_t, ksncr_list);
+ cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
+ struct ksock_connreq, ksncr_list);
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
@@ -2267,17 +2267,17 @@ ksocknal_connd(void *arg)
return 0;
}
-static ksock_conn_t *
-ksocknal_find_timed_out_conn(ksock_peer_t *peer)
+static struct ksock_conn *
+ksocknal_find_timed_out_conn(struct ksock_peer *peer)
{
/* We're called with a shared lock on ksnd_global_lock */
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
struct list_head *ctmp;
list_for_each(ctmp, &peer->ksnp_conns) {
int error;
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT(!conn->ksnc_closing);
@@ -2351,10 +2351,10 @@ ksocknal_find_timed_out_conn(ksock_peer_t *peer)
}
static inline void
-ksocknal_flush_stale_txs(ksock_peer_t *peer)
+ksocknal_flush_stale_txs(struct ksock_peer *peer)
{
- ksock_tx_t *tx;
- ksock_tx_t *tmp;
+ struct ksock_tx *tx;
+ struct ksock_tx *tmp;
LIST_HEAD(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2374,12 +2374,12 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
}
static int
-ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+ksocknal_send_keepalive_locked(struct ksock_peer *peer)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct ksock_sched *sched;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
return 0;
@@ -2440,9 +2440,9 @@ static void
ksocknal_check_peer_timeouts(int idx)
{
struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
- ksock_peer_t *peer;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct ksock_peer *peer;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
again:
/*
@@ -2483,8 +2483,8 @@ ksocknal_check_peer_timeouts(int idx)
* holding only shared lock
*/
if (!list_empty(&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx = list_entry(peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ struct ksock_tx *tx = list_entry(peer->ksnp_tx_queue.next,
+ struct ksock_tx, tx_list);
if (cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline)) {
@@ -2518,7 +2518,7 @@ ksocknal_check_peer_timeouts(int idx)
}
tx = list_entry(peer->ksnp_zc_req_list.next,
- ksock_tx_t, tx_zc_list);
+ struct ksock_tx, tx_zc_list);
deadline = tx->tx_deadline;
resid = tx->tx_resid;
conn = tx->tx_conn;
@@ -2544,8 +2544,8 @@ int
ksocknal_reaper(void *arg)
{
wait_queue_t wait;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
+ struct ksock_conn *conn;
+ struct ksock_sched *sched;
struct list_head enomem_conns;
int nenomem_conns;
long timeout;
@@ -2563,7 +2563,7 @@ ksocknal_reaper(void *arg)
while (!ksocknal_data.ksnd_shuttingdown) {
if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
- ksock_conn_t, ksnc_list);
+ struct ksock_conn, ksnc_list);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2577,7 +2577,7 @@ ksocknal_reaper(void *arg)
if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
- ksock_conn_t, ksnc_list);
+ struct ksock_conn, ksnc_list);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2599,7 +2599,7 @@ ksocknal_reaper(void *arg)
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
while (!list_empty(&enomem_conns)) {
- conn = list_entry(enomem_conns.next, ksock_conn_t,
+ conn = list_entry(enomem_conns.next, struct ksock_conn,
ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
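[Editor's note: the conversion also has to touch every list_entry(ptr, ksock_tx_t, tx_list) call, because list_entry() is just container_of(), whose second argument is the containing *type*; once the typedef is gone, the spelled-out struct name must be used there too. A self-contained sketch of the idiom, with a hypothetical "item" type:

    #include <linux/list.h>

    struct item {
            int value;
            struct list_head link;  /* embedded list node */
    };

    /* Given the list head, recover the first containing 'struct item';
     * list_entry() expands to container_of(ptr, type, member).
     */
    static struct item *first_item(struct list_head *head)
    {
            if (list_empty(head))
                    return NULL;
            return list_entry(head->next, struct item, link);
    }
]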
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 964b4e338fe0..6a17757fce1e 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -37,7 +33,7 @@
#include "socklnd.h"
int
-ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
+ksocknal_lib_get_conn_addrs(struct ksock_conn *conn)
{
int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr,
&conn->ksnc_port);
@@ -60,7 +56,7 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
}
int
-ksocknal_lib_zc_capable(ksock_conn_t *conn)
+ksocknal_lib_zc_capable(struct ksock_conn *conn)
{
int caps = conn->ksnc_sock->sk->sk_route_caps;
@@ -75,7 +71,7 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn)
}
int
-ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct socket *sock = conn->ksnc_sock;
int nob;
@@ -118,7 +114,7 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
}
int
-ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct socket *sock = conn->ksnc_sock;
lnet_kiov_t *kiov = tx->tx_kiov;
@@ -187,7 +183,7 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
}
void
-ksocknal_lib_eager_ack(ksock_conn_t *conn)
+ksocknal_lib_eager_ack(struct ksock_conn *conn)
{
int opt = 1;
struct socket *sock = conn->ksnc_sock;
@@ -203,7 +199,7 @@ ksocknal_lib_eager_ack(ksock_conn_t *conn)
}
int
-ksocknal_lib_recv_iov(ksock_conn_t *conn)
+ksocknal_lib_recv_iov(struct ksock_conn *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
struct kvec scratch;
@@ -309,7 +305,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
}
int
-ksocknal_lib_recv_kiov(ksock_conn_t *conn)
+ksocknal_lib_recv_kiov(struct ksock_conn *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
struct kvec scratch;
@@ -393,7 +389,7 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
}
void
-ksocknal_lib_csum_tx(ksock_tx_t *tx)
+ksocknal_lib_csum_tx(struct ksock_tx *tx)
{
int i;
__u32 csum;
@@ -432,7 +428,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
}
int
-ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle)
{
struct socket *sock = conn->ksnc_sock;
int len;
@@ -562,7 +558,7 @@ ksocknal_lib_setup_sock(struct socket *sock)
}
void
-ksocknal_lib_push_conn(ksock_conn_t *conn)
+ksocknal_lib_push_conn(struct ksock_conn *conn)
{
struct sock *sk;
struct tcp_sock *tp;
@@ -599,7 +595,7 @@ ksocknal_lib_push_conn(ksock_conn_t *conn)
static void
ksocknal_data_ready(struct sock *sk)
{
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
@@ -619,7 +615,7 @@ ksocknal_data_ready(struct sock *sk)
static void
ksocknal_write_space(struct sock *sk)
{
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
int wspace;
int min_wpace;
@@ -663,14 +659,14 @@ ksocknal_write_space(struct sock *sk)
}
void
-ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn)
{
conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}
void
-ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn)
{
sock->sk->sk_user_data = conn;
sock->sk->sk_data_ready = ksocknal_data_ready;
@@ -678,7 +674,7 @@ ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
}
void
-ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn)
{
/*
* Remove conn's network callbacks.
@@ -697,10 +693,10 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
}
int
-ksocknal_lib_memory_pressure(ksock_conn_t *conn)
+ksocknal_lib_memory_pressure(struct ksock_conn *conn)
{
int rc = 0;
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
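[Editor's note: socklnd_lib.c shows the pattern behind the renamed save/set/reset helpers: the LND saves the socket's original sk_data_ready/sk_write_space callbacks, installs its own with the connection stashed in sk_user_data, and restores the originals on teardown. A condensed sketch of that flow, under the assumption that locking against closing sockets (which the real code does) is omitted; the function names here are hypothetical:

    static void my_data_ready(struct sock *sk)
    {
            struct ksock_conn *conn = sk->sk_user_data;

            if (conn)
                    ksocknal_read_callback(conn);   /* queue for scheduler */
    }

    static void install_callbacks(struct socket *sock, struct ksock_conn *conn)
    {
            conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;  /* save */
            sock->sk->sk_user_data = conn;
            sock->sk->sk_data_ready = my_data_ready;                /* set */
    }

    static void restore_callbacks(struct socket *sock, struct ksock_conn *conn)
    {
            sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;  /* reset */
            sock->sk->sk_user_data = NULL;
    }
]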
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
index 6329cbe66573..fc7eec83ac07 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
@@ -139,7 +139,7 @@ module_param(protocol, int, 0644);
MODULE_PARM_DESC(protocol, "protocol version");
#endif
-ksock_tunables_t ksocknal_tunables;
+struct ksock_tunables ksocknal_tunables;
int ksocknal_tunables_init(void)
{
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 32cc31e4cc29..82e174f6d9fe 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -38,8 +38,8 @@
* pro_match_tx() : Called holding glock
*/
-static ksock_tx_t *
-ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
+static struct ksock_tx *
+ksocknal_queue_tx_msg_v1(struct ksock_conn *conn, struct ksock_tx *tx_msg)
{
/* V1.x, just enqueue it */
list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
@@ -47,9 +47,9 @@ ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
}
void
-ksocknal_next_tx_carrier(ksock_conn_t *conn)
+ksocknal_next_tx_carrier(struct ksock_conn *conn)
{
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
+ struct ksock_tx *tx = conn->ksnc_tx_carrier;
/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
LASSERT(!list_empty(&conn->ksnc_tx_queue));
@@ -66,10 +66,10 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn)
}
static int
-ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
- ksock_tx_t *tx_ack, __u64 cookie)
+ksocknal_queue_tx_zcack_v2(struct ksock_conn *conn,
+ struct ksock_tx *tx_ack, __u64 cookie)
{
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
+ struct ksock_tx *tx = conn->ksnc_tx_carrier;
LASSERT(!tx_ack ||
tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
@@ -112,10 +112,10 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
return 1;
}
-static ksock_tx_t *
-ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
+static struct ksock_tx *
+ksocknal_queue_tx_msg_v2(struct ksock_conn *conn, struct ksock_tx *tx_msg)
{
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
+ struct ksock_tx *tx = conn->ksnc_tx_carrier;
/*
* Enqueue tx_msg:
@@ -149,10 +149,10 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
}
static int
-ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
- ksock_tx_t *tx_ack, __u64 cookie)
+ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
+ struct ksock_tx *tx_ack, __u64 cookie)
{
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
if (conn->ksnc_type != SOCKLND_CONN_ACK)
return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
@@ -267,7 +267,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
}
static int
-ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
+ksocknal_match_tx(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
{
int nob;
@@ -311,7 +311,7 @@ ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
}
static int
-ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
+ksocknal_match_tx_v3(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
{
int nob;
@@ -355,18 +355,18 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
/* (Sink) handle incoming ZC request from sender */
static int
-ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
+ksocknal_handle_zcreq(struct ksock_conn *c, __u64 cookie, int remote)
{
- ksock_peer_t *peer = c->ksnc_peer;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct ksock_peer *peer = c->ksnc_peer;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
int rc;
read_lock(&ksocknal_data.ksnd_global_lock);
conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
if (conn) {
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
@@ -399,12 +399,12 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
/* (Sender) handle ZC_ACK from sink */
static int
-ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
+ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
{
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_tx_t *tx;
- ksock_tx_t *temp;
- ksock_tx_t *tmp;
+ struct ksock_peer *peer = conn->ksnc_peer;
+ struct ksock_tx *tx;
+ struct ksock_tx *temp;
+ struct ksock_tx *tmp;
LIST_HEAD(zlist);
int count;
@@ -446,7 +446,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
}
static int
-ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
+ksocknal_send_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello)
{
struct socket *sock = conn->ksnc_sock;
lnet_hdr_t *hdr;
@@ -503,7 +503,7 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
if (!hello->kshm_nips)
goto out;
- for (i = 0; i < (int) hello->kshm_nips; i++)
+ for (i = 0; i < (int)hello->kshm_nips; i++)
hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]);
rc = lnet_sock_write(sock, hello->kshm_ips,
@@ -521,7 +521,7 @@ out:
}
static int
-ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
+ksocknal_send_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello)
{
struct socket *sock = conn->ksnc_sock;
int rc;
@@ -563,7 +563,7 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
}
static int
-ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
+ksocknal_recv_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello,
int timeout)
{
struct socket *sock = conn->ksnc_sock;
@@ -622,7 +622,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
goto out;
}
- for (i = 0; i < (int) hello->kshm_nips; i++) {
+ for (i = 0; i < (int)hello->kshm_nips; i++) {
hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
if (!hello->kshm_ips[i]) {
@@ -639,7 +639,7 @@ out:
}
static int
-ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
+ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout)
{
struct socket *sock = conn->ksnc_sock;
int rc;
@@ -690,7 +690,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
return rc;
}
- for (i = 0; i < (int) hello->kshm_nips; i++) {
+ for (i = 0; i < (int)hello->kshm_nips; i++) {
if (conn->ksnc_flip)
__swab32s(&hello->kshm_ips[i]);
@@ -705,7 +705,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
}
static void
-ksocknal_pack_msg_v1(ksock_tx_t *tx)
+ksocknal_pack_msg_v1(struct ksock_tx *tx)
{
/* V1.x has no KSOCK_MSG_NOOP */
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
@@ -719,7 +719,7 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx)
}
static void
-ksocknal_pack_msg_v2(ksock_tx_t *tx)
+ksocknal_pack_msg_v2(struct ksock_tx *tx)
{
tx->tx_iov[0].iov_base = &tx->tx_msg;
@@ -755,7 +755,7 @@ ksocknal_unpack_msg_v2(ksock_msg_t *msg)
return; /* Do nothing */
}
-ksock_proto_t ksocknal_protocol_v1x = {
+struct ksock_proto ksocknal_protocol_v1x = {
.pro_version = KSOCK_PROTO_V1,
.pro_send_hello = ksocknal_send_hello_v1,
.pro_recv_hello = ksocknal_recv_hello_v1,
@@ -768,7 +768,7 @@ ksock_proto_t ksocknal_protocol_v1x = {
.pro_match_tx = ksocknal_match_tx
};
-ksock_proto_t ksocknal_protocol_v2x = {
+struct ksock_proto ksocknal_protocol_v2x = {
.pro_version = KSOCK_PROTO_V2,
.pro_send_hello = ksocknal_send_hello_v2,
.pro_recv_hello = ksocknal_recv_hello_v2,
@@ -781,7 +781,7 @@ ksock_proto_t ksocknal_protocol_v2x = {
.pro_match_tx = ksocknal_match_tx
};
-ksock_proto_t ksocknal_protocol_v3x = {
+struct ksock_proto ksocknal_protocol_v3x = {
.pro_version = KSOCK_PROTO_V3,
.pro_send_hello = ksocknal_send_hello_v2,
.pro_recv_hello = ksocknal_recv_hello_v2,
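[Editor's note: the ksock_proto_t to struct ksock_proto rename at the end of socklnd_proto.c touches the classic kernel "ops table" pattern: one structure of function pointers per protocol version, chosen at handshake time, so callers dispatch without caring which version is live. A stripped-down sketch with hypothetical stand-in names:

    struct hello_proto {
            int version;
            int (*send_hello)(struct ksock_conn *conn, ksock_hello_msg_t *hello);
    };

    static int send_hello_stub(struct ksock_conn *conn, ksock_hello_msg_t *hello)
    {
            return 0;       /* stand-in for ksocknal_send_hello_v1/v2 */
    }

    static const struct hello_proto hello_v1 = {
            .version    = 1,
            .send_hello = send_hello_stub,
    };

    /* dispatch is then version-agnostic, as in the code above:
     *      rc = conn->ksnc_proto->pro_send_hello(conn, hello);
     */
]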
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 8c260c3d5da4..42b15a769183 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -366,12 +362,12 @@ void libcfs_debug_dumplog(void)
* get to schedule()
*/
init_waitqueue_entry(&wait, current);
- set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&debug_ctlwq, &wait);
dumper = kthread_run(libcfs_debug_dumplog_thread,
(void *)(long)current_pid(),
"libcfs_debug_dumper");
+ set_current_state(TASK_INTERRUPTIBLE);
if (IS_ERR(dumper))
pr_err("LustreError: cannot start log dump thread: %ld\n",
PTR_ERR(dumper));
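[Editor's note: this debug.c hunk is a behavioural fix, not a rename. kthread_run() can itself sleep while the new thread is being created, and any sleep returns the caller to TASK_RUNNING, silently clobbering an earlier set_current_state(TASK_INTERRUPTIBLE). The state change therefore has to be the last thing before schedule(). A condensed paraphrase of the fixed ordering (wq and worker_fn are hypothetical stand-ins):

    #include <linux/kthread.h>
    #include <linux/wait.h>

    init_waitqueue_entry(&wait, current);
    add_wait_queue(&wq, &wait);

    /* may sleep internally, which resets us to TASK_RUNNING ... */
    dumper = kthread_run(worker_fn, arg, "worker");

    /* ... so the state change must come after it, right before schedule() */
    set_current_state(TASK_INTERRUPTIBLE);
    if (!IS_ERR(dumper))
            schedule();                     /* the worker wakes us */
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(&wq, &wait);
]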
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index 086e690bd6f2..9288ee08d1f7 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -16,10 +16,6 @@
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see http://www.gnu.org/licenses
*
- * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores,
- * CA 94065 USA or visit www.oracle.com if you need additional information or
- * have any questions.
- *
* GPL HEADER END
*/
/*
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index cc45ed82b2be..23283b6e09ab 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index 50ac1536db4b..fc697cdfcdaf 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 84f9b7b47581..5c0116ade909 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -99,6 +99,7 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
(*type)->cht_size);
if (err != 0) {
+ ahash_request_free(*req);
crypto_free_ahash(tfm);
return err;
}
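[Editor's note: the linux-crypto.c hunk plugs a leak on an error path. An ahash request had already been allocated against the transform, so freeing only the tfm on failure leaked the request; the fix releases them in reverse order of allocation. A sketch of the shape of the fix - the failing call is only partially visible in the hunk, so a setkey-style call is assumed here:

    req = ahash_request_alloc(tfm, GFP_KERNEL);
    if (!req) {
            crypto_free_ahash(tfm);
            return -ENOMEM;
    }

    err = crypto_ahash_setkey(tfm, key, keylen);
    if (err) {
            ahash_request_free(req);    /* the fix: free the request first */
            crypto_free_ahash(tfm);
            return err;
    }
]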
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
index 13d31e8a931d..3e22cad18a8b 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
index 638e4b33d3a9..435b784c52f8 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
index 86f32ffc5d04..a6a76a681ea9 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
@@ -11,7 +11,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
*/
/*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index d89f71ee45b2..38308f8b6aae 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index bbe19a684c81..291d286eab48 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
index 91c2ae8f9d67..8b551d2708ba 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index f2d041118cf7..86b4d25cad46 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c
index c75ae9a68e76..a9bdb284fd15 100644
--- a/drivers/staging/lustre/lnet/libcfs/prng.c
+++ b/drivers/staging/lustre/lnet/libcfs/prng.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 7739b9469c5a..1c7efdfaffcf 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index ac84e7f4c859..d878676bc375 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index 92236ae59e49..e98c818a14fb 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index 1452bb3ad9eb..8c50c99d82d5 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index fe0dbe7468e7..346db892f275 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1677,7 +1673,7 @@ lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
if (!ni || !config)
return;
- net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
+ net_config = (struct lnet_ioctl_net_config *)config->cfg_bulk;
if (!net_config)
return;
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 480cc9c6caab..a72afdf68bb2 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index adbcadbab1be..d05c6cc797f6 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index 75d31217bf92..1834bf7a27ef 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index e671aed373df..b430046dc294 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index c5d5bedb3128..e6d3b801d87d 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index f879d7f28708..910e106e221d 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index 468eda611bf8..08402712a452 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 246b5c141d01..4ffbd3e441e8 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -200,7 +196,7 @@ static int __init lnet_init(void)
* Have to schedule a separate thread to avoid deadlocking
* in modload
*/
- (void) kthread_run(lnet_configure, NULL, "lnet_initd");
+ (void)kthread_run(lnet_configure, NULL, "lnet_initd");
}
return 0;
diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c
index 7d76f28d3a7a..e4aceb71c4ec 100644
--- a/drivers/staging/lustre/lnet/lnet/net_fault.c
+++ b/drivers/staging/lustre/lnet/lnet/net_fault.c
@@ -760,9 +760,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
}
- init_timer(&rule->dl_timer);
- rule->dl_timer.function = delay_timer_cb;
- rule->dl_timer.data = (unsigned long)rule;
+ setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule);
spin_lock_init(&rule->dl_lock);
INIT_LIST_HEAD(&rule->dl_msg_list);
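[Editor's note: the net_fault.c change swaps the open-coded three-step timer initialisation for the setup_timer() helper, which performs exactly the same assignments in one call (later kernels replaced this again with timer_setup() and a timer-typed callback). The equivalence, with the names from the hunk:

    #include <linux/timer.h>

    static void delay_timer_cb(unsigned long data);     /* as in the hunk */

    /* before: three statements */
    init_timer(&rule->dl_timer);
    rule->dl_timer.function = delay_timer_cb;
    rule->dl_timer.data     = (unsigned long)rule;

    /* after: one helper doing the same thing */
    setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule);

    /* arming and disarming are unchanged in either form */
    mod_timer(&rule->dl_timer, jiffies + HZ);
    del_timer_sync(&rule->dl_timer);
]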
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index ebf468fbc64f..a6d7a6159b8f 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index b026feebc03a..e8061916c241 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index b01dc424c514..063543233035 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -18,6 +18,7 @@
*/
#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/completion.h>
#include "../../include/linux/lnet/lib-lnet.h"
#define LNET_NRB_TINY_MIN 512 /* min value for each CPT */
@@ -1065,7 +1066,7 @@ lnet_router_checker_start(void)
return -EINVAL;
}
- sema_init(&the_lnet.ln_rc_signal, 0);
+ init_completion(&the_lnet.ln_rc_signal);
rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
if (rc) {
@@ -1079,7 +1080,7 @@ lnet_router_checker_start(void)
rc = PTR_ERR(task);
CERROR("Can't start router checker thread: %d\n", rc);
/* block until event callback signals exit */
- down(&the_lnet.ln_rc_signal);
+ wait_for_completion(&the_lnet.ln_rc_signal);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
LASSERT(!rc);
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
@@ -1112,7 +1113,7 @@ lnet_router_checker_stop(void)
wake_up(&the_lnet.ln_rc_waitq);
/* block until event callback signals exit */
- down(&the_lnet.ln_rc_signal);
+ wait_for_completion(&the_lnet.ln_rc_signal);
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
@@ -1295,7 +1296,7 @@ rescan:
lnet_prune_rc_data(1); /* wait for UNLINK */
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
- up(&the_lnet.ln_rc_signal);
+ complete(&the_lnet.ln_rc_signal);
/* The unlink event callback will signal final completion */
return 0;
}
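[Editor's note: the router.c conversion replaces a semaphore initialised to zero, used purely as a one-shot "thread has exited" signal, with a struct completion, the primitive dedicated to exactly that handshake; it documents intent and avoids abusing semaphore semantics. The mapping is one-to-one, shown here with the ln_rc_signal field from the hunk:

    #include <linux/completion.h>

    init_completion(&the_lnet.ln_rc_signal);    /* was: sema_init(&sig, 0) */

    /* the stopping path blocks here ...          was: down(&sig)          */
    wait_for_completion(&the_lnet.ln_rc_signal);

    /* ... until the checker thread, on exit, signals once (was: up(&sig)) */
    complete(&the_lnet.ln_rc_signal);
]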
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index a63d86c4c10d..13d0454e7fcb 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 408c614b6ca3..b786f8b4a73d 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 6f687581117d..1be3cad727ae 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 90c3385a355c..7ec6fc96959e 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index a03e52d29d3f..4c33621f06da 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index becd22e41da9..78b147732615 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 30e4f71f14c2..c2f121f44d33 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index cc046b1d4d0a..71485f992297 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index ad26fe9dd4af..9331ca4e3606 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 3c45a7cfae18..3b26d6eb4240 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index c9b904cade16..4ab2ee264004 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 4eac1c9e639f..d033ac03d953 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -15,12 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- * copy of GPLv2].
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index b6c4aae007af..dcd22580b1f0 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
index f1fbebd8a67c..441d6d6b4f8e 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.h
+++ b/drivers/staging/lustre/lnet/selftest/timer.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 8ac7cd4d6fdb..9f5d75f166e7 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -54,9 +54,3 @@ config LUSTRE_TRANSLATE_ERRNOS
bool
depends on LUSTRE_FS && !X86
default y
-
-config LUSTRE_LLITE_LLOOP
- tristate "Lustre virtual block device"
- depends on LUSTRE_FS && BLOCK
- depends on !PPC_64K_PAGES && !ARM64_64K_PAGES && !MICROBLAZE_64K_PAGES && !PAGE_SIZE_64KB && !IA64_PAGE_SIZE_64KB && !PARISC_PAGE_SIZE_64KB
- default m
diff --git a/drivers/staging/lustre/lustre/fid/fid_internal.h b/drivers/staging/lustre/lustre/fid/fid_internal.h
index b79a813977cf..5c53773ecc5a 100644
--- a/drivers/staging/lustre/lustre/fid/fid_internal.h
+++ b/drivers/staging/lustre/lustre/fid/fid_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fid/fid_lib.c b/drivers/staging/lustre/lustre/fid/fid_lib.c
index dd65159ebb38..99ae7eb6720e 100644
--- a/drivers/staging/lustre/lustre/fid/fid_lib.c
+++ b/drivers/staging/lustre/lustre/fid/fid_lib.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 3a4df626462f..454744d25956 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -98,8 +94,10 @@ static int seq_client_rpc(struct lu_client_seq *seq,
* request here, otherwise if MDT0 is failed(umounted),
* it can not release the export of MDT0
*/
- if (seq->lcs_type == LUSTRE_SEQ_DATA)
- req->rq_no_delay = req->rq_no_resend = 1;
+ if (seq->lcs_type == LUSTRE_SEQ_DATA) {
+ req->rq_no_delay = 1;
+ req->rq_no_resend = 1;
+ }
debug_mask = D_CONSOLE;
} else {
if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 1f0e78686278..81b7ca9ea2fd 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 5a04e99d9249..0100a935f4ff 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 75d6a48637a9..f0efe5b9fbec 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 304c0ec268c9..e59d626a1548 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index ca898befeba6..61ac420798af 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index d4c33dd110ab..3cd4a2577d90 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -2326,7 +2322,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
*/
struct cl_client_cache {
/**
- * # of users (OSCs)
+	 * client cache refcount:
+	 * # of users (OSCs) + 2 (held by llite and lov)
*/
atomic_t ccc_users;
/**
@@ -2361,6 +2358,13 @@ struct cl_client_cache {
};
+/**
+ * cl_cache functions
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max);
+void cl_cache_incref(struct cl_client_cache *cache);
+void cl_cache_decref(struct cl_client_cache *cache);
+
/** @} cl_page */
/** \defgroup cl_lock cl_lock
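
This hunk only declares the cl_client_cache lifetime API; the
implementation lives in a .c file outside this diff. A plausible
reference-counting sketch, assuming kzalloc-based allocation, ccc_users
as the refcount, and an LRU-limit field initialized from the argument:

    #include <linux/slab.h>
    #include <linux/atomic.h>

    struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
    {
            struct cl_client_cache *cache;

            cache = kzalloc(sizeof(*cache), GFP_KERNEL);
            if (!cache)
                    return NULL;

            /* one reference for the caller; users take their own */
            atomic_set(&cache->ccc_users, 1);
            cache->ccc_lru_max = lru_page_max;      /* assumed field name */
            return cache;
    }

    void cl_cache_incref(struct cl_client_cache *cache)
    {
            atomic_inc(&cache->ccc_users);
    }

    void cl_cache_decref(struct cl_client_cache *cache)
    {
            if (atomic_dec_and_test(&cache->ccc_users))
                    kfree(cache);
    }

With llite and lov each holding a reference (per the comment above), the
cache is freed only after the last OSC user and both holders drop theirs.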
diff --git a/drivers/staging/lustre/lustre/include/interval_tree.h b/drivers/staging/lustre/lustre/include/interval_tree.h
index f6df3f33e770..4a15228b5570 100644
--- a/drivers/staging/lustre/lustre/include/interval_tree.h
+++ b/drivers/staging/lustre/lustre/include/interval_tree.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index 79d8f93075d1..1eb64ec4bed4 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
index 3420cfd1278d..d18e8a76bb25 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index c6c7f54637fb..5842cb18b49e 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_user.h b/drivers/staging/lustre/lustre/include/linux/lustre_user.h
index 9cc2849f3f85..e967950e8536 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_user.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 4146c9c3999f..d68e60e7fef7 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 2816512185af..6e25c1bb6aa3 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -783,7 +779,7 @@ do { \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
- CDEBUG(mask, format, ## __VA_ARGS__); \
+ CDEBUG(mask, format "\n", ## __VA_ARGS__); \
} \
} while (0)
diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
index 07d45de69dd9..c2340d643e84 100644
--- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
+++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 9c53c1792dc8..051864c23b5b 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -386,7 +382,7 @@ static inline __u64 fid_ver_oid(const struct lu_fid *fid)
* used for other purposes and not risk collisions with existing inodes.
*
* Different FID Format
- * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
+ * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
*/
enum fid_seq {
FID_SEQ_OST_MDT0 = 0,
@@ -704,7 +700,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
* be passed through unchanged. Only legacy OST objects in "group 0"
* will be mapped into the IDIF namespace so that they can fit into the
* struct lu_fid fields without loss. For reference see:
- * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
+ * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
*/
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
__u32 ost_idx)
@@ -1241,8 +1237,16 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
*/
#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
-#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */
-#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */
+#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /* Remote client, never used
+ * in production. Removed in
+ * 2.9. Keep this flag to
+ * avoid reuse.
+ */
+#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /* Remote client by force,
+ * never used in production.
+ * Removed in 2.9. Keep this
+						  * flag to avoid reuse.
+ */
#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
@@ -1703,7 +1707,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
-#define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */
+/* OBD_MD_FLRMTPERM (0x0000010000000000ULL) remote perm, obsolete */
#define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
#define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
@@ -1715,10 +1719,10 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
*/
#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
-#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
-#define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
-#define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
-#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
+/* OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) lfs lsetfacl, obsolete */
+/* OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) lfs lgetfacl, obsolete */
+/* OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) lfs rsetfacl, obsolete */
+/* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */
#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
@@ -2159,26 +2163,8 @@ enum {
CFS_SETUID_PERM = 0x01,
CFS_SETGID_PERM = 0x02,
CFS_SETGRP_PERM = 0x04,
- CFS_RMTACL_PERM = 0x08,
- CFS_RMTOWN_PERM = 0x10
};
-/* inode access permission for remote user, the inode info are omitted,
- * for client knows them.
- */
-struct mdt_remote_perm {
- __u32 rp_uid;
- __u32 rp_gid;
- __u32 rp_fsuid;
- __u32 rp_fsuid_h;
- __u32 rp_fsgid;
- __u32 rp_fsgid_h;
- __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
- __u32 rp_padding;
-};
-
-void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
-
struct mdt_rec_setattr {
__u32 sa_opcode;
__u32 sa_cap;
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 59ba48ac31a7..ef6f38ff359e 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -215,7 +211,7 @@ struct ost_id {
#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
-#define LL_IOC_RMTACL _IOW('f', 167, long)
+/* LL_IOC_RMTACL 167 obsolete */
#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long)
#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long)
#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long)
@@ -542,19 +538,6 @@ struct identity_downcall_data {
__u32 idd_groups[0];
};
-/* for non-mapped uid/gid */
-#define NOBODY_UID 99
-#define NOBODY_GID 99
-
-#define INVALID_ID (-1)
-
-enum {
- RMT_LSETFACL = 1,
- RMT_LGETFACL = 2,
- RMT_RSETFACL = 3,
- RMT_RGETFACL = 4
-};
-
/* lustre volatile file support
* file name header: .^L^S^T^R:volatile"
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_acl.h b/drivers/staging/lustre/lustre/include/lustre_acl.h
index aa4cfa7b749d..fecabe139b1f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_acl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_acl.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index e229e91f7f56..95a0be13c0fb 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_debug.h b/drivers/staging/lustre/lustre/include/lustre_debug.h
index 8a089413c92e..93c1bdaf71a4 100644
--- a/drivers/staging/lustre/lustre/include/lustre_debug.h
+++ b/drivers/staging/lustre/lustre/include/lustre_debug.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index b36821ffb252..8886458748c1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 9cade144faca..60051a5cfe20 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1077,7 +1073,7 @@ void ldlm_lock2handle(const struct ldlm_lock *lock,
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
-int ldlm_lock_set_data(struct lustre_handle *, void *);
+int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data);
/**
* Obtain a lock reference by its handle.
@@ -1166,10 +1162,10 @@ do { \
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
-int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode);
+int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
@@ -1178,10 +1174,10 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
enum ldlm_type type, ldlm_policy_data_t *,
enum ldlm_mode mode, struct lustre_handle *,
int unref);
-enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
__u64 *bits);
void ldlm_lock_cancel(struct ldlm_lock *lock);
-void ldlm_lock_dump_handle(int level, struct lustre_handle *);
+void ldlm_lock_dump_handle(int level, const struct lustre_handle *);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
/* resource.c */
@@ -1255,9 +1251,9 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
enum ldlm_type type, __u8 with_policy,
enum ldlm_mode mode,
__u64 *flags, void *lvb, __u32 lvb_len,
- struct lustre_handle *lockh, int rc);
+ const struct lustre_handle *lockh, int rc);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
-int ldlm_cli_cancel(struct lustre_handle *lockh,
+int ldlm_cli_cancel(const struct lustre_handle *lockh,
enum ldlm_cancel_flags cancel_flags);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
enum ldlm_cancel_flags flags, void *opaque);
diff --git a/drivers/staging/lustre/lustre/include/lustre_eacl.h b/drivers/staging/lustre/lustre/include/lustre_eacl.h
index 0b66593a9526..d1039e1ff70d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_eacl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_eacl.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -70,17 +66,6 @@ typedef struct {
#define CFS_ACL_XATTR_COUNT(size, prefix) \
(((size) - sizeof(prefix ## _header)) / sizeof(prefix ## _entry))
-extern ext_acl_xattr_header *
-lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size);
-extern int
-lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
- posix_acl_xattr_header **out);
-extern void
-lustre_ext_acl_xattr_free(ext_acl_xattr_header *header);
-extern ext_acl_xattr_header *
-lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
- ext_acl_xattr_header *ext_header);
-
#endif /* CONFIG_FS_POSIX_ACL */
/** @} eacl */
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index 3014d27e6dc2..6e7cc4689fb8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -180,19 +176,6 @@ static inline int exp_connect_lru_resize(struct obd_export *exp)
return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE);
}
-static inline int exp_connect_rmtclient(struct obd_export *exp)
-{
- return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT);
-}
-
-static inline int client_is_remote(struct obd_export *exp)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
-
- return !!(imp->imp_connect_data.ocd_connect_flags &
- OBD_CONNECT_RMT_CLIENT);
-}
-
static inline int exp_connect_vbr(struct obd_export *exp)
{
return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR);
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 12e8b585c2b4..743671a547ef 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -45,7 +41,7 @@
*
* @{
*
- * http://wiki.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
+ * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
* describes the FID namespace and interoperability requirements for FIDs.
* The important parts of that document are included here for reference.
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 4cf2b0e61672..932410d3e3cc 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
index 5488a698dabd..cde7ed702c86 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h
index 27f169d2ed34..1a63a6b9e116 100644
--- a/drivers/staging/lustre/lustre/include/lustre_handles.h
+++ b/drivers/staging/lustre/lustre/include/lustre_handles.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 8325c82b3ebf..4445be7a59dd 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_intent.h b/drivers/staging/lustre/lustre/include/lustre_intent.h
index c491d52d86a2..ed2b6c674109 100644
--- a/drivers/staging/lustre/lustre/include/lustre_intent.h
+++ b/drivers/staging/lustre/lustre/include/lustre_intent.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -38,7 +34,11 @@
#define LUSTRE_INTENT_H
/* intent IT_XXX are defined in lustre/include/obd.h */
-struct lustre_intent_data {
+
+struct lookup_intent {
+ int it_op;
+ int it_create_mode;
+ __u64 it_flags;
int it_disposition;
int it_status;
__u64 it_lock_handle;
@@ -46,17 +46,23 @@ struct lustre_intent_data {
int it_lock_mode;
int it_remote_lock_mode;
__u64 it_remote_lock_handle;
- void *it_data;
+ struct ptlrpc_request *it_request;
unsigned int it_lock_set:1;
};
-struct lookup_intent {
- int it_op;
- int it_create_mode;
- __u64 it_flags;
- union {
- struct lustre_intent_data lustre;
- } d;
-};
+static inline int it_disposition(struct lookup_intent *it, int flag)
+{
+ return it->it_disposition & flag;
+}
+
+static inline void it_set_disposition(struct lookup_intent *it, int flag)
+{
+ it->it_disposition |= flag;
+}
+
+static inline void it_clear_disposition(struct lookup_intent *it, int flag)
+{
+ it->it_disposition &= ~flag;
+}
#endif
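
With the d.lustre union flattened away, callers address intent fields
directly and the disposition helpers become header inlines. A hypothetical
caller (the DISP_OPEN_CREATE flag and the request handling are
illustrative only, not taken from this patch):

    static void intent_release_sketch(struct lookup_intent *it)
    {
            if (it_disposition(it, DISP_OPEN_CREATE)) {
                    /* before this patch: it->d.lustre.it_data */
                    struct ptlrpc_request *req = it->it_request;

                    ptlrpc_req_finished(req);       /* drop the reply */
                    it_clear_disposition(it, DISP_OPEN_CREATE);
            }
    }

Making the helpers static inline also lets the mdc prototypes for
it_disposition and friends be deleted further down in this patch.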
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 00b976766aef..06958f217fc8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h
index fcc5ebbceed8..b16897702559 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lite.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lite.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 49618e186824..b96e02317bfc 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index f267ff8a6ec8..fa62b95d351f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -189,9 +185,6 @@ struct mdc_cache_waiter {
};
/* mdc/mdc_locks.c */
-int it_disposition(struct lookup_intent *it, int flag);
-void it_clear_disposition(struct lookup_intent *it, int flag);
-void it_set_disposition(struct lookup_intent *it, int flag);
int it_open_error(int phase, struct lookup_intent *it);
static inline bool cl_is_lov_delay_create(unsigned int flags)
diff --git a/drivers/staging/lustre/lustre/include/lustre_mds.h b/drivers/staging/lustre/lustre/include/lustre_mds.h
index 95d27ddecfb3..4104bd9bd5c4 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mds.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mds.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index a7973d5de168..d5debd615fdf 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -270,6 +266,11 @@
/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
+struct ptlrpc_replay_async_args {
+ int praa_old_state;
+ int praa_old_status;
+};
+
/**
* Structure to single define portal connection.
*/
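
A sketch of how the new struct ptlrpc_replay_async_args pairs with the
ptlrpc_req_async_args() scratch area (function names and the callback
body are illustrative; rq_interpret_reply is the alias defined further
down in this patch):

    static int replay_interpret_sketch(const struct lu_env *env,
                                       struct ptlrpc_request *req,
                                       void *args, int rc)
    {
            struct ptlrpc_replay_async_args *aa = args;

            /* the saved state tells recovery where to roll back to */
            CDEBUG(D_HA, "replay rc = %d, old state %d, old status %d\n",
                   rc, aa->praa_old_state, aa->praa_old_status);
            return rc;
    }

    static void replay_prep_sketch(struct ptlrpc_request *req, int state,
                                   int status)
    {
            struct ptlrpc_replay_async_args *aa;

            /* the args struct must fit in the per-request scratch union */
            BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
            aa = ptlrpc_req_async_args(req);
            aa->praa_old_state = state;
            aa->praa_old_status = status;
            req->rq_interpret_reply = replay_interpret_sketch;
    }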
@@ -479,8 +480,9 @@ enum rq_phase {
RQ_PHASE_BULK = 0xebc0de02,
RQ_PHASE_INTERPRET = 0xebc0de03,
RQ_PHASE_COMPLETE = 0xebc0de04,
- RQ_PHASE_UNREGISTERING = 0xebc0de05,
- RQ_PHASE_UNDEFINED = 0xebc0de06
+ RQ_PHASE_UNREG_RPC = 0xebc0de05,
+ RQ_PHASE_UNREG_BULK = 0xebc0de06,
+ RQ_PHASE_UNDEFINED = 0xebc0de07
};
/** Type of request interpreter call-back */
@@ -1247,22 +1249,103 @@ struct ptlrpc_hpreq_ops {
void (*hpreq_fini)(struct ptlrpc_request *);
};
-/**
- * Represents remote procedure call.
- *
- * This is a staple structure used by everybody wanting to send a request
- * in Lustre.
- */
-struct ptlrpc_request {
- /* Request type: one of PTL_RPC_MSG_* */
- int rq_type;
- /** Result of request processing */
- int rq_status;
+struct ptlrpc_cli_req {
+ /** For bulk requests on client only: bulk descriptor */
+ struct ptlrpc_bulk_desc *cr_bulk;
+ /** optional time limit for send attempts */
+ long cr_delay_limit;
+ /** time request was first queued */
+ time_t cr_queued_time;
+	/** request sent timestamp */
+ struct timespec64 cr_sent_tv;
+ /** time for request really sent out */
+ time_t cr_sent_out;
+ /** when req reply unlink must finish. */
+ time_t cr_reply_deadline;
+ /** when req bulk unlink must finish. */
+ time_t cr_bulk_deadline;
+ /** when req unlink must finish. */
+ time_t cr_req_deadline;
+ /** Portal to which this request would be sent */
+ short cr_req_ptl;
+ /** Portal where to wait for reply and where reply would be sent */
+ short cr_rep_ptl;
+ /** request resending number */
+ unsigned int cr_resend_nr;
+ /** What was import generation when this request was sent */
+ int cr_imp_gen;
+ enum lustre_imp_state cr_send_state;
+ /** Per-request waitq introduced by bug 21938 for recovery waiting */
+ wait_queue_head_t cr_set_waitq;
+ /** Link item for request set lists */
+ struct list_head cr_set_chain;
+ /** link to waited ctx */
+ struct list_head cr_ctx_chain;
+
+ /** client's half ctx */
+ struct ptlrpc_cli_ctx *cr_cli_ctx;
+ /** Link back to the request set */
+ struct ptlrpc_request_set *cr_set;
+ /** outgoing request MD handle */
+ lnet_handle_md_t cr_req_md_h;
+ /** request-out callback parameter */
+ struct ptlrpc_cb_id cr_req_cbid;
+ /** incoming reply MD handle */
+ lnet_handle_md_t cr_reply_md_h;
+ wait_queue_head_t cr_reply_waitq;
+ /** reply callback parameter */
+ struct ptlrpc_cb_id cr_reply_cbid;
+ /** Async completion handler, called when reply is received */
+ ptlrpc_interpterer_t cr_reply_interp;
+ /** Async completion context */
+ union ptlrpc_async_args cr_async_args;
+	/** Opaque data for replay and commit callbacks. */
+ void *cr_cb_data;
/**
- * Linkage item through which this request is included into
- * sending/delayed lists on client and into rqbd list on server
+ * Commit callback, called when request is committed and about to be
+ * freed.
*/
- struct list_head rq_list;
+ void (*cr_commit_cb)(struct ptlrpc_request *);
+ /** Replay callback, called after request is replayed at recovery */
+ void (*cr_replay_cb)(struct ptlrpc_request *);
+};
+
+/** client request member aliases */
+/* NB: these aliases should NOT be used by any new code; instead they
+ * should be removed step by step to avoid potential abuse
+ */
+#define rq_bulk rq_cli.cr_bulk
+#define rq_delay_limit rq_cli.cr_delay_limit
+#define rq_queued_time rq_cli.cr_queued_time
+#define rq_sent_tv rq_cli.cr_sent_tv
+#define rq_real_sent rq_cli.cr_sent_out
+#define rq_reply_deadline rq_cli.cr_reply_deadline
+#define rq_bulk_deadline rq_cli.cr_bulk_deadline
+#define rq_req_deadline rq_cli.cr_req_deadline
+#define rq_nr_resend rq_cli.cr_resend_nr
+#define rq_request_portal rq_cli.cr_req_ptl
+#define rq_reply_portal rq_cli.cr_rep_ptl
+#define rq_import_generation rq_cli.cr_imp_gen
+#define rq_send_state rq_cli.cr_send_state
+#define rq_set_chain rq_cli.cr_set_chain
+#define rq_ctx_chain rq_cli.cr_ctx_chain
+#define rq_set rq_cli.cr_set
+#define rq_set_waitq rq_cli.cr_set_waitq
+#define rq_cli_ctx rq_cli.cr_cli_ctx
+#define rq_req_md_h rq_cli.cr_req_md_h
+#define rq_req_cbid rq_cli.cr_req_cbid
+#define rq_reply_md_h rq_cli.cr_reply_md_h
+#define rq_reply_waitq rq_cli.cr_reply_waitq
+#define rq_reply_cbid rq_cli.cr_reply_cbid
+#define rq_interpret_reply rq_cli.cr_reply_interp
+#define rq_async_args rq_cli.cr_async_args
+#define rq_cb_data rq_cli.cr_cb_data
+#define rq_commit_cb rq_cli.cr_commit_cb
+#define rq_replay_cb rq_cli.cr_replay_cb
+
+struct ptlrpc_srv_req {
+ /** initial thread servicing this request */
+ struct ptlrpc_thread *sr_svc_thread;
/**
* Server side list of incoming unserved requests sorted by arrival
* time. Traversed from time to time to notice about to expire
@@ -1270,32 +1353,86 @@ struct ptlrpc_request {
* know server is alive and well, just very busy to service their
* requests in time
*/
- struct list_head rq_timed_list;
- /** server-side history, used for debugging purposes. */
- struct list_head rq_history_list;
+ struct list_head sr_timed_list;
/** server-side per-export list */
- struct list_head rq_exp_list;
- /** server-side hp handlers */
- struct ptlrpc_hpreq_ops *rq_ops;
-
- /** initial thread servicing this request */
- struct ptlrpc_thread *rq_svc_thread;
-
+ struct list_head sr_exp_list;
+	/** server-side history, used for debugging purposes. */
+ struct list_head sr_hist_list;
/** history sequence # */
- __u64 rq_history_seq;
+ __u64 sr_hist_seq;
+ /** the index of service's srv_at_array into which request is linked */
+ time_t sr_at_index;
+ /** authed uid */
+ uid_t sr_auth_uid;
+ /** authed uid mapped to */
+ uid_t sr_auth_mapped_uid;
+	/** which part of Lustre generated this RPC */
+ enum lustre_sec_part sr_sp_from;
+ /** request session context */
+ struct lu_context sr_ses;
/** \addtogroup nrs
* @{
*/
/** stub for NRS request */
- struct ptlrpc_nrs_request rq_nrq;
+ struct ptlrpc_nrs_request sr_nrq;
/** @} nrs */
- /** the index of service's srv_at_array into which request is linked */
- u32 rq_at_index;
+ /** request arrival time */
+ struct timespec64 sr_arrival_time;
+ /** server's half ctx */
+ struct ptlrpc_svc_ctx *sr_svc_ctx;
+ /** (server side), pointed directly into req buffer */
+ struct ptlrpc_user_desc *sr_user_desc;
+ /** separated reply state */
+ struct ptlrpc_reply_state *sr_reply_state;
+ /** server-side hp handlers */
+ struct ptlrpc_hpreq_ops *sr_ops;
+ /** incoming request buffer */
+ struct ptlrpc_request_buffer_desc *sr_rqbd;
+};
+
+/** server request member aliases */
+/* NB: these aliases should NOT be used by any new code; instead they
+ * should be removed step by step to avoid potential abuse
+ */
+#define rq_svc_thread rq_srv.sr_svc_thread
+#define rq_timed_list rq_srv.sr_timed_list
+#define rq_exp_list rq_srv.sr_exp_list
+#define rq_history_list rq_srv.sr_hist_list
+#define rq_history_seq rq_srv.sr_hist_seq
+#define rq_at_index rq_srv.sr_at_index
+#define rq_auth_uid rq_srv.sr_auth_uid
+#define rq_auth_mapped_uid rq_srv.sr_auth_mapped_uid
+#define rq_sp_from rq_srv.sr_sp_from
+#define rq_session rq_srv.sr_ses
+#define rq_nrq rq_srv.sr_nrq
+#define rq_arrival_time rq_srv.sr_arrival_time
+#define rq_reply_state rq_srv.sr_reply_state
+#define rq_svc_ctx rq_srv.sr_svc_ctx
+#define rq_user_desc rq_srv.sr_user_desc
+#define rq_ops rq_srv.sr_ops
+#define rq_rqbd rq_srv.sr_rqbd
+
+/**
+ * Represents remote procedure call.
+ *
+ * This is a staple structure used by everybody wanting to send a request
+ * in Lustre.
+ */
+struct ptlrpc_request {
+ /* Request type: one of PTL_RPC_MSG_* */
+ int rq_type;
+ /** Result of request processing */
+ int rq_status;
+ /**
+ * Linkage item through which this request is included into
+ * sending/delayed lists on client and into rqbd list on server
+ */
+ struct list_head rq_list;
/** Lock to protect request flags and some other important bits, like
* rq_list
*/
spinlock_t rq_lock;
- /** client-side flags are serialized by rq_lock */
+ /** client-side flags are serialized by rq_lock @{ */
unsigned int rq_intr:1, rq_replied:1, rq_err:1,
rq_timedout:1, rq_resend:1, rq_restart:1,
/**
@@ -1311,18 +1448,15 @@ struct ptlrpc_request {
rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
rq_early:1,
- rq_req_unlink:1, rq_reply_unlink:1,
+ rq_req_unlinked:1, /* unlinked request buffer from lnet */
+ rq_reply_unlinked:1, /* unlinked reply buffer from lnet */
rq_memalloc:1, /* req originated from "kswapd" */
- /* server-side flags */
- rq_packed_final:1, /* packed final reply */
- rq_hp:1, /* high priority RPC */
- rq_at_linked:1, /* link into service's srv_at_array */
- rq_reply_truncate:1,
rq_committed:1,
- /* whether the "rq_set" is a valid one */
+ rq_reply_truncated:1,
+ /** whether the "rq_set" is a valid one */
rq_invalid_rqset:1,
rq_generation_set:1,
- /* do not resend request on -EINPROGRESS */
+ /** do not resend request on -EINPROGRESS */
rq_no_retry_einprogress:1,
/* allow the req to be sent if the import is in recovery
* status
@@ -1330,20 +1464,24 @@ struct ptlrpc_request {
rq_allow_replay:1,
/* bulk request, sent to server, but uncommitted */
rq_unstable:1;
+ /** @} */
- unsigned int rq_nr_resend;
-
- enum rq_phase rq_phase; /* one of RQ_PHASE_* */
- enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
- atomic_t rq_refcount; /* client-side refcount for SENT race,
- * server-side refcount for multiple replies
- */
-
- /** Portal to which this request would be sent */
- short rq_request_portal; /* XXX FIXME bug 249 */
- /** Portal where to wait for reply and where reply would be sent */
- short rq_reply_portal; /* XXX FIXME bug 249 */
+ /** server-side flags @{ */
+ unsigned int
+ rq_hp:1, /**< high priority RPC */
+ rq_at_linked:1, /**< link into service's srv_at_array */
+ rq_packed_final:1; /**< packed final reply */
+ /** @} */
+ /** one of RQ_PHASE_* */
+ enum rq_phase rq_phase;
+ /** one of RQ_PHASE_* to be used next */
+ enum rq_phase rq_next_phase;
+ /**
+ * client-side refcount for SENT race, server-side refcount
+ * for multiple replies
+ */
+ atomic_t rq_refcount;
/**
* client-side:
* !rq_truncate : # reply bytes actually received,
@@ -1354,6 +1492,8 @@ struct ptlrpc_request {
int rq_reqlen;
/** Reply length */
int rq_replen;
+ /** Pool if request is from preallocated list */
+ struct ptlrpc_request_pool *rq_pool;
/** Request message - what client sent */
struct lustre_msg *rq_reqmsg;
/** Reply message - server response */
@@ -1366,19 +1506,20 @@ struct ptlrpc_request {
* List item for the replay list. Not yet committed requests get linked
* there.
* Also see \a rq_replay comment above.
+ * It's also the link chain on obd_export::exp_req_replay_queue.
*/
struct list_head rq_replay_list;
-
+ /** non-shared members for client & server request */
+ union {
+ struct ptlrpc_cli_req rq_cli;
+ struct ptlrpc_srv_req rq_srv;
+ };
/**
* security and encryption data
* @{
*/
- struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
- struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
- struct list_head rq_ctx_chain; /**< link to waited ctx */
-
- struct sptlrpc_flavor rq_flvr; /**< for client & server */
- enum lustre_sec_part rq_sp_from;
+ /** description of flavors for client & server */
+ struct sptlrpc_flavor rq_flvr;
/* client/server security flags */
unsigned int
@@ -1388,7 +1529,6 @@ struct ptlrpc_request {
rq_bulk_write:1, /* request bulk write */
/* server authentication flags */
rq_auth_gss:1, /* authenticated by gss */
- rq_auth_remote:1, /* authed as remote user */
rq_auth_usr_root:1, /* authed as root */
rq_auth_usr_mdt:1, /* authed as mdt */
rq_auth_usr_ost:1, /* authed as ost */
@@ -1397,19 +1537,15 @@ struct ptlrpc_request {
rq_pack_bulk:1,
/* doesn't expect reply FIXME */
rq_no_reply:1,
- rq_pill_init:1; /* pill initialized */
-
- uid_t rq_auth_uid; /* authed uid */
- uid_t rq_auth_mapped_uid; /* authed uid mapped to */
-
- /* (server side), pointed directly into req buffer */
- struct ptlrpc_user_desc *rq_user_desc;
-
- /* various buffer pointers */
- struct lustre_msg *rq_reqbuf; /* req wrapper */
- char *rq_repbuf; /* rep buffer */
- struct lustre_msg *rq_repdata; /* rep wrapper msg */
- struct lustre_msg *rq_clrbuf; /* only in priv mode */
+ rq_pill_init:1, /* pill initialized */
+ rq_srv_req:1; /* server request */
+
+ /** various buffer pointers */
+ struct lustre_msg *rq_reqbuf; /**< req wrapper */
+ char *rq_repbuf; /**< rep buffer */
+ struct lustre_msg *rq_repdata; /**< rep wrapper msg */
+ /** only in priv mode */
+ struct lustre_msg *rq_clrbuf;
int rq_reqbuf_len; /* req wrapper buf len */
int rq_reqdata_len; /* req wrapper msg len */
int rq_repbuf_len; /* rep buffer len */
@@ -1426,97 +1562,28 @@ struct ptlrpc_request {
__u32 rq_req_swab_mask;
__u32 rq_rep_swab_mask;
- /** What was import generation when this request was sent */
- int rq_import_generation;
- enum lustre_imp_state rq_send_state;
-
/** how many early replies (for stats) */
int rq_early_count;
- /** client+server request */
- lnet_handle_md_t rq_req_md_h;
- struct ptlrpc_cb_id rq_req_cbid;
- /** optional time limit for send attempts */
- long rq_delay_limit;
- /** time request was first queued */
- unsigned long rq_queued_time;
-
- /* server-side... */
- /** request arrival time */
- struct timespec64 rq_arrival_time;
- /** separated reply state */
- struct ptlrpc_reply_state *rq_reply_state;
- /** incoming request buffer */
- struct ptlrpc_request_buffer_desc *rq_rqbd;
-
- /** client-only incoming reply */
- lnet_handle_md_t rq_reply_md_h;
- wait_queue_head_t rq_reply_waitq;
- struct ptlrpc_cb_id rq_reply_cbid;
-
+ /** Server-side, export on which request was received */
+ struct obd_export *rq_export;
+ /** import where request is being sent */
+ struct obd_import *rq_import;
/** our LNet NID */
lnet_nid_t rq_self;
/** Peer description (the other side) */
lnet_process_id_t rq_peer;
- /** Server-side, export on which request was received */
- struct obd_export *rq_export;
- /** Client side, import where request is being sent */
- struct obd_import *rq_import;
-
- /** Replay callback, called after request is replayed at recovery */
- void (*rq_replay_cb)(struct ptlrpc_request *);
/**
- * Commit callback, called when request is committed and about to be
- * freed.
+ * service time estimate (secs)
+ * If the request is not served by this time, it is marked as timed out.
*/
- void (*rq_commit_cb)(struct ptlrpc_request *);
- /** Opaq data for replay and commit callbacks. */
- void *rq_cb_data;
-
- /** For bulk requests on client only: bulk descriptor */
- struct ptlrpc_bulk_desc *rq_bulk;
-
- /** client outgoing req */
+ int rq_timeout;
/**
* when request/reply sent (secs), or time when request should be sent
*/
time64_t rq_sent;
- /** time for request really sent out */
- time64_t rq_real_sent;
-
- /** when request must finish. volatile
- * so that servers' early reply updates to the deadline aren't
- * kept in per-cpu cache
- */
- volatile time64_t rq_deadline;
- /** when req reply unlink must finish. */
- time64_t rq_reply_deadline;
- /** when req bulk unlink must finish. */
- time64_t rq_bulk_deadline;
- /**
- * service time estimate (secs)
- * If the requestsis not served by this time, it is marked as timed out.
- */
- int rq_timeout;
-
- /** Multi-rpc bits */
- /** Per-request waitq introduced by bug 21938 for recovery waiting */
- wait_queue_head_t rq_set_waitq;
- /** Link item for request set lists */
- struct list_head rq_set_chain;
- /** Link back to the request set */
- struct ptlrpc_request_set *rq_set;
- /** Async completion handler, called when reply is received */
- ptlrpc_interpterer_t rq_interpret_reply;
- /** Async completion context */
- union ptlrpc_async_args rq_async_args;
-
- /** Pool if request is from preallocated list */
- struct ptlrpc_request_pool *rq_pool;
-
- struct lu_context rq_session;
- struct lu_context rq_recov_session;
-
+ /** when request must finish. */
+ time64_t rq_deadline;
/** request format description */
struct req_capsule rq_pill;
};
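Because rq_cli and rq_srv overlay the same storage, only one side is meaningful for a given request; the rq_srv_req bit added above serves as the discriminator. A hedged helper sketch (the function name is invented):

	static inline bool ptlrpc_req_is_server(const struct ptlrpc_request *req)
	{
		/* set for server-side requests, so rq_srv (and the
		 * rq_* aliases) are the live union members
		 */
		return req->rq_srv_req != 0;
	}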
@@ -1629,8 +1696,10 @@ ptlrpc_phase2str(enum rq_phase phase)
return "Interpret";
case RQ_PHASE_COMPLETE:
return "Complete";
- case RQ_PHASE_UNREGISTERING:
- return "Unregistering";
+ case RQ_PHASE_UNREG_RPC:
+ return "UnregRPC";
+ case RQ_PHASE_UNREG_BULK:
+ return "UnregBULK";
default:
return "?Phase?";
}
@@ -1657,7 +1726,7 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req)
#define DEBUG_REQ_FLAGS(req) \
ptlrpc_rqphase2str(req), \
FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
- FLAG(req->rq_err, "E"), \
+ FLAG(req->rq_err, "E"), FLAG(req->rq_net_err, "e"), \
FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
FLAG(req->rq_no_resend, "N"), \
@@ -1665,7 +1734,7 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req)
FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
FLAG(req->rq_committed, "M")
-#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
+#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s"
void _debug_req(struct ptlrpc_request *req,
struct libcfs_debug_msg_data *data, const char *fmt, ...)
@@ -2316,8 +2385,7 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
desc = req->rq_bulk;
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- req->rq_bulk_deadline > ktime_get_real_seconds())
+ if (req->rq_bulk_deadline > ktime_get_real_seconds())
return 1;
if (!desc)
@@ -2664,13 +2732,20 @@ ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
if (req->rq_phase == new_phase)
return;
- if (new_phase == RQ_PHASE_UNREGISTERING) {
+ if (new_phase == RQ_PHASE_UNREG_RPC ||
+ new_phase == RQ_PHASE_UNREG_BULK) {
+ /* No embedded unregistering phases */
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK)
+ return;
+
req->rq_next_phase = req->rq_phase;
if (req->rq_import)
atomic_inc(&req->rq_import->imp_unregistering);
}
- if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK) {
if (req->rq_import)
atomic_dec(&req->rq_import->imp_unregistering);
}
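The early return above is what rules out "embedded" unregistering phases: once a request sits in either UNREG phase, a move to the other one is dropped, so rq_next_phase keeps the pre-unregistration phase and imp_unregistering is not incremented twice. A hedged call sequence:

	ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_RPC);	/* saves old phase,
							 * imp_unregistering++
							 */
	ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);	/* early return; the
							 * phase stays UNREG_RPC
							 */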
@@ -2687,9 +2762,6 @@ ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
static inline int
ptlrpc_client_early(struct ptlrpc_request *req)
{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds())
- return 0;
return req->rq_early;
}
@@ -2699,8 +2771,7 @@ ptlrpc_client_early(struct ptlrpc_request *req)
static inline int
ptlrpc_client_replied(struct ptlrpc_request *req)
{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds())
+ if (req->rq_reply_deadline > ktime_get_real_seconds())
return 0;
return req->rq_replied;
}
@@ -2709,8 +2780,7 @@ ptlrpc_client_replied(struct ptlrpc_request *req)
static inline int
ptlrpc_client_recv(struct ptlrpc_request *req)
{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds())
+ if (req->rq_reply_deadline > ktime_get_real_seconds())
return 1;
return req->rq_receiving_reply;
}
@@ -2721,13 +2791,16 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
int rc;
spin_lock(&req->rq_lock);
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds()) {
+ if (req->rq_reply_deadline > ktime_get_real_seconds()) {
+ spin_unlock(&req->rq_lock);
+ return 1;
+ }
+ if (req->rq_req_deadline > ktime_get_real_seconds()) {
spin_unlock(&req->rq_lock);
return 1;
}
- rc = req->rq_receiving_reply;
- rc = rc || req->rq_req_unlink || req->rq_reply_unlink;
+ rc = !req->rq_req_unlinked || !req->rq_reply_unlinked ||
+ req->rq_receiving_reply;
spin_unlock(&req->rq_lock);
return rc;
}
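Note the polarity flip that comes with the rename: rq_req_unlink meant "still needs unlinking" whereas rq_req_unlinked means "already unlinked from LNet", so the busy test now negates the flags. A hedged restatement (helper name invented):

	static inline bool req_buffers_busy(struct ptlrpc_request *req)
	{
		/* busy while either buffer is still linked into LNet
		 * or a reply is being received; mirrors the locked
		 * test above
		 */
		return !req->rq_req_unlinked || !req->rq_reply_unlinked ||
		       req->rq_receiving_reply;
	}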
diff --git a/drivers/staging/lustre/lustre/include/lustre_param.h b/drivers/staging/lustre/lustre/include/lustre_param.h
index a42cf90c1cd8..82aadd32c2b8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_param.h
+++ b/drivers/staging/lustre/lustre/include/lustre_param.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index 0aac4391ea16..544a43c862b9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -164,7 +160,7 @@ extern struct req_format RQF_MDS_IS_SUBDIR;
extern struct req_format RQF_MDS_DONE_WRITING;
extern struct req_format RQF_MDS_REINT;
extern struct req_format RQF_MDS_REINT_CREATE;
-extern struct req_format RQF_MDS_REINT_CREATE_RMT_ACL;
+extern struct req_format RQF_MDS_REINT_CREATE_ACL;
extern struct req_format RQF_MDS_REINT_CREATE_SLAVE;
extern struct req_format RQF_MDS_REINT_CREATE_SYM;
extern struct req_format RQF_MDS_REINT_OPEN;
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 01b4e6726a68..90c183424802 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -221,13 +217,13 @@ enum sptlrpc_bulk_service {
#define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL
-#define SPTLRPC_FLVR_INVALID ((__u32) 0xFFFFFFFF)
-#define SPTLRPC_FLVR_ANY ((__u32) 0xFFF00000)
+#define SPTLRPC_FLVR_INVALID ((__u32)0xFFFFFFFF)
+#define SPTLRPC_FLVR_ANY ((__u32)0xFFF00000)
/**
* extract the useful part from wire flavor
*/
-#define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF)
+#define WIRE_FLVR(wflvr) (((__u32)(wflvr)) & 0x000FFFFF)
/** @} flavor */
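Whitespace aside, the two masks spell out the wire layout: the low 20 bits carry the flavor proper and SPTLRPC_FLVR_ANY is exactly the complementary top 12 bits. A hedged worked example:

	/* WIRE_FLVR(0xABC12345) == 0xABC12345 & 0x000FFFFF == 0x00012345;
	 * the discarded 0xABC00000 part falls inside SPTLRPC_FLVR_ANY
	 */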
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 2d926e0ee647..a1bc2c478ff9 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -232,6 +228,12 @@ enum {
#define MDC_MAX_RIF_DEFAULT 8
#define MDC_MAX_RIF_MAX 512
+enum obd_cl_sem_lock_class {
+ OBD_CLI_SEM_NORMAL,
+ OBD_CLI_SEM_MGC,
+ OBD_CLI_SEM_MDCOSC,
+};
+
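These read as lockdep nesting annotations for client_obd::cl_sem, so that an MGC or MDC-under-OSC acquisition can nest beneath a normal one without tripping a false deadlock report. A minimal sketch under that assumption (the function is invented):

	static void example_nested_cl_sem(struct client_obd *cli)
	{
		/* the subclass tells lockdep this is a distinct level */
		down_read_nested(&cli->cl_sem, OBD_CLI_SEM_MGC);
		/* ... inspect import state ... */
		up_read(&cli->cl_sem);
	}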
struct mdc_rpc_lock;
struct obd_import;
struct client_obd {
@@ -419,7 +421,7 @@ struct lov_obd {
enum lustre_sec_part lov_sp_me;
/* Cached LRU and unstable data from upper layer */
- void *lov_cache;
+ struct cl_client_cache *lov_cache;
struct rw_semaphore lov_notify_lock;
@@ -1119,9 +1121,6 @@ struct md_ops {
ldlm_policy_data_t *, enum ldlm_mode,
enum ldlm_cancel_flags flags, void *opaque);
- int (*get_remote_perm)(struct obd_export *, const struct lu_fid *,
- __u32, struct ptlrpc_request **);
-
int (*intent_getattr_async)(struct obd_export *,
struct md_enqueue_info *,
struct ldlm_enqueue_info *);
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
index f6c18df906a8..a8a81e662a56 100644
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 32863bcb30b9..6482a937000b 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1654,16 +1650,6 @@ static inline int md_init_ea_size(struct obd_export *exp, int easize,
cookiesize, def_cookiesize);
}
-static inline int md_get_remote_perm(struct obd_export *exp,
- const struct lu_fid *fid, __u32 suppgid,
- struct ptlrpc_request **request)
-{
- EXP_CHECK_MD_OP(exp, get_remote_perm);
- EXP_MD_COUNTER_INCREMENT(exp, get_remote_perm);
- return MDP(exp->exp_obd, get_remote_perm)(exp, fid, suppgid,
- request);
-}
-
static inline int md_intent_getattr_async(struct obd_export *exp,
struct md_enqueue_info *minfo,
struct ldlm_enqueue_info *einfo)
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 60034d39b00d..845e64a56c21 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -368,6 +364,9 @@ extern char obd_jobid_var[];
#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB2 0x515
#define OBD_FAIL_PTLRPC_DELAY_IMP_FULL 0x516
#define OBD_FAIL_PTLRPC_CANCEL_RESEND 0x517
+#define OBD_FAIL_PTLRPC_DROP_BULK 0x51a
+#define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK 0x51b
+#define OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK 0x51c
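The three new fail codes pair with the request/reply unlink deadlines seen earlier in this patch. A hedged sketch of typical consumption; the deadline arithmetic and the LONG_UNLINK constant are assumptions, not taken from this hunk:

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
		/* keep the request buffer linked well past its
		 * normal lifetime to exercise the unlink path
		 */
		req->rq_req_deadline = ktime_get_real_seconds() +
				       LONG_UNLINK;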
#define OBD_FAIL_OBD_PING_NET 0x600
#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index 323060626fdf..f4a70ebddeaf 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index 621323f6ee60..ea8840cb9056 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index cf1f1783632f..f5023d9b78f5 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index 349bfcc9b331..d6b61bc39135 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -259,14 +255,13 @@ reprocess:
* overflow and underflow.
*/
if ((new->l_policy_data.l_flock.start >
- (lock->l_policy_data.l_flock.end + 1))
- && (lock->l_policy_data.l_flock.end !=
- OBD_OBJECT_EOF))
+ (lock->l_policy_data.l_flock.end + 1)) &&
+ (lock->l_policy_data.l_flock.end != OBD_OBJECT_EOF))
continue;
if ((new->l_policy_data.l_flock.end <
- (lock->l_policy_data.l_flock.start - 1))
- && (lock->l_policy_data.l_flock.start != 0))
+ (lock->l_policy_data.l_flock.start - 1)) &&
+ (lock->l_policy_data.l_flock.start != 0))
break;
if (new->l_policy_data.l_flock.start <
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
index b1bed1e17d32..79f4e6fa193e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 32f227f37799..e4cf65d2d3b1 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index b4ffbe2fc4ed..7c832aae7d5e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -345,7 +341,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
* Set cl_chksum* to CRC32 for now to avoid returning screwed info
* through procfs.
*/
- cli->cl_cksum_type = cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
+ cli->cl_cksum_type = OBD_CKSUM_CRC32;
+ cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
/* This value may be reduced at connect time in
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index bff94ea12d6f..a5993f745ebe 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -662,7 +658,7 @@ static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
* r/w reference type is determined by \a mode
* Calls ldlm_lock_addref_internal.
*/
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock;
@@ -704,7 +700,7 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
*
* \retval -EAGAIN lock is being canceled.
*/
-int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
+int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock;
int result;
@@ -836,7 +832,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
/**
* Decrease reader/writer refcount for LDLM lock with handle \a lockh
*/
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
@@ -853,7 +849,7 @@ EXPORT_SYMBOL(ldlm_lock_decref);
*
* Typical usage is for GROUP locks which we cannot allow to be cached.
*/
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
@@ -1322,7 +1318,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
}
EXPORT_SYMBOL(ldlm_lock_match);
-enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
__u64 *bits)
{
struct ldlm_lock *lock;
@@ -1444,7 +1440,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
memcpy(data, lvb, size);
break;
default:
- LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type);
+ LDLM_ERROR(lock, "Unknown LVB type: %d", lock->l_lvb_type);
dump_stack();
return -EINVAL;
}
@@ -1853,7 +1849,7 @@ EXPORT_SYMBOL(ldlm_lock_cancel);
/**
* Set opaque data into the lock that only makes sense to upper layer.
*/
-int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
+int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data)
{
struct ldlm_lock *lock = ldlm_handle2lock(lockh);
int rc = -EINVAL;
@@ -1879,7 +1875,7 @@ struct export_cl_data {
*
* Used when printing all locks on a resource for debug purposes.
*/
-void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
+void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh)
{
struct ldlm_lock *lock;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index ab739f079a48..821939ff2e6b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -503,7 +499,7 @@ static int ldlm_handle_setinfo(struct ptlrpc_request *req)
static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
const char *msg, int rc,
- struct lustre_handle *handle)
+ const struct lustre_handle *handle)
{
DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
"%s: [nid %s] [rc %d] [lock %#llx]",
@@ -641,7 +637,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
*/
if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
ldlm_is_failed(lock)) {
- LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
+ LDLM_DEBUG(lock,
+ "callback on lock %#llx - lock disappeared",
dlm_req->lock_handle[0].cookie);
unlock_res_and_lock(lock);
LDLM_LOCK_RELEASE(lock);
@@ -1011,9 +1008,11 @@ static int ldlm_setup(void)
blp->blp_min_threads = LDLM_NTHRS_INIT;
blp->blp_max_threads = LDLM_NTHRS_MAX;
} else {
- blp->blp_min_threads = blp->blp_max_threads =
- min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
- ldlm_num_threads));
+ blp->blp_min_threads = min_t(int, LDLM_NTHRS_MAX,
+ max_t(int, LDLM_NTHRS_INIT,
+ ldlm_num_threads));
+
+ blp->blp_max_threads = blp->blp_min_threads;
}
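The reworked branch is the old one-liner split up for readability; both bounds still collapse to a clamp of the module parameter. A hedged equivalent using the standard kernel helper:

	blp->blp_min_threads = clamp_t(int, ldlm_num_threads,
				       LDLM_NTHRS_INIT, LDLM_NTHRS_MAX);
	blp->blp_max_threads = blp->blp_min_threads;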
for (i = 0; i < blp->blp_min_threads; i++) {
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
index 0c1965ddabb9..0aed39c46154 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index b913ba9cf97c..657ed4012776 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 107314e284a0..af487f9937f4 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -340,7 +336,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
enum ldlm_type type, __u8 with_policy,
enum ldlm_mode mode,
__u64 *flags, void *lvb, __u32 lvb_len,
- struct lustre_handle *lockh, int rc)
+ const struct lustre_handle *lockh, int rc)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
int is_replay = *flags & LDLM_FL_REPLAY;
@@ -715,7 +711,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock->l_req_extent = policy->l_extent;
}
- LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n",
+ LDLM_DEBUG(lock, "client-side enqueue START, flags %llx",
*flags);
}
@@ -1027,7 +1023,7 @@ EXPORT_SYMBOL(ldlm_cli_update_pool);
*
* Lock must not have any readers or writers by this time.
*/
-int ldlm_cli_cancel(struct lustre_handle *lockh,
+int ldlm_cli_cancel(const struct lustre_handle *lockh,
enum ldlm_cancel_flags cancel_flags)
{
struct obd_export *exp;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index e99c89c34cd0..51a28d96af39 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1279,7 +1275,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
{
check_res_locked(res);
- LDLM_DEBUG(lock, "About to add this lock:\n");
+ LDLM_DEBUG(lock, "About to add this lock:");
if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 2ce10ff01b80..2cbb1b80bd41 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,11 +1,7 @@
obj-$(CONFIG_LUSTRE_FS) += lustre.o
-obj-$(CONFIG_LUSTRE_LLITE_LLOOP) += llite_lloop.o
lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
rw.o namei.o symlink.o llite_mmap.o \
- xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o \
- rw26.o super25.o statahead.o \
+ xattr.o xattr_cache.o rw26.o super25.o statahead.o \
glimpse.o lcommon_cl.o lcommon_misc.o \
vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
lproc_llite.o
-
-llite_lloop-y := lloop.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 1b6f82a1a435..581a63a0a63e 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -206,27 +202,27 @@ int ll_d_init(struct dentry *de)
void ll_intent_drop_lock(struct lookup_intent *it)
{
- if (it->it_op && it->d.lustre.it_lock_mode) {
+ if (it->it_op && it->it_lock_mode) {
struct lustre_handle handle;
- handle.cookie = it->d.lustre.it_lock_handle;
+ handle.cookie = it->it_lock_handle;
CDEBUG(D_DLMTRACE, "releasing lock with cookie %#llx from it %p\n",
handle.cookie, it);
- ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode);
+ ldlm_lock_decref(&handle, it->it_lock_mode);
/* bug 494: intent_release may be called multiple times, from
* this thread and we don't want to double-decref this lock
*/
- it->d.lustre.it_lock_mode = 0;
- if (it->d.lustre.it_remote_lock_mode != 0) {
- handle.cookie = it->d.lustre.it_remote_lock_handle;
+ it->it_lock_mode = 0;
+ if (it->it_remote_lock_mode != 0) {
+ handle.cookie = it->it_remote_lock_handle;
CDEBUG(D_DLMTRACE, "releasing remote lock with cookie%#llx from it %p\n",
handle.cookie, it);
ldlm_lock_decref(&handle,
- it->d.lustre.it_remote_lock_mode);
- it->d.lustre.it_remote_lock_mode = 0;
+ it->it_remote_lock_mode);
+ it->it_remote_lock_mode = 0;
}
}
}
@@ -237,13 +233,13 @@ void ll_intent_release(struct lookup_intent *it)
ll_intent_drop_lock(it);
/* We are still holding extra reference on a request, need to free it */
if (it_disposition(it, DISP_ENQ_OPEN_REF))
- ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */
+ ptlrpc_req_finished(it->it_request); /* ll_file_open */
if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */
- ptlrpc_req_finished(it->d.lustre.it_data);
+ ptlrpc_req_finished(it->it_request);
- it->d.lustre.it_disposition = 0;
- it->d.lustre.it_data = NULL;
+ it->it_disposition = 0;
+ it->it_request = NULL;
}
void ll_invalidate_aliases(struct inode *inode)
@@ -253,7 +249,7 @@ void ll_invalidate_aliases(struct inode *inode)
CDEBUG(D_INODE, "marking dentries for ino "DFID"(%p) invalid\n",
PFID(ll_inode2fid(inode)), inode);
- ll_lock_dcache(inode);
+ spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
dentry, dentry, dentry->d_parent,
@@ -261,7 +257,7 @@ void ll_invalidate_aliases(struct inode *inode)
d_lustre_invalidate(dentry, 0);
}
- ll_unlock_dcache(inode);
+ spin_unlock(&inode->i_lock);
}
int ll_revalidate_it_finish(struct ptlrpc_request *request,
@@ -283,7 +279,7 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
{
- if (it->d.lustre.it_lock_mode && inode) {
+ if (it->it_lock_mode && inode) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)\n",
@@ -306,6 +302,17 @@ static int ll_revalidate_dentry(struct dentry *dentry,
{
struct inode *dir = d_inode(dentry->d_parent);
+ /* If this is an intermediate path component lookup and we were able
+ * to get to this dentry, then its lock has not been revoked and the
+ * path component is valid.
+ */
+ if (lookup_flags & LOOKUP_PARENT)
+ return 1;
+
+ /* Symlink - always valid as long as the dentry was found */
+ if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))
+ return 1;
+
/*
* if open&create is set, talk to MDS to make sure file is created if
* necessary, because we can't do this in ->open() later since that's
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 4b00d1ac84fb..5b381779c827 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -366,7 +362,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
ll_finish_md_op_data(op_data);
- request = (struct ptlrpc_request *)it.d.lustre.it_data;
+ request = (struct ptlrpc_request *)it.it_request;
if (request)
ptlrpc_req_finished(request);
if (rc < 0) {
@@ -378,7 +374,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
CDEBUG(D_INODE, "setting lr_lvb_inode to inode "DFID"(%p)\n",
PFID(ll_inode2fid(dir)), dir);
md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
- &it.d.lustre.it_lock_handle, dir, NULL);
+ &it.it_lock_handle, dir, NULL);
} else {
/* for cross-ref object, l_ast_data of the lock may not be set,
* we reset it here
@@ -1076,17 +1072,11 @@ static int copy_and_ioctl(int cmd, struct obd_export *exp,
void *copy;
int rc;
- copy = kzalloc(size, GFP_NOFS);
- if (!copy)
- return -ENOMEM;
-
- if (copy_from_user(copy, data, size)) {
- rc = -EFAULT;
- goto out;
- }
+ copy = memdup_user(data, size);
+ if (IS_ERR(copy))
+ return PTR_ERR(copy);
rc = obd_iocontrol(cmd, exp, size, copy, NULL);
-out:
kfree(copy);
return rc;
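memdup_user() folds the allocate-then-copy pair into one call and reports failure via ERR_PTR codes. A simplified sketch of what it does internally; the real helper lives in mm/util.c and, unlike the old code here, allocates with GFP_KERNEL rather than GFP_NOFS:

	void *copy = kmalloc(size, GFP_KERNEL);

	if (!copy)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(copy, data, size)) {
		kfree(copy);
		return ERR_PTR(-EFAULT);
	}
	return copy;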
@@ -1107,8 +1097,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
case Q_QUOTAOFF:
case Q_SETQUOTA:
case Q_SETINFO:
- if (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
break;
case Q_GETQUOTA:
@@ -1116,8 +1105,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
!uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
(type == GRPQUOTA &&
!in_egroup_p(make_kgid(&init_user_ns, id)))) &&
- (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT))
+ !capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
break;
case Q_GETINFO:
@@ -1128,9 +1116,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
}
if (valid != QC_GENERAL) {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- return -EOPNOTSUPP;
-
if (cmd == Q_GETINFO)
qctl->qc_cmd = Q_GETOINFO;
else if (cmd == Q_GETQUOTA)
@@ -1538,7 +1523,9 @@ skip_lmm:
st.st_atime = body->atime;
st.st_mtime = body->mtime;
st.st_ctime = body->ctime;
- st.st_ino = inode->i_ino;
+ st.st_ino = cl_fid_build_ino(&body->fid1,
+ sbi->ll_flags &
+ LL_SBI_32BIT_API);
lmdp = (struct lov_user_mds_data __user *)arg;
if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
@@ -1631,8 +1618,7 @@ free_lmm:
struct obd_quotactl *oqctl;
int error = 0;
- if (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
@@ -1655,8 +1641,7 @@ free_lmm:
case OBD_IOC_POLL_QUOTACHECK: {
struct if_quotacheck *check;
- if (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
check = kzalloc(sizeof(*check), GFP_NOFS);
@@ -1713,20 +1698,6 @@ out_quotactl:
return ll_get_obd_name(inode, cmd, arg);
case LL_IOC_FLUSHCTX:
return ll_flush_ctx(inode);
-#ifdef CONFIG_FS_POSIX_ACL
- case LL_IOC_RMTACL: {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- rc = rct_add(&sbi->ll_rct, current_pid(), arg);
- if (!rc)
- fd->fd_flags |= LL_FILE_RMTACL;
- return rc;
- } else {
- return 0;
- }
- }
-#endif
case LL_IOC_GETOBDCOUNT: {
int count, vallen;
struct obd_export *exp;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index f47f2acaf90c..57281b9e31ff 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -348,18 +344,6 @@ int ll_file_release(struct inode *inode, struct file *file)
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(ll_inode2fid(inode)), inode);
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
- fd->fd_flags &= ~LL_FILE_RMTACL;
- rct_del(&sbi->ll_rct, current_pid());
- et_search_free(&sbi->ll_et, current_pid());
- }
- }
-#endif
-
if (!is_root_inode(inode))
ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
fd = LUSTRE_FPRIVATE(file);
@@ -415,7 +399,19 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm,
* parameters. No need for the open lock
*/
if (!lmm && lmmsize == 0) {
- itp->it_flags |= MDS_OPEN_LOCK;
+ struct ll_dentry_data *ldd = ll_d2d(dentry);
+ /*
+ * If we came via ll_iget_for_nfs, then we need to request
+ * struct ll_dentry_data *ldd = ll_d2d(file->f_dentry);
+ *
+ * NB: when ldd is NULL, it must have come via normal
+ * lookup path only, since ll_iget_for_nfs always calls
+ * ll_d_init().
+ */
+ if (ldd && ldd->lld_nfs_dentry) {
+ ldd->lld_nfs_dentry = 0;
+ itp->it_flags |= MDS_OPEN_LOCK;
+ }
if (itp->it_flags & FMODE_WRITE)
opc = LUSTRE_OPC_CREATE;
}
@@ -453,7 +449,7 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm,
}
rc = ll_prep_inode(&inode, req, NULL, itp);
- if (!rc && itp->d.lustre.it_lock_mode)
+ if (!rc && itp->it_lock_mode)
ll_set_lock_data(sbi->ll_md_exp, inode, itp, NULL);
out:
@@ -480,13 +476,12 @@ void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
struct obd_client_handle *och)
{
- struct ptlrpc_request *req = it->d.lustre.it_data;
struct mdt_body *body;
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
och->och_fh = body->handle;
och->och_fid = body->fid1;
- och->och_lease_handle.cookie = it->d.lustre.it_lock_handle;
+ och->och_lease_handle.cookie = it->it_lock_handle;
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
och->och_flags = it->it_flags;
@@ -504,7 +499,6 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
LASSERT(fd);
if (och) {
- struct ptlrpc_request *req = it->d.lustre.it_data;
struct mdt_body *body;
int rc;
@@ -512,13 +506,19 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
if (rc != 0)
return rc;
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ body = req_capsule_server_get(&it->it_request->rq_pill,
+ &RMF_MDT_BODY);
ll_ioepoch_open(lli, body->ioepoch);
}
LUSTRE_FPRIVATE(file) = fd;
ll_readahead_init(inode, &fd->fd_ras);
fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
+
+ /* ll_cl_context initialize */
+ rwlock_init(&fd->fd_lock);
+ INIT_LIST_HEAD(&fd->fd_lccs);
+
return 0;
}
@@ -574,7 +574,7 @@ int ll_file_open(struct inode *inode, struct file *file)
return 0;
}
- if (!it || !it->d.lustre.it_disposition) {
+ if (!it || !it->it_disposition) {
/* Convert f_flags into access mode. We cannot use file->f_mode,
* because everything but O_ACCMODE mask was stripped from
* there
@@ -644,7 +644,7 @@ restart:
}
} else {
LASSERT(*och_usecount == 0);
- if (!it->d.lustre.it_disposition) {
+ if (!it->it_disposition) {
/* We cannot just request lock handle now, new ELC code
* means that one of other OPEN locks for this file
* could be cancelled, and since blocking ast handler
@@ -681,7 +681,7 @@ restart:
LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
"inode %p: disposition %x, status %d\n", inode,
- it_disposition(it, ~0), it->d.lustre.it_status);
+ it_disposition(it, ~0), it->it_status);
rc = ll_local_open(file, it, fd, *och_p);
if (rc)
@@ -724,7 +724,7 @@ out_openerr:
}
if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
- ptlrpc_req_finished(it->d.lustre.it_data);
+ ptlrpc_req_finished(it->it_request);
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
}
@@ -865,12 +865,12 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
/* already get lease, handle lease lock */
ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- if (it.d.lustre.it_lock_mode == 0 ||
- it.d.lustre.it_lock_bits != MDS_INODELOCK_OPEN) {
+ if (it.it_lock_mode == 0 ||
+ it.it_lock_bits != MDS_INODELOCK_OPEN) {
/* open lock must return for lease */
CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
- PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode,
- it.d.lustre.it_lock_bits);
+ PFID(ll_inode2fid(inode)), it.it_lock_mode,
+ it.it_lock_bits);
rc = -EPROTO;
goto out_close;
}
@@ -880,10 +880,10 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
out_close:
/* Cancel open lock */
- if (it.d.lustre.it_lock_mode != 0) {
+ if (it.it_lock_mode != 0) {
ldlm_lock_decref_and_cancel(&och->och_lease_handle,
- it.d.lustre.it_lock_mode);
- it.d.lustre.it_lock_mode = 0;
+ it.it_lock_mode);
+ it.it_lock_mode = 0;
och->och_lease_handle.cookie = 0ULL;
}
rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
@@ -1178,7 +1178,9 @@ restart:
CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
LBUG();
}
+ ll_cl_add(file, env, io);
result = cl_io_loop(env, io);
+ ll_cl_remove(file, env);
if (args->via_io_subtype == IO_NORMAL)
up_read(&lli->lli_trunc_sem);
if (write_mutex_locked)
@@ -1397,7 +1399,7 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
if (rc)
goto out_unlock;
- rc = oit.d.lustre.it_status;
+ rc = oit.it_status;
if (rc < 0)
goto out_req_free;
@@ -1410,7 +1412,7 @@ out_unlock:
out:
return rc;
out_req_free:
- ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
+ ptlrpc_req_finished((struct ptlrpc_request *)oit.it_request);
goto out;
}
@@ -1698,7 +1700,7 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
out:
/* this one is in place of ll_file_open */
if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
- ptlrpc_req_finished(it->d.lustre.it_data);
+ ptlrpc_req_finished(it->it_request);
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
}
return rc;
@@ -2972,8 +2974,11 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
* here to preserve get_cwd functionality on 2.6.
* Bug 10503
*/
- if (!d_inode(dentry)->i_nlink)
+ if (!d_inode(dentry)->i_nlink) {
+ spin_lock(&inode->i_lock);
d_lustre_invalidate(dentry, 0);
+ spin_unlock(&inode->i_lock);
+ }
ll_lookup_finish_locks(&oit, inode);
} else if (!ll_have_md_lock(d_inode(dentry), &ibits, LCK_MINMODE)) {
@@ -3124,6 +3129,9 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type)
spin_lock(&lli->lli_lock);
/* VFS' acl_permission_check->check_acl will release the refcount */
acl = posix_acl_dup(lli->lli_posix_acl);
+#ifdef CONFIG_FS_POSIX_ACL
+ forget_cached_acl(inode, type);
+#endif
spin_unlock(&lli->lli_lock);
return acl;
@@ -3150,9 +3158,6 @@ int ll_inode_permission(struct inode *inode, int mask)
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
- if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
- return lustre_check_remote_perm(inode, mask);
-
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
rc = generic_permission(inode, mask);
@@ -3601,13 +3606,13 @@ again:
rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
NULL, 0, NULL, 0);
- ptlrpc_req_finished(it.d.lustre.it_data);
- it.d.lustre.it_data = NULL;
+ ptlrpc_req_finished(it.it_request);
+ it.it_request = NULL;
ll_finish_md_op_data(op_data);
- mode = it.d.lustre.it_lock_mode;
- it.d.lustre.it_lock_mode = 0;
+ mode = it.it_lock_mode;
+ it.it_lock_mode = 0;
ll_intent_drop_lock(&it);
if (rc == 0) {
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index d8ea75424e2f..92004a05f9ee 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index 6c00715b438f..396e4e4f0715 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index 12f3e71f48c2..f6be105eeef7 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -100,7 +96,8 @@ int cl_ocd_update(struct obd_device *host,
__u64 flags;
int result;
- if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
+ if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME) &&
+ watched->obd_set_up && !watched->obd_stopping) {
cli = &watched->u.cli;
lco = owner;
flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
@@ -115,9 +112,10 @@ int cl_ocd_update(struct obd_device *host,
mutex_unlock(&lco->lco_lock);
result = 0;
} else {
- CERROR("unexpected notification from %s %s!\n",
+ CERROR("unexpected notification from %s %s (setup:%d,stopping:%d)!\n",
watched->obd_type->typ_name,
- watched->obd_name);
+ watched->obd_name, watched->obd_set_up,
+ watched->obd_stopping);
result = -EINVAL;
}
return result;
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 2df551d3ae6c..2326b40a0870 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 3f2f30b6542c..4d6d589a1677 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -68,6 +64,7 @@ struct ll_dentry_data {
struct lookup_intent *lld_it;
unsigned int lld_sa_generation;
unsigned int lld_invalid:1;
+ unsigned int lld_nfs_dentry:1;
struct rcu_head lld_rcu_head;
};
@@ -76,9 +73,6 @@ struct ll_dentry_data {
#define LLI_INODE_MAGIC 0x111d0de5
#define LLI_INODE_DEAD 0xdeadd00d
-/* remote client permission cache */
-#define REMOTE_PERM_HASHSIZE 16
-
struct ll_getname_data {
struct dir_context ctx;
char *lgd_name; /* points to a buffer with NAME_MAX+1 size */
@@ -86,19 +80,6 @@ struct ll_getname_data {
int lgd_found; /* inode matched? */
};
-/* llite setxid/access permission for user on remote client */
-struct ll_remote_perm {
- struct hlist_node lrp_list;
- uid_t lrp_uid;
- gid_t lrp_gid;
- uid_t lrp_fsuid;
- gid_t lrp_fsgid;
- int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this
- * is access permission with
- * lrp_fsuid/lrp_fsgid.
- */
-};
-
struct ll_grouplock {
struct lu_env *lg_env;
struct cl_io *lg_io;
@@ -133,9 +114,6 @@ struct ll_inode_info {
spinlock_t lli_lock;
struct posix_acl *lli_posix_acl;
- struct hlist_head *lli_remote_perms;
- struct mutex lli_rmtperm_mutex;
-
/* identifying fields for both metadata and data stacks. */
struct lu_fid lli_fid;
/* Parent fid for accessing default stripe data on parent directory
@@ -145,8 +123,6 @@ struct ll_inode_info {
struct list_head lli_close_list;
- unsigned long lli_rmtperm_time;
-
/* handle is to be sent to MDS later on done_writing and setattr.
* Open handle data are needed for the recovery to reconstruct
* the inode state on the MDS. XXX: recovery is not ready yet.
@@ -411,7 +387,7 @@ enum stats_track_type {
#define LL_SBI_FLOCK 0x04
#define LL_SBI_USER_XATTR 0x08 /* support user xattr */
#define LL_SBI_ACL 0x10 /* support ACL */
-#define LL_SBI_RMT_CLIENT 0x40 /* remote client */
+/* LL_SBI_RMT_CLIENT 0x40 remote client */
#define LL_SBI_MDS_CAPA 0x80 /* support mds capa, obsolete */
#define LL_SBI_OSS_CAPA 0x100 /* support oss capa, obsolete */
#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
@@ -433,7 +409,7 @@ enum stats_track_type {
"xattr", \
"acl", \
"???", \
- "rmt_client", \
+ "???", \
"mds_capa", \
"oss_capa", \
"flock", \
@@ -449,26 +425,6 @@ enum stats_track_type {
"xattr", \
}
-#define RCE_HASHES 32
-
-struct rmtacl_ctl_entry {
- struct list_head rce_list;
- pid_t rce_key; /* hash key */
- int rce_ops; /* acl operation type */
-};
-
-struct rmtacl_ctl_table {
- spinlock_t rct_lock;
- struct list_head rct_entries[RCE_HASHES];
-};
-
-#define EE_HASHES 32
-
-struct eacl_table {
- spinlock_t et_lock;
- struct list_head et_entries[EE_HASHES];
-};
-
struct ll_sb_info {
/* this protects pglist and ra_info. It isn't safe to
* grab from interrupt contexts
@@ -497,7 +453,7 @@ struct ll_sb_info {
* any page which is sent to a server as part of a bulk request,
* but is uncommitted to stable storage.
*/
- struct cl_client_cache ll_cache;
+ struct cl_client_cache *ll_cache;
struct lprocfs_stats *ll_ra_stats;
@@ -533,8 +489,6 @@ struct ll_sb_info {
dev_t ll_sdev_orig; /* save s_dev before assign for
* clustered nfs
*/
- struct rmtacl_ctl_table ll_rct;
- struct eacl_table ll_et;
__kernel_fsid_t ll_fsid;
struct kobject ll_kobj; /* sysfs object */
struct super_block *ll_sb; /* struct super_block (for sysfs code)*/
@@ -640,6 +594,8 @@ struct ll_file_data {
* false: unknown failure, should report.
*/
bool fd_write_failed;
+ rwlock_t fd_lock; /* protect lcc list */
+ struct list_head fd_lccs; /* list of ll_cl_context */
};
struct lov_stripe_md;
@@ -715,8 +671,9 @@ void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
int ll_readahead(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue, struct ll_readahead_state *ras,
bool hit);
-struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage);
-void ll_cl_fini(struct ll_cl_context *lcc);
+struct ll_cl_context *ll_cl_find(struct file *file);
+void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io);
+void ll_cl_remove(struct file *file, const struct lu_env *env);
extern const struct address_space_operations ll_aops;
@@ -858,11 +815,11 @@ struct vvp_io_args {
};
struct ll_cl_context {
+ struct list_head lcc_list;
void *lcc_cookie;
+ const struct lu_env *lcc_env;
struct cl_io *lcc_io;
struct cl_page *lcc_page;
- struct lu_env *lcc_env;
- int lcc_refcheck;
};
struct ll_thread_info {
@@ -983,14 +940,6 @@ ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ll_removexattr(struct dentry *dentry, const char *name);
-/* llite/remote_perm.c */
-extern struct kmem_cache *ll_remote_perm_cachep;
-extern struct kmem_cache *ll_rmtperm_hash_cachep;
-
-void free_rmtperm_hash(struct hlist_head *hash);
-int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
-int lustre_check_remote_perm(struct inode *inode, int mask);
-
/**
* Common IO arguments for various VFS I/O interfaces.
*/
@@ -1004,40 +953,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
-/* llite/llite_rmtacl.c */
-#ifdef CONFIG_FS_POSIX_ACL
-struct eacl_entry {
- struct list_head ee_list;
- pid_t ee_key; /* hash key */
- struct lu_fid ee_fid;
- int ee_type; /* ACL type for ACCESS or DEFAULT */
- ext_acl_xattr_header *ee_acl;
-};
-
-u64 rce_ops2valid(int ops);
-struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key);
-int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops);
-int rct_del(struct rmtacl_ctl_table *rct, pid_t key);
-void rct_init(struct rmtacl_ctl_table *rct);
-void rct_fini(struct rmtacl_ctl_table *rct);
-
-void ee_free(struct eacl_entry *ee);
-int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
- ext_acl_xattr_header *header);
-struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
- struct lu_fid *fid, int type);
-void et_search_free(struct eacl_table *et, pid_t key);
-void et_init(struct eacl_table *et);
-void et_fini(struct eacl_table *et);
-#else
-static inline u64 rce_ops2valid(int ops)
-{
- return 0;
-}
-#endif
-
/* statahead.c */
-
#define LL_SA_RPC_MIN 2
#define LL_SA_RPC_DEF 32
#define LL_SA_RPC_MAX 8192
@@ -1281,7 +1197,7 @@ static inline int ll_file_nolock(const struct file *file)
static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
struct lookup_intent *it, __u64 *bits)
{
- if (!it->d.lustre.it_lock_set) {
+ if (!it->it_lock_set) {
struct lustre_handle handle;
/* If this inode is a remote object, it will get two
@@ -1292,36 +1208,26 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
	 * LOOKUP and PERM locks, so revoking either lock will
	 * cause the dcache to be cleared
*/
- if (it->d.lustre.it_remote_lock_mode) {
- handle.cookie = it->d.lustre.it_remote_lock_handle;
+ if (it->it_remote_lock_mode) {
+ handle.cookie = it->it_remote_lock_handle;
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for remote lock %#llx\n",
PFID(ll_inode2fid(inode)), inode,
handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode, NULL);
}
- handle.cookie = it->d.lustre.it_lock_handle;
+ handle.cookie = it->it_lock_handle;
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for lock %#llx\n",
PFID(ll_inode2fid(inode)), inode, handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode,
- &it->d.lustre.it_lock_bits);
- it->d.lustre.it_lock_set = 1;
+ &it->it_lock_bits);
+ it->it_lock_set = 1;
}
if (bits)
- *bits = it->d.lustre.it_lock_bits;
-}
-
-static inline void ll_lock_dcache(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
-}
-
-static inline void ll_unlock_dcache(struct inode *inode)
-{
- spin_unlock(&inode->i_lock);
+ *bits = it->it_lock_bits;
}
static inline int d_lustre_invalid(const struct dentry *dentry)
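
The header changes above replace the one-shot ll_cl_init()/ll_cl_fini() pair with a per-file registry: struct ll_file_data gains an rwlock (fd_lock) protecting a list of ll_cl_context entries (fd_lccs), and ll_cl_find()/ll_cl_add()/ll_cl_remove() look a context up by the caller's lu_env. A standalone sketch of that pattern, with pthread primitives standing in for the kernel ones and all names invented:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lcc {                       /* models struct ll_cl_context */
	struct lcc *next;
	const void *env;           /* key: the caller's lu_env */
	void *io;
};

struct file_data {                 /* models struct ll_file_data */
	pthread_rwlock_t lock;     /* fd_lock */
	struct lcc *head;          /* fd_lccs */
};

static int lcc_add(struct file_data *fd, const void *env, void *io)
{
	struct lcc *c = malloc(sizeof(*c));

	if (!c)
		return -1;
	c->env = env;
	c->io = io;
	pthread_rwlock_wrlock(&fd->lock);
	c->next = fd->head;
	fd->head = c;
	pthread_rwlock_unlock(&fd->lock);
	return 0;
}

static void *lcc_find(struct file_data *fd, const void *env)
{
	struct lcc *c;
	void *io = NULL;

	pthread_rwlock_rdlock(&fd->lock);   /* lookups can run concurrently */
	for (c = fd->head; c; c = c->next)
		if (c->env == env) {
			io = c->io;
			break;
		}
	pthread_rwlock_unlock(&fd->lock);
	return io;
}

static void lcc_remove(struct file_data *fd, const void *env)
{
	struct lcc **p, *dead;

	pthread_rwlock_wrlock(&fd->lock);
	for (p = &fd->head; *p; p = &(*p)->next)
		if ((*p)->env == env) {
			dead = *p;
			*p = dead->next;
			free(dead);
			break;
		}
	pthread_rwlock_unlock(&fd->lock);
}

int main(void)
{
	struct file_data fd = { .head = NULL };
	int env1, io1;

	pthread_rwlock_init(&fd.lock, NULL);
	lcc_add(&fd, &env1, &io1);
	printf("found: %s\n", lcc_find(&fd, &env1) == &io1 ? "yes" : "no");
	lcc_remove(&fd, &env1);
	printf("after remove: %p\n", lcc_find(&fd, &env1));
	return 0;
}
```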
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 96c7e9fc6e5f..546063e728db 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -87,15 +83,11 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
pages = si.totalram - si.totalhigh;
lru_page_max = pages / 2;
- /* initialize ll_cache data */
- atomic_set(&sbi->ll_cache.ccc_users, 0);
- sbi->ll_cache.ccc_lru_max = lru_page_max;
- atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
- spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
- INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
-
- atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
- init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
+ sbi->ll_cache = cl_cache_init(lru_page_max);
+ if (!sbi->ll_cache) {
+ kfree(sbi);
+ return NULL;
+ }
sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
SBI_DEFAULT_READAHEAD_MAX);
@@ -135,6 +127,11 @@ static void ll_free_sbi(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
+ if (sbi->ll_cache) {
+ cl_cache_decref(sbi->ll_cache);
+ sbi->ll_cache = NULL;
+ }
+
kfree(sbi);
}
@@ -175,8 +172,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
- OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR |
- OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
+ OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
+ OBD_CONNECT_64BITHASH |
OBD_CONNECT_EINPROGRESS |
OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
OBD_CONNECT_LAYOUTLOCK |
@@ -217,8 +214,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* real client */
data->ocd_connect_flags |= OBD_CONNECT_REAL;
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
data->ocd_brw_size = MD_MAX_BRW_SIZE;
@@ -311,18 +306,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
sbi->ll_flags &= ~LL_SBI_ACL;
}
- if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
- if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
- sbi->ll_flags |= LL_SBI_RMT_CLIENT;
- LCONSOLE_INFO("client is set as remote by default.\n");
- }
- } else {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
- LCONSOLE_INFO("client claims to be remote, but server rejected, forced to be local.\n");
- }
- }
-
if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
sbi->ll_flags |= LL_SBI_64BIT_HASH;
@@ -356,10 +339,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
- OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
- OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
- OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
- OBD_CONNECT_MAXBYTES |
+ OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
+ OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
+ OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
OBD_CONNECT_EINPROGRESS |
OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
@@ -382,8 +364,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
}
data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
data->ocd_connect_flags,
@@ -446,9 +426,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
* XXX: move this to after cbd setup?
*/
valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- valid |= OBD_MD_FLRMTPERM;
- else if (sbi->ll_flags & LL_SBI_ACL)
+ if (sbi->ll_flags & LL_SBI_ACL)
valid |= OBD_MD_FLACL;
op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
@@ -504,13 +482,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
goto out_root;
}
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- rct_init(&sbi->ll_rct);
- et_init(&sbi->ll_et);
- }
-#endif
-
checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
KEY_CHECKSUM, sizeof(checksum), &checksum,
@@ -518,8 +489,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
cl_sb_init(sb);
err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
- KEY_CACHE_SET, sizeof(sbi->ll_cache),
- &sbi->ll_cache, NULL);
+ KEY_CACHE_SET, sizeof(*sbi->ll_cache),
+ sbi->ll_cache, NULL);
sb->s_root = d_make_root(root);
if (!sb->s_root) {
@@ -564,8 +535,6 @@ out_lock_cn_cb:
out_dt:
obd_disconnect(sbi->ll_dt_exp);
sbi->ll_dt_exp = NULL;
- /* Make sure all OScs are gone, since cl_cache is accessing sbi. */
- obd_zombie_barrier();
out_md_fid:
obd_fid_fini(sbi->ll_md_exp->exp_obd);
out_md:
@@ -608,13 +577,6 @@ static void client_common_put_super(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- et_fini(&sbi->ll_et);
- rct_fini(&sbi->ll_rct);
- }
-#endif
-
ll_close_thread_shutdown(sbi->ll_lcq);
cl_sb_fini(sb);
@@ -622,10 +584,6 @@ static void client_common_put_super(struct super_block *sb)
obd_fid_fini(sbi->ll_dt_exp->exp_obd);
obd_disconnect(sbi->ll_dt_exp);
sbi->ll_dt_exp = NULL;
- /* wait till all OSCs are gone, since cl_cache is accessing sbi.
- * see LU-2543.
- */
- obd_zombie_barrier();
ldebugfs_unregister_mountpoint(sbi);
@@ -704,11 +662,6 @@ static int ll_options(char *options, int *flags)
*flags &= ~tmp;
goto next;
}
- tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
if (tmp) {
*flags |= tmp;
@@ -792,12 +745,9 @@ void ll_lli_init(struct ll_inode_info *lli)
lli->lli_maxbytes = MAX_LFS_FILESIZE;
spin_lock_init(&lli->lli_lock);
lli->lli_posix_acl = NULL;
- lli->lli_remote_perms = NULL;
- mutex_init(&lli->lli_rmtperm_mutex);
/* Do not set lli_fid, it has been initialized already. */
fid_zero(&lli->lli_pfid);
INIT_LIST_HEAD(&lli->lli_close_list);
- lli->lli_rmtperm_time = 0;
lli->lli_pending_och = NULL;
lli->lli_mds_read_och = NULL;
lli->lli_mds_write_och = NULL;
@@ -864,7 +814,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
try_module_get(THIS_MODULE);
/* client additional sb info */
- lsi->lsi_llsbi = sbi = ll_init_sbi(sb);
+ sbi = ll_init_sbi(sb);
+ lsi->lsi_llsbi = sbi;
if (!sbi) {
module_put(THIS_MODULE);
kfree(cfg);
@@ -965,12 +916,12 @@ void ll_put_super(struct super_block *sb)
if (!force) {
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
- !atomic_read(&sbi->ll_cache.ccc_unstable_nr),
+ rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
+ !atomic_read(&sbi->ll_cache->ccc_unstable_nr),
&lwi);
}
- ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
+ ccc_count = atomic_read(&sbi->ll_cache->ccc_unstable_nr);
if (!force && rc != -EINTR)
LASSERTF(!ccc_count, "count: %i\n", ccc_count);
@@ -1078,17 +1029,9 @@ void ll_clear_inode(struct inode *inode)
ll_xattr_cache_destroy(inode);
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- LASSERT(!lli->lli_posix_acl);
- if (lli->lli_remote_perms) {
- free_rmtperm_hash(lli->lli_remote_perms);
- lli->lli_remote_perms = NULL;
- }
- }
#ifdef CONFIG_FS_POSIX_ACL
- else if (lli->lli_posix_acl) {
+ if (lli->lli_posix_acl) {
LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
- LASSERT(!lli->lli_remote_perms);
posix_acl_release(lli->lli_posix_acl);
lli->lli_posix_acl = NULL;
}
@@ -1540,12 +1483,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
lli->lli_maxbytes = MAX_LFS_FILESIZE;
}
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- if (body->valid & OBD_MD_FLRMTPERM)
- ll_update_remote_perm(inode, md->remote_perm);
- }
#ifdef CONFIG_FS_POSIX_ACL
- else if (body->valid & OBD_MD_FLACL) {
+ if (body->valid & OBD_MD_FLACL) {
spin_lock(&lli->lli_lock);
if (lli->lli_posix_acl)
posix_acl_release(lli->lli_posix_acl);
@@ -1979,7 +1918,13 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
* At this point server returns to client's same fid as client
* generated for creating. So using ->fid1 is okay here.
*/
- LASSERT(fid_is_sane(&md.body->fid1));
+ if (!fid_is_sane(&md.body->fid1)) {
+ CERROR("%s: Fid is insane " DFID "\n",
+ ll_get_fsname(sb, NULL, 0),
+ PFID(&md.body->fid1));
+ rc = -EINVAL;
+ goto out;
+ }
*inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
sbi->ll_flags & LL_SBI_32BIT_API),
@@ -2006,11 +1951,11 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
* 3. proc2: refresh layout and layout lock granted
* 4. proc1: to apply a stale layout
*/
- if (it && it->d.lustre.it_lock_mode != 0) {
+ if (it && it->it_lock_mode != 0) {
struct lustre_handle lockh;
struct ldlm_lock *lock;
- lockh.cookie = it->d.lustre.it_lock_handle;
+ lockh.cookie = it->it_lock_handle;
lock = ldlm_handle2lock(&lockh);
LASSERT(lock);
if (ldlm_has_layout(lock)) {
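
A second theme in llite_lib.c is that ll_cache turns from an embedded struct into a shared, reference-counted object obtained from cl_cache_init() and released through cl_cache_decref(). Because the last user now frees the cache, the obd_zombie_barrier() calls that used to wait for OSCs to stop touching sbi can simply be dropped. A toy model of that lifetime rule, using C11 atomics in place of the kernel's; the real cl_cache_* internals are not shown in this patch, so this only sketches the apparent contract:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cl_cache_model {
	atomic_int refcount;
	long lru_max;
};

static struct cl_cache_model *cache_init(long lru_max)
{
	struct cl_cache_model *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	atomic_init(&c->refcount, 1);   /* the mount owns the first ref */
	c->lru_max = lru_max;
	return c;
}

static void cache_incref(struct cl_cache_model *c)
{
	atomic_fetch_add(&c->refcount, 1);   /* each OSC user takes a ref */
}

static void cache_decref(struct cl_cache_model *c)
{
	/* the last reference frees the cache, so nobody has to wait for
	 * the other users (the obd_zombie_barrier() of the old scheme)
	 */
	if (atomic_fetch_sub(&c->refcount, 1) == 1)
		free(c);
}

int main(void)
{
	struct cl_cache_model *c = cache_init(4096);

	if (!c)
		return 1;
	cache_incref(c);    /* an OSC starts using the cache */
	cache_decref(c);    /* unmount drops its ref; cache survives */
	printf("lru_max still readable: %ld\n", c->lru_max);
	cache_decref(c);    /* OSC's ref goes; cache is freed */
	return 0;
}
```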
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 88ef1cac9e0f..66ee5db5fce8 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -200,18 +196,11 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
- /* we grab lli_trunc_sem to exclude truncate case.
- * Otherwise, we could add dirty pages into osc cache
- * while truncate is on-going.
- */
inode = vvp_object_inode(io->ci_obj);
lli = ll_i2info(inode);
- down_read(&lli->lli_trunc_sem);
result = cl_io_loop(env, io);
- up_read(&lli->lli_trunc_sem);
-
cfs_restore_sigs(set);
if (result == 0) {
@@ -315,8 +304,13 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
vio->u.fault.ft_flags = 0;
vio->u.fault.ft_flags_valid = false;
+ /* May call ll_readpage() */
+ ll_cl_add(vma->vm_file, env, io);
+
result = cl_io_loop(env, io);
+ ll_cl_remove(vma->vm_file, env);
+
/* ft_flags are only valid if we reached
* the call to filemap_fault
*/
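
The fault path above publishes the IO context with ll_cl_add() before cl_io_loop(), which may re-enter ll_readpage() and need to find it, and withdraws it with ll_cl_remove() afterwards on every exit. A trivial model of that bracket, with print statements standing in for the real calls:

```c
#include <stdio.h>

static int io_loop(int fail)
{
	return fail ? -5 : 0;   /* -EIO stand-in */
}

static int fault_path(int fail)
{
	int result;

	printf("add context\n");      /* models ll_cl_add(vma->vm_file, env, io) */
	result = io_loop(fail);       /* models cl_io_loop() */
	printf("remove context\n");   /* models ll_cl_remove(); runs on every path */
	return result;
}

int main(void)
{
	printf("ok:   %d\n", fault_path(0));
	printf("fail: %d\n", fault_path(1));
	return 0;
}
```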
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index c1eef6198b25..65972c892731 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -172,6 +168,24 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
/* N.B. d_obtain_alias() drops inode ref on error */
result = d_obtain_alias(inode);
+ if (!IS_ERR(result)) {
+ int rc;
+
+ rc = ll_d_init(result);
+ if (rc < 0) {
+ dput(result);
+ result = ERR_PTR(rc);
+ } else {
+ struct ll_dentry_data *ldd = ll_d2d(result);
+
+ /*
+			 * Need to signal to ll_intent_file_open() that we
+			 * came from NFS, so the open cache needs to be
+			 * enabled for this dentry
+ */
+ ldd->lld_nfs_dentry = 1;
+ }
+ }
return result;
}
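
The NFS export path now runs ll_d_init() on the dentry returned by d_obtain_alias(), dropping the reference on failure, and on success tags the dentry with lld_nfs_dentry so ll_intent_file_open() can enable the open cache for it. A userspace model of that error-handling shape, with all names invented:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct dentry_model {
	int nfs_flag;   /* models ldd->lld_nfs_dentry */
};

static int d_init(struct dentry_model *d, int fail)
{
	(void)d;
	return fail ? -ENOMEM : 0;
}

static struct dentry_model *obtain_for_nfs(int fail)
{
	struct dentry_model *d = calloc(1, sizeof(*d));
	int rc;

	if (!d)
		return NULL;              /* models d_obtain_alias() failing */

	rc = d_init(d, fail);             /* models ll_d_init(result) */
	if (rc < 0) {
		free(d);                  /* models dput(result) */
		return NULL;              /* models ERR_PTR(rc) */
	}
	d->nfs_flag = 1;                  /* enable the open cache for NFS */
	return d;
}

int main(void)
{
	struct dentry_model *d = obtain_for_nfs(0);

	printf("nfs_flag=%d\n", d ? d->nfs_flag : -1);
	free(d);
	printf("failure path returns %p\n", (void *)obtain_for_nfs(1));
	return 0;
}
```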
diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
deleted file mode 100644
index 8509b07cb5c7..000000000000
--- a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/llite_rmtacl.c
- *
- * Lustre Remote User Access Control List.
- *
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#ifdef CONFIG_FS_POSIX_ACL
-
-#include "../include/lustre_lite.h"
-#include "../include/lustre_eacl.h"
-#include "llite_internal.h"
-
-static inline __u32 rce_hashfunc(uid_t id)
-{
- return id & (RCE_HASHES - 1);
-}
-
-static inline __u32 ee_hashfunc(uid_t id)
-{
- return id & (EE_HASHES - 1);
-}
-
-u64 rce_ops2valid(int ops)
-{
- switch (ops) {
- case RMT_LSETFACL:
- return OBD_MD_FLRMTLSETFACL;
- case RMT_LGETFACL:
- return OBD_MD_FLRMTLGETFACL;
- case RMT_RSETFACL:
- return OBD_MD_FLRMTRSETFACL;
- case RMT_RGETFACL:
- return OBD_MD_FLRMTRGETFACL;
- default:
- return 0;
- }
-}
-
-static struct rmtacl_ctl_entry *rce_alloc(pid_t key, int ops)
-{
- struct rmtacl_ctl_entry *rce;
-
- rce = kzalloc(sizeof(*rce), GFP_NOFS);
- if (!rce)
- return NULL;
-
- INIT_LIST_HEAD(&rce->rce_list);
- rce->rce_key = key;
- rce->rce_ops = ops;
-
- return rce;
-}
-
-static void rce_free(struct rmtacl_ctl_entry *rce)
-{
- if (!list_empty(&rce->rce_list))
- list_del(&rce->rce_list);
-
- kfree(rce);
-}
-
-static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct,
- pid_t key)
-{
- struct rmtacl_ctl_entry *rce;
- struct list_head *head = &rct->rct_entries[rce_hashfunc(key)];
-
- list_for_each_entry(rce, head, rce_list)
- if (rce->rce_key == key)
- return rce;
-
- return NULL;
-}
-
-struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key)
-{
- struct rmtacl_ctl_entry *rce;
-
- spin_lock(&rct->rct_lock);
- rce = __rct_search(rct, key);
- spin_unlock(&rct->rct_lock);
- return rce;
-}
-
-int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
-{
- struct rmtacl_ctl_entry *rce, *e;
-
- rce = rce_alloc(key, ops);
- if (!rce)
- return -ENOMEM;
-
- spin_lock(&rct->rct_lock);
- e = __rct_search(rct, key);
- if (unlikely(e)) {
- CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n",
- (int)key, ops);
- rce_free(e);
- }
- list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
- spin_unlock(&rct->rct_lock);
-
- return 0;
-}
-
-int rct_del(struct rmtacl_ctl_table *rct, pid_t key)
-{
- struct rmtacl_ctl_entry *rce;
-
- spin_lock(&rct->rct_lock);
- rce = __rct_search(rct, key);
- if (rce)
- rce_free(rce);
- spin_unlock(&rct->rct_lock);
-
- return rce ? 0 : -ENOENT;
-}
-
-void rct_init(struct rmtacl_ctl_table *rct)
-{
- int i;
-
- spin_lock_init(&rct->rct_lock);
- for (i = 0; i < RCE_HASHES; i++)
- INIT_LIST_HEAD(&rct->rct_entries[i]);
-}
-
-void rct_fini(struct rmtacl_ctl_table *rct)
-{
- struct rmtacl_ctl_entry *rce;
- int i;
-
- spin_lock(&rct->rct_lock);
- for (i = 0; i < RCE_HASHES; i++)
- while (!list_empty(&rct->rct_entries[i])) {
- rce = list_entry(rct->rct_entries[i].next,
- struct rmtacl_ctl_entry, rce_list);
- rce_free(rce);
- }
- spin_unlock(&rct->rct_lock);
-}
-
-static struct eacl_entry *ee_alloc(pid_t key, struct lu_fid *fid, int type,
- ext_acl_xattr_header *header)
-{
- struct eacl_entry *ee;
-
- ee = kzalloc(sizeof(*ee), GFP_NOFS);
- if (!ee)
- return NULL;
-
- INIT_LIST_HEAD(&ee->ee_list);
- ee->ee_key = key;
- ee->ee_fid = *fid;
- ee->ee_type = type;
- ee->ee_acl = header;
-
- return ee;
-}
-
-void ee_free(struct eacl_entry *ee)
-{
- if (!list_empty(&ee->ee_list))
- list_del(&ee->ee_list);
-
- if (ee->ee_acl)
- lustre_ext_acl_xattr_free(ee->ee_acl);
-
- kfree(ee);
-}
-
-static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key,
- struct lu_fid *fid, int type)
-{
- struct eacl_entry *ee;
- struct list_head *head = &et->et_entries[ee_hashfunc(key)];
-
- LASSERT(fid);
- list_for_each_entry(ee, head, ee_list)
- if (ee->ee_key == key) {
- if (lu_fid_eq(&ee->ee_fid, fid) &&
- ee->ee_type == type) {
- list_del_init(&ee->ee_list);
- return ee;
- }
- }
-
- return NULL;
-}
-
-struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
- struct lu_fid *fid, int type)
-{
- struct eacl_entry *ee;
-
- spin_lock(&et->et_lock);
- ee = __et_search_del(et, key, fid, type);
- spin_unlock(&et->et_lock);
- return ee;
-}
-
-void et_search_free(struct eacl_table *et, pid_t key)
-{
- struct eacl_entry *ee, *next;
- struct list_head *head = &et->et_entries[ee_hashfunc(key)];
-
- spin_lock(&et->et_lock);
- list_for_each_entry_safe(ee, next, head, ee_list)
- if (ee->ee_key == key)
- ee_free(ee);
-
- spin_unlock(&et->et_lock);
-}
-
-int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
- ext_acl_xattr_header *header)
-{
- struct eacl_entry *ee, *e;
-
- ee = ee_alloc(key, fid, type, header);
- if (!ee)
- return -ENOMEM;
-
- spin_lock(&et->et_lock);
- e = __et_search_del(et, key, fid, type);
- if (unlikely(e)) {
- CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n",
- (int)key, PFID(fid), type);
- ee_free(e);
- }
- list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
- spin_unlock(&et->et_lock);
-
- return 0;
-}
-
-void et_init(struct eacl_table *et)
-{
- int i;
-
- spin_lock_init(&et->et_lock);
- for (i = 0; i < EE_HASHES; i++)
- INIT_LIST_HEAD(&et->et_entries[i]);
-}
-
-void et_fini(struct eacl_table *et)
-{
- struct eacl_entry *ee;
- int i;
-
- spin_lock(&et->et_lock);
- for (i = 0; i < EE_HASHES; i++)
- while (!list_empty(&et->et_entries[i])) {
- ee = list_entry(et->et_entries[i].next,
- struct eacl_entry, ee_list);
- ee_free(ee);
- }
- spin_unlock(&et->et_lock);
-}
-
-#endif
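
With remote-client mode gone, the pid-keyed rmtacl control table and eacl table above are deleted outright. For reference, the retired structure was a fixed power-of-two array of hash buckets guarded by a single lock; a standalone model of that shape, with a pthread mutex standing in for the kernel spinlock and all names invented:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define HASHES 32   /* power of two, like the deleted RCE_HASHES/EE_HASHES */

struct entry {
	struct entry *next;
	int key;   /* pid_t in the deleted code */
	int ops;
};

struct table {
	pthread_mutex_t lock;          /* spinlock in the kernel original */
	struct entry *buckets[HASHES];
};

static unsigned int hashfn(int key)
{
	return (unsigned int)key & (HASHES - 1);
}

static int tbl_add(struct table *t, int key, int ops)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	e->key = key;
	e->ops = ops;
	pthread_mutex_lock(&t->lock);
	e->next = t->buckets[hashfn(key)];
	t->buckets[hashfn(key)] = e;
	pthread_mutex_unlock(&t->lock);
	return 0;
}

static struct entry *tbl_find(struct table *t, int key)
{
	struct entry *e;

	pthread_mutex_lock(&t->lock);
	for (e = t->buckets[hashfn(key)]; e; e = e->next)
		if (e->key == key)
			break;
	pthread_mutex_unlock(&t->lock);
	return e;
}

int main(void)
{
	struct table t = { .buckets = { NULL } };
	struct entry *e;

	pthread_mutex_init(&t.lock, NULL);
	tbl_add(&t, 1234, 7);
	e = tbl_find(&t, 1234);
	printf("ops for 1234: %d\n", e ? e->ops : -1);
	printf("lookup miss: %p\n", (void *)tbl_find(&t, 9999));
	return 0;
}
```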
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
deleted file mode 100644
index 813a9a354e5f..000000000000
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ /dev/null
@@ -1,883 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/*
- * linux/drivers/block/loop.c
- *
- * Written by Theodore Ts'o, 3/29/93
- *
- * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
- * permitted under the GNU General Public License.
- *
- * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
- * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
- *
- * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
- *
- * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
- *
- * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
- *
- * Loadable modules and other fixes by AK, 1998
- *
- * Maximum number of loop devices now dynamic via max_loop module parameter.
- * Russell Kroll <rkroll@exploits.org> 19990701
- *
- * Maximum number of loop devices when compiled-in now selectable by passing
- * max_loop=<1-255> to the kernel on boot.
- * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
- *
- * Completely rewrite request handling to be make_request_fn style and
- * non blocking, pushing work to a helper thread. Lots of fixes from
- * Al Viro too.
- * Jens Axboe <axboe@suse.de>, Nov 2000
- *
- * Support up to 256 loop devices
- * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
- *
- * Support for falling back on the write file operation when the address space
- * operations prepare_write and/or commit_write are not available on the
- * backing filesystem.
- * Anton Altaparmakov, 16 Feb 2005
- *
- * Still To Fix:
- * - Advisory locking is ignored here.
- * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
- *
- */
-
-#include <linux/module.h>
-
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/major.h>
-#include <linux/wait.h>
-#include <linux/blkdev.h>
-#include <linux/blkpg.h>
-#include <linux/init.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/writeback.h>
-#include <linux/buffer_head.h> /* for invalidate_bdev() */
-#include <linux/completion.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/pagevec.h>
-#include <linux/uaccess.h>
-
-#include "../include/lustre_lib.h"
-#include "../include/lustre_lite.h"
-#include "llite_internal.h"
-
-#define LLOOP_MAX_SEGMENTS LNET_MAX_IOV
-
-/* Possible states of device */
-enum {
- LLOOP_UNBOUND,
- LLOOP_BOUND,
- LLOOP_RUNDOWN,
-};
-
-struct lloop_device {
- int lo_number;
- int lo_refcnt;
- loff_t lo_offset;
- loff_t lo_sizelimit;
- int lo_flags;
- struct file *lo_backing_file;
- struct block_device *lo_device;
- unsigned lo_blocksize;
-
- gfp_t old_gfp_mask;
-
- spinlock_t lo_lock;
- struct bio *lo_bio;
- struct bio *lo_biotail;
- int lo_state;
- struct semaphore lo_sem;
- struct mutex lo_ctl_mutex;
- atomic_t lo_pending;
- wait_queue_head_t lo_bh_wait;
-
- struct request_queue *lo_queue;
-
- const struct lu_env *lo_env;
- struct cl_io lo_io;
- struct ll_dio_pages lo_pvec;
-
- /* data to handle bio for lustre. */
- struct lo_request_data {
- struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
- loff_t lrd_offsets[LLOOP_MAX_SEGMENTS];
- } lo_requests[1];
-};
-
-/*
- * Loop flags
- */
-enum {
- LO_FLAGS_READ_ONLY = 1,
-};
-
-static int lloop_major;
-#define MAX_LOOP_DEFAULT 16
-static int max_loop = MAX_LOOP_DEFAULT;
-static struct lloop_device *loop_dev;
-static struct gendisk **disks;
-static struct mutex lloop_mutex;
-static void *ll_iocontrol_magic;
-
-static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
-{
- loff_t size, offset, loopsize;
-
- /* Compute loopsize in bytes */
- size = i_size_read(file->f_mapping->host);
- offset = lo->lo_offset;
- loopsize = size - offset;
- if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
- loopsize = lo->lo_sizelimit;
-
- /*
- * Unfortunately, if we want to do I/O on the device,
- * the number of 512-byte sectors has to fit into a sector_t.
- */
- return loopsize >> 9;
-}
-
-static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
-{
- const struct lu_env *env = lo->lo_env;
- struct cl_io *io = &lo->lo_io;
- struct inode *inode = file_inode(lo->lo_backing_file);
- struct cl_object *obj = ll_i2info(inode)->lli_clob;
- pgoff_t offset;
- int ret;
- int rw;
- u32 page_count = 0;
- struct bio_vec bvec;
- struct bvec_iter iter;
- struct bio *bio;
- ssize_t bytes;
-
- struct ll_dio_pages *pvec = &lo->lo_pvec;
- struct page **pages = pvec->ldp_pages;
- loff_t *offsets = pvec->ldp_offsets;
-
- truncate_inode_pages(inode->i_mapping, 0);
-
- /* initialize the IO */
- memset(io, 0, sizeof(*io));
- io->ci_obj = obj;
- ret = cl_io_init(env, io, CIT_MISC, obj);
- if (ret)
- return io->ci_result;
- io->ci_lockreq = CILR_NEVER;
-
- rw = head->bi_rw;
- for (bio = head; bio ; bio = bio->bi_next) {
- LASSERT(rw == bio->bi_rw);
-
- offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
- bio_for_each_segment(bvec, bio, iter) {
- BUG_ON(bvec.bv_offset != 0);
- BUG_ON(bvec.bv_len != PAGE_SIZE);
-
- pages[page_count] = bvec.bv_page;
- offsets[page_count] = offset;
- page_count++;
- offset += bvec.bv_len;
- }
- LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
- }
-
- ll_stats_ops_tally(ll_i2sbi(inode),
- (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
- page_count);
-
- pvec->ldp_size = page_count << PAGE_SHIFT;
- pvec->ldp_nr = page_count;
-
- /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
- * write those pages into OST. Even worse case is that more pages
- * would be asked to write out to swap space, and then finally get here
- * again.
- * Unfortunately this is NOT easy to fix.
- * Thoughts on solution:
- * 0. Define a reserved pool for cl_pages, which could be a list of
- * pre-allocated cl_pages;
- * 1. Define a new operation in cl_object_operations{}, says clo_depth,
- * which measures how many layers for this lustre object. Generally
- * speaking, the depth would be 2, one for llite, and one for lovsub.
- * However, for SNS, there will be more since we need additional page
- * to store parity;
- * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
- * pool. Afterwards, the clio would allocate the pages from reserved
- * pool, this guarantees we needn't allocate the cl_pages from
- * generic cl_page slab cache.
- * Of course, if there is NOT enough pages in the pool, we might
- * be asked to write less pages once, this purely depends on
- * implementation. Anyway, we should be careful to avoid deadlocking.
- */
- inode_lock(inode);
- bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
- inode_unlock(inode);
- cl_io_fini(env, io);
- return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
-}
-
-/*
- * Add bio to back of pending list
- */
-static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&lo->lo_lock, flags);
- if (lo->lo_biotail) {
- lo->lo_biotail->bi_next = bio;
- lo->lo_biotail = bio;
- } else {
- lo->lo_bio = lo->lo_biotail = bio;
- }
- spin_unlock_irqrestore(&lo->lo_lock, flags);
-
- atomic_inc(&lo->lo_pending);
- if (waitqueue_active(&lo->lo_bh_wait))
- wake_up(&lo->lo_bh_wait);
-}
-
-/*
- * Grab first pending buffer
- */
-static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
-{
- struct bio *first;
- struct bio **bio;
- unsigned int count = 0;
- unsigned int page_count = 0;
- int rw;
-
- spin_lock_irq(&lo->lo_lock);
- first = lo->lo_bio;
- if (unlikely(!first)) {
- spin_unlock_irq(&lo->lo_lock);
- return 0;
- }
-
- /* TODO: need to split the bio, too bad. */
- LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);
-
- rw = first->bi_rw;
- bio = &lo->lo_bio;
- while (*bio && (*bio)->bi_rw == rw) {
- CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u\n",
- (unsigned long long)(*bio)->bi_iter.bi_sector,
- (*bio)->bi_iter.bi_size,
- page_count, (*bio)->bi_vcnt);
- if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
- break;
-
- page_count += (*bio)->bi_vcnt;
- count++;
- bio = &(*bio)->bi_next;
- }
- if (*bio) {
- /* Some of bios can't be mergeable. */
- lo->lo_bio = *bio;
- *bio = NULL;
- } else {
- /* Hit the end of queue */
- lo->lo_biotail = NULL;
- lo->lo_bio = NULL;
- }
- *req = first;
- spin_unlock_irq(&lo->lo_lock);
- return count;
-}
-
-static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
-{
- struct lloop_device *lo = q->queuedata;
- int rw = bio_rw(old_bio);
- int inactive;
-
- blk_queue_split(q, &old_bio, q->bio_split);
-
- if (!lo)
- goto err;
-
- CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
- (unsigned long long)old_bio->bi_iter.bi_sector,
- old_bio->bi_iter.bi_size);
-
- spin_lock_irq(&lo->lo_lock);
- inactive = lo->lo_state != LLOOP_BOUND;
- spin_unlock_irq(&lo->lo_lock);
- if (inactive)
- goto err;
-
- if (rw == WRITE) {
- if (lo->lo_flags & LO_FLAGS_READ_ONLY)
- goto err;
- } else if (rw == READA) {
- rw = READ;
- } else if (rw != READ) {
- CERROR("lloop: unknown command (%x)\n", rw);
- goto err;
- }
- loop_add_bio(lo, old_bio);
- return BLK_QC_T_NONE;
-err:
- bio_io_error(old_bio);
- return BLK_QC_T_NONE;
-}
-
-static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
-{
- int ret;
-
- ret = do_bio_lustrebacked(lo, bio);
- while (bio) {
- struct bio *tmp = bio->bi_next;
-
- bio->bi_next = NULL;
- bio->bi_error = ret;
- bio_endio(bio);
- bio = tmp;
- }
-}
-
-static inline int loop_active(struct lloop_device *lo)
-{
- return atomic_read(&lo->lo_pending) ||
- (lo->lo_state == LLOOP_RUNDOWN);
-}
-
-/*
- * worker thread that handles reads/writes to file backed loop devices,
- * to avoid blocking in our make_request_fn.
- */
-static int loop_thread(void *data)
-{
- struct lloop_device *lo = data;
- struct bio *bio;
- unsigned int count;
- unsigned long times = 0;
- unsigned long total_count = 0;
-
- struct lu_env *env;
- int refcheck;
- int ret = 0;
-
- set_user_nice(current, MIN_NICE);
-
- lo->lo_state = LLOOP_BOUND;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
- ret = PTR_ERR(env);
- goto out;
- }
-
- lo->lo_env = env;
- memset(&lo->lo_pvec, 0, sizeof(lo->lo_pvec));
- lo->lo_pvec.ldp_pages = lo->lo_requests[0].lrd_pages;
- lo->lo_pvec.ldp_offsets = lo->lo_requests[0].lrd_offsets;
-
- /*
- * up sem, we are running
- */
- up(&lo->lo_sem);
-
- for (;;) {
- wait_event(lo->lo_bh_wait, loop_active(lo));
- if (!atomic_read(&lo->lo_pending)) {
- int exiting = 0;
-
- spin_lock_irq(&lo->lo_lock);
- exiting = (lo->lo_state == LLOOP_RUNDOWN);
- spin_unlock_irq(&lo->lo_lock);
- if (exiting)
- break;
- }
-
- bio = NULL;
- count = loop_get_bio(lo, &bio);
- if (!count) {
- CWARN("lloop(minor: %d): missing bio\n", lo->lo_number);
- continue;
- }
-
- total_count += count;
- if (total_count < count) { /* overflow */
- total_count = count;
- times = 1;
- } else {
- times++;
- }
- if ((times & 127) == 0) {
- CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
- total_count, times, total_count / times);
- }
-
- LASSERT(bio);
- LASSERT(count <= atomic_read(&lo->lo_pending));
- loop_handle_bio(lo, bio);
- atomic_sub(count, &lo->lo_pending);
- }
- cl_env_put(env, &refcheck);
-
-out:
- up(&lo->lo_sem);
- return ret;
-}
-
-static int loop_set_fd(struct lloop_device *lo, struct file *unused,
- struct block_device *bdev, struct file *file)
-{
- struct inode *inode;
- struct address_space *mapping;
- int lo_flags = 0;
- int error;
- loff_t size;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- error = -EBUSY;
- if (lo->lo_state != LLOOP_UNBOUND)
- goto out;
-
- mapping = file->f_mapping;
- inode = mapping->host;
-
- error = -EINVAL;
- if (!S_ISREG(inode->i_mode) || inode->i_sb->s_magic != LL_SUPER_MAGIC)
- goto out;
-
- if (!(file->f_mode & FMODE_WRITE))
- lo_flags |= LO_FLAGS_READ_ONLY;
-
- size = get_loop_size(lo, file);
-
- if ((loff_t)(sector_t)size != size) {
- error = -EFBIG;
- goto out;
- }
-
- /* remove all pages in cache so as dirty pages not to be existent. */
- truncate_inode_pages(mapping, 0);
-
- set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
-
- lo->lo_blocksize = PAGE_SIZE;
- lo->lo_device = bdev;
- lo->lo_flags = lo_flags;
- lo->lo_backing_file = file;
- lo->lo_sizelimit = 0;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
-
- lo->lo_bio = lo->lo_biotail = NULL;
-
- /*
- * set queue make_request_fn, and add limits based on lower level
- * device
- */
- blk_queue_make_request(lo->lo_queue, loop_make_request);
- lo->lo_queue->queuedata = lo;
-
- /* queue parameters */
- CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
- blk_queue_logical_block_size(lo->lo_queue,
- (unsigned short)PAGE_SIZE);
- blk_queue_max_hw_sectors(lo->lo_queue,
- LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
- blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
-
- set_capacity(disks[lo->lo_number], size);
- bd_set_size(bdev, size << 9);
-
- set_blocksize(bdev, lo->lo_blocksize);
-
- kthread_run(loop_thread, lo, "lloop%d", lo->lo_number);
- down(&lo->lo_sem);
- return 0;
-
-out:
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
- return error;
-}
-
-static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
- int count)
-{
- struct file *filp = lo->lo_backing_file;
- gfp_t gfp = lo->old_gfp_mask;
-
- if (lo->lo_state != LLOOP_BOUND)
- return -ENXIO;
-
- if (lo->lo_refcnt > count) /* we needed one fd for the ioctl */
- return -EBUSY;
-
- if (!filp)
- return -EINVAL;
-
- spin_lock_irq(&lo->lo_lock);
- lo->lo_state = LLOOP_RUNDOWN;
- spin_unlock_irq(&lo->lo_lock);
- wake_up(&lo->lo_bh_wait);
-
- down(&lo->lo_sem);
- lo->lo_backing_file = NULL;
- lo->lo_device = NULL;
- lo->lo_offset = 0;
- lo->lo_sizelimit = 0;
- lo->lo_flags = 0;
- invalidate_bdev(bdev);
- set_capacity(disks[lo->lo_number], 0);
- bd_set_size(bdev, 0);
- mapping_set_gfp_mask(filp->f_mapping, gfp);
- lo->lo_state = LLOOP_UNBOUND;
- fput(filp);
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
- return 0;
-}
-
-static int lo_open(struct block_device *bdev, fmode_t mode)
-{
- struct lloop_device *lo = bdev->bd_disk->private_data;
-
- mutex_lock(&lo->lo_ctl_mutex);
- lo->lo_refcnt++;
- mutex_unlock(&lo->lo_ctl_mutex);
-
- return 0;
-}
-
-static void lo_release(struct gendisk *disk, fmode_t mode)
-{
- struct lloop_device *lo = disk->private_data;
-
- mutex_lock(&lo->lo_ctl_mutex);
- --lo->lo_refcnt;
- mutex_unlock(&lo->lo_ctl_mutex);
-}
-
-/* lloop device node's ioctl function. */
-static int lo_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct lloop_device *lo = bdev->bd_disk->private_data;
- struct inode *inode = NULL;
- int err = 0;
-
- mutex_lock(&lloop_mutex);
- switch (cmd) {
- case LL_IOC_LLOOP_DETACH: {
- err = loop_clr_fd(lo, bdev, 2);
- if (err == 0)
- blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
- break;
- }
-
- case LL_IOC_LLOOP_INFO: {
- struct lu_fid fid;
-
- if (!lo->lo_backing_file) {
- err = -ENOENT;
- break;
- }
- if (!inode)
- inode = file_inode(lo->lo_backing_file);
- if (lo->lo_state == LLOOP_BOUND)
- fid = ll_i2info(inode)->lli_fid;
- else
- fid_zero(&fid);
-
- if (copy_to_user((void __user *)arg, &fid, sizeof(fid)))
- err = -EFAULT;
- break;
- }
-
- default:
- err = -EINVAL;
- break;
- }
- mutex_unlock(&lloop_mutex);
-
- return err;
-}
-
-static struct block_device_operations lo_fops = {
- .owner = THIS_MODULE,
- .open = lo_open,
- .release = lo_release,
- .ioctl = lo_ioctl,
-};
-
-/* dynamic iocontrol callback.
- * This callback is registered in lloop_init and will be called by
- * ll_iocontrol_call.
- *
- * This is a llite regular file ioctl function. It takes the responsibility
- * of attaching or detaching a file by a lloop's device number.
- */
-static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
- unsigned int cmd, unsigned long arg,
- void *magic, int *rcp)
-{
- struct lloop_device *lo = NULL;
- struct block_device *bdev = NULL;
- int err = 0;
- dev_t dev;
-
- if (magic != ll_iocontrol_magic)
- return LLIOC_CONT;
-
- if (!disks) {
- err = -ENODEV;
- goto out1;
- }
-
- CWARN("Enter llop_ioctl\n");
-
- mutex_lock(&lloop_mutex);
- switch (cmd) {
- case LL_IOC_LLOOP_ATTACH: {
- struct lloop_device *lo_free = NULL;
- int i;
-
- for (i = 0; i < max_loop; i++, lo = NULL) {
- lo = &loop_dev[i];
- if (lo->lo_state == LLOOP_UNBOUND) {
- if (!lo_free)
- lo_free = lo;
- continue;
- }
- if (file_inode(lo->lo_backing_file) == file_inode(file))
- break;
- }
- if (lo || !lo_free) {
- err = -EBUSY;
- goto out;
- }
-
- lo = lo_free;
- dev = MKDEV(lloop_major, lo->lo_number);
-
- /* quit if the used pointer is writable */
- if (put_user((long)old_encode_dev(dev), (long __user *)arg)) {
- err = -EFAULT;
- goto out;
- }
-
- bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
- if (IS_ERR(bdev)) {
- err = PTR_ERR(bdev);
- goto out;
- }
-
- get_file(file);
- err = loop_set_fd(lo, NULL, bdev, file);
- if (err) {
- fput(file);
- blkdev_put(bdev, 0);
- }
-
- break;
- }
-
- case LL_IOC_LLOOP_DETACH_BYDEV: {
- int minor;
-
- dev = old_decode_dev(arg);
- if (MAJOR(dev) != lloop_major) {
- err = -EINVAL;
- goto out;
- }
-
- minor = MINOR(dev);
- if (minor > max_loop - 1) {
- err = -EINVAL;
- goto out;
- }
-
- lo = &loop_dev[minor];
- if (lo->lo_state != LLOOP_BOUND) {
- err = -EINVAL;
- goto out;
- }
-
- bdev = lo->lo_device;
- err = loop_clr_fd(lo, bdev, 1);
- if (err == 0)
- blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
-
- break;
- }
-
- default:
- err = -EINVAL;
- break;
- }
-
-out:
- mutex_unlock(&lloop_mutex);
-out1:
- if (rcp)
- *rcp = err;
- return LLIOC_STOP;
-}
-
-static int __init lloop_init(void)
-{
- int i;
- unsigned int cmdlist[] = {
- LL_IOC_LLOOP_ATTACH,
- LL_IOC_LLOOP_DETACH_BYDEV,
- };
-
- if (max_loop < 1 || max_loop > 256) {
- max_loop = MAX_LOOP_DEFAULT;
- CWARN("lloop: invalid max_loop (must be between 1 and 256), using default (%u)\n",
- max_loop);
- }
-
- lloop_major = register_blkdev(0, "lloop");
- if (lloop_major < 0)
- return -EIO;
-
- CDEBUG(D_CONFIG, "registered lloop major %d with %u minors\n",
- lloop_major, max_loop);
-
- ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist);
- if (!ll_iocontrol_magic)
- goto out_mem1;
-
- loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL);
- if (!loop_dev)
- goto out_mem1;
-
- disks = kcalloc(max_loop, sizeof(*disks), GFP_KERNEL);
- if (!disks)
- goto out_mem2;
-
- for (i = 0; i < max_loop; i++) {
- disks[i] = alloc_disk(1);
- if (!disks[i])
- goto out_mem3;
- }
-
- mutex_init(&lloop_mutex);
-
- for (i = 0; i < max_loop; i++) {
- struct lloop_device *lo = &loop_dev[i];
- struct gendisk *disk = disks[i];
-
- lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
- if (!lo->lo_queue)
- goto out_mem4;
-
- mutex_init(&lo->lo_ctl_mutex);
- sema_init(&lo->lo_sem, 0);
- init_waitqueue_head(&lo->lo_bh_wait);
- lo->lo_number = i;
- spin_lock_init(&lo->lo_lock);
- disk->major = lloop_major;
- disk->first_minor = i;
- disk->fops = &lo_fops;
- sprintf(disk->disk_name, "lloop%d", i);
- disk->private_data = lo;
- disk->queue = lo->lo_queue;
- }
-
- /* We cannot fail after we call this, so another loop!*/
- for (i = 0; i < max_loop; i++)
- add_disk(disks[i]);
- return 0;
-
-out_mem4:
- while (i--)
- blk_cleanup_queue(loop_dev[i].lo_queue);
- i = max_loop;
-out_mem3:
- while (i--)
- put_disk(disks[i]);
- kfree(disks);
-out_mem2:
- kfree(loop_dev);
-out_mem1:
- unregister_blkdev(lloop_major, "lloop");
- ll_iocontrol_unregister(ll_iocontrol_magic);
- CERROR("lloop: ran out of memory\n");
- return -ENOMEM;
-}
-
-static void lloop_exit(void)
-{
- int i;
-
- ll_iocontrol_unregister(ll_iocontrol_magic);
- for (i = 0; i < max_loop; i++) {
- del_gendisk(disks[i]);
- blk_cleanup_queue(loop_dev[i].lo_queue);
- put_disk(disks[i]);
- }
-
- unregister_blkdev(lloop_major, "lloop");
-
- kfree(disks);
- kfree(loop_dev);
-}
-
-module_param(max_loop, int, 0444);
-MODULE_PARM_DESC(max_loop, "maximum of lloop_device");
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre virtual block device");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(lloop_init);
-module_exit(lloop_exit);
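
lloop, Lustre's private loop-device driver, is removed wholesale; the stock loop driver covers the use case. Its request path was a classic locked producer/consumer queue: submitters appended bios to a tail pointer under lo_lock and woke a worker thread, which drained the longest run of same-direction bios per batch. A userspace model with a condition variable in place of the waitqueue; names are invented:

```c
#include <pthread.h>
#include <stdio.h>

struct bio {
	struct bio *next;
	int rw;   /* 0 = read, 1 = write */
};

static struct bio *head, *tail;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t more = PTHREAD_COND_INITIALIZER;

static void add_bio(struct bio *b)          /* models loop_add_bio() */
{
	pthread_mutex_lock(&lock);
	b->next = NULL;
	if (tail)
		tail->next = b;
	else
		head = b;
	tail = b;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&more);         /* models wake_up(&lo->lo_bh_wait) */
}

static struct bio *get_batch(void)          /* models loop_get_bio() */
{
	struct bio *first, **p;

	pthread_mutex_lock(&lock);
	while (!head)
		pthread_cond_wait(&more, &lock);
	first = head;
	/* take the longest prefix with one direction, as loop_get_bio() did */
	for (p = &head; *p && (*p)->rw == first->rw; p = &(*p)->next)
		;
	if (*p) {                           /* split mid-queue */
		head = *p;
		*p = NULL;
	} else {                            /* drained the whole queue */
		head = NULL;
		tail = NULL;
	}
	pthread_mutex_unlock(&lock);
	return first;
}

int main(void)
{
	struct bio a = { NULL, 0 }, b = { NULL, 0 }, c = { NULL, 1 };
	struct bio *batch;
	int n = 0;

	add_bio(&a);
	add_bio(&b);
	add_bio(&c);   /* opposite direction: left for the next batch */

	for (batch = get_batch(); batch; batch = batch->next)
		n++;
	printf("first batch holds %d bios\n", n);   /* 2 */
	return 0;
}
```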
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 55d62eb11957..e86bf3c53be3 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -180,11 +176,7 @@ LUSTRE_RO_ATTR(filesfree);
static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%s client\n",
- sbi->ll_flags & LL_SBI_RMT_CLIENT ? "remote" : "local");
+ return sprintf(buf, "local client\n");
}
LUSTRE_RO_ATTR(client_type);
@@ -364,7 +356,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
{
struct super_block *sb = m->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_cache *cache = &sbi->ll_cache;
+ struct cl_client_cache *cache = sbi->ll_cache;
int shift = 20 - PAGE_SHIFT;
int max_cached_mb;
int unused_mb;
@@ -391,7 +383,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_cache *cache = &sbi->ll_cache;
+ struct cl_client_cache *cache = sbi->ll_cache;
struct lu_env *env;
int refcheck;
int mult, rc, pages_number;
@@ -830,7 +822,7 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kobj);
- struct cl_client_cache *cache = &sbi->ll_cache;
+ struct cl_client_cache *cache = sbi->ll_cache;
int pages, mb;
pages = atomic_read(&cache->ccc_unstable_nr);
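
The lproc handlers above convert between pages and megabytes with shift = 20 - PAGE_SHIFT, which is 8 on 4 KiB-page systems, i.e. 256 pages per MB. A quick standalone check of that arithmetic:

```c
#include <stdio.h>

int main(void)
{
	const int page_shift = 12;           /* PAGE_SHIFT with 4 KiB pages */
	const int shift = 20 - page_shift;   /* pages <-> MB, as in the hunk */
	long max_pages = 131072;             /* an example cache limit */

	printf("%ld pages = %ld MB\n", max_pages, max_pages >> shift); /* 512 */
	printf("64 MB = %ld pages\n", 64L << shift);                   /* 16384 */
	return 0;
}
```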
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 5eba0ebae10f..3664bfd0178b 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -144,7 +140,7 @@ static void ll_invalidate_negative_children(struct inode *dir)
{
struct dentry *dentry, *tmp_subdir;
- ll_lock_dcache(dir);
+ spin_lock(&dir->i_lock);
hlist_for_each_entry(dentry, &dir->i_dentry, d_u.d_alias) {
spin_lock(&dentry->d_lock);
if (!list_empty(&dentry->d_subdirs)) {
@@ -159,7 +155,7 @@ static void ll_invalidate_negative_children(struct inode *dir)
}
spin_unlock(&dentry->d_lock);
}
- ll_unlock_dcache(dir);
+ spin_unlock(&dir->i_lock);
}
int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
@@ -318,9 +314,10 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
if (hlist_empty(&inode->i_dentry))
return NULL;
- discon_alias = invalid_alias = NULL;
+ discon_alias = NULL;
+ invalid_alias = NULL;
- ll_lock_dcache(inode);
+ spin_lock(&inode->i_lock);
hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
LASSERT(alias != dentry);
@@ -345,7 +342,7 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
dget_dlock(alias);
spin_unlock(&alias->d_lock);
}
- ll_unlock_dcache(inode);
+ spin_unlock(&inode->i_lock);
return alias;
}
@@ -396,7 +393,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
* when I return
*/
CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it,
- it->d.lustre.it_disposition);
+ it->it_disposition);
if (!it_disposition(it, DISP_LOOKUP_NEG)) {
rc = ll_prep_inode(&inode, request, (*de)->d_sb, it);
if (rc)
@@ -448,7 +445,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
/* Check that parent has UPDATE lock. */
struct lookup_intent parent_it = {
.it_op = IT_GETATTR,
- .d.lustre.it_lock_handle = 0 };
+ .it_lock_handle = 0 };
if (md_revalidate_lock(ll_i2mdexp(parent), &parent_it,
&ll_i2info(parent)->lli_fid, NULL)) {
@@ -625,13 +622,10 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
if (d_really_is_positive(dentry) && it_disposition(it, DISP_OPEN_OPEN)) {
/* Open dentry. */
if (S_ISFIFO(d_inode(dentry)->i_mode)) {
- /* We cannot call open here as it would
- * deadlock.
+ /* We cannot call open here as it might
+ * deadlock. This case is unreachable in
+ * practice because of OBD_CONNECT_NODEVOH.
*/
- if (it_disposition(it, DISP_ENQ_OPEN_REF))
- ptlrpc_req_finished(
- (struct ptlrpc_request *)
- it->d.lustre.it_data);
rc = finish_no_open(file, de);
} else {
file->private_data = it;
@@ -662,10 +656,10 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
struct ll_sb_info *sbi = ll_i2sbi(dir);
int rc;
- LASSERT(it && it->d.lustre.it_disposition);
+ LASSERT(it && it->it_disposition);
LASSERT(it_disposition(it, DISP_ENQ_CREATE_REF));
- request = it->d.lustre.it_data;
+ request = it->it_request;
it_clear_disposition(it, DISP_ENQ_CREATE_REF);
rc = ll_prep_inode(&inode, request, dir->i_sb, it);
if (rc) {
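
Throughout this series the intent-lock fields lose one level of nesting: it->d.lustre.it_lock_handle and friends become it->it_lock_handle and so on, and the trivial ll_lock_dcache()/ll_unlock_dcache() wrappers give way to direct spin_lock(&inode->i_lock) calls. The flattening in miniature, with both struct shapes invented to mirror the rename:

```c
#include <stdio.h>

struct lustre_intent_data {
	int it_lock_set;
};

struct lookup_intent_old {
	union {
		struct lustre_intent_data lustre;
	} d;
};

struct lookup_intent_new {
	int it_lock_set;   /* same field, hoisted one level up */
};

int main(void)
{
	struct lookup_intent_old o = { .d.lustre.it_lock_set = 1 };
	struct lookup_intent_new n = { .it_lock_set = 1 };

	printf("old: %d  new: %d\n", o.d.lustre.it_lock_set, n.it_lock_set);
	return 0;
}
```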
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
deleted file mode 100644
index e9d25317cd28..000000000000
--- a/drivers/staging/lustre/lustre/llite/remote_perm.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/remote_perm.c
- *
- * Lustre Permission Cache for Remote Client
- *
- * Author: Lai Siyao <lsy@clusterfs.com>
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/module.h>
-#include <linux/types.h>
-
-#include "../include/lustre_lite.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_disk.h"
-#include "../include/lustre_param.h"
-#include "llite_internal.h"
-
-struct kmem_cache *ll_remote_perm_cachep;
-struct kmem_cache *ll_rmtperm_hash_cachep;
-
-static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
-{
- struct ll_remote_perm *lrp;
-
- lrp = kmem_cache_zalloc(ll_remote_perm_cachep, GFP_KERNEL);
- if (lrp)
- INIT_HLIST_NODE(&lrp->lrp_list);
- return lrp;
-}
-
-static inline void free_ll_remote_perm(struct ll_remote_perm *lrp)
-{
- if (!lrp)
- return;
-
- if (!hlist_unhashed(&lrp->lrp_list))
- hlist_del(&lrp->lrp_list);
- kmem_cache_free(ll_remote_perm_cachep, lrp);
-}
-
-static struct hlist_head *alloc_rmtperm_hash(void)
-{
- struct hlist_head *hash;
- int i;
-
- hash = kmem_cache_zalloc(ll_rmtperm_hash_cachep, GFP_NOFS);
- if (!hash)
- return NULL;
-
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
- INIT_HLIST_HEAD(hash + i);
-
- return hash;
-}
-
-void free_rmtperm_hash(struct hlist_head *hash)
-{
- int i;
- struct ll_remote_perm *lrp;
- struct hlist_node *next;
-
- if (!hash)
- return;
-
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
- hlist_for_each_entry_safe(lrp, next, hash + i, lrp_list)
- free_ll_remote_perm(lrp);
- kmem_cache_free(ll_rmtperm_hash_cachep, hash);
-}
-
-static inline int remote_perm_hashfunc(uid_t uid)
-{
- return uid & (REMOTE_PERM_HASHSIZE - 1);
-}
-
-/* NB: setxid permission is not checked here, instead it's done on
- * MDT when client get remote permission.
- */
-static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
-{
- struct hlist_head *head;
- struct ll_remote_perm *lrp;
- int found = 0, rc;
-
- if (!lli->lli_remote_perms)
- return -ENOENT;
-
- head = lli->lli_remote_perms +
- remote_perm_hashfunc(from_kuid(&init_user_ns, current_uid()));
-
- spin_lock(&lli->lli_lock);
- hlist_for_each_entry(lrp, head, lrp_list) {
- if (lrp->lrp_uid != from_kuid(&init_user_ns, current_uid()))
- continue;
- if (lrp->lrp_gid != from_kgid(&init_user_ns, current_gid()))
- continue;
- if (lrp->lrp_fsuid != from_kuid(&init_user_ns, current_fsuid()))
- continue;
- if (lrp->lrp_fsgid != from_kgid(&init_user_ns, current_fsgid()))
- continue;
- found = 1;
- break;
- }
-
- if (!found) {
- rc = -ENOENT;
- goto out;
- }
-
- CDEBUG(D_SEC, "found remote perm: %u/%u/%u/%u - %#x\n",
- lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
- lrp->lrp_access_perm);
- rc = ((lrp->lrp_access_perm & mask) == mask) ? 0 : -EACCES;
-
-out:
- spin_unlock(&lli->lli_lock);
- return rc;
-}
-
-int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_remote_perm *lrp = NULL, *tmp = NULL;
- struct hlist_head *head, *perm_hash = NULL;
-
- LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
-
-#if 0
- if (perm->rp_uid != current->uid ||
- perm->rp_gid != current->gid ||
- perm->rp_fsuid != current->fsuid ||
- perm->rp_fsgid != current->fsgid) {
- /* user might setxid in this small period */
- CDEBUG(D_SEC,
- "remote perm user %u/%u/%u/%u != current %u/%u/%u/%u\n",
- perm->rp_uid, perm->rp_gid, perm->rp_fsuid,
- perm->rp_fsgid, current->uid, current->gid,
- current->fsuid, current->fsgid);
- return -EAGAIN;
- }
-#endif
-
- if (!lli->lli_remote_perms) {
- perm_hash = alloc_rmtperm_hash();
- if (!perm_hash) {
- CERROR("alloc lli_remote_perms failed!\n");
- return -ENOMEM;
- }
- }
-
- spin_lock(&lli->lli_lock);
-
- if (!lli->lli_remote_perms)
- lli->lli_remote_perms = perm_hash;
- else
- free_rmtperm_hash(perm_hash);
-
- head = lli->lli_remote_perms + remote_perm_hashfunc(perm->rp_uid);
-
-again:
- hlist_for_each_entry(tmp, head, lrp_list) {
- if (tmp->lrp_uid != perm->rp_uid)
- continue;
- if (tmp->lrp_gid != perm->rp_gid)
- continue;
- if (tmp->lrp_fsuid != perm->rp_fsuid)
- continue;
- if (tmp->lrp_fsgid != perm->rp_fsgid)
- continue;
- free_ll_remote_perm(lrp);
- lrp = tmp;
- break;
- }
-
- if (!lrp) {
- spin_unlock(&lli->lli_lock);
- lrp = alloc_ll_remote_perm();
- if (!lrp) {
- CERROR("alloc memory for ll_remote_perm failed!\n");
- return -ENOMEM;
- }
- spin_lock(&lli->lli_lock);
- goto again;
- }
-
- lrp->lrp_access_perm = perm->rp_access_perm;
- if (lrp != tmp) {
- lrp->lrp_uid = perm->rp_uid;
- lrp->lrp_gid = perm->rp_gid;
- lrp->lrp_fsuid = perm->rp_fsuid;
- lrp->lrp_fsgid = perm->rp_fsgid;
- hlist_add_head(&lrp->lrp_list, head);
- }
- lli->lli_rmtperm_time = cfs_time_current();
- spin_unlock(&lli->lli_lock);
-
- CDEBUG(D_SEC, "new remote perm@%p: %u/%u/%u/%u - %#x\n",
- lrp, lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
- lrp->lrp_access_perm);
-
- return 0;
-}
-
-int lustre_check_remote_perm(struct inode *inode, int mask)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- struct mdt_remote_perm *perm;
- unsigned long save;
- int i = 0, rc;
-
- do {
- save = lli->lli_rmtperm_time;
- rc = do_check_remote_perm(lli, mask);
- if (!rc || (rc != -ENOENT && i))
- break;
-
- might_sleep();
-
- mutex_lock(&lli->lli_rmtperm_mutex);
- /* check again */
- if (save != lli->lli_rmtperm_time) {
- rc = do_check_remote_perm(lli, mask);
- if (!rc || (rc != -ENOENT && i)) {
- mutex_unlock(&lli->lli_rmtperm_mutex);
- break;
- }
- }
-
- if (i++ > 5) {
- CERROR("check remote perm falls in dead loop!\n");
- LBUG();
- }
-
- rc = md_get_remote_perm(sbi->ll_md_exp, ll_inode2fid(inode),
- ll_i2suppgid(inode), &req);
- if (rc) {
- mutex_unlock(&lli->lli_rmtperm_mutex);
- break;
- }
-
- perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (unlikely(!perm)) {
- mutex_unlock(&lli->lli_rmtperm_mutex);
- rc = -EPROTO;
- break;
- }
-
- rc = ll_update_remote_perm(inode, perm);
- mutex_unlock(&lli->lli_rmtperm_mutex);
- if (rc == -ENOMEM)
- break;
-
- ptlrpc_req_finished(req);
- req = NULL;
- } while (1);
- ptlrpc_req_finished(req);
- return rc;
-}
-
-#if 0 /* NB: remote perms can't be freed in ll_mdc_blocking_ast of UPDATE lock,
- * because it will fail sanity test 48.
- */
-void ll_free_remote_perms(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct hlist_head *hash = lli->lli_remote_perms;
- struct ll_remote_perm *lrp;
- struct hlist_node *node, *next;
- int i;
-
- LASSERT(hash);
-
- spin_lock(&lli->lli_lock);
-
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
- hlist_for_each_entry_safe(lrp, node, next, hash + i, lrp_list)
- free_ll_remote_perm(lrp);
- }
-
- spin_unlock(&lli->lli_lock);
-}
-#endif
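
One pattern in the deleted ll_update_remote_perm() is worth noting before it goes: never allocate while holding a spinlock — drop the lock, allocate, retake it, then re-scan ("goto again"), because another thread may have inserted a matching entry in the window. A minimal userspace sketch of that unlock/allocate/relock/retry shape, with a pthread mutex standing in for lli_lock (the kernel code uses a spinlock precisely because allocation can block); all names here are illustrative, not Lustre APIs:

#include <pthread.h>
#include <stdlib.h>

struct entry { int key; struct entry *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

static struct entry *find_locked(int key)
{
        struct entry *e;

        for (e = head; e; e = e->next)
                if (e->key == key)
                        return e;
        return NULL;
}

int upsert(int key)
{
        struct entry *e, *fresh = NULL;

        pthread_mutex_lock(&lock);
again:
        e = find_locked(key);
        if (!e) {
                if (!fresh) {
                        /* Drop the lock across the blocking allocation. */
                        pthread_mutex_unlock(&lock);
                        fresh = calloc(1, sizeof(*fresh));
                        if (!fresh)
                                return -1;      /* -ENOMEM in kernel terms */
                        pthread_mutex_lock(&lock);
                        goto again;             /* the list may have changed */
                }
                fresh->key = key;
                fresh->next = head;
                head = fresh;
                fresh = NULL;                   /* consumed by the insert */
        }
        pthread_mutex_unlock(&lock);
        free(fresh);    /* non-NULL only if we lost the insert race */
        return 0;
}
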
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 336397773fbb..87393c4bd51e 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -59,84 +55,6 @@
#include "llite_internal.h"
#include "../include/linux/lustre_compat25.h"
-/**
- * Finalizes cl-data before exiting typical address_space operation. Dual to
- * ll_cl_init().
- */
-void ll_cl_fini(struct ll_cl_context *lcc)
-{
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
-
- LASSERT(lcc->lcc_cookie == current);
- LASSERT(env);
-
- if (page) {
- lu_ref_del(&page->cp_reference, "cl_io", io);
- cl_page_put(env, page);
- }
-
- cl_env_put(env, &lcc->lcc_refcheck);
-}
-
-/**
- * Initializes common cl-data at the typical address_space operation entry
- * point.
- */
-struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_object *clob;
- struct vvp_io *vio;
-
- int refcheck;
- int result = 0;
-
- clob = ll_i2info(file_inode(file))->lli_clob;
- LASSERT(clob);
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return ERR_CAST(env);
-
- lcc = &ll_env_info(env)->lti_io_ctx;
- memset(lcc, 0, sizeof(*lcc));
- lcc->lcc_env = env;
- lcc->lcc_refcheck = refcheck;
- lcc->lcc_cookie = current;
-
- vio = vvp_env_io(env);
- io = vio->vui_cl.cis_io;
- lcc->lcc_io = io;
- if (!io)
- result = -EIO;
-
- if (result == 0 && vmpage) {
- struct cl_page *page;
-
- LASSERT(io->ci_state == CIS_IO_GOING);
- LASSERT(vio->vui_fd == LUSTRE_FPRIVATE(file));
- page = cl_page_find(env, clob, vmpage->index, vmpage,
- CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- lcc->lcc_page = page;
- lu_ref_add(&page->cp_reference, "cl_io", io);
- result = 0;
- } else {
- result = PTR_ERR(page);
- }
- }
- if (result) {
- ll_cl_fini(lcc);
- lcc = ERR_PTR(result);
- }
-
- return lcc;
-}
-
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
/**
@@ -1112,17 +1030,70 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
return result;
}
+struct ll_cl_context *ll_cl_find(struct file *file)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_cl_context *lcc;
+ struct ll_cl_context *found = NULL;
+
+ read_lock(&fd->fd_lock);
+ list_for_each_entry(lcc, &fd->fd_lccs, lcc_list) {
+ if (lcc->lcc_cookie == current) {
+ found = lcc;
+ break;
+ }
+ }
+ read_unlock(&fd->fd_lock);
+
+ return found;
+}
+
+void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
+
+ memset(lcc, 0, sizeof(*lcc));
+ INIT_LIST_HEAD(&lcc->lcc_list);
+ lcc->lcc_cookie = current;
+ lcc->lcc_env = env;
+ lcc->lcc_io = io;
+
+ write_lock(&fd->fd_lock);
+ list_add(&lcc->lcc_list, &fd->fd_lccs);
+ write_unlock(&fd->fd_lock);
+}
+
+void ll_cl_remove(struct file *file, const struct lu_env *env)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
+
+ write_lock(&fd->fd_lock);
+ list_del_init(&lcc->lcc_list);
+ write_unlock(&fd->fd_lock);
+}
+
int ll_readpage(struct file *file, struct page *vmpage)
{
+ struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob;
struct ll_cl_context *lcc;
+ const struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
int result;
- lcc = ll_cl_init(file, vmpage);
- if (!IS_ERR(lcc)) {
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
+ lcc = ll_cl_find(file);
+ if (!lcc) {
+ unlock_page(vmpage);
+ return -EIO;
+ }
+ env = lcc->lcc_env;
+ io = lcc->lcc_io;
+ LASSERT(io->ci_state == CIS_IO_GOING);
+ page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
LASSERT(page->cp_type == CPT_CACHEABLE);
if (likely(!PageUptodate(vmpage))) {
cl_page_assume(env, io, page);
@@ -1132,10 +1103,10 @@ int ll_readpage(struct file *file, struct page *vmpage)
unlock_page(vmpage);
result = 0;
}
- ll_cl_fini(lcc);
+ cl_page_put(env, page);
} else {
unlock_page(vmpage);
- result = PTR_ERR(lcc);
+ result = PTR_ERR(page);
}
return result;
}
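
The hunks above replace the old ll_cl_init()/ll_cl_fini() pair with a small per-thread registry: the outer I/O path publishes its context on the file's fd_lccs list under fd_lock, and ->readpage/->write_begin later look it up by lcc_cookie == current, returning -EIO when no outer context exists. A compact userspace analogue of that registry, with a pthread rwlock in place of fd_lock and purely illustrative names:

#include <pthread.h>

struct io_ctx {
        pthread_t owner;        /* plays the role of lcc_cookie */
        struct io_ctx *next;
};

struct file_state {
        pthread_rwlock_t lock;  /* plays the role of fd_lock */
        struct io_ctx *ctxs;    /* plays the role of fd_lccs */
};

void ctx_add(struct file_state *fs, struct io_ctx *ctx)
{
        ctx->owner = pthread_self();
        pthread_rwlock_wrlock(&fs->lock);
        ctx->next = fs->ctxs;
        fs->ctxs = ctx;
        pthread_rwlock_unlock(&fs->lock);
}

struct io_ctx *ctx_find(struct file_state *fs)
{
        struct io_ctx *c, *found = NULL;

        pthread_rwlock_rdlock(&fs->lock);
        for (c = fs->ctxs; c; c = c->next) {
                if (pthread_equal(c->owner, pthread_self())) {
                        found = c;
                        break;
                }
        }
        pthread_rwlock_unlock(&fs->lock);
        return found;   /* NULL means no outer I/O context: -EIO above */
}

void ctx_remove(struct file_state *fs, struct io_ctx *ctx)
{
        struct io_ctx **pp;

        pthread_rwlock_wrlock(&fs->lock);
        for (pp = &fs->ctxs; *pp; pp = &(*pp)->next) {
                if (*pp == ctx) {
                        *pp = ctx->next;
                        break;
                }
        }
        pthread_rwlock_unlock(&fs->lock);
}
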
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index c12a048fce59..d98c7acc0832 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -489,7 +485,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata)
{
struct ll_cl_context *lcc;
- struct lu_env *env;
+ const struct lu_env *env;
struct cl_io *io;
struct cl_page *page;
struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
@@ -501,9 +497,9 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);
- lcc = ll_cl_init(file, NULL);
- if (IS_ERR(lcc)) {
- result = PTR_ERR(lcc);
+ lcc = ll_cl_find(file);
+ if (!lcc) {
+ result = -EIO;
goto out;
}
@@ -579,8 +575,6 @@ out:
unlock_page(vmpage);
put_page(vmpage);
}
- if (!IS_ERR(lcc))
- ll_cl_fini(lcc);
} else {
*pagep = vmpage;
*fsdata = lcc;
@@ -593,7 +587,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
struct page *vmpage, void *fsdata)
{
struct ll_cl_context *lcc = fsdata;
- struct lu_env *env;
+ const struct lu_env *env;
struct cl_io *io;
struct vvp_io *vio;
struct cl_page *page;
@@ -631,6 +625,10 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
} else {
cl_page_disown(env, io, page);
+ lcc->lcc_page = NULL;
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+
/* page list is not contiguous now, commit it now */
unplug = true;
}
@@ -639,7 +637,6 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
result = vvp_io_write_commit(env, io);
- ll_cl_fini(lcc);
return result >= 0 ? copied : result;
}
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 6322f88661e8..f77524294c27 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -650,7 +646,7 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
}
}
- it->d.lustre.it_lock_handle = entry->se_handle;
+ it->it_lock_handle = entry->se_handle;
rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
if (rc != 1) {
rc = -EAGAIN;
@@ -704,7 +700,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
* process enqueues lock on child with parent lock held, eg.
* unlink.
*/
- handle = it->d.lustre.it_lock_handle;
+ handle = it->it_lock_handle;
ll_intent_drop_lock(it);
}
@@ -854,7 +850,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
{
struct inode *inode = d_inode(dentry);
struct lookup_intent it = { .it_op = IT_GETATTR,
- .d.lustre.it_lock_handle = 0 };
+ .it_lock_handle = 0 };
struct md_enqueue_info *minfo;
struct ldlm_enqueue_info *einfo;
int rc;
@@ -869,7 +865,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
NULL);
if (rc == 1) {
- entry->se_handle = it.d.lustre.it_lock_handle;
+ entry->se_handle = it.it_lock_handle;
ll_intent_release(&it);
return 1;
}
@@ -1573,7 +1569,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) {
struct inode *inode = entry->se_inode;
struct lookup_intent it = { .it_op = IT_GETATTR,
- .d.lustre.it_lock_handle =
+ .it_lock_handle =
entry->se_handle };
__u64 bits;
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 415750b0bff4..3dd7e0eb0b54 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -118,19 +114,6 @@ static int __init lustre_init(void)
if (!ll_file_data_slab)
goto out_cache;
- ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
- sizeof(struct ll_remote_perm),
- 0, 0, NULL);
- if (!ll_remote_perm_cachep)
- goto out_cache;
-
- ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
- REMOTE_PERM_HASHSIZE *
- sizeof(struct list_head),
- 0, 0, NULL);
- if (!ll_rmtperm_hash_cachep)
- goto out_cache;
-
llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
if (IS_ERR_OR_NULL(llite_root)) {
rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
@@ -194,8 +177,6 @@ out_debugfs:
out_cache:
kmem_cache_destroy(ll_inode_cachep);
kmem_cache_destroy(ll_file_data_slab);
- kmem_cache_destroy(ll_remote_perm_cachep);
- kmem_cache_destroy(ll_rmtperm_hash_cachep);
return rc;
}
@@ -213,10 +194,6 @@ static void __exit lustre_exit(void)
vvp_global_fini();
kmem_cache_destroy(ll_inode_cachep);
- kmem_cache_destroy(ll_rmtperm_hash_cachep);
-
- kmem_cache_destroy(ll_remote_perm_cachep);
-
kmem_cache_destroy(ll_file_data_slab);
}
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 3fc736ccf85e..8c8bdfe1ad71 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 47101de1c020..e623216e962d 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -150,8 +146,8 @@ struct lu_context_key vvp_session_key = {
.lct_fini = vvp_session_key_fini
};
-void *vvp_thread_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+static void *vvp_thread_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
{
struct vvp_thread_info *vti;
@@ -161,8 +157,8 @@ void *vvp_thread_key_init(const struct lu_context *ctx,
return vti;
}
-void vvp_thread_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+static void vvp_thread_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
{
struct vvp_thread_info *vti = data;
@@ -564,7 +560,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
- pos = *(loff_t *) v;
+ pos = *(loff_t *)v;
vvp_pgcache_id_unpack(pos, &id);
sbi = f->private;
clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index 27b9b0a01f32..79fc428461ed 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 5bf9592ae5d2..94916dcc6caa 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -47,8 +43,8 @@
#include "llite_internal.h"
#include "vvp_internal.h"
-struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
+static struct vvp_io *cl2vvp_io(const struct lu_env *env,
+ const struct cl_io_slice *slice)
{
struct vvp_io *vio;
@@ -954,7 +950,8 @@ static int vvp_io_write_start(const struct lu_env *env,
* out-of-order writes.
*/
ll_merge_attr(env, inode);
- pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
+ pos = i_size_read(inode);
+ io->u.ci_wr.wr.crw_pos = pos;
vio->vui_iocb->ki_pos = pos;
} else {
LASSERT(vio->vui_iocb->ki_pos == pos);
@@ -1259,7 +1256,7 @@ static int vvp_io_read_page(const struct lu_env *env,
return 0;
}
-void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
+static void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
CLOBINVRNT(env, ios->cis_io->ci_obj,
vvp_object_invariant(ios->cis_io->ci_obj));
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index f5bd6c22e112..64be0c9df35b 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 18c9df7ebdda..2c520b0bf6ca 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 6cd2af7a958f..2e566d90bb94 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
index fb886291a4e2..9fe9d6c0a7d1 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_req.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_req.c
@@ -60,10 +60,10 @@ static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
* - o_ioepoch,
*
*/
-void vvp_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
+static void vvp_req_attr_set(const struct lu_env *env,
+ const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *attr, u64 flags)
{
struct inode *inode;
struct obdo *oa;
@@ -87,8 +87,8 @@ void vvp_req_attr_set(const struct lu_env *env,
JOBSTATS_JOBID_SIZE);
}
-void vvp_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
+static void vvp_req_completion(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret)
{
struct vvp_req *vrq;
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 608014b0dbcd..98303cf85815 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -111,11 +107,6 @@ int ll_setxattr_common(struct inode *inode, const char *name,
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *req = NULL;
int xattr_type, rc;
-#ifdef CONFIG_FS_POSIX_ACL
- struct rmtacl_ctl_entry *rce = NULL;
- posix_acl_xattr_header *new_value = NULL;
- ext_acl_xattr_header *acl = NULL;
-#endif
const char *pv = value;
xattr_type = get_xattr_type(name);
@@ -143,62 +134,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
strcmp(name, "security.selinux") == 0)
return -EOPNOTSUPP;
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- (xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T)) {
- rce = rct_search(&sbi->ll_rct, current_pid());
- if (!rce ||
- (rce->rce_ops != RMT_LSETFACL &&
- rce->rce_ops != RMT_RSETFACL))
- return -EOPNOTSUPP;
-
- if (rce->rce_ops == RMT_LSETFACL) {
- struct eacl_entry *ee;
-
- ee = et_search_del(&sbi->ll_et, current_pid(),
- ll_inode2fid(inode), xattr_type);
- if (valid & OBD_MD_FLXATTR) {
- acl = lustre_acl_xattr_merge2ext(
- (posix_acl_xattr_header *)value,
- size, ee->ee_acl);
- if (IS_ERR(acl)) {
- ee_free(ee);
- return PTR_ERR(acl);
- }
- size = CFS_ACL_XATTR_SIZE(\
- le32_to_cpu(acl->a_count), \
- ext_acl_xattr);
- pv = (const char *)acl;
- }
- ee_free(ee);
- } else if (rce->rce_ops == RMT_RSETFACL) {
- rc = lustre_posix_acl_xattr_filter(
- (posix_acl_xattr_header *)value,
- size, &new_value);
- if (unlikely(rc < 0))
- return rc;
- size = rc;
-
- pv = (const char *)new_value;
- } else {
- return -EOPNOTSUPP;
- }
-
- valid |= rce_ops2valid(rce->rce_ops);
- }
-#endif
rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
valid, name, pv, size, 0, flags,
ll_i2suppgid(inode), &req);
-#ifdef CONFIG_FS_POSIX_ACL
- /*
- * Release the posix ACL space.
- */
- kfree(new_value);
- if (acl)
- lustre_ext_acl_xattr_free(acl);
-#endif
if (rc) {
if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
@@ -288,7 +226,6 @@ int ll_getxattr_common(struct inode *inode, const char *name,
struct mdt_body *body;
int xattr_type, rc;
void *xdata;
- struct rmtacl_ctl_entry *rce = NULL;
struct ll_inode_info *lli = ll_i2info(inode);
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
@@ -319,24 +256,11 @@ int ll_getxattr_common(struct inode *inode, const char *name,
return -EOPNOTSUPP;
#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- (xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T)) {
- rce = rct_search(&sbi->ll_rct, current_pid());
- if (!rce ||
- (rce->rce_ops != RMT_LSETFACL &&
- rce->rce_ops != RMT_LGETFACL &&
- rce->rce_ops != RMT_RSETFACL &&
- rce->rce_ops != RMT_RGETFACL))
- return -EOPNOTSUPP;
- }
-
/* posix acl is under protection of LOOKUP lock. when calling to this,
* we just have path resolution to the target inode, so we have great
* chance that cached ACL is uptodate.
*/
- if (xattr_type == XATTR_ACL_ACCESS_T &&
- !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
+ if (xattr_type == XATTR_ACL_ACCESS_T) {
struct posix_acl *acl;
spin_lock(&lli->lli_lock);
@@ -378,9 +302,7 @@ do_getxattr:
} else {
getxattr_nocache:
rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
- name, NULL, 0, size, 0, &req);
-
+ valid, name, NULL, 0, size, 0, &req);
if (rc < 0)
goto out_xattr;
@@ -417,25 +339,6 @@ getxattr_nocache:
rc = body->eadatasize;
}
-#ifdef CONFIG_FS_POSIX_ACL
- if (rce && rce->rce_ops == RMT_LSETFACL) {
- ext_acl_xattr_header *acl;
-
- acl = lustre_posix_acl_xattr_2ext(buffer, rc);
- if (IS_ERR(acl)) {
- rc = PTR_ERR(acl);
- goto out;
- }
-
- rc = ee_add(&sbi->ll_et, current_pid(), ll_inode2fid(inode),
- xattr_type, acl);
- if (unlikely(rc < 0)) {
- lustre_ext_acl_xattr_free(acl);
- goto out;
- }
- }
-#endif
-
out_xattr:
if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
LCONSOLE_INFO(
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index d7e17abbe361..8089da8143d9 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -288,8 +288,8 @@ static int ll_xattr_find_get_lock(struct inode *inode,
LCK_PR);
if (mode != 0) {
/* fake oit in mdc_revalidate_lock() manner */
- oit->d.lustre.it_lock_handle = lockh.cookie;
- oit->d.lustre.it_lock_mode = mode;
+ oit->it_lock_handle = lockh.cookie;
+ oit->it_lock_mode = mode;
goto out;
}
}
@@ -315,7 +315,7 @@ static int ll_xattr_find_get_lock(struct inode *inode,
return rc;
}
- *req = (struct ptlrpc_request *)oit->d.lustre.it_data;
+ *req = oit->it_request;
out:
down_write(&lli->lli_xattrs_list_rwsem);
mutex_unlock(&lli->lli_xattrs_enq_lock);
@@ -362,10 +362,10 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
goto out_maybe_drop;
}
- if (oit->d.lustre.it_status < 0) {
+ if (oit->it_status < 0) {
CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
- oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
- rc = oit->d.lustre.it_status;
+ oit->it_status, PFID(ll_inode2fid(inode)));
+ rc = oit->it_status;
/* xattr data is so large that we don't want to cache it */
if (rc == -ERANGE)
rc = -EAGAIN;
@@ -448,8 +448,8 @@ out_destroy:
up_write(&lli->lli_xattrs_list_rwsem);
ldlm_lock_decref_and_cancel((struct lustre_handle *)
- &oit->d.lustre.it_lock_handle,
- oit->d.lustre.it_lock_mode);
+ &oit->it_lock_handle,
+ oit->it_lock_mode);
goto out_no_unlock;
}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index 378691b2a062..a3d170aa6fd2 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index e0958eaed054..2f58fdab8d1e 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -84,11 +80,11 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
/*
* We got LOOKUP lock, but we really need attrs.
*/
- pmode = it->d.lustre.it_lock_mode;
+ pmode = it->it_lock_mode;
if (pmode) {
- plock.cookie = it->d.lustre.it_lock_handle;
- it->d.lustre.it_lock_mode = 0;
- it->d.lustre.it_data = NULL;
+ plock.cookie = it->it_lock_handle;
+ it->it_lock_mode = 0;
+ it->it_request = NULL;
}
LASSERT(fid_is_sane(&body->fid1));
@@ -134,14 +130,14 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
* maintain dcache consistency. Thus drop UPDATE|PERM lock here
* and put LOOKUP in request.
*/
- if (it->d.lustre.it_lock_mode != 0) {
- it->d.lustre.it_remote_lock_handle =
- it->d.lustre.it_lock_handle;
- it->d.lustre.it_remote_lock_mode = it->d.lustre.it_lock_mode;
+ if (it->it_lock_mode != 0) {
+ it->it_remote_lock_handle =
+ it->it_lock_handle;
+ it->it_remote_lock_mode = it->it_lock_mode;
}
- it->d.lustre.it_lock_handle = plock.cookie;
- it->d.lustre.it_lock_mode = pmode;
+ it->it_lock_handle = plock.cookie;
+ it->it_lock_mode = pmode;
out_free_op_data:
kfree(op_data);
@@ -201,9 +197,9 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
* Nothing is found, do not access body->fid1 as it is zero and thus
* pointless.
*/
- if ((it->d.lustre.it_disposition & DISP_LOOKUP_NEG) &&
- !(it->d.lustre.it_disposition & DISP_OPEN_CREATE) &&
- !(it->d.lustre.it_disposition & DISP_OPEN_OPEN))
+ if ((it->it_disposition & DISP_LOOKUP_NEG) &&
+ !(it->it_disposition & DISP_OPEN_CREATE) &&
+ !(it->it_disposition & DISP_OPEN_OPEN))
return rc;
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 7007e4c48035..0beafc49b8d2 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 9e31f6b03f9e..0e1588a43187 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1683,7 +1679,7 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct lustre_handle *lockh, void *lmm, int lmmsize,
__u64 extra_lock_flags)
{
- struct ptlrpc_request *req = it->d.lustre.it_data;
+ struct ptlrpc_request *req = it->it_request;
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lustre_handle plock;
@@ -1705,11 +1701,11 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
/*
* We got LOOKUP lock, but we really need attrs.
*/
- pmode = it->d.lustre.it_lock_mode;
+ pmode = it->it_lock_mode;
LASSERT(pmode != 0);
memcpy(&plock, lockh, sizeof(plock));
- it->d.lustre.it_lock_mode = 0;
- it->d.lustre.it_data = NULL;
+ it->it_lock_mode = 0;
+ it->it_request = NULL;
fid1 = body->fid1;
ptlrpc_req_finished(req);
@@ -2611,27 +2607,6 @@ static int lmv_clear_open_replay_data(struct obd_export *exp,
return md_clear_open_replay_data(tgt->ltd_exp, och);
}
-static int lmv_get_remote_perm(struct obd_export *exp,
- const struct lu_fid *fid,
- __u32 suppgid, struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_get_remote_perm(tgt->ltd_exp, fid, suppgid, request);
- return rc;
-}
-
static int lmv_intent_getattr_async(struct obd_export *exp,
struct md_enqueue_info *minfo,
struct ldlm_enqueue_info *einfo)
@@ -2686,7 +2661,7 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt = lmv->tgts[0];
int rc = 0, i;
- __u64 curspace, curinodes;
+ __u64 curspace = 0, curinodes = 0;
if (!tgt || !tgt->ltd_exp || !tgt->ltd_active ||
!lmv->desc.ld_tgt_count) {
@@ -2699,7 +2674,6 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
return rc;
}
- curspace = curinodes = 0;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
int err;
@@ -2796,7 +2770,6 @@ static struct md_ops lmv_md_ops = {
.free_lustre_md = lmv_free_lustre_md,
.set_open_replay_data = lmv_set_open_replay_data,
.clear_open_replay_data = lmv_clear_open_replay_data,
- .get_remote_perm = lmv_get_remote_perm,
.intent_getattr_async = lmv_intent_getattr_async,
.revalidate_lock = lmv_revalidate_lock
};
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index b39e364a29ab..c29c361eb0cc 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index ac9744e887ae..9740568d9521 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index dae8e89bcf6d..b1f260d43bc7 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 460f0fa5e6b1..5053dead17bb 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index eef9afac8467..12bd511e8988 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 86cb3f8f9246..84032a510254 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index 1b203d18c6e9..f3a0583f28f5 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 56ef41d17ad7..b9c90865fdfc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index e15ef2ece893..9b92d5522edb 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -896,6 +892,12 @@ static int lov_cleanup(struct obd_device *obd)
kfree(lov->lov_tgts);
lov->lov_tgt_size = 0;
}
+
+ if (lov->lov_cache) {
+ cl_cache_decref(lov->lov_cache);
+ lov->lov_cache = NULL;
+ }
+
return 0;
}
@@ -1772,7 +1774,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
fm_start = fiemap->fm_start;
fm_length = fiemap->fm_length;
/* Calculate start stripe, last stripe and length of mapping */
- actual_start_stripe = start_stripe = lov_stripe_number(lsm, fm_start);
+ start_stripe = lov_stripe_number(lsm, fm_start);
+ actual_start_stripe = start_stripe;
fm_end = (fm_length == ~0ULL ? fm_key->oa.o_size :
fm_start + fm_length - 1);
/* If fm_length != ~0ULL but fm_start+fm_length-1 exceeds file size */
@@ -2095,11 +2098,9 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
u32 count;
int i, rc = 0, err;
struct lov_tgt_desc *tgt;
- unsigned incr, check_uuid,
- do_inactive, no_set;
- unsigned next_id = 0, mds_con = 0;
+ unsigned int incr = 0, check_uuid = 0, do_inactive = 0, no_set = 0;
+ unsigned int next_id = 0, mds_con = 0;
- incr = check_uuid = do_inactive = no_set = 0;
if (!set) {
no_set = 1;
set = ptlrpc_prep_set();
@@ -2126,6 +2127,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
LASSERT(!lov->lov_cache);
lov->lov_cache = val;
do_inactive = 1;
+ cl_cache_incref(lov->lov_cache);
}
for (i = 0; i < count; i++, val = (char *)val + incr) {
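
The two hunks above pair a reference count with the stored pointer: lov_set_info_async() now takes a reference when it caches val in lov->lov_cache, and lov_cleanup() drops it and clears the pointer, so the shared cache can safely outlive any single user. A minimal refcount sketch of that attach/detach discipline — hypothetical names, not the actual cl_client_cache interface:

#include <stdatomic.h>
#include <stdlib.h>

struct cache {
        atomic_int refs;
        /* ... payload ... */
};

struct cache *cache_create(void)
{
        struct cache *c = calloc(1, sizeof(*c));

        if (c)
                atomic_init(&c->refs, 1);       /* creator holds one ref */
        return c;
}

static inline void cache_incref(struct cache *c)
{
        atomic_fetch_add_explicit(&c->refs, 1, memory_order_relaxed);
}

static inline void cache_decref(struct cache *c)
{
        if (atomic_fetch_sub_explicit(&c->refs, 1,
                                      memory_order_acq_rel) == 1)
                free(c);        /* last reference gone */
}

struct owner { struct cache *cache; };

void owner_attach(struct owner *o, struct cache *c)
{
        o->cache = c;
        cache_incref(c);        /* mirrors lov->lov_cache = val + incref */
}

void owner_detach(struct owner *o)      /* mirrors lov_cleanup() */
{
        if (o->cache) {
                cache_decref(o->cache);
                o->cache = NULL;
        }
}
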
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 561d493b2cdf..f9621b0fd469 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -185,8 +181,8 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
}
LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
- "stripe %d is already owned.\n", idx);
- LU_OBJECT_DEBUG(mask, env, old_obj, "owned.\n");
+ "stripe %d is already owned.", idx);
+ LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
cl_object_put(env, stripe);
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index 9302f06c34ef..ecca74fbff00 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -74,7 +70,7 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
{
loff_t offset;
- offset = lov_stripe_size(lsm, stripe_index << PAGE_SHIFT, stripe);
+ offset = lov_stripe_size(lsm, (stripe_index << PAGE_SHIFT) + 1, stripe);
return offset >> PAGE_SHIFT;
}
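
The "+ 1" above reads as compensating for lov_stripe_size() being a size-style conversion: handed the exact page boundary (stripe_index << PAGE_SHIFT), it yields the stripe-local byte count strictly before the page, and shifting that back down can land on the previous page. Querying the first byte inside the page keeps the result in the intended page. A standalone arithmetic demo of the boundary effect (assumes 4 KiB pages; this restates the off-by-one, not the Lustre stripe mapping itself):

#include <assert.h>

#define PAGE_SHIFT 12   /* 4 KiB pages for the demo */

/* Page index of the last byte covered by a 'size'-byte extent. */
static unsigned long last_page(unsigned long size)
{
        return (size - 1) >> PAGE_SHIFT;
}

int main(void)
{
        unsigned long boundary = 5UL << PAGE_SHIFT;     /* start of page 5 */

        assert(last_page(boundary) == 4);       /* extent ends on page 4 */
        assert(last_page(boundary + 1) == 5);   /* one byte into page 5 */
        return 0;
}
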
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 0215ea54df8d..869ef41b13ca 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index 0306f00c3f33..c17026f14896 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index 690292ecebdc..4c2d21729589 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -14,12 +14,8 @@
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see [sun.com URL with a
- * copy of GPLv2].
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 1be4b921c01f..4099b51f826e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index 35f6b1d66ff4..b519a1940e1e 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_io.c b/drivers/staging/lustre/lustre/lov/lovsub_io.c
index 783ec687a4e7..6a9820218a3e 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_io.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_io.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index e92edfb618b7..38f9b735c241 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index bcaae1e5b840..fb2f2660b3e9 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
index 9badedcce2bf..b2e68c3e820d 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
index 0dcb6b6a7782..eb6d30d34e3a 100644
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index 5c7a15dd7bd2..98d15fb247bc 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index c5519aeb0d8a..58f2841cabe4 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index 856c54e03b6b..143bd7628572 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -471,6 +467,18 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
mdc_setattr_pack_rec(rec, op_data);
+ /*
+ * The client will zero out local timestamps when losing the IBITS lock
+ * so any new RPC timestamps will update the client inode's timestamps.
+ * There was a defect on the server side which allowed the atime to be
+ * overwritten by a zeroed-out atime packed into the close RPC.
+ *
+ * Proactively clear the MDS_ATTR_ATIME flag in the RPC in this case
+ * to avoid zeroing the atime on old unpatched servers. See LU-8041.
+ */
+ if (rec->sa_atime == 0)
+ rec->sa_valid &= ~MDS_ATTR_ATIME;
+
mdc_ioepoch_pack(epoch, op_data);
mdc_hsm_release_pack(req, op_data);
}
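
The guard added above, in isolation: a setattr-style record carries a validity mask, and a zeroed atime must not travel as a valid field to servers that would apply it verbatim. A small self-contained restatement of that shape — field and flag names are made up here, not the on-wire Lustre record:

#include <stdint.h>

#define ATTR_ATIME      0x1u
#define ATTR_MTIME      0x2u

struct setattr_rec {
        uint32_t valid;         /* which fields below are meaningful */
        int64_t atime;
        int64_t mtime;
};

static void sanitize_close_attrs(struct setattr_rec *rec)
{
        /* Never ship a zeroed atime as an authoritative update. */
        if (rec->atime == 0)
                rec->valid &= ~ATTR_ATIME;
}
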
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 3b1bc9111b93..f48b58423307 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -54,61 +50,43 @@ struct mdc_getattr_args {
struct ldlm_enqueue_info *ga_einfo;
};
-int it_disposition(struct lookup_intent *it, int flag)
-{
- return it->d.lustre.it_disposition & flag;
-}
-EXPORT_SYMBOL(it_disposition);
-
-void it_set_disposition(struct lookup_intent *it, int flag)
-{
- it->d.lustre.it_disposition |= flag;
-}
-EXPORT_SYMBOL(it_set_disposition);
-
-void it_clear_disposition(struct lookup_intent *it, int flag)
-{
- it->d.lustre.it_disposition &= ~flag;
-}
-EXPORT_SYMBOL(it_clear_disposition);
-
int it_open_error(int phase, struct lookup_intent *it)
{
if (it_disposition(it, DISP_OPEN_LEASE)) {
if (phase >= DISP_OPEN_LEASE)
- return it->d.lustre.it_status;
+ return it->it_status;
else
return 0;
}
if (it_disposition(it, DISP_OPEN_OPEN)) {
if (phase >= DISP_OPEN_OPEN)
- return it->d.lustre.it_status;
+ return it->it_status;
else
return 0;
}
if (it_disposition(it, DISP_OPEN_CREATE)) {
if (phase >= DISP_OPEN_CREATE)
- return it->d.lustre.it_status;
+ return it->it_status;
else
return 0;
}
if (it_disposition(it, DISP_LOOKUP_EXECD)) {
if (phase >= DISP_LOOKUP_EXECD)
- return it->d.lustre.it_status;
+ return it->it_status;
else
return 0;
}
if (it_disposition(it, DISP_IT_EXECD)) {
if (phase >= DISP_IT_EXECD)
- return it->d.lustre.it_status;
+ return it->it_status;
else
return 0;
}
- CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
- it->d.lustre.it_status);
+ CERROR("it disp: %X, status: %d\n", it->it_disposition,
+ it->it_status);
LBUG();
return 0;
}
@@ -347,10 +325,6 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm,
lmmsize);
- /* for remote client, fetch remote perm for current user */
- if (client_is_remote(exp))
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
ptlrpc_request_set_replen(req);
return req;
}
@@ -444,9 +418,7 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
struct obd_device *obddev = class_exp2obd(exp);
u64 valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA |
- OBD_MD_MEA |
- (client_is_remote(exp) ?
- OBD_MD_FLRMTPERM : OBD_MD_FLACL);
+ OBD_MD_MEA | OBD_MD_FLACL;
struct ldlm_intent *lit;
int rc;
int easize;
@@ -478,9 +450,6 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
mdc_getattr_pack(req, valid, it->it_flags, op_data, easize);
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, easize);
- if (client_is_remote(exp))
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
ptlrpc_request_set_replen(req);
return req;
}
@@ -555,7 +524,6 @@ static int mdc_finish_enqueue(struct obd_export *exp,
struct req_capsule *pill = &req->rq_pill;
struct ldlm_request *lockreq;
struct ldlm_reply *lockrep;
- struct lustre_intent_data *intent = &it->d.lustre;
struct ldlm_lock *lock;
void *lvb_data = NULL;
int lvb_len = 0;
@@ -589,17 +557,17 @@ static int mdc_finish_enqueue(struct obd_export *exp,
lockrep = req_capsule_server_get(pill, &RMF_DLM_REP);
- intent->it_disposition = (int)lockrep->lock_policy_res1;
- intent->it_status = (int)lockrep->lock_policy_res2;
- intent->it_lock_mode = einfo->ei_mode;
- intent->it_lock_handle = lockh->cookie;
- intent->it_data = req;
+ it->it_disposition = (int)lockrep->lock_policy_res1;
+ it->it_status = (int)lockrep->lock_policy_res2;
+ it->it_lock_mode = einfo->ei_mode;
+ it->it_lock_handle = lockh->cookie;
+ it->it_request = req;
/* Technically speaking rq_transno must already be zero if
* it_status is in error, so the check is a bit redundant
*/
- if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay)
- mdc_clear_replay_flag(req, intent->it_status);
+ if ((!req->rq_transno || it->it_status < 0) && req->rq_replay)
+ mdc_clear_replay_flag(req, it->it_status);
/* If we're doing an IT_OPEN which did not result in an actual
* successful open, then we need to remove the bit which saves
@@ -610,11 +578,11 @@ static int mdc_finish_enqueue(struct obd_export *exp,
* (bug 3440)
*/
if (it->it_op & IT_OPEN && req->rq_replay &&
- (!it_disposition(it, DISP_OPEN_OPEN) || intent->it_status != 0))
- mdc_clear_replay_flag(req, intent->it_status);
+ (!it_disposition(it, DISP_OPEN_OPEN) || it->it_status != 0))
+ mdc_clear_replay_flag(req, it->it_status);
DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
- it->it_op, intent->it_disposition, intent->it_status);
+ it->it_op, it->it_disposition, it->it_status);
/* We know what to expect, so we do any byte flipping required here */
if (it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR)) {
@@ -687,16 +655,6 @@ static int mdc_finish_enqueue(struct obd_export *exp,
memcpy(lmm, eadata, body->eadatasize);
}
}
-
- if (body->valid & OBD_MD_FLRMTPERM) {
- struct mdt_remote_perm *perm;
-
- LASSERT(client_is_remote(exp));
- perm = req_capsule_server_swab_get(pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (!perm)
- return -EPROTO;
- }
} else if (it->it_op & IT_LAYOUT) {
/* maybe the lock was granted right away and layout
* is packed into RMF_DLM_LVB of req
@@ -715,7 +673,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
if (lock && ldlm_has_layout(lock) && lvb_data) {
void *lmm;
- LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n",
+ LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d",
ldlm_it2str(it->it_op), lvb_len);
lmm = libcfs_kvzalloc(lvb_len, GFP_NOFS);
@@ -923,9 +881,9 @@ resend:
}
ptlrpc_req_finished(req);
- it->d.lustre.it_lock_handle = 0;
- it->d.lustre.it_lock_mode = 0;
- it->d.lustre.it_data = NULL;
+ it->it_lock_handle = 0;
+ it->it_lock_mode = 0;
+ it->it_request = NULL;
}
return rc;
@@ -949,8 +907,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
/* The server failed before it even started executing the
* intent, i.e. because it couldn't unpack the request.
*/
- LASSERT(it->d.lustre.it_status != 0);
- return it->d.lustre.it_status;
+ LASSERT(it->it_status != 0);
+ return it->it_status;
}
rc = it_open_error(DISP_IT_EXECD, it);
if (rc)
@@ -1033,15 +991,15 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
LDLM_IBITS, &policy, LCK_NL,
&old_lock, 0)) {
ldlm_lock_decref_and_cancel(lockh,
- it->d.lustre.it_lock_mode);
+ it->it_lock_mode);
memcpy(lockh, &old_lock, sizeof(old_lock));
- it->d.lustre.it_lock_handle = lockh->cookie;
+ it->it_lock_handle = lockh->cookie;
}
}
CDEBUG(D_DENTRY,
"D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
- it->d.lustre.it_status, it->d.lustre.it_disposition, rc);
+ it->it_status, it->it_disposition, rc);
return rc;
}
@@ -1057,8 +1015,8 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
ldlm_policy_data_t policy;
enum ldlm_mode mode;
- if (it->d.lustre.it_lock_handle) {
- lockh.cookie = it->d.lustre.it_lock_handle;
+ if (it->it_lock_handle) {
+ lockh.cookie = it->it_lock_handle;
mode = ldlm_revalidate_lock_handle(&lockh, bits);
} else {
fid_build_reg_res_name(fid, &res_id);
@@ -1099,11 +1057,11 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
}
if (mode) {
- it->d.lustre.it_lock_handle = lockh.cookie;
- it->d.lustre.it_lock_mode = mode;
+ it->it_lock_handle = lockh.cookie;
+ it->it_lock_mode = mode;
} else {
- it->d.lustre.it_lock_handle = 0;
- it->d.lustre.it_lock_mode = 0;
+ it->it_lock_handle = 0;
+ it->it_lock_mode = 0;
}
return !!mode;
@@ -1125,15 +1083,15 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
* ll_create/ll_open gets called.
*
* The server will return to us, in it_disposition, an indication of
- * exactly what d.lustre.it_status refers to.
+ * exactly what it_status refers to.
*
- * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
+ * If DISP_OPEN_OPEN is set, then it_status refers to the open() call,
* otherwise if DISP_OPEN_CREATE is set, then it_status is the
* creation failure mode. In either case, one of DISP_LOOKUP_NEG or
* DISP_LOOKUP_POS will be set, indicating whether the child lookup
* was successful.
*
- * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
+ * Else, if DISP_LOOKUP_EXECD then it_status is the rc of the
* child lookup.
*/
int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
@@ -1166,7 +1124,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
* be called in revalidate_it if we already have a lock, let's
* verify that.
*/
- it->d.lustre.it_lock_handle = 0;
+ it->it_lock_handle = 0;
rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL);
/* Only return failure if it was not GETATTR by cfid
* (from inode_revalidate)
@@ -1188,7 +1146,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
if (rc < 0)
return rc;
- *reqp = it->d.lustre.it_data;
+ *reqp = it->it_request;
rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh);
return rc;
}
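
Most of the mdc_locks.c churn above is mechanical: the intent results, previously reached through the d.lustre union member, become direct fields of struct lookup_intent, and the untyped it_data pointer becomes a typed it_request. A sketch of the struct shape implied by the new call sites (the actual definition lives in the Lustre headers, not in this diff; member types are inferred from how the fields are assigned):

	struct lookup_intent {
		int			 it_op;
		int			 it_disposition;	/* was it->d.lustre.it_disposition */
		int			 it_status;		/* was it->d.lustre.it_status */
		__u64			 it_lock_handle;	/* was it->d.lustre.it_lock_handle */
		int			 it_lock_mode;		/* was it->d.lustre.it_lock_mode */
		struct ptlrpc_request	*it_request;		/* was void *it_data */
	};

The exported it_disposition()/it_set_disposition()/it_clear_disposition() helpers deleted at the top of the file are still called below (e.g. by it_open_error()), so they presumably survive as trivial static inlines next to the struct, along the lines of:

	static inline int it_disposition(struct lookup_intent *it, int flag)
	{
		return it->it_disposition & flag;
	}
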
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index 4ef3db147f87..5dba2c813857 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -234,7 +230,7 @@ rebuild:
MDS_INODELOCK_UPDATE);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_CREATE_RMT_ACL);
+ &RQF_MDS_REINT_CREATE_ACL);
if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 86b7445365f4..d4cc73bb6e1e 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -150,16 +146,6 @@ static int mdc_getattr_common(struct obd_export *exp,
return -EPROTO;
}
- if (body->valid & OBD_MD_FLRMTPERM) {
- struct mdt_remote_perm *perm;
-
- LASSERT(client_is_remote(exp));
- perm = req_capsule_server_swab_get(pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (!perm)
- return -EPROTO;
- }
-
return 0;
}
@@ -190,11 +176,6 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
op_data->op_mode);
- if (op_data->op_valid & OBD_MD_FLRMTPERM) {
- LASSERT(client_is_remote(exp));
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
- }
ptlrpc_request_set_replen(req);
rc = mdc_getattr_common(exp, req);
@@ -539,16 +520,7 @@ static int mdc_get_lustre_md(struct obd_export *exp,
}
rc = 0;
- if (md->body->valid & OBD_MD_FLRMTPERM) {
- /* remote permission */
- LASSERT(client_is_remote(exp));
- md->remote_perm = req_capsule_server_swab_get(pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (!md->remote_perm) {
- rc = -EPROTO;
- goto out;
- }
- } else if (md->body->valid & OBD_MD_FLACL) {
+ if (md->body->valid & OBD_MD_FLACL) {
/* for ACL, it's possible that FLACL is set but aclsize is zero.
* Only when aclsize != 0 is there an actual segment for the ACL
* in the reply buffer.
@@ -665,7 +637,7 @@ int mdc_set_open_replay_data(struct obd_export *exp,
struct md_open_data *mod;
struct mdt_rec_create *rec;
struct mdt_body *body;
- struct ptlrpc_request *open_req = it->d.lustre.it_data;
+ struct ptlrpc_request *open_req = it->it_request;
struct obd_import *imp = open_req->rq_import;
if (!open_req->rq_replay)
@@ -1168,7 +1140,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+ mdc_pack_body(req, NULL, 0, 0, -1, 0);
/* Copy hsm_progress struct */
req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
@@ -1202,7 +1174,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+ mdc_pack_body(req, NULL, 0, 0, -1, 0);
/* Copy hsm_progress struct */
archive_mask = req_capsule_client_get(&req->rq_pill,
@@ -1241,7 +1213,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
+ mdc_pack_body(req, &op_data->op_fid1, 0, 0,
op_data->op_suppgids[0], 0);
ptlrpc_request_set_replen(req);
@@ -1277,7 +1249,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+ mdc_pack_body(req, NULL, 0, 0, -1, 0);
ptlrpc_request_set_replen(req);
@@ -1306,7 +1278,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
+ mdc_pack_body(req, &op_data->op_fid1, 0, 0,
op_data->op_suppgids[0], 0);
ptlrpc_request_set_replen(req);
@@ -1347,7 +1319,7 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
+ mdc_pack_body(req, &op_data->op_fid1, 0, 0,
op_data->op_suppgids[0], 0);
/* Copy states */
@@ -1394,7 +1366,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+ mdc_pack_body(req, NULL, 0, 0, -1, 0);
/* Copy hsm_request struct */
req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
@@ -1807,7 +1779,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
case IOC_OBD_STATFS: {
struct obd_statfs stat_buf = {0};
- if (*((__u32 *) data->ioc_inlbuf2) != 0) {
+ if (*((__u32 *)data->ioc_inlbuf2) != 0) {
rc = -ENODEV;
goto out;
}
@@ -2001,7 +1973,7 @@ static int mdc_hsm_copytool_send(int len, void *val)
if (len < sizeof(*lh) + sizeof(*hal)) {
CERROR("Short HSM message %d < %d\n", len,
- (int) (sizeof(*lh) + sizeof(*hal)));
+ (int)(sizeof(*lh) + sizeof(*hal)));
return -EPROTO;
}
if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
@@ -2432,41 +2404,6 @@ static int mdc_process_config(struct obd_device *obd, u32 len, void *buf)
return rc;
}
-/* get remote permission for current user on fid */
-static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
- __u32 suppgid, struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- LASSERT(client_is_remote(exp));
-
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, fid, OBD_MD_FLRMTPERM, 0, suppgid, 0);
-
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
static struct obd_ops mdc_obd_ops = {
.owner = THIS_MODULE,
.setup = mdc_setup,
@@ -2518,7 +2455,6 @@ static struct md_ops mdc_md_ops = {
.free_lustre_md = mdc_free_lustre_md,
.set_open_replay_data = mdc_set_open_replay_data,
.clear_open_replay_data = mdc_clear_open_replay_data,
- .get_remote_perm = mdc_get_remote_perm,
.intent_getattr_async = mdc_intent_getattr_async,
.revalidate_lock = mdc_revalidate_lock
};
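
The mdc_request.c hunks above complete the removal of remote-client support: every OBD_MD_FLRMTPERM valid bit and RMF_ACL reply-buffer reservation is dropped, the HSM paths now pass a zero valid mask to mdc_pack_body(), and mdc_get_remote_perm() goes away together with its md_ops slot. The deleted helper also happens to be a compact example of the ptlrpc request lifecycle used throughout this file; restated as a pattern with the same calls:

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
	if (!req)
		return -ENOMEM;

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
	if (rc) {
		ptlrpc_request_free(req);	/* never sent: free, don't finish */
		return rc;
	}

	/* ... pack the message body, reserve reply buffer space ... */
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);		/* send and wait for the reply */
	if (rc)
		ptlrpc_req_finished(req);	/* error: drop our reference */
	else
		*request = req;			/* success: caller owns the request */
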
diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
index 8d5bc5a751a4..0735220b2a18 100644
--- a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
+++ b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_internal.h b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
index 82fb8f46e037..f146f7521c92 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_internal.h
+++ b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 2311a437c441..9d0bd4745865 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -500,7 +496,9 @@ static void do_requeue(struct config_llog_data *cld)
* export which is being disconnected. Take the client
* semaphore to make the check non-racy.
*/
- down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ down_read_nested(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem,
+ OBD_CLI_SEM_MGC);
+
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
int rc;
@@ -1034,7 +1032,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
rc = sptlrpc_parse_flavor(val, &flvr);
if (rc) {
CERROR("invalid sptlrpc flavor %s to MGS\n",
- (char *) val);
+ (char *)val);
return rc;
}
@@ -1050,7 +1048,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
sptlrpc_flavor2name(&cli->cl_flvr_mgc,
str, sizeof(str));
LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but currently %s is in use\n",
- (char *) val, str);
+ (char *)val, str);
rc = -EPERM;
}
return rc;
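
The do_requeue() change above swaps a plain down_read() for down_read_nested() with a dedicated subclass. That is a lockdep annotation, not a locking change: requeueing a config log can take the MGC export's cl_sem while another client's cl_sem is already held, and without a distinct subclass lockdep would report the recursion as a possible deadlock. A minimal sketch of the pattern (OBD_CLI_SEM_MGC comes from the hunk; the surrounding enum is assumed):

	enum {
		OBD_CLI_SEM_NORMAL,	/* assumed default subclass */
		OBD_CLI_SEM_MGC,	/* the MGC export's cl_sem */
	};

	down_read(&cli->cl_sem);			/* outer, default subclass */
	down_read_nested(&mgc_cli->cl_sem,
			 OBD_CLI_SEM_MGC);		/* inner, distinct subclass */
	/* ... check cl_conn_count and requeue ... */
	up_read(&mgc_cli->cl_sem);
	up_read(&cli->cl_sem);
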
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index c404eb3864ff..df7e47f35a66 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -5,5 +5,4 @@ obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
lustre_handles.o lustre_peer.o statfs_pack.o \
obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \
- cl_object.o cl_page.o cl_lock.o cl_io.o \
- acl.o kernelcomm.o
+ cl_object.o cl_page.o cl_lock.o cl_io.o kernelcomm.o
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
deleted file mode 100644
index 0e02ae97b7ed..000000000000
--- a/drivers/staging/lustre/lustre/obdclass/acl.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/acl.c
- *
- * Lustre Access Control List.
- *
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-#include "../include/lu_object.h"
-#include "../include/lustre_acl.h"
-#include "../include/lustre_eacl.h"
-#include "../include/obd_support.h"
-
-#ifdef CONFIG_FS_POSIX_ACL
-
-#define CFS_ACL_XATTR_VERSION POSIX_ACL_XATTR_VERSION
-
-enum {
- ES_UNK = 0, /* unknown stat */
- ES_UNC = 1, /* ACL entry is not changed */
- ES_MOD = 2, /* ACL entry is modified */
- ES_ADD = 3, /* ACL entry is added */
- ES_DEL = 4 /* ACL entry is deleted */
-};
-
-static inline void lustre_ext_acl_le_to_cpu(ext_acl_xattr_entry *d,
- ext_acl_xattr_entry *s)
-{
- d->e_tag = le16_to_cpu(s->e_tag);
- d->e_perm = le16_to_cpu(s->e_perm);
- d->e_id = le32_to_cpu(s->e_id);
- d->e_stat = le32_to_cpu(s->e_stat);
-}
-
-static inline void lustre_ext_acl_cpu_to_le(ext_acl_xattr_entry *d,
- ext_acl_xattr_entry *s)
-{
- d->e_tag = cpu_to_le16(s->e_tag);
- d->e_perm = cpu_to_le16(s->e_perm);
- d->e_id = cpu_to_le32(s->e_id);
- d->e_stat = cpu_to_le32(s->e_stat);
-}
-
-static inline void lustre_posix_acl_le_to_cpu(posix_acl_xattr_entry *d,
- posix_acl_xattr_entry *s)
-{
- d->e_tag = le16_to_cpu(s->e_tag);
- d->e_perm = le16_to_cpu(s->e_perm);
- d->e_id = le32_to_cpu(s->e_id);
-}
-
-static inline void lustre_posix_acl_cpu_to_le(posix_acl_xattr_entry *d,
- posix_acl_xattr_entry *s)
-{
- d->e_tag = cpu_to_le16(s->e_tag);
- d->e_perm = cpu_to_le16(s->e_perm);
- d->e_id = cpu_to_le32(s->e_id);
-}
-
-/* if "new_count == 0", then "new = {a_version, NULL}", NOT NULL. */
-static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
- int old_count, int new_count)
-{
- int old_size = CFS_ACL_XATTR_SIZE(old_count, posix_acl_xattr);
- int new_size = CFS_ACL_XATTR_SIZE(new_count, posix_acl_xattr);
- posix_acl_xattr_header *new;
-
- if (unlikely(old_count <= new_count))
- return old_size;
-
- new = kmemdup(*header, new_size, GFP_NOFS);
- if (unlikely(!new))
- return -ENOMEM;
-
- kfree(*header);
- *header = new;
- return new_size;
-}
-
-/* if "new_count == 0", then "new = {0, NULL}", NOT NULL. */
-static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header,
- int old_count)
-{
- int ext_count = le32_to_cpu((*header)->a_count);
- int ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
- ext_acl_xattr_header *new;
-
- if (unlikely(old_count <= ext_count))
- return 0;
-
- new = kmemdup(*header, ext_size, GFP_NOFS);
- if (unlikely(!new))
- return -ENOMEM;
-
- kfree(*header);
- *header = new;
- return 0;
-}
-
-/*
- * Generate new extended ACL based on the posix ACL.
- */
-ext_acl_xattr_header *
-lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
-{
- int count, i, esize;
- ext_acl_xattr_header *new;
-
- if (unlikely(size < 0))
- return ERR_PTR(-EINVAL);
- else if (!size)
- count = 0;
- else
- count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
- esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
- new = kzalloc(esize, GFP_NOFS);
- if (unlikely(!new))
- return ERR_PTR(-ENOMEM);
-
- new->a_count = cpu_to_le32(count);
- for (i = 0; i < count; i++) {
- new->a_entries[i].e_tag = header->a_entries[i].e_tag;
- new->a_entries[i].e_perm = header->a_entries[i].e_perm;
- new->a_entries[i].e_id = header->a_entries[i].e_id;
- new->a_entries[i].e_stat = cpu_to_le32(ES_UNK);
- }
-
- return new;
-}
-EXPORT_SYMBOL(lustre_posix_acl_xattr_2ext);
-
-/*
- * Filter out the "nobody" entries in the posix ACL.
- */
-int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
- posix_acl_xattr_header **out)
-{
- int count, i, j, rc = 0;
- __u32 id;
- posix_acl_xattr_header *new;
-
- if (!size)
- return 0;
- if (size < sizeof(*new))
- return -EINVAL;
-
- new = kzalloc(size, GFP_NOFS);
- if (unlikely(!new))
- return -ENOMEM;
-
- new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
- count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
- for (i = 0, j = 0; i < count; i++) {
- id = le32_to_cpu(header->a_entries[i].e_id);
- switch (le16_to_cpu(header->a_entries[i].e_tag)) {
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- if (id != ACL_UNDEFINED_ID) {
- rc = -EIO;
- goto _out;
- }
-
- memcpy(&new->a_entries[j++], &header->a_entries[i],
- sizeof(posix_acl_xattr_entry));
- break;
- case ACL_USER:
- if (id != NOBODY_UID)
- memcpy(&new->a_entries[j++],
- &header->a_entries[i],
- sizeof(posix_acl_xattr_entry));
- break;
- case ACL_GROUP:
- if (id != NOBODY_GID)
- memcpy(&new->a_entries[j++],
- &header->a_entries[i],
- sizeof(posix_acl_xattr_entry));
- break;
- default:
- rc = -EIO;
- goto _out;
- }
- }
-
- /* free unused space. */
- rc = lustre_posix_acl_xattr_reduce_space(&new, count, j);
- if (rc >= 0) {
- size = rc;
- *out = new;
- rc = 0;
- }
-
-_out:
- if (rc) {
- kfree(new);
- size = rc;
- }
- return size;
-}
-EXPORT_SYMBOL(lustre_posix_acl_xattr_filter);
-
-/*
- * Release the extended ACL space.
- */
-void lustre_ext_acl_xattr_free(ext_acl_xattr_header *header)
-{
- kfree(header);
-}
-EXPORT_SYMBOL(lustre_ext_acl_xattr_free);
-
-static ext_acl_xattr_entry *
-lustre_ext_acl_xattr_search(ext_acl_xattr_header *header,
- posix_acl_xattr_entry *entry, int *pos)
-{
- int once, start, end, i, j, count = le32_to_cpu(header->a_count);
-
- once = 0;
- start = *pos;
- end = count;
-
-again:
- for (i = start; i < end; i++) {
- if (header->a_entries[i].e_tag == entry->e_tag &&
- header->a_entries[i].e_id == entry->e_id) {
- j = i;
- if (++i >= count)
- i = 0;
- *pos = i;
- return &header->a_entries[j];
- }
- }
-
- if (!once) {
- once = 1;
- start = 0;
- end = *pos;
- goto again;
- }
-
- return NULL;
-}
-
-/*
- * Merge the posix ACL and the extended ACL into new extended ACL.
- */
-ext_acl_xattr_header *
-lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
- ext_acl_xattr_header *ext_header)
-{
- int ori_ext_count, posix_count, ext_count, ext_size;
- int i, j, pos = 0, rc = 0;
- posix_acl_xattr_entry pae;
- ext_acl_xattr_header *new;
- ext_acl_xattr_entry *ee, eae;
-
- if (unlikely(size < 0))
- return ERR_PTR(-EINVAL);
- else if (!size)
- posix_count = 0;
- else
- posix_count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
- ori_ext_count = le32_to_cpu(ext_header->a_count);
- ext_count = posix_count + ori_ext_count;
- ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
-
- new = kzalloc(ext_size, GFP_NOFS);
- if (unlikely(!new))
- return ERR_PTR(-ENOMEM);
-
- for (i = 0, j = 0; i < posix_count; i++) {
- lustre_posix_acl_le_to_cpu(&pae, &posix_header->a_entries[i]);
- switch (pae.e_tag) {
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- if (pae.e_id != ACL_UNDEFINED_ID) {
- rc = -EIO;
- goto out;
- }
- case ACL_USER:
- /* ignore "nobody" entry. */
- if (pae.e_id == NOBODY_UID)
- break;
-
- new->a_entries[j].e_tag =
- posix_header->a_entries[i].e_tag;
- new->a_entries[j].e_perm =
- posix_header->a_entries[i].e_perm;
- new->a_entries[j].e_id =
- posix_header->a_entries[i].e_id;
- ee = lustre_ext_acl_xattr_search(ext_header,
- &posix_header->a_entries[i], &pos);
- if (ee) {
- if (posix_header->a_entries[i].e_perm !=
- ee->e_perm)
- /* entry modified. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_MOD);
- else
- /* entry unchanged. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_UNC);
- } else {
- /* new entry. */
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_ADD);
- }
- break;
- case ACL_GROUP:
- /* ignore "nobody" entry. */
- if (pae.e_id == NOBODY_GID)
- break;
- new->a_entries[j].e_tag =
- posix_header->a_entries[i].e_tag;
- new->a_entries[j].e_perm =
- posix_header->a_entries[i].e_perm;
- new->a_entries[j].e_id =
- posix_header->a_entries[i].e_id;
- ee = lustre_ext_acl_xattr_search(ext_header,
- &posix_header->a_entries[i], &pos);
- if (ee) {
- if (posix_header->a_entries[i].e_perm !=
- ee->e_perm)
- /* entry modified. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_MOD);
- else
- /* entry unchanged. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_UNC);
- } else {
- /* new entry. */
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_ADD);
- }
- break;
- default:
- rc = -EIO;
- goto out;
- }
- }
-
- /* process deleted entries. */
- for (i = 0; i < ori_ext_count; i++) {
- lustre_ext_acl_le_to_cpu(&eae, &ext_header->a_entries[i]);
- if (eae.e_stat == ES_UNK) {
- /* ignore "nobody" entry. */
- if ((eae.e_tag == ACL_USER && eae.e_id == NOBODY_UID) ||
- (eae.e_tag == ACL_GROUP && eae.e_id == NOBODY_GID))
- continue;
-
- new->a_entries[j].e_tag =
- ext_header->a_entries[i].e_tag;
- new->a_entries[j].e_perm =
- ext_header->a_entries[i].e_perm;
- new->a_entries[j].e_id = ext_header->a_entries[i].e_id;
- new->a_entries[j++].e_stat = cpu_to_le32(ES_DEL);
- }
- }
-
- new->a_count = cpu_to_le32(j);
- /* free unused space. */
- rc = lustre_ext_acl_xattr_reduce_space(&new, ext_count);
-
-out:
- if (rc) {
- kfree(new);
- new = ERR_PTR(rc);
- }
- return new;
-}
-EXPORT_SYMBOL(lustre_acl_xattr_merge2ext);
-
-#endif
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
index 7eb0ad7b3644..e866754a42d5 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index 583fb5f33889..e72f1fc00a13 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index 26a576b63a72..9d7b5939b0fd 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 5940f30318ec..91a5806d0239 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -577,7 +573,7 @@ static inline struct cl_env *cl_env_fetch(void)
{
struct cl_env *cle;
- cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
+ cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid);
LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
return cle;
}
@@ -588,7 +584,7 @@ static inline void cl_env_attach(struct cl_env *cle)
int rc;
LASSERT(!cle->ce_owner);
- cle->ce_owner = (void *) (long) current->pid;
+ cle->ce_owner = (void *)(long)current->pid;
rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
&cle->ce_node);
LASSERT(rc == 0);
@@ -599,7 +595,7 @@ static inline void cl_env_do_detach(struct cl_env *cle)
{
void *cookie;
- LASSERT(cle->ce_owner == (void *) (long) current->pid);
+ LASSERT(cle->ce_owner == (void *)(long)current->pid);
cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
&cle->ce_node);
LASSERT(cookie == cle);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index b754f516e557..db2dc6b39073 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1076,3 +1072,49 @@ void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
slice->cpl_page = page;
}
EXPORT_SYMBOL(cl_page_slice_add);
+
+/**
+ * Allocate and initialize cl_cache, called by ll_init_sbi().
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
+{
+ struct cl_client_cache *cache = NULL;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (!cache)
+ return NULL;
+
+ /* Initialize cache data */
+ atomic_set(&cache->ccc_users, 1);
+ cache->ccc_lru_max = lru_page_max;
+ atomic_set(&cache->ccc_lru_left, lru_page_max);
+ spin_lock_init(&cache->ccc_lru_lock);
+ INIT_LIST_HEAD(&cache->ccc_lru);
+
+ atomic_set(&cache->ccc_unstable_nr, 0);
+ init_waitqueue_head(&cache->ccc_unstable_waitq);
+
+ return cache;
+}
+EXPORT_SYMBOL(cl_cache_init);
+
+/**
+ * Increase cl_cache refcount
+ */
+void cl_cache_incref(struct cl_client_cache *cache)
+{
+ atomic_inc(&cache->ccc_users);
+}
+EXPORT_SYMBOL(cl_cache_incref);
+
+/**
+ * Decrease the cl_cache refcount and free the cache once it drops to zero.
+ * Since llite, lov and osc each hold a cl_cache reference,
+ * the free cannot race with a concurrent user. (LU-6173)
+ */
+void cl_cache_decref(struct cl_client_cache *cache)
+{
+ if (atomic_dec_and_test(&cache->ccc_users))
+ kfree(cache);
+}
+EXPORT_SYMBOL(cl_cache_decref);
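
The three exports added above give llite, lov and osc one shared, refcounted object for LRU page accounting instead of per-layer copies. A hypothetical lifecycle following the doc comments (the caller and variable names are illustrative):

	struct cl_client_cache *cache;

	cache = cl_cache_init(lru_page_max);	/* ccc_users starts at 1 */
	if (!cache)
		return -ENOMEM;

	cl_cache_incref(cache);		/* each lower layer takes its own ref */
	/* ... hand the cache down to lov/osc ... */

	cl_cache_decref(cache);		/* a layer drops its reference */
	cl_cache_decref(cache);		/* last reference: the cache is kfree()d */
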
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index f48816af8be7..d9d2a1952b8b 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index e4edfb2c0a20..8acf67239fa8 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index d95f11d62a32..99c2da632b51 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
index 8405eccdac19..a0f65c470f4d 100644
--- a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
+++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 2cd4522462d9..33342bfcc90e 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index b41b65e2f021..c6cc6a7666e3 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index e6bf414a4444..8f70dd2686f9 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 79194d8cb587..1784ca063428 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -123,8 +119,10 @@ static int llog_read_header(const struct lu_env *env,
handle->lgh_last_idx = 0; /* header is record with index 0 */
llh->llh_count = 1; /* for the header record */
llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
- llh->llh_hdr.lrh_len = llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
- llh->llh_hdr.lrh_index = llh->llh_tail.lrt_index = 0;
+ llh->llh_hdr.lrh_len = LLOG_CHUNK_SIZE;
+ llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
+ llh->llh_hdr.lrh_index = 0;
+ llh->llh_tail.lrt_index = 0;
llh->llh_timestamp = ktime_get_real_seconds();
if (uuid)
memcpy(&llh->llh_tgtuuid, uuid,
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index c27d4ec1df9e..a82a2950295a 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_internal.h b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
index 7fb48dda355e..f7949525d952 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index 826623f528da..6ace7e097859 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index 967ba2e1bfcb..f7b9b190350c 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 5a1eae1de2ec..279b625f1afe 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index e04385760f21..9b03059f34d6 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_ref.c b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
index 993697b660f6..e9f6040d19eb 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_ref.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
index 403ceea06186..082f530c527c 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index b1abe023bb35..5974a9bf77c0 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index cb1d65c3d95d..0eab1236501b 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1021,8 +1017,8 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
/* Search proc entries */
while (lvars[j].name) {
var = &lvars[j];
- if (!class_match_param(key, var->name, NULL)
- && keylen == strlen(var->name)) {
+ if (!class_match_param(key, var->name, NULL) &&
+ keylen == strlen(var->name)) {
matched++;
rc = -EROFS;
if (var->fops && var->fops->write) {
@@ -1077,7 +1073,7 @@ int class_config_llog_handler(const struct lu_env *env,
{
struct config_llog_instance *clli = data;
int cfg_len = rec->lrh_len;
- char *cfg_buf = (char *) (rec + 1);
+ char *cfg_buf = (char *)(rec + 1);
int rc = 0;
switch (rec->lrh_type) {
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index e0c90adc72a7..aa84a50e9904 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -192,7 +188,7 @@ static int lustre_start_simple(char *obdname, char *type, char *uuid,
return rc;
}
-DEFINE_MUTEX(mgc_start_lock);
+static DEFINE_MUTEX(mgc_start_lock);
/** Set up a mgc obd to process startup logs
*
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 748e33f017d5..8583a4a8c206 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
index fb4e3ae845e0..4bad1fa27d40 100644
--- a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
+++ b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c
index b0b0157a6334..abd9b1ae72cd 100644
--- a/drivers/staging/lustre/lustre/obdclass/uuid.c
+++ b/drivers/staging/lustre/lustre/obdclass/uuid.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 91ef06f17934..5b29c4a44fe5 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 33a113213bf5..7e83d395b998 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 5a14bea961b4..d1a7d6beee60 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -127,9 +123,9 @@ static const char *oes_strings[] = {
/* ----- part 4 ----- */ \
## __VA_ARGS__); \
if (lvl == D_ERROR && __ext->oe_dlmlock) \
- LDLM_ERROR(__ext->oe_dlmlock, "extent: %p\n", __ext); \
+ LDLM_ERROR(__ext->oe_dlmlock, "extent: %p", __ext); \
else \
- LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p\n", __ext); \
+ LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p", __ext); \
} while (0)
#undef EASSERTF
@@ -2371,7 +2367,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
oap->oap_obj_off = offset;
LASSERT(!(offset & ~PAGE_MASK));
- if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
+ if (capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags = OBD_BRW_NOQUOTA;
INIT_LIST_HEAD(&oap->oap_pending_item);
@@ -2410,8 +2406,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
/* Set the OBD_BRW_SRVLOCK before the page is queued. */
brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
- if (!client_is_remote(osc_export(osc)) &&
- capable(CFS_CAP_SYS_RESOURCE)) {
+ if (capable(CFS_CAP_SYS_RESOURCE)) {
brw_flags |= OBD_BRW_NOQUOTA;
cmd |= OBD_BRW_NOQUOTA;
}
@@ -2773,7 +2768,8 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
ext->oe_sync = 1;
ext->oe_urgent = 1;
ext->oe_start = start;
- ext->oe_end = ext->oe_max_end = end;
+ ext->oe_end = end;
+ ext->oe_max_end = end;
ext->oe_obj = obj;
ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
ext->oe_nr_pages = page_count;
@@ -3308,7 +3304,8 @@ int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
goto out;
cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
- info->oti_fn_index = info->oti_next_index = start;
+ info->oti_fn_index = start;
+ info->oti_next_index = start;
do {
res = osc_page_gang_lookup(env, io, osc,
info->oti_next_index, end, cb, osc);
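
A transformation that recurs throughout this patch, first visible in osc_cache.c and repeated later in osc_io.c, osc_request.c, import.c, pinger.c and sec_null.c, is splitting chained assignments into one assignment per statement, which checkpatch flags and which also gives each store its own line for debugging. A minimal sketch of the before/after shape, using illustrative names rather than the Lustre ones:

/*
 * Chained-assignment split, as applied throughout this patch.
 * "struct extent" is a stand-in, not the Lustre osc_extent.
 */
struct extent {
	unsigned long oe_end;
	unsigned long oe_max_end;
};

static void extent_set_bounds(struct extent *ext, unsigned long end)
{
	/* before: ext->oe_end = ext->oe_max_end = end; */
	ext->oe_end = end;	/* one assignment ... */
	ext->oe_max_end = end;	/* ... per statement */
}
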
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index ae19d396b537..c8c3f1ca77be 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -66,7 +62,7 @@ struct osc_io {
/** super class */
struct cl_io_slice oi_cl;
/** true if this io is lockless. */
- int oi_lockless;
+ unsigned int oi_lockless;
/** how many LRU pages are reserved for this IO */
int oi_lru_reserved;
@@ -356,11 +352,6 @@ struct osc_page {
*/
unsigned ops_transfer_pinned:1,
/**
- * True for a `temporary page' created by read-ahead code, probably
- * outside of any DLM lock.
- */
- ops_temp:1,
- /**
* in LRU?
*/
ops_in_lru:1,
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index d4fe507f165f..83d30c135ba4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index 7fad8278150f..7a27f0961955 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index d534b0e0edf6..6e3dcd38913f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -221,7 +217,8 @@ static void osc_page_touch_at(const struct lu_env *env,
kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
loi->loi_lvb.lvb_size);
- attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
+ attr->cat_ctime = LTIME_S(CURRENT_TIME);
+ attr->cat_mtime = attr->cat_ctime;
valid = CAT_MTIME | CAT_CTIME;
if (kms > loi->loi_kms) {
attr->cat_kms = kms;
@@ -458,7 +455,8 @@ static int osc_io_setattr_start(const struct lu_env *env,
unsigned int cl_valid = 0;
if (ia_valid & ATTR_SIZE) {
- attr->cat_size = attr->cat_kms = size;
+ attr->cat_size = size;
+ attr->cat_kms = size;
cl_valid = CAT_SIZE | CAT_KMS;
}
if (ia_valid & ATTR_MTIME_SET) {
@@ -526,7 +524,8 @@ static void osc_io_setattr_end(const struct lu_env *env,
if (cbargs->opc_rpc_sent) {
wait_for_completion(&cbargs->opc_sync);
- result = io->ci_result = cbargs->opc_rc;
+ result = cbargs->opc_rc;
+ io->ci_result = cbargs->opc_rc;
}
if (result == 0) {
if (oio->oi_lockless) {
@@ -575,7 +574,8 @@ static int osc_io_write_start(const struct lu_env *env,
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
cl_object_attr_lock(obj);
- attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
+ attr->cat_ctime = ktime_get_real_seconds();
+ attr->cat_mtime = attr->cat_ctime;
rc = cl_object_attr_set(env, obj, attr, CAT_MTIME | CAT_CTIME);
cl_object_attr_unlock(obj);
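
The osc_io.c hunks also migrate timestamp updates from the 32-bit LTIME_S(CURRENT_TIME) to ktime_get_real_seconds(), whose time64_t return value does not wrap in 2038. A minimal sketch of the resulting pattern, with a stand-in attribute struct:

#include <linux/timekeeping.h>

/* stand-in for the cl_attr updated in osc_io_write_start() above */
struct file_attr {
	time64_t cat_mtime;
	time64_t cat_ctime;
};

static void attr_touch_now(struct file_attr *attr)
{
	/* 64-bit seconds, safe past 2038, unlike LTIME_S(CURRENT_TIME) */
	attr->cat_ctime = ktime_get_real_seconds();
	attr->cat_mtime = attr->cat_ctime;
}
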
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 16f9cd9d3b12..717d3ffb6789 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -638,11 +634,10 @@ static int weigh_cb(const struct lu_env *env, struct cl_io *io,
if (cl_page_is_vmlocked(env, page) ||
PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
- ) {
- (*(unsigned long *)cbdata)++;
+ )
return CLP_GANG_ABORT;
- }
+ *(pgoff_t *)cbdata = osc_index(ops) + 1;
return CLP_GANG_OKAY;
}
@@ -652,7 +647,7 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
{
struct cl_io *io = &osc_env_info(env)->oti_io;
struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
- unsigned long npages = 0;
+ pgoff_t page_index;
int result;
io->ci_obj = obj;
@@ -661,11 +656,12 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
if (result != 0)
return result;
+ page_index = cl_index(obj, extent->start);
do {
result = osc_page_gang_lookup(env, io, oscobj,
- cl_index(obj, extent->start),
+ page_index,
cl_index(obj, extent->end),
- weigh_cb, (void *)&npages);
+ weigh_cb, (void *)&page_index);
if (result == CLP_GANG_ABORT)
break;
if (result == CLP_GANG_RESCHED)
@@ -673,7 +669,7 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
} while (result != CLP_GANG_OKAY);
cl_io_fini(env, io);
- return npages;
+ return result == CLP_GANG_ABORT ? 1 : 0;
}
/**
@@ -703,7 +699,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
obj = dlmlock->l_ast_data;
- if (obj) {
+ if (!obj) {
weight = 1;
goto out;
}
@@ -1120,7 +1116,8 @@ static void osc_lock_set_writer(const struct lu_env *env,
}
} else {
LASSERT(cl_io_is_mkwrite(io));
- io_start = io_end = io->u.ci_fault.ft_index;
+ io_start = io->u.ci_fault.ft_index;
+ io_end = io->u.ci_fault.ft_index;
}
if (descr->cld_mode >= CLM_WRITE &&
@@ -1171,7 +1168,7 @@ int osc_lock_init(const struct lu_env *env,
osc_lock_set_writer(env, io, obj, oscl);
- LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
+ LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx",
lock, oscl, oscl->ols_flags);
return 0;
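
The weigh_cb()/osc_lock_weight() rework above replaces a page counter with a resume cursor: the callback records the index just past the last page it inspected, so when the gang lookup returns CLP_GANG_RESCHED the loop continues from that cursor instead of rescanning from the start of the extent, and the weight collapses to "busy or idle". A hedged sketch of the cursor pattern, with stand-in names:

/* CLP-style return codes and the scan helper are stand-ins. */
enum { SCAN_OKAY, SCAN_RESCHED, SCAN_ABORT };

/* scans [*cursor, end]; advances *cursor past each visited item */
extern int range_scan(unsigned long *cursor, unsigned long end, void *data);

static int scan_with_resume(unsigned long start, unsigned long end,
			    void *data)
{
	unsigned long cursor = start;
	int rc;

	do {
		rc = range_scan(&cursor, end, data);
		if (rc == SCAN_ABORT)
			break;
		/* on SCAN_RESCHED, resume from cursor, not from start */
	} while (rc != SCAN_OKAY);

	return rc == SCAN_ABORT ? 1 : 0;	/* weight: busy or idle */
}
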
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index 738ab10ab274..d211d1905e83 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index c29c2eabe39c..355f496a2093 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -52,13 +48,6 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
* @{
*/
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- return 1;
-}
-
/*****************************************************************************
*
* Page operations.
@@ -110,8 +99,6 @@ int osc_page_cache_add(const struct lu_env *env,
struct osc_page *opg = cl2osc_page(slice);
int result;
- LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
-
osc_page_transfer_get(opg, "transfer\0cache");
result = osc_queue_async_io(env, io, opg);
if (result != 0)
@@ -214,8 +201,6 @@ static void osc_page_delete(const struct lu_env *env,
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
int rc;
- LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
-
CDEBUG(D_TRACE, "%p\n", opg);
osc_page_transfer_put(env, opg);
rc = osc_teardown_async_page(env, obj, opg);
@@ -254,8 +239,6 @@ static void osc_page_clip(const struct lu_env *env,
struct osc_page *opg = cl2osc_page(slice);
struct osc_async_page *oap = &opg->ops_oap;
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
-
opg->ops_from = from;
opg->ops_to = to;
spin_lock(&oap->oap_lock);
@@ -269,8 +252,6 @@ static int osc_page_cancel(const struct lu_env *env,
struct osc_page *opg = cl2osc_page(slice);
int rc = 0;
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
-
/* Check if the transferring against this page
* is completed, or not even queued.
*/
@@ -320,10 +301,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
cl_page_slice_add(page, &opg->ops_cl, obj, index,
&osc_page_ops);
}
- /*
- * Cannot assert osc_page_protected() here as read-ahead
- * creates temporary pages outside of a lock.
- */
/* ops_inflight and ops_lru are the same field, but it doesn't
* hurt to initialize it twice :-)
*/
@@ -380,10 +357,6 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
enum cl_req_type crt, int brw_flags)
{
struct osc_async_page *oap = &opg->ops_oap;
- struct osc_object *obj = oap->oap_obj;
-
- LINVRNT(osc_page_protected(env, opg,
- crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, magic 0x%x\n",
oap, oap->oap_magic);
@@ -398,8 +371,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
if (osc_over_unstable_soft_limit(oap->oap_cli))
oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
- if (!client_is_remote(osc_export(obj)) &&
- capable(CFS_CAP_SYS_RESOURCE)) {
+ if (capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
oap->oap_cmd |= OBD_BRW_NOQUOTA;
}
@@ -440,7 +412,7 @@ static int osc_cache_too_much(struct client_obd *cli)
int pages = atomic_read(&cli->cl_lru_in_list);
unsigned long budget;
- budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
+ budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain fairness among OSCs.
@@ -740,7 +712,7 @@ int osc_lru_reclaim(struct client_obd *cli)
cache->ccc_lru_shrinkers++;
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- max_scans = atomic_read(&cache->ccc_users);
+ max_scans = atomic_read(&cache->ccc_users) - 2;
while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
cli = list_entry(cache->ccc_lru.next, struct client_obd,
cl_lru_osc);
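
Both osc_cache_too_much() and osc_lru_reclaim() now divide the shared LRU budget by ccc_users - 2 rather than the raw user count; the adjustment by two appears to exclude cache references that never hold LRU pages (an assumption here, the hunks themselves do not say). A sketch of the budget computation with a defensive divisor, using illustrative names:

#include <linux/atomic.h>

struct shared_cache {
	unsigned long ccc_lru_max;	/* total LRU slots in the cache */
	atomic_t ccc_users;		/* references to the cache */
};

static unsigned long osc_lru_budget(struct shared_cache *cache)
{
	int users = atomic_read(&cache->ccc_users) - 2;

	/* the real code assumes more than two users; guard anyway */
	if (users < 1)
		users = 1;

	return cache->ccc_lru_max / users;
}
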
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 47417f88fe3c..536b868ff776 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -474,7 +470,8 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa,
DEBUG_REQ(D_HA, req,
"delorphan from OST integration");
/* Don't resend the delorphan req */
- req->rq_no_resend = req->rq_no_delay = 1;
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
}
rc = ptlrpc_queue_wait(req);
@@ -2249,7 +2246,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
struct lustre_handle lockh = { 0 };
struct ptlrpc_request *req = NULL;
int intent = *flags & LDLM_FL_HAS_INTENT;
- __u64 match_lvb = agl ? 0 : LDLM_FL_LVB_READY;
+ __u64 match_flags = *flags;
enum ldlm_mode mode;
int rc;
@@ -2284,7 +2281,11 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
mode = einfo->ei_mode;
if (einfo->ei_mode == LCK_PR)
mode |= LCK_PW;
- mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
+ if (agl == 0)
+ match_flags |= LDLM_FL_LVB_READY;
+ if (intent != 0)
+ match_flags |= LDLM_FL_BLOCK_GRANTED;
+ mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
einfo->ei_type, policy, mode, &lockh, 0);
if (mode) {
struct ldlm_lock *matched;
@@ -2775,7 +2776,8 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
memcpy(tmp, key, keylen);
- req->rq_no_delay = req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ req->rq_no_resend = 1;
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (rc)
@@ -2915,7 +2917,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
LASSERT(!cli->cl_cache); /* only once */
cli->cl_cache = val;
- atomic_inc(&cli->cl_cache->ccc_users);
+ cl_cache_incref(cli->cl_cache);
cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
/* add this osc into entity list */
@@ -3295,7 +3297,7 @@ static int osc_cleanup(struct obd_device *obd)
list_del_init(&cli->cl_lru_osc);
spin_unlock(&cli->cl_cache->ccc_lru_lock);
cli->cl_lru_left = NULL;
- atomic_dec(&cli->cl_cache->ccc_users);
+ cl_cache_decref(cli->cl_cache);
cli->cl_cache = NULL;
}
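
The raw atomic_inc()/atomic_dec() calls on ccc_users give way to cl_cache_incref()/cl_cache_decref() helpers, so reference counting is encapsulated in one place and the final put can release the structure. A minimal sketch of that incref/decref shape (this is not the Lustre cl_client_cache):

#include <linux/atomic.h>
#include <linux/slab.h>

struct refcounted_cache {
	atomic_t users;
	/* ... LRU bookkeeping ... */
};

static struct refcounted_cache *cache_incref(struct refcounted_cache *c)
{
	atomic_inc(&c->users);
	return c;
}

static void cache_decref(struct refcounted_cache *c)
{
	/* the last put frees the object, something scattered
	 * atomic_dec() call sites could never do safely
	 */
	if (atomic_dec_and_test(&c->users))
		kfree(c);
}
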
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 4b7912a2cb52..d4463d7c81d2 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -587,14 +583,19 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
spin_unlock(&pool->prp_lock);
}
-static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode,
- int count, __u32 *lengths, char **bufs,
- struct ptlrpc_cli_ctx *ctx)
+int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
+ __u32 version, int opcode, char **bufs,
+ struct ptlrpc_cli_ctx *ctx)
{
- struct obd_import *imp = request->rq_import;
+ int count;
+ struct obd_import *imp;
+ __u32 *lengths;
int rc;
+ count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
+ imp = request->rq_import;
+ lengths = request->rq_pill.rc_area[RCL_CLIENT];
+
if (unlikely(ctx)) {
request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
} else {
@@ -602,20 +603,16 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
if (rc)
goto out_free;
}
-
sptlrpc_req_set_flavor(request, opcode);
rc = lustre_pack_request(request, imp->imp_msg_magic, count,
lengths, bufs);
- if (rc) {
- LASSERT(!request->rq_pool);
+ if (rc)
goto out_ctx;
- }
lustre_msg_add_version(request->rq_reqmsg, version);
request->rq_send_state = LUSTRE_IMP_FULL;
request->rq_type = PTL_RPC_MSG_REQUEST;
- request->rq_export = NULL;
request->rq_req_cbid.cbid_fn = request_out_callback;
request->rq_req_cbid.cbid_arg = request;
@@ -624,6 +621,8 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
request->rq_reply_cbid.cbid_arg = request;
request->rq_reply_deadline = 0;
+ request->rq_bulk_deadline = 0;
+ request->rq_req_deadline = 0;
request->rq_phase = RQ_PHASE_NEW;
request->rq_next_phase = RQ_PHASE_UNDEFINED;
@@ -632,40 +631,49 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
ptlrpc_at_set_req_timeout(request);
- spin_lock_init(&request->rq_lock);
- INIT_LIST_HEAD(&request->rq_list);
- INIT_LIST_HEAD(&request->rq_timed_list);
- INIT_LIST_HEAD(&request->rq_replay_list);
- INIT_LIST_HEAD(&request->rq_ctx_chain);
- INIT_LIST_HEAD(&request->rq_set_chain);
- INIT_LIST_HEAD(&request->rq_history_list);
- INIT_LIST_HEAD(&request->rq_exp_list);
- init_waitqueue_head(&request->rq_reply_waitq);
- init_waitqueue_head(&request->rq_set_waitq);
request->rq_xid = ptlrpc_next_xid();
- atomic_set(&request->rq_refcount, 1);
-
lustre_msg_set_opc(request->rq_reqmsg, opcode);
+ /* Let's set up deadlines for req/reply/bulk unlink for this opcode. */
+ if (cfs_fail_val == opcode) {
+ time_t *fail_t = NULL, *fail2_t = NULL;
+
+ if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
+ fail_t = &request->rq_bulk_deadline;
+ } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
+ fail_t = &request->rq_reply_deadline;
+ } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
+ fail_t = &request->rq_req_deadline;
+ } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
+ fail_t = &request->rq_reply_deadline;
+ fail2_t = &request->rq_bulk_deadline;
+ }
+
+ if (fail_t) {
+ *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
+
+ if (fail2_t)
+ *fail2_t = ktime_get_real_seconds() +
+ LONG_UNLINK;
+
+ /* The RPC is infected; let the test change the
+ * fail_loc
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(2));
+ set_current_state(TASK_RUNNING);
+ }
+ }
+
return 0;
+
out_ctx:
+ LASSERT(!request->rq_pool);
sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
class_import_put(imp);
return rc;
}
-
-int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode, char **bufs,
- struct ptlrpc_cli_ctx *ctx)
-{
- int count;
-
- count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
- return __ptlrpc_request_bufs_pack(request, version, opcode, count,
- request->rq_pill.rc_area[RCL_CLIENT],
- bufs, ctx);
-}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
/**
@@ -722,7 +730,9 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
request = ptlrpc_prep_req_from_pool(pool);
if (request) {
- LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp);
+ ptlrpc_cli_req_init(request);
+
+ LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
LASSERT(imp != LP_POISON);
LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
imp->imp_client);
@@ -1163,9 +1173,9 @@ static int after_reply(struct ptlrpc_request *req)
LASSERT(obd);
/* repbuf must be unlinked */
- LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink);
+ LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
- if (req->rq_reply_truncate) {
+ if (req->rq_reply_truncated) {
if (ptlrpc_no_resend(req)) {
DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d",
req->rq_nob_received, req->rq_repbuf_len);
@@ -1239,8 +1249,9 @@ static int after_reply(struct ptlrpc_request *req)
}
ktime_get_real_ts64(&work_start);
- timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC +
- (work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC;
+ timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
+ (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
+ NSEC_PER_USEC;
if (obd->obd_svc_stats) {
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
timediff);
@@ -1503,16 +1514,28 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
if (!(req->rq_phase == RQ_PHASE_RPC ||
req->rq_phase == RQ_PHASE_BULK ||
req->rq_phase == RQ_PHASE_INTERPRET ||
- req->rq_phase == RQ_PHASE_UNREGISTERING ||
+ req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK ||
req->rq_phase == RQ_PHASE_COMPLETE)) {
DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
LBUG();
}
- if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK) {
LASSERT(req->rq_next_phase != req->rq_phase);
LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
+ if (req->rq_req_deadline &&
+ !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
+ req->rq_req_deadline = 0;
+ if (req->rq_reply_deadline &&
+ !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
+ req->rq_reply_deadline = 0;
+ if (req->rq_bulk_deadline &&
+ !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
+ req->rq_bulk_deadline = 0;
+
/*
* Skip processing until reply is unlinked. We
* can't return to pool before that and we can't
@@ -1520,7 +1543,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
* sure that all rdma transfers finished and will
* not corrupt any data.
*/
- if (ptlrpc_client_recv_or_unlink(req) ||
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
+ ptlrpc_client_recv_or_unlink(req))
+ continue;
+ if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
ptlrpc_client_bulk_active(req))
continue;
@@ -1998,7 +2024,7 @@ void ptlrpc_interrupted_set(void *data)
list_entry(tmp, struct ptlrpc_request, rq_set_chain);
if (req->rq_phase != RQ_PHASE_RPC &&
- req->rq_phase != RQ_PHASE_UNREGISTERING)
+ req->rq_phase != RQ_PHASE_UNREG_RPC)
continue;
ptlrpc_mark_interrupted(req);
@@ -2195,11 +2221,11 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
if (!request)
return;
+ LASSERT(!request->rq_srv_req);
+ LASSERT(!request->rq_export);
LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
- LASSERTF(!request->rq_rqbd, "req %p\n", request);/* client-side */
LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
- LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
LASSERTF(!request->rq_replay, "req %p\n", request);
req_capsule_fini(&request->rq_pill);
@@ -2225,10 +2251,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
if (request->rq_repbuf)
sptlrpc_cli_free_repbuf(request);
- if (request->rq_export) {
- class_export_put(request->rq_export);
- request->rq_export = NULL;
- }
+
if (request->rq_import) {
class_import_put(request->rq_import);
request->rq_import = NULL;
@@ -2313,8 +2336,9 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- async && request->rq_reply_deadline == 0)
- request->rq_reply_deadline = ktime_get_real_seconds()+LONG_UNLINK;
+ async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
+ request->rq_reply_deadline =
+ ktime_get_real_seconds() + LONG_UNLINK;
/* Nothing left to do. */
if (!ptlrpc_client_recv_or_unlink(request))
@@ -2327,7 +2351,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
return 1;
/* Move to "Unregistering" phase as reply was not unlinked yet. */
- ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);
+ ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
/* Do not wait for unlink to finish. */
if (async)
@@ -2359,9 +2383,10 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
LASSERT(rc == -ETIMEDOUT);
DEBUG_REQ(D_WARNING, request,
- "Unexpectedly long timeout rvcng=%d unlnk=%d/%d",
+ "Unexpectedly long timeout receiving_reply=%d req_ulinked=%d reply_unlinked=%d",
request->rq_receiving_reply,
- request->rq_req_unlink, request->rq_reply_unlink);
+ request->rq_req_unlinked,
+ request->rq_reply_unlinked);
}
return 0;
}
@@ -2618,11 +2643,6 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req)
}
EXPORT_SYMBOL(ptlrpc_queue_wait);
-struct ptlrpc_replay_async_args {
- int praa_old_state;
- int praa_old_status;
-};
-
/**
* Callback used for replayed requests reply processing.
* In case of successful reply calls registered request replay callback.
@@ -2961,7 +2981,6 @@ static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
req->rq_timeout = obd_timeout;
req->rq_sent = ktime_get_real_seconds();
req->rq_deadline = req->rq_sent + req->rq_timeout;
- req->rq_reply_deadline = req->rq_deadline;
req->rq_phase = RQ_PHASE_INTERPRET;
req->rq_next_phase = RQ_PHASE_COMPLETE;
req->rq_xid = ptlrpc_next_xid();
@@ -3017,27 +3036,17 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
return ERR_PTR(-ENOMEM);
}
+ ptlrpc_cli_req_init(req);
+
req->rq_send_state = LUSTRE_IMP_FULL;
req->rq_type = PTL_RPC_MSG_REQUEST;
req->rq_import = class_import_get(imp);
- req->rq_export = NULL;
req->rq_interpret_reply = work_interpreter;
/* don't want reply */
- req->rq_receiving_reply = 0;
- req->rq_req_unlink = req->rq_reply_unlink = 0;
- req->rq_no_delay = req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ req->rq_no_resend = 1;
req->rq_pill.rc_fmt = (void *)&worker_format;
- spin_lock_init(&req->rq_lock);
- INIT_LIST_HEAD(&req->rq_list);
- INIT_LIST_HEAD(&req->rq_replay_list);
- INIT_LIST_HEAD(&req->rq_set_chain);
- INIT_LIST_HEAD(&req->rq_history_list);
- INIT_LIST_HEAD(&req->rq_exp_list);
- init_waitqueue_head(&req->rq_reply_waitq);
- init_waitqueue_head(&req->rq_set_waitq);
- atomic_set(&req->rq_refcount, 1);
-
CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
args = ptlrpc_req_async_args(req);
args->cb = cb;
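
Besides folding __ptlrpc_request_bufs_pack() into ptlrpc_request_bufs_pack() and moving per-request list/waitqueue setup into ptlrpc_cli_req_init(), the client.c changes plant unlink deadlines when the test harness sets cfs_fail_val to the opcode under test and one of the OBD_FAIL_PTLRPC_LONG_*_UNLINK fail_locs is armed. A generic sketch of opcode-keyed fault injection, with stand-ins for the CFS fail machinery:

#include <stdbool.h>
#include <time.h>

#define LONG_UNLINK 300			/* stand-in delay, seconds */

extern int fail_val;			/* set by the test harness */
extern bool fail_check(unsigned int id);	/* like CFS_FAIL_CHECK() */

enum { FAIL_LONG_BULK = 1, FAIL_LONG_REPLY = 2 };

struct request {
	int opcode;
	time_t bulk_deadline;
	time_t reply_deadline;
};

static void maybe_delay_unlink(struct request *req)
{
	time_t *t = NULL;

	if (fail_val != req->opcode)
		return;			/* only the opcode under test */

	if (fail_check(FAIL_LONG_BULK))
		t = &req->bulk_deadline;
	else if (fail_check(FAIL_LONG_REPLY))
		t = &req->reply_deadline;

	if (t)
		*t = time(NULL) + LONG_UNLINK;
}
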
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index a14daff3fca0..177a379da9fa 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index fdcde9bbd788..b1ce72511509 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -55,27 +51,33 @@ void request_out_callback(lnet_event_t *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
+ bool wakeup = false;
- LASSERT(ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK);
LASSERT(ev->unlinked);
DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
sptlrpc_request_out_callback(req);
+
spin_lock(&req->rq_lock);
req->rq_real_sent = ktime_get_real_seconds();
- if (ev->unlinked)
- req->rq_req_unlink = 0;
+ req->rq_req_unlinked = 1;
+ /* reply_in_callback happened before request_out_callback? */
+ if (req->rq_reply_unlinked)
+ wakeup = true;
if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
/* Failed send: make it seem like the reply timed out, just
* like failing sends in client.c does currently...
*/
-
req->rq_net_err = 1;
- ptlrpc_client_wake_req(req);
+ wakeup = true;
}
+
+ if (wakeup)
+ ptlrpc_client_wake_req(req);
+
spin_unlock(&req->rq_lock);
ptlrpc_req_finished(req);
@@ -104,7 +106,7 @@ void reply_in_callback(lnet_event_t *ev)
req->rq_receiving_reply = 0;
req->rq_early = 0;
if (ev->unlinked)
- req->rq_reply_unlink = 0;
+ req->rq_reply_unlinked = 1;
if (ev->status)
goto out_wake;
@@ -118,7 +120,7 @@ void reply_in_callback(lnet_event_t *ev)
if (ev->mlength < ev->rlength) {
CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
req->rq_replen, ev->rlength, ev->offset);
- req->rq_reply_truncate = 1;
+ req->rq_reply_truncated = 1;
req->rq_replied = 1;
req->rq_status = -EOVERFLOW;
req->rq_nob_received = ev->rlength + ev->offset;
@@ -135,7 +137,8 @@ void reply_in_callback(lnet_event_t *ev)
req->rq_early_count++; /* number received, client side */
- if (req->rq_replied) /* already got the real reply */
+ /* already got the real reply or buffers are already unlinked */
+ if (req->rq_replied || req->rq_reply_unlinked == 1)
goto out_wake;
req->rq_early = 1;
@@ -328,6 +331,7 @@ void request_in_callback(lnet_event_t *ev)
}
}
+ ptlrpc_srv_req_init(req);
/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
* flags are reset and scalars are zero. We only set the message
* size to non-zero if this was a successful receive.
@@ -341,10 +345,6 @@ void request_in_callback(lnet_event_t *ev)
req->rq_self = ev->target.nid;
req->rq_rqbd = rqbd;
req->rq_phase = RQ_PHASE_NEW;
- spin_lock_init(&req->rq_lock);
- INIT_LIST_HEAD(&req->rq_timed_list);
- INIT_LIST_HEAD(&req->rq_exp_list);
- atomic_set(&req->rq_refcount, 1);
if (ev->type == LNET_EVENT_PUT)
CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
req, req->rq_xid, ev->mlength);
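
request_out_callback() and reply_in_callback() can fire in either order, so each side now records its own *_unlinked flag under rq_lock and only the callback that completes the pair performs the wakeup. A sketch of that two-flag handshake with illustrative types:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct inflight {
	spinlock_t lock;
	bool req_unlinked;	/* request buffer gone */
	bool reply_unlinked;	/* reply buffer gone */
	wait_queue_head_t waitq;
};

static void unlink_done(struct inflight *in, bool is_reply)
{
	bool wake;

	spin_lock(&in->lock);
	if (is_reply)
		in->reply_unlinked = true;
	else
		in->req_unlinked = true;
	/* only the second arrival sees both flags set */
	wake = in->req_unlinked && in->reply_unlinked;
	spin_unlock(&in->lock);

	if (wake)
		wake_up(&in->waitq);
}
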
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index a4f7544f46b8..3292e6ea0102 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -360,9 +356,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
"still on delayed list");
}
- CERROR("%s: RPCs in \"%s\" phase found (%d). Network is sluggish? Waiting them to error out.\n",
+ CERROR("%s: Unregistering RPCs found (%d). Network is sluggish? Waiting them to error out.\n",
cli_tgt,
- ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
atomic_read(&imp->
imp_unregistering));
}
@@ -698,7 +693,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
- request->rq_no_resend = request->rq_no_delay = 1;
+ request->rq_no_resend = 1;
+ request->rq_no_delay = 1;
request->rq_send_state = LUSTRE_IMP_CONNECTING;
/* Allow a slightly larger reply for future growth compatibility */
req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index c0ecd1625dc4..ab5d85174245 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -198,7 +194,7 @@ static const struct req_msg_field *mds_reint_create_slave_client[] = {
&RMF_DLM_REQ
};
-static const struct req_msg_field *mds_reint_create_rmt_acl_client[] = {
+static const struct req_msg_field *mds_reint_create_acl_client[] = {
&RMF_PTLRPC_BODY,
&RMF_REC_REINT,
&RMF_CAPA1,
@@ -679,7 +675,7 @@ static struct req_format *req_formats[] = {
&RQF_MDS_DONE_WRITING,
&RQF_MDS_REINT,
&RQF_MDS_REINT_CREATE,
- &RQF_MDS_REINT_CREATE_RMT_ACL,
+ &RQF_MDS_REINT_CREATE_ACL,
&RQF_MDS_REINT_CREATE_SLAVE,
&RQF_MDS_REINT_CREATE_SYM,
&RQF_MDS_REINT_OPEN,
@@ -1242,10 +1238,10 @@ struct req_format RQF_MDS_REINT_CREATE =
mds_reint_create_client, mdt_body_capa);
EXPORT_SYMBOL(RQF_MDS_REINT_CREATE);
-struct req_format RQF_MDS_REINT_CREATE_RMT_ACL =
- DEFINE_REQ_FMT0("MDS_REINT_CREATE_RMT_ACL",
- mds_reint_create_rmt_acl_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_RMT_ACL);
+struct req_format RQF_MDS_REINT_CREATE_ACL =
+ DEFINE_REQ_FMT0("MDS_REINT_CREATE_ACL",
+ mds_reint_create_acl_client, mdt_body_capa);
+EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_ACL);
struct req_format RQF_MDS_REINT_CREATE_SLAVE =
DEFINE_REQ_FMT0("MDS_REINT_CREATE_EA",
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index a23ac5f9ae96..0f55c01feba8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
index fbccb62213b5..bccdace7e51f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 64c0f1e17f36..bc93b75744e1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -872,7 +868,8 @@ ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */
srhi->srhi_req = NULL;
- seq = srhi->srhi_seq = 0;
+ seq = 0;
+ srhi->srhi_seq = 0;
} else { /* the next sequence */
seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits);
}
@@ -1161,7 +1158,6 @@ void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
lprocfs_counter_add(svc_stats, idx, bytes);
}
-
EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 10b8fe82a342..11ec82545347 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -251,7 +247,7 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- async && req->rq_bulk_deadline == 0)
+ async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
@@ -270,7 +266,7 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
return 1; /* never registered */
/* Move to "Unregistering" phase as bulk was not unlinked yet. */
- ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
+ ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);
/* Do not wait for unlink to finish. */
if (async)
@@ -581,19 +577,18 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
}
spin_lock(&request->rq_lock);
- /* If the MD attach succeeds, there _will_ be a reply_in callback */
- request->rq_receiving_reply = !noreply;
- request->rq_req_unlink = 1;
/* We are responsible for unlinking the reply buffer */
- request->rq_reply_unlink = !noreply;
+ request->rq_reply_unlinked = noreply;
+ request->rq_receiving_reply = !noreply;
/* Clear any flags that may be present from previous sends. */
+ request->rq_req_unlinked = 0;
request->rq_replied = 0;
request->rq_err = 0;
request->rq_timedout = 0;
request->rq_net_err = 0;
request->rq_resend = 0;
request->rq_restart = 0;
- request->rq_reply_truncate = 0;
+ request->rq_reply_truncated = 0;
spin_unlock(&request->rq_lock);
if (!noreply) {
@@ -608,7 +603,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
reply_md.user_ptr = &request->rq_reply_cbid;
reply_md.eq_handle = ptlrpc_eq_h;
- /* We must see the unlink callback to unset rq_reply_unlink,
+ /* We must see the unlink callback to set rq_reply_unlinked,
* so we can't auto-unlink
*/
rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
@@ -637,7 +632,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
- ktime_get_real_ts64(&request->rq_arrival_time);
+ ktime_get_real_ts64(&request->rq_sent_tv);
request->rq_sent = ktime_get_real_seconds();
/* We give the server rq_timeout secs to process the req, and
* add the network latency for our local timeout.
@@ -655,9 +650,10 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
connection,
request->rq_request_portal,
request->rq_xid, 0);
- if (rc == 0)
+ if (likely(rc == 0))
goto out;
+ request->rq_req_unlinked = 1;
ptlrpc_req_finished(request);
if (noreply)
goto out;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index c444f516856f..d88faf61e740 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -769,7 +769,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
spin_unlock(&nrs->nrs_lock);
if (rc != 0)
- (void) nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
+ (void)nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 811acf6fc786..b514f18fae50 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1806,19 +1802,6 @@ void lustre_swab_obd_quotactl(struct obd_quotactl *q)
}
EXPORT_SYMBOL(lustre_swab_obd_quotactl);
-void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p)
-{
- __swab32s(&p->rp_uid);
- __swab32s(&p->rp_gid);
- __swab32s(&p->rp_fsuid);
- __swab32s(&p->rp_fsuid_h);
- __swab32s(&p->rp_fsgid);
- __swab32s(&p->rp_fsgid_h);
- __swab32s(&p->rp_access_perm);
- __swab32s(&p->rp_padding);
-};
-EXPORT_SYMBOL(lustre_swab_mdt_remote_perm);
-
void lustre_swab_fid2path(struct getinfo_fid2path *gf)
{
lustre_swab_lu_fid(&gf->gf_fid);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
index ec3af109a1d7..6c820e944171 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 8a869315c258..c0529d808d81 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -57,7 +53,8 @@ ptlrpc_prep_ping(struct obd_import *imp)
LUSTRE_OBD_VERSION, OBD_PING);
if (req) {
ptlrpc_request_set_replen(req);
- req->rq_no_resend = req->rq_no_delay = 1;
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
}
return req;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index 6ca26c98de1b..a9831fab80f3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -292,4 +288,43 @@ static inline void ptlrpc_reqset_put(struct ptlrpc_request_set *set)
if (atomic_dec_and_test(&set->set_refcount))
kfree(set);
}
+
+/** initialise ptlrpc common fields */
+static inline void ptlrpc_req_comm_init(struct ptlrpc_request *req)
+{
+ spin_lock_init(&req->rq_lock);
+ atomic_set(&req->rq_refcount, 1);
+ INIT_LIST_HEAD(&req->rq_list);
+ INIT_LIST_HEAD(&req->rq_replay_list);
+}
+
+/** initialise client side ptlrpc request */
+static inline void ptlrpc_cli_req_init(struct ptlrpc_request *req)
+{
+ struct ptlrpc_cli_req *cr = &req->rq_cli;
+
+ ptlrpc_req_comm_init(req);
+
+ req->rq_receiving_reply = 0;
+ req->rq_req_unlinked = 1;
+ req->rq_reply_unlinked = 1;
+
+ INIT_LIST_HEAD(&cr->cr_set_chain);
+ INIT_LIST_HEAD(&cr->cr_ctx_chain);
+ init_waitqueue_head(&cr->cr_reply_waitq);
+ init_waitqueue_head(&cr->cr_set_waitq);
+}
+
+/** initialise server side ptlrpc request */
+static inline void ptlrpc_srv_req_init(struct ptlrpc_request *req)
+{
+ struct ptlrpc_srv_req *sr = &req->rq_srv;
+
+ ptlrpc_req_comm_init(req);
+ req->rq_srv_req = 1;
+ INIT_LIST_HEAD(&sr->sr_exp_list);
+ INIT_LIST_HEAD(&sr->sr_timed_list);
+ INIT_LIST_HEAD(&sr->sr_hist_list);
+}
+
#endif /* PTLRPC_INTERNAL_H */
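
These helpers centralise the open-coded spin_lock_init()/INIT_LIST_HEAD()/init_waitqueue_head() runs deleted from client.c, events.c and sec.c: common fields are initialised once, then each role layers its own state on top, so an allocation site reduces to a single call. The layering, sketched with stand-in types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct msg {
	spinlock_t lock;
	struct list_head list;
	struct list_head set_chain;	/* client-only in this sketch */
};

static void msg_common_init(struct msg *m)
{
	spin_lock_init(&m->lock);
	INIT_LIST_HEAD(&m->list);
}

/* an allocation site calls this once instead of half a dozen inits */
static void msg_cli_init(struct msg *m)
{
	msg_common_init(m);		/* shared fields first */
	INIT_LIST_HEAD(&m->set_chain);	/* then role-specific ones */
}
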
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
index a8ec0e9d7b2e..a70d5843f30e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 76a355a9db8b..0a374b6c2f71 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -161,9 +157,9 @@ static int ptlrpcd_users;
void ptlrpcd_wake(struct ptlrpc_request *req)
{
- struct ptlrpc_request_set *rq_set = req->rq_set;
+ struct ptlrpc_request_set *set = req->rq_set;
- wake_up(&rq_set->set_waitq);
+ wake_up(&set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 30d9a164e52d..718b3a8d61c6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 187fd1d6898c..dbd819fa6b75 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -867,11 +863,9 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
if (!req)
return -ENOMEM;
- spin_lock_init(&req->rq_lock);
+ ptlrpc_cli_req_init(req);
atomic_set(&req->rq_refcount, 10000);
- INIT_LIST_HEAD(&req->rq_ctx_chain);
- init_waitqueue_head(&req->rq_reply_waitq);
- init_waitqueue_head(&req->rq_set_waitq);
+
req->rq_import = imp;
req->rq_flvr = sec->ps_flvr;
req->rq_cli_ctx = ctx;
@@ -1051,6 +1045,8 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
if (!early_req)
return -ENOMEM;
+ ptlrpc_cli_req_init(early_req);
+
early_size = req->rq_nob_received;
early_bufsz = size_roundup_power2(early_size);
early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS);
@@ -1099,12 +1095,11 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
memcpy(early_buf, req->rq_repbuf, early_size);
spin_unlock(&req->rq_lock);
- spin_lock_init(&early_req->rq_lock);
early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
early_req->rq_flvr = req->rq_flvr;
early_req->rq_repbuf = early_buf;
early_req->rq_repbuf_len = early_bufsz;
- early_req->rq_repdata = (struct lustre_msg *) early_buf;
+ early_req->rq_repdata = (struct lustre_msg *)early_buf;
early_req->rq_repdata_len = early_size;
early_req->rq_early = 1;
early_req->rq_reqmsg = req->rq_reqmsg;
@@ -1556,7 +1551,7 @@ void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
/* move from segment + 1 to end segment */
LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
- movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
+ movesize = oldmsg_size - ((unsigned long)src - (unsigned long)msg);
LASSERT(movesize >= 0);
if (movesize)
@@ -2196,6 +2191,9 @@ int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
pud = lustre_msg_buf(msg, offset, 0);
+ if (!pud)
+ return -EINVAL;
+
pud->pud_uid = from_kuid(&init_user_ns, current_uid());
pud->pud_gid = from_kgid(&init_user_ns, current_gid());
pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
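
sptlrpc_pack_user_desc() now verifies the buffer returned by lustre_msg_buf() before writing credentials into it, returning -EINVAL instead of dereferencing a NULL segment. The defensive shape, sketched with stand-ins for the message accessors:

#include <linux/errno.h>

struct user_desc {
	unsigned int uid;
	unsigned int gid;
};

/* may return NULL when the segment is absent or undersized,
 * as lustre_msg_buf() can in the hunk above
 */
extern void *msg_buf(void *msg, int segment, int minlen);

static int pack_user_desc(void *msg, int offset)
{
	struct user_desc *pud = msg_buf(msg, offset, sizeof(*pud));

	if (!pud)
		return -EINVAL;	/* fail cleanly rather than oops */

	pud->uid = 0;		/* real code fills current credentials */
	pud->gid = 0;
	return 0;
}
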
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 02e6cda4c995..5f4d79718589 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -273,7 +269,7 @@ static unsigned long enc_pools_shrink_scan(struct shrinker *s,
static inline
int npages_to_npools(unsigned long npages)
{
- return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
+ return (int)((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
/*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index a51b18bbfd34..c14035479c5f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -648,7 +644,7 @@ static int logname2fsname(const char *logname, char *buf, int buflen)
return -EINVAL;
}
- len = min((int) (ptr - logname), buflen - 1);
+ len = min((int)(ptr - logname), buflen - 1);
memcpy(buf, logname, len);
buf[len] = '\0';
@@ -819,7 +815,7 @@ void sptlrpc_conf_client_adapt(struct obd_device *obd)
CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid);
/* serialize with connect/disconnect import */
- down_read(&obd->u.cli.cl_sem);
+ down_read_nested(&obd->u.cli.cl_sem, OBD_CLI_SEM_MDCOSC);
imp = obd->u.cli.cl_import;
if (imp) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
index 9082da06b28a..9b9801ece582 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
index e610a8ddd223..07273f577969 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
index 40e5349de38c..70a61e12bb7b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -60,7 +56,7 @@ static struct ptlrpc_svc_ctx null_svc_ctx;
static inline
void null_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
{
- msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 24;
+ msg->lm_secflvr |= (((__u32)sp) & 0xFF) << 24;
}
static inline
@@ -265,7 +261,8 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
kvfree(req->rq_reqbuf);
- req->rq_reqbuf = req->rq_reqmsg = newbuf;
+ req->rq_reqbuf = newbuf;
+ req->rq_reqmsg = newbuf;
req->rq_reqbuf_len = alloc_size;
if (req->rq_import)
@@ -329,7 +326,7 @@ int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
rs->rs_svc_ctx = req->rq_svc_ctx;
atomic_inc(&req->rq_svc_ctx->sc_refcount);
- rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+ rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
rs->rs_repbuf_len = rs_size - sizeof(*rs);
rs->rs_msg = rs->rs_repbuf;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 37c9f4c453de..5c4590b0c521 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -298,7 +294,7 @@ int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
- token = (struct plain_bulk_token *) bsd->bsd_data;
+ token = (struct plain_bulk_token *)bsd->bsd_data;
bsd->bsd_version = 0;
bsd->bsd_flags = 0;
@@ -343,7 +339,7 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
- tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+ tokenv = (struct plain_bulk_token *)bsdv->bsd_data;
if (req->rq_bulk_write) {
if (bsdv->bsd_flags & BSD_FL_ERR)
@@ -574,8 +570,12 @@ int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);
- if (req->rq_pack_udesc)
- sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
+ if (req->rq_pack_udesc) {
+ int rc = sptlrpc_pack_user_desc(req->rq_reqbuf,
+ PLAIN_PACK_USER_OFF);
+ if (rc < 0)
+ return rc;
+ }
return 0;
}
@@ -811,7 +811,7 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
rs->rs_svc_ctx = req->rq_svc_ctx;
atomic_inc(&req->rq_svc_ctx->sc_refcount);
- rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+ rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
rs->rs_repbuf_len = rs_size - sizeof(*rs);
lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
@@ -891,7 +891,7 @@ int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
LASSERT(req->rq_pack_bulk);
bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
- tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
+ tokenr = (struct plain_bulk_token *)bsdr->bsd_data;
bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
bsdv->bsd_version = 0;
@@ -926,7 +926,7 @@ int plain_svc_wrap_bulk(struct ptlrpc_request *req,
bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
- tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+ tokenv = (struct plain_bulk_token *)bsdv->bsd_data;
bsdv->bsd_version = 0;
bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 17c7b9749f67..4788c4940c2a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index aacc8108391d..6cc2b2edf3fc 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1269,8 +1265,6 @@ void lustre_assert_wire_constants(void)
OBD_MD_FLXATTRRM);
LASSERTF(OBD_MD_FLACL == (0x0000008000000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLACL);
- LASSERTF(OBD_MD_FLRMTPERM == (0x0000010000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTPERM);
LASSERTF(OBD_MD_FLMDSCAPA == (0x0000020000000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLMDSCAPA);
LASSERTF(OBD_MD_FLOSSCAPA == (0x0000040000000000ULL), "found 0x%.16llxULL\n",
@@ -1281,14 +1275,6 @@ void lustre_assert_wire_constants(void)
OBD_MD_FLCROSSREF);
LASSERTF(OBD_MD_FLGETATTRLOCK == (0x0000200000000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLGETATTRLOCK);
- LASSERTF(OBD_MD_FLRMTLSETFACL == (0x0001000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTLSETFACL);
- LASSERTF(OBD_MD_FLRMTLGETFACL == (0x0002000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTLGETFACL);
- LASSERTF(OBD_MD_FLRMTRSETFACL == (0x0004000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTRSETFACL);
- LASSERTF(OBD_MD_FLRMTRGETFACL == (0x0008000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTRGETFACL);
LASSERTF(OBD_MD_FLDATAVERSION == (0x0010000000000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLDATAVERSION);
CLASSERT(OBD_FL_INLINEDATA == 0x00000001);
@@ -1895,44 +1881,6 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->padding) == 4, "found %lld\n",
(long long)(int)sizeof(((struct mdt_ioepoch *)0)->padding));
- /* Checks for struct mdt_remote_perm */
- LASSERTF((int)sizeof(struct mdt_remote_perm) == 32, "found %lld\n",
- (long long)(int)sizeof(struct mdt_remote_perm));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_uid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_uid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_uid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_gid) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_gid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_gid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_fsuid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_fsuid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_fsgid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_fsgid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_access_perm) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_access_perm));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_access_perm) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_access_perm));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_padding) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_padding));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_padding));
- LASSERTF(CFS_SETUID_PERM == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)CFS_SETUID_PERM);
- LASSERTF(CFS_SETGID_PERM == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)CFS_SETGID_PERM);
- LASSERTF(CFS_SETGRP_PERM == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)CFS_SETGRP_PERM);
- LASSERTF(CFS_RMTACL_PERM == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)CFS_RMTACL_PERM);
- LASSERTF(CFS_RMTOWN_PERM == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)CFS_RMTOWN_PERM);
-
/* Checks for struct mdt_rec_setattr */
LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n",
(long long)(int)sizeof(struct mdt_rec_setattr));
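The LASSERTF() lines removed above are part of Lustre's wire-protocol self-check: lustre_assert_wire_constants() pins structure sizes, field offsets, and flag values so that an accidental layout change breaks loudly, and dropping remote-permission support means dropping its checks too. A compile-time sketch of the same technique using standard C11, with an illustrative struct rather than Lustre's:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct wire_hdr {
            uint32_t magic;
            uint32_t version;
            uint64_t flags;
    };

    /* Any layout drift now fails the build instead of corrupting the wire. */
    static_assert(sizeof(struct wire_hdr) == 16, "wire_hdr size changed");
    static_assert(offsetof(struct wire_hdr, version) == 4, "version moved");
    static_assert(offsetof(struct wire_hdr, flags) == 8, "flags moved");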
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
index 873e2cf31217..20206ba965af 100644
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ b/drivers/staging/lustre/sysfs-fs-lustre
@@ -294,6 +294,14 @@ Description:
Controls extended attributes client-side cache.
1 to enable, 0 to disable.
+What: /sys/fs/lustre/llite/<fsname>-<uuid>/unstable_stats
+Date: Apr 2016
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+	Shows the number of pages that were sent and acknowledged by
+	the server but not yet committed, and are therefore still
+	pinned in client memory even though they are no longer dirty.
+
What: /sys/fs/lustre/ldlm/cancel_unused_locks_before_replay
Date: May 2015
Contact: "Oleg Drokin" <oleg.drokin@intel.com>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 68931e5ecd8f..09e9499b7f9d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1799,8 +1799,8 @@ struct ieee80211_device {
short scanning;
short proto_started;
- struct semaphore wx_sem;
- struct semaphore scan_sem;
+ struct mutex wx_mutex;
+ struct mutex scan_mutex;
spinlock_t mgmt_tx_lock;
spinlock_t beacon_lock;
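This hunk begins the conversion of binary semaphores used as locks into proper mutexes. A semaphore initialized to 1 behaves like a mutex but has no owner, so it defeats owner tracking, lockdep validation, and the debug checks that catch an unlock from the wrong context. A minimal before/after sketch, with names illustrative:

    #include <linux/mutex.h>

    struct demo_dev {
            struct mutex wx_mutex;  /* serializes wireless-extension handlers */
    };

    static void demo_init(struct demo_dev *d)
    {
            mutex_init(&d->wx_mutex);       /* replaces sema_init(&sem, 1) */
    }

    static int demo_handler(struct demo_dev *d)
    {
            mutex_lock(&d->wx_mutex);       /* replaces down(&sem) */
            /* ... critical section: only the owner may unlock ... */
            mutex_unlock(&d->wx_mutex);     /* replaces up(&sem) */
            return 0;
    }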
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index d705595766a9..49db1b75cd05 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -427,7 +427,7 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
short ch = 0;
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
- down(&ieee->scan_sem);
+ mutex_lock(&ieee->scan_mutex);
while(1)
{
@@ -475,13 +475,13 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
out:
if(ieee->state < IEEE80211_LINKED){
ieee->actscanning = false;
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
}
else{
ieee->sync_scan_hurryup = 0;
if(IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
}
}
EXPORT_SYMBOL(ieee80211_softmac_scan_syncro);
@@ -495,7 +495,7 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
if(!ieee->ieee_up)
return;
- down(&ieee->scan_sem);
+ mutex_lock(&ieee->scan_mutex);
do{
ieee->current_network.channel =
(ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER;
@@ -517,7 +517,7 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
schedule_delayed_work(&ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
return;
out:
if(IS_DOT11D_ENABLE(ieee))
@@ -525,7 +525,7 @@ out:
ieee->actscanning = false;
watchdog = 0;
ieee->scanning = 0;
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
}
@@ -579,7 +579,7 @@ static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
//ieee->sync_scan_hurryup = 1;
- down(&ieee->scan_sem);
+ mutex_lock(&ieee->scan_mutex);
// spin_lock_irqsave(&ieee->lock, flags);
if (ieee->scanning == 1) {
@@ -589,7 +589,7 @@ static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
}
// spin_unlock_irqrestore(&ieee->lock, flags);
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
}
void ieee80211_stop_scan(struct ieee80211_device *ieee)
@@ -621,7 +621,7 @@ static void ieee80211_start_scan(struct ieee80211_device *ieee)
}
-/* called with wx_sem held */
+/* called with wx_mutex held */
void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
{
if (IS_DOT11D_ENABLE(ieee) )
@@ -1389,7 +1389,7 @@ static void ieee80211_associate_procedure_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->data_hard_stop)
ieee->data_hard_stop(ieee->dev);
@@ -1402,7 +1402,7 @@ static void ieee80211_associate_procedure_wq(struct work_struct *work)
ieee->associate_seq = 1;
ieee80211_associate_step1(ieee);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net)
@@ -2331,7 +2331,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
/* iwconfig mode ad-hoc will schedule this and return
* on the other hand this will block further iwconfig SET
- * operations because of the wx_sem hold.
+ * operations because of the wx_mutex hold.
* Anyway, most set operations set a flag to speed up
* (abort) this wq (when syncro scanning) before sleeping
* on the semaphore
@@ -2340,7 +2340,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
printk("==========oh driver down return\n");
return;
}
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->current_network.ssid_len == 0) {
strcpy(ieee->current_network.ssid, IEEE80211_DEFAULT_TX_ESSID);
@@ -2431,7 +2431,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
ieee->data_hard_resume(ieee->dev);
netif_carrier_on(ieee->dev);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
@@ -2439,7 +2439,7 @@ inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
schedule_delayed_work(&ieee->start_ibss_wq, 150);
}
-/* this is called only in user context, with wx_sem held */
+/* this is called only in user context, with wx_mutex held */
void ieee80211_start_bss(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -2505,7 +2505,7 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
unsigned long flags;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if(!ieee->proto_started)
goto exit;
@@ -2537,7 +2537,7 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
spin_unlock_irqrestore(&ieee->lock, flags);
exit:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
@@ -2583,9 +2583,9 @@ EXPORT_SYMBOL(ieee80211_get_beacon);
void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee)
{
ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
ieee80211_stop_protocol(ieee);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
EXPORT_SYMBOL(ieee80211_softmac_stop_protocol);
@@ -2609,9 +2609,9 @@ void ieee80211_stop_protocol(struct ieee80211_device *ieee)
void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee)
{
ieee->sync_scan_hurryup = 0;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
ieee80211_start_protocol(ieee);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
EXPORT_SYMBOL(ieee80211_softmac_start_protocol);
@@ -2728,8 +2728,8 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
INIT_WORK(&ieee->wx_sync_scan_wq, ieee80211_wx_sync_scan_wq);
- sema_init(&ieee->wx_sem, 1);
- sema_init(&ieee->scan_sem, 1);
+ mutex_init(&ieee->wx_mutex);
+ mutex_init(&ieee->scan_mutex);
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
@@ -2742,14 +2742,14 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
void ieee80211_softmac_free(struct ieee80211_device *ieee)
{
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
kfree(ieee->pDot11dInfo);
ieee->pDot11dInfo = NULL;
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
/********************************************************
@@ -3138,7 +3138,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
struct ieee_param *param;
int ret=0;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
//IEEE_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length);
if (p->length < sizeof(struct ieee_param) || !p->pointer) {
@@ -3183,7 +3183,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
kfree(param);
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index aad288a1f9e3..21bd0dc40888 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -34,7 +34,7 @@ int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info
int ret;
struct iw_freq *fwrq = &wrqu->freq;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->iw_mode == IW_MODE_INFRA) {
ret = -EOPNOTSUPP;
@@ -79,7 +79,7 @@ int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info
ret = 0;
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(ieee80211_wx_set_freq);
@@ -145,7 +145,7 @@ int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
/* use ifconfig hw ether */
if (ieee->iw_mode == IW_MODE_MASTER) {
ret = -1;
@@ -173,7 +173,7 @@ int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
if (ifup)
ieee80211_start_protocol(ieee);
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(ieee80211_wx_set_wap);
@@ -274,7 +274,7 @@ int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info
ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (wrqu->mode == ieee->iw_mode)
goto out;
@@ -293,7 +293,7 @@ int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info
}
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return 0;
}
EXPORT_SYMBOL(ieee80211_wx_set_mode);
@@ -353,7 +353,7 @@ void ieee80211_wx_sync_scan_wq(struct work_struct *work)
ieee80211_start_send_beacons(ieee);
netif_carrier_on(ieee->dev);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
@@ -362,7 +362,7 @@ int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info
{
int ret = 0;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)) {
ret = -1;
@@ -376,7 +376,7 @@ int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info
}
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(ieee80211_wx_set_scan);
@@ -391,7 +391,7 @@ int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
unsigned long flags;
ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
proto_started = ieee->proto_started;
@@ -430,7 +430,7 @@ int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
if (proto_started)
ieee80211_start_protocol(ieee);
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(ieee80211_wx_set_essid);
@@ -453,7 +453,7 @@ int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
int enable = (parms[0] > 0);
short prev = ieee->raw_tx;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (enable)
ieee->raw_tx = 1;
@@ -475,7 +475,7 @@ int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
netif_carrier_off(ieee->dev);
}
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return 0;
}
@@ -514,7 +514,7 @@ int ieee80211_wx_set_power(struct ieee80211_device *ieee,
{
int ret = 0;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (wrqu->power.disabled) {
ieee->ps = IEEE80211_PS_DISABLED;
@@ -553,7 +553,7 @@ int ieee80211_wx_set_power(struct ieee80211_device *ieee,
}
exit:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
@@ -564,7 +564,7 @@ int ieee80211_wx_get_power(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->ps == IEEE80211_PS_DISABLED) {
wrqu->power.disabled = 1;
@@ -592,7 +592,7 @@ int ieee80211_wx_get_power(struct ieee80211_device *ieee,
wrqu->power.flags |= IW_POWER_UNICAST_R;
exit:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return 0;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 208be5fc527a..563d7fed6e1c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -253,7 +253,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
int i = 0;
int err = 0;
IEEE80211_DEBUG_WX("Getting scan\n");
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(network, &ieee->network_list, list) {
@@ -262,7 +262,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
{
err = -E2BIG;
break;
- }
+ }
if (ieee->scan_age == 0 ||
time_after(network->last_scanned + ieee->scan_age, jiffies))
ev = rtl819x_translate_scan(ieee, ev, stop, network, info);
@@ -277,7 +277,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
}
spin_unlock_irqrestore(&ieee->lock, flags);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
wrqu->data.length = ev - extra;
wrqu->data.flags = 0;
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
index 97d9b3f49114..f35defc36fd9 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ b/drivers/staging/rtl8192u/r8180_93cx6.c
@@ -23,8 +23,11 @@
static void eprom_cs(struct net_device *dev, short bit)
{
u8 cmdreg;
+ int err;
- read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ if (err)
+ return;
if (bit)
/* enable EPROM */
write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_CS_BIT);
@@ -40,8 +43,11 @@ static void eprom_cs(struct net_device *dev, short bit)
static void eprom_ck_cycle(struct net_device *dev)
{
u8 cmdreg;
+ int err;
- read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ if (err)
+ return;
write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_CK_BIT);
force_pci_posting(dev);
udelay(EPROM_DELAY);
@@ -56,8 +62,11 @@ static void eprom_ck_cycle(struct net_device *dev)
static void eprom_w(struct net_device *dev, short bit)
{
u8 cmdreg;
+ int err;
- read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ if (err)
+ return;
if (bit)
write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_W_BIT);
else
@@ -71,8 +80,12 @@ static void eprom_w(struct net_device *dev, short bit)
static short eprom_r(struct net_device *dev)
{
u8 bit;
+ int err;
+
+ err = read_nic_byte_E(dev, EPROM_CMD, &bit);
+ if (err)
+ return err;
- read_nic_byte_E(dev, EPROM_CMD, &bit);
udelay(EPROM_DELAY);
if (bit & EPROM_R_BIT)
@@ -93,7 +106,7 @@ static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
}
-u32 eprom_read(struct net_device *dev, u32 addr)
+int eprom_read(struct net_device *dev, u32 addr)
{
struct r8192_priv *priv = ieee80211_priv(dev);
short read_cmd[] = {1, 1, 0};
@@ -101,6 +114,7 @@ u32 eprom_read(struct net_device *dev, u32 addr)
int i;
int addr_len;
u32 ret;
+ int err;
ret = 0;
/* enable EPROM programming */
@@ -144,7 +158,11 @@ u32 eprom_read(struct net_device *dev, u32 addr)
* and reading data. (eeprom outs a dummy 0)
*/
eprom_ck_cycle(dev);
- ret |= (eprom_r(dev)<<(15-i));
+ err = eprom_r(dev);
+ if (err < 0)
+ return err;
+
+ ret |= err<<(15-i);
}
eprom_cs(dev, 0);
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.h b/drivers/staging/rtl8192u/r8180_93cx6.h
index b840348eb5e3..9cf7f587c3ab 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.h
+++ b/drivers/staging/rtl8192u/r8180_93cx6.h
@@ -40,4 +40,4 @@
#define EPROM_TXPW1 0x3d
-u32 eprom_read(struct net_device *dev, u32 addr); /* reads a 16 bits word */
+int eprom_read(struct net_device *dev, u32 addr); /* reads a 16 bits word */
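Returning int instead of u32 lets eprom_read() report a negative errno from a failed USB register read while still carrying the 16-bit EEPROM word on success; callers must check before narrowing. A hypothetical caller showing the convention, assuming the driver's headers (the helper name is made up):

    static int demo_read_word(struct net_device *dev, u32 addr, u16 *out)
    {
            int ret = eprom_read(dev, addr);

            if (ret < 0)
                    return ret;     /* propagate the USB/read failure */
            *out = (u16)ret;        /* success values fit in 16 bits */
            return 0;
    }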
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index ee1c72267811..821afc0ddac5 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -879,8 +879,7 @@ typedef struct r8192_priv {
/* If 1, allow bad crc frame, reception in monitor mode */
short crcmon;
- struct semaphore wx_sem;
- struct semaphore rf_sem; /* Used to lock rf write operation */
+ struct mutex wx_mutex;
u8 rf_type; /* 0: 1T2R, 1: 2T4R */
RT_RF_TYPE_819xU rf_chip;
@@ -1129,10 +1128,10 @@ int read_nic_byte(struct net_device *dev, int x, u8 *data);
int read_nic_byte_E(struct net_device *dev, int x, u8 *data);
int read_nic_dword(struct net_device *dev, int x, u32 *data);
int read_nic_word(struct net_device *dev, int x, u16 *data);
-void write_nic_byte(struct net_device *dev, int x, u8 y);
-void write_nic_byte_E(struct net_device *dev, int x, u8 y);
-void write_nic_word(struct net_device *dev, int x, u16 y);
-void write_nic_dword(struct net_device *dev, int x, u32 y);
+int write_nic_byte(struct net_device *dev, int x, u8 y);
+int write_nic_byte_E(struct net_device *dev, int x, u8 y);
+int write_nic_word(struct net_device *dev, int x, u16 y);
+int write_nic_dword(struct net_device *dev, int x, u32 y);
void force_pci_posting(struct net_device *dev);
void rtl8192_rtx_disable(struct net_device *);
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 8c1d73719147..dd0970facdf5 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -253,7 +253,7 @@ u32 read_cam(struct net_device *dev, u8 addr)
return data;
}
-void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
+int write_nic_byte_E(struct net_device *dev, int indx, u8 data)
{
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -261,7 +261,7 @@ void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
u8 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
if (!usbdata)
- return;
+ return -ENOMEM;
*usbdata = data;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -269,9 +269,12 @@ void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
indx | 0xfe00, 0, usbdata, 1, HZ / 2);
kfree(usbdata);
- if (status < 0)
+	if (status < 0) {
netdev_err(dev, "write_nic_byte_E TimeOut! status: %d\n",
status);
+ return status;
+ }
+ return 0;
}
int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
@@ -299,7 +302,7 @@ int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
}
/* as 92U has extend page from 4 to 16, so modify functions below. */
-void write_nic_byte(struct net_device *dev, int indx, u8 data)
+int write_nic_byte(struct net_device *dev, int indx, u8 data)
{
int status;
@@ -308,7 +311,7 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
u8 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
if (!usbdata)
- return;
+ return -ENOMEM;
*usbdata = data;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -317,12 +320,16 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
usbdata, 1, HZ / 2);
kfree(usbdata);
- if (status < 0)
+ if (status < 0) {
netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status);
+ return status;
+ }
+
+ return 0;
}
-void write_nic_word(struct net_device *dev, int indx, u16 data)
+int write_nic_word(struct net_device *dev, int indx, u16 data)
{
int status;
@@ -331,7 +338,7 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
u16 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
if (!usbdata)
- return;
+ return -ENOMEM;
*usbdata = data;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -340,12 +347,16 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
usbdata, 2, HZ / 2);
kfree(usbdata);
- if (status < 0)
+ if (status < 0) {
netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status);
+ return status;
+ }
+
+ return 0;
}
-void write_nic_dword(struct net_device *dev, int indx, u32 data)
+int write_nic_dword(struct net_device *dev, int indx, u32 data)
{
int status;
@@ -354,7 +365,7 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
u32 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
if (!usbdata)
- return;
+ return -ENOMEM;
*usbdata = data;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -364,9 +375,13 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
kfree(usbdata);
- if (status < 0)
+ if (status < 0) {
netdev_err(dev, "write_nic_dword TimeOut! status: %d\n",
status);
+ return status;
+ }
+
+ return 0;
}
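The same convention is applied to the write path: these helpers wrap usb_control_msg(), so a timed-out control transfer can now be propagated instead of only logged. A hypothetical fragment (register offsets and values are invented) showing how a caller can abort a programming sequence on the first failure:

    static int demo_program_regs(struct net_device *dev)
    {
            int err;

            err = write_nic_byte(dev, 0x3f, 0x01);          /* illustrative */
            if (err)
                    return err;
            err = write_nic_word(dev, 0x40, 0x1234);        /* illustrative */
            if (err)
                    return err;
            return write_nic_dword(dev, 0x44, 0x00c0ffee);  /* illustrative */
    }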
@@ -2361,8 +2376,7 @@ static void rtl8192_init_priv_lock(struct r8192_priv *priv)
{
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->irq_lock);
- sema_init(&priv->wx_sem, 1);
- sema_init(&priv->rf_sem, 1);
+ mutex_init(&priv->wx_mutex);
mutex_init(&priv->mutex);
}
@@ -2421,7 +2435,7 @@ static inline u16 endian_swap(u16 *data)
return *data;
}
-static void rtl8192_read_eeprom_info(struct net_device *dev)
+static int rtl8192_read_eeprom_info(struct net_device *dev)
{
u16 wEPROM_ID = 0;
u8 bMac_Tmp_Addr[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x02};
@@ -2429,9 +2443,13 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tmpValue = 0;
int i;
+ int ret;
RT_TRACE(COMP_EPROM, "===========>%s()\n", __func__);
- wEPROM_ID = eprom_read(dev, 0); /* first read EEPROM ID out; */
+ ret = eprom_read(dev, 0); /* first read EEPROM ID out; */
+ if (ret < 0)
+ return ret;
+ wEPROM_ID = (u16)ret;
RT_TRACE(COMP_EPROM, "EEPROM ID is 0x%x\n", wEPROM_ID);
if (wEPROM_ID != RTL8190_EEPROM_ID)
@@ -2443,13 +2461,25 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
if (bLoad_From_EEPOM) {
-		tmpValue = eprom_read(dev, EEPROM_VID >> 1);
+ ret = eprom_read(dev, EEPROM_VID >> 1);
+ if (ret < 0)
+ return ret;
+ tmpValue = (u16)ret;
priv->eeprom_vid = endian_swap(&tmpValue);
- priv->eeprom_pid = eprom_read(dev, EEPROM_PID >> 1);
- tmpValue = eprom_read(dev, EEPROM_ChannelPlan >> 1);
+ ret = eprom_read(dev, EEPROM_PID >> 1);
+ if (ret < 0)
+ return ret;
+ priv->eeprom_pid = (u16)ret;
+ ret = eprom_read(dev, EEPROM_ChannelPlan >> 1);
+ if (ret < 0)
+ return ret;
+ tmpValue = (u16)ret;
priv->eeprom_ChannelPlan = (tmpValue & 0xff00) >> 8;
priv->btxpowerdata_readfromEEPORM = true;
- priv->eeprom_CustomerID =
- eprom_read(dev, (EEPROM_Customer_ID >> 1)) >> 8;
+		ret = eprom_read(dev, (EEPROM_Customer_ID >> 1));
+		if (ret < 0)
+			return ret;
+		priv->eeprom_CustomerID = (u16)ret >> 8;
} else {
priv->eeprom_vid = 0;
priv->eeprom_pid = 0;
@@ -2467,10 +2497,10 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
int i;
for (i = 0; i < 6; i += 2) {
- u16 tmp = 0;
-
- tmp = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
- *(u16 *)(&dev->dev_addr[i]) = tmp;
+ ret = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
+ if (ret < 0)
+ return ret;
+ *(u16 *)(&dev->dev_addr[i]) = (u16)ret;
}
} else {
memcpy(dev->dev_addr, bMac_Tmp_Addr, 6);
@@ -2482,52 +2512,72 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
if (priv->card_8192_version == (u8)VERSION_819xU_A) {
/* read Tx power gain offset of legacy OFDM to HT rate */
- if (bLoad_From_EEPOM)
- priv->EEPROMTxPowerDiff = (eprom_read(dev, (EEPROM_TxPowerDiff >> 1)) & 0xff00) >> 8;
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_TxPowerDiff >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROMTxPowerDiff = ((u16)ret & 0xff00) >> 8;
+ } else
priv->EEPROMTxPowerDiff = EEPROM_Default_TxPower;
RT_TRACE(COMP_EPROM, "TxPowerDiff:%d\n", priv->EEPROMTxPowerDiff);
/* read ThermalMeter from EEPROM */
- if (bLoad_From_EEPOM)
- priv->EEPROMThermalMeter = (u8)(eprom_read(dev, (EEPROM_ThermalMeter >> 1)) & 0x00ff);
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_ThermalMeter >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROMThermalMeter = (u8)((u16)ret & 0x00ff);
+ } else
priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
RT_TRACE(COMP_EPROM, "ThermalMeter:%d\n", priv->EEPROMThermalMeter);
/* for tx power track */
priv->TSSI_13dBm = priv->EEPROMThermalMeter * 100;
/* read antenna tx power offset of B/C/D to A from EEPROM */
- if (bLoad_From_EEPOM)
- priv->EEPROMPwDiff = (eprom_read(dev, (EEPROM_PwDiff >> 1)) & 0x0f00) >> 8;
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_PwDiff >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROMPwDiff = ((u16)ret & 0x0f00) >> 8;
+ } else
priv->EEPROMPwDiff = EEPROM_Default_PwDiff;
RT_TRACE(COMP_EPROM, "TxPwDiff:%d\n", priv->EEPROMPwDiff);
/* Read CrystalCap from EEPROM */
- if (bLoad_From_EEPOM)
- priv->EEPROMCrystalCap = (eprom_read(dev, (EEPROM_CrystalCap >> 1)) & 0x0f);
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_CrystalCap >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROMCrystalCap = (u16)ret & 0x0f;
+ } else
priv->EEPROMCrystalCap = EEPROM_Default_CrystalCap;
RT_TRACE(COMP_EPROM, "CrystalCap = %d\n", priv->EEPROMCrystalCap);
/* get per-channel Tx power level */
- if (bLoad_From_EEPOM)
- priv->EEPROM_Def_Ver = (eprom_read(dev, (EEPROM_TxPwIndex_Ver >> 1)) & 0xff00) >> 8;
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_TxPwIndex_Ver >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROM_Def_Ver = ((u16)ret & 0xff00) >> 8;
+ } else
priv->EEPROM_Def_Ver = 1;
RT_TRACE(COMP_EPROM, "EEPROM_DEF_VER:%d\n", priv->EEPROM_Def_Ver);
if (priv->EEPROM_Def_Ver == 0) { /* old eeprom definition */
int i;
- if (bLoad_From_EEPOM)
- priv->EEPROMTxPowerLevelCCK = (eprom_read(dev, (EEPROM_TxPwIndex_CCK >> 1)) & 0xff) >> 8;
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_TxPwIndex_CCK >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff) >> 8;
+ } else
priv->EEPROMTxPowerLevelCCK = 0x10;
RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK);
for (i = 0; i < 3; i++) {
if (bLoad_From_EEPOM) {
- tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G + i) >> 1);
+ ret = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G + i) >> 1);
+				if (ret < 0)
+ return ret;
if (((EEPROM_TxPwIndex_OFDM_24G + i) % 2) == 0)
- tmpValue = tmpValue & 0x00ff;
+ tmpValue = (u16)ret & 0x00ff;
else
- tmpValue = (tmpValue & 0xff00) >> 8;
+ tmpValue = ((u16)ret & 0xff00) >> 8;
} else {
tmpValue = 0x10;
}
@@ -2536,17 +2586,21 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
}
} else if (priv->EEPROM_Def_Ver == 1) {
if (bLoad_From_EEPOM) {
- tmpValue = eprom_read(dev,
- EEPROM_TxPwIndex_CCK_V1 >> 1);
- tmpValue = (tmpValue & 0xff00) >> 8;
+ ret = eprom_read(dev, EEPROM_TxPwIndex_CCK_V1 >> 1);
+ if (ret < 0)
+ return ret;
+ tmpValue = ((u16)ret & 0xff00) >> 8;
} else {
tmpValue = 0x10;
}
priv->EEPROMTxPowerLevelCCK_V1[0] = (u8)tmpValue;
- if (bLoad_From_EEPOM)
- tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_CCK_V1 + 2) >> 1);
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_TxPwIndex_CCK_V1 + 2) >> 1);
+ if (ret < 0)
+ return ret;
+ tmpValue = (u16)ret;
+ } else
tmpValue = 0x1010;
*((u16 *)(&priv->EEPROMTxPowerLevelCCK_V1[1])) = tmpValue;
if (bLoad_From_EEPOM)
@@ -2644,6 +2698,8 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
init_rate_adaptive(dev);
RT_TRACE(COMP_EPROM, "<===========%s()\n", __func__);
+
+ return 0;
}
static short rtl8192_get_channel_map(struct net_device *dev)
@@ -2664,6 +2720,7 @@ static short rtl8192_get_channel_map(struct net_device *dev)
static short rtl8192_init(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
+ int err;
memset(&(priv->stats), 0, sizeof(struct Stats));
memset(priv->txqueue_to_outpipemap, 0, 9);
@@ -2685,7 +2742,14 @@ static short rtl8192_init(struct net_device *dev)
rtl8192_init_priv_lock(priv);
rtl8192_init_priv_task(dev);
rtl8192_get_eeprom_size(dev);
- rtl8192_read_eeprom_info(dev);
+ err = rtl8192_read_eeprom_info(dev);
+ if (err) {
+ DMESG("Reading EEPROM info failed");
+ kfree(priv->pFirmware);
+ priv->pFirmware = NULL;
+ free_ieee80211(dev);
+ return err;
+ }
rtl8192_get_channel_map(dev);
init_hal_dm(dev);
setup_timer(&priv->watch_dog_timer, watch_dog_timer_callback,
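Once rtl8192_read_eeprom_info() can fail, rtl8192_init() has to unwind what it already set up, here the firmware buffer and the ieee80211 device. For longer init paths the usual kernel idiom is goto-based unwinding; a simplified sketch under invented names:

    #include <linux/slab.h>

    struct demo_priv {
            void *fw;
    };

    static int demo_read_eeprom(struct demo_priv *priv) { return 0; } /* stub */

    static int demo_init(struct demo_priv *priv)
    {
            int err;

            priv->fw = kzalloc(64, GFP_KERNEL);
            if (!priv->fw)
                    return -ENOMEM;

            err = demo_read_eeprom(priv);
            if (err)
                    goto err_free_fw;       /* unwind in reverse order */

            return 0;

    err_free_fw:
            kfree(priv->fw);
            priv->fw = NULL;
            return err;
    }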
@@ -3303,12 +3367,12 @@ RESET_START:
/* Set the variable for reset. */
priv->ResetProgress = RESET_TYPE_SILENT;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (priv->up == 0) {
RT_TRACE(COMP_ERR,
"%s():the driver is not up! return\n",
__func__);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return;
}
priv->up = 0;
@@ -3323,19 +3387,19 @@ RESET_START:
ieee->sync_scan_hurryup = 1;
if (ieee->state == IEEE80211_LINKED) {
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
netdev_dbg(dev, "ieee->state is IEEE80211_LINKED\n");
ieee80211_stop_send_beacons(priv->ieee80211);
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
ieee80211_stop_scan(ieee);
netif_carrier_off(dev);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
} else {
netdev_dbg(dev, "ieee->state is NOT LINKED\n");
ieee80211_softmac_stop_protocol(priv->ieee80211);
}
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
RT_TRACE(COMP_RESET,
"%s():<==========down process is finished\n",
__func__);
@@ -3533,9 +3597,9 @@ static int rtl8192_open(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtl8192_up(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -3556,11 +3620,11 @@ static int rtl8192_close(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtl8192_down(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -3632,11 +3696,11 @@ static void rtl8192_restart(struct work_struct *work)
reset_wq);
struct net_device *dev = priv->ieee80211->dev;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
rtl8192_commit(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
}
static void r8192_set_multicast(struct net_device *dev)
@@ -3659,12 +3723,12 @@ static int r8192_set_mac_adr(struct net_device *dev, void *mac)
struct r8192_priv *priv = ieee80211_priv(dev);
struct sockaddr *addr = mac;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ether_addr_copy(dev->dev_addr, addr->sa_data);
schedule_work(&priv->reset_wq);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -3681,7 +3745,7 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct iw_point *p = &wrq->u.data;
struct ieee_param *ipw = NULL;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (p->length < sizeof(struct ieee_param) || !p->pointer) {
@@ -3774,7 +3838,7 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
kfree(ipw);
ipw = NULL;
out:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 837704de3ea4..d2f2f246063f 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -67,11 +67,11 @@ static int r8192_wx_set_rate(struct net_device *dev,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_rate(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -83,11 +83,11 @@ static int r8192_wx_set_rts(struct net_device *dev,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_rts(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -108,11 +108,11 @@ static int r8192_wx_set_power(struct net_device *dev,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -132,11 +132,11 @@ static int r8192_wx_force_reset(struct net_device *dev,
{
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
netdev_dbg(dev, "%s(): force reset ! extra is %d\n", __func__, *extra);
priv->force_reset = *extra;
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -148,11 +148,11 @@ static int r8192_wx_set_rawtx(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
@@ -166,7 +166,7 @@ static int r8192_wx_set_crcmon(struct net_device *dev,
int *parms = (int *)extra;
int enable = (parms[0] > 0);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (enable)
priv->crcmon = 1;
@@ -176,7 +176,7 @@ static int r8192_wx_set_crcmon(struct net_device *dev,
DMESG("bad CRC in monitor mode are %s",
priv->crcmon ? "accepted" : "rejected");
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -187,13 +187,13 @@ static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b);
rtl8192_set_rxconf(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -338,7 +338,7 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
}
}
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (priv->ieee80211->state != IEEE80211_LINKED) {
priv->ieee80211->scanning = 0;
ieee80211_softmac_scan_syncro(priv->ieee80211);
@@ -346,7 +346,7 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
} else {
ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b);
}
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -361,11 +361,11 @@ static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
if (!priv->up)
return -ENETDOWN;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_get_scan(priv->ieee80211, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -377,11 +377,11 @@ static int r8192_wx_set_essid(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -393,11 +393,11 @@ static int r8192_wx_get_essid(struct net_device *dev,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -408,11 +408,11 @@ static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -468,11 +468,11 @@ static int r8192_wx_set_wap(struct net_device *dev,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
/* struct sockaddr *temp = (struct sockaddr *)awrq; */
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_wap(priv->ieee80211, info, awrq, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
@@ -515,12 +515,12 @@ static int r8192_wx_set_enc(struct net_device *dev,
if (!priv->up)
return -ENETDOWN;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
RT_TRACE(COMP_SEC, "Setting SW wep key");
ret = ieee80211_wx_set_encode(priv->ieee80211, info, wrqu, key);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
@@ -619,7 +619,7 @@ static int r8192_wx_set_retry(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
int err = 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
wrqu->retry.disabled){
@@ -652,7 +652,7 @@ static int r8192_wx_set_retry(struct net_device *dev,
rtl8192_commit(dev);
exit:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return err;
}
@@ -701,7 +701,7 @@ static int r8192_wx_set_sens(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
short err = 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (priv->rf_set_sens == NULL) {
err = -1; /* we have not this support for this radio */
goto exit;
@@ -712,7 +712,7 @@ static int r8192_wx_set_sens(struct net_device *dev,
err = -EINVAL;
exit:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return err;
}
@@ -727,7 +727,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
struct ieee80211_device *ieee = priv->ieee80211;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_encode_ext(priv->ieee80211, info, wrqu, extra);
{
@@ -790,7 +790,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
end_hw_sec:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -801,9 +801,9 @@ static int r8192_wx_set_auth(struct net_device *dev,
int ret = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_auth(priv->ieee80211, info, &(data->param), extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -815,10 +815,10 @@ static int r8192_wx_set_mlme(struct net_device *dev,
int ret = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -829,9 +829,9 @@ static int r8192_wx_set_gen_ie(struct net_device *dev,
int ret = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, data->data.length);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
diff --git a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
index b08b6ecc8d31..98ea7f381a3c 100644
--- a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
+++ b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
@@ -22,7 +22,7 @@ __unisys_vmcall_gnuc(unsigned long tuple, unsigned long reg_ebx,
cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
if (!(cpuid_ecx & 0x80000000))
- return -1;
+ return -EPERM;
__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
"a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
@@ -40,7 +40,7 @@ __unisys_extended_vmcall_gnuc(unsigned long long tuple,
cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
if (!(cpuid_ecx & 0x80000000))
- return -1;
+ return -EPERM;
__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
"a"(tuple), "b"(reg_ebx), "c"(reg_ecx), "d"(reg_edx));
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 3a147dbbd7b5..d32b8980a1cf 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -876,10 +876,10 @@ write_vbus_chp_info(struct visorchannel *chan,
int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
if (hdr_info->chp_info_offset == 0)
- return -1;
+ return -EFAULT;
if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -1;
+ return -EFAULT;
return 0;
}
@@ -895,10 +895,10 @@ write_vbus_bus_info(struct visorchannel *chan,
int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
if (hdr_info->bus_info_offset == 0)
- return -1;
+ return -EFAULT;
if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -1;
+ return -EFAULT;
return 0;
}
@@ -915,10 +915,10 @@ write_vbus_dev_info(struct visorchannel *chan,
(hdr_info->device_info_struct_bytes * devix);
if (hdr_info->dev_info_offset == 0)
- return -1;
+ return -EFAULT;
if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -1;
+ return -EFAULT;
return 0;
}
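Replacing bare -1 returns with explicit errnos is more than style: -1 is numerically -EPERM, so callers that propagate it upward report a misleading "operation not permitted". The same pattern continues in visorchipset.c below with -EINVAL. An illustrative sketch:

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Illustrative only: distinct errnos let callers tell failure modes
     * apart, where a bare -1 collapsed them all into -EPERM. */
    static int demo_write_info(bool offset_valid, int transport_status)
    {
            if (!offset_valid)
                    return -EFAULT; /* channel header field never filled in */
            if (transport_status < 0)
                    return -EFAULT; /* write to the channel failed */
            return 0;
    }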
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 5ba5936e2203..d248c946a13b 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -1613,7 +1613,7 @@ parahotplug_request_complete(int id, u16 active)
}
spin_unlock(&parahotplug_request_list_lock);
- return -1;
+ return -EINVAL;
}
/*
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 6a4570d10642..120ba2097e02 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -16,6 +16,8 @@
#include <linux/debugfs.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
+#include <linux/idr.h>
+#include <linux/seq_file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
@@ -33,7 +35,6 @@
#define MAX_BUF 8192
#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
#define VISORHBA_ERROR_COUNT 30
-#define VISORHBA_OPEN_MAX 1
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
void (*visorhba_cmnd_done)
@@ -50,14 +51,7 @@ static int visorhba_pause(struct visor_device *dev,
static int visorhba_resume(struct visor_device *dev,
visorbus_state_complete_func complete_func);
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset);
-static int set_no_disk_inquiry_result(unsigned char *buf,
- size_t len, bool is_lun0);
static struct dentry *visorhba_debugfs_dir;
-static const struct file_operations debugfs_info_fops = {
- .read = info_debugfs_read,
-};
/* GUIDS for HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
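The hand-rolled .read implementation removed here is replaced later in this patch by a seq_file based "info" file: with single_open(), the show callback only emits seq_printf() lines and the seq_file core handles buffering, offsets, and partial reads. The registration site is not part of this excerpt; it would presumably look something like:

    #include <linux/debugfs.h>

    /* Assumed call site: wiring the info_debugfs_fops defined further
     * down into a per-device debugfs directory. */
    static void demo_register_info_file(struct visorhba_devdata *devdata,
                                        const char *name)
    {
            devdata->debugfs_dir = debugfs_create_dir(name,
                                                      visorhba_debugfs_dir);
            devdata->debugfs_info = debugfs_create_file("info", 0440,
                                                        devdata->debugfs_dir,
                                                        devdata,
                                                        &info_debugfs_fops);
    }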
@@ -99,14 +93,6 @@ struct scsipending {
char cmdtype; /* Type of pointer that is being stored */
};
-/* Work Data for dar_work_queue */
-struct diskaddremove {
- u8 add; /* 0-remove, 1-add */
- struct Scsi_Host *shost; /* Scsi Host for this visorhba instance */
- u32 channel, id, lun; /* Disk Path */
- struct diskaddremove *next;
-};
-
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
struct Scsi_Host *scsihost;
@@ -133,14 +119,21 @@ struct visorhba_devdata {
int devnum;
struct task_struct *thread;
int thread_wait_ms;
+
+ /*
+ * allows us to pass int handles back-and-forth between us and
+ * iovm, instead of raw pointers
+ */
+ struct idr idr;
+
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_info;
};
struct visorhba_devices_open {
struct visorhba_devdata *devdata;
};
-static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];
-
#define for_each_vdisk_match(iter, list, match) \
for (iter = &list->head; iter->next; iter = iter->next) \
if ((iter->channel == match->channel) && \
@@ -191,7 +184,7 @@ static void visor_thread_stop(struct task_struct *task)
* Partition so that it can be handled when it completes. If new is
* NULL it is assumed the entry refers only to the cmdrsp.
* Returns insert_location where entry was added,
- * SCSI_MLQUEUE_DEVICE_BUSY if it can't
+ * -EBUSY if it can't
*/
static int add_scsipending_entry(struct visorhba_devdata *devdata,
char cmdtype, void *new)
@@ -206,7 +199,7 @@ static int add_scsipending_entry(struct visorhba_devdata *devdata,
insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
if (insert_location == (int)devdata->nextinsert) {
spin_unlock_irqrestore(&devdata->privlock, flags);
- return -1;
+ return -EBUSY;
}
}
@@ -269,6 +262,62 @@ static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
}
/**
+ * simple_idr_get - associate a provided pointer with an int value
+ * 1 <= value <= INT_MAX, and return this int value;
+ * the pointer value can be obtained later by passing
+ * this int value to idr_find()
+ * @idrtable: the data object maintaining the pointer<-->int mappings
+ * @p: the pointer value to be remembered
+ * @lock: a spinlock used when exclusive access to idrtable is needed
+ */
+static unsigned int simple_idr_get(struct idr *idrtable, void *p,
+ spinlock_t *lock)
+{
+ int id;
+ unsigned long flags;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(lock, flags);
+ id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
+ spin_unlock_irqrestore(lock, flags);
+ idr_preload_end();
+ if (id < 0)
+ return 0; /* failure */
+ return (unsigned int)(id); /* idr_alloc() guarantees > 0 */
+}
+
+/**
+ * setup_scsitaskmgmt_handles - stash the necessary handles so that the
+ * completion processing logic for a taskmgmt
+ * cmd will be able to find who to wake up
+ * and where to stash the result
+ */
+static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
+ struct uiscmdrsp *cmdrsp,
+ wait_queue_head_t *event, int *result)
+{
+ /* specify the event that has to be triggered when this */
+ /* cmd is complete */
+ cmdrsp->scsitaskmgmt.notify_handle =
+ simple_idr_get(idrtable, event, lock);
+ cmdrsp->scsitaskmgmt.notifyresult_handle =
+ simple_idr_get(idrtable, result, lock);
+}
+
+/**
+ * cleanup_scsitaskmgmt_handles - forget handles created by
+ * setup_scsitaskmgmt_handles()
+ */
+static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
+ struct uiscmdrsp *cmdrsp)
+{
+ if (cmdrsp->scsitaskmgmt.notify_handle)
+ idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
+ if (cmdrsp->scsitaskmgmt.notifyresult_handle)
+ idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
+}
+
+/**
* forward_taskmgmt_command - send taskmgmt command to the Service
* Partition
* @tasktype: Type of taskmgmt command
@@ -303,10 +352,8 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
/* issue TASK_MGMT_ABORT_TASK */
cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
- /* specify the event that has to be triggered when this */
- /* cmd is complete */
- cmdrsp->scsitaskmgmt.notify_handle = (u64)&notifyevent;
- cmdrsp->scsitaskmgmt.notifyresult_handle = (u64)&notifyresult;
+ setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
+ &notifyevent, &notifyresult);
/* save destination */
cmdrsp->scsitaskmgmt.tasktype = tasktype;
@@ -315,6 +362,8 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
cmdrsp->scsitaskmgmt.handle = scsicmd_id;
+ dev_dbg(&scsidev->sdev_gendev,
+ "visorhba: initiating type=%d taskmgmt command\n", tasktype);
if (!visorchannel_signalinsert(devdata->dev->visorchannel,
IOCHAN_TO_IOPART,
cmdrsp))
@@ -327,17 +376,23 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
msecs_to_jiffies(45000)))
goto err_del_scsipending_ent;
+ dev_dbg(&scsidev->sdev_gendev,
+ "visorhba: taskmgmt type=%d success; result=0x%x\n",
+ tasktype, notifyresult);
if (tasktype == TASK_MGMT_ABORT_TASK)
scsicmd->result = DID_ABORT << 16;
else
scsicmd->result = DID_RESET << 16;
scsicmd->scsi_done(scsicmd);
-
+ cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
return SUCCESS;
err_del_scsipending_ent:
+ dev_dbg(&scsidev->sdev_gendev,
+ "visorhba: taskmgmt type=%d not executed\n", tasktype);
del_scsipending_ent(devdata, scsicmd_id);
+ cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
return FAILED;
}
@@ -606,64 +661,76 @@ static struct scsi_host_template visorhba_driver_template = {
};
/**
- * info_debugfs_read - debugfs interface to dump visorhba states
- * @file: Debug file
- * @buf: buffer to send back to user
- * @len: len that can be written to buf
- * @offset: offset into buf
+ * info_debugfs_show - debugfs interface to dump visorhba states
*
- * Dumps information about the visorhba driver and devices
- * TODO: Make this per vhba
- * Returns bytes_read
+ * This presents a file in the debugfs tree named:
+ * /visorhba/vbus<x>:dev<y>/info
*/
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset)
+static int info_debugfs_show(struct seq_file *seq, void *v)
{
- ssize_t bytes_read = 0;
- int str_pos = 0;
- u64 phys_flags_addr;
- int i;
- struct visorhba_devdata *devdata;
- char *vbuf;
+ struct visorhba_devdata *devdata = seq->private;
+
+ seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
+ seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
+ seq_printf(seq, "interrupts_disabled = %llu\n",
+ devdata->interrupts_disabled);
+ seq_printf(seq, "interrupts_notme = %llu\n",
+ devdata->interrupts_notme);
+ seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
+ if (devdata->flags_addr) {
+ u64 phys_flags_addr =
+ virt_to_phys((__force void *)devdata->flags_addr);
+ seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
+ phys_flags_addr);
+ seq_printf(seq, "FeatureFlags = %llu\n",
+ (__le64)readq(devdata->flags_addr));
+ }
+ seq_printf(seq, "acquire_failed_cnt = %llu\n",
+ devdata->acquire_failed_cnt);
- if (len > MAX_BUF)
- len = MAX_BUF;
- vbuf = kzalloc(len, GFP_KERNEL);
- if (!vbuf)
- return -ENOMEM;
+ return 0;
+}
+
+static int info_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, info_debugfs_show, inode->i_private);
+}
- for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
- if (!visorhbas_open[i].devdata)
- continue;
-
- devdata = visorhbas_open[i].devdata;
-
- str_pos += scnprintf(vbuf + str_pos,
- len - str_pos, "max_buff_len:%u\n",
- devdata->max_buff_len);
-
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
- devdata->interrupts_rcvd,
- devdata->interrupts_disabled);
- str_pos += scnprintf(vbuf + str_pos,
- len - str_pos, "\ninterrupts_notme = %llu,\n",
- devdata->interrupts_notme);
- phys_flags_addr = virt_to_phys((__force void *)
- devdata->flags_addr);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
- devdata->flags_addr, phys_flags_addr,
- (__le64)readq(devdata->flags_addr));
- str_pos += scnprintf(vbuf + str_pos,
- len - str_pos, "acquire_failed_cnt:%llu\n",
- devdata->acquire_failed_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
+static const struct file_operations info_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = info_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * complete_taskmgmt_command - complete task management
+ * @idrtable: IDR holding the handles registered by
+ *	setup_scsitaskmgmt_handles()
+ * @cmdrsp: Response from the IOVM
+ * @result: Result of the task management command
+ *
+ * Service Partition returned the result of the task management
+ * command. Wake up anyone waiting for it.
+ * Returns void
+ */
+static inline void complete_taskmgmt_command(struct idr *idrtable,
+	struct uiscmdrsp *cmdrsp, int result)
+{
+ wait_queue_head_t *wq =
+ idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
+ int *scsi_result_ptr =
+ idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
+
+ if (unlikely(!(wq && scsi_result_ptr))) {
+ pr_err("visorhba: no completion context; cmd will time out\n");
+ return;
}
- bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
- kfree(vbuf);
- return bytes_read;
+ /* copy the result of the taskmgmt and
+ * wake up the error handler that is waiting for this
+ */
+ pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
+ *scsi_result_ptr = result;
+ wake_up_all(wq);
}
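For context, a minimal sketch of the waiter side this function pairs with. The head of forward_taskmgmt_command() falls outside the hunks above, so the sentinel initialization here is an assumption:

	/* assumed shape of forward_taskmgmt_command()'s wait logic */
	int notifyresult = 0xffff;	/* sentinel: no result yet */
	wait_queue_head_t notifyevent;

	init_waitqueue_head(&notifyevent);
	setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock,
				   cmdrsp, &notifyevent, &notifyresult);
	/* ... visorchannel_signalinsert(), as shown in the hunk above ... */
	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
				msecs_to_jiffies(45000)))
		goto err_del_scsipending_ent;
	/* complete_taskmgmt_command() stored the result and woke us */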
/**
@@ -701,17 +768,8 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
break;
case CMD_SCSITASKMGMT_TYPE:
cmdrsp = pendingdel->sent;
- cmdrsp->scsitaskmgmt.notifyresult_handle
- = TASK_MGMT_FAILED;
- wake_up_all((wait_queue_head_t *)
- cmdrsp->scsitaskmgmt.notify_handle);
- break;
- case CMD_VDISKMGMT_TYPE:
- cmdrsp = pendingdel->sent;
- cmdrsp->vdiskmgmt.notifyresult_handle
- = VDISK_MGMT_FAILED;
- wake_up_all((wait_queue_head_t *)
- cmdrsp->vdiskmgmt.notify_handle);
+ complete_taskmgmt_command(&devdata->idr, cmdrsp,
+ TASK_MGMT_FAILED);
break;
default:
break;
@@ -878,89 +936,6 @@ complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
scsicmd->scsi_done(scsicmd);
}
-/* DELETE VDISK TASK MGMT COMMANDS */
-static inline void complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
-{
- /* copy the result of the taskmgmt and
- * wake up the error handler that is waiting for this
- */
- cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
- wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify_handle);
-}
-
-/**
- * complete_taskmgmt_command - complete task management
- * @cmdrsp: Response from the IOVM
- *
- * Service Partition returned the result of the task management
- * command. Wake up anyone waiting for it.
- * Returns void
- */
-static inline void complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
-{
- /* copy the result of the taskgmgt and
- * wake up the error handler that is waiting for this
- */
- cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
- wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify_handle);
-}
-
-static struct work_struct dar_work_queue;
-static struct diskaddremove *dar_work_queue_head;
-static spinlock_t dar_work_queue_lock; /* Lock to protet dar_work_queue_head */
-static unsigned short dar_work_queue_sched;
-
-/**
- * queue_disk_add_remove - IOSP has sent us a add/remove request
- * @dar: disk add/remove request
- *
- * Queue the work needed to add/remove a disk.
- * Returns void
- */
-static inline void queue_disk_add_remove(struct diskaddremove *dar)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dar_work_queue_lock, flags);
- if (!dar_work_queue_head) {
- dar_work_queue_head = dar;
- dar->next = NULL;
- } else {
- dar->next = dar_work_queue_head;
- dar_work_queue_head = dar;
- }
- if (!dar_work_queue_sched) {
- schedule_work(&dar_work_queue);
- dar_work_queue_sched = 1;
- }
- spin_unlock_irqrestore(&dar_work_queue_lock, flags);
-}
-
-/**
- * process_disk_notify - IOSP has sent a process disk notify event
- * @shost: Scsi hot
- * @cmdrsp: Response from the IOSP
- *
- * Queue it to the work queue.
- * Return void.
- */
-static void process_disk_notify(struct Scsi_Host *shost,
- struct uiscmdrsp *cmdrsp)
-{
- struct diskaddremove *dar;
-
- dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
- if (!dar)
- return;
-
- dar->add = cmdrsp->disknotify.add;
- dar->shost = shost;
- dar->channel = cmdrsp->disknotify.channel;
- dar->id = cmdrsp->disknotify.id;
- dar->lun = cmdrsp->disknotify.lun;
- queue_disk_add_remove(dar);
-}
-
/**
* drain_queue - pull responses out of iochannel
* @cmdrsp: Response from the IOSP
@@ -973,7 +948,6 @@ static void
drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
{
struct scsi_cmnd *scsicmd;
- struct Scsi_Host *shost = devdata->scsihost;
while (1) {
if (!visorchannel_signalremove(devdata->dev->visorchannel,
@@ -995,21 +969,12 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
if (!del_scsipending_ent(devdata,
cmdrsp->scsitaskmgmt.handle))
break;
- complete_taskmgmt_command(cmdrsp);
- } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
- /* The vHba pointer has no meaning in a
- * guest partition. Let's be safe and set it
- * to NULL now. Do not use it here!
- */
- cmdrsp->disknotify.v_hba = NULL;
- process_disk_notify(shost, cmdrsp);
- } else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
- if (!del_scsipending_ent(devdata,
- cmdrsp->vdiskmgmt.handle))
- break;
- complete_vdiskmgmt_command(cmdrsp);
- }
- /* cmdrsp is now available for resuse */
+ complete_taskmgmt_command(&devdata->idr, cmdrsp,
+ cmdrsp->scsitaskmgmt.result);
+ } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
+ dev_err_once(&devdata->dev->device,
+ "ignoring unsupported NOTIFYGUEST\n");
+ /* cmdrsp is now available for re-use */
}
}
@@ -1107,7 +1072,7 @@ static int visorhba_probe(struct visor_device *dev)
struct Scsi_Host *scsihost;
struct vhba_config_max max;
struct visorhba_devdata *devdata = NULL;
- int i, err, channel_offset;
+ int err, channel_offset;
u64 features;
scsihost = scsi_host_alloc(&visorhba_driver_template,
@@ -1122,9 +1087,9 @@ static int visorhba_probe(struct visor_device *dev)
if (err < 0)
goto err_scsi_host_put;
- scsihost->max_id = (unsigned)max.max_id;
- scsihost->max_lun = (unsigned)max.max_lun;
- scsihost->cmd_per_lun = (unsigned)max.cmd_per_lun;
+ scsihost->max_id = (unsigned int)max.max_id;
+ scsihost->max_lun = (unsigned int)max.max_lun;
+ scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
scsihost->max_sectors =
(unsigned short)(max.max_io_size >> 9);
scsihost->sg_tablesize =
@@ -1136,16 +1101,24 @@ static int visorhba_probe(struct visor_device *dev)
goto err_scsi_host_put;
devdata = (struct visorhba_devdata *)scsihost->hostdata;
- for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
- if (!visorhbas_open[i].devdata) {
- visorhbas_open[i].devdata = devdata;
- break;
- }
- }
-
devdata->dev = dev;
dev_set_drvdata(&dev->device, devdata);
+ devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
+ visorhba_debugfs_dir);
+ if (!devdata->debugfs_dir) {
+ err = -ENOMEM;
+ goto err_scsi_remove_host;
+ }
+ devdata->debugfs_info =
+ debugfs_create_file("info", S_IRUSR | S_IRGRP,
+ devdata->debugfs_dir, devdata,
+ &info_debugfs_fops);
+ if (!devdata->debugfs_info) {
+ err = -ENOMEM;
+ goto err_debugfs_dir;
+ }
+
init_waitqueue_head(&devdata->rsp_queue);
spin_lock_init(&devdata->privlock);
devdata->serverdown = false;
@@ -1156,11 +1129,13 @@ static int visorhba_probe(struct visor_device *dev)
channel_header.features);
err = visorbus_read_channel(dev, channel_offset, &features, 8);
if (err)
- goto err_scsi_remove_host;
+ goto err_debugfs_info;
features |= ULTRA_IO_CHANNEL_IS_POLLING;
err = visorbus_write_channel(dev, channel_offset, &features, 8);
if (err)
- goto err_scsi_remove_host;
+ goto err_debugfs_info;
+
+ idr_init(&devdata->idr);
devdata->thread_wait_ms = 2;
devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
@@ -1170,6 +1145,12 @@ static int visorhba_probe(struct visor_device *dev)
return 0;
+err_debugfs_info:
+ debugfs_remove(devdata->debugfs_info);
+
+err_debugfs_dir:
+ debugfs_remove_recursive(devdata->debugfs_dir);
+
err_scsi_remove_host:
scsi_remove_host(scsihost);
@@ -1198,7 +1179,11 @@ static void visorhba_remove(struct visor_device *dev)
scsi_remove_host(scsihost);
scsi_host_put(scsihost);
+ idr_destroy(&devdata->idr);
+
dev_set_drvdata(&dev->device, NULL);
+ debugfs_remove(devdata->debugfs_info);
+ debugfs_remove_recursive(devdata->debugfs_dir);
}
/**
@@ -1209,26 +1194,17 @@ static void visorhba_remove(struct visor_device *dev)
*/
static int visorhba_init(void)
{
- struct dentry *ret;
int rc = -ENOMEM;
visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
if (!visorhba_debugfs_dir)
return -ENOMEM;
- ret = debugfs_create_file("info", S_IRUSR, visorhba_debugfs_dir, NULL,
- &debugfs_info_fops);
-
- if (!ret) {
- rc = -EIO;
- goto cleanup_debugfs;
- }
-
rc = visorbus_register_visor_driver(&visorhba_driver);
if (rc)
goto cleanup_debugfs;
- return rc;
+ return 0;
cleanup_debugfs:
debugfs_remove_recursive(visorhba_debugfs_dir);
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 12a3570780fc..d67cd76327c0 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -506,7 +506,7 @@ calc_button(int x)
case 3:
return BTN_RIGHT;
default:
- return -1;
+ return -EINVAL;
}
}
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index fd7c9a6cb6f3..a28388d3ddc2 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1000,25 +1000,28 @@ visornic_set_multi(struct net_device *netdev)
struct uiscmdrsp *cmdrsp;
struct visornic_devdata *devdata = netdev_priv(netdev);
- /* any filtering changes */
- if (devdata->old_flags != netdev->flags) {
- if ((netdev->flags & IFF_PROMISC) !=
- (devdata->old_flags & IFF_PROMISC)) {
- cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
- if (!cmdrsp)
- return;
- cmdrsp->cmdtype = CMD_NET_TYPE;
- cmdrsp->net.type = NET_RCV_PROMISC;
- cmdrsp->net.enbdis.context = netdev;
- cmdrsp->net.enbdis.enable =
- netdev->flags & IFF_PROMISC;
- visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp);
- kfree(cmdrsp);
- }
- devdata->old_flags = netdev->flags;
- }
+ if (devdata->old_flags == netdev->flags)
+ return;
+
+ if ((netdev->flags & IFF_PROMISC) ==
+ (devdata->old_flags & IFF_PROMISC))
+ goto out_save_flags;
+
+ cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
+ if (!cmdrsp)
+ return;
+ cmdrsp->cmdtype = CMD_NET_TYPE;
+ cmdrsp->net.type = NET_RCV_PROMISC;
+ cmdrsp->net.enbdis.context = netdev;
+ cmdrsp->net.enbdis.enable =
+ netdev->flags & IFF_PROMISC;
+ visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART,
+ cmdrsp);
+ kfree(cmdrsp);
+
+out_save_flags:
+ devdata->old_flags = netdev->flags;
}
/**
@@ -1134,7 +1137,7 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
*
* Got a receive packet back from the IO Part, handle it and send
* it up the stack.
- * Returns void
+ * Returns 1 if an skb was received, otherwise 0
*/
static int
visornic_rx(struct uiscmdrsp *cmdrsp)
@@ -1145,7 +1148,6 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
int cc, currsize, off;
struct ethhdr *eth;
unsigned long flags;
- int rx_count = 0;
/* post new rcv buf to the other end using the cmdrsp we have at hand
* post it without holding lock - but we'll use the signal lock to
@@ -1177,7 +1179,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
*/
spin_unlock_irqrestore(&devdata->priv_lock, flags);
repost_return(cmdrsp, devdata, skb, netdev);
- return rx_count;
+ return 0;
}
spin_unlock_irqrestore(&devdata->priv_lock, flags);
@@ -1196,7 +1198,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev,
"repost_return failed");
- return rx_count;
+ return 0;
}
/* length rcvd is greater than firstfrag in this skb rcv buf */
skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */
@@ -1212,7 +1214,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev,
"repost_return failed");
- return rx_count;
+ return 0;
}
skb->tail += skb->len;
skb->data_len = 0; /* nothing rcvd in frag_list */
@@ -1231,7 +1233,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev, "repost_return failed");
- return rx_count;
+ return 0;
}
if (cmdrsp->net.rcv.numrcvbufs > 1) {
@@ -1313,10 +1315,9 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
/* drop packet - don't forward it up to OS */
devdata->n_rcv_packets_not_accepted++;
repost_return(cmdrsp, devdata, skb, netdev);
- return rx_count;
+ return 0;
} while (0);
- rx_count++;
netif_receive_skb(skb);
/* netif_rx returns various values, but "in practice most drivers
* ignore the return value
@@ -1329,7 +1330,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
* new rcv buffer.
*/
repost_return(cmdrsp, devdata, skb, netdev);
- return rx_count;
+ return 1;
}
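The switch from an accumulated rx_count to hard-coded 0/1 returns makes the contract explicit: the function reports whether one frame went up the stack. The real caller sits outside this hunk (service_resp_queue() or similar), so the loop below is illustrative only:

	int rx_count = 0;

	while (visorchannel_signalremove(devdata->dev->visorchannel,
					 IOCHAN_FROM_IOPART, cmdrsp)) {
		if (cmdrsp->cmdtype == CMD_NET_TYPE &&
		    cmdrsp->net.type == NET_RCV)
			rx_count += visornic_rx(cmdrsp);
	}
	/* rx_count now says how many frames were delivered */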
/**
@@ -1339,13 +1340,11 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
*
* Setup initial values for the visornic based on channel and default
* values.
- * Returns a pointer to the devdata if successful, else NULL
+ * Returns a pointer to the devdata structure
*/
static struct visornic_devdata *
devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
{
- if (!devdata)
- return NULL;
devdata->dev = dev;
devdata->incarnation_id = get_jiffies_64();
return devdata;
@@ -1793,7 +1792,7 @@ static int visornic_probe(struct visor_device *dev)
sizeof(struct sk_buff *), GFP_KERNEL);
if (!devdata->rcvbuf) {
err = -ENOMEM;
- goto cleanup_rcvbuf;
+ goto cleanup_netdev;
}
/* set the net_xmit outstanding threshold */
@@ -1814,12 +1813,12 @@ static int visornic_probe(struct visor_device *dev)
devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
if (!devdata->cmdrsp_rcv) {
err = -ENOMEM;
- goto cleanup_cmdrsp_rcv;
+ goto cleanup_rcvbuf;
}
devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
if (!devdata->xmit_cmdrsp) {
err = -ENOMEM;
- goto cleanup_xmit_cmdrsp;
+ goto cleanup_cmdrsp_rcv;
}
INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
devdata->server_down = false;
@@ -2088,8 +2087,10 @@ static int visornic_init(void)
goto cleanup_debugfs;
err = visorbus_register_visor_driver(&visornic_driver);
- if (!err)
- return 0;
+ if (err)
+ goto cleanup_debugfs;
+
+ return 0;
cleanup_debugfs:
debugfs_remove_recursive(visornic_debugfs_dir);
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index acc3f3e8481b..d22628314305 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -6,7 +6,6 @@ ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
ccflags-y += -I$(src)/ -DWILC_ASIC_A0 -DWILC_DEBUGFS
wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
- wilc_msgqueue.o \
coreconfigurator.o host_interface.o \
wilc_wlan_cfg.o wilc_debugfs.o \
wilc_wlan.o
diff --git a/drivers/staging/wilc1000/TODO b/drivers/staging/wilc1000/TODO
index 95199d80a3e4..ec93b2ee0b08 100644
--- a/drivers/staging/wilc1000/TODO
+++ b/drivers/staging/wilc1000/TODO
@@ -4,6 +4,11 @@ TODO:
- remove custom debug and tracing functions
- rework comments and function headers(also coding style)
- replace all semaphores with mutexes or completions
+- Move handling for each individual member of 'union message_body' out
+  into a separate 'struct work_struct' and completely remove the multiplexer
+  that is currently part of host_if_work(), allowing the implementation of
+  each message handler to move into the call site of the function that
+  currently queues the 'host_if_msg'.
- make spi and sdio components coexist in one build
- turn compile-time platform configuration (BEAGLE_BOARD,
PANDA_BOARD, PLAT_WMS8304, PLAT_RKXXXX, CUSTOMER_PLATFORM, ...)
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 953584248e63..0b1760cba6e3 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -3,11 +3,14 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
#include "host_interface.h"
+#include <linux/spinlock.h>
+#include <linux/errno.h>
#include "coreconfigurator.h"
#include "wilc_wlan.h"
#include "wilc_wlan_if.h"
-#include "wilc_msgqueue.h"
#include <linux/etherdevice.h>
#include "wilc_wfi_netdevice.h"
@@ -181,7 +184,6 @@ union message_body {
struct drv_handler drv;
struct set_multicast multicast_info;
struct op_mode mode;
- struct set_mac_addr set_mac_info;
struct get_mac_addr get_mac_info;
struct ba_session_info session_info;
struct remain_ch remain_on_ch;
@@ -195,6 +197,7 @@ struct host_if_msg {
u16 id;
union message_body body;
struct wilc_vif *vif;
+ struct work_struct work;
};
struct join_bss_param {
@@ -229,8 +232,7 @@ struct join_bss_param {
static struct host_if_drv *terminated_handle;
bool wilc_optaining_ip;
static u8 P2P_LISTEN_STATE;
-static struct task_struct *hif_thread_handler;
-static struct message_queue hif_msg_q;
+static struct workqueue_struct *hif_workqueue;
static struct completion hif_thread_comp;
static struct completion hif_driver_comp;
static struct completion hif_wait_response;
@@ -264,6 +266,27 @@ static struct wilc_vif *join_req_vif;
static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo);
static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
static s32 Handle_ScanDone(struct wilc_vif *vif, enum scan_event enuEvent);
+static void host_if_work(struct work_struct *work);
+
+/*!
+ * @author syounan
+ * @date 1 Sep 2010
+ * @note copied from FLO glue implementation
+ * @version 1.0
+ */
+static int wilc_enqueue_cmd(struct host_if_msg *msg)
+{
+ struct host_if_msg *new_msg;
+
+ new_msg = kmemdup(msg, sizeof(*new_msg), GFP_ATOMIC);
+ if (!new_msg)
+ return -ENOMEM;
+
+ INIT_WORK(&new_msg->work, host_if_work);
+ queue_work(hif_workqueue, &new_msg->work);
+ return 0;
+}
+
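Call sites keep building the message on the stack, exactly as before; wilc_enqueue_cmd() duplicates it with kmemdup() and host_if_work() later kfree()s the copy. A representative caller, mirroring the conversions further down (the function name is illustrative):

static int example_get_rssi(struct wilc_vif *vif)
{
	struct host_if_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.id = HOST_IF_MSG_GET_RSSI;
	msg.vif = vif;

	/* safe even though msg is on the stack: wilc_enqueue_cmd()
	 * kmemdup()s it, and host_if_work() frees the copy
	 */
	return wilc_enqueue_cmd(&msg);
}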
/* The u8IfIdx starts from 0 to NUM_CONCURRENT_IFC -1, but 0 index used as
* special purpose in wilc device, so we add 1 to the index to starts from 1.
@@ -417,10 +440,10 @@ static void handle_get_mac_address(struct wilc_vif *vif,
complete(&hif_wait_response);
}
-static s32 handle_cfg_param(struct wilc_vif *vif,
- struct cfg_param_attr *cfg_param_attr)
+static void handle_cfg_param(struct wilc_vif *vif,
+ struct cfg_param_attr *cfg_param_attr)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid_list[32];
struct host_if_drv *hif_drv = vif->hif_drv;
int i = 0;
@@ -428,15 +451,16 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
mutex_lock(&hif_drv->cfg_values_lock);
if (cfg_param_attr->flag & BSS_TYPE) {
- if (cfg_param_attr->bss_type < 6) {
+ u8 bss_type = cfg_param_attr->bss_type;
+
+ if (bss_type < 6) {
wid_list[i].id = WID_BSS_TYPE;
- wid_list[i].val = (s8 *)&cfg_param_attr->bss_type;
+ wid_list[i].val = (s8 *)&bss_type;
wid_list[i].type = WID_CHAR;
wid_list[i].size = sizeof(char);
- hif_drv->cfg_values.bss_type = (u8)cfg_param_attr->bss_type;
+ hif_drv->cfg_values.bss_type = bss_type;
} else {
netdev_err(vif->ndev, "check value 6 over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -452,7 +476,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.auth_type = (u8)cfg_param_attr->auth_type;
} else {
netdev_err(vif->ndev, "Impossible value\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -467,7 +490,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.auth_timeout = cfg_param_attr->auth_timeout;
} else {
netdev_err(vif->ndev, "Range(1 ~ 65535) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -481,7 +503,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.power_mgmt_mode = (u8)cfg_param_attr->power_mgmt_mode;
} else {
netdev_err(vif->ndev, "Invalid power mode\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -496,7 +517,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.short_retry_limit = cfg_param_attr->short_retry_limit;
} else {
netdev_err(vif->ndev, "Range(1~256) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -511,7 +531,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.long_retry_limit = cfg_param_attr->long_retry_limit;
} else {
netdev_err(vif->ndev, "Range(1~256) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -526,7 +545,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.frag_threshold = cfg_param_attr->frag_threshold;
} else {
netdev_err(vif->ndev, "Threshold Range fail\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -541,7 +559,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.rts_threshold = cfg_param_attr->rts_threshold;
} else {
netdev_err(vif->ndev, "Threshold Range fail\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -555,7 +572,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.preamble_type = cfg_param_attr->preamble_type;
} else {
netdev_err(vif->ndev, "Preamle Range(0~2) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -569,7 +585,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.short_slot_allowed = (u8)cfg_param_attr->short_slot_allowed;
} else {
netdev_err(vif->ndev, "Short slot(2) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -583,7 +598,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.txop_prot_disabled = (u8)cfg_param_attr->txop_prot_disabled;
} else {
netdev_err(vif->ndev, "TXOP prot disable\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -598,7 +612,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.beacon_interval = cfg_param_attr->beacon_interval;
} else {
netdev_err(vif->ndev, "Beacon interval(1~65535)fail\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -613,7 +626,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.dtim_period = cfg_param_attr->dtim_period;
} else {
netdev_err(vif->ndev, "DTIM range(1~255) fail\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -627,7 +639,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.site_survey_enabled = (u8)cfg_param_attr->site_survey_enabled;
} else {
netdev_err(vif->ndev, "Site survey disable\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -642,7 +653,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.site_survey_scan_time = cfg_param_attr->site_survey_scan_time;
} else {
netdev_err(vif->ndev, "Site scan time(1~65535) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -657,7 +667,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.active_scan_time = cfg_param_attr->active_scan_time;
} else {
netdev_err(vif->ndev, "Active time(1~65535) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -672,7 +681,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.passive_scan_time = cfg_param_attr->passive_scan_time;
} else {
netdev_err(vif->ndev, "Passive time(1~65535) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -694,21 +702,19 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.curr_tx_rate = (u8)curr_tx_rate;
} else {
netdev_err(vif->ndev, "out of TX rate\n");
- result = -EINVAL;
goto unlock;
}
i++;
}
- result = wilc_send_config_pkt(vif, SET_CFG, wid_list,
- i, wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, SET_CFG, wid_list,
+ i, wilc_get_vif_idx(vif));
- if (result)
+ if (ret)
netdev_err(vif->ndev, "Error in setting CFG params\n");
unlock:
mutex_unlock(&hif_drv->cfg_values_lock);
- return result;
}
static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
@@ -1231,17 +1237,14 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif,
}
for (i = 0; i < hif_drv->usr_scan_req.rcvd_ch_cnt; i++) {
- if ((hif_drv->usr_scan_req.net_info[i].bssid) &&
- (pstrNetworkInfo->bssid)) {
- if (memcmp(hif_drv->usr_scan_req.net_info[i].bssid,
- pstrNetworkInfo->bssid, 6) == 0) {
- if (pstrNetworkInfo->rssi <= hif_drv->usr_scan_req.net_info[i].rssi) {
- goto done;
- } else {
- hif_drv->usr_scan_req.net_info[i].rssi = pstrNetworkInfo->rssi;
- bNewNtwrkFound = false;
- break;
- }
+ if (memcmp(hif_drv->usr_scan_req.net_info[i].bssid,
+ pstrNetworkInfo->bssid, 6) == 0) {
+ if (pstrNetworkInfo->rssi <= hif_drv->usr_scan_req.net_info[i].rssi) {
+ goto done;
+ } else {
+ hif_drv->usr_scan_req.net_info[i].rssi = pstrNetworkInfo->rssi;
+ bNewNtwrkFound = false;
+ break;
}
}
}
@@ -1250,20 +1253,17 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif,
if (hif_drv->usr_scan_req.rcvd_ch_cnt < MAX_NUM_SCANNED_NETWORKS) {
hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].rssi = pstrNetworkInfo->rssi;
- if (hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid &&
- pstrNetworkInfo->bssid) {
- memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid,
- pstrNetworkInfo->bssid, 6);
+ memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid,
+ pstrNetworkInfo->bssid, 6);
- hif_drv->usr_scan_req.rcvd_ch_cnt++;
+ hif_drv->usr_scan_req.rcvd_ch_cnt++;
- pstrNetworkInfo->new_network = true;
- pJoinParams = host_int_ParseJoinBssParam(pstrNetworkInfo);
+ pstrNetworkInfo->new_network = true;
+ pJoinParams = host_int_ParseJoinBssParam(pstrNetworkInfo);
- hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo,
- hif_drv->usr_scan_req.arg,
- pJoinParams);
- }
+ hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo,
+ hif_drv->usr_scan_req.arg,
+ pJoinParams);
}
} else {
pstrNetworkInfo->new_network = false;
@@ -2364,7 +2364,7 @@ static void ListenTimerCB(unsigned long arg)
msg.vif = vif;
msg.body.remain_on_ch.id = vif->hif_drv->remain_on_ch.id;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
}
@@ -2464,187 +2464,171 @@ static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
complete(&hif_wait_response);
}
-static int hostIFthread(void *pvArg)
+static void host_if_work(struct work_struct *work)
{
- u32 u32Ret;
- struct host_if_msg msg;
- struct wilc *wilc = pvArg;
- struct wilc_vif *vif;
-
- memset(&msg, 0, sizeof(struct host_if_msg));
-
- while (1) {
- wilc_mq_recv(&hif_msg_q, &msg, sizeof(struct host_if_msg), &u32Ret);
- vif = msg.vif;
- if (msg.id == HOST_IF_MSG_EXIT)
- break;
-
- if ((!wilc_initialized)) {
- usleep_range(200 * 1000, 200 * 1000);
- wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
- continue;
- }
-
- if (msg.id == HOST_IF_MSG_CONNECT &&
- vif->hif_drv->usr_scan_req.scan_result) {
- wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
- usleep_range(2 * 1000, 2 * 1000);
- continue;
- }
+ struct host_if_msg *msg;
+ struct wilc *wilc;
- switch (msg.id) {
- case HOST_IF_MSG_SCAN:
- handle_scan(msg.vif, &msg.body.scan_info);
- break;
+ msg = container_of(work, struct host_if_msg, work);
+ wilc = msg->vif->wilc;
- case HOST_IF_MSG_CONNECT:
- Handle_Connect(msg.vif, &msg.body.con_info);
- break;
+ if (msg->id == HOST_IF_MSG_CONNECT &&
+ msg->vif->hif_drv->usr_scan_req.scan_result) {
+ wilc_enqueue_cmd(msg);
+ usleep_range(2 * 1000, 2 * 1000);
+ goto free_msg;
+ }
+ switch (msg->id) {
+ case HOST_IF_MSG_SCAN:
+ handle_scan(msg->vif, &msg->body.scan_info);
+ break;
- case HOST_IF_MSG_RCVD_NTWRK_INFO:
- Handle_RcvdNtwrkInfo(msg.vif, &msg.body.net_info);
- break;
+ case HOST_IF_MSG_CONNECT:
+ Handle_Connect(msg->vif, &msg->body.con_info);
+ break;
- case HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO:
- Handle_RcvdGnrlAsyncInfo(vif,
- &msg.body.async_info);
- break;
+ case HOST_IF_MSG_RCVD_NTWRK_INFO:
+ Handle_RcvdNtwrkInfo(msg->vif, &msg->body.net_info);
+ break;
- case HOST_IF_MSG_KEY:
- Handle_Key(msg.vif, &msg.body.key_info);
- break;
+ case HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO:
+ Handle_RcvdGnrlAsyncInfo(msg->vif,
+ &msg->body.async_info);
+ break;
- case HOST_IF_MSG_CFG_PARAMS:
- handle_cfg_param(msg.vif, &msg.body.cfg_info);
- break;
+ case HOST_IF_MSG_KEY:
+ Handle_Key(msg->vif, &msg->body.key_info);
+ break;
- case HOST_IF_MSG_SET_CHANNEL:
- handle_set_channel(msg.vif, &msg.body.channel_info);
- break;
+ case HOST_IF_MSG_CFG_PARAMS:
+ handle_cfg_param(msg->vif, &msg->body.cfg_info);
+ break;
- case HOST_IF_MSG_DISCONNECT:
- Handle_Disconnect(msg.vif);
- break;
+ case HOST_IF_MSG_SET_CHANNEL:
+ handle_set_channel(msg->vif, &msg->body.channel_info);
+ break;
- case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
- del_timer(&vif->hif_drv->scan_timer);
+ case HOST_IF_MSG_DISCONNECT:
+ Handle_Disconnect(msg->vif);
+ break;
- if (!wilc_wlan_get_num_conn_ifcs(wilc))
- wilc_chip_sleep_manually(wilc);
+ case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
+ del_timer(&msg->vif->hif_drv->scan_timer);
- Handle_ScanDone(msg.vif, SCAN_EVENT_DONE);
+ if (!wilc_wlan_get_num_conn_ifcs(wilc))
+ wilc_chip_sleep_manually(wilc);
- if (vif->hif_drv->remain_on_ch_pending)
- Handle_RemainOnChan(msg.vif,
- &msg.body.remain_on_ch);
+ Handle_ScanDone(msg->vif, SCAN_EVENT_DONE);
- break;
+ if (msg->vif->hif_drv->remain_on_ch_pending)
+ Handle_RemainOnChan(msg->vif,
+ &msg->body.remain_on_ch);
- case HOST_IF_MSG_GET_RSSI:
- Handle_GetRssi(msg.vif);
- break;
+ break;
- case HOST_IF_MSG_GET_STATISTICS:
- Handle_GetStatistics(msg.vif,
- (struct rf_info *)msg.body.data);
- break;
+ case HOST_IF_MSG_GET_RSSI:
+ Handle_GetRssi(msg->vif);
+ break;
- case HOST_IF_MSG_ADD_BEACON:
- Handle_AddBeacon(msg.vif, &msg.body.beacon_info);
- break;
+ case HOST_IF_MSG_GET_STATISTICS:
+ Handle_GetStatistics(msg->vif,
+ (struct rf_info *)msg->body.data);
+ break;
- case HOST_IF_MSG_DEL_BEACON:
- Handle_DelBeacon(msg.vif);
- break;
+ case HOST_IF_MSG_ADD_BEACON:
+ Handle_AddBeacon(msg->vif, &msg->body.beacon_info);
+ break;
- case HOST_IF_MSG_ADD_STATION:
- Handle_AddStation(msg.vif, &msg.body.add_sta_info);
- break;
+ case HOST_IF_MSG_DEL_BEACON:
+ Handle_DelBeacon(msg->vif);
+ break;
- case HOST_IF_MSG_DEL_STATION:
- Handle_DelStation(msg.vif, &msg.body.del_sta_info);
- break;
+ case HOST_IF_MSG_ADD_STATION:
+ Handle_AddStation(msg->vif, &msg->body.add_sta_info);
+ break;
- case HOST_IF_MSG_EDIT_STATION:
- Handle_EditStation(msg.vif, &msg.body.edit_sta_info);
- break;
+ case HOST_IF_MSG_DEL_STATION:
+ Handle_DelStation(msg->vif, &msg->body.del_sta_info);
+ break;
- case HOST_IF_MSG_GET_INACTIVETIME:
- Handle_Get_InActiveTime(msg.vif, &msg.body.mac_info);
- break;
+ case HOST_IF_MSG_EDIT_STATION:
+ Handle_EditStation(msg->vif, &msg->body.edit_sta_info);
+ break;
- case HOST_IF_MSG_SCAN_TIMER_FIRED:
+ case HOST_IF_MSG_GET_INACTIVETIME:
+ Handle_Get_InActiveTime(msg->vif, &msg->body.mac_info);
+ break;
- Handle_ScanDone(msg.vif, SCAN_EVENT_ABORTED);
- break;
+ case HOST_IF_MSG_SCAN_TIMER_FIRED:
+ Handle_ScanDone(msg->vif, SCAN_EVENT_ABORTED);
+ break;
- case HOST_IF_MSG_CONNECT_TIMER_FIRED:
- Handle_ConnectTimeout(msg.vif);
- break;
+ case HOST_IF_MSG_CONNECT_TIMER_FIRED:
+ Handle_ConnectTimeout(msg->vif);
+ break;
- case HOST_IF_MSG_POWER_MGMT:
- Handle_PowerManagement(msg.vif,
- &msg.body.pwr_mgmt_info);
- break;
+ case HOST_IF_MSG_POWER_MGMT:
+ Handle_PowerManagement(msg->vif,
+ &msg->body.pwr_mgmt_info);
+ break;
- case HOST_IF_MSG_SET_WFIDRV_HANDLER:
- handle_set_wfi_drv_handler(msg.vif, &msg.body.drv);
- break;
+ case HOST_IF_MSG_SET_WFIDRV_HANDLER:
+ handle_set_wfi_drv_handler(msg->vif, &msg->body.drv);
+ break;
- case HOST_IF_MSG_SET_OPERATION_MODE:
- handle_set_operation_mode(msg.vif, &msg.body.mode);
- break;
+ case HOST_IF_MSG_SET_OPERATION_MODE:
+ handle_set_operation_mode(msg->vif, &msg->body.mode);
+ break;
- case HOST_IF_MSG_SET_IPADDRESS:
- handle_set_ip_address(vif,
- msg.body.ip_info.ip_addr,
- msg.body.ip_info.idx);
- break;
+ case HOST_IF_MSG_SET_IPADDRESS:
+ handle_set_ip_address(msg->vif,
+ msg->body.ip_info.ip_addr,
+ msg->body.ip_info.idx);
+ break;
- case HOST_IF_MSG_GET_IPADDRESS:
- handle_get_ip_address(vif, msg.body.ip_info.idx);
- break;
+ case HOST_IF_MSG_GET_IPADDRESS:
+ handle_get_ip_address(msg->vif, msg->body.ip_info.idx);
+ break;
- case HOST_IF_MSG_GET_MAC_ADDRESS:
- handle_get_mac_address(msg.vif,
- &msg.body.get_mac_info);
- break;
+ case HOST_IF_MSG_GET_MAC_ADDRESS:
+ handle_get_mac_address(msg->vif,
+ &msg->body.get_mac_info);
+ break;
- case HOST_IF_MSG_REMAIN_ON_CHAN:
- Handle_RemainOnChan(msg.vif, &msg.body.remain_on_ch);
- break;
+ case HOST_IF_MSG_REMAIN_ON_CHAN:
+ Handle_RemainOnChan(msg->vif, &msg->body.remain_on_ch);
+ break;
- case HOST_IF_MSG_REGISTER_FRAME:
- Handle_RegisterFrame(msg.vif, &msg.body.reg_frame);
- break;
+ case HOST_IF_MSG_REGISTER_FRAME:
+ Handle_RegisterFrame(msg->vif, &msg->body.reg_frame);
+ break;
- case HOST_IF_MSG_LISTEN_TIMER_FIRED:
- Handle_ListenStateExpired(msg.vif, &msg.body.remain_on_ch);
- break;
+ case HOST_IF_MSG_LISTEN_TIMER_FIRED:
+ Handle_ListenStateExpired(msg->vif, &msg->body.remain_on_ch);
+ break;
- case HOST_IF_MSG_SET_MULTICAST_FILTER:
- Handle_SetMulticastFilter(msg.vif, &msg.body.multicast_info);
- break;
+ case HOST_IF_MSG_SET_MULTICAST_FILTER:
+ Handle_SetMulticastFilter(msg->vif, &msg->body.multicast_info);
+ break;
- case HOST_IF_MSG_DEL_ALL_STA:
- Handle_DelAllSta(msg.vif, &msg.body.del_all_sta_info);
- break;
+ case HOST_IF_MSG_DEL_ALL_STA:
+ Handle_DelAllSta(msg->vif, &msg->body.del_all_sta_info);
+ break;
- case HOST_IF_MSG_SET_TX_POWER:
- handle_set_tx_pwr(msg.vif, msg.body.tx_power.tx_pwr);
- break;
+ case HOST_IF_MSG_SET_TX_POWER:
+ handle_set_tx_pwr(msg->vif, msg->body.tx_power.tx_pwr);
+ break;
- case HOST_IF_MSG_GET_TX_POWER:
- handle_get_tx_pwr(msg.vif, &msg.body.tx_power.tx_pwr);
- break;
- default:
- netdev_err(vif->ndev, "[Host Interface] undefined\n");
- break;
- }
+ case HOST_IF_MSG_GET_TX_POWER:
+ handle_get_tx_pwr(msg->vif, &msg->body.tx_power.tx_pwr);
+ break;
+ default:
+ netdev_err(msg->vif->ndev, "[Host Interface] undefined\n");
+ break;
}
-
+free_msg:
+ kfree(msg);
complete(&hif_thread_comp);
- return 0;
}
static void TimerCB_Scan(unsigned long arg)
@@ -2656,7 +2640,7 @@ static void TimerCB_Scan(unsigned long arg)
msg.vif = vif;
msg.id = HOST_IF_MSG_SCAN_TIMER_FIRED;
- wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ wilc_enqueue_cmd(&msg);
}
static void TimerCB_Connect(unsigned long arg)
@@ -2668,7 +2652,7 @@ static void TimerCB_Connect(unsigned long arg)
msg.vif = vif;
msg.id = HOST_IF_MSG_CONNECT_TIMER_FIRED;
- wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ wilc_enqueue_cmd(&msg);
}
s32 wilc_remove_key(struct host_if_drv *hif_drv, const u8 *pu8StaAddress)
@@ -2703,7 +2687,7 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
msg.vif = vif;
msg.body.key_info.attr.wep.index = index;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "Request to remove WEP key\n");
else
@@ -2732,7 +2716,7 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
msg.vif = vif;
msg.body.key_info.attr.wep.index = index;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "Default key index\n");
else
@@ -2766,7 +2750,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
msg.body.key_info.attr.wep.key_len = len;
msg.body.key_info.attr.wep.index = index;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "STA - WEP Key\n");
wait_for_completion(&hif_drv->comp_test_key_block);
@@ -2801,7 +2785,7 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
msg.body.key_info.attr.wep.mode = mode;
msg.body.key_info.attr.wep.auth_type = auth_type;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "AP - WEP Key\n");
@@ -2857,7 +2841,7 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
msg.body.key_info.attr.wpa.mode = cipher_mode;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "PTK Key\n");
@@ -2926,7 +2910,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
msg.body.key_info.attr.wpa.key_len = key_len;
msg.body.key_info.attr.wpa.seq_len = key_rsc_len;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "RX GTK\n");
else
@@ -2956,7 +2940,7 @@ int wilc_set_pmkid_info(struct wilc_vif *vif,
&pmkid->pmkidlist[i].pmkid, PMKID_LEN);
}
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "PMKID Info\n");
@@ -2974,7 +2958,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
msg.body.get_mac_info.mac_addr = mac_addr;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "Failed to send get mac address\n");
return -EFAULT;
@@ -3038,7 +3022,7 @@ int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
if (hif_drv->hif_state < HOST_IF_CONNECTING)
hif_drv->hif_state = HOST_IF_CONNECTING;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "send message: Set join request\n");
return -EFAULT;
@@ -3067,7 +3051,7 @@ int wilc_disconnect(struct wilc_vif *vif, u16 reason_code)
msg.id = HOST_IF_MSG_DISCONNECT;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "Failed to send message: disconnect\n");
else
@@ -3111,7 +3095,7 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
msg.body.channel_info.set_ch = channel;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "wilc mq send fail\n");
return -EINVAL;
@@ -3131,7 +3115,7 @@ int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx)
msg.body.drv.mac_idx = mac_idx;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "wilc mq send fail\n");
result = -EINVAL;
@@ -3150,7 +3134,7 @@ int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode)
msg.body.mode.mode = mode;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "wilc mq send fail\n");
result = -EINVAL;
@@ -3177,7 +3161,7 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
msg.id = HOST_IF_MSG_GET_INACTIVETIME;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "Failed to send get host ch param\n");
else
@@ -3198,7 +3182,7 @@ int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
msg.id = HOST_IF_MSG_GET_RSSI;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "Failed to send get host ch param\n");
return -EFAULT;
@@ -3226,7 +3210,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
msg.body.data = (char *)stats;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "Failed to send get host channel\n");
return -EFAULT;
@@ -3279,7 +3263,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
if (!scan_info->ies)
return -ENOMEM;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "Error in sending message queue\n");
return -EINVAL;
@@ -3309,7 +3293,7 @@ int wilc_hif_set_cfg(struct wilc_vif *vif,
msg.body.cfg_info = *cfg_param;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
return result;
}
@@ -3371,21 +3355,17 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
init_completion(&hif_drv->comp_inactive_time);
if (clients_count == 0) {
- result = wilc_mq_create(&hif_msg_q);
-
if (result < 0) {
netdev_err(vif->ndev, "Failed to creat MQ\n");
goto _fail_;
}
-
- hif_thread_handler = kthread_run(hostIFthread, wilc,
- "WILC_kthread");
-
- if (IS_ERR(hif_thread_handler)) {
- netdev_err(vif->ndev, "Failed to creat Thread\n");
- result = -EFAULT;
+ hif_workqueue = create_singlethread_workqueue("WILC_wq");
+ if (!hif_workqueue) {
+ netdev_err(vif->ndev, "Failed to create workqueue\n");
+ result = -ENOMEM;
goto _fail_mq_;
}
+
setup_timer(&periodic_rssi, GetPeriodicRSSI,
(unsigned long)vif);
mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000));
@@ -3411,10 +3391,8 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
clients_count++;
- return result;
-
_fail_mq_:
- wilc_mq_destroy(&hif_msg_q);
+ destroy_workqueue(hif_workqueue);
_fail_:
return result;
}
@@ -3458,13 +3436,13 @@ int wilc_deinit(struct wilc_vif *vif)
msg.id = HOST_IF_MSG_EXIT;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result != 0)
netdev_err(vif->ndev, "deinit : Error(%d)\n", result);
else
wait_for_completion(&hif_thread_comp);
- wilc_mq_destroy(&hif_msg_q);
+ destroy_workqueue(hif_workqueue);
}
kfree(hif_drv);
@@ -3504,7 +3482,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
msg.body.net_info.buffer = kmalloc(u32Length, GFP_KERNEL);
memcpy(msg.body.net_info.buffer, pu8Buffer, u32Length);
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "message parameters (%d)\n", result);
}
@@ -3549,7 +3527,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
msg.body.async_info.buffer = kmalloc(u32Length, GFP_KERNEL);
memcpy(msg.body.async_info.buffer, pu8Buffer, u32Length);
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "synchronous info (%d)\n", result);
@@ -3580,7 +3558,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
msg.id = HOST_IF_MSG_RCVD_SCAN_COMPLETE;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "complete param (%d)\n", result);
}
@@ -3606,7 +3584,7 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
msg.body.remain_on_ch.id = session_id;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc mq send fail\n");
@@ -3631,7 +3609,7 @@ int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id)
msg.vif = vif;
msg.body.remain_on_ch.id = session_id;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc mq send fail\n");
@@ -3662,7 +3640,7 @@ int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
msg.body.reg_frame.reg = reg;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc mq send fail\n");
@@ -3700,7 +3678,7 @@ int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
beacon_info->tail = NULL;
}
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc mq send fail\n");
@@ -3722,7 +3700,7 @@ int wilc_del_beacon(struct wilc_vif *vif)
msg.id = HOST_IF_MSG_DEL_BEACON;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -3749,7 +3727,7 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
return -ENOMEM;
}
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
@@ -3771,7 +3749,7 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr)
else
memcpy(del_sta_info->mac_addr, mac_addr, ETH_ALEN);
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
@@ -3801,7 +3779,7 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
return result;
del_all_sta_info->assoc_sta = assoc_sta;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -3832,7 +3810,7 @@ int wilc_edit_station(struct wilc_vif *vif,
return -ENOMEM;
}
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -3856,7 +3834,7 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
pwr_mgmt_info->enabled = enabled;
pwr_mgmt_info->timeout = timeout;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
@@ -3877,7 +3855,7 @@ int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
multicast_filter_param->enabled = enabled;
multicast_filter_param->cnt = count;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
@@ -4050,7 +4028,7 @@ int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
msg.vif = vif;
msg.body.ip_info.idx = idx;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -4070,7 +4048,7 @@ static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
msg.vif = vif;
msg.body.ip_info.idx = idx;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -4088,7 +4066,7 @@ int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power)
msg.body.tx_power.tx_pwr = tx_power;
msg.vif = vif;
- ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ ret = wilc_enqueue_cmd(&msg);
if (ret)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -4105,7 +4083,7 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
msg.id = HOST_IF_MSG_GET_TX_POWER;
msg.vif = vif;
- ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ ret = wilc_enqueue_cmd(&msg);
if (ret)
netdev_err(vif->ndev, "Failed to get TX PWR\n");
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 8d2dd0db0bed..ddfea29df2a7 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -224,10 +224,6 @@ struct op_mode {
u32 mode;
};
-struct set_mac_addr {
- u8 mac_addr[ETH_ALEN];
-};
-
struct get_mac_addr {
u8 *mac_addr;
};
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 4f93c11e73c0..3a66255f14fc 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
-
+#include <linux/mutex.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
@@ -31,8 +31,6 @@ static struct notifier_block g_dev_notifier = {
.notifier_call = dev_state_ev_handler
};
-static struct semaphore close_exit_sync;
-
static int wlan_deinit_locks(struct net_device *dev);
static void wlan_deinitialize_threads(struct net_device *dev);
@@ -241,7 +239,7 @@ void wilc_mac_indicate(struct wilc *wilc, int flag)
(unsigned char *)&status, 4);
if (wilc->mac_status == WILC_MAC_STATUS_INIT) {
wilc->mac_status = status;
- up(&wilc->sync_event);
+ complete(&wilc->sync_event);
} else {
wilc->mac_status = status;
}
@@ -316,7 +314,7 @@ static int linux_wlan_txq_task(void *vp)
complete(&wl->txq_thread_started);
while (1) {
- down(&wl->txq_event);
+ wait_for_completion(&wl->txq_event);
if (wl->close) {
complete(&wl->txq_thread_started);
@@ -362,7 +360,7 @@ int wilc_wlan_get_firmware(struct net_device *dev)
goto _fail_;
if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) {
- netdev_err(dev, "%s - firmare not available\n", firmware);
+ netdev_err(dev, "%s - firmware not available\n", firmware);
ret = -1;
goto _fail_;
}
@@ -386,9 +384,9 @@ static int linux_wlan_start_firmware(struct net_device *dev)
if (ret < 0)
return ret;
- ret = wilc_lock_timeout(wilc, &wilc->sync_event, 5000);
- if (ret)
- return ret;
+ if (!wait_for_completion_timeout(&wilc->sync_event,
+ msecs_to_jiffies(5000)))
+ return -ETIME;
return 0;
}
@@ -650,7 +648,7 @@ void wilc1000_wlan_deinit(struct net_device *dev)
mutex_unlock(&wl->hif_cs);
}
if (&wl->txq_event)
- up(&wl->txq_event);
+ complete(&wl->txq_event);
wlan_deinitialize_threads(dev);
deinit_irq(dev);
@@ -679,12 +677,12 @@ static int wlan_init_locks(struct net_device *dev)
mutex_init(&wl->rxq_cs);
spin_lock_init(&wl->txq_spinlock);
- sema_init(&wl->txq_add_to_head_cs, 1);
+ mutex_init(&wl->txq_add_to_head_cs);
- sema_init(&wl->txq_event, 0);
+ init_completion(&wl->txq_event);
- sema_init(&wl->cfg_event, 0);
- sema_init(&wl->sync_event, 0);
+ init_completion(&wl->cfg_event);
+ init_completion(&wl->sync_event);
init_completion(&wl->txq_thread_started);
return 0;
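This hunk converts semaphores that only ever acted as events into completions, which name the intent and drop the counting semantics. The idiom, as a minimal self-contained sketch:

#include <linux/completion.h>

static struct completion ev;

static void setup(void)
{
	init_completion(&ev);		/* was: sema_init(&ev, 0) */
}

static void waiter(void)
{
	wait_for_completion(&ev);	/* was: down(&ev) */
}

static void signaler(void)
{
	complete(&ev);			/* was: up(&ev) */
}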
@@ -717,10 +715,10 @@ static int wlan_initialize_threads(struct net_device *dev)
wilc->txq_thread = kthread_run(linux_wlan_txq_task, (void *)dev,
"K_TXQ_TASK");
- if (!wilc->txq_thread) {
+ if (IS_ERR(wilc->txq_thread)) {
netdev_err(dev, "couldn't create TXQ thread\n");
wilc->close = 0;
- return -ENOBUFS;
+ return PTR_ERR(wilc->txq_thread);
}
wait_for_completion(&wilc->txq_thread_started);
@@ -738,7 +736,7 @@ static void wlan_deinitialize_threads(struct net_device *dev)
wl->close = 1;
if (&wl->txq_event)
- up(&wl->txq_event);
+ complete(&wl->txq_event);
if (wl->txq_thread) {
kthread_stop(wl->txq_thread);
@@ -1088,7 +1086,6 @@ int wilc_mac_close(struct net_device *ndev)
WILC_WFI_deinit_mon_interface();
}
- up(&close_exit_sync);
vif->mac_opened = 0;
return 0;
@@ -1232,8 +1229,6 @@ void wilc_netdev_cleanup(struct wilc *wilc)
}
if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
- wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000);
-
for (i = 0; i < NUM_CONCURRENT_IFC; i++)
if (wilc->vif[i]->ndev)
if (vif[i]->mac_opened)
@@ -1258,8 +1253,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
struct net_device *ndev;
struct wilc *wl;
- sema_init(&close_exit_sync, 0);
-
wl = kzalloc(sizeof(*wl), GFP_KERNEL);
if (!wl)
return -ENOMEM;
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.c b/drivers/staging/wilc1000/wilc_msgqueue.c
deleted file mode 100644
index 6cb894e58f6d..000000000000
--- a/drivers/staging/wilc1000/wilc_msgqueue.c
+++ /dev/null
@@ -1,144 +0,0 @@
-
-#include "wilc_msgqueue.h"
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-/*!
- * @author syounan
- * @date 1 Sep 2010
- * @note copied from FLO glue implementatuion
- * @version 1.0
- */
-int wilc_mq_create(struct message_queue *mq)
-{
- spin_lock_init(&mq->lock);
- sema_init(&mq->sem, 0);
- INIT_LIST_HEAD(&mq->msg_list);
- mq->recv_count = 0;
- mq->exiting = false;
- return 0;
-}
-
-/*!
- * @author syounan
- * @date 1 Sep 2010
- * @note copied from FLO glue implementatuion
- * @version 1.0
- */
-int wilc_mq_destroy(struct message_queue *mq)
-{
- struct message *msg;
-
- mq->exiting = true;
-
- /* Release any waiting receiver thread. */
- while (mq->recv_count > 0) {
- up(&mq->sem);
- mq->recv_count--;
- }
-
- while (!list_empty(&mq->msg_list)) {
- msg = list_first_entry(&mq->msg_list, struct message, list);
- list_del(&msg->list);
- kfree(msg->buf);
- }
-
- return 0;
-}
-
-/*!
- * @author syounan
- * @date 1 Sep 2010
- * @note copied from FLO glue implementatuion
- * @version 1.0
- */
-int wilc_mq_send(struct message_queue *mq,
- const void *send_buf, u32 send_buf_size)
-{
- unsigned long flags;
- struct message *new_msg = NULL;
-
- if (!mq || (send_buf_size == 0) || !send_buf)
- return -EINVAL;
-
- if (mq->exiting)
- return -EFAULT;
-
- /* construct a new message */
- new_msg = kmalloc(sizeof(*new_msg), GFP_ATOMIC);
- if (!new_msg)
- return -ENOMEM;
-
- new_msg->len = send_buf_size;
- INIT_LIST_HEAD(&new_msg->list);
- new_msg->buf = kmemdup(send_buf, send_buf_size, GFP_ATOMIC);
- if (!new_msg->buf) {
- kfree(new_msg);
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&mq->lock, flags);
-
- /* add it to the message queue */
- list_add_tail(&new_msg->list, &mq->msg_list);
-
- spin_unlock_irqrestore(&mq->lock, flags);
-
- up(&mq->sem);
-
- return 0;
-}
-
-/*!
- * @author syounan
- * @date 1 Sep 2010
- * @note copied from FLO glue implementatuion
- * @version 1.0
- */
-int wilc_mq_recv(struct message_queue *mq,
- void *recv_buf, u32 recv_buf_size, u32 *recv_len)
-{
- struct message *msg;
- unsigned long flags;
-
- if (!mq || (recv_buf_size == 0) || !recv_buf || !recv_len)
- return -EINVAL;
-
- if (mq->exiting)
- return -EFAULT;
-
- spin_lock_irqsave(&mq->lock, flags);
- mq->recv_count++;
- spin_unlock_irqrestore(&mq->lock, flags);
-
- down(&mq->sem);
- spin_lock_irqsave(&mq->lock, flags);
-
- if (list_empty(&mq->msg_list)) {
- spin_unlock_irqrestore(&mq->lock, flags);
- up(&mq->sem);
- return -EFAULT;
- }
- /* check buffer size */
- msg = list_first_entry(&mq->msg_list, struct message, list);
- if (recv_buf_size < msg->len) {
- spin_unlock_irqrestore(&mq->lock, flags);
- up(&mq->sem);
- return -EOVERFLOW;
- }
-
- /* consume the message */
- mq->recv_count--;
- memcpy(recv_buf, msg->buf, msg->len);
- *recv_len = msg->len;
-
- list_del(&msg->list);
-
- kfree(msg->buf);
- kfree(msg);
-
- spin_unlock_irqrestore(&mq->lock, flags);
-
- return 0;
-}
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.h b/drivers/staging/wilc1000/wilc_msgqueue.h
deleted file mode 100644
index 846a4840e6e7..000000000000
--- a/drivers/staging/wilc1000/wilc_msgqueue.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __WILC_MSG_QUEUE_H__
-#define __WILC_MSG_QUEUE_H__
-
-#include <linux/semaphore.h>
-#include <linux/list.h>
-
-struct message {
- void *buf;
- u32 len;
- struct list_head list;
-};
-
-struct message_queue {
- struct semaphore sem;
- spinlock_t lock;
- bool exiting;
- u32 recv_count;
- struct list_head msg_list;
-};
-
-int wilc_mq_create(struct message_queue *mq);
-int wilc_mq_send(struct message_queue *mq,
- const void *send_buf, u32 send_buf_size);
-int wilc_mq_recv(struct message_queue *mq,
- void *recv_buf, u32 recv_buf_size, u32 *recv_len);
-int wilc_mq_destroy(struct message_queue *mq);
-
-#endif
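
The two deletions above retire wilc1000's private message queue: a counting semaphore paired with a spinlock-protected list, where wilc_mq_send() kmemdup()s the payload and up()s the semaphore, and wilc_mq_recv() down()s and pops the head. The kernel already has primitives for this producer/consumer shape; below is a minimal sketch of the same send path built on a wait queue, illustrative only and not the driver's actual replacement (the wilc_wlan.c hunks further down rework the callers around mutexes and completions instead). A receiver would wait_event_interruptible() on the list becoming non-empty.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct fifo_msg {
	struct list_head list;
	size_t len;
	void *buf;
};

struct fifo {
	spinlock_t lock;		/* protects msgs */
	struct list_head msgs;
	wait_queue_head_t wq;		/* stands in for the counting semaphore */
};

static int fifo_send(struct fifo *f, const void *buf, size_t len)
{
	struct fifo_msg *m = kmalloc(sizeof(*m), GFP_KERNEL);

	if (!m)
		return -ENOMEM;
	m->buf = kmemdup(buf, len, GFP_KERNEL);
	if (!m->buf) {
		kfree(m);
		return -ENOMEM;
	}
	m->len = len;
	spin_lock(&f->lock);
	list_add_tail(&m->list, &f->msgs);
	spin_unlock(&f->lock);
	wake_up(&f->wq);		/* where the old code did up(&mq->sem) */
	return 0;
}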
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index a839a7967dd8..39b73fb27398 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -1006,7 +1006,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
u32 reg;
if (nint > MAX_NUM_INT) {
- dev_err(&func->dev, "Too many interupts (%d)...\n", nint);
+ dev_err(&func->dev, "Too many interrupts (%d)...\n", nint);
return 0;
}
if (nint > MAX_NUN_INT_THRPT_ENH2) {
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 4268e2f29307..22cf4b7857e5 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -1082,7 +1082,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
int ret, i;
if (nint > MAX_NUM_INT) {
- dev_err(&spi->dev, "Too many interupts (%d)...\n", nint);
+ dev_err(&spi->dev, "Too many interrupts (%d)...\n", nint);
return 0;
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 51aff4ff7d7c..3ddfa4aecb7a 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -625,8 +625,7 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
for (i = 0; i < request->n_ssids; i++) {
- if (request->ssids[i].ssid &&
- request->ssids[i].ssid_len != 0) {
+ if (request->ssids[i].ssid_len != 0) {
strHiddenNetwork.net_info[i].ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL);
memcpy(strHiddenNetwork.net_info[i].ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
strHiddenNetwork.net_info[i].ssid_len = request->ssids[i].ssid_len;
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 3a561df6d370..5cc6a82d8081 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -42,6 +42,8 @@
#include "host_interface.h"
#include "wilc_wlan.h"
#include <linux/wireless.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
#define FLOW_CONTROL_LOWER_THRESHOLD 128
#define FLOW_CONTROL_UPPER_THRESHOLD 256
@@ -170,15 +172,15 @@ struct wilc {
struct wilc_vif *vif[NUM_CONCURRENT_IFC];
u8 open_ifcs;
- struct semaphore txq_add_to_head_cs;
+ struct mutex txq_add_to_head_cs;
spinlock_t txq_spinlock;
struct mutex rxq_cs;
struct mutex hif_cs;
- struct semaphore cfg_event;
- struct semaphore sync_event;
- struct semaphore txq_event;
+ struct completion cfg_event;
+ struct completion sync_event;
+ struct completion txq_event;
struct completion txq_thread_started;
struct task_struct *txq_thread;
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 11e16d56ace7..19a580939dfc 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -1,3 +1,4 @@
+#include <linux/completion.h>
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
#include "wilc_wfi_netdevice.h"
@@ -89,7 +90,7 @@ static void wilc_wlan_txq_add_to_tail(struct net_device *dev,
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
- up(&wilc->txq_event);
+ complete(&wilc->txq_event);
}
static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
@@ -98,9 +99,7 @@ static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
unsigned long flags;
struct wilc *wilc = vif->wilc;
- if (wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs,
- CFG_PKTS_TIMEOUT))
- return -1;
+ mutex_lock(&wilc->txq_add_to_head_cs);
spin_lock_irqsave(&wilc->txq_spinlock, flags);
@@ -118,8 +117,8 @@ static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
wilc->txq_entries += 1;
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
- up(&wilc->txq_add_to_head_cs);
- up(&wilc->txq_event);
+ mutex_unlock(&wilc->txq_add_to_head_cs);
+ complete(&wilc->txq_event);
return 0;
}
@@ -287,7 +286,8 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
spin_unlock_irqrestore(&wilc->txq_spinlock, wilc->txq_spinlock_flags);
while (dropped > 0) {
- wilc_lock_timeout(wilc, &wilc->txq_event, 1);
+ wait_for_completion_timeout(&wilc->txq_event,
+ msecs_to_jiffies(1));
dropped--;
}
@@ -310,7 +310,7 @@ static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
netdev_dbg(vif->ndev, "Adding config packet ...\n");
if (wilc->quit) {
netdev_dbg(vif->ndev, "Return due to clear function\n");
- up(&wilc->cfg_event);
+ complete(&wilc->cfg_event);
return 0;
}
@@ -571,8 +571,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if (wilc->quit)
break;
- wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs,
- CFG_PKTS_TIMEOUT);
+ mutex_lock(&wilc->txq_add_to_head_cs);
wilc_wlan_txq_filter_dup_tcp_ack(dev);
tqe = wilc_wlan_txq_get_first(wilc);
i = 0;
@@ -753,7 +752,7 @@ _end_:
if (ret != 1)
break;
} while (0);
- up(&wilc->txq_add_to_head_cs);
+ mutex_unlock(&wilc->txq_add_to_head_cs);
wilc->txq_exit = 1;
*txq_count = wilc->txq_entries;
@@ -770,7 +769,7 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
do {
if (wilc->quit) {
- up(&wilc->cfg_event);
+ complete(&wilc->cfg_event);
break;
}
rqe = wilc_wlan_rxq_remove(wilc);
@@ -821,7 +820,7 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
wilc_wlan_cfg_indicate_rx(wilc, &buffer[pkt_offset + offset], pkt_len, &rsp);
if (rsp.type == WILC_CFG_RSP) {
if (wilc->cfg_seq_no == rsp.seq_no)
- up(&wilc->cfg_event);
+ complete(&wilc->cfg_event);
} else if (rsp.type == WILC_CFG_RSP_STATUS) {
wilc_mac_indicate(wilc, WILC_MAC_INDICATE_STATUS);
@@ -1229,11 +1228,12 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
if (wilc_wlan_cfg_commit(vif, WILC_CFG_SET, drv_handler))
ret_size = 0;
- if (wilc_lock_timeout(wilc, &wilc->cfg_event,
- CFG_PKTS_TIMEOUT)) {
+ if (!wait_for_completion_timeout(&wilc->cfg_event,
+ msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
netdev_dbg(vif->ndev, "Set Timed Out\n");
ret_size = 0;
}
+
wilc->cfg_frame_in_use = 0;
wilc->cfg_frame_offset = 0;
wilc->cfg_seq_no += 1;
@@ -1266,8 +1266,8 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
if (wilc_wlan_cfg_commit(vif, WILC_CFG_QUERY, drv_handler))
ret_size = 0;
- if (wilc_lock_timeout(wilc, &wilc->cfg_event,
- CFG_PKTS_TIMEOUT)) {
+ if (!wait_for_completion_timeout(&wilc->cfg_event,
+ msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
netdev_dbg(vif->ndev, "Get Timed Out\n");
ret_size = 0;
}
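
The wilc_wfi_netdevice.h and wilc_wlan.c hunks above swap raw semaphores for the primitives that match their use: a mutex for the txq_add_to_head critical section and completions for the one-shot cfg/sync/txq events. Note the inverted tests around the timeouts: the old wilc_lock_timeout() returned non-zero on failure, whereas wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies on success, hence the added '!'. A minimal sketch of the waiter side, with names assumed:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_cfg_response(struct completion *cfg_event, unsigned int ms)
{
	/* 0 means the timeout elapsed; > 0 is the time left in jiffies */
	if (!wait_for_completion_timeout(cfg_event, msecs_to_jiffies(ms)))
		return -ETIMEDOUT;
	return 0;
}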
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7c4efb4417b0..22af12f8b8eb 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -312,7 +312,8 @@ static void iblock_bio_done(struct bio *bio)
}
static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
+ int op_flags)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
struct bio *bio;
@@ -334,18 +335,19 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
+ bio_set_op_attrs(bio, op, op_flags);
return bio;
}
-static void iblock_submit_bios(struct bio_list *list, int rw)
+static void iblock_submit_bios(struct bio_list *list)
{
struct blk_plug plug;
struct bio *bio;
blk_start_plug(&plug);
while ((bio = bio_list_pop(list)))
- submit_bio(rw, bio);
+ submit_bio(bio);
blk_finish_plug(&plug);
}
@@ -387,9 +389,10 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd;
+ bio->bi_rw = WRITE_FLUSH;
if (!immed)
bio->bi_private = cmd;
- submit_bio(WRITE_FLUSH, bio);
+ submit_bio(bio);
return 0;
}
@@ -478,7 +481,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
goto fail;
cmd->priv = ibr;
- bio = iblock_get_bio(cmd, block_lba, 1);
+ bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
if (!bio)
goto fail_free_ibr;
@@ -491,7 +494,8 @@ iblock_execute_write_same(struct se_cmd *cmd)
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
- bio = iblock_get_bio(cmd, block_lba, 1);
+ bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
+ 0);
if (!bio)
goto fail_put_bios;
@@ -504,7 +508,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
sectors -= 1;
}
- iblock_submit_bios(&list, WRITE);
+ iblock_submit_bios(&list);
return 0;
fail_put_bios:
@@ -677,8 +681,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct scatterlist *sg;
u32 sg_num = sgl_nents;
unsigned bio_cnt;
- int rw = 0;
- int i;
+ int i, op, op_flags = 0;
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -687,18 +690,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* Force writethrough using WRITE_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
+ op = REQ_OP_WRITE;
if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
if (cmd->se_cmd_flags & SCF_FUA)
- rw = WRITE_FUA;
+ op_flags = WRITE_FUA;
else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- rw = WRITE_FUA;
- else
- rw = WRITE;
- } else {
- rw = WRITE;
+ op_flags = WRITE_FUA;
}
} else {
- rw = READ;
+ op = REQ_OP_READ;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@@ -712,7 +712,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
return 0;
}
- bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+ bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
if (!bio)
goto fail_free_ibr;
@@ -732,11 +732,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
- iblock_submit_bios(&list, rw);
+ iblock_submit_bios(&list);
bio_cnt = 0;
}
- bio = iblock_get_bio(cmd, block_lba, sg_num);
+ bio = iblock_get_bio(cmd, block_lba, sg_num, op,
+ op_flags);
if (!bio)
goto fail_put_bios;
@@ -756,7 +757,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
goto fail_put_bios;
}
- iblock_submit_bios(&list, rw);
+ iblock_submit_bios(&list);
iblock_complete_cmd(cmd);
return 0;
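
The target_core_iblock.c conversion tracks the block layer's split of the old rw argument into an operation plus flags carried inside the bio: bio_set_op_attrs() stamps REQ_OP_READ or REQ_OP_WRITE (plus WRITE_FUA when forced unit access is wanted) onto each bio as it is built, so submit_bio() and iblock_submit_bios() no longer take a direction. A minimal sketch of the new calling convention, assuming a bio already filled with pages:

#include <linux/bio.h>
#include <linux/blk_types.h>

static void submit_write_bio(struct bio *bio, bool fua)
{
	/* direction and flags now ride in the bio itself */
	bio_set_op_attrs(bio, REQ_OP_WRITE, fua ? WRITE_FUA : 0);
	submit_bio(bio);	/* the old submit_bio(rw, bio) form is gone */
}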
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index de18790eb21c..9125d9358dea 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -876,19 +876,19 @@ static inline struct bio *pscsi_get_bio(int nr_vecs)
static sense_reason_t
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
- enum dma_data_direction data_direction, struct bio **hbio)
+ struct request *req)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
- struct bio *bio = NULL, *tbio = NULL;
+ struct bio *bio = NULL;
struct page *page;
struct scatterlist *sg;
u32 data_len = cmd->data_length, i, len, bytes, off;
int nr_pages = (cmd->data_length + sgl[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
int nr_vecs = 0, rc;
- int rw = (data_direction == DMA_TO_DEVICE);
+ int rw = (cmd->data_direction == DMA_TO_DEVICE);
- *hbio = NULL;
+ BUG_ON(!cmd->data_length);
pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
@@ -922,21 +922,11 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
goto fail;
if (rw)
- bio->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
(rw) ? "rw" : "r", nr_vecs);
- /*
- * Set *hbio pointer to handle the case:
- * nr_pages > BIO_MAX_PAGES, where additional
- * bios need to be added to complete a given
- * command.
- */
- if (!*hbio)
- *hbio = tbio = bio;
- else
- tbio = tbio->bi_next = bio;
}
pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
@@ -955,11 +945,16 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
+
+ rc = blk_rq_append_bio(req, bio);
+ if (rc) {
+ pr_err("pSCSI: failed to append bio\n");
+ goto fail;
+ }
+
/*
* Clear the pointer so that another bio will
- * be allocated with pscsi_get_bio() above, the
- * current bio has already been set *tbio and
- * bio->bi_next.
+ * be allocated with pscsi_get_bio() above.
*/
bio = NULL;
}
@@ -968,13 +963,16 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
}
+ if (bio) {
+ rc = blk_rq_append_bio(req, bio);
+ if (rc) {
+ pr_err("pSCSI: failed to append bio\n");
+ goto fail;
+ }
+ }
+
return 0;
fail:
- while (*hbio) {
- bio = *hbio;
- *hbio = (*hbio)->bi_next;
- bio_endio(bio);
- }
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
@@ -992,11 +990,9 @@ pscsi_execute_cmd(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
- enum dma_data_direction data_direction = cmd->data_direction;
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct pscsi_plugin_task *pt;
struct request *req;
- struct bio *hbio;
sense_reason_t ret;
/*
@@ -1012,31 +1008,21 @@ pscsi_execute_cmd(struct se_cmd *cmd)
memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
scsi_command_size(cmd->t_task_cdb));
- if (!sgl) {
- req = blk_get_request(pdv->pdv_sd->request_queue,
- (data_direction == DMA_TO_DEVICE),
- GFP_KERNEL);
- if (IS_ERR(req)) {
- pr_err("PSCSI: blk_get_request() failed\n");
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto fail;
- }
+ req = blk_get_request(pdv->pdv_sd->request_queue,
+ (cmd->data_direction == DMA_TO_DEVICE),
+ GFP_KERNEL);
+ if (IS_ERR(req)) {
+ pr_err("PSCSI: blk_get_request() failed\n");
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto fail;
+ }
- blk_rq_set_block_pc(req);
- } else {
- BUG_ON(!cmd->data_length);
+ blk_rq_set_block_pc(req);
- ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
+ if (sgl) {
+ ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
if (ret)
- goto fail;
-
- req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
- GFP_KERNEL);
- if (IS_ERR(req)) {
- pr_err("pSCSI: blk_make_request() failed\n");
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto fail_free_bio;
- }
+ goto fail_put_request;
}
req->end_io = pscsi_req_done;
@@ -1057,13 +1043,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
return 0;
-fail_free_bio:
- while (hbio) {
- struct bio *bio = hbio;
- hbio = hbio->bi_next;
- bio_endio(bio);
- }
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+fail_put_request:
+ blk_put_request(req);
fail:
kfree(pt);
return ret;
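
In target_core_pscsi.c the hand-built bio chain (a head pointer *hbio threaded through bi_next and later handed to blk_make_request()) gives way to allocating the request first and appending each bio with blk_rq_append_bio() as it fills. That merges the with- and without-scatterlist allocation paths and leaves one error exit that simply drops the request. A condensed sketch of the shape; build_next_bio() is a hypothetical stand-in for the page-mapping loop:

#include <linux/blkdev.h>

static int queue_pc_request(struct request_queue *q)
{
	struct request *req;
	struct bio *bio;

	req = blk_get_request(q, WRITE, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);
	blk_rq_set_block_pc(req);

	while ((bio = build_next_bio()) != NULL) {	/* hypothetical */
		if (blk_rq_append_bio(req, bio))
			goto fail_put_request;
	}
	/* ... set end_io and hand off with blk_execute_rq_nowait() ... */
	return 0;

fail_put_request:
	blk_put_request(req);
	return -EIO;
}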
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 5b4b47ed948b..3788ed74c9ab 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -787,22 +787,34 @@ __cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus, u32 capacitance,
get_static_t plat_static_func)
{
+ struct cpufreq_policy *policy;
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev;
char dev_name[THERMAL_NAME_LENGTH];
struct cpufreq_frequency_table *pos, *table;
+ struct cpumask temp_mask;
unsigned int freq, i, num_cpus;
int ret;
- table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
+ cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
+ policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
+ if (!policy) {
+ pr_debug("%s: CPUFreq policy not found\n", __func__);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ table = policy->freq_table;
if (!table) {
pr_debug("%s: CPUFreq table not found\n", __func__);
- return ERR_PTR(-EPROBE_DEFER);
+ cool_dev = ERR_PTR(-ENODEV);
+ goto put_policy;
}
cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
- if (!cpufreq_dev)
- return ERR_PTR(-ENOMEM);
+ if (!cpufreq_dev) {
+ cool_dev = ERR_PTR(-ENOMEM);
+ goto put_policy;
+ }
num_cpus = cpumask_weight(clip_cpus);
cpufreq_dev->time_in_idle = kcalloc(num_cpus,
@@ -892,7 +904,7 @@ __cpufreq_cooling_register(struct device_node *np,
CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);
- return cool_dev;
+ goto put_policy;
remove_idr:
release_idr(&cpufreq_idr, cpufreq_dev->id);
@@ -906,6 +918,8 @@ free_time_in_idle:
kfree(cpufreq_dev->time_in_idle);
free_cdev:
kfree(cpufreq_dev);
+put_policy:
+ cpufreq_cpu_put(policy);
return cool_dev;
}
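
The cpu_cooling.c change stops using cpufreq_frequency_get_table() and instead reads policy->freq_table from a policy obtained with cpufreq_cpu_get(). That call takes a reference on the policy, so every exit from __cpufreq_cooling_register(), the success path included, is funneled through the new put_policy label; -EPROBE_DEFER now means "no policy yet" while a policy without a table becomes -ENODEV. The reference discipline in isolation, with the registration body elided:

#include <linux/cpufreq.h>
#include <linux/err.h>

static void *register_with_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	void *ret = NULL;

	if (!policy)
		return ERR_PTR(-EPROBE_DEFER);	/* cpufreq not up yet */

	if (!policy->freq_table)
		ret = ERR_PTR(-ENODEV);
	/* ... otherwise allocate and register the cooling device ... */

	cpufreq_cpu_put(policy);	/* every path drops the reference */
	return ret;
}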
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index 4ebb31a35a64..b2bbaa1c60b0 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include "intel_soc_dts_iosf.h"
#define CRITICAL_OFFSET_FROM_TJ_MAX 5000
@@ -42,7 +43,8 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
}
static const struct x86_cpu_id soc_thermal_ids[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
+ BYT_SOC_DTS_APIC_IRQ},
{}
};
MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 3840d6b421c4..5e4fa9206861 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -93,8 +93,6 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096))
#endif
-#define STD_COM_FLAGS (0)
-
/* firmware stuff */
#define ZL_MAX_BLOCKS 16
#define DRIVER_VERSION 0x02010203
@@ -2288,7 +2286,6 @@ static int cy_get_serial_info(struct cyclades_port *info,
.closing_wait = info->port.closing_wait,
.baud_base = info->baud,
.custom_divisor = info->custom_divisor,
- .hub6 = 0, /*!!! */
};
return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
}
@@ -3084,7 +3081,6 @@ static int cy_init_card(struct cyclades_card *cinfo)
info->port.closing_wait = CLOSING_WAIT_DELAY;
info->port.close_delay = 5 * HZ / 10;
- info->port.flags = STD_COM_FLAGS;
init_completion(&info->shutdown_wait);
if (cy_is_Z(cinfo)) {
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 345cebb07ae7..2685d59d2724 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -252,20 +252,11 @@ static int ipwireless_get_serial_info(struct ipw_tty *tty,
{
struct serial_struct tmp;
- if (!retinfo)
- return (-EFAULT);
-
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_UNKNOWN;
tmp.line = tty->index;
- tmp.port = 0;
- tmp.irq = 0;
- tmp.flags = 0;
tmp.baud_base = 115200;
- tmp.close_delay = 0;
- tmp.closing_wait = 0;
- tmp.custom_divisor = 0;
- tmp.hub6 = 0;
+
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 9325262289f9..25ccef2fe748 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -323,12 +323,12 @@ static void dashtty_timer(unsigned long ignored)
if (channel >= 0)
fetch_data(channel);
- mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL);
+ mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
}
static void add_poll_timer(struct timer_list *poll_timer)
{
- setup_timer(poll_timer, dashtty_timer, 0);
+ setup_pinned_timer(poll_timer, dashtty_timer, 0);
poll_timer->expires = jiffies + DA_TTY_POLL;
/*
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index a119176a1855..234123b0c642 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -689,7 +689,7 @@ static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
mips_ejtag_fdc_handle(priv);
if (!priv->removing)
- mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL);
+ mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL);
}
/* TTY Port operations */
@@ -1002,7 +1002,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
raw_spin_unlock_irq(&priv->lock);
} else {
/* If we didn't get an usable IRQ, poll instead */
- setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
+ setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
(unsigned long)priv);
priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
/*
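
The metag_da.c and mips_ejtag_fdc.c hunks are part of retiring mod_timer_pinned(): CPU pinning becomes a property set once at initialization, via setup_pinned_timer() marking the timer TIMER_PINNED, after which an ordinary mod_timer() re-arm stays on the local CPU. A minimal sketch of a self-re-arming pinned poll timer, names assumed:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list poll_timer;

static void poll_fn(unsigned long data)
{
	/* ... poll the hardware ... */
	mod_timer(&poll_timer, jiffies + HZ / 100);	/* stays pinned */
}

static void start_polling(void)
{
	setup_pinned_timer(&poll_timer, poll_fn, 0);
	mod_timer(&poll_timer, jiffies + HZ / 100);
}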
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 98d2bd16706d..69294ae154be 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1219,7 +1219,6 @@ static int mxser_get_serial_info(struct tty_struct *tty,
.close_delay = info->port.close_delay,
.closing_wait = info->port.closing_wait,
.custom_divisor = info->custom_divisor,
- .hub6 = 0
};
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 215a99237e95..122e0e4029fe 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -15,6 +15,8 @@
#include <linux/serial_reg.h>
#include <linux/dmaengine.h>
+#include "../serial_mctrl_gpio.h"
+
struct uart_8250_dma {
int (*tx_dma)(struct uart_8250_port *p);
int (*rx_dma)(struct uart_8250_port *p);
@@ -53,11 +55,9 @@ struct old_serial_port {
unsigned int port;
unsigned int irq;
upf_t flags;
- unsigned char hub6;
unsigned char io_type;
unsigned char __iomem *iomem_base;
unsigned short iomem_reg_shift;
- unsigned long irqflags;
};
struct serial8250_config {
@@ -131,6 +131,47 @@ void serial8250_rpm_put(struct uart_8250_port *p);
int serial8250_em485_init(struct uart_8250_port *p);
void serial8250_em485_destroy(struct uart_8250_port *p);
+static inline void serial8250_out_MCR(struct uart_8250_port *up, int value)
+{
+ int mctrl_gpio = 0;
+
+ serial_out(up, UART_MCR, value);
+
+ if (value & UART_MCR_RTS)
+ mctrl_gpio |= TIOCM_RTS;
+ if (value & UART_MCR_DTR)
+ mctrl_gpio |= TIOCM_DTR;
+
+ mctrl_gpio_set(up->gpios, mctrl_gpio);
+}
+
+static inline int serial8250_in_MCR(struct uart_8250_port *up)
+{
+ int mctrl, mctrl_gpio = 0;
+
+ mctrl = serial_in(up, UART_MCR);
+
+ /* save current MCR values */
+ if (mctrl & UART_MCR_RTS)
+ mctrl_gpio |= TIOCM_RTS;
+ if (mctrl & UART_MCR_DTR)
+ mctrl_gpio |= TIOCM_DTR;
+
+ mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio);
+
+ if (mctrl_gpio & TIOCM_RTS)
+ mctrl |= UART_MCR_RTS;
+ else
+ mctrl &= ~UART_MCR_RTS;
+
+ if (mctrl_gpio & TIOCM_DTR)
+ mctrl |= UART_MCR_DTR;
+ else
+ mctrl &= ~UART_MCR_DTR;
+
+ return mctrl;
+}
+
#if defined(__alpha__) && !defined(CONFIG_PCI)
/*
* Digital did something really horribly wrong with the OUT1 and OUT2
@@ -237,9 +278,3 @@ static inline int serial_index(struct uart_port *port)
{
return port->minor - 64;
}
-
-#if 0
-#define DEBUG_INTR(fmt...) printk(fmt)
-#else
-#define DEBUG_INTR(fmt...) do { } while (0)
-#endif
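
The serial8250_out_MCR()/serial8250_in_MCR() helpers added to 8250.h wrap every MCR access so that RTS and DTR are mirrored to, and read back from, optional mctrl GPIOs; the 8250_port.c hunks below convert the remaining direct MCR reads and writes to them. Callers keep the usual read-modify-write shape:

/* usage sketch: raise RTS; any RTS mctrl GPIO follows automatically */
int mcr = serial8250_in_MCR(up);

mcr |= UART_MCR_RTS;
serial8250_out_MCR(up, mcr);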
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 0fbd7c033a25..13ad5c3d2e68 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -114,7 +114,7 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
struct list_head *l, *end = NULL;
int pass_counter = 0, handled = 0;
- DEBUG_INTR("serial8250_interrupt(%d)...", irq);
+ pr_debug("%s(%d): start\n", __func__, irq);
spin_lock(&i->lock);
@@ -144,7 +144,7 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
spin_unlock(&i->lock);
- DEBUG_INTR("end.\n");
+ pr_debug("%s(%d): end\n", __func__, irq);
return IRQ_RETVAL(handled);
}
@@ -546,10 +546,10 @@ static void __init serial8250_isa_init_ports(void)
port->iobase = old_serial_port[i].port;
port->irq = irq_canonicalize(old_serial_port[i].irq);
- port->irqflags = old_serial_port[i].irqflags;
+ port->irqflags = 0;
port->uartclk = old_serial_port[i].baud_base * 16;
port->flags = old_serial_port[i].flags;
- port->hub6 = old_serial_port[i].hub6;
+ port->hub6 = 0;
port->membase = old_serial_port[i].iomem_base;
port->iotype = old_serial_port[i].io_type;
port->regshift = old_serial_port[i].iomem_reg_shift;
@@ -675,7 +675,7 @@ static struct console univ8250_console = {
.device = uart_console_device,
.setup = univ8250_console_setup,
.match = univ8250_console_match,
- .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV,
.index = -1,
.data = &serial8250_reg,
};
@@ -974,6 +974,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart = serial8250_find_match_or_unused(&up->port);
if (uart && uart->port.type != PORT_8250_CIR) {
+ struct mctrl_gpios *gpios;
+
if (uart->port.dev)
uart_remove_one_port(&serial8250_reg, &uart->port);
@@ -1011,6 +1013,13 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
if (up->port.flags & UPF_FIXED_TYPE)
uart->port.type = up->port.type;
+ gpios = mctrl_gpio_init(&uart->port, 0);
+ if (IS_ERR(gpios)) {
+ if (PTR_ERR(gpios) != -ENOSYS)
+ return PTR_ERR(gpios);
+ } else
+ uart->gpios = gpios;
+
serial8250_set_defaults(uart);
/* Possibly override default I/O functions. */
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 7f33d1c8d1a9..3590d012001f 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -145,6 +145,7 @@ void serial8250_rx_dma_flush(struct uart_8250_port *p)
dmaengine_terminate_all(dma->rxchan);
}
}
+EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
int serial8250_request_dma(struct uart_8250_port *p)
{
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 8d08ff5c4e34..85a12f032402 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -150,6 +150,7 @@ EARLYCON_DECLARE(uart, early_serial8250_setup);
OF_EARLYCON_DECLARE(ns16550, "ns16550", early_serial8250_setup);
OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup);
OF_EARLYCON_DECLARE(uart, "nvidia,tegra20-uart", early_serial8250_setup);
+OF_EARLYCON_DECLARE(uart, "snps,dw-apb-uart", early_serial8250_setup);
#ifdef CONFIG_SERIAL_8250_OMAP
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 870981dd9e39..737b4b3957b0 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -13,6 +13,7 @@
#include <linux/pnp.h>
#include <linux/kernel.h>
#include <linux/serial_core.h>
+#include <linux/irq.h>
#include "8250.h"
#define ADDR_PORT 0
@@ -30,6 +31,12 @@
#define IO_ADDR2 0x60
#define LDN 0x7
+#define IRQ_MODE 0x70
+#define IRQ_SHARE BIT(4)
+#define IRQ_MODE_MASK (BIT(6) | BIT(5))
+#define IRQ_LEVEL_LOW 0
+#define IRQ_EDGE_HIGH BIT(5)
+
#define RS485 0xF0
#define RTS_INVERT BIT(5)
#define RS485_URA BIT(4)
@@ -176,10 +183,37 @@ static int find_base_port(struct fintek_8250 *pdata, u16 io_address)
return -ENODEV;
}
+static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
+{
+ int status;
+ u8 tmp;
+
+ status = fintek_8250_enter_key(pdata->base_port, pdata->key);
+ if (status)
+ return status;
+
+ outb(LDN, pdata->base_port + ADDR_PORT);
+ outb(pdata->index, pdata->base_port + DATA_PORT);
+
+ outb(IRQ_MODE, pdata->base_port + ADDR_PORT);
+ tmp = inb(pdata->base_port + DATA_PORT);
+
+ tmp &= ~IRQ_MODE_MASK;
+ tmp |= IRQ_SHARE;
+ if (!level_mode)
+ tmp |= IRQ_EDGE_HIGH;
+
+ outb(tmp, pdata->base_port + DATA_PORT);
+ fintek_8250_exit_key(pdata->base_port);
+ return 0;
+}
+
int fintek_8250_probe(struct uart_8250_port *uart)
{
struct fintek_8250 *pdata;
struct fintek_8250 probe_data;
+ struct irq_data *irq_data = irq_get_irq_data(uart->port.irq);
+ bool level_mode = irqd_is_level_type(irq_data);
if (find_base_port(&probe_data, uart->port.iobase))
return -ENODEV;
@@ -192,5 +226,5 @@ int fintek_8250_probe(struct uart_8250_port *uart)
uart->port.rs485_config = fintek_8250_rs485_config;
uart->port.private_data = pdata;
- return 0;
+ return fintek_8250_set_irq_mode(pdata, level_mode);
}
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index b0677f610863..4d9dc10e265c 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -48,7 +48,6 @@ static const struct of_device_id of_match[];
#define UART_MCR_MDCE BIT(7)
#define UART_MCR_FCM BIT(6)
-#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
static struct earlycon_device *early_device;
static uint8_t __init early_in(struct uart_port *port, int offset)
@@ -141,7 +140,6 @@ OF_EARLYCON_DECLARE(jz4775_uart, "ingenic,jz4775-uart",
EARLYCON_DECLARE(jz4780_uart, ingenic_early_console_setup);
OF_EARLYCON_DECLARE(jz4780_uart, "ingenic,jz4780-uart",
ingenic_early_console_setup);
-#endif /* CONFIG_SERIAL_EARLYCON */
static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
{
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index 86379a79a6a3..339de9cd0866 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -96,13 +96,27 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
static int dnv_handle_irq(struct uart_port *p)
{
struct mid8250 *mid = p->private_data;
+ struct uart_8250_port *up = up_to_u8250p(p);
unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
+ u32 status;
int ret = IRQ_NONE;
-
- if (fisr & BIT(2))
- ret |= hsu_dma_irq(&mid->dma_chip, 1);
- if (fisr & BIT(1))
- ret |= hsu_dma_irq(&mid->dma_chip, 0);
+ int err;
+
+ if (fisr & BIT(2)) {
+ err = hsu_dma_get_status(&mid->dma_chip, 1, &status);
+ if (err > 0) {
+ serial8250_rx_dma_flush(up);
+ ret |= IRQ_HANDLED;
+ } else if (err == 0)
+ ret |= hsu_dma_do_irq(&mid->dma_chip, 1, status);
+ }
+ if (fisr & BIT(1)) {
+ err = hsu_dma_get_status(&mid->dma_chip, 0, &status);
+ if (err > 0)
+ ret |= IRQ_HANDLED;
+ else if (err == 0)
+ ret |= hsu_dma_do_irq(&mid->dma_chip, 0, status);
+ }
if (fisr & BIT(0))
ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
return ret;
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 3489fbcb7313..3611ec9bb4fa 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -301,7 +301,7 @@ static struct platform_driver mtk8250_platform_driver = {
};
module_platform_driver(mtk8250_platform_driver);
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && !defined(MODULE)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init early_mtk8250_setup(struct earlycon_device *device,
const char *options)
{
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 2c44c792d586..e14982f36a04 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -134,18 +134,21 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
serial8250_do_set_mctrl(port, mctrl);
- /*
- * Turn off autoRTS if RTS is lowered and restore autoRTS setting
- * if RTS is raised
- */
- lcr = serial_in(up, UART_LCR);
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
- priv->efr |= UART_EFR_RTS;
- else
- priv->efr &= ~UART_EFR_RTS;
- serial_out(up, UART_EFR, priv->efr);
- serial_out(up, UART_LCR, lcr);
+ if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
+ UART_GPIO_RTS))) {
+ /*
+ * Turn off autoRTS if RTS is lowered and restore autoRTS
+ * setting if RTS is raised
+ */
+ lcr = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+ priv->efr |= UART_EFR_RTS;
+ else
+ priv->efr &= ~UART_EFR_RTS;
+ serial_out(up, UART_EFR, priv->efr);
+ serial_out(up, UART_LCR, lcr);
+ }
}
/*
@@ -280,7 +283,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- serial_out(up, UART_MCR, UART_MCR_TCRTLR);
+ serial8250_out_MCR(up, UART_MCR_TCRTLR);
serial_out(up, UART_FCR, up->fcr);
omap8250_update_scr(up, priv);
@@ -296,7 +299,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
serial_out(up, UART_LCR, 0);
/* drop TCR + TLR access, we setup XON/XOFF later */
- serial_out(up, UART_MCR, up->mcr);
+ serial8250_out_MCR(up, up->mcr);
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
@@ -446,7 +449,9 @@ static void omap_8250_set_termios(struct uart_port *port,
priv->efr = 0;
up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
- if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
+ if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW
+ && IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
+ UART_GPIO_RTS))) {
/* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
priv->efr |= UART_EFR_CTS;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 8dd250fbd367..20ebaea5c414 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1136,11 +1136,11 @@ static int pci_quatech_rqopr(struct uart_8250_port *port)
static void pci_quatech_wqopr(struct uart_8250_port *port, u8 qopr)
{
unsigned long base = port->port.iobase;
- u8 LCR, val;
+ u8 LCR;
LCR = inb(base + UART_LCR);
outb(0xBF, base + UART_LCR);
- val = inb(base + UART_SCR);
+ inb(base + UART_SCR);
outb(qopr, base + UART_SCR);
outb(LCR, base + UART_LCR);
}
@@ -1865,6 +1865,16 @@ pci_wch_ch353_setup(struct serial_private *priv,
}
static int
+pci_wch_ch355_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ port->port.flags |= UPF_FIXED_TYPE;
+ port->port.type = PORT_16550A;
+ return pci_default_setup(priv, board, port, idx);
+}
+
+static int
pci_wch_ch38x_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -1915,6 +1925,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046
#define PCI_DEVICE_ID_WCH_CH353_1S1P 0x5053
#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053
+#define PCI_DEVICE_ID_WCH_CH355_4S 0x7173
#define PCI_VENDOR_ID_AGESTAR 0x5372
#define PCI_DEVICE_ID_AGESTAR_9375 0x6872
#define PCI_VENDOR_ID_ASIX 0x9710
@@ -2618,6 +2629,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
+ /* WCH CH355 4S card (16550 clone) */
+ {
+ .vendor = PCI_VENDOR_ID_WCH,
+ .device = PCI_DEVICE_ID_WCH_CH355_4S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_wch_ch355_setup,
+ },
/* WCH CH382 2S card (16850 clone) */
{
.vendor = PCIE_VENDOR_ID_WCH,
@@ -3812,6 +3831,7 @@ static const struct pci_device_id blacklist[] = {
/* multi-io cards handled by parport_serial */
{ PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
{ PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
+ { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
{ PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
{ PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
@@ -5567,6 +5587,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH355_4S,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_b0_bt_4_115200 },
+
{ PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch382_2 },
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index d4036038a4dd..7481b95c6d84 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -527,13 +527,13 @@ static void serial8250_clear_fifos(struct uart_8250_port *p)
static inline void serial8250_em485_rts_after_send(struct uart_8250_port *p)
{
- unsigned char mcr = serial_in(p, UART_MCR);
+ unsigned char mcr = serial8250_in_MCR(p);
if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
mcr |= UART_MCR_RTS;
else
mcr &= ~UART_MCR_RTS;
- serial_out(p, UART_MCR, mcr);
+ serial8250_out_MCR(p, mcr);
}
static void serial8250_em485_handle_start_tx(unsigned long arg);
@@ -785,10 +785,10 @@ static int size_fifo(struct uart_8250_port *up)
old_lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, 0);
old_fcr = serial_in(up, UART_FCR);
- old_mcr = serial_in(up, UART_MCR);
+ old_mcr = serial8250_in_MCR(up);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
- serial_out(up, UART_MCR, UART_MCR_LOOP);
+ serial8250_out_MCR(up, UART_MCR_LOOP);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
old_dl = serial_dl_read(up);
serial_dl_write(up, 0x0001);
@@ -800,7 +800,7 @@ static int size_fifo(struct uart_8250_port *up)
(count < 256); count++)
serial_in(up, UART_RX);
serial_out(up, UART_FCR, old_fcr);
- serial_out(up, UART_MCR, old_mcr);
+ serial8250_out_MCR(up, old_mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_dl_write(up, old_dl);
serial_out(up, UART_LCR, old_lcr);
@@ -1040,17 +1040,17 @@ static void autoconfig_16550a(struct uart_8250_port *up)
* it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
*/
serial_out(up, UART_LCR, 0);
- status1 = serial_in(up, UART_MCR);
+ status1 = serial8250_in_MCR(up);
serial_out(up, UART_LCR, 0xE0);
status2 = serial_in(up, 0x02); /* EXCR1 */
if (!((status2 ^ status1) & UART_MCR_LOOP)) {
serial_out(up, UART_LCR, 0);
- serial_out(up, UART_MCR, status1 ^ UART_MCR_LOOP);
+ serial8250_out_MCR(up, status1 ^ UART_MCR_LOOP);
serial_out(up, UART_LCR, 0xE0);
status2 = serial_in(up, 0x02); /* EXCR1 */
serial_out(up, UART_LCR, 0);
- serial_out(up, UART_MCR, status1);
+ serial8250_out_MCR(up, status1);
if ((status2 ^ status1) & UART_MCR_LOOP) {
unsigned short quot;
@@ -1224,7 +1224,7 @@ static void autoconfig(struct uart_8250_port *up)
}
}
- save_mcr = serial_in(up, UART_MCR);
+ save_mcr = serial8250_in_MCR(up);
save_lcr = serial_in(up, UART_LCR);
/*
@@ -1237,9 +1237,9 @@ static void autoconfig(struct uart_8250_port *up)
* that conflicts with COM 1-4 --- we hope!
*/
if (!(port->flags & UPF_SKIP_TEST)) {
- serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+ serial8250_out_MCR(up, UART_MCR_LOOP | 0x0A);
status1 = serial_in(up, UART_MSR) & 0xF0;
- serial_out(up, UART_MCR, save_mcr);
+ serial8250_out_MCR(up, save_mcr);
if (status1 != 0x90) {
spin_unlock_irqrestore(&port->lock, flags);
DEBUG_AUTOCONF("LOOP test failed (%02x) ",
@@ -1305,7 +1305,7 @@ static void autoconfig(struct uart_8250_port *up)
if (port->type == PORT_RSA)
serial_out(up, UART_RSA_FRR, 0);
#endif
- serial_out(up, UART_MCR, save_mcr);
+ serial8250_out_MCR(up, save_mcr);
serial8250_clear_fifos(up);
serial_in(up, UART_RX);
if (up->capabilities & UART_CAP_UUE)
@@ -1353,19 +1353,18 @@ static void autoconfig_irq(struct uart_8250_port *up)
/* forget possible initially masked and pending IRQ */
probe_irq_off(probe_irq_on());
- save_mcr = serial_in(up, UART_MCR);
+ save_mcr = serial8250_in_MCR(up);
save_ier = serial_in(up, UART_IER);
- serial_out(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
+ serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
irqs = probe_irq_on();
- serial_out(up, UART_MCR, 0);
+ serial8250_out_MCR(up, 0);
udelay(10);
if (port->flags & UPF_FOURPORT) {
- serial_out(up, UART_MCR,
- UART_MCR_DTR | UART_MCR_RTS);
+ serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
} else {
- serial_out(up, UART_MCR,
- UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
+ serial8250_out_MCR(up,
+ UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
}
serial_out(up, UART_IER, 0x0f); /* enable all intrs */
serial_in(up, UART_LSR);
@@ -1376,7 +1375,7 @@ static void autoconfig_irq(struct uart_8250_port *up)
udelay(20);
irq = probe_irq_off(irqs);
- serial_out(up, UART_MCR, save_mcr);
+ serial8250_out_MCR(up, save_mcr);
serial_out(up, UART_IER, save_ier);
if (port->flags & UPF_FOURPORT)
@@ -1549,14 +1548,14 @@ static inline void start_tx_rs485(struct uart_port *port)
del_timer(&em485->stop_tx_timer);
em485->active_timer = NULL;
- mcr = serial_in(up, UART_MCR);
+ mcr = serial8250_in_MCR(up);
if (!!(up->port.rs485.flags & SER_RS485_RTS_ON_SEND) !=
!!(mcr & UART_MCR_RTS)) {
if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
mcr |= UART_MCR_RTS;
else
mcr &= ~UART_MCR_RTS;
- serial_out(up, UART_MCR, mcr);
+ serial8250_out_MCR(up, mcr);
if (up->port.rs485.delay_rts_before_send > 0) {
em485->active_timer = &em485->start_tx_timer;
@@ -1619,6 +1618,8 @@ static void serial8250_disable_ms(struct uart_port *port)
if (up->bugs & UART_BUG_NOMSR)
return;
+ mctrl_gpio_disable_ms(up->gpios);
+
up->ier &= ~UART_IER_MSI;
serial_port_out(port, UART_IER, up->ier);
}
@@ -1631,6 +1632,8 @@ static void serial8250_enable_ms(struct uart_port *port)
if (up->bugs & UART_BUG_NOMSR)
return;
+ mctrl_gpio_enable_ms(up->gpios);
+
up->ier |= UART_IER_MSI;
serial8250_rpm_get(up);
@@ -1686,7 +1689,7 @@ static void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
lsr &= port->read_status_mask;
if (lsr & UART_LSR_BI) {
- DEBUG_INTR("handling break....");
+ pr_debug("%s: handling break\n", __func__);
flag = TTY_BREAK;
} else if (lsr & UART_LSR_PE)
flag = TTY_PARITY;
@@ -1757,7 +1760,7 @@ void serial8250_tx_chars(struct uart_8250_port *up)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- DEBUG_INTR("THRE...");
+ pr_debug("%s: THRE\n", __func__);
/*
* With RPM enabled, we have to wait until the FIFO is empty before the
@@ -1823,7 +1826,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial_port_in(port, UART_LSR);
- DEBUG_INTR("status = %x...", status);
+ pr_debug("%s: status = %x\n", __func__, status);
if (status & (UART_LSR_DR | UART_LSR_BI)) {
if (!up->dma || handle_rx_dma(up, iir))
@@ -1861,7 +1864,6 @@ static int serial8250_default_handle_irq(struct uart_port *port)
*/
static int exar_handle_irq(struct uart_port *port)
{
- unsigned char int0, int1, int2, int3;
unsigned int iir = serial_port_in(port, UART_IIR);
int ret;
@@ -1869,10 +1871,10 @@ static int exar_handle_irq(struct uart_port *port)
if ((port->type == PORT_XR17V35X) ||
(port->type == PORT_XR17D15X)) {
- int0 = serial_port_in(port, 0x80);
- int1 = serial_port_in(port, 0x81);
- int2 = serial_port_in(port, 0x82);
- int3 = serial_port_in(port, 0x83);
+ serial_port_in(port, 0x80);
+ serial_port_in(port, 0x81);
+ serial_port_in(port, 0x82);
+ serial_port_in(port, 0x83);
}
return ret;
@@ -1915,7 +1917,8 @@ unsigned int serial8250_do_get_mctrl(struct uart_port *port)
ret |= TIOCM_DSR;
if (status & UART_MSR_CTS)
ret |= TIOCM_CTS;
- return ret;
+
+ return mctrl_gpio_get(up->gpios, &ret);
}
EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl);
@@ -1944,7 +1947,7 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
- serial_port_out(port, UART_MCR, mcr);
+ serial8250_out_MCR(up, mcr);
}
EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
@@ -1994,8 +1997,6 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
- unsigned int tmout;
-
for (tmout = 1000000; tmout; tmout--) {
unsigned int msr = serial_in(up, UART_MSR);
up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
@@ -3093,7 +3094,7 @@ static void serial8250_console_restore(struct uart_8250_port *up)
serial8250_set_divisor(port, baud, quot, frac);
serial_port_out(port, UART_LCR, up->lcr);
- serial_port_out(port, UART_MCR, UART_MCR_DTR | UART_MCR_RTS);
+ serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
}
/*
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index efd1f9c047b1..b8d9c8c9d02a 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -35,7 +35,7 @@ struct uniphier8250_priv {
spinlock_t atomic_write_lock;
};
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && !defined(MODULE)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init uniphier_early_console_setup(struct earlycon_device *device,
const char *options)
{
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index e46761d20f7b..c9ec839a5ddf 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -6,6 +6,7 @@
config SERIAL_8250
tristate "8250/16550 and compatible serial support"
select SERIAL_CORE
+ select SERIAL_MCTRL_GPIO if GPIOLIB
---help---
This selects whether you want to include the driver for the standard
serial ports. The standard answer is Y. People who might say N
@@ -387,7 +388,8 @@ config SERIAL_8250_MT6577
config SERIAL_8250_UNIPHIER
tristate "Support for UniPhier on-chip UART"
- depends on SERIAL_8250 && ARCH_UNIPHIER
+ depends on SERIAL_8250
+ depends on ARCH_UNIPHIER || COMPILE_TEST
help
If you have a UniPhier based board and want to use the on-chip
serial ports, say Y to this option. If unsure, say N.
@@ -395,7 +397,7 @@ config SERIAL_8250_UNIPHIER
config SERIAL_8250_INGENIC
tristate "Support for Ingenic SoC serial ports"
depends on SERIAL_8250
- depends on (OF_FLATTREE && SERIAL_8250_CONSOLE) || !SERIAL_EARLYCON
+ depends on OF_FLATTREE
depends on MIPS || COMPILE_TEST
help
If you have a system using an Ingenic SoC and wish to make use of
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 7e3a58c8bb67..518db24a5b36 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -736,6 +736,7 @@ config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
depends on SUPERH || ARCH_RENESAS || H8300 || COMPILE_TEST
select SERIAL_CORE
+ select SERIAL_MCTRL_GPIO if GPIOLIB
config SERIAL_SH_SCI_NR_UARTS
int "Maximum number of SCI(F) serial ports"
@@ -1477,7 +1478,7 @@ config SERIAL_MPS2_UART_CONSOLE
config SERIAL_MPS2_UART
bool "MPS2 UART port"
- depends on ARM || COMPILE_TEST
+ depends on ARCH_MPS2 || COMPILE_TEST
select SERIAL_CORE
help
This driver support the UART ports on ARM MPS2.
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 1b7331e40d79..8a9e213387a7 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2553,11 +2553,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
if (!uap)
return -ENOMEM;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot obtain irq\n");
+ return ret;
+ }
+ uap->port.irq = ret;
+
uap->reg_offset = vendor_sbsa.reg_offset;
uap->vendor = &vendor_sbsa;
uap->fifosize = 32;
uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
- uap->port.irq = platform_get_irq(pdev, 0);
uap->port.ops = &sbsa_uart_pops;
uap->fixed_baud = baudrate;
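
The amba-pl011 hunk above (and the identical fsl_lpuart change below) fixes a silent failure: port.irq is unsigned, so assigning platform_get_irq()'s return value directly would turn a negative error, possibly -EPROBE_DEFER, into a bogus huge IRQ number. Checking the signed return first lets the error propagate:

/* sketch: never store platform_get_irq() straight into an unsigned field */
int irq = platform_get_irq(pdev, 0);

if (irq < 0)
	return irq;		/* may be -EPROBE_DEFER; let it propagate */
uap->port.irq = irq;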
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 954941dd8124..2eaa18ddef61 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -108,6 +108,12 @@ struct atmel_uart_char {
u16 ch;
};
+/*
+ * Be careful, the real size of the ring buffer is
+ * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer
+ * can contain up to 1024 characters in PIO mode and up to 4096 characters in
+ * DMA mode.
+ */
#define ATMEL_SERIAL_RINGSIZE 1024
/*
@@ -145,10 +151,10 @@ struct atmel_uart_port {
dma_cookie_t cookie_rx;
struct scatterlist sg_tx;
struct scatterlist sg_rx;
- struct tasklet_struct tasklet;
- unsigned int irq_status;
+ struct tasklet_struct tasklet_rx;
+ struct tasklet_struct tasklet_tx;
+ atomic_t tasklet_shutdown;
unsigned int irq_status_prev;
- unsigned int status_change;
unsigned int tx_len;
struct circ_buf rx_ring;
@@ -281,6 +287,13 @@ static bool atmel_use_fifo(struct uart_port *port)
return atmel_port->fifo_size;
}
+static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
+ struct tasklet_struct *t)
+{
+ if (!atomic_read(&atmel_port->tasklet_shutdown))
+ tasklet_schedule(t);
+}
+
static unsigned int atmel_get_lines_status(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -482,19 +495,21 @@ static void atmel_start_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_use_pdc_tx(port)) {
- if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
- /* The transmitter is already running. Yes, we
- really need this.*/
- return;
+ if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
+ & ATMEL_PDC_TXTEN))
+ /* The transmitter is already running. Yes, we
+ really need this.*/
+ return;
+ if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
if ((port->rs485.flags & SER_RS485_ENABLED) &&
!(port->rs485.flags & SER_RS485_RX_DURING_TX))
atmel_stop_rx(port);
+ if (atmel_use_pdc_tx(port))
/* re-enable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
- }
+
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
}
@@ -710,7 +725,7 @@ static void atmel_rx_chars(struct uart_port *port)
status = atmel_uart_readl(port, ATMEL_US_CSR);
}
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
}
/*
@@ -781,7 +796,7 @@ static void atmel_complete_tx_dma(void *arg)
* remaining data from the beginning of xmit->buf to xmit->head.
*/
if (!uart_circ_empty(xmit))
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -966,7 +981,7 @@ static void atmel_complete_rx_dma(void *arg)
struct uart_port *port = arg;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
}
static void atmel_release_rx_dma(struct uart_port *port)
@@ -1006,7 +1021,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
if (dmastat == DMA_ERROR) {
dev_dbg(port->dev, "Get residue error, restart tasklet\n");
atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
return;
}
@@ -1160,8 +1175,11 @@ static void atmel_uart_timer_callback(unsigned long data)
struct uart_port *port = (void *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- tasklet_schedule(&atmel_port->tasklet);
- mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port));
+ if (!atomic_read(&atmel_port->tasklet_shutdown)) {
+ tasklet_schedule(&atmel_port->tasklet_rx);
+ mod_timer(&atmel_port->uart_timer,
+ jiffies + uart_poll_timeout(port));
+ }
}
/*
@@ -1183,7 +1201,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
atmel_uart_writel(port, ATMEL_US_IDR,
(ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port,
+ &atmel_port->tasklet_rx);
}
if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
@@ -1195,7 +1214,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
if (pending & ATMEL_US_TIMEOUT) {
atmel_uart_writel(port, ATMEL_US_IDR,
ATMEL_US_TIMEOUT);
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port,
+ &atmel_port->tasklet_rx);
}
}
@@ -1225,7 +1245,7 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
/* Either PDC or interrupt transmission */
atmel_uart_writel(port, ATMEL_US_IDR,
atmel_port->tx_done_mask);
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
}
}
@@ -1237,14 +1257,27 @@ atmel_handle_status(struct uart_port *port, unsigned int pending,
unsigned int status)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned int status_change;
if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
| ATMEL_US_CTSIC)) {
- atmel_port->irq_status = status;
- atmel_port->status_change = atmel_port->irq_status ^
- atmel_port->irq_status_prev;
+ status_change = status ^ atmel_port->irq_status_prev;
atmel_port->irq_status_prev = status;
- tasklet_schedule(&atmel_port->tasklet);
+
+ if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
+ | ATMEL_US_DCD | ATMEL_US_CTS)) {
+ /* TODO: All reads to CSR will clear these interrupts! */
+ if (status_change & ATMEL_US_RI)
+ port->icount.rng++;
+ if (status_change & ATMEL_US_DSR)
+ port->icount.dsr++;
+ if (status_change & ATMEL_US_DCD)
+ uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
+ if (status_change & ATMEL_US_CTS)
+ uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
+
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+ }
}
}
@@ -1571,37 +1604,25 @@ static int atmel_prepare_rx_pdc(struct uart_port *port)
/*
* tasklet handling tty stuff outside the interrupt handler.
*/
-static void atmel_tasklet_func(unsigned long data)
+static void atmel_tasklet_rx_func(unsigned long data)
{
struct uart_port *port = (struct uart_port *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- unsigned int status = atmel_port->irq_status;
- unsigned int status_change = atmel_port->status_change;
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
-
- atmel_port->schedule_tx(port);
-
- if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
- | ATMEL_US_DCD | ATMEL_US_CTS)) {
- /* TODO: All reads to CSR will clear these interrupts! */
- if (status_change & ATMEL_US_RI)
- port->icount.rng++;
- if (status_change & ATMEL_US_DSR)
- port->icount.dsr++;
- if (status_change & ATMEL_US_DCD)
- uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
- if (status_change & ATMEL_US_CTS)
- uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
-
- wake_up_interruptible(&port->state->port.delta_msr_wait);
-
- atmel_port->status_change = 0;
- }
-
atmel_port->schedule_rx(port);
+ spin_unlock(&port->lock);
+}
+
+static void atmel_tasklet_tx_func(unsigned long data)
+{
+ struct uart_port *port = (struct uart_port *)data;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ /* The interrupt handler does not take the lock */
+ spin_lock(&port->lock);
+ atmel_port->schedule_tx(port);
spin_unlock(&port->lock);
}
@@ -1785,7 +1806,11 @@ static int atmel_startup(struct uart_port *port)
return retval;
}
- tasklet_enable(&atmel_port->tasklet);
+ atomic_set(&atmel_port->tasklet_shutdown, 0);
+ tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
+ (unsigned long)port);
+ tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
+ (unsigned long)port);
/*
* Initialize DMA (if necessary)
@@ -1833,7 +1858,6 @@ static int atmel_startup(struct uart_port *port)
/* Save current CSR for comparison in atmel_tasklet_func() */
atmel_port->irq_status_prev = atmel_get_lines_status(port);
- atmel_port->irq_status = atmel_port->irq_status_prev;
/*
* Finally, enable the serial port
@@ -1905,29 +1929,36 @@ static void atmel_shutdown(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ /* Disable interrupts at device level */
+ atmel_uart_writel(port, ATMEL_US_IDR, -1);
+
+ /* Prevent spurious interrupts from scheduling the tasklet */
+ atomic_inc(&atmel_port->tasklet_shutdown);
+
/*
* Prevent any tasklets being scheduled during
* cleanup
*/
del_timer_sync(&atmel_port->uart_timer);
+ /* Make sure that no interrupt is on the fly */
+ synchronize_irq(port->irq);
+
/*
* Clear out any scheduled tasklets before
* we destroy the buffers
*/
- tasklet_disable(&atmel_port->tasklet);
- tasklet_kill(&atmel_port->tasklet);
+ tasklet_kill(&atmel_port->tasklet_rx);
+ tasklet_kill(&atmel_port->tasklet_tx);
/*
* Ensure everything is stopped and
- * disable all interrupts, port and break condition.
+ * disable port and break condition.
*/
atmel_stop_rx(port);
atmel_stop_tx(port);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
- atmel_uart_writel(port, ATMEL_US_IDR, -1);
-
/*
* Shut-down the DMA.
@@ -2311,10 +2342,6 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
port->irq = pdev->resource[1].start;
port->rs485_config = atmel_config_rs485;
- tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
- (unsigned long)port);
- tasklet_disable(&atmel_port->tasklet);
-
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
if (pdata && pdata->regs) {
@@ -2699,6 +2726,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
atmel_port->uart.line = ret;
atmel_serial_probe_fifos(atmel_port, pdev);
+ atomic_set(&atmel_port->tasklet_shutdown, 0);
spin_lock_init(&atmel_port->lock_suspended);
ret = atmel_init_port(atmel_port, pdev);
@@ -2795,7 +2823,8 @@ static int atmel_serial_remove(struct platform_device *pdev)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int ret = 0;
- tasklet_kill(&atmel_port->tasklet);
+ tasklet_kill(&atmel_port->tasklet_rx);
+ tasklet_kill(&atmel_port->tasklet_tx);
device_init_wakeup(&pdev->dev, 0);
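
The atmel_serial.c rework splits the single tasklet into dedicated RX and TX tasklets, moves the modem-status bookkeeping from tasklet context into the interrupt handler itself (removing the irq_status/status_change shuttle), and adds an atomic tasklet_shutdown gate so a late interrupt cannot re-schedule a tasklet that shutdown is about to kill. The teardown ordering is the load-bearing part; sketched below with mask_device_irqs() as a hypothetical stand-in for the ATMEL_US_IDR write:

mask_device_irqs(port);				/* hypothetical helper */
atomic_inc(&atmel_port->tasklet_shutdown);	/* gate new schedules */
del_timer_sync(&atmel_port->uart_timer);
synchronize_irq(port->irq);			/* drain in-flight handlers */
tasklet_kill(&atmel_port->tasklet_rx);		/* nothing re-schedules now */
tasklet_kill(&atmel_port->tasklet_tx);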
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index c28e5c24da16..5108fab953aa 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -813,8 +813,12 @@ static int bcm_uart_probe(struct platform_device *pdev)
struct clk *clk;
int ret;
- if (pdev->dev.of_node)
- pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+ if (pdev->dev.of_node) {
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+
+ if (pdev->id < 0)
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+ }
if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
return -EINVAL;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 3d790033744e..7f95f782a485 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1830,7 +1830,13 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.dev = &pdev->dev;
sport->port.type = PORT_LPUART;
sport->port.iotype = UPIO_MEM;
- sport->port.irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot obtain irq\n");
+ return ret;
+ }
+ sport->port.irq = ret;
+
if (sport->lpuart32)
sport->port.ops = &lpuart32_pops;
else
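
The motivation for this hunk: platform_get_irq() returns a negative errno on
failure (including -EPROBE_DEFER), and assigning it straight into the unsigned
port->irq silently destroys the error. The same idiom recurs in the serial-tegra
hunk later in this series; sketched:

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;		/* propagates -EPROBE_DEFER intact */
	sport->port.irq = ret;		/* store only a validated IRQ number */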
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 68765f7c2645..218b7118e85d 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -30,7 +30,6 @@
#define SUPPORT_SYSRQ
#endif
-#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/ioport.h>
@@ -51,9 +50,6 @@
#define PASS_LIMIT 256
-/* Standard COM flags */
-#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
-
static const struct {
unsigned int port;
unsigned int irq;
@@ -892,7 +888,7 @@ static void __init m32r_sio_init_ports(void)
up->port.iobase = old_serial_port[i].port;
up->port.irq = irq_canonicalize(old_serial_port[i].irq);
up->port.uartclk = BAUD_RATE * 16;
- up->port.flags = STD_COM_FLAGS;
+ up->port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
up->port.membase = 0;
up->port.iotype = 0;
up->port.regshift = 0;
@@ -1060,19 +1056,4 @@ static int __init m32r_sio_init(void)
return ret;
}
-
-static void __exit m32r_sio_exit(void)
-{
- int i;
-
- for (i = 0; i < UART_NR; i++)
- uart_remove_one_port(&m32r_sio_reg, &m32r_sio_ports[i].port);
-
- uart_unregister_driver(&m32r_sio_reg);
-}
-
-module_init(m32r_sio_init);
-module_exit(m32r_sio_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Generic M32R SIO serial driver");
+device_initcall(m32r_sio_init);
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 3f6e0ab725fe..9360801df3c4 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1,7 +1,7 @@
/*
* Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
*
- * Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
+ * Copyright (C) 2012-2016 Alexander Shiyan <shc_work@mail.ru>
*
* Based on max3100.c, by Christian Pellegrin <chripell@evolware.org>
* Based on max3110.c, by Feng Tang <feng.tang@intel.com>
@@ -32,6 +32,7 @@
#define MAX310X_NAME "max310x"
#define MAX310X_MAJOR 204
#define MAX310X_MINOR 209
+#define MAX310X_UART_NRMAX 16
/* MAX310X register definitions */
#define MAX310X_RHR_REG (0x00) /* RX FIFO */
@@ -155,10 +156,6 @@
#define MAX310X_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */
#define MAX310X_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */
#define MAX310X_LCR_RTS_BIT (1 << 7) /* RTS pin control */
-#define MAX310X_LCR_WORD_LEN_5 (0x00)
-#define MAX310X_LCR_WORD_LEN_6 (0x01)
-#define MAX310X_LCR_WORD_LEN_7 (0x02)
-#define MAX310X_LCR_WORD_LEN_8 (0x03)
/* IRDA register bits */
#define MAX310X_IRDA_IRDAEN_BIT (1 << 0) /* IRDA mode enable */
@@ -262,10 +259,10 @@ struct max310x_one {
struct uart_port port;
struct work_struct tx_work;
struct work_struct md_work;
+ struct work_struct rs_work;
};
struct max310x_port {
- struct uart_driver uart;
struct max310x_devtype *devtype;
struct regmap *regmap;
struct mutex mutex;
@@ -276,6 +273,17 @@ struct max310x_port {
struct max310x_one p[0];
};
+static struct uart_driver max310x_uart = {
+ .owner = THIS_MODULE,
+ .driver_name = MAX310X_NAME,
+ .dev_name = "ttyMAX",
+ .major = MAX310X_MAJOR,
+ .minor = MAX310X_MINOR,
+ .nr = MAX310X_UART_NRMAX,
+};
+
+static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX);
+
static u8 max310x_port_read(struct uart_port *port, u8 reg)
{
struct max310x_port *s = dev_get_drvdata(port->dev);
@@ -594,9 +602,7 @@ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
unsigned int sts, ch, flag;
if (unlikely(rxlen >= port->fifosize)) {
- dev_warn_ratelimited(port->dev,
- "Port %i: Possible RX FIFO overrun\n",
- port->line);
+ dev_warn_ratelimited(port->dev, "Possible RX FIFO overrun\n");
port->icount.buf_overrun++;
/* Ensure sanity of RX level */
rxlen = port->fifosize;
@@ -715,13 +721,13 @@ static irqreturn_t max310x_ist(int irq, void *dev_id)
{
struct max310x_port *s = (struct max310x_port *)dev_id;
- if (s->uart.nr > 1) {
+ if (s->devtype->nr > 1) {
do {
unsigned int val = ~0;
WARN_ON_ONCE(regmap_read(s->regmap,
MAX310X_GLOBALIRQ_REG, &val));
- val = ((1 << s->uart.nr) - 1) & ~val;
+ val = ((1 << s->devtype->nr) - 1) & ~val;
if (!val)
break;
max310x_port_irq(s, fls(val) - 1);
@@ -796,7 +802,7 @@ static void max310x_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
- unsigned int lcr, flow = 0;
+ unsigned int lcr = 0, flow = 0;
int baud;
/* Mask termios capabilities we don't support */
@@ -805,17 +811,16 @@ static void max310x_set_termios(struct uart_port *port,
/* Word size */
switch (termios->c_cflag & CSIZE) {
case CS5:
- lcr = MAX310X_LCR_WORD_LEN_5;
break;
case CS6:
- lcr = MAX310X_LCR_WORD_LEN_6;
+ lcr = MAX310X_LCR_LENGTH0_BIT;
break;
case CS7:
- lcr = MAX310X_LCR_WORD_LEN_7;
+ lcr = MAX310X_LCR_LENGTH1_BIT;
break;
case CS8:
default:
- lcr = MAX310X_LCR_WORD_LEN_8;
+ lcr = MAX310X_LCR_LENGTH1_BIT | MAX310X_LCR_LENGTH0_BIT;
break;
}
@@ -877,36 +882,45 @@ static void max310x_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
}
-static int max310x_rs485_config(struct uart_port *port,
- struct serial_rs485 *rs485)
+static void max310x_rs_proc(struct work_struct *ws)
{
+ struct max310x_one *one = container_of(ws, struct max310x_one, rs_work);
unsigned int val;
- if (rs485->delay_rts_before_send > 0x0f ||
- rs485->delay_rts_after_send > 0x0f)
- return -ERANGE;
+ val = (one->port.rs485.delay_rts_before_send << 4) |
+ one->port.rs485.delay_rts_after_send;
+ max310x_port_write(&one->port, MAX310X_HDPIXDELAY_REG, val);
- val = (rs485->delay_rts_before_send << 4) |
- rs485->delay_rts_after_send;
- max310x_port_write(port, MAX310X_HDPIXDELAY_REG, val);
- if (rs485->flags & SER_RS485_ENABLED) {
- max310x_port_update(port, MAX310X_MODE1_REG,
+ if (one->port.rs485.flags & SER_RS485_ENABLED) {
+ max310x_port_update(&one->port, MAX310X_MODE1_REG,
MAX310X_MODE1_TRNSCVCTRL_BIT,
MAX310X_MODE1_TRNSCVCTRL_BIT);
- max310x_port_update(port, MAX310X_MODE2_REG,
+ max310x_port_update(&one->port, MAX310X_MODE2_REG,
MAX310X_MODE2_ECHOSUPR_BIT,
MAX310X_MODE2_ECHOSUPR_BIT);
} else {
- max310x_port_update(port, MAX310X_MODE1_REG,
+ max310x_port_update(&one->port, MAX310X_MODE1_REG,
MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
- max310x_port_update(port, MAX310X_MODE2_REG,
+ max310x_port_update(&one->port, MAX310X_MODE2_REG,
MAX310X_MODE2_ECHOSUPR_BIT, 0);
}
+}
+
+static int max310x_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct max310x_one *one = container_of(port, struct max310x_one, port);
+
+ if ((rs485->delay_rts_before_send > 0x0f) ||
+ (rs485->delay_rts_after_send > 0x0f))
+ return -ERANGE;
rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_ENABLED;
memset(rs485->padding, 0, sizeof(rs485->padding));
port->rs485 = *rs485;
+ schedule_work(&one->rs_work);
+
return 0;
}
@@ -1009,8 +1023,8 @@ static int __maybe_unused max310x_suspend(struct device *dev)
struct max310x_port *s = dev_get_drvdata(dev);
int i;
- for (i = 0; i < s->uart.nr; i++) {
- uart_suspend_port(&s->uart, &s->p[i].port);
+ for (i = 0; i < s->devtype->nr; i++) {
+ uart_suspend_port(&max310x_uart, &s->p[i].port);
s->devtype->power(&s->p[i].port, 0);
}
@@ -1022,9 +1036,9 @@ static int __maybe_unused max310x_resume(struct device *dev)
struct max310x_port *s = dev_get_drvdata(dev);
int i;
- for (i = 0; i < s->uart.nr; i++) {
+ for (i = 0; i < s->devtype->nr; i++) {
s->devtype->power(&s->p[i].port, 1);
- uart_resume_port(&s->uart, &s->p[i].port);
+ uart_resume_port(&max310x_uart, &s->p[i].port);
}
return 0;
@@ -1159,18 +1173,6 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
uartclk = max310x_set_ref_clk(s, freq, xtal);
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
- /* Register UART driver */
- s->uart.owner = THIS_MODULE;
- s->uart.dev_name = "ttyMAX";
- s->uart.major = MAX310X_MAJOR;
- s->uart.minor = MAX310X_MINOR;
- s->uart.nr = devtype->nr;
- ret = uart_register_driver(&s->uart);
- if (ret) {
- dev_err(dev, "Registering UART driver failed\n");
- goto out_clk;
- }
-
#ifdef CONFIG_GPIOLIB
/* Setup GPIO controller */
s->gpio.owner = THIS_MODULE;
@@ -1183,16 +1185,24 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
s->gpio.base = -1;
s->gpio.ngpio = devtype->nr * 4;
s->gpio.can_sleep = 1;
- ret = gpiochip_add_data(&s->gpio, s);
+ ret = devm_gpiochip_add_data(dev, &s->gpio, s);
if (ret)
- goto out_uart;
+ goto out_clk;
#endif
mutex_init(&s->mutex);
for (i = 0; i < devtype->nr; i++) {
+ unsigned int line;
+
+ line = find_first_zero_bit(max310x_lines, MAX310X_UART_NRMAX);
+ if (line == MAX310X_UART_NRMAX) {
+ ret = -ERANGE;
+ goto out_uart;
+ }
+
/* Initialize port data */
- s->p[i].port.line = i;
+ s->p[i].port.line = line;
s->p[i].port.dev = dev;
s->p[i].port.irq = irq;
s->p[i].port.type = PORT_MAX310X;
@@ -1214,10 +1224,19 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
MAX310X_MODE1_IRQSEL_BIT);
/* Initialize queue for start TX */
INIT_WORK(&s->p[i].tx_work, max310x_wq_proc);
- /* Initialize queue for changing mode */
+ /* Initialize queue for changing LOOPBACK mode */
INIT_WORK(&s->p[i].md_work, max310x_md_proc);
+ /* Initialize queue for changing RS485 mode */
+ INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
+
/* Register port */
- uart_add_one_port(&s->uart, &s->p[i].port);
+ ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
+ if (ret) {
+ s->p[i].port.dev = NULL;
+ goto out_uart;
+ }
+ set_bit(line, max310x_lines);
+
/* Go to suspend mode */
devtype->power(&s->p[i].port, 0);
}
@@ -1230,14 +1249,15 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
dev_err(dev, "Unable to reguest IRQ %i\n", irq);
- mutex_destroy(&s->mutex);
-
-#ifdef CONFIG_GPIOLIB
- gpiochip_remove(&s->gpio);
-
out_uart:
-#endif
- uart_unregister_driver(&s->uart);
+ for (i = 0; i < devtype->nr; i++) {
+ if (s->p[i].port.dev) {
+ uart_remove_one_port(&max310x_uart, &s->p[i].port);
+ clear_bit(s->p[i].port.line, max310x_lines);
+ }
+ }
+
+ mutex_destroy(&s->mutex);
out_clk:
clk_disable_unprepare(s->clk);
@@ -1250,19 +1270,16 @@ static int max310x_remove(struct device *dev)
struct max310x_port *s = dev_get_drvdata(dev);
int i;
-#ifdef CONFIG_GPIOLIB
- gpiochip_remove(&s->gpio);
-#endif
-
- for (i = 0; i < s->uart.nr; i++) {
+ for (i = 0; i < s->devtype->nr; i++) {
cancel_work_sync(&s->p[i].tx_work);
cancel_work_sync(&s->p[i].md_work);
- uart_remove_one_port(&s->uart, &s->p[i].port);
+ cancel_work_sync(&s->p[i].rs_work);
+ uart_remove_one_port(&max310x_uart, &s->p[i].port);
+ clear_bit(s->p[i].port.line, max310x_lines);
s->devtype->power(&s->p[i].port, 0);
}
mutex_destroy(&s->mutex);
- uart_unregister_driver(&s->uart);
clk_disable_unprepare(s->clk);
return 0;
@@ -1335,7 +1352,7 @@ static const struct spi_device_id max310x_id_table[] = {
};
MODULE_DEVICE_TABLE(spi, max310x_id_table);
-static struct spi_driver max310x_uart_driver = {
+static struct spi_driver max310x_spi_driver = {
.driver = {
.name = MAX310X_NAME,
.of_match_table = of_match_ptr(max310x_dt_ids),
@@ -1345,9 +1362,36 @@ static struct spi_driver max310x_uart_driver = {
.remove = max310x_spi_remove,
.id_table = max310x_id_table,
};
-module_spi_driver(max310x_uart_driver);
#endif
+static int __init max310x_uart_init(void)
+{
+ int ret;
+
+ bitmap_zero(max310x_lines, MAX310X_UART_NRMAX);
+
+ ret = uart_register_driver(&max310x_uart);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_SPI_MASTER
+ spi_register_driver(&max310x_spi_driver);
+#endif
+
+ return 0;
+}
+module_init(max310x_uart_init);
+
+static void __exit max310x_uart_exit(void)
+{
+#ifdef CONFIG_SPI_MASTER
+ spi_unregister_driver(&max310x_spi_driver);
+#endif
+
+ uart_unregister_driver(&max310x_uart);
+}
+module_exit(max310x_uart_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("MAX310X serial driver");
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
index da9e27d3c263..492ec4b375a0 100644
--- a/drivers/tty/serial/mps2-uart.c
+++ b/drivers/tty/serial/mps2-uart.c
@@ -1,4 +1,6 @@
/*
+ * MPS2 UART driver
+ *
* Copyright (C) 2015 ARM Limited
*
* Author: Vladimir Murzin <vladimir.murzin@arm.com>
@@ -17,7 +19,6 @@
#include <linux/console.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -569,30 +570,20 @@ static int mps2_serial_probe(struct platform_device *pdev)
return 0;
}
-static int mps2_serial_remove(struct platform_device *pdev)
-{
- struct mps2_uart_port *mps_port = platform_get_drvdata(pdev);
-
- uart_remove_one_port(&mps2_uart_driver, &mps_port->port);
-
- return 0;
-}
-
#ifdef CONFIG_OF
static const struct of_device_id mps2_match[] = {
{ .compatible = "arm,mps2-uart", },
{},
};
-MODULE_DEVICE_TABLE(of, mps2_match);
#endif
static struct platform_driver mps2_serial_driver = {
.probe = mps2_serial_probe,
- .remove = mps2_serial_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(mps2_match),
+ .suppress_bind_attrs = true,
},
};
@@ -610,16 +601,4 @@ static int __init mps2_uart_init(void)
return ret;
}
-module_init(mps2_uart_init);
-
-static void __exit mps2_uart_exit(void)
-{
- platform_driver_unregister(&mps2_serial_driver);
- uart_unregister_driver(&mps2_uart_driver);
-}
-module_exit(mps2_uart_exit);
-
-MODULE_AUTHOR("Vladimir Murzin <vladimir.murzin@arm.com>");
-MODULE_DESCRIPTION("MPS2 UART driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
+arch_initcall(mps2_uart_init);
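
A pattern that recurs through this series (mps2, pxa, vt8500, m32r): drivers that
can only be built in drop their remove and module-exit paths, register via an
initcall, and set suppress_bind_attrs so sysfs cannot unbind a device that can no
longer be torn down. Sketched with the mps2 names from the hunk above:

	static struct platform_driver mps2_serial_driver = {
		.probe  = mps2_serial_probe,
		/* no .remove: the driver cannot be unloaded */
		.driver = {
			.name                = DRIVER_NAME,
			.of_match_table      = of_match_ptr(mps2_match),
			.suppress_bind_attrs = true,	/* forbid sysfs unbind */
		},
	};
	arch_initcall(mps2_uart_init);		/* builtin-only; no module_exit() */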
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index b7d80bd57db9..7312e7e01b7e 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -19,33 +19,147 @@
# define SUPPORT_SYSRQ
#endif
+#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/ioport.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
-#include <linux/serial.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_device.h>
-
-#include "msm_serial.h"
-
-#define UARTDM_BURST_SIZE 16 /* in bytes */
-#define UARTDM_TX_AIGN(x) ((x) & ~0x3) /* valid for > 1p3 */
-#define UARTDM_TX_MAX 256 /* in bytes, valid for <= 1p3 */
-#define UARTDM_RX_SIZE (UART_XMIT_SIZE / 4)
+#include <linux/wait.h>
+
+#define UART_MR1 0x0000
+
+#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
+#define UART_MR1_RX_RDY_CTL BIT(7)
+#define UART_MR1_CTS_CTL BIT(6)
+
+#define UART_MR2 0x0004
+#define UART_MR2_ERROR_MODE BIT(6)
+#define UART_MR2_BITS_PER_CHAR 0x30
+#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define UART_MR2_PARITY_MODE_NONE 0x0
+#define UART_MR2_PARITY_MODE_ODD 0x1
+#define UART_MR2_PARITY_MODE_EVEN 0x2
+#define UART_MR2_PARITY_MODE_SPACE 0x3
+#define UART_MR2_PARITY_MODE 0x3
+
+#define UART_CSR 0x0008
+
+#define UART_TF 0x000C
+#define UARTDM_TF 0x0070
+
+#define UART_CR 0x0010
+#define UART_CR_CMD_NULL (0 << 4)
+#define UART_CR_CMD_RESET_RX (1 << 4)
+#define UART_CR_CMD_RESET_TX (2 << 4)
+#define UART_CR_CMD_RESET_ERR (3 << 4)
+#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define UART_CR_CMD_START_BREAK (5 << 4)
+#define UART_CR_CMD_STOP_BREAK (6 << 4)
+#define UART_CR_CMD_RESET_CTS (7 << 4)
+#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
+#define UART_CR_CMD_PACKET_MODE (9 << 4)
+#define UART_CR_CMD_MODE_RESET (12 << 4)
+#define UART_CR_CMD_SET_RFR (13 << 4)
+#define UART_CR_CMD_RESET_RFR (14 << 4)
+#define UART_CR_CMD_PROTECTION_EN (16 << 4)
+#define UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
+#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define UART_CR_CMD_FORCE_STALE (4 << 8)
+#define UART_CR_CMD_RESET_TX_READY (3 << 8)
+#define UART_CR_TX_DISABLE BIT(3)
+#define UART_CR_TX_ENABLE BIT(2)
+#define UART_CR_RX_DISABLE BIT(1)
+#define UART_CR_RX_ENABLE BIT(0)
+#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
+
+#define UART_IMR 0x0014
+#define UART_IMR_TXLEV BIT(0)
+#define UART_IMR_RXSTALE BIT(3)
+#define UART_IMR_RXLEV BIT(4)
+#define UART_IMR_DELTA_CTS BIT(5)
+#define UART_IMR_CURRENT_CTS BIT(6)
+#define UART_IMR_RXBREAK_START BIT(10)
+
+#define UART_IPR_RXSTALE_LAST 0x20
+#define UART_IPR_STALE_LSB 0x1F
+#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+#define UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
+
+#define UART_IPR 0x0018
+#define UART_TFWR 0x001C
+#define UART_RFWR 0x0020
+#define UART_HCR 0x0024
+
+#define UART_MREG 0x0028
+#define UART_NREG 0x002C
+#define UART_DREG 0x0030
+#define UART_MNDREG 0x0034
+#define UART_IRDA 0x0038
+#define UART_MISR_MODE 0x0040
+#define UART_MISR_RESET 0x0044
+#define UART_MISR_EXPORT 0x0048
+#define UART_MISR_VAL 0x004C
+#define UART_TEST_CTRL 0x0050
+
+#define UART_SR 0x0008
+#define UART_SR_HUNT_CHAR BIT(7)
+#define UART_SR_RX_BREAK BIT(6)
+#define UART_SR_PAR_FRAME_ERR BIT(5)
+#define UART_SR_OVERRUN BIT(4)
+#define UART_SR_TX_EMPTY BIT(3)
+#define UART_SR_TX_READY BIT(2)
+#define UART_SR_RX_FULL BIT(1)
+#define UART_SR_RX_READY BIT(0)
+
+#define UART_RF 0x000C
+#define UARTDM_RF 0x0070
+#define UART_MISR 0x0010
+#define UART_ISR 0x0014
+#define UART_ISR_TX_READY BIT(7)
+
+#define UARTDM_RXFS 0x50
+#define UARTDM_RXFS_BUF_SHIFT 0x7
+#define UARTDM_RXFS_BUF_MASK 0x7
+
+#define UARTDM_DMEN 0x3C
+#define UARTDM_DMEN_RX_SC_ENABLE BIT(5)
+#define UARTDM_DMEN_TX_SC_ENABLE BIT(4)
+
+#define UARTDM_DMEN_TX_BAM_ENABLE BIT(2) /* UARTDM_1P4 */
+#define UARTDM_DMEN_TX_DM_ENABLE BIT(0) /* < UARTDM_1P4 */
+
+#define UARTDM_DMEN_RX_BAM_ENABLE BIT(3) /* UARTDM_1P4 */
+#define UARTDM_DMEN_RX_DM_ENABLE BIT(1) /* < UARTDM_1P4 */
+
+#define UARTDM_DMRX 0x34
+#define UARTDM_NCF_TX 0x40
+#define UARTDM_RX_TOTAL_SNAP 0x38
+
+#define UARTDM_BURST_SIZE 16 /* in bytes */
+#define UARTDM_TX_AIGN(x) ((x) & ~0x3) /* valid for > 1p3 */
+#define UARTDM_TX_MAX 256 /* in bytes, valid for <= 1p3 */
+#define UARTDM_RX_SIZE (UART_XMIT_SIZE / 4)
enum {
UARTDM_1P1 = 1,
@@ -78,10 +192,65 @@ struct msm_port {
struct msm_dma rx_dma;
};
+#define UART_TO_MSM(uart_port) container_of(uart_port, struct msm_port, uart)
+
+static
+void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
+{
+ writel_relaxed(val, port->membase + off);
+}
+
+static
+unsigned int msm_read(struct uart_port *port, unsigned int off)
+{
+ return readl_relaxed(port->membase + off);
+}
+
+/*
+ * Setup the MND registers to use the TCXO clock.
+ */
+static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
+{
+ msm_write(port, 0x06, UART_MREG);
+ msm_write(port, 0xF1, UART_NREG);
+ msm_write(port, 0x0F, UART_DREG);
+ msm_write(port, 0x1A, UART_MNDREG);
+ port->uartclk = 1843200;
+}
+
+/*
+ * Setup the MND registers to use the TCXO clock divided by 4.
+ */
+static void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
+{
+ msm_write(port, 0x18, UART_MREG);
+ msm_write(port, 0xF6, UART_NREG);
+ msm_write(port, 0x0F, UART_DREG);
+ msm_write(port, 0x0A, UART_MNDREG);
+ port->uartclk = 1843200;
+}
+
+static void msm_serial_set_mnd_regs(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ /*
+ * These registers don't exist, so we change the clk input rate
+ * on uartdm hardware instead.
+ */
+ if (msm_port->is_uartdm)
+ return;
+
+ if (port->uartclk == 19200000)
+ msm_serial_set_mnd_regs_tcxo(port);
+ else if (port->uartclk == 4800000)
+ msm_serial_set_mnd_regs_tcxoby4(port);
+}
+
static void msm_handle_tx(struct uart_port *port);
static void msm_start_rx_dma(struct msm_port *msm_port);
-void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
+static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
{
struct device *dev = port->dev;
unsigned int mapped;
@@ -388,10 +557,6 @@ static void msm_complete_rx_dma(void *args)
val &= ~dma->enable_bit;
msm_write(port, val, UARTDM_DMEN);
- /* Restore interrupts */
- msm_port->imr |= UART_IMR_RXLEV | UART_IMR_RXSTALE;
- msm_write(port, msm_port->imr, UART_IMR);
-
if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
@@ -726,7 +891,7 @@ static void msm_handle_tx(struct uart_port *port)
return;
}
- pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
dma_min = 1; /* Always DMA */
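
The CIRC_CNT() to CIRC_CNT_TO_END() change above is subtle but real: CIRC_CNT()
counts every pending byte, including those past the wrap point of the ring, while
a PIO copy out of &xmit->buf[xmit->tail] can only cover one linear region. Using
the standard <linux/circ_buf.h> macros:

	/* total pending bytes, possibly wrapping past the buffer end */
	count  = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
	/* pending bytes up to the buffer end: one contiguous chunk */
	to_end = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	/* a linear copy from &xmit->buf[xmit->tail] must use to_end */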
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
deleted file mode 100644
index 178645826f16..000000000000
--- a/drivers/tty/serial/msm_serial.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Author: Robert Love <rlove@google.com>
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __DRIVERS_SERIAL_MSM_SERIAL_H
-#define __DRIVERS_SERIAL_MSM_SERIAL_H
-
-#define UART_MR1 0x0000
-
-#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
-#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
-#define UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
-#define UART_MR1_RX_RDY_CTL BIT(7)
-#define UART_MR1_CTS_CTL BIT(6)
-
-#define UART_MR2 0x0004
-#define UART_MR2_ERROR_MODE BIT(6)
-#define UART_MR2_BITS_PER_CHAR 0x30
-#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
-#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
-#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
-#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
-#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
-#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
-#define UART_MR2_PARITY_MODE_NONE 0x0
-#define UART_MR2_PARITY_MODE_ODD 0x1
-#define UART_MR2_PARITY_MODE_EVEN 0x2
-#define UART_MR2_PARITY_MODE_SPACE 0x3
-#define UART_MR2_PARITY_MODE 0x3
-
-#define UART_CSR 0x0008
-
-#define UART_TF 0x000C
-#define UARTDM_TF 0x0070
-
-#define UART_CR 0x0010
-#define UART_CR_CMD_NULL (0 << 4)
-#define UART_CR_CMD_RESET_RX (1 << 4)
-#define UART_CR_CMD_RESET_TX (2 << 4)
-#define UART_CR_CMD_RESET_ERR (3 << 4)
-#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
-#define UART_CR_CMD_START_BREAK (5 << 4)
-#define UART_CR_CMD_STOP_BREAK (6 << 4)
-#define UART_CR_CMD_RESET_CTS (7 << 4)
-#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
-#define UART_CR_CMD_PACKET_MODE (9 << 4)
-#define UART_CR_CMD_MODE_RESET (12 << 4)
-#define UART_CR_CMD_SET_RFR (13 << 4)
-#define UART_CR_CMD_RESET_RFR (14 << 4)
-#define UART_CR_CMD_PROTECTION_EN (16 << 4)
-#define UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
-#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
-#define UART_CR_CMD_FORCE_STALE (4 << 8)
-#define UART_CR_CMD_RESET_TX_READY (3 << 8)
-#define UART_CR_TX_DISABLE BIT(3)
-#define UART_CR_TX_ENABLE BIT(2)
-#define UART_CR_RX_DISABLE BIT(1)
-#define UART_CR_RX_ENABLE BIT(0)
-#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
-
-#define UART_IMR 0x0014
-#define UART_IMR_TXLEV BIT(0)
-#define UART_IMR_RXSTALE BIT(3)
-#define UART_IMR_RXLEV BIT(4)
-#define UART_IMR_DELTA_CTS BIT(5)
-#define UART_IMR_CURRENT_CTS BIT(6)
-#define UART_IMR_RXBREAK_START BIT(10)
-
-#define UART_IPR_RXSTALE_LAST 0x20
-#define UART_IPR_STALE_LSB 0x1F
-#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
-#define UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
-
-#define UART_IPR 0x0018
-#define UART_TFWR 0x001C
-#define UART_RFWR 0x0020
-#define UART_HCR 0x0024
-
-#define UART_MREG 0x0028
-#define UART_NREG 0x002C
-#define UART_DREG 0x0030
-#define UART_MNDREG 0x0034
-#define UART_IRDA 0x0038
-#define UART_MISR_MODE 0x0040
-#define UART_MISR_RESET 0x0044
-#define UART_MISR_EXPORT 0x0048
-#define UART_MISR_VAL 0x004C
-#define UART_TEST_CTRL 0x0050
-
-#define UART_SR 0x0008
-#define UART_SR_HUNT_CHAR BIT(7)
-#define UART_SR_RX_BREAK BIT(6)
-#define UART_SR_PAR_FRAME_ERR BIT(5)
-#define UART_SR_OVERRUN BIT(4)
-#define UART_SR_TX_EMPTY BIT(3)
-#define UART_SR_TX_READY BIT(2)
-#define UART_SR_RX_FULL BIT(1)
-#define UART_SR_RX_READY BIT(0)
-
-#define UART_RF 0x000C
-#define UARTDM_RF 0x0070
-#define UART_MISR 0x0010
-#define UART_ISR 0x0014
-#define UART_ISR_TX_READY BIT(7)
-
-#define UARTDM_RXFS 0x50
-#define UARTDM_RXFS_BUF_SHIFT 0x7
-#define UARTDM_RXFS_BUF_MASK 0x7
-
-#define UARTDM_DMEN 0x3C
-#define UARTDM_DMEN_RX_SC_ENABLE BIT(5)
-#define UARTDM_DMEN_TX_SC_ENABLE BIT(4)
-
-#define UARTDM_DMEN_TX_BAM_ENABLE BIT(2) /* UARTDM_1P4 */
-#define UARTDM_DMEN_TX_DM_ENABLE BIT(0) /* < UARTDM_1P4 */
-
-#define UARTDM_DMEN_RX_BAM_ENABLE BIT(3) /* UARTDM_1P4 */
-#define UARTDM_DMEN_RX_DM_ENABLE BIT(1) /* < UARTDM_1P4 */
-
-#define UARTDM_DMRX 0x34
-#define UARTDM_NCF_TX 0x40
-#define UARTDM_RX_TOTAL_SNAP 0x38
-
-#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
-
-static inline
-void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
-{
- writel_relaxed(val, port->membase + off);
-}
-
-static inline
-unsigned int msm_read(struct uart_port *port, unsigned int off)
-{
- return readl_relaxed(port->membase + off);
-}
-
-/*
- * Setup the MND registers to use the TCXO clock.
- */
-static inline void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
-{
- msm_write(port, 0x06, UART_MREG);
- msm_write(port, 0xF1, UART_NREG);
- msm_write(port, 0x0F, UART_DREG);
- msm_write(port, 0x1A, UART_MNDREG);
- port->uartclk = 1843200;
-}
-
-/*
- * Setup the MND registers to use the TCXO clock divided by 4.
- */
-static inline void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
-{
- msm_write(port, 0x18, UART_MREG);
- msm_write(port, 0xF6, UART_NREG);
- msm_write(port, 0x0F, UART_DREG);
- msm_write(port, 0x0A, UART_MNDREG);
- port->uartclk = 1843200;
-}
-
-static inline
-void msm_serial_set_mnd_regs_from_uartclk(struct uart_port *port)
-{
- if (port->uartclk == 19200000)
- msm_serial_set_mnd_regs_tcxo(port);
- else if (port->uartclk == 4800000)
- msm_serial_set_mnd_regs_tcxoby4(port);
-}
-
-#define msm_serial_set_mnd_regs msm_serial_set_mnd_regs_from_uartclk
-
-#endif /* __DRIVERS_SERIAL_MSM_SERIAL_H */
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index ce362bd51de7..45b57c294d13 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -300,6 +300,8 @@ static int mvebu_uart_startup(struct uart_port *port)
static void mvebu_uart_shutdown(struct uart_port *port)
{
writel(0, port->membase + UART_CTRL);
+
+ free_irq(port->irq, port);
}
static void mvebu_uart_set_termios(struct uart_port *port,
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index 62a43bf5698e..7f8e99bbcb73 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -445,7 +445,6 @@ static int pic32_uart_startup(struct uart_port *port)
sport->idx);
if (!sport->irq_rx_name) {
dev_err(port->dev, "%s: kasprintf err!", __func__);
- kfree(sport->irq_fault_name);
ret = -ENOMEM;
goto out_f;
}
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index e156e39d620c..b24b0556f5a8 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1720,7 +1720,7 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(uap->pdev, 0);
- if (!r_ports || !irq)
+ if (!r_ports || irq <= 0)
return -ENODEV;
uap->port.mapbase = r_ports->start;
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 41eab75ba2af..cd9d9e878475 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -27,7 +27,6 @@
#define SUPPORT_SYSRQ
#endif
-#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -829,7 +828,6 @@ static const struct of_device_id serial_pxa_dt_ids[] = {
{ .compatible = "mrvl,mmp-uart", },
{}
};
-MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids);
static int serial_pxa_probe_dt(struct platform_device *pdev,
struct uart_pxa_port *sport)
@@ -914,28 +912,15 @@ static int serial_pxa_probe(struct platform_device *dev)
return ret;
}
-static int serial_pxa_remove(struct platform_device *dev)
-{
- struct uart_pxa_port *sport = platform_get_drvdata(dev);
-
- uart_remove_one_port(&serial_pxa_reg, &sport->port);
-
- clk_unprepare(sport->clk);
- clk_put(sport->clk);
- kfree(sport);
-
- return 0;
-}
-
static struct platform_driver serial_pxa_driver = {
.probe = serial_pxa_probe,
- .remove = serial_pxa_remove,
.driver = {
.name = "pxa2xx-uart",
#ifdef CONFIG_PM
.pm = &serial_pxa_pm_ops,
#endif
+ .suppress_bind_attrs = true,
.of_match_table = serial_pxa_dt_ids,
},
};
@@ -954,15 +939,4 @@ static int __init serial_pxa_init(void)
return ret;
}
-
-static void __exit serial_pxa_exit(void)
-{
- platform_driver_unregister(&serial_pxa_driver);
- uart_unregister_driver(&serial_pxa_reg);
-}
-
-module_init(serial_pxa_init);
-module_exit(serial_pxa_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pxa2xx-uart");
+device_initcall(serial_pxa_init);
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 99bb23161dd6..ae2095a66708 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -169,8 +169,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
return;
if (s3c24xx_serial_has_interrupt_mask(port))
- __set_bit(S3C64XX_UINTM_TXD,
- portaddrl(port, S3C64XX_UINTM));
+ s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);
else
disable_irq_nosync(ourport->tx_irq);
@@ -235,8 +234,7 @@ static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
/* Mask Tx interrupt */
if (s3c24xx_serial_has_interrupt_mask(port))
- __set_bit(S3C64XX_UINTM_TXD,
- portaddrl(port, S3C64XX_UINTM));
+ s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);
else
disable_irq_nosync(ourport->tx_irq);
@@ -269,8 +267,8 @@ static void enable_tx_pio(struct s3c24xx_uart_port *ourport)
/* Unmask Tx interrupt */
if (s3c24xx_serial_has_interrupt_mask(port))
- __clear_bit(S3C64XX_UINTM_TXD,
- portaddrl(port, S3C64XX_UINTM));
+ s3c24xx_clear_bit(port, S3C64XX_UINTM_TXD,
+ S3C64XX_UINTM);
else
enable_irq(ourport->tx_irq);
@@ -397,8 +395,8 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
if (rx_enabled(port)) {
dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
if (s3c24xx_serial_has_interrupt_mask(port))
- __set_bit(S3C64XX_UINTM_RXD,
- portaddrl(port, S3C64XX_UINTM));
+ s3c24xx_set_bit(port, S3C64XX_UINTM_RXD,
+ S3C64XX_UINTM);
else
disable_irq_nosync(ourport->rx_irq);
rx_enabled(port) = 0;
@@ -1069,7 +1067,7 @@ static int s3c64xx_serial_startup(struct uart_port *port)
spin_unlock_irqrestore(&port->lock, flags);
/* Enable Rx Interrupt */
- __clear_bit(S3C64XX_UINTM_RXD, portaddrl(port, S3C64XX_UINTM));
+ s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
dbg("s3c64xx_serial_startup ok\n");
return ret;
@@ -1684,7 +1682,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
return -ENODEV;
if (port->mapbase != 0)
- return 0;
+ return -EINVAL;
/* setup info for port */
port->dev = &platdev->dev;
@@ -1738,22 +1736,25 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
ourport->dma = devm_kzalloc(port->dev,
sizeof(*ourport->dma),
GFP_KERNEL);
- if (!ourport->dma)
- return -ENOMEM;
+ if (!ourport->dma) {
+ ret = -ENOMEM;
+ goto err;
+ }
}
ourport->clk = clk_get(&platdev->dev, "uart");
if (IS_ERR(ourport->clk)) {
pr_err("%s: Controller clock not found\n",
dev_name(&platdev->dev));
- return PTR_ERR(ourport->clk);
+ ret = PTR_ERR(ourport->clk);
+ goto err;
}
ret = clk_prepare_enable(ourport->clk);
if (ret) {
pr_err("uart: clock failed to prepare+enable: %d\n", ret);
clk_put(ourport->clk);
- return ret;
+ goto err;
}
/* Keep all interrupts masked and cleared */
@@ -1769,7 +1770,12 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
/* reset the fifos (and setup the uart) */
s3c24xx_serial_resetport(port, cfg);
+
return 0;
+
+err:
+ port->mapbase = 0;
+ return ret;
}
/* Device driver serial port probe */
@@ -1836,8 +1842,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
ourport->min_dma_size = max_t(int, ourport->port.fifosize,
dma_get_cache_alignment());
- probe_index++;
-
dbg("%s: initialising port %p...\n", __func__, ourport);
ret = s3c24xx_serial_init_port(ourport, pdev);
@@ -1867,6 +1871,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
if (ret < 0)
dev_err(&pdev->dev, "failed to add cpufreq notifier\n");
+ probe_index++;
+
return 0;
}
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index fc5deaa4f382..2ae4fcee1814 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -117,10 +117,38 @@ struct s3c24xx_uart_port {
#define portaddrl(port, reg) \
((unsigned long *)(unsigned long)((port)->membase + (reg)))
-#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
-#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
-
-#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
-#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
+#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
+#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
+
+#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
+#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
+
+/* Byte-order aware bit setting/clearing functions. */
+
+static inline void s3c24xx_set_bit(struct uart_port *port, int idx,
+ unsigned int reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+ val = rd_regl(port, reg);
+ val |= (1 << idx);
+ wr_regl(port, reg, val);
+ local_irq_restore(flags);
+}
+
+static inline void s3c24xx_clear_bit(struct uart_port *port, int idx,
+ unsigned int reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+ val = rd_regl(port, reg);
+ val &= ~(1 << idx);
+ wr_regl(port, reg, val);
+ local_irq_restore(flags);
+}
#endif
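
The helpers above exist because __set_bit()/__clear_bit() on portaddrl() operate
on an unsigned long in memory, which is neither endian-safe nor correct for a
32-bit memory-mapped register on a 64-bit kernel. The read-modify-write through
rd_regl()/wr_regl(), with local interrupts held off, is a drop-in replacement at
every call site:

	/* was: __set_bit(S3C64XX_UINTM_TXD, portaddrl(port, S3C64XX_UINTM)); */
	s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);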
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 1dba6719db8d..731ac35acb31 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1317,7 +1317,12 @@ static int tegra_uart_probe(struct platform_device *pdev)
}
u->iotype = UPIO_MEM32;
- u->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Couldn't get IRQ\n");
+ return ret;
+ }
+ u->irq = ret;
u->regshift = 2;
ret = uart_add_one_port(&tegra_uart_driver, u);
if (ret < 0) {
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index a333c59cba2c..9fc15335c8c5 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -887,7 +887,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
/*
* Free and release old regions
*/
- if (old_type != PORT_UNKNOWN)
+ if (old_type != PORT_UNKNOWN && uport->ops->release_port)
uport->ops->release_port(uport);
uport->iobase = new_port;
@@ -900,7 +900,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
/*
* Claim and map the new regions
*/
- if (uport->type != PORT_UNKNOWN) {
+ if (uport->type != PORT_UNKNOWN && uport->ops->request_port) {
retval = uport->ops->request_port(uport);
} else {
/* Always success - Jean II */
@@ -1125,7 +1125,7 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
* If we already have a port type configured,
* we must release its resources.
*/
- if (uport->type != PORT_UNKNOWN)
+ if (uport->type != PORT_UNKNOWN && uport->ops->release_port)
uport->ops->release_port(uport);
flags = UART_CONFIG_TYPE;
@@ -2897,7 +2897,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
/*
* Free the port IO and memory resources, if any.
*/
- if (uport->type != PORT_UNKNOWN)
+ if (uport->type != PORT_UNKNOWN && uport->ops->release_port)
uport->ops->release_port(uport);
kfree(uport->tty_groups);
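
These guards make .request_port/.release_port optional in struct uart_ops, so a
driver whose resources are fully managed elsewhere can leave both callbacks
unset. A hypothetical minimal ops table (the simple_* names are illustrative,
not from this series):

	static struct uart_ops simple_uart_ops = {
		.startup  = simple_startup,
		.shutdown = simple_shutdown,
		/* .request_port / .release_port deliberately absent */
	};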
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index e8dd5097dc56..d2da6aa7f27d 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -52,6 +52,9 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
int value_array[UART_GPIO_MAX];
unsigned int count = 0;
+ if (gpios == NULL)
+ return;
+
for (i = 0; i < UART_GPIO_MAX; i++)
if (gpios->gpio[i] && mctrl_gpios_desc[i].dir_out) {
desc_array[count] = gpios->gpio[i];
@@ -73,6 +76,9 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
{
enum mctrl_gpio_idx i;
+ if (gpios == NULL)
+ return *mctrl;
+
for (i = 0; i < UART_GPIO_MAX; i++) {
if (gpios->gpio[i] && !mctrl_gpios_desc[i].dir_out) {
if (gpiod_get_value(gpios->gpio[i]))
@@ -86,6 +92,27 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_get);
+unsigned int
+mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl)
+{
+ enum mctrl_gpio_idx i;
+
+ if (gpios == NULL)
+ return *mctrl;
+
+ for (i = 0; i < UART_GPIO_MAX; i++) {
+ if (gpios->gpio[i] && mctrl_gpios_desc[i].dir_out) {
+ if (gpiod_get_value(gpios->gpio[i]))
+ *mctrl |= mctrl_gpios_desc[i].mctrl;
+ else
+ *mctrl &= ~mctrl_gpios_desc[i].mctrl;
+ }
+ }
+
+ return *mctrl;
+}
+EXPORT_SYMBOL_GPL(mctrl_gpio_get_outputs);
+
struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
{
struct mctrl_gpios *gpios;
@@ -203,6 +230,9 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
+ if (gpios == NULL)
+ return;
+
for (i = 0; i < UART_GPIO_MAX; i++) {
if (gpios->irq[i])
devm_free_irq(gpios->port->dev, gpios->irq[i], gpios);
@@ -218,6 +248,9 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
+ if (gpios == NULL)
+ return;
+
/* .enable_ms may be called multiple times */
if (gpios->mctrl_on)
return;
@@ -240,6 +273,9 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
+ if (gpios == NULL)
+ return;
+
if (!gpios->mctrl_on)
return;
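
The NULL checks added throughout this file let a caller keep a NULL mctrl_gpios
handle (no GPIOs wired up, or a !GPIOLIB build) and still call every helper
unconditionally. A sketch of the calling convention this enables; the foo_*
names are illustrative:

	static void foo_set_mctrl(struct uart_port *port, unsigned int mctrl)
	{
		struct foo_port *s = to_foo_port(port);

		mctrl_gpio_set(s->gpios, mctrl);	/* no-op when s->gpios is NULL */
	}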
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index 332a33ab0647..fa000bcff217 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -48,12 +48,19 @@ struct mctrl_gpios;
void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl);
/*
- * Get state of the modem control output lines from GPIOs.
+ * Get state of the modem control input lines from GPIOs.
* The mctrl flags are updated and returned.
*/
unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl);
/*
+ * Get state of the modem control output lines from GPIOs.
+ * The mctrl flags are updated and returned.
+ */
+unsigned int
+mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl);
+
+/*
* Returns the associated struct gpio_desc to the modem line gidx
*/
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
@@ -107,6 +114,12 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
return *mctrl;
}
+static inline unsigned int
+mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl)
+{
+ return *mctrl;
+}
+
static inline
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 0130feb069ae..d86eee38aae6 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -57,6 +57,7 @@
#include <asm/sh_bios.h>
#endif
+#include "serial_mctrl_gpio.h"
#include "sh-sci.h"
/* Offsets into the sci_port->irqs array */
@@ -111,6 +112,7 @@ struct sci_port {
unsigned int error_clear;
unsigned int sampling_rate_mask;
resource_size_t reg_size;
+ struct mctrl_gpios *gpios;
/* Break timer */
struct timer_list break_timer;
@@ -139,6 +141,8 @@ struct sci_port {
struct timer_list rx_timer;
unsigned int rx_timeout;
#endif
+
+ bool autorts;
};
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
@@ -701,7 +705,6 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
struct sci_port *s = to_sci_port(port);
- const struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
/*
* Use port-specific handler if provided.
@@ -711,21 +714,28 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
return;
}
- /*
- * For the generic path SCSPTR is necessary. Bail out if that's
- * unavailable, too.
- */
- if (!reg->size)
- return;
-
- if ((s->cfg->capabilities & SCIx_HAVE_RTSCTS) &&
- ((!(cflag & CRTSCTS)))) {
- unsigned short status;
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ u16 ctrl = serial_port_in(port, SCPCR);
+
+ /* Enable RXD and TXD pin functions */
+ ctrl &= ~(SCPCR_RXDC | SCPCR_TXDC);
+ if (to_sci_port(port)->cfg->capabilities & SCIx_HAVE_RTSCTS) {
+ /* RTS# is output, driven 1 */
+ ctrl |= SCPCR_RTSC;
+ serial_port_out(port, SCPDR,
+ serial_port_in(port, SCPDR) | SCPDR_RTSD);
+ /* Enable CTS# pin function */
+ ctrl &= ~SCPCR_CTSC;
+ }
+ serial_port_out(port, SCPCR, ctrl);
+ } else if (sci_getreg(port, SCSPTR)->size) {
+ u16 status = serial_port_in(port, SCSPTR);
- status = serial_port_in(port, SCSPTR);
- status &= ~SCSPTR_CTSIO;
- status |= SCSPTR_RTSIO;
- serial_port_out(port, SCSPTR, status); /* Set RTS = 1 */
+ /* RTS# is output, driven 1 */
+ status |= SCSPTR_RTSIO | SCSPTR_RTSDT;
+ /* CTS# and SCK are inputs */
+ status &= ~(SCSPTR_CTSIO | SCSPTR_SCKIO);
+ serial_port_out(port, SCSPTR, status);
}
}
@@ -1803,6 +1813,46 @@ static unsigned int sci_tx_empty(struct uart_port *port)
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
+static void sci_set_rts(struct uart_port *port, bool state)
+{
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ u16 data = serial_port_in(port, SCPDR);
+
+ /* Active low */
+ if (state)
+ data &= ~SCPDR_RTSD;
+ else
+ data |= SCPDR_RTSD;
+ serial_port_out(port, SCPDR, data);
+
+ /* RTS# is output */
+ serial_port_out(port, SCPCR,
+ serial_port_in(port, SCPCR) | SCPCR_RTSC);
+ } else if (sci_getreg(port, SCSPTR)->size) {
+ u16 ctrl = serial_port_in(port, SCSPTR);
+
+ /* Active low */
+ if (state)
+ ctrl &= ~SCSPTR_RTSDT;
+ else
+ ctrl |= SCSPTR_RTSDT;
+ serial_port_out(port, SCSPTR, ctrl);
+ }
+}
+
+static bool sci_get_cts(struct uart_port *port)
+{
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ /* Active low */
+ return !(serial_port_in(port, SCPDR) & SCPDR_CTSD);
+ } else if (sci_getreg(port, SCSPTR)->size) {
+ /* Active low */
+ return !(serial_port_in(port, SCSPTR) & SCSPTR_CTSDT);
+ }
+
+ return true;
+}
+
/*
* Modem control is a bit of a mixed bag for SCI(F) ports. Generally
* CTS/RTS is supported in hardware by at least one port and controlled
@@ -1817,6 +1867,8 @@ static unsigned int sci_tx_empty(struct uart_port *port)
*/
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ struct sci_port *s = to_sci_port(port);
+
if (mctrl & TIOCM_LOOP) {
const struct plat_sci_reg *reg;
@@ -1829,25 +1881,72 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
serial_port_in(port, SCFCR) |
SCFCR_LOOP);
}
+
+ mctrl_gpio_set(s->gpios, mctrl);
+
+ if (!(s->cfg->capabilities & SCIx_HAVE_RTSCTS))
+ return;
+
+ if (!(mctrl & TIOCM_RTS)) {
+ /* Disable Auto RTS */
+ serial_port_out(port, SCFCR,
+ serial_port_in(port, SCFCR) & ~SCFCR_MCE);
+
+ /* Clear RTS */
+ sci_set_rts(port, 0);
+ } else if (s->autorts) {
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ /* Enable RTS# pin function */
+ serial_port_out(port, SCPCR,
+ serial_port_in(port, SCPCR) & ~SCPCR_RTSC);
+ }
+
+ /* Enable Auto RTS */
+ serial_port_out(port, SCFCR,
+ serial_port_in(port, SCFCR) | SCFCR_MCE);
+ } else {
+ /* Set RTS */
+ sci_set_rts(port, 1);
+ }
}
static unsigned int sci_get_mctrl(struct uart_port *port)
{
+ struct sci_port *s = to_sci_port(port);
+ struct mctrl_gpios *gpios = s->gpios;
+ unsigned int mctrl = 0;
+
+ mctrl_gpio_get(gpios, &mctrl);
+
/*
* CTS/RTS is handled in hardware when supported, while nothing
- * else is wired up. Keep it simple and simply assert DSR/CAR.
+ * else is wired up.
*/
- return TIOCM_DSR | TIOCM_CAR;
+ if (s->autorts) {
+ if (sci_get_cts(port))
+ mctrl |= TIOCM_CTS;
+ } else if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS))) {
+ mctrl |= TIOCM_CTS;
+ }
+ if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR)))
+ mctrl |= TIOCM_DSR;
+ if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD)))
+ mctrl |= TIOCM_CAR;
+
+ return mctrl;
+}
+
+static void sci_enable_ms(struct uart_port *port)
+{
+ mctrl_gpio_enable_ms(to_sci_port(port)->gpios);
}
static void sci_break_ctl(struct uart_port *port, int break_state)
{
- struct sci_port *s = to_sci_port(port);
- const struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
unsigned short scscr, scsptr;
/* check whether the port has SCSPTR */
- if (!reg->size) {
+ if (!sci_getreg(port, SCSPTR)->size) {
/*
* Not supported by hardware. Most parts couple break and rx
* interrupts together, with break detection always enabled.
@@ -1873,7 +1972,6 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
static int sci_startup(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
- unsigned long flags;
int ret;
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
@@ -1884,11 +1982,6 @@ static int sci_startup(struct uart_port *port)
sci_request_dma(port);
- spin_lock_irqsave(&port->lock, flags);
- sci_start_tx(port);
- sci_start_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
-
return 0;
}
@@ -1896,12 +1989,19 @@ static void sci_shutdown(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
unsigned long flags;
+ u16 scr;
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ s->autorts = false;
+ mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
+
spin_lock_irqsave(&port->lock, flags);
sci_stop_rx(port);
sci_stop_tx(port);
+ /* Stop RX and TX, disable related interrupts, keep clock source */
+ scr = serial_port_in(port, SCSCR);
+ serial_port_out(port, SCSCR, scr & (SCSCR_CKE1 | SCSCR_CKE0));
spin_unlock_irqrestore(&port->lock, flags);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -2056,6 +2156,15 @@ static void sci_reset(struct uart_port *port)
reg = sci_getreg(port, SCFCR);
if (reg->size)
serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+
+ sci_clear_SCxSR(port,
+ SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) &
+ SCxSR_BREAK_CLEAR(port));
+ if (sci_getreg(port, SCLSR)->size) {
+ status = serial_port_in(port, SCLSR);
+ status &= ~(SCLSR_TO | SCLSR_ORER);
+ serial_port_out(port, SCLSR, status);
+ }
}
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
@@ -2218,15 +2327,18 @@ done:
sci_init_pins(port, termios->c_cflag);
+ port->status &= ~UPSTAT_AUTOCTS;
+ s->autorts = false;
reg = sci_getreg(port, SCFCR);
if (reg->size) {
unsigned short ctrl = serial_port_in(port, SCFCR);
- if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
- if (termios->c_cflag & CRTSCTS)
- ctrl |= SCFCR_MCE;
- else
- ctrl &= ~SCFCR_MCE;
+ if ((port->flags & UPF_HARD_FLOW) &&
+ (termios->c_cflag & CRTSCTS)) {
+ /* There is no CTS interrupt to restart the hardware */
+ port->status |= UPSTAT_AUTOCTS;
+ /* MCE is enabled when RTS is raised */
+ s->autorts = true;
}
/*
@@ -2300,6 +2412,9 @@ done:
sci_start_rx(port);
sci_port_disable(s);
+
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ sci_enable_ms(port);
}
static void sci_pm(struct uart_port *port, unsigned int state,
@@ -2425,6 +2540,7 @@ static struct uart_ops sci_uart_ops = {
.start_tx = sci_start_tx,
.stop_tx = sci_stop_tx,
.stop_rx = sci_stop_rx,
+ .enable_ms = sci_enable_ms,
.break_ctl = sci_break_ctl,
.startup = sci_startup,
.shutdown = sci_shutdown,
@@ -2890,6 +3006,9 @@ sci_parse_dt(struct platform_device *pdev, unsigned int *dev_id)
p->regtype = SCI_OF_REGTYPE(match->data);
p->scscr = SCSCR_RE | SCSCR_TE;
+ if (of_find_property(np, "uart-has-rtscts", NULL))
+ p->capabilities |= SCIx_HAVE_RTSCTS;
+
return p;
}
@@ -2912,6 +3031,21 @@ static int sci_probe_single(struct platform_device *dev,
if (ret)
return ret;
+ sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
+ if (IS_ERR(sciport->gpios) && PTR_ERR(sciport->gpios) != -ENOSYS)
+ return PTR_ERR(sciport->gpios);
+
+ if (p->capabilities & SCIx_HAVE_RTSCTS) {
+ if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
+ UART_GPIO_CTS)) ||
+ !IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
+ UART_GPIO_RTS))) {
+ dev_err(&dev->dev, "Conflicting RTS/CTS config\n");
+ return -EINVAL;
+ }
+ sciport->port.flags |= UPF_HARD_FLOW;
+ }
+
ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
if (ret) {
sci_cleanup_single(sciport);
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index 7a4fa185b93e..ffa6d688c335 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -105,13 +105,16 @@ enum {
#define SCFCR_LOOP BIT(0) /* Loopback Test */
/* SCLSR (Line Status Register) on (H)SCIF */
+#define SCLSR_TO BIT(2) /* Timeout */
#define SCLSR_ORER BIT(0) /* Overrun Error */
/* SCSPTR (Serial Port Register), optional */
-#define SCSPTR_RTSIO BIT(7) /* Serial Port RTS Pin Input/Output */
-#define SCSPTR_RTSDT BIT(6) /* Serial Port RTS Pin Data */
-#define SCSPTR_CTSIO BIT(5) /* Serial Port CTS Pin Input/Output */
-#define SCSPTR_CTSDT BIT(4) /* Serial Port CTS Pin Data */
+#define SCSPTR_RTSIO BIT(7) /* Serial Port RTS# Pin Input/Output */
+#define SCSPTR_RTSDT BIT(6) /* Serial Port RTS# Pin Data */
+#define SCSPTR_CTSIO BIT(5) /* Serial Port CTS# Pin Input/Output */
+#define SCSPTR_CTSDT BIT(4) /* Serial Port CTS# Pin Data */
+#define SCSPTR_SCKIO BIT(3) /* Serial Port Clock Pin Input/Output */
+#define SCSPTR_SCKDT BIT(2) /* Serial Port Clock Pin Data */
#define SCSPTR_SPB2IO BIT(1) /* Serial Port Break Input/Output */
#define SCSPTR_SPB2DT BIT(0) /* Serial Port Break Data */
@@ -119,12 +122,18 @@ enum {
#define HSCIF_SRE BIT(15) /* Sampling Rate Register Enable */
/* SCPCR (Serial Port Control Register), SCIFA/SCIFB only */
-#define SCPCR_RTSC BIT(4) /* Serial Port RTS Pin / Output Pin */
-#define SCPCR_CTSC BIT(3) /* Serial Port CTS Pin / Input Pin */
+#define SCPCR_RTSC BIT(4) /* Serial Port RTS# Pin / Output Pin */
+#define SCPCR_CTSC BIT(3) /* Serial Port CTS# Pin / Input Pin */
+#define SCPCR_SCKC BIT(2) /* Serial Port SCK Pin / Output Pin */
+#define SCPCR_RXDC BIT(1) /* Serial Port RXD Pin / Input Pin */
+#define SCPCR_TXDC BIT(0) /* Serial Port TXD Pin / Output Pin */
/* SCPDR (Serial Port Data Register), SCIFA/SCIFB only */
-#define SCPDR_RTSD BIT(4) /* Serial Port RTS Output Pin Data */
-#define SCPDR_CTSD BIT(3) /* Serial Port CTS Input Pin Data */
+#define SCPDR_RTSD BIT(4) /* Serial Port RTS# Output Pin Data */
+#define SCPDR_CTSD BIT(3) /* Serial Port CTS# Input Pin Data */
+#define SCPDR_SCKD BIT(2) /* Serial Port SCK Output Pin Data */
+#define SCPDR_RXDD BIT(1) /* Serial Port RXD Input Pin Data */
+#define SCPDR_TXDD BIT(0) /* Serial Port TXD Output Pin Data */
/*
* BRG Clock Select Register (Some SCIF and HSCIF)
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index c3a885b4d76a..43756bd9111c 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -106,7 +106,7 @@ struct sirfsoc_uart_register {
enum sirfsoc_uart_type uart_type;
};
-u32 uart_usp_ff_full_mask(struct uart_port *port)
+static u32 uart_usp_ff_full_mask(struct uart_port *port)
{
u32 full_bit;
@@ -114,7 +114,7 @@ u32 uart_usp_ff_full_mask(struct uart_port *port)
return (1 << full_bit);
}
-u32 uart_usp_ff_empty_mask(struct uart_port *port)
+static u32 uart_usp_ff_empty_mask(struct uart_port *port)
{
u32 empty_bit;
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index b384060e3b1f..23cfc5e16b45 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -21,7 +21,6 @@
#include <linux/hrtimer.h>
#include <linux/delay.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
@@ -730,22 +729,12 @@ static int vt8500_serial_probe(struct platform_device *pdev)
return 0;
}
-static int vt8500_serial_remove(struct platform_device *pdev)
-{
- struct vt8500_port *vt8500_port = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(vt8500_port->clk);
- uart_remove_one_port(&vt8500_uart_driver, &vt8500_port->uart);
-
- return 0;
-}
-
static struct platform_driver vt8500_platform_driver = {
.probe = vt8500_serial_probe,
- .remove = vt8500_serial_remove,
.driver = {
.name = "vt8500_serial",
.of_match_table = wmt_dt_ids,
+ .suppress_bind_attrs = true,
},
};
@@ -764,19 +753,4 @@ static int __init vt8500_serial_init(void)
return ret;
}
-
-static void __exit vt8500_serial_exit(void)
-{
-#ifdef CONFIG_SERIAL_VT8500_CONSOLE
- unregister_console(&vt8500_console);
-#endif
- platform_driver_unregister(&vt8500_platform_driver);
- uart_unregister_driver(&vt8500_uart_driver);
-}
-
-module_init(vt8500_serial_init);
-module_exit(vt8500_serial_exit);
-
-MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
-MODULE_DESCRIPTION("Driver for vt8500 serial device");
-MODULE_LICENSE("GPL v2");
+device_initcall(vt8500_serial_init);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index cd46e64c4255..9ca1a4d1b66a 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -976,6 +976,23 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
}
#endif
+static void cdns_uart_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct cdns_uart *cdns_uart = port->private_data;
+
+ switch (state) {
+ case UART_PM_STATE_OFF:
+ clk_disable(cdns_uart->uartclk);
+ clk_disable(cdns_uart->pclk);
+ break;
+ default:
+ clk_enable(cdns_uart->pclk);
+ clk_enable(cdns_uart->uartclk);
+ break;
+ }
+}
+
static struct uart_ops cdns_uart_ops = {
.set_mctrl = cdns_uart_set_mctrl,
.get_mctrl = cdns_uart_get_mctrl,
@@ -987,6 +1004,7 @@ static struct uart_ops cdns_uart_ops = {
.set_termios = cdns_uart_set_termios,
.startup = cdns_uart_startup,
.shutdown = cdns_uart_shutdown,
+ .pm = cdns_uart_pm,
.type = cdns_uart_type,
.verify_port = cdns_uart_verify_port,
.request_port = cdns_uart_request_port,
@@ -1350,12 +1368,12 @@ static int cdns_uart_probe(struct platform_device *pdev)
return PTR_ERR(cdns_uart_data->uartclk);
}
- rc = clk_prepare_enable(cdns_uart_data->pclk);
+ rc = clk_prepare(cdns_uart_data->pclk);
if (rc) {
dev_err(&pdev->dev, "Unable to enable pclk clock.\n");
return rc;
}
- rc = clk_prepare_enable(cdns_uart_data->uartclk);
+ rc = clk_prepare(cdns_uart_data->uartclk);
if (rc) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
goto err_out_clk_dis_pclk;
@@ -1422,9 +1440,9 @@ err_out_notif_unreg:
&cdns_uart_data->clk_rate_change_nb);
#endif
err_out_clk_disable:
- clk_disable_unprepare(cdns_uart_data->uartclk);
+ clk_unprepare(cdns_uart_data->uartclk);
err_out_clk_dis_pclk:
- clk_disable_unprepare(cdns_uart_data->pclk);
+ clk_unprepare(cdns_uart_data->pclk);
return rc;
}
@@ -1448,8 +1466,8 @@ static int cdns_uart_remove(struct platform_device *pdev)
#endif
rc = uart_remove_one_port(&cdns_uart_uart_driver, port);
port->mapbase = 0;
- clk_disable_unprepare(cdns_uart_data->uartclk);
- clk_disable_unprepare(cdns_uart_data->pclk);
+ clk_unprepare(cdns_uart_data->uartclk);
+ clk_unprepare(cdns_uart_data->pclk);
return rc;
}
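
The xilinx_uartps hunks split clock handling in two: probe now only does the (possibly sleeping) clk_prepare(), while the new cdns_uart_pm() callback toggles clk_enable()/clk_disable(), which are safe in atomic context; the error and remove paths accordingly only clk_unprepare(). A minimal sketch of that pairing, assuming a kernel build context and a clk obtained elsewhere (e.g. from devm_clk_get()):

#include <linux/clk.h>

static int example_probe_clk(struct clk *clk)
{
	return clk_prepare(clk);	/* slow part, done once, may sleep */
}

static int example_power_on(struct clk *clk)
{
	return clk_enable(clk);		/* atomic-safe: fine in uart_ops.pm */
}

static void example_power_off(struct clk *clk)
{
	clk_disable(clk);
}

static void example_remove_clk(struct clk *clk)
{
	clk_unprepare(clk);		/* undo exactly what probe did */
}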
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index c8c91f0476a2..9d7ab7b66a8a 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -499,9 +499,8 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
return 0;
}
-/* ui is a leftover from using a hashtable, but might be used again
- Caller must hold the lock */
-static int con_do_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
+/* Caller must hold the lock */
+static int con_do_clear_unimap(struct vc_data *vc)
{
struct uni_pagedir *p, *q;
@@ -524,11 +523,11 @@ static int con_do_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
return 0;
}
-int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
+int con_clear_unimap(struct vc_data *vc)
{
int ret;
console_lock();
- ret = con_do_clear_unimap(vc, ui);
+ ret = con_do_clear_unimap(vc);
console_unlock();
return ret;
}
@@ -556,7 +555,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
int j, k;
u16 **p1, *p2, l;
- err1 = con_do_clear_unimap(vc, NULL);
+ err1 = con_do_clear_unimap(vc);
if (err1) {
console_unlock();
return err1;
@@ -677,7 +676,7 @@ int con_set_default_unimap(struct vc_data *vc)
/* The default font is always 256 characters */
- err = con_do_clear_unimap(vc, NULL);
+ err = con_do_clear_unimap(vc);
if (err)
return err;
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index f973bfce5d08..0f8caae4267d 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c)
static void do_compute_shiftstate(void)
{
- unsigned int i, j, k, sym, val;
+ unsigned int k, sym, val;
shift_state = 0;
memset(shift_down, 0, sizeof(shift_down));
- for (i = 0; i < ARRAY_SIZE(key_down); i++) {
-
- if (!key_down[i])
+ for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
+ sym = U(key_maps[0][k]);
+ if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
continue;
- k = i * BITS_PER_LONG;
-
- for (j = 0; j < BITS_PER_LONG; j++, k++) {
-
- if (!test_bit(k, key_down))
- continue;
-
- sym = U(key_maps[0][k]);
- if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
- continue;
-
- val = KVAL(sym);
- if (val == KVAL(K_CAPSSHIFT))
- val = KVAL(K_SHIFT);
+ val = KVAL(sym);
+ if (val == KVAL(K_CAPSSHIFT))
+ val = KVAL(K_SHIFT);
- shift_down[val]++;
- shift_state |= (1 << val);
- }
+ shift_down[val]++;
+ shift_state |= BIT(val);
}
}
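
The do_compute_shiftstate() rewrite above replaces the hand-rolled word-plus-bit double loop over key_down[] with for_each_set_bit(), which visits only the set bit positions up to the given limit. A minimal sketch of the idiom, assuming a kernel build context:

#include <linux/bitops.h>

/* Count how many keys are held down; key_down is a bitmap of nbits bits. */
static unsigned int count_pressed(const unsigned long *key_down,
				  unsigned int nbits)
{
	unsigned int k, n = 0;

	for_each_set_bit(k, key_down, nbits)
		n++;	/* loop body runs once per set bit only */

	return n;
}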
@@ -579,7 +567,7 @@ static void fn_scroll_forw(struct vc_data *vc)
static void fn_scroll_back(struct vc_data *vc)
{
- scrollback(vc, 0);
+ scrollback(vc);
}
static void fn_show_mem(struct vc_data *vc)
@@ -1745,16 +1733,10 @@ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
return -EINVAL;
if (ct) {
- buf = kmalloc(ct * sizeof(struct kbdiacruc),
- GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- if (copy_from_user(buf, a->kbdiacruc,
- ct * sizeof(struct kbdiacruc))) {
- kfree(buf);
- return -EFAULT;
- }
+ buf = memdup_user(a->kbdiacruc,
+ ct * sizeof(struct kbdiacruc));
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
}
spin_lock_irqsave(&kbd_event_lock, flags);
if (ct)
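
The vt_do_diacrit() hunk is the common memdup_user() cleanup: kmalloc() plus copy_from_user() plus the error-path kfree() collapse into one call that returns either the copied buffer or an ERR_PTR()-encoded errno. A minimal sketch of the calling convention, with hypothetical names, assuming a kernel build context:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Duplicate ct fixed-size records from user space. */
static int dup_user_records(const void __user *uptr, size_t ct, size_t recsz,
			    void **out)
{
	void *buf = memdup_user(uptr, ct * recsz);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT, one check */

	*out = buf;		/* caller kfree()s *out when done */
	return 0;
}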
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5b0fe97c46ca..2705ca960e92 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -277,13 +277,15 @@ static void notify_update(struct vc_data *vc)
* Low-Level Functions
*/
-#define IS_FG(vc) ((vc)->vc_num == fg_console)
+static inline bool con_is_fg(const struct vc_data *vc)
+{
+ return vc->vc_num == fg_console;
+}
-#ifdef VT_BUF_VRAM_ONLY
-#define DO_UPDATE(vc) 0
-#else
-#define DO_UPDATE(vc) (CON_IS_VISIBLE(vc) && !console_blanked)
-#endif
+static inline bool con_should_update(const struct vc_data *vc)
+{
+ return con_is_visible(vc) && !console_blanked;
+}
static inline unsigned short *screenpos(struct vc_data *vc, int offset, int viewed)
{
@@ -321,7 +323,7 @@ static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
nr = b - t - 1;
if (b > vc->vc_rows || t >= b || nr < 1)
return;
- if (CON_IS_VISIBLE(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
+ if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
return;
d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
@@ -339,7 +341,7 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
nr = b - t - 1;
if (b > vc->vc_rows || t >= b || nr < 1)
return;
- if (CON_IS_VISIBLE(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
+ if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
return;
s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
step = vc->vc_cols * nr;
@@ -349,7 +351,6 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
static void do_update_region(struct vc_data *vc, unsigned long start, int count)
{
-#ifndef VT_BUF_VRAM_ONLY
unsigned int xx, yy, offset;
u16 *p;
@@ -390,14 +391,13 @@ static void do_update_region(struct vc_data *vc, unsigned long start, int count)
start = vc->vc_sw->con_getxy(vc, start, NULL, NULL);
}
}
-#endif
}
void update_region(struct vc_data *vc, unsigned long start, int count)
{
WARN_CONSOLE_UNLOCKED();
- if (DO_UPDATE(vc)) {
+ if (con_should_update(vc)) {
hide_cursor(vc);
do_update_region(vc, start, count);
set_cursor(vc);
@@ -413,7 +413,6 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
return vc->vc_sw->con_build_attr(vc, _color, _intensity,
_blink, _underline, _reverse, _italic);
-#ifndef VT_BUF_VRAM_ONLY
/*
* ++roman: I completely changed the attribute format for monochrome
* mode (!can_do_color). The formerly used MDA (monochrome display
@@ -448,9 +447,6 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
a <<= 1;
return a;
}
-#else
- return 0;
-#endif
}
static void update_attr(struct vc_data *vc)
@@ -470,10 +466,9 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
count /= 2;
p = screenpos(vc, offset, viewed);
- if (vc->vc_sw->con_invert_region)
+ if (vc->vc_sw->con_invert_region) {
vc->vc_sw->con_invert_region(vc, p, count);
-#ifndef VT_BUF_VRAM_ONLY
- else {
+ } else {
u16 *q = p;
int cnt = count;
u16 a;
@@ -501,8 +496,8 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
}
}
}
-#endif
- if (DO_UPDATE(vc))
+
+ if (con_should_update(vc))
do_update_region(vc, (unsigned long) p, count);
notify_update(vc);
}
@@ -519,7 +514,7 @@ void complement_pos(struct vc_data *vc, int offset)
if (old_offset != -1 && old_offset >= 0 &&
old_offset < vc->vc_screenbuf_size) {
scr_writew(old, screenpos(vc, old_offset, 1));
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
vc->vc_sw->con_putc(vc, old, oldy, oldx);
notify_update(vc);
}
@@ -534,7 +529,7 @@ void complement_pos(struct vc_data *vc, int offset)
old = scr_readw(p);
new = old ^ vc->vc_complement_mask;
scr_writew(new, p);
- if (DO_UPDATE(vc)) {
+ if (con_should_update(vc)) {
oldx = (offset >> 1) % vc->vc_cols;
oldy = (offset >> 1) / vc->vc_cols;
vc->vc_sw->con_putc(vc, new, oldy, oldx);
@@ -550,7 +545,7 @@ static void insert_char(struct vc_data *vc, unsigned int nr)
scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2);
scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
vc->vc_need_wrap = 0;
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
do_update_region(vc, (unsigned long) p,
vc->vc_cols - vc->vc_x);
}
@@ -563,7 +558,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr)
scr_memsetw(p + vc->vc_cols - vc->vc_x - nr, vc->vc_video_erase_char,
nr * 2);
vc->vc_need_wrap = 0;
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
do_update_region(vc, (unsigned long) p,
vc->vc_cols - vc->vc_x);
}
@@ -583,7 +578,7 @@ static void add_softcursor(struct vc_data *vc)
if ((type & 0x20) && ((softcursor_original & 0x7000) == (i & 0x7000))) i ^= 0x7000;
if ((type & 0x40) && ((i & 0x700) == ((i & 0x7000) >> 4))) i ^= 0x0700;
scr_writew(i, (u16 *) vc->vc_pos);
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
vc->vc_sw->con_putc(vc, i, vc->vc_y, vc->vc_x);
}
@@ -591,7 +586,7 @@ static void hide_softcursor(struct vc_data *vc)
{
if (softcursor_original != -1) {
scr_writew(softcursor_original, (u16 *)vc->vc_pos);
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
vc->vc_sw->con_putc(vc, softcursor_original,
vc->vc_y, vc->vc_x);
softcursor_original = -1;
@@ -608,8 +603,7 @@ static void hide_cursor(struct vc_data *vc)
static void set_cursor(struct vc_data *vc)
{
- if (!IS_FG(vc) || console_blanked ||
- vc->vc_mode == KD_GRAPHICS)
+ if (!con_is_fg(vc) || console_blanked || vc->vc_mode == KD_GRAPHICS)
return;
if (vc->vc_deccm) {
if (vc == sel_cons)
@@ -625,7 +619,7 @@ static void set_origin(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
- if (!CON_IS_VISIBLE(vc) ||
+ if (!con_is_visible(vc) ||
!vc->vc_sw->con_set_origin ||
!vc->vc_sw->con_set_origin(vc))
vc->vc_origin = (unsigned long)vc->vc_screenbuf;
@@ -673,12 +667,12 @@ void redraw_screen(struct vc_data *vc, int is_switch)
struct vc_data *old_vc = vc_cons[fg_console].d;
if (old_vc == vc)
return;
- if (!CON_IS_VISIBLE(vc))
+ if (!con_is_visible(vc))
redraw = 1;
*vc->vc_display_fg = vc;
fg_console = vc->vc_num;
hide_cursor(old_vc);
- if (!CON_IS_VISIBLE(old_vc)) {
+ if (!con_is_visible(old_vc)) {
save_screen(old_vc);
set_origin(old_vc);
}
@@ -954,7 +948,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
tty_do_resize(tty, &ws);
}
- if (CON_IS_VISIBLE(vc))
+ if (con_is_visible(vc))
update_screen(vc);
vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
return err;
@@ -1103,11 +1097,9 @@ static void gotoxay(struct vc_data *vc, int new_x, int new_y)
gotoxy(vc, new_x, vc->vc_decom ? (vc->vc_top + new_y) : new_y);
}
-void scrollback(struct vc_data *vc, int lines)
+void scrollback(struct vc_data *vc)
{
- if (!lines)
- lines = vc->vc_rows / 2;
- scrolldelta(-lines);
+ scrolldelta(-(vc->vc_rows / 2));
}
void scrollfront(struct vc_data *vc, int lines)
@@ -1186,7 +1178,7 @@ static void csi_J(struct vc_data *vc, int vpar)
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
vc->vc_screenbuf_size >> 1);
set_origin(vc);
- if (CON_IS_VISIBLE(vc))
+ if (con_is_visible(vc))
update_screen(vc);
/* fall through */
case 2: /* erase whole display */
@@ -1197,7 +1189,7 @@ static void csi_J(struct vc_data *vc, int vpar)
return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
do_update_region(vc, (unsigned long) start, count);
vc->vc_need_wrap = 0;
}
@@ -1225,7 +1217,7 @@ static void csi_K(struct vc_data *vc, int vpar)
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
vc->vc_need_wrap = 0;
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
do_update_region(vc, (unsigned long) start, count);
}
@@ -1238,7 +1230,7 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi
count = (vpar > vc->vc_cols - vc->vc_x) ? (vc->vc_cols - vc->vc_x) : vpar;
scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count);
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1, count);
vc->vc_need_wrap = 0;
}
@@ -1255,48 +1247,87 @@ static void default_attr(struct vc_data *vc)
struct rgb { u8 r; u8 g; u8 b; };
-static struct rgb rgb_from_256(int i)
+static void rgb_from_256(int i, struct rgb *c)
{
- struct rgb c;
if (i < 8) { /* Standard colours. */
- c.r = i&1 ? 0xaa : 0x00;
- c.g = i&2 ? 0xaa : 0x00;
- c.b = i&4 ? 0xaa : 0x00;
+ c->r = i&1 ? 0xaa : 0x00;
+ c->g = i&2 ? 0xaa : 0x00;
+ c->b = i&4 ? 0xaa : 0x00;
} else if (i < 16) {
- c.r = i&1 ? 0xff : 0x55;
- c.g = i&2 ? 0xff : 0x55;
- c.b = i&4 ? 0xff : 0x55;
+ c->r = i&1 ? 0xff : 0x55;
+ c->g = i&2 ? 0xff : 0x55;
+ c->b = i&4 ? 0xff : 0x55;
} else if (i < 232) { /* 6x6x6 colour cube. */
- c.r = (i - 16) / 36 * 85 / 2;
- c.g = (i - 16) / 6 % 6 * 85 / 2;
- c.b = (i - 16) % 6 * 85 / 2;
+ c->r = (i - 16) / 36 * 85 / 2;
+ c->g = (i - 16) / 6 % 6 * 85 / 2;
+ c->b = (i - 16) % 6 * 85 / 2;
} else /* Grayscale ramp. */
- c.r = c.g = c.b = i * 10 - 2312;
- return c;
+ c->r = c->g = c->b = i * 10 - 2312;
}
-static void rgb_foreground(struct vc_data *vc, struct rgb c)
+static void rgb_foreground(struct vc_data *vc, const struct rgb *c)
{
- u8 hue, max = c.r;
- if (c.g > max)
- max = c.g;
- if (c.b > max)
- max = c.b;
- hue = (c.r > max/2 ? 4 : 0)
- | (c.g > max/2 ? 2 : 0)
- | (c.b > max/2 ? 1 : 0);
- if (hue == 7 && max <= 0x55)
- hue = 0, vc->vc_intensity = 2;
+ u8 hue = 0, max = max3(c->r, c->g, c->b);
+
+ if (c->r > max / 2)
+ hue |= 4;
+ if (c->g > max / 2)
+ hue |= 2;
+ if (c->b > max / 2)
+ hue |= 1;
+
+ if (hue == 7 && max <= 0x55) {
+ hue = 0;
+ vc->vc_intensity = 2;
+	} else if (max > 0xaa) {
+		vc->vc_intensity = 2;
+	} else {
+		vc->vc_intensity = 1;
+	}
+
vc->vc_color = (vc->vc_color & 0xf0) | hue;
}
-static void rgb_background(struct vc_data *vc, struct rgb c)
+static void rgb_background(struct vc_data *vc, const struct rgb *c)
{
/* For backgrounds, err on the dark side. */
vc->vc_color = (vc->vc_color & 0x0f)
- | (c.r&0x80) >> 1 | (c.g&0x80) >> 2 | (c.b&0x80) >> 3;
+ | (c->r&0x80) >> 1 | (c->g&0x80) >> 2 | (c->b&0x80) >> 3;
+}
+
+/*
+ * ITU T.416 Higher colour modes. They break the usual properties of SGR codes
+ * and thus need to be detected and ignored by hand. Strictly speaking, that
+ * standard also wants : rather than ; as separators, contrary to ECMA-48, but
+ * no one produces such codes and almost no one accepts them.
+ *
+ * Subcommands 3 (CMY) and 4 (CMYK) are so insane there's no point in
+ * supporting them.
+ */
+static int vc_t416_color(struct vc_data *vc, int i,
+	void (*set_color)(struct vc_data *vc, const struct rgb *c))
+{
+ struct rgb c;
+
+ i++;
+ if (i > vc->vc_npar)
+ return i;
+
+ if (vc->vc_par[i] == 5 && i < vc->vc_npar) {
+ /* 256 colours -- ubiquitous */
+ i++;
+ rgb_from_256(vc->vc_par[i], &c);
+	} else if (vc->vc_par[i] == 2 && i + 3 <= vc->vc_npar) {
+ /* 24 bit -- extremely rare */
+ c.r = vc->vc_par[i + 1];
+ c.g = vc->vc_par[i + 2];
+ c.b = vc->vc_par[i + 3];
+ i += 3;
+ } else
+ return i;
+
+ set_color(vc, &c);
+
+ return i;
}
/* console_lock is held */
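
rgb_from_256() keeps the conventional xterm 256-colour layout: 0-15 are the classic ANSI palette, 16-231 a 6x6x6 colour cube, 232-255 a grey ramp. A standalone check of the cube arithmetic, runnable with any C compiler (index 196 is xterm's bright red):

#include <stdio.h>

int main(void)
{
	int i = 196;				/* cube index */
	int r = (i - 16) / 36 * 85 / 2;		/* 180/36 = 5 -> 5*85/2 = 212 */
	int g = (i - 16) / 6 % 6 * 85 / 2;	/* 30 % 6 = 0 -> 0 */
	int b = (i - 16) % 6 * 85 / 2;		/* 180 % 6 = 0 -> 0 */

	printf("%d -> #%02x%02x%02x\n", i, r, g, b);	/* 196 -> #d40000 */
	return 0;
}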
@@ -1306,135 +1337,91 @@ static void csi_m(struct vc_data *vc)
for (i = 0; i <= vc->vc_npar; i++)
switch (vc->vc_par[i]) {
- case 0: /* all attributes off */
- default_attr(vc);
- break;
- case 1:
- vc->vc_intensity = 2;
- break;
- case 2:
- vc->vc_intensity = 0;
- break;
- case 3:
- vc->vc_italic = 1;
- break;
- case 4:
- vc->vc_underline = 1;
- break;
- case 5:
- vc->vc_blink = 1;
- break;
- case 7:
- vc->vc_reverse = 1;
- break;
- case 10: /* ANSI X3.64-1979 (SCO-ish?)
- * Select primary font, don't display
- * control chars if defined, don't set
- * bit 8 on output.
- */
- vc->vc_translate = set_translate(vc->vc_charset == 0
- ? vc->vc_G0_charset
- : vc->vc_G1_charset, vc);
- vc->vc_disp_ctrl = 0;
- vc->vc_toggle_meta = 0;
- break;
- case 11: /* ANSI X3.64-1979 (SCO-ish?)
- * Select first alternate font, lets
- * chars < 32 be displayed as ROM chars.
- */
- vc->vc_translate = set_translate(IBMPC_MAP, vc);
- vc->vc_disp_ctrl = 1;
- vc->vc_toggle_meta = 0;
- break;
- case 12: /* ANSI X3.64-1979 (SCO-ish?)
- * Select second alternate font, toggle
- * high bit before displaying as ROM char.
- */
- vc->vc_translate = set_translate(IBMPC_MAP, vc);
- vc->vc_disp_ctrl = 1;
- vc->vc_toggle_meta = 1;
- break;
- case 21:
- case 22:
- vc->vc_intensity = 1;
- break;
- case 23:
- vc->vc_italic = 0;
- break;
- case 24:
- vc->vc_underline = 0;
- break;
- case 25:
- vc->vc_blink = 0;
- break;
- case 27:
- vc->vc_reverse = 0;
- break;
- case 38: /* ITU T.416
- * Higher colour modes.
- * They break the usual properties of SGR codes
- * and thus need to be detected and ignored by
- * hand. Strictly speaking, that standard also
- * wants : rather than ; as separators, contrary
- * to ECMA-48, but no one produces such codes
- * and almost no one accepts them.
- */
- i++;
- if (i > vc->vc_npar)
- break;
- if (vc->vc_par[i] == 5 && /* 256 colours */
- i < vc->vc_npar) { /* ubiquitous */
- i++;
- rgb_foreground(vc,
- rgb_from_256(vc->vc_par[i]));
- } else if (vc->vc_par[i] == 2 && /* 24 bit */
- i <= vc->vc_npar + 3) {/* extremely rare */
- struct rgb c = {
- .r = vc->vc_par[i + 1],
- .g = vc->vc_par[i + 2],
- .b = vc->vc_par[i + 3],
- };
- rgb_foreground(vc, c);
- i += 3;
- }
- /* Subcommands 3 (CMY) and 4 (CMYK) are so insane
- * there's no point in supporting them.
- */
- break;
- case 48:
- i++;
- if (i > vc->vc_npar)
- break;
- if (vc->vc_par[i] == 5 && /* 256 colours */
- i < vc->vc_npar) {
- i++;
- rgb_background(vc,
- rgb_from_256(vc->vc_par[i]));
- } else if (vc->vc_par[i] == 2 && /* 24 bit */
- i <= vc->vc_npar + 3) {
- struct rgb c = {
- .r = vc->vc_par[i + 1],
- .g = vc->vc_par[i + 2],
- .b = vc->vc_par[i + 3],
- };
- rgb_background(vc, c);
- i += 3;
- }
- break;
- case 39:
- vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
- break;
- case 49:
- vc->vc_color = (vc->vc_def_color & 0xf0) | (vc->vc_color & 0x0f);
- break;
- default:
- if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
- vc->vc_color = color_table[vc->vc_par[i] - 30]
- | (vc->vc_color & 0xf0);
- else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
- vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4)
- | (vc->vc_color & 0x0f);
- break;
+ case 0: /* all attributes off */
+ default_attr(vc);
+ break;
+ case 1:
+ vc->vc_intensity = 2;
+ break;
+ case 2:
+ vc->vc_intensity = 0;
+ break;
+ case 3:
+ vc->vc_italic = 1;
+ break;
+ case 4:
+ vc->vc_underline = 1;
+ break;
+ case 5:
+ vc->vc_blink = 1;
+ break;
+ case 7:
+ vc->vc_reverse = 1;
+ break;
+ case 10: /* ANSI X3.64-1979 (SCO-ish?)
+ * Select primary font, don't display control chars if
+ * defined, don't set bit 8 on output.
+ */
+ vc->vc_translate = set_translate(vc->vc_charset == 0
+ ? vc->vc_G0_charset
+ : vc->vc_G1_charset, vc);
+ vc->vc_disp_ctrl = 0;
+ vc->vc_toggle_meta = 0;
+ break;
+ case 11: /* ANSI X3.64-1979 (SCO-ish?)
+ * Select first alternate font, lets chars < 32 be
+ * displayed as ROM chars.
+ */
+ vc->vc_translate = set_translate(IBMPC_MAP, vc);
+ vc->vc_disp_ctrl = 1;
+ vc->vc_toggle_meta = 0;
+ break;
+ case 12: /* ANSI X3.64-1979 (SCO-ish?)
+ * Select second alternate font, toggle high bit
+ * before displaying as ROM char.
+ */
+ vc->vc_translate = set_translate(IBMPC_MAP, vc);
+ vc->vc_disp_ctrl = 1;
+ vc->vc_toggle_meta = 1;
+ break;
+ case 21:
+ case 22:
+ vc->vc_intensity = 1;
+ break;
+ case 23:
+ vc->vc_italic = 0;
+ break;
+ case 24:
+ vc->vc_underline = 0;
+ break;
+ case 25:
+ vc->vc_blink = 0;
+ break;
+ case 27:
+ vc->vc_reverse = 0;
+ break;
+ case 38:
+ i = vc_t416_color(vc, i, rgb_foreground);
+ break;
+ case 48:
+ i = vc_t416_color(vc, i, rgb_background);
+ break;
+ case 39:
+ vc->vc_color = (vc->vc_def_color & 0x0f) |
+ (vc->vc_color & 0xf0);
+ break;
+ case 49:
+ vc->vc_color = (vc->vc_def_color & 0xf0) |
+ (vc->vc_color & 0x0f);
+ break;
+ default:
+ if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
+ vc->vc_color = color_table[vc->vc_par[i] - 30]
+ | (vc->vc_color & 0xf0);
+ else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
+ vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4)
+ | (vc->vc_color & 0x0f);
+ break;
}
update_attr(vc);
}
@@ -1496,7 +1483,6 @@ static void set_mode(struct vc_data *vc, int on_off)
clr_kbd(vc, decckm);
break;
case 3: /* 80/132 mode switch unimplemented */
- vc->vc_deccolm = on_off;
#if 0
vc_resize(deccolm ? 132 : 80, vc->vc_rows);
/* this alone does not suffice; some user mode
@@ -2178,18 +2164,20 @@ static int is_double_width(uint32_t ucs)
return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
}
+static void con_flush(struct vc_data *vc, unsigned long draw_from,
+ unsigned long draw_to, int *draw_x)
+{
+ if (*draw_x < 0)
+ return;
+
+ vc->vc_sw->con_putcs(vc, (u16 *)draw_from,
+ (u16 *)draw_to - (u16 *)draw_from, vc->vc_y, *draw_x);
+ *draw_x = -1;
+}
+
/* acquires console_lock */
static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
-#ifdef VT_BUF_VRAM_ONLY
-#define FLUSH do { } while(0);
-#else
-#define FLUSH if (draw_x >= 0) { \
- vc->vc_sw->con_putcs(vc, (u16 *)draw_from, (u16 *)draw_to - (u16 *)draw_from, vc->vc_y, draw_x); \
- draw_x = -1; \
- }
-#endif
-
int c, tc, ok, n = 0, draw_x = -1;
unsigned int currcons;
unsigned long draw_from = 0, draw_to = 0;
@@ -2226,7 +2214,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
charmask = himask ? 0x1ff : 0xff;
/* undraw cursor first */
- if (IS_FG(vc))
+ if (con_is_fg(vc))
hide_cursor(vc);
param.vc = vc;
@@ -2381,12 +2369,13 @@ rescan_last_byte:
} else {
vc_attr = ((vc->vc_attr) & 0x88) | (((vc->vc_attr) & 0x70) >> 4) | (((vc->vc_attr) & 0x07) << 4);
}
- FLUSH
+ con_flush(vc, draw_from, draw_to, &draw_x);
}
while (1) {
if (vc->vc_need_wrap || vc->vc_decim)
- FLUSH
+ con_flush(vc, draw_from, draw_to,
+ &draw_x);
if (vc->vc_need_wrap) {
cr(vc);
lf(vc);
@@ -2397,7 +2386,7 @@ rescan_last_byte:
((vc_attr << 8) & ~himask) + ((tc & 0x100) ? himask : 0) + (tc & 0xff) :
(vc_attr << 8) + tc,
(u16 *) vc->vc_pos);
- if (DO_UPDATE(vc) && draw_x < 0) {
+ if (con_should_update(vc) && draw_x < 0) {
draw_x = vc->vc_x;
draw_from = vc->vc_pos;
}
@@ -2416,9 +2405,8 @@ rescan_last_byte:
}
notify_write(vc, c);
- if (inverse) {
- FLUSH
- }
+ if (inverse)
+ con_flush(vc, draw_from, draw_to, &draw_x);
if (rescan) {
rescan = 0;
@@ -2429,15 +2417,14 @@ rescan_last_byte:
}
continue;
}
- FLUSH
+ con_flush(vc, draw_from, draw_to, &draw_x);
do_con_trol(tty, vc, orig);
}
- FLUSH
+ con_flush(vc, draw_from, draw_to, &draw_x);
console_conditional_schedule();
console_unlock();
notify_update(vc);
return n;
-#undef FLUSH
}
/*
@@ -2471,7 +2458,7 @@ static void console_callback(struct work_struct *ignored)
if (scrollback_delta) {
struct vc_data *vc = vc_cons[fg_console].d;
clear_selection();
- if (vc->vc_mode == KD_TEXT)
+ if (vc->vc_mode == KD_TEXT && vc->vc_sw->con_scrolldelta)
vc->vc_sw->con_scrolldelta(vc, scrollback_delta);
scrollback_delta = 0;
}
@@ -2583,7 +2570,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
goto quit;
/* undraw cursor first */
- if (IS_FG(vc))
+ if (con_is_fg(vc))
hide_cursor(vc);
start = (ushort *)vc->vc_pos;
@@ -2594,7 +2581,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
c = *b++;
if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
if (cnt > 0) {
- if (CON_IS_VISIBLE(vc))
+ if (con_is_visible(vc))
vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
vc->vc_x += cnt;
if (vc->vc_need_wrap)
@@ -2626,7 +2613,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
myx++;
}
if (cnt > 0) {
- if (CON_IS_VISIBLE(vc))
+ if (con_is_visible(vc))
vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
vc->vc_x += cnt;
if (vc->vc_x == vc->vc_cols) {
@@ -3173,7 +3160,7 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
j = i;
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
k = i;
save_screen(vc);
}
@@ -3981,7 +3968,7 @@ static void set_palette(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
- if (vc->vc_mode != KD_GRAPHICS)
+ if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_set_palette)
vc->vc_sw->con_set_palette(vc, color_table);
}
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 97d5a74558a3..f62c598810ff 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -1006,16 +1006,10 @@ int vt_ioctl(struct tty_struct *tty,
break;
case PIO_UNIMAPCLR:
- { struct unimapinit ui;
if (!perm)
return -EPERM;
- ret = copy_from_user(&ui, up, sizeof(struct unimapinit));
- if (ret)
- ret = -EFAULT;
- else
- con_clear_unimap(vc, &ui);
+ con_clear_unimap(vc);
break;
- }
case PIO_UNIMAP:
case GIO_UNIMAP:
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 3644a3500b70..5e5b9eb7ebf6 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -4,8 +4,9 @@ config USB_CHIPIDEA
select EXTCON
help
Say Y here if your system has a dual role high speed USB
- controller based on ChipIdea silicon IP. Currently, only the
- peripheral mode is supported.
+	  controller based on ChipIdea silicon IP. It supports
+	  dual-role switch (ID, OTG FSM, sysfs), host-only and
+	  peripheral-only modes of operation.
When compiled dynamically, the module will be called ci-hdrc.ko.
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 94a14f5dc4d4..71912301ef7f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -946,7 +946,7 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
DECLARE_WAITQUEUE(wait, current);
struct async_icount old, new;
- if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD ))
+ if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD))
return -EINVAL;
do {
spin_lock_irq(&acm->read_lock);
@@ -1146,7 +1146,7 @@ static int acm_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_cdc_union_desc *union_header = NULL;
- struct usb_cdc_country_functional_desc *cfd = NULL;
+ struct usb_cdc_call_mgmt_descriptor *cmgmd = NULL;
unsigned char *buffer = intf->altsetting->extra;
int buflen = intf->altsetting->extralen;
struct usb_interface *control_interface;
@@ -1155,18 +1155,16 @@ static int acm_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *epread = NULL;
struct usb_endpoint_descriptor *epwrite = NULL;
struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct usb_cdc_parsed_header h;
struct acm *acm;
int minor;
int ctrlsize, readsize;
u8 *buf;
- u8 ac_management_function = 0;
- u8 call_management_function = 0;
- int call_interface_num = -1;
- int data_interface_num = -1;
+ int call_intf_num = -1;
+ int data_intf_num = -1;
unsigned long quirks;
int num_rx_buf;
int i;
- unsigned int elength = 0;
int combined_interfaces = 0;
struct device *tty_dev;
int rv = -ENOMEM;
@@ -1210,70 +1208,22 @@ static int acm_probe(struct usb_interface *intf,
}
}
- while (buflen > 0) {
- elength = buffer[0];
- if (!elength) {
- dev_err(&intf->dev, "skipping garbage byte\n");
- elength = 1;
- goto next_desc;
- }
- if (buffer[1] != USB_DT_CS_INTERFACE) {
- dev_err(&intf->dev, "skipping garbage\n");
- goto next_desc;
- }
-
- switch (buffer[2]) {
- case USB_CDC_UNION_TYPE: /* we've found it */
- if (elength < sizeof(struct usb_cdc_union_desc))
- goto next_desc;
- if (union_header) {
- dev_err(&intf->dev, "More than one "
- "union descriptor, skipping ...\n");
- goto next_desc;
- }
- union_header = (struct usb_cdc_union_desc *)buffer;
- break;
- case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
- if (elength < sizeof(struct usb_cdc_country_functional_desc))
- goto next_desc;
- cfd = (struct usb_cdc_country_functional_desc *)buffer;
- break;
- case USB_CDC_HEADER_TYPE: /* maybe check version */
- break; /* for now we ignore it */
- case USB_CDC_ACM_TYPE:
- if (elength < 4)
- goto next_desc;
- ac_management_function = buffer[3];
- break;
- case USB_CDC_CALL_MANAGEMENT_TYPE:
- if (elength < 5)
- goto next_desc;
- call_management_function = buffer[3];
- call_interface_num = buffer[4];
- break;
- default:
- /*
- * there are LOTS more CDC descriptors that
- * could legitimately be found here.
- */
- dev_dbg(&intf->dev, "Ignoring descriptor: "
- "type %02x, length %ud\n",
- buffer[2], elength);
- break;
- }
-next_desc:
- buflen -= elength;
- buffer += elength;
- }
+ cdc_parse_cdc_header(&h, intf, buffer, buflen);
+ union_header = h.usb_cdc_union_desc;
+ cmgmd = h.usb_cdc_call_mgmt_descriptor;
+ if (cmgmd)
+ call_intf_num = cmgmd->bDataInterface;
if (!union_header) {
- if (call_interface_num > 0) {
+ if (call_intf_num > 0) {
dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
/* quirks for Droids MuIn LCD */
- if (quirks & NO_DATA_INTERFACE)
+ if (quirks & NO_DATA_INTERFACE) {
data_interface = usb_ifnum_to_if(usb_dev, 0);
- else
- data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
+ } else {
+ data_intf_num = call_intf_num;
+ data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
+ }
control_interface = intf;
} else {
if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
@@ -1287,8 +1237,9 @@ next_desc:
}
}
} else {
+ data_intf_num = union_header->bSlaveInterface0;
control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
- data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
+ data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
}
if (!control_interface || !data_interface) {
@@ -1296,7 +1247,7 @@ next_desc:
return -ENODEV;
}
- if (data_interface_num != call_interface_num)
+ if (data_intf_num != call_intf_num)
dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
if (control_interface == data_interface) {
@@ -1379,11 +1330,8 @@ made_compressed_probe:
goto alloc_fail;
minor = acm_alloc_minor(acm);
- if (minor < 0) {
- dev_err(&intf->dev, "no more free acm devices\n");
- kfree(acm);
- return -ENODEV;
- }
+ if (minor < 0)
+ goto alloc_fail1;
ctrlsize = usb_endpoint_maxp(epctrl);
readsize = usb_endpoint_maxp(epread) *
@@ -1394,7 +1342,8 @@ made_compressed_probe:
acm->data = data_interface;
acm->minor = minor;
acm->dev = usb_dev;
- acm->ctrl_caps = ac_management_function;
+ if (h.usb_cdc_acm_descriptor)
+ acm->ctrl_caps = h.usb_cdc_acm_descriptor->bmCapabilities;
if (quirks & NO_CAP_LINE)
acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
acm->ctrlsize = ctrlsize;
@@ -1488,7 +1437,10 @@ made_compressed_probe:
if (i < 0)
goto alloc_fail7;
- if (cfd) { /* export the country data */
+ if (h.usb_cdc_country_functional_desc) { /* export the country data */
+		struct usb_cdc_country_functional_desc *cfd =
+ h.usb_cdc_country_functional_desc;
+
acm->country_codes = kmalloc(cfd->bLength - 4, GFP_KERNEL);
if (!acm->country_codes)
goto skip_countries;
@@ -1572,6 +1524,7 @@ alloc_fail4:
usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
acm_release_minor(acm);
+alloc_fail1:
kfree(acm);
alloc_fail:
return rv;
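
With cdc_parse_cdc_header() factored into usbcore (see the message.c hunk below), a CDC class driver's probe shrinks to one parser call plus NULL checks on the descriptor pointers it needs, as cdc-acm now does. A sketch of the caller side, assuming a kernel build context:

#include <linux/usb.h>
#include <linux/usb/cdc.h>

static int example_parse(struct usb_interface *intf)
{
	struct usb_cdc_parsed_header h;

	cdc_parse_cdc_header(&h, intf, intf->altsetting->extra,
			     intf->altsetting->extralen);

	/* each pointer in h is NULL unless that descriptor was present */
	if (h.usb_cdc_union_desc)
		dev_dbg(&intf->dev, "data interface %u\n",
			h.usb_cdc_union_desc->bSlaveInterface0);

	return 0;
}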
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 61ea87917433..337948c42110 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -875,38 +875,18 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
int rv = -EINVAL;
struct usb_host_interface *iface;
struct usb_endpoint_descriptor *ep;
- struct usb_cdc_dmm_desc *dmhd;
+ struct usb_cdc_parsed_header hdr;
u8 *buffer = intf->altsetting->extra;
int buflen = intf->altsetting->extralen;
u16 maxcom = WDM_DEFAULT_BUFSIZE;
if (!buffer)
goto err;
- while (buflen > 2) {
- if (buffer[1] != USB_DT_CS_INTERFACE) {
- dev_err(&intf->dev, "skipping garbage\n");
- goto next_desc;
- }
- switch (buffer[2]) {
- case USB_CDC_HEADER_TYPE:
- break;
- case USB_CDC_DMM_TYPE:
- dmhd = (struct usb_cdc_dmm_desc *)buffer;
- maxcom = le16_to_cpu(dmhd->wMaxCommand);
- dev_dbg(&intf->dev,
- "Finding maximum buffer length: %d", maxcom);
- break;
- default:
- dev_err(&intf->dev,
- "Ignoring extra header, type %d, length %d\n",
- buffer[2], buffer[0]);
- break;
- }
-next_desc:
- buflen -= buffer[0];
- buffer += buffer[0];
- }
+ cdc_parse_cdc_header(&hdr, intf, buffer, buflen);
+
+ if (hdr.usb_cdc_dmm_desc)
+ maxcom = le16_to_cpu(hdr.usb_cdc_dmm_desc->wMaxCommand);
iface = intf->cur_altsetting;
if (iface->desc.bNumEndpoints != 1)
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index e3d01619d6b3..5ef8da6e67c3 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -131,15 +131,17 @@ EXPORT_SYMBOL_GPL(usb_get_dr_mode);
* of_usb_get_dr_mode_by_phy - Get dual role mode for the controller device
* which is associated with the given phy device_node
* @np: Pointer to the given phy device_node
+ * @arg0: phandle args[0] for phys with #phy-cells >= 1, or -1 for
+ *        phys whose #phy-cells is 0
*
* In dts a usb controller associates with phy devices. The function gets
* the string from property 'dr_mode' of the controller associated with the
 * given phy device node, and returns the corresponding enum usb_dr_mode.
*/
-enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np)
+enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
{
struct device_node *controller = NULL;
- struct device_node *phy;
+ struct of_phandle_args args;
const char *dr_mode;
int index;
int err;
@@ -148,12 +150,24 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np)
controller = of_find_node_with_property(controller, "phys");
index = 0;
do {
- phy = of_parse_phandle(controller, "phys", index);
- of_node_put(phy);
- if (phy == phy_np)
+ if (arg0 == -1) {
+ args.np = of_parse_phandle(controller, "phys",
+ index);
+ args.args_count = 0;
+ } else {
+ err = of_parse_phandle_with_args(controller,
+ "phys", "#phy-cells",
+ index, &args);
+ if (err)
+ break;
+ }
+
+ of_node_put(args.np);
+ if (args.np == np && (args.args_count == 0 ||
+ args.args[0] == arg0))
goto finish;
index++;
- } while (phy);
+ } while (args.np);
} while (controller);
finish:
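
The new arg0 parameter lets a multi-port PHY ask for the dr_mode of precisely the controller that references it with a given phandle argument. A hedged call-site sketch, assuming a kernel build context and a controller node containing "phys = <&usbphy 0>;":

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/usb/of.h>

static void example_get_mode(struct device_node *phy_np)
{
	/* ask for the mode of the controller wired to port 0 of this PHY */
	enum usb_dr_mode mode = of_usb_get_dr_mode_by_phy(phy_np, 0);

	pr_info("port 0 dr_mode: %d\n", mode);
}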
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index ea681f157368..0406a59f0551 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -12,6 +12,7 @@
#include <linux/nls.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
+#include <linux/usb/cdc.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
#include <asm/byteorder.h>
@@ -2023,3 +2024,155 @@ int usb_driver_set_configuration(struct usb_device *udev, int config)
return 0;
}
EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
+
+/**
+ * cdc_parse_cdc_header - parse the extra headers present in CDC devices
+ * @hdr: the place to put the results of the parsing
+ * @intf: the interface for which parsing is requested
+ * @buffer: pointer to the extra headers to be parsed
+ * @buflen: length of the extra headers
+ *
+ * This evaluates the extra headers present in CDC devices which
+ * bind the interfaces for data and control and provide details
+ * about the capabilities of the device.
+ *
+ * Return: number of descriptors parsed or -EINVAL
+ * if the header is contradictory beyond salvage
+ */
+
+int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
+ struct usb_interface *intf,
+ u8 *buffer,
+ int buflen)
+{
+ /* duplicates are ignored */
+ struct usb_cdc_union_desc *union_header = NULL;
+
+ /* duplicates are not tolerated */
+ struct usb_cdc_header_desc *header = NULL;
+ struct usb_cdc_ether_desc *ether = NULL;
+ struct usb_cdc_mdlm_detail_desc *detail = NULL;
+ struct usb_cdc_mdlm_desc *desc = NULL;
+
+ unsigned int elength;
+ int cnt = 0;
+
+ memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
+ hdr->phonet_magic_present = false;
+ while (buflen > 0) {
+ elength = buffer[0];
+ if (!elength) {
+ dev_err(&intf->dev, "skipping garbage byte\n");
+ elength = 1;
+ goto next_desc;
+ }
+ if (buffer[1] != USB_DT_CS_INTERFACE) {
+ dev_err(&intf->dev, "skipping garbage\n");
+ goto next_desc;
+ }
+
+ switch (buffer[2]) {
+ case USB_CDC_UNION_TYPE: /* we've found it */
+ if (elength < sizeof(struct usb_cdc_union_desc))
+ goto next_desc;
+ if (union_header) {
+ dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
+ goto next_desc;
+ }
+ union_header = (struct usb_cdc_union_desc *)buffer;
+ break;
+ case USB_CDC_COUNTRY_TYPE:
+ if (elength < sizeof(struct usb_cdc_country_functional_desc))
+ goto next_desc;
+ hdr->usb_cdc_country_functional_desc =
+ (struct usb_cdc_country_functional_desc *)buffer;
+ break;
+ case USB_CDC_HEADER_TYPE:
+ if (elength != sizeof(struct usb_cdc_header_desc))
+ goto next_desc;
+ if (header)
+ return -EINVAL;
+ header = (struct usb_cdc_header_desc *)buffer;
+ break;
+ case USB_CDC_ACM_TYPE:
+ if (elength < sizeof(struct usb_cdc_acm_descriptor))
+ goto next_desc;
+ hdr->usb_cdc_acm_descriptor =
+ (struct usb_cdc_acm_descriptor *)buffer;
+ break;
+ case USB_CDC_ETHERNET_TYPE:
+ if (elength != sizeof(struct usb_cdc_ether_desc))
+ goto next_desc;
+ if (ether)
+ return -EINVAL;
+ ether = (struct usb_cdc_ether_desc *)buffer;
+ break;
+ case USB_CDC_CALL_MANAGEMENT_TYPE:
+ if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
+ goto next_desc;
+ hdr->usb_cdc_call_mgmt_descriptor =
+ (struct usb_cdc_call_mgmt_descriptor *)buffer;
+ break;
+ case USB_CDC_DMM_TYPE:
+ if (elength < sizeof(struct usb_cdc_dmm_desc))
+ goto next_desc;
+ hdr->usb_cdc_dmm_desc =
+ (struct usb_cdc_dmm_desc *)buffer;
+ break;
+ case USB_CDC_MDLM_TYPE:
+			if (elength < sizeof(struct usb_cdc_mdlm_desc))
+ goto next_desc;
+ if (desc)
+ return -EINVAL;
+ desc = (struct usb_cdc_mdlm_desc *)buffer;
+ break;
+ case USB_CDC_MDLM_DETAIL_TYPE:
+			if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
+ goto next_desc;
+ if (detail)
+ return -EINVAL;
+ detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
+ break;
+ case USB_CDC_NCM_TYPE:
+ if (elength < sizeof(struct usb_cdc_ncm_desc))
+ goto next_desc;
+ hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
+ break;
+ case USB_CDC_MBIM_TYPE:
+ if (elength < sizeof(struct usb_cdc_mbim_desc))
+ goto next_desc;
+
+ hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
+ break;
+ case USB_CDC_MBIM_EXTENDED_TYPE:
+ if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
+ break;
+ hdr->usb_cdc_mbim_extended_desc =
+ (struct usb_cdc_mbim_extended_desc *)buffer;
+ break;
+ case CDC_PHONET_MAGIC_NUMBER:
+ hdr->phonet_magic_present = true;
+ break;
+ default:
+ /*
+ * there are LOTS more CDC descriptors that
+ * could legitimately be found here.
+ */
+ dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
+ buffer[2], elength);
+ goto next_desc;
+ }
+ cnt++;
+next_desc:
+ buflen -= elength;
+ buffer += elength;
+ }
+ hdr->usb_cdc_union_desc = union_header;
+ hdr->usb_cdc_header_desc = header;
+ hdr->usb_cdc_mdlm_detail_desc = detail;
+ hdr->usb_cdc_mdlm_desc = desc;
+ hdr->usb_cdc_ether_desc = ether;
+ return cnt;
+}
+EXPORT_SYMBOL(cdc_parse_cdc_header);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 944a6dca0fcb..d2e50a27140c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -128,6 +128,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
+ { USB_DEVICE(0x04f3, 0x0381), .driver_info =
+ USB_QUIRK_NO_LPM },
+
{ USB_DEVICE(0x04f3, 0x21b8), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index c1f29caa8990..e838701d6dd5 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -55,6 +55,7 @@ endchoice
config USB_DWC2_PCI
tristate "DWC2 PCI"
depends on PCI
+ depends on USB_GADGET || !USB_GADGET
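+	# tristate trick: resolves to m when USB_GADGET=m, so this driver
+	# cannot be built-in while the gadget core is modular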
default n
select NOP_USB_XCEIV
help
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index dec0b21fc626..9fae0291cd69 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -166,7 +166,7 @@ struct dwc2_hsotg_req;
* means that it is sending data to the Host.
* @index: The index for the endpoint registers.
* @mc: Multi Count - number of transactions per microframe
- * @interval - Interval for periodic endpoints
+ * @interval: Interval for periodic endpoints, in frames or microframes.
* @name: The name array passed to the USB core.
* @halted: Set if the endpoint has been halted.
* @periodic: Set if this is a periodic ep, such as Interrupt
@@ -177,6 +177,8 @@ struct dwc2_hsotg_req;
* @fifo_load: The amount of data loaded into the FIFO (periodic IN)
* @last_load: The offset of data for the last start of request.
* @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
+ * @target_frame: Targeted frame number for the next ISOC transfer
+ * @frame_overrun: Indicates SOF number overrun in DSTS
*
 * This is the driver's state for each registered endpoint, allowing it
* to keep track of transactions that need doing. Each endpoint has a
@@ -213,7 +215,9 @@ struct dwc2_hsotg_ep {
unsigned int periodic:1;
unsigned int isochronous:1;
unsigned int send_zlp:1;
- unsigned int has_correct_parity:1;
+ unsigned int target_frame;
+#define TARGET_FRAME_INITIAL 0xFFFFFFFF
+ bool frame_overrun;
char name[10];
};
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 26cf09d0fe3c..af46adfae41c 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -97,6 +97,25 @@ static inline bool using_dma(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
+ * @hs_ep: The endpoint
+ *
+ * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
+ * If an overrun occurs it will wrap the value and set the frame_overrun flag.
+ */
+static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
+{
+ hs_ep->target_frame += hs_ep->interval;
+ if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
+ hs_ep->frame_overrun = 1;
+ hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
+ } else {
+ hs_ep->frame_overrun = 0;
+ }
+}
+
+/**
* dwc2_hsotg_en_gsint - enable one or more of the general interrupt
* @hsotg: The device state
* @ints: A bitmask of the interrupts to enable
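
dwc2_gadget_incr_frame_num() advances target_frame by the endpoint interval and wraps it at the width of the SOFFN field, remembering the wrap in frame_overrun so later comparisons don't mistake a small wrapped value for the past. A standalone model of the arithmetic (DSTS_SOFFN_LIMIT assumed to be the 14-bit mask 0x3fff, per the dwc2 register layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SOFFN_LIMIT 0x3fffu	/* assumed 14-bit frame-number limit */

static void incr_frame(uint32_t *target, bool *overrun, uint32_t interval)
{
	*target += interval;
	if (*target > SOFFN_LIMIT) {
		*overrun = true;	/* wrapped: remember it */
		*target &= SOFFN_LIMIT;
	} else {
		*overrun = false;
	}
}

int main(void)
{
	uint32_t target = 0x3ffe;
	bool overrun = false;

	incr_frame(&target, &overrun, 8);
	printf("target=0x%04x overrun=%d\n", target, overrun);	/* 0x0006 1 */
	return 0;
}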
@@ -504,6 +523,23 @@ static unsigned get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
}
/**
+ * dwc2_hsotg_read_frameno - read current frame number
+ * @hsotg: The device instance
+ *
+ * Return the current frame number.
+ */
+static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
+{
+ u32 dsts;
+
+ dsts = dwc2_readl(hsotg->regs + DSTS);
+ dsts &= DSTS_SOFFN_MASK;
+ dsts >>= DSTS_SOFFN_SHIFT;
+
+ return dsts;
+}
+
+/**
* dwc2_hsotg_start_req - start a USB request from an endpoint's queue
* @hsotg: The controller state.
* @hs_ep: The endpoint to process a request for
@@ -631,8 +667,17 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
__func__, &ureq->dma, dma_reg);
}
+ if (hs_ep->isochronous && hs_ep->interval == 1) {
+ hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+ dwc2_gadget_incr_frame_num(hs_ep);
+
+ if (hs_ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+ }
+
ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
- ctrl |= DXEPCTL_USBACTEP;
dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
@@ -659,14 +704,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
}
/*
- * clear the INTknTXFEmpMsk when we start request, more as a aide
- * to debugging to see what is going on.
- */
- if (dir_in)
- dwc2_writel(DIEPMSK_INTKNTXFEMPMSK,
- hsotg->regs + DIEPINT(index));
-
- /*
* Note, trying to clear the NAK here causes problems with transmit
* on the S3C6400 ending up with the TXFIFO becoming full.
*/
@@ -773,6 +810,30 @@ static void dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
hs_req->saved_req_buf = NULL;
}
+/**
+ * dwc2_gadget_target_frame_elapsed - Checks target frame
+ * @hs_ep: The driver endpoint to check
+ *
+ * Returns true if the target frame has elapsed; in that case the
+ * corresponding transfer should be dropped.
+ */
+static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u32 target_frame = hs_ep->target_frame;
+ u32 current_frame = dwc2_hsotg_read_frameno(hsotg);
+ bool frame_overrun = hs_ep->frame_overrun;
+
+ if (!frame_overrun && current_frame >= target_frame)
+ return true;
+
+ if (frame_overrun && current_frame >= target_frame &&
+ ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
+ return true;
+
+ return false;
+}
+
static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
@@ -812,9 +873,18 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
first = list_empty(&hs_ep->queue);
list_add_tail(&hs_req->queue, &hs_ep->queue);
- if (first)
- dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+ if (first) {
+ if (!hs_ep->isochronous) {
+ dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+ return 0;
+ }
+
+ while (dwc2_gadget_target_frame_elapsed(hs_ep))
+ dwc2_gadget_incr_frame_num(hs_ep);
+ if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
+ dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+ }
return 0;
}
@@ -1035,6 +1105,42 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
}
/**
+ * dwc2_gadget_start_next_request - Starts next request from ep queue
+ * @hs_ep: Endpoint structure
+ *
+ * If the queue is empty and the EP is ISOC-OUT, unmask OUTTKNEPDIS (which is
+ * masked in its handler) so that resynchronization can take place.
+ */
+static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
+{
+ u32 mask;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+ struct dwc2_hsotg_req *hs_req;
+ u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
+
+ if (!list_empty(&hs_ep->queue)) {
+ hs_req = get_ep_head(hs_ep);
+ dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
+ return;
+ }
+ if (!hs_ep->isochronous)
+ return;
+
+ if (dir_in) {
+ dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
+ __func__);
+ } else {
+ dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
+ __func__);
+ mask = dwc2_readl(hsotg->regs + epmsk_reg);
+ mask |= DOEPMSK_OUTTKNEPDISMSK;
+ dwc2_writel(mask, hsotg->regs + epmsk_reg);
+ }
+}
+
+/**
* dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
* @hsotg: The device state
* @ctrl: USB control request
@@ -1044,7 +1150,6 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
{
struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
struct dwc2_hsotg_req *hs_req;
- bool restart;
bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
struct dwc2_hsotg_ep *ep;
int ret;
@@ -1127,12 +1232,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
/* If we have pending request, then start it */
if (!ep->req) {
- restart = !list_empty(&ep->queue);
- if (restart) {
- hs_req = get_ep_head(ep);
- dwc2_hsotg_start_req(hsotg, ep,
- hs_req, false);
- }
+ dwc2_gadget_start_next_request(ep);
}
}
@@ -1373,7 +1473,6 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_req *hs_req,
int result)
{
- bool restart;
if (!hs_req) {
dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
@@ -1417,11 +1516,7 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
*/
if (!hs_ep->req && result >= 0) {
- restart = !list_empty(&hs_ep->queue);
- if (restart) {
- hs_req = get_ep_head(hs_ep);
- dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
- }
+ dwc2_gadget_start_next_request(hs_ep);
}
}
@@ -1597,32 +1692,16 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
* adjust the ISOC parity here.
*/
if (!using_dma(hsotg)) {
- hs_ep->has_correct_parity = 1;
if (hs_ep->isochronous && hs_ep->interval == 1)
dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
+ else if (hs_ep->isochronous && hs_ep->interval > 1)
+ dwc2_gadget_incr_frame_num(hs_ep);
}
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
}
/**
- * dwc2_hsotg_read_frameno - read current frame number
- * @hsotg: The device instance
- *
- * Return the current frame number
- */
-static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
-{
- u32 dsts;
-
- dsts = dwc2_readl(hsotg->regs + DSTS);
- dsts &= DSTS_SOFFN_MASK;
- dsts >>= DSTS_SOFFN_SHIFT;
-
- return dsts;
-}
-
-/**
* dwc2_hsotg_handle_rx - RX FIFO has data
* @hsotg: The device instance
*
@@ -1937,6 +2016,190 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
}
/**
+ * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
+ * @hsotg: The device state.
+ * @idx: Index of ep.
+ * @dir_in: Endpoint direction 1-in 0-out.
+ *
+ * Reads the interrupts for the endpoint with the given index and direction,
+ * by masking epint_reg with the corresponding mask.
+ */
+static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
+ unsigned int idx, int dir_in)
+{
+ u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
+ u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
+ u32 ints;
+ u32 mask;
+ u32 diepempmsk;
+
+ mask = dwc2_readl(hsotg->regs + epmsk_reg);
+ diepempmsk = dwc2_readl(hsotg->regs + DIEPEMPMSK);
+ mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
+ mask |= DXEPINT_SETUP_RCVD;
+
+ ints = dwc2_readl(hsotg->regs + epint_reg);
+ ints &= mask;
+ return ints;
+}
+
+/**
+ * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
+ * @hs_ep: The endpoint on which interrupt is asserted.
+ *
+ * This interrupt indicates that the endpoint has been disabled per the
+ * application's request.
+ *
+ * For IN endpoints this flushes the TxFIFO; for BULK the global IN NAK is
+ * cleared (DCTL_CGNPINNAK), and for ISOC the current request is completed.
+ *
+ * For ISOC-OUT endpoints, expired requests are completed and, if another
+ * request remains queued, it is started.
+ */
+static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req;
+ unsigned char idx = hs_ep->index;
+ int dir_in = hs_ep->dir_in;
+ u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
+ int dctl = dwc2_readl(hsotg->regs + DCTL);
+
+ dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+
+ if (dir_in) {
+ int epctl = dwc2_readl(hsotg->regs + epctl_reg);
+
+ dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
+
+ if (hs_ep->isochronous) {
+ dwc2_hsotg_complete_in(hsotg, hs_ep);
+ return;
+ }
+
+ if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
+ int dctl = dwc2_readl(hsotg->regs + DCTL);
+
+ dctl |= DCTL_CGNPINNAK;
+ dwc2_writel(dctl, hsotg->regs + DCTL);
+ }
+ return;
+ }
+
+ if (dctl & DCTL_GOUTNAKSTS) {
+ dctl |= DCTL_CGOUTNAK;
+ dwc2_writel(dctl, hsotg->regs + DCTL);
+ }
+
+ if (!hs_ep->isochronous)
+ return;
+
+ if (list_empty(&hs_ep->queue)) {
+ dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
+ __func__, hs_ep);
+ return;
+ }
+
+ do {
+ hs_req = get_ep_head(hs_ep);
+ if (hs_req)
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
+ -ENODATA);
+ dwc2_gadget_incr_frame_num(hs_ep);
+ } while (dwc2_gadget_target_frame_elapsed(hs_ep));
+
+ dwc2_gadget_start_next_request(hs_ep);
+}
+
+/**
+ * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
+ * @hs_ep: The endpoint on which interrupt is asserted.
+ *
+ * This is the starting point for an ISOC-OUT transfer; synchronization is
+ * done with the first OUT token received from the host while the
+ * corresponding EP is disabled.
+ *
+ * The device does not know the initial frame in which the OUT token will
+ * arrive, so HW raises OUTTKNEPDIS (OUT token received while EP disabled).
+ * On this interrupt, SW computes the frame for the next transfer.
+ */
+static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
+{
+ struct dwc2_hsotg *hsotg = ep->parent;
+ int dir_in = ep->dir_in;
+ u32 doepmsk;
+
+ if (dir_in || !ep->isochronous)
+ return;
+
+ dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA);
+
+ if (ep->interval > 1 &&
+ ep->target_frame == TARGET_FRAME_INITIAL) {
+ u32 dsts;
+ u32 ctrl;
+
+ dsts = dwc2_readl(hsotg->regs + DSTS);
+ ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+ dwc2_gadget_incr_frame_num(ep);
+
+ ctrl = dwc2_readl(hsotg->regs + DOEPCTL(ep->index));
+ if (ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+
+ dwc2_writel(ctrl, hsotg->regs + DOEPCTL(ep->index));
+ }
+
+ dwc2_gadget_start_next_request(ep);
+ doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
+ doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
+ dwc2_writel(doepmsk, hsotg->regs + DOEPMSK);
+}
+
+/**
+ * dwc2_gadget_handle_nak - handle NAK interrupt
+ * @hs_ep: The endpoint on which interrupt is asserted.
+ *
+ * This is the starting point for an ISOC-IN transfer; synchronization is done
+ * with the first IN token received from the host while the corresponding EP
+ * is disabled.
+ *
+ * The device does not know when the first IN token will arrive. On its
+ * arrival HW generates two interrupts: 'IN token received while FIFO empty'
+ * and 'NAK'. For ISOC-IN, NAK means the token arrived and a ZLP was sent in
+ * response because there was no data in the FIFO. SW uses this interrupt to
+ * learn the frame in which the token arrived and then, from the interval,
+ * computes the frame for the next transfer.
+ */
+static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+
+ if (!dir_in || !hs_ep->isochronous)
+ return;
+
+ if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
+ hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+ if (hs_ep->interval > 1) {
+ u32 ctrl = dwc2_readl(hsotg->regs +
+ DIEPCTL(hs_ep->index));
+ if (hs_ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+
+ dwc2_writel(ctrl, hsotg->regs + DIEPCTL(hs_ep->index));
+ }
+
+ dwc2_hsotg_complete_request(hsotg, hs_ep,
+ get_ep_head(hs_ep), 0);
+ }
+
+ dwc2_gadget_incr_frame_num(hs_ep);
+}
+
+/**
* dwc2_hsotg_epint - handle an in/out endpoint interrupt
* @hsotg: The driver state
* @idx: The index for the endpoint (0..15)
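
dwc2_gadget_target_frame_elapsed() is the companion predicate: without an overrun, the target has elapsed once the current frame reaches it; after a wrap, current >= target only counts while the distance is under half the counter range, so stale pre-wrap frame numbers aren't read as the future. A standalone model, under the same assumed 0x3fff limit as above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SOFFN_LIMIT 0x3fffu	/* assumed 14-bit limit, as above */

static bool frame_elapsed(uint32_t cur, uint32_t target, bool overrun)
{
	if (!overrun && cur >= target)
		return true;

	/* after a wrap, only a small forward distance counts as elapsed */
	if (overrun && cur >= target && (cur - target) < SOFFN_LIMIT / 2)
		return true;

	return false;
}

int main(void)
{
	printf("%d %d\n", frame_elapsed(0x0100, 0x0006, true),	/* 1 */
	       frame_elapsed(0x3f00, 0x0006, true));		/* 0 */
	return 0;
}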
@@ -1954,7 +2217,7 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
u32 ints;
u32 ctrl;
- ints = dwc2_readl(hsotg->regs + epint_reg);
+ ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
ctrl = dwc2_readl(hsotg->regs + epctl_reg);
/* Clear endpoint interrupts */
@@ -1973,11 +2236,10 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
ints &= ~DXEPINT_XFERCOMPL;
- if (ints & DXEPINT_XFERCOMPL) {
- hs_ep->has_correct_parity = 1;
- if (hs_ep->isochronous && hs_ep->interval == 1)
- dwc2_hsotg_change_ep_iso_parity(hsotg, epctl_reg);
+ if (ints & DXEPINT_STSPHSERCVD)
+ dev_dbg(hsotg->dev, "%s: StsPhseRcvd asserted\n", __func__);
+ if (ints & DXEPINT_XFERCOMPL) {
dev_dbg(hsotg->dev,
"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
__func__, dwc2_readl(hsotg->regs + epctl_reg),
@@ -1988,7 +2250,12 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
* at completing IN requests here
*/
if (dir_in) {
+ if (hs_ep->isochronous && hs_ep->interval > 1)
+ dwc2_gadget_incr_frame_num(hs_ep);
+
dwc2_hsotg_complete_in(hsotg, hs_ep);
+ if (ints & DXEPINT_NAKINTRPT)
+ ints &= ~DXEPINT_NAKINTRPT;
if (idx == 0 && !hs_ep->req)
dwc2_hsotg_enqueue_setup(hsotg);
@@ -1997,28 +2264,21 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
* We're using DMA, we need to fire an OutDone here
* as we ignore the RXFIFO.
*/
+ if (hs_ep->isochronous && hs_ep->interval > 1)
+ dwc2_gadget_incr_frame_num(hs_ep);
dwc2_hsotg_handle_outdone(hsotg, idx);
}
}
- if (ints & DXEPINT_EPDISBLD) {
- dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+ if (ints & DXEPINT_EPDISBLD)
+ dwc2_gadget_handle_ep_disabled(hs_ep);
- if (dir_in) {
- int epctl = dwc2_readl(hsotg->regs + epctl_reg);
+ if (ints & DXEPINT_OUTTKNEPDIS)
+ dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
- dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
-
- if ((epctl & DXEPCTL_STALL) &&
- (epctl & DXEPCTL_EPTYPE_BULK)) {
- int dctl = dwc2_readl(hsotg->regs + DCTL);
-
- dctl |= DCTL_CGNPINNAK;
- dwc2_writel(dctl, hsotg->regs + DCTL);
- }
- }
- }
+ if (ints & DXEPINT_NAKINTRPT)
+ dwc2_gadget_handle_nak(hs_ep);
if (ints & DXEPINT_AHBERR)
dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
@@ -2046,20 +2306,20 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (dir_in && !hs_ep->isochronous) {
/* not sure if this is important, but we'll clear it anyway */
- if (ints & DIEPMSK_INTKNTXFEMPMSK) {
+ if (ints & DXEPINT_INTKNTXFEMP) {
dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
__func__, idx);
}
/* this probably means something bad is happening */
- if (ints & DIEPMSK_INTKNEPMISMSK) {
+ if (ints & DXEPINT_INTKNEPMIS) {
dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
__func__, idx);
}
/* FIFO has space or is empty (see GAHBCFG) */
if (hsotg->dedicated_fifos &&
- ints & DIEPMSK_TXFIFOEMPTY) {
+ ints & DXEPINT_TXFEMP) {
dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
__func__, idx);
if (!using_dma(hsotg))
@@ -2322,18 +2582,16 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
dwc2_writel(((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
- DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
- DIEPMSK_INTKNEPMISMSK,
+ DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
hsotg->regs + DIEPMSK);
/*
* don't need XferCompl, we get that from RXFIFO in slave mode. In
* DMA mode we may need this.
*/
- dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
- DIEPMSK_TIMEOUTMSK) : 0) |
+ dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK) : 0) |
DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
- DOEPMSK_SETUPMSK,
+ DOEPMSK_SETUPMSK | DOEPMSK_STSPHSERCVDMSK,
hsotg->regs + DOEPMSK);
dwc2_writel(0, hsotg->regs + DAINTMSK);
@@ -2414,6 +2672,85 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
+ * @hsotg: The device state
+ *
+ * This interrupt indicates one of the following conditions occurred while
+ * transmitting an ISOC transaction.
+ * - Corrupted IN Token for ISOC EP.
+ * - Packet not complete in FIFO.
+ *
+ * The following actions will be taken:
+ * - Determine the EP
+ * - Disable the EP; once the 'Endpoint Disabled' interrupt arrives, flush the FIFO
+ */
+static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hsotg_ep *hs_ep;
+ u32 epctrl;
+ u32 idx;
+
+ dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
+
+ for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
+ hs_ep = hsotg->eps_in[idx];
+ epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
+ if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous &&
+ dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ epctrl |= DXEPCTL_SNAK;
+ epctrl |= DXEPCTL_EPDIS;
+ dwc2_writel(epctrl, hsotg->regs + DIEPCTL(idx));
+ }
+ }
+
+ /* Clear interrupt */
+ dwc2_writel(GINTSTS_INCOMPL_SOIN, hsotg->regs + GINTSTS);
+}
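
The check that gates the disable above, dwc2_gadget_target_frame_elapsed(), is only called in this hunk, not defined. A minimal sketch of such a check, assuming the DSTS.SOFFN field layout from hw.h and the hs_ep->parent back-pointer; not necessarily the driver's actual implementation:

	/* Sketch: compare the 14-bit SOF counter with the EP's target
	 * frame using modular arithmetic so the wrap at 0x3fff is
	 * handled naturally. */
	static bool target_frame_elapsed_sketch(struct dwc2_hsotg_ep *hs_ep)
	{
		struct dwc2_hsotg *hsotg = hs_ep->parent;
		u32 frame;

		frame = (dwc2_readl(hsotg->regs + DSTS) >> DSTS_SOFFN_SHIFT) &
			0x3fff;

		/* "elapsed" when the target sits in the past half-window */
		return ((frame - hs_ep->target_frame) & 0x3fff) < 0x2000;
	}
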
+
+/**
+ * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
+ * @hsotg: The device state
+ *
+ * This interrupt indicates one of the following conditions occurred while
+ * receiving an ISOC transaction.
+ * - Corrupted OUT Token for ISOC EP.
+ * - Packet not complete in FIFO.
+ *
+ * The following actions will be taken:
+ * - Determine the EP
+ * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
+ */
+static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts;
+ u32 gintmsk;
+ u32 epctrl;
+ struct dwc2_hsotg_ep *hs_ep;
+ int idx;
+
+ dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
+
+ for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
+ hs_ep = hsotg->eps_out[idx];
+ epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
+ if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous &&
+ dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ /* Unmask GOUTNAKEFF interrupt */
+ gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk |= GINTSTS_GOUTNAKEFF;
+ dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+
+ gintsts = dwc2_readl(hsotg->regs + GINTSTS);
+ if (!(gintsts & GINTSTS_GOUTNAKEFF))
+ __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+ }
+ }
+
+ /* Clear interrupt */
+ dwc2_writel(GINTSTS_INCOMPL_SOOUT, hsotg->regs + GINTSTS);
+}
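
Both ISOC paths set DCTL bits through the driver's __orr32() helper. For readers outside gadget.c it is a plain read-modify-write; an equivalent sketch:

	static inline void __orr32_sketch(void __iomem *ptr, u32 val)
	{
		/* read the register, OR in the requested bits, write back */
		dwc2_writel(dwc2_readl(ptr) | val, ptr);
	}
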
+
+/**
* dwc2_hsotg_irq - handle device interrupt
* @irq: The IRQ number triggered
* @pw: The private data passed when the handler was registered.
@@ -2545,11 +2882,29 @@ irq_retry:
*/
if (gintsts & GINTSTS_GOUTNAKEFF) {
- dev_info(hsotg->dev, "GOUTNakEff triggered\n");
+ u8 idx;
+ u32 epctrl;
+ u32 gintmsk;
+ struct dwc2_hsotg_ep *hs_ep;
- __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK);
+ /* Mask this interrupt */
+ gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk &= ~GINTSTS_GOUTNAKEFF;
+ dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
- dwc2_hsotg_dump(hsotg);
+ dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
+ for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
+ hs_ep = hsotg->eps_out[idx];
+ epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
+
+ if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
+ epctrl |= DXEPCTL_SNAK;
+ epctrl |= DXEPCTL_EPDIS;
+ dwc2_writel(epctrl, hsotg->regs + DOEPCTL(idx));
+ }
+ }
+
+ /* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
}
if (gintsts & GINTSTS_GINNAKEFF) {
@@ -2560,39 +2915,11 @@ irq_retry:
dwc2_hsotg_dump(hsotg);
}
- if (gintsts & GINTSTS_INCOMPL_SOIN) {
- u32 idx, epctl_reg;
- struct dwc2_hsotg_ep *hs_ep;
-
- dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOIN\n", __func__);
- for (idx = 1; idx < hsotg->num_of_eps; idx++) {
- hs_ep = hsotg->eps_in[idx];
-
- if (!hs_ep->isochronous || hs_ep->has_correct_parity)
- continue;
+ if (gintsts & GINTSTS_INCOMPL_SOIN)
+ dwc2_gadget_handle_incomplete_isoc_in(hsotg);
- epctl_reg = DIEPCTL(idx);
- dwc2_hsotg_change_ep_iso_parity(hsotg, epctl_reg);
- }
- dwc2_writel(GINTSTS_INCOMPL_SOIN, hsotg->regs + GINTSTS);
- }
-
- if (gintsts & GINTSTS_INCOMPL_SOOUT) {
- u32 idx, epctl_reg;
- struct dwc2_hsotg_ep *hs_ep;
-
- dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
- for (idx = 1; idx < hsotg->num_of_eps; idx++) {
- hs_ep = hsotg->eps_out[idx];
-
- if (!hs_ep->isochronous || hs_ep->has_correct_parity)
- continue;
-
- epctl_reg = DOEPCTL(idx);
- dwc2_hsotg_change_ep_iso_parity(hsotg, epctl_reg);
- }
- dwc2_writel(GINTSTS_INCOMPL_SOOUT, hsotg->regs + GINTSTS);
- }
+ if (gintsts & GINTSTS_INCOMPL_SOOUT)
+ dwc2_gadget_handle_incomplete_isoc_out(hsotg);
/*
* if we've had fifo events, we should try and go around the
@@ -2624,6 +2951,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
u32 epctrl_reg;
u32 epctrl;
u32 mps;
+ u32 mask;
unsigned int dir_in;
unsigned int i, val, size;
int ret = 0;
@@ -2666,15 +2994,6 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
*/
epctrl |= DXEPCTL_USBACTEP;
- /*
- * set the NAK status on the endpoint, otherwise we might try and
- * do something with data that we've yet got a request to process
- * since the RXFIFO will take data for an endpoint even if the
- * size register hasn't been set.
- */
-
- epctrl |= DXEPCTL_SNAK;
-
/* update the endpoint state */
dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in);
@@ -2683,18 +3002,24 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
hs_ep->periodic = 0;
hs_ep->halted = 0;
hs_ep->interval = desc->bInterval;
- hs_ep->has_correct_parity = 0;
-
- if (hs_ep->interval > 1 && hs_ep->mc > 1)
- dev_err(hsotg->dev, "MC > 1 when interval is not 1\n");
switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_ISOC:
epctrl |= DXEPCTL_EPTYPE_ISO;
epctrl |= DXEPCTL_SETEVENFR;
hs_ep->isochronous = 1;
- if (dir_in)
+ hs_ep->interval = 1 << (desc->bInterval - 1);
+ hs_ep->target_frame = TARGET_FRAME_INITIAL;
+ if (dir_in) {
hs_ep->periodic = 1;
+ mask = dwc2_readl(hsotg->regs + DIEPMSK);
+ mask |= DIEPMSK_NAKMSK;
+ dwc2_writel(mask, hsotg->regs + DIEPMSK);
+ } else {
+ mask = dwc2_readl(hsotg->regs + DOEPMSK);
+ mask |= DOEPMSK_OUTTKNEPDISMSK;
+ dwc2_writel(mask, hsotg->regs + DOEPMSK);
+ }
break;
case USB_ENDPOINT_XFER_BULK:
@@ -2705,6 +3030,9 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
if (dir_in)
hs_ep->periodic = 1;
+ if (hsotg->gadget.speed == USB_SPEED_HIGH)
+ hs_ep->interval = 1 << (desc->bInterval - 1);
+
epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
break;
@@ -2758,7 +3086,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
}
/* for non control endpoints, set PID to D0 */
- if (index)
+ if (index && !hs_ep->isochronous)
epctrl |= DXEPCTL_SETD0PID;
dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
@@ -2875,10 +3203,8 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
dev_warn(hsotg->dev,
"%s: timeout DIEPINT.NAKEFF\n", __func__);
} else {
- /* Clear any pending nak effect interrupt */
- dwc2_writel(GINTSTS_GOUTNAKEFF, hsotg->regs + GINTSTS);
-
- __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+ if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
+ __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
/* Wait for global nak to take effect */
if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index b5c7793a2df2..13754353251f 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -367,7 +367,8 @@ static void pmap_unschedule(unsigned long *map, int bits_per_period,
* @fmt: The format for printf.
* @...: The args for printf.
*/
-static void cat_printf(char **buf, size_t *size, const char *fmt, ...)
+static __printf(3, 4)
+void cat_printf(char **buf, size_t *size, const char *fmt, ...)
{
va_list args;
int i;
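
The __printf(3, 4) annotation tells the compiler that argument 3 is the format string and the varargs start at argument 4, so mismatched cat_printf() callers now produce a compile-time warning. A hypothetical misuse that would start to warn:

	/* gcc: warning, format '%d' expects an int, got 'char *' */
	cat_printf(&buf, &size, "%d", "not-an-int");
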
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 281b57b36ab4..efc3bcde2822 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -459,6 +459,9 @@
#define DSTS_SUSPSTS (1 << 0)
#define DIEPMSK HSOTG_REG(0x810)
+#define DIEPMSK_NAKMSK (1 << 13)
+#define DIEPMSK_BNAININTRMSK (1 << 9)
+#define DIEPMSK_TXFIFOUNDRNMSK (1 << 8)
#define DIEPMSK_TXFIFOEMPTY (1 << 7)
#define DIEPMSK_INEPNAKEFFMSK (1 << 6)
#define DIEPMSK_INTKNEPMISMSK (1 << 5)
@@ -470,6 +473,7 @@
#define DOEPMSK HSOTG_REG(0x814)
#define DOEPMSK_BACK2BACKSETUP (1 << 6)
+#define DOEPMSK_STSPHSERCVDMSK (1 << 5)
#define DOEPMSK_OUTTKNEPDISMSK (1 << 4)
#define DOEPMSK_SETUPMSK (1 << 3)
#define DOEPMSK_AHBERRMSK (1 << 2)
@@ -486,6 +490,7 @@
#define DTKNQR2 HSOTG_REG(0x824)
#define DTKNQR3 HSOTG_REG(0x830)
#define DTKNQR4 HSOTG_REG(0x834)
+#define DIEPEMPMSK HSOTG_REG(0x834)
#define DVBUSDIS HSOTG_REG(0x828)
#define DVBUSPULSE HSOTG_REG(0x82C)
@@ -544,9 +549,18 @@
#define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20))
#define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20))
#define DXEPINT_SETUP_RCVD (1 << 15)
+#define DXEPINT_NYETINTRPT (1 << 14)
+#define DXEPINT_NAKINTRPT (1 << 13)
+#define DXEPINT_BBLEERRINTRPT (1 << 12)
+#define DXEPINT_PKTDRPSTS (1 << 11)
+#define DXEPINT_BNAINTR (1 << 9)
+#define DXEPINT_TXFIFOUNDRN (1 << 8)
+#define DXEPINT_OUTPKTERR (1 << 8)
+#define DXEPINT_TXFEMP (1 << 7)
#define DXEPINT_INEPNAKEFF (1 << 6)
#define DXEPINT_BACK2BACKSETUP (1 << 6)
#define DXEPINT_INTKNEPMIS (1 << 5)
+#define DXEPINT_STSPHSERCVD (1 << 5)
#define DXEPINT_INTKNTXFEMP (1 << 4)
#define DXEPINT_OUTTKNEPDIS (1 << 4)
#define DXEPINT_TIMEOUT (1 << 3)
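
Several of the DIEPINTn/DOEPINTn positions above are deliberately defined twice: the hardware reuses the same bit with a direction-dependent meaning (bit 8 is TxfifoUndrn for IN but OutPktErr for OUT, bit 5 is INTknEPMis for IN but StsPhseRcvd for OUT, bit 4 is INTknTXFEmp for IN but OUTTknEPdis for OUT). A decoder therefore has to pick the name by direction; a sketch:

	/* Sketch: resolve an overloaded DXEPINT bit name by direction */
	static const char *dxepint_bit5_name(bool dir_in)
	{
		return dir_in ? "INTknEPMis" : "StsPhseRcvd";
	}
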
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index a590cd225bb7..946643157b78 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -41,14 +41,13 @@
#include <linux/usb/of.h>
#include <linux/usb/otg.h>
-#include "platform_data.h"
#include "core.h"
#include "gadget.h"
#include "io.h"
#include "debug.h"
-/* -------------------------------------------------------------------------- */
+#define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
@@ -149,9 +148,8 @@ static int dwc3_soft_reset(struct dwc3 *dwc)
/*
* dwc3_frame_length_adjustment - Adjusts frame length if required
* @dwc3: Pointer to our controller context structure
- * @fladj: Value of GFLADJ_30MHZ to adjust frame length
*/
-static void dwc3_frame_length_adjustment(struct dwc3 *dwc, u32 fladj)
+static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
u32 reg;
u32 dft;
@@ -159,15 +157,15 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc, u32 fladj)
if (dwc->revision < DWC3_REVISION_250A)
return;
- if (fladj == 0)
+ if (dwc->fladj == 0)
return;
reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
dft = reg & DWC3_GFLADJ_30MHZ_MASK;
- if (!dev_WARN_ONCE(dwc->dev, dft == fladj,
+ if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj,
"request value same as default, ignoring\n")) {
reg &= ~DWC3_GFLADJ_30MHZ_MASK;
- reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | fladj;
+ reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
}
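
With the parameter gone, the adjustment value travels through dwc->fladj, which probe fills from a device property later in this patch. A consumer-side sketch using the standard property API:

	u32 fladj = 0;

	/* as wired up in dwc3_probe() further down */
	device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
				 &fladj);
	/* fladj == 0 means "leave GFLADJ alone", per the check above */
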
@@ -507,6 +505,21 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
return 0;
}
+static void dwc3_core_exit(struct dwc3 *dwc)
+{
+ dwc3_event_buffers_cleanup(dwc);
+
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+}
+
/**
* dwc3_core_init - Low-level initialization of DWC3 Core
* @dwc: Pointer to our controller context structure
@@ -556,6 +569,10 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (ret)
goto err0;
+ ret = dwc3_phy_setup(dwc);
+ if (ret)
+ goto err0;
+
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
@@ -622,22 +639,45 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (dwc->revision < DWC3_REVISION_190A)
reg |= DWC3_GCTL_U2RSTECN;
- dwc3_core_num_eps(dwc);
-
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
- ret = dwc3_alloc_scratch_buffers(dwc);
- if (ret)
- goto err1;
+ dwc3_core_num_eps(dwc);
ret = dwc3_setup_scratch_buffers(dwc);
if (ret)
+ goto err1;
+
+ /* Adjust Frame Length */
+ dwc3_frame_length_adjustment(dwc);
+
+ usb_phy_set_suspend(dwc->usb2_phy, 0);
+ usb_phy_set_suspend(dwc->usb3_phy, 0);
+ ret = phy_power_on(dwc->usb2_generic_phy);
+ if (ret < 0)
goto err2;
+ ret = phy_power_on(dwc->usb3_generic_phy);
+ if (ret < 0)
+ goto err3;
+
+ ret = dwc3_event_buffers_setup(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to setup event buffers\n");
+ goto err4;
+ }
+
return 0;
+err4:
+ phy_power_off(dwc->usb2_generic_phy);
+
+err3:
+ phy_power_off(dwc->usb3_generic_phy);
+
err2:
- dwc3_free_scratch_buffers(dwc);
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ dwc3_core_exit(dwc);
err1:
usb_phy_shutdown(dwc->usb2_phy);
@@ -649,15 +689,6 @@ err0:
return ret;
}
-static void dwc3_core_exit(struct dwc3 *dwc)
-{
- dwc3_free_scratch_buffers(dwc);
- usb_phy_shutdown(dwc->usb2_phy);
- usb_phy_shutdown(dwc->usb3_phy);
- phy_exit(dwc->usb2_generic_phy);
- phy_exit(dwc->usb3_generic_phy);
-}
-
static int dwc3_core_get_phy(struct dwc3 *dwc)
{
struct device *dev = dwc->dev;
@@ -735,7 +766,8 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
- dev_err(dev, "failed to initialize gadget\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to initialize gadget\n");
return ret;
}
break;
@@ -743,7 +775,8 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
ret = dwc3_host_init(dwc);
if (ret) {
- dev_err(dev, "failed to initialize host\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
break;
@@ -751,13 +784,15 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
ret = dwc3_host_init(dwc);
if (ret) {
- dev_err(dev, "failed to initialize host\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
ret = dwc3_gadget_init(dwc);
if (ret) {
- dev_err(dev, "failed to initialize gadget\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to initialize gadget\n");
return ret;
}
break;
@@ -793,13 +828,11 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct dwc3_platform_data *pdata = dev_get_platdata(dev);
struct resource *res;
struct dwc3 *dwc;
u8 lpm_nyet_threshold;
u8 tx_de_emphasis;
u8 hird_threshold;
- u32 fladj = 0;
int ret;
@@ -814,16 +847,6 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->mem = mem;
dwc->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "missing IRQ\n");
- return -ENODEV;
- }
- dwc->xhci_resources[1].start = res->start;
- dwc->xhci_resources[1].end = res->end;
- dwc->xhci_resources[1].flags = res->flags;
- dwc->xhci_resources[1].name = res->name;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "missing memory resource\n");
@@ -909,40 +932,7 @@ static int dwc3_probe(struct platform_device *pdev)
device_property_read_string(dev, "snps,hsphy_interface",
&dwc->hsphy_interface);
device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
- &fladj);
-
- if (pdata) {
- dwc->maximum_speed = pdata->maximum_speed;
- dwc->has_lpm_erratum = pdata->has_lpm_erratum;
- if (pdata->lpm_nyet_threshold)
- lpm_nyet_threshold = pdata->lpm_nyet_threshold;
- dwc->is_utmi_l1_suspend = pdata->is_utmi_l1_suspend;
- if (pdata->hird_threshold)
- hird_threshold = pdata->hird_threshold;
-
- dwc->usb3_lpm_capable = pdata->usb3_lpm_capable;
- dwc->dr_mode = pdata->dr_mode;
-
- dwc->disable_scramble_quirk = pdata->disable_scramble_quirk;
- dwc->u2exit_lfps_quirk = pdata->u2exit_lfps_quirk;
- dwc->u2ss_inp3_quirk = pdata->u2ss_inp3_quirk;
- dwc->req_p1p2p3_quirk = pdata->req_p1p2p3_quirk;
- dwc->del_p1p2p3_quirk = pdata->del_p1p2p3_quirk;
- dwc->del_phy_power_chg_quirk = pdata->del_phy_power_chg_quirk;
- dwc->lfps_filter_quirk = pdata->lfps_filter_quirk;
- dwc->rx_detect_poll_quirk = pdata->rx_detect_poll_quirk;
- dwc->dis_u3_susphy_quirk = pdata->dis_u3_susphy_quirk;
- dwc->dis_u2_susphy_quirk = pdata->dis_u2_susphy_quirk;
- dwc->dis_enblslpm_quirk = pdata->dis_enblslpm_quirk;
- dwc->dis_rxdet_inp3_quirk = pdata->dis_rxdet_inp3_quirk;
-
- dwc->tx_de_emphasis_quirk = pdata->tx_de_emphasis_quirk;
- if (pdata->tx_de_emphasis)
- tx_de_emphasis = pdata->tx_de_emphasis;
-
- dwc->hsphy_interface = pdata->hsphy_interface;
- fladj = pdata->fladj_value;
- }
+ &dwc->fladj);
dwc->lpm_nyet_threshold = lpm_nyet_threshold;
dwc->tx_de_emphasis = tx_de_emphasis;
@@ -953,10 +943,6 @@ static int dwc3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
- ret = dwc3_phy_setup(dwc);
- if (ret)
- goto err0;
-
ret = dwc3_core_get_phy(dwc);
if (ret)
goto err0;
@@ -969,29 +955,43 @@ static int dwc3_probe(struct platform_device *pdev)
dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
}
+ pm_runtime_set_active(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err1;
+
pm_runtime_forbid(dev);
ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
if (ret) {
dev_err(dwc->dev, "failed to allocate event buffers\n");
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
- if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
+ if (IS_ENABLED(CONFIG_USB_DWC3_HOST) &&
+ (dwc->dr_mode == USB_DR_MODE_OTG ||
+ dwc->dr_mode == USB_DR_MODE_UNKNOWN))
dwc->dr_mode = USB_DR_MODE_HOST;
- else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
+ else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET) &&
+ (dwc->dr_mode == USB_DR_MODE_OTG ||
+ dwc->dr_mode == USB_DR_MODE_UNKNOWN))
dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
dwc->dr_mode = USB_DR_MODE_OTG;
+ ret = dwc3_alloc_scratch_buffers(dwc);
+ if (ret)
+ goto err3;
+
ret = dwc3_core_init(dwc);
if (ret) {
dev_err(dev, "failed to initialize core\n");
- goto err1;
+ goto err4;
}
/* Check the maximum_speed parameter */
@@ -1021,31 +1021,12 @@ static int dwc3_probe(struct platform_device *pdev)
break;
}
- /* Adjust Frame Length */
- dwc3_frame_length_adjustment(dwc, fladj);
-
- usb_phy_set_suspend(dwc->usb2_phy, 0);
- usb_phy_set_suspend(dwc->usb3_phy, 0);
- ret = phy_power_on(dwc->usb2_generic_phy);
- if (ret < 0)
- goto err2;
-
- ret = phy_power_on(dwc->usb3_generic_phy);
- if (ret < 0)
- goto err3;
-
- ret = dwc3_event_buffers_setup(dwc);
- if (ret) {
- dev_err(dwc->dev, "failed to setup event buffers\n");
- goto err4;
- }
-
ret = dwc3_core_init_mode(dwc);
if (ret)
goto err5;
dwc3_debugfs_init(dwc);
- pm_runtime_allow(dev);
+ pm_runtime_put(dev);
return 0;
@@ -1053,19 +1034,18 @@ err5:
dwc3_event_buffers_cleanup(dwc);
err4:
- phy_power_off(dwc->usb3_generic_phy);
+ dwc3_free_scratch_buffers(dwc);
err3:
- phy_power_off(dwc->usb2_generic_phy);
+ dwc3_free_event_buffers(dwc);
+ dwc3_ulpi_exit(dwc);
err2:
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- dwc3_core_exit(dwc);
+ pm_runtime_allow(&pdev->dev);
err1:
- dwc3_free_event_buffers(dwc);
- dwc3_ulpi_exit(dwc);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
err0:
/*
@@ -1083,6 +1063,7 @@ static int dwc3_remove(struct platform_device *pdev)
struct dwc3 *dwc = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pm_runtime_get_sync(&pdev->dev);
/*
* restore res->start back to its original value so that, in case the
* probe is deferred, we don't end up getting error in request the
@@ -1092,133 +1073,192 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_debugfs_exit(dwc);
dwc3_core_exit_mode(dwc);
- dwc3_event_buffers_cleanup(dwc);
- dwc3_free_event_buffers(dwc);
-
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- phy_power_off(dwc->usb2_generic_phy);
- phy_power_off(dwc->usb3_generic_phy);
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ dwc3_free_event_buffers(dwc);
+ dwc3_free_scratch_buffers(dwc);
+
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int dwc3_suspend_common(struct dwc3 *dwc)
{
- struct dwc3 *dwc = dev_get_drvdata(dev);
unsigned long flags;
- spin_lock_irqsave(&dwc->lock, flags);
-
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
case USB_DR_MODE_OTG:
+ spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
- /* FALLTHROUGH */
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ break;
case USB_DR_MODE_HOST:
default:
- dwc3_event_buffers_cleanup(dwc);
+ /* do nothing */
break;
}
- dwc->gctl = dwc3_readl(dwc->regs, DWC3_GCTL);
- spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc3_core_exit(dwc);
- usb_phy_shutdown(dwc->usb3_phy);
- usb_phy_shutdown(dwc->usb2_phy);
- phy_exit(dwc->usb2_generic_phy);
- phy_exit(dwc->usb3_generic_phy);
+ return 0;
+}
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
- WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
+static int dwc3_resume_common(struct dwc3 *dwc)
+{
+ unsigned long flags;
+ int ret;
- pinctrl_pm_select_sleep_state(dev);
+ ret = dwc3_core_init(dwc);
+ if (ret)
+ return ret;
+
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ case USB_DR_MODE_OTG:
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_resume(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ /* FALLTHROUGH */
+ case USB_DR_MODE_HOST:
+ default:
+ /* do nothing */
+ break;
+ }
return 0;
}
-static int dwc3_resume(struct device *dev)
+static int dwc3_runtime_checks(struct dwc3 *dwc)
{
- struct dwc3 *dwc = dev_get_drvdata(dev);
- unsigned long flags;
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ case USB_DR_MODE_OTG:
+ if (dwc->connected)
+ return -EBUSY;
+ break;
+ case USB_DR_MODE_HOST:
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+static int dwc3_runtime_suspend(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
int ret;
- pinctrl_pm_select_default_state(dev);
+ if (dwc3_runtime_checks(dwc))
+ return -EBUSY;
- usb_phy_set_suspend(dwc->usb2_phy, 0);
- usb_phy_set_suspend(dwc->usb3_phy, 0);
- ret = phy_power_on(dwc->usb2_generic_phy);
- if (ret < 0)
+ ret = dwc3_suspend_common(dwc);
+ if (ret)
return ret;
- ret = phy_power_on(dwc->usb3_generic_phy);
- if (ret < 0)
- goto err_usb2phy_power;
+ device_init_wakeup(dev, true);
- usb_phy_init(dwc->usb3_phy);
- usb_phy_init(dwc->usb2_phy);
- ret = phy_init(dwc->usb2_generic_phy);
- if (ret < 0)
- goto err_usb3phy_power;
+ return 0;
+}
- ret = phy_init(dwc->usb3_generic_phy);
- if (ret < 0)
- goto err_usb2phy_init;
+static int dwc3_runtime_resume(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
- spin_lock_irqsave(&dwc->lock, flags);
+ device_init_wakeup(dev, false);
- dwc3_event_buffers_setup(dwc);
- dwc3_writel(dwc->regs, DWC3_GCTL, dwc->gctl);
+ ret = dwc3_resume_common(dwc);
+ if (ret)
+ return ret;
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
case USB_DR_MODE_OTG:
- dwc3_gadget_resume(dwc);
- /* FALLTHROUGH */
+ dwc3_gadget_process_pending_events(dwc);
+ break;
case USB_DR_MODE_HOST:
default:
/* do nothing */
break;
}
- spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_mark_last_busy(dev);
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ return 0;
+}
+
+static int dwc3_runtime_idle(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ case USB_DR_MODE_OTG:
+ if (dwc3_runtime_checks(dwc))
+ return -EBUSY;
+ break;
+ case USB_DR_MODE_HOST:
+ default:
+ /* do nothing */
+ break;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
return 0;
+}
+#endif /* CONFIG_PM */
-err_usb2phy_init:
- phy_exit(dwc->usb2_generic_phy);
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_suspend(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
-err_usb3phy_power:
- phy_power_off(dwc->usb3_generic_phy);
+ ret = dwc3_suspend_common(dwc);
+ if (ret)
+ return ret;
-err_usb2phy_power:
- phy_power_off(dwc->usb2_generic_phy);
+ pinctrl_pm_select_sleep_state(dev);
- return ret;
+ return 0;
}
+static int dwc3_resume(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = dwc3_resume_common(dwc);
+ if (ret)
+ return ret;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
static const struct dev_pm_ops dwc3_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+ SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
+ dwc3_runtime_idle)
};
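
With SET_RUNTIME_PM_OPS in place and the 5000 ms autosuspend delay set during probe, users of the device are expected to follow the usual mark-busy/put-autosuspend pattern. A minimal sketch with standard runtime-PM calls (the function name is illustrative):

	static void dwc3_touch_hw_sketch(struct device *dev)
	{
		pm_runtime_get_sync(dev);	 /* resume controller if idle */

		/* ... access registers ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev); /* suspend after the delay */
	}
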
-#define DWC3_PM_OPS &(dwc3_dev_pm_ops)
-#else
-#define DWC3_PM_OPS NULL
-#endif
-
#ifdef CONFIG_OF
static const struct of_device_id of_dwc3_match[] = {
{
@@ -1250,7 +1290,7 @@ static struct platform_driver dwc3_driver = {
.name = "dwc3",
.of_match_table = of_match_ptr(of_dwc3_match),
.acpi_match_table = ACPI_PTR(dwc3_acpi_match),
- .pm = DWC3_PM_OPS,
+ .pm = &dwc3_dev_pm_ops,
},
};
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 654050684f4f..45d6de5107c7 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -86,6 +86,7 @@
#define DWC3_GCTL 0xc110
#define DWC3_GEVTEN 0xc114
#define DWC3_GSTS 0xc118
+#define DWC3_GUCTL1 0xc11c
#define DWC3_GSNPSID 0xc120
#define DWC3_GGPIO 0xc124
#define DWC3_GUID 0xc128
@@ -138,10 +139,12 @@
#define DWC3_DGCMDPAR 0xc710
#define DWC3_DGCMD 0xc714
#define DWC3_DALEPENA 0xc720
-#define DWC3_DEPCMDPAR2(n) (0xc800 + (n * 0x10))
-#define DWC3_DEPCMDPAR1(n) (0xc804 + (n * 0x10))
-#define DWC3_DEPCMDPAR0(n) (0xc808 + (n * 0x10))
-#define DWC3_DEPCMD(n) (0xc80c + (n * 0x10))
+
+#define DWC3_DEP_BASE(n) (0xc800 + (n * 0x10))
+#define DWC3_DEPCMDPAR2 0x00
+#define DWC3_DEPCMDPAR1 0x04
+#define DWC3_DEPCMDPAR0 0x08
+#define DWC3_DEPCMD 0x0c
/* OTG Registers */
#define DWC3_OCFG 0xcc00
@@ -231,6 +234,14 @@
#define DWC3_GEVNTSIZ_INTMASK (1 << 31)
#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff)
+/* Global HWPARAMS0 Register */
+#define DWC3_GHWPARAMS0_USB3_MODE(n) ((n) & 0x3)
+#define DWC3_GHWPARAMS0_MBUS_TYPE(n) (((n) >> 3) & 0x7)
+#define DWC3_GHWPARAMS0_SBUS_TYPE(n) (((n) >> 6) & 0x3)
+#define DWC3_GHWPARAMS0_MDWIDTH(n) (((n) >> 8) & 0xff)
+#define DWC3_GHWPARAMS0_SDWIDTH(n) (((n) >> 16) & 0xff)
+#define DWC3_GHWPARAMS0_AWIDTH(n) (((n) >> 24) & 0xff)
+
/* Global HWPARAMS1 Register */
#define DWC3_GHWPARAMS1_EN_PWROPT(n) (((n) & (3 << 24)) >> 24)
#define DWC3_GHWPARAMS1_EN_PWROPT_NO 0
@@ -260,6 +271,10 @@
/* Global HWPARAMS6 Register */
#define DWC3_GHWPARAMS6_EN_FPGA (1 << 7)
+/* Global HWPARAMS7 Register */
+#define DWC3_GHWPARAMS7_RAM1_DEPTH(n) ((n) & 0xffff)
+#define DWC3_GHWPARAMS7_RAM2_DEPTH(n) (((n) >> 16) & 0xffff)
+
/* Global Frame Length Adjustment Register */
#define DWC3_GFLADJ_30MHZ_SDBND_SEL (1 << 7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
@@ -468,6 +483,8 @@ struct dwc3_event_buffer {
* @endpoint: usb endpoint
* @pending_list: list of pending requests for this endpoint
* @started_list: list of started requests on this endpoint
+ * @lock: spinlock for endpoint request queue traversal
+ * @regs: pointer to first endpoint register
* @trb_pool: array of transaction buffers
* @trb_pool_dma: dma address of @trb_pool
* @trb_enqueue: enqueue 'pointer' into TRB array
@@ -480,6 +497,8 @@ struct dwc3_event_buffer {
* @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
* @resource_index: Resource transfer index
* @interval: the interval on which the ISOC transfer is started
+ * @allocated_requests: number of requests allocated
+ * @queued_requests: number of requests queued for transfer
* @name: a human readable name e.g. ep1out-bulk
* @direction: true for TX, false for RX
* @stream_capable: true when streams are enabled
@@ -489,6 +508,9 @@ struct dwc3_ep {
struct list_head pending_list;
struct list_head started_list;
+ spinlock_t lock;
+ void __iomem *regs;
+
struct dwc3_trb *trb_pool;
dma_addr_t trb_pool_dma;
const struct usb_ss_ep_comp_descriptor *comp_desc;
@@ -521,6 +543,8 @@ struct dwc3_ep {
u8 number;
u8 type;
u8 resource_index;
+ u32 allocated_requests;
+ u32 queued_requests;
u32 interval;
char name[20];
@@ -712,6 +736,8 @@ struct dwc3_scratchpad_array {
* @gadget_driver: pointer to the gadget driver
* @regs: base address for our registers
* @regs_size: address space size
+ * @fladj: frame length adjustment
+ * @irq_gadget: peripheral controller's IRQ number
* @nr_scratch: number of scratch buffers
* @u1u2: only used on revisions <1.83a for workaround
* @maximum_speed: maximum speed requested (mainly for testing purposes)
@@ -744,6 +770,7 @@ struct dwc3_scratchpad_array {
* @lpm_nyet_threshold: LPM NYET response threshold
* @hird_threshold: HIRD threshold
* @hsphy_interface: "utmi" or "ulpi"
+ * @connected: true when we're connected to a host, false otherwise
* @delayed_status: true when gadget driver asks for delayed status
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
@@ -754,6 +781,7 @@ struct dwc3_scratchpad_array {
* 0 - utmi_sleep_n
* 1 - utmi_l1_suspend_n
* @is_fpga: true when we are using the FPGA board
+ * @pending_events: true when we have pending IRQs to be handled
* @pullups_connected: true when Run/Stop bit is set
* @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
* @start_config_issued: true when StartConfig command has been issued
@@ -818,10 +846,8 @@ struct dwc3 {
enum usb_dr_mode dr_mode;
- /* used for suspend/resume */
- u32 dcfg;
- u32 gctl;
-
+ u32 fladj;
+ u32 irq_gadget;
u32 nr_scratch;
u32 u1u2;
u32 maximum_speed;
@@ -860,7 +886,7 @@ struct dwc3 {
* just so dwc31 revisions are always larger than dwc3.
*/
#define DWC3_REVISION_IS_DWC31 0x80000000
-#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_USB31)
+#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31)
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -890,6 +916,7 @@ struct dwc3 {
const char *hsphy_interface;
+ unsigned connected:1;
unsigned delayed_status:1;
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
@@ -897,6 +924,7 @@ struct dwc3 {
unsigned has_lpm_erratum:1;
unsigned is_utmi_l1_suspend:1;
unsigned is_fpga:1;
+ unsigned pending_events:1;
unsigned pullups_connected:1;
unsigned setup_packet_pending:1;
unsigned three_stage_setup:1;
@@ -1094,8 +1122,8 @@ void dwc3_gadget_exit(struct dwc3 *dwc);
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
int dwc3_gadget_get_link_state(struct dwc3 *dwc);
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
-int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
+int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+ struct dwc3_gadget_ep_cmd_params *params);
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
@@ -1110,8 +1138,8 @@ static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc,
enum dwc3_link_state state)
{ return 0; }
-static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
+static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+ struct dwc3_gadget_ep_cmd_params *params)
{ return 0; }
static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
int cmd, u32 param)
@@ -1122,6 +1150,7 @@ static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
#if !IS_ENABLED(CONFIG_USB_DWC3_HOST)
int dwc3_gadget_suspend(struct dwc3 *dwc);
int dwc3_gadget_resume(struct dwc3 *dwc);
+void dwc3_gadget_process_pending_events(struct dwc3 *dwc);
#else
static inline int dwc3_gadget_suspend(struct dwc3 *dwc)
{
@@ -1132,6 +1161,10 @@ static inline int dwc3_gadget_resume(struct dwc3 *dwc)
{
return 0;
}
+
+static inline void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+{
+}
#endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */
#if IS_ENABLED(CONFIG_USB_DWC3_ULPI)
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 71e318025964..22dfc3dd6a13 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -128,56 +128,112 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
* dwc3_gadget_event_string - returns event name
* @event: the event code
*/
-static inline const char *dwc3_gadget_event_string(u8 event)
+static inline const char *
+dwc3_gadget_event_string(const struct dwc3_event_devt *event)
{
- switch (event) {
+ static char str[256];
+ enum dwc3_link_state state = event->event_info & DWC3_LINK_STATE_MASK;
+
+ switch (event->type) {
case DWC3_DEVICE_EVENT_DISCONNECT:
- return "Disconnect";
+ sprintf(str, "Disconnect: [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_RESET:
- return "Reset";
+ sprintf(str, "Reset [%s]", dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_CONNECT_DONE:
- return "Connection Done";
+ sprintf(str, "Connection Done [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
- return "Link Status Change";
+ sprintf(str, "Link Change [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_WAKEUP:
- return "WakeUp";
+ sprintf(str, "WakeUp [%s]", dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_EOPF:
- return "End-Of-Frame";
+ sprintf(str, "End-Of-Frame [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_SOF:
- return "Start-Of-Frame";
+ sprintf(str, "Start-Of-Frame [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
- return "Erratic Error";
+ sprintf(str, "Erratic Error [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_CMD_CMPL:
- return "Command Complete";
+ sprintf(str, "Command Complete [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_OVERFLOW:
- return "Overflow";
+ sprintf(str, "Overflow [%s]", dwc3_gadget_link_string(state));
+ break;
+ default:
+ sprintf(str, "UNKNOWN");
}
- return "UNKNOWN";
+ return str;
}
/**
* dwc3_ep_event_string - returns event name
* @event: then event code
*/
-static inline const char *dwc3_ep_event_string(u8 event)
+static inline const char *
+dwc3_ep_event_string(const struct dwc3_event_depevt *event)
{
- switch (event) {
+ u8 epnum = event->endpoint_number;
+ static char str[256];
+ int status;
+ int ret;
+
+ ret = sprintf(str, "ep%d%s: ", epnum >> 1,
+ (epnum & 1) ? "in" : "in");
+ if (ret < 0)
+ return "UNKNOWN";
+
+ switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
- return "Transfer Complete";
+ strcat(str, "Transfer Complete");
+ break;
case DWC3_DEPEVT_XFERINPROGRESS:
- return "Transfer In-Progress";
+ strcat(str, "Transfer In-Progress");
+ break;
case DWC3_DEPEVT_XFERNOTREADY:
- return "Transfer Not Ready";
+ strcat(str, "Transfer Not Ready");
+ status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
+ strcat(str, status ? " (Active)" : " (Not Active)");
+ break;
case DWC3_DEPEVT_RXTXFIFOEVT:
- return "FIFO";
+ strcat(str, "FIFO");
+ break;
case DWC3_DEPEVT_STREAMEVT:
- return "Stream";
+ status = event->status;
+
+ switch (status) {
+ case DEPEVT_STREAMEVT_FOUND:
+ sprintf(str + ret, " Stream %d Found",
+ event->parameters);
+ break;
+ case DEPEVT_STREAMEVT_NOTFOUND:
+ default:
+ strcat(str, " Stream Not Found");
+ break;
+ }
+
+ break;
case DWC3_DEPEVT_EPCMDCMPLT:
- return "Endpoint Command Complete";
+ strcat(str, "Endpoint Command Complete");
+ break;
+ default:
+ sprintf(str, "UNKNOWN");
}
- return "UNKNOWN";
+ return str;
}
/**
@@ -214,6 +270,46 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
}
}
+static inline const char *dwc3_decode_event(u32 event)
+{
+ const union dwc3_event evt = (union dwc3_event) event;
+
+ if (evt.type.is_devspec)
+ return dwc3_gadget_event_string(&evt.devt);
+ else
+ return dwc3_ep_event_string(&evt.depevt);
+}
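
dwc3_decode_event() turns tracing into a single call, at the price of the static 256-byte buffers in the helpers above: the returned string is only valid until the next event is decoded, and the helpers are not safe for concurrent callers. Usage sketch (the caller name is illustrative):

	static void dwc3_trace_event_sketch(struct dwc3 *dwc, u32 raw)
	{
		/* consume before the next decode overwrites the buffer */
		dev_dbg(dwc->dev, "event: %s\n", dwc3_decode_event(raw));
	}
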
+
+static inline const char *dwc3_ep_cmd_status_string(int status)
+{
+ switch (status) {
+ case -ETIMEDOUT:
+ return "Timed Out";
+ case 0:
+ return "Successful";
+ case DEPEVT_TRANSFER_NO_RESOURCE:
+ return "No Resource";
+ case DEPEVT_TRANSFER_BUS_EXPIRY:
+ return "Bus Expiry";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
+{
+ switch (status) {
+ case -ETIMEDOUT:
+ return "Timed Out";
+ case 0:
+ return "Successful";
+ case 1:
+ return "Error";
+ default:
+ return "UNKNOWN";
+ }
+}
+
void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index b1dd3c6d7ef7..31926dda43c9 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -36,9 +36,32 @@
#define dump_register(nm) \
{ \
.name = __stringify(nm), \
- .offset = DWC3_ ##nm - DWC3_GLOBALS_REGS_START, \
+ .offset = DWC3_ ##nm, \
}
+#define dump_ep_register_set(n) \
+ { \
+ .name = "DEPCMDPAR2("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMDPAR2, \
+ }, \
+ { \
+ .name = "DEPCMDPAR1("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMDPAR1, \
+ }, \
+ { \
+ .name = "DEPCMDPAR0("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMDPAR0, \
+ }, \
+ { \
+ .name = "DEPCMD("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMD, \
+ }
+
+
static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(GSBUSCFG0),
dump_register(GSBUSCFG1),
@@ -47,6 +70,7 @@ static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(GCTL),
dump_register(GEVTEN),
dump_register(GSTS),
+ dump_register(GUCTL1),
dump_register(GSNPSID),
dump_register(GGPIO),
dump_register(GUID),
@@ -218,137 +242,38 @@ static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(DGCMD),
dump_register(DALEPENA),
- dump_register(DEPCMDPAR2(0)),
- dump_register(DEPCMDPAR2(1)),
- dump_register(DEPCMDPAR2(2)),
- dump_register(DEPCMDPAR2(3)),
- dump_register(DEPCMDPAR2(4)),
- dump_register(DEPCMDPAR2(5)),
- dump_register(DEPCMDPAR2(6)),
- dump_register(DEPCMDPAR2(7)),
- dump_register(DEPCMDPAR2(8)),
- dump_register(DEPCMDPAR2(9)),
- dump_register(DEPCMDPAR2(10)),
- dump_register(DEPCMDPAR2(11)),
- dump_register(DEPCMDPAR2(12)),
- dump_register(DEPCMDPAR2(13)),
- dump_register(DEPCMDPAR2(14)),
- dump_register(DEPCMDPAR2(15)),
- dump_register(DEPCMDPAR2(16)),
- dump_register(DEPCMDPAR2(17)),
- dump_register(DEPCMDPAR2(18)),
- dump_register(DEPCMDPAR2(19)),
- dump_register(DEPCMDPAR2(20)),
- dump_register(DEPCMDPAR2(21)),
- dump_register(DEPCMDPAR2(22)),
- dump_register(DEPCMDPAR2(23)),
- dump_register(DEPCMDPAR2(24)),
- dump_register(DEPCMDPAR2(25)),
- dump_register(DEPCMDPAR2(26)),
- dump_register(DEPCMDPAR2(27)),
- dump_register(DEPCMDPAR2(28)),
- dump_register(DEPCMDPAR2(29)),
- dump_register(DEPCMDPAR2(30)),
- dump_register(DEPCMDPAR2(31)),
-
- dump_register(DEPCMDPAR1(0)),
- dump_register(DEPCMDPAR1(1)),
- dump_register(DEPCMDPAR1(2)),
- dump_register(DEPCMDPAR1(3)),
- dump_register(DEPCMDPAR1(4)),
- dump_register(DEPCMDPAR1(5)),
- dump_register(DEPCMDPAR1(6)),
- dump_register(DEPCMDPAR1(7)),
- dump_register(DEPCMDPAR1(8)),
- dump_register(DEPCMDPAR1(9)),
- dump_register(DEPCMDPAR1(10)),
- dump_register(DEPCMDPAR1(11)),
- dump_register(DEPCMDPAR1(12)),
- dump_register(DEPCMDPAR1(13)),
- dump_register(DEPCMDPAR1(14)),
- dump_register(DEPCMDPAR1(15)),
- dump_register(DEPCMDPAR1(16)),
- dump_register(DEPCMDPAR1(17)),
- dump_register(DEPCMDPAR1(18)),
- dump_register(DEPCMDPAR1(19)),
- dump_register(DEPCMDPAR1(20)),
- dump_register(DEPCMDPAR1(21)),
- dump_register(DEPCMDPAR1(22)),
- dump_register(DEPCMDPAR1(23)),
- dump_register(DEPCMDPAR1(24)),
- dump_register(DEPCMDPAR1(25)),
- dump_register(DEPCMDPAR1(26)),
- dump_register(DEPCMDPAR1(27)),
- dump_register(DEPCMDPAR1(28)),
- dump_register(DEPCMDPAR1(29)),
- dump_register(DEPCMDPAR1(30)),
- dump_register(DEPCMDPAR1(31)),
-
- dump_register(DEPCMDPAR0(0)),
- dump_register(DEPCMDPAR0(1)),
- dump_register(DEPCMDPAR0(2)),
- dump_register(DEPCMDPAR0(3)),
- dump_register(DEPCMDPAR0(4)),
- dump_register(DEPCMDPAR0(5)),
- dump_register(DEPCMDPAR0(6)),
- dump_register(DEPCMDPAR0(7)),
- dump_register(DEPCMDPAR0(8)),
- dump_register(DEPCMDPAR0(9)),
- dump_register(DEPCMDPAR0(10)),
- dump_register(DEPCMDPAR0(11)),
- dump_register(DEPCMDPAR0(12)),
- dump_register(DEPCMDPAR0(13)),
- dump_register(DEPCMDPAR0(14)),
- dump_register(DEPCMDPAR0(15)),
- dump_register(DEPCMDPAR0(16)),
- dump_register(DEPCMDPAR0(17)),
- dump_register(DEPCMDPAR0(18)),
- dump_register(DEPCMDPAR0(19)),
- dump_register(DEPCMDPAR0(20)),
- dump_register(DEPCMDPAR0(21)),
- dump_register(DEPCMDPAR0(22)),
- dump_register(DEPCMDPAR0(23)),
- dump_register(DEPCMDPAR0(24)),
- dump_register(DEPCMDPAR0(25)),
- dump_register(DEPCMDPAR0(26)),
- dump_register(DEPCMDPAR0(27)),
- dump_register(DEPCMDPAR0(28)),
- dump_register(DEPCMDPAR0(29)),
- dump_register(DEPCMDPAR0(30)),
- dump_register(DEPCMDPAR0(31)),
-
- dump_register(DEPCMD(0)),
- dump_register(DEPCMD(1)),
- dump_register(DEPCMD(2)),
- dump_register(DEPCMD(3)),
- dump_register(DEPCMD(4)),
- dump_register(DEPCMD(5)),
- dump_register(DEPCMD(6)),
- dump_register(DEPCMD(7)),
- dump_register(DEPCMD(8)),
- dump_register(DEPCMD(9)),
- dump_register(DEPCMD(10)),
- dump_register(DEPCMD(11)),
- dump_register(DEPCMD(12)),
- dump_register(DEPCMD(13)),
- dump_register(DEPCMD(14)),
- dump_register(DEPCMD(15)),
- dump_register(DEPCMD(16)),
- dump_register(DEPCMD(17)),
- dump_register(DEPCMD(18)),
- dump_register(DEPCMD(19)),
- dump_register(DEPCMD(20)),
- dump_register(DEPCMD(21)),
- dump_register(DEPCMD(22)),
- dump_register(DEPCMD(23)),
- dump_register(DEPCMD(24)),
- dump_register(DEPCMD(25)),
- dump_register(DEPCMD(26)),
- dump_register(DEPCMD(27)),
- dump_register(DEPCMD(28)),
- dump_register(DEPCMD(29)),
- dump_register(DEPCMD(30)),
- dump_register(DEPCMD(31)),
+ dump_ep_register_set(0),
+ dump_ep_register_set(1),
+ dump_ep_register_set(2),
+ dump_ep_register_set(3),
+ dump_ep_register_set(4),
+ dump_ep_register_set(5),
+ dump_ep_register_set(6),
+ dump_ep_register_set(7),
+ dump_ep_register_set(8),
+ dump_ep_register_set(9),
+ dump_ep_register_set(10),
+ dump_ep_register_set(11),
+ dump_ep_register_set(12),
+ dump_ep_register_set(13),
+ dump_ep_register_set(14),
+ dump_ep_register_set(15),
+ dump_ep_register_set(16),
+ dump_ep_register_set(17),
+ dump_ep_register_set(18),
+ dump_ep_register_set(19),
+ dump_ep_register_set(20),
+ dump_ep_register_set(21),
+ dump_ep_register_set(22),
+ dump_ep_register_set(23),
+ dump_ep_register_set(24),
+ dump_ep_register_set(25),
+ dump_ep_register_set(26),
+ dump_ep_register_set(27),
+ dump_ep_register_set(28),
+ dump_ep_register_set(29),
+ dump_ep_register_set(30),
+ dump_ep_register_set(31),
dump_register(OCFG),
dump_register(OCTL),
@@ -939,7 +864,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->regs = dwc3_regs;
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
- dwc->regset->base = dwc->regs;
+ dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
if (!file)
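
Because dwc3_regs now holds absolute register offsets while dwc->regs is mapped at the globals block, the regset base must be shifted back so the regset's base-plus-offset addition lands on the right address. Worked example, assuming DWC3_GLOBALS_REGS_START is 0xc100 as defined in core.h:

	/* effective address computed for GCTL (absolute offset 0xc110):
	 * base + offset = (dwc->regs - 0xc100) + 0xc110 = dwc->regs + 0x10 */
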
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index af264493bbae..29e80cc9b634 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -165,7 +165,7 @@ static void dwc3_omap_write_utmi_ctrl(struct dwc3_omap *omap, u32 value)
static u32 dwc3_omap_read_irq0_status(struct dwc3_omap *omap)
{
- return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_0 -
+ return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_RAW_0 -
omap->irq0_offset);
}
@@ -178,7 +178,7 @@ static void dwc3_omap_write_irq0_status(struct dwc3_omap *omap, u32 value)
static u32 dwc3_omap_read_irqmisc_status(struct dwc3_omap *omap)
{
- return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_MISC +
+ return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_RAW_MISC +
omap->irqmisc_offset);
}
@@ -231,35 +231,30 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
}
val = dwc3_omap_read_utmi_ctrl(omap);
- val &= ~(USBOTGSS_UTMI_OTG_CTRL_IDDIG
- | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
- | USBOTGSS_UTMI_OTG_CTRL_SESSEND);
- val |= USBOTGSS_UTMI_OTG_CTRL_SESSVALID
- | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
+ val &= ~USBOTGSS_UTMI_OTG_CTRL_IDDIG;
dwc3_omap_write_utmi_ctrl(omap, val);
break;
case OMAP_DWC3_VBUS_VALID:
val = dwc3_omap_read_utmi_ctrl(omap);
val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
- val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG
- | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
- | USBOTGSS_UTMI_OTG_CTRL_SESSVALID
- | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
+ val |= USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
+ | USBOTGSS_UTMI_OTG_CTRL_SESSVALID;
dwc3_omap_write_utmi_ctrl(omap, val);
break;
case OMAP_DWC3_ID_FLOAT:
if (omap->vbus_reg)
regulator_disable(omap->vbus_reg);
+ val = dwc3_omap_read_utmi_ctrl(omap);
+ val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+ dwc3_omap_write_utmi_ctrl(omap, val);
case OMAP_DWC3_VBUS_OFF:
val = dwc3_omap_read_utmi_ctrl(omap);
val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
- | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
- | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT);
- val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND
- | USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+ | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID);
+ val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND;
dwc3_omap_write_utmi_ctrl(omap, val);
break;
@@ -268,19 +263,38 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
}
}
+static void dwc3_omap_enable_irqs(struct dwc3_omap *omap);
+static void dwc3_omap_disable_irqs(struct dwc3_omap *omap);
+
static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
{
struct dwc3_omap *omap = _omap;
+
+ if (dwc3_omap_read_irqmisc_status(omap) ||
+ dwc3_omap_read_irq0_status(omap)) {
+ /* mask irqs */
+ dwc3_omap_disable_irqs(omap);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t dwc3_omap_interrupt_thread(int irq, void *_omap)
+{
+ struct dwc3_omap *omap = _omap;
u32 reg;
+ /* clear irq status flags */
reg = dwc3_omap_read_irqmisc_status(omap);
-
dwc3_omap_write_irqmisc_status(omap, reg);
reg = dwc3_omap_read_irq0_status(omap);
-
dwc3_omap_write_irq0_status(omap, reg);
+ /* unmask irqs */
+ dwc3_omap_enable_irqs(omap);
+
return IRQ_HANDLED;
}
@@ -497,8 +511,9 @@ static int dwc3_omap_probe(struct platform_device *pdev)
/* check the DMA Status */
reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
- ret = devm_request_irq(dev, omap->irq, dwc3_omap_interrupt, 0,
- "dwc3-omap", omap);
+ ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
+ dwc3_omap_interrupt_thread, IRQF_SHARED,
+ "dwc3-omap", omap);
if (ret) {
dev_err(dev, "failed to request IRQ #%d --> %d\n",
omap->irq, ret);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 14196cd416b3..45f5a232d9fb 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -20,11 +20,11 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/acpi.h>
-
-#include "platform_data.h"
+#include <linux/delay.h>
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
@@ -51,62 +51,70 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
{
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
- struct dwc3_platform_data pdata;
-
- memset(&pdata, 0, sizeof(pdata));
-
- pdata.has_lpm_erratum = true;
- pdata.lpm_nyet_threshold = 0xf;
-
- pdata.u2exit_lfps_quirk = true;
- pdata.u2ss_inp3_quirk = true;
- pdata.req_p1p2p3_quirk = true;
- pdata.del_p1p2p3_quirk = true;
- pdata.del_phy_power_chg_quirk = true;
- pdata.lfps_filter_quirk = true;
- pdata.rx_detect_poll_quirk = true;
-
- pdata.tx_de_emphasis_quirk = true;
- pdata.tx_de_emphasis = 1;
-
- /*
- * FIXME these quirks should be removed when AMD NL
- * taps out
- */
- pdata.disable_scramble_quirk = true;
- pdata.dis_u3_susphy_quirk = true;
- pdata.dis_u2_susphy_quirk = true;
-
- return platform_device_add_data(dwc3, &pdata, sizeof(pdata));
+ struct property_entry properties[] = {
+ PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
+ PROPERTY_ENTRY_U8("snps,lpm-nyet-threshold", 0xf),
+ PROPERTY_ENTRY_BOOL("snps,u2exit_lfps_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,u2ss_inp3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,req_p1p2p3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,del_p1p2p3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,del_phy_power_chg_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,lfps_filter_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,rx_detect_poll_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,tx_de_emphasis_quirk"),
+ PROPERTY_ENTRY_U8("snps,tx_de_emphasis", 1),
+ /*
+ * FIXME these quirks should be removed when AMD NL
+ * tapes out
+ */
+ PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ { },
+ };
+
+ return platform_device_add_properties(dwc3, properties);
}
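
These property_entry tables replace the old dwc3_platform_data: the core reads everything back through the unified property API, which behaves the same whether the values come from DT, ACPI, or a built-in table like this one. A consumer-side sketch with the real property helpers:

	u8 lpm_nyet_threshold;

	dwc->has_lpm_erratum =
		device_property_read_bool(dev, "snps,has-lpm-erratum");
	if (device_property_read_u8(dev, "snps,lpm-nyet-threshold",
				    &lpm_nyet_threshold))
		lpm_nyet_threshold = 0xf;	/* illustrative fallback */
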
- if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
- struct gpio_desc *gpio;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ int ret;
- acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
- acpi_dwc3_byt_gpios);
+ struct property_entry properties[] = {
+ PROPERTY_ENTRY_STRING("dr-mode", "peripheral"),
+ { }
+ };
- /*
- * These GPIOs will turn on the USB2 PHY. Note that we have to
- * put the gpio descriptors again here because the phy driver
- * might want to grab them, too.
- */
- gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
+ ret = platform_device_add_properties(dwc3, properties);
+ if (ret < 0)
+ return ret;
- gpiod_set_value_cansleep(gpio, 1);
- gpiod_put(gpio);
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
+ struct gpio_desc *gpio;
- gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
+ acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+ acpi_dwc3_byt_gpios);
+
+ /*
+ * These GPIOs will turn on the USB2 PHY. Note that we have to
+ * put the gpio descriptors again here because the phy driver
+ * might want to grab them, too.
+ */
+ gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
- if (gpio) {
gpiod_set_value_cansleep(gpio, 1);
gpiod_put(gpio);
- usleep_range(10000, 11000);
+
+ gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (gpio) {
+ gpiod_set_value_cansleep(gpio, 1);
+ gpiod_put(gpio);
+ usleep_range(10000, 11000);
+ }
}
}
@@ -114,15 +122,14 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
(pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 ||
pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI ||
pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31)) {
-
- struct dwc3_platform_data pdata;
-
- memset(&pdata, 0, sizeof(pdata));
- pdata.usb3_lpm_capable = true;
- pdata.has_lpm_erratum = true;
- pdata.dis_enblslpm_quirk = true;
-
- return platform_device_add_data(dwc3, &pdata, sizeof(pdata));
+ struct property_entry properties[] = {
+ PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
+ PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
+ PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
+ { },
+ };
+
+ return platform_device_add_properties(dwc3, properties);
}
return 0;
@@ -180,7 +187,11 @@ static int dwc3_pci_probe(struct pci_dev *pci,
goto err;
}
+ device_init_wakeup(dev, true);
+ device_set_run_wake(dev, true);
pci_set_drvdata(pci, dwc3);
+ pm_runtime_put(dev);
+
return 0;
err:
platform_device_put(dwc3);
@@ -189,6 +200,8 @@ err:
static void dwc3_pci_remove(struct pci_dev *pci)
{
+ device_init_wakeup(&pci->dev, false);
+ pm_runtime_get(&pci->dev);
acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev));
platform_device_unregister(pci_get_drvdata(pci));
}
@@ -219,11 +232,43 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+#ifdef CONFIG_PM
+static int dwc3_pci_runtime_suspend(struct device *dev)
+{
+ if (device_run_wake(dev))
+ return 0;
+
+ return -EBUSY;
+}
+
+static int dwc3_pci_pm_dummy(struct device *dev)
+{
+ /*
+ * There's nothing to do here. No, seriously. Everything is taken
+ * care of either by the PCI subsystem or by dwc3/core.c, so we have nothing
+ * missing here.
+ *
+ * So you'd think we didn't need this at all, but PCI subsystem will
+ * bail out if we don't have a valid callback :-s
+ */
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
+ SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_pm_dummy,
+ NULL)
+};
+
static struct pci_driver dwc3_pci_driver = {
.name = "dwc3-pci",
.id_table = dwc3_pci_id_table,
.probe = dwc3_pci_probe,
.remove = dwc3_pci_remove,
+ .driver = {
+ .pm = &dwc3_pci_dev_pm_ops,
+ }
};
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 51b52a79dfec..fe79d771dee4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -98,8 +98,7 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
trace_dwc3_prepare_trb(dep, trb);
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_STARTTRANSFER, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
if (ret < 0) {
dwc3_trace(trace_dwc3_ep0, "%s STARTTRANSFER failed",
dep->name);
@@ -107,9 +106,7 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
}
dep->flags |= DWC3_EP_BUSY;
- dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
- dep->number);
-
+ dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
dwc->ep0_next_event = DWC3_EP0_COMPLETE;
return 0;
@@ -499,7 +496,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
case USB_RECIP_ENDPOINT:
switch (wValue) {
case USB_ENDPOINT_HALT:
- dep = dwc3_wIndex_to_dep(dwc, wIndex);
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
if (!dep)
return -EINVAL;
if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
@@ -622,8 +619,8 @@ static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
struct timing {
u8 u1sel;
u8 u1pel;
- u16 u2sel;
- u16 u2pel;
+ __le16 u2sel;
+ __le16 u2pel;
} __packed timing;
int ret;
@@ -980,7 +977,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
ret = usb_gadget_map_request(&dwc->gadget, &req->request,
dep->number);
if (ret) {
- dwc3_trace(trace_dwc3_ep0, "failed to map request\n");
+ dwc3_trace(trace_dwc3_ep0, "failed to map request");
return;
}
@@ -1008,7 +1005,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
ret = usb_gadget_map_request(&dwc->gadget, &req->request,
dep->number);
if (ret) {
- dwc3_trace(trace_dwc3_ep0, "failed to map request\n");
+ dwc3_trace(trace_dwc3_ep0, "failed to map request");
return;
}
@@ -1058,7 +1055,7 @@ static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
cmd |= DWC3_DEPCMD_CMDIOC;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
WARN_ON_ONCE(ret);
dep->resource_index = 0;
}
@@ -1112,11 +1109,8 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
- u8 epnum = event->endpoint_number;
-
- dwc3_trace(trace_dwc3_ep0, "%s while ep%d%s in state '%s'",
- dwc3_ep_event_string(event->endpoint_event),
- epnum >> 1, (epnum & 1) ? "in" : "out",
+ dwc3_trace(trace_dwc3_ep0, "%s: state '%s'",
+ dwc3_ep_event_string(event),
dwc3_ep0_state_string(dwc->ep0state));
switch (event->endpoint_event) {
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 07248ff1be5c..8f8c2157910e 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -145,21 +145,29 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
return -ETIMEDOUT;
}
-static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
+/**
+ * dwc3_ep_inc_trb() - Increment a TRB index.
+ * @index: Pointer to the TRB index to increment.
+ *
+ * The index should never point to the link TRB. After incrementing,
+ * if it points to the link TRB, wrap around to the beginning. The
+ * link TRB is always the last entry in the ring.
+ */
+static void dwc3_ep_inc_trb(u8 *index)
{
- dep->trb_enqueue++;
- dep->trb_enqueue %= DWC3_TRB_NUM;
+ (*index)++;
+ if (*index == (DWC3_TRB_NUM - 1))
+ *index = 0;
}
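+/*
+ * Worked example (ring size illustrative only): with DWC3_TRB_NUM == 8,
+ * the index advances 0, 1, ... 6 and then wraps straight back to 0;
+ * slot 7 is never returned because it always holds the link TRB.
+ */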
-static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
+static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
- dep->trb_dequeue++;
- dep->trb_dequeue %= DWC3_TRB_NUM;
+ dwc3_ep_inc_trb(&dep->trb_enqueue);
}
-static int dwc3_ep_is_last_trb(unsigned int index)
+static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
- return index == DWC3_TRB_NUM - 1;
+ dwc3_ep_inc_trb(&dep->trb_dequeue);
}
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
@@ -172,13 +180,6 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
i = 0;
do {
dwc3_ep_inc_deq(dep);
- /*
- * Skip LINK TRB. We can't use req->trb and check for
- * DWC3_TRBCTL_LINK_TRB because it points the TRB we
- * just completed (not the LINK TRB).
- */
- if (dwc3_ep_is_last_trb(dep->trb_dequeue))
- dwc3_ep_inc_deq(dep);
} while(++i < req->request.num_mapped_sgs);
req->started = false;
}
@@ -199,57 +200,54 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
spin_unlock(&dwc->lock);
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
+
+ if (dep->number > 1)
+ pm_runtime_put(dwc->dev);
}
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
u32 timeout = 500;
+ int status = 0;
+ int ret = 0;
u32 reg;
- trace_dwc3_gadget_generic_cmd(cmd, param);
-
dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
do {
reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
if (!(reg & DWC3_DGCMD_CMDACT)) {
- dwc3_trace(trace_dwc3_gadget,
- "Command Complete --> %d",
- DWC3_DGCMD_STATUS(reg));
- if (DWC3_DGCMD_STATUS(reg))
- return -EINVAL;
- return 0;
+ status = DWC3_DGCMD_STATUS(reg);
+ if (status)
+ ret = -EINVAL;
+ break;
}
+ } while (--timeout);
- /*
- * We can't sleep here, because it's also called from
- * interrupt context.
- */
- timeout--;
- if (!timeout) {
- dwc3_trace(trace_dwc3_gadget,
- "Command Timed Out");
- return -ETIMEDOUT;
- }
- udelay(1);
- } while (1);
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ status = -ETIMEDOUT;
+ }
+
+ trace_dwc3_gadget_generic_cmd(cmd, param, status);
+
+ return ret;
}
static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
-int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
+int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+ struct dwc3_gadget_ep_cmd_params *params)
{
- struct dwc3_ep *dep = dwc->eps[ep];
+ struct dwc3 *dwc = dep->dwc;
u32 timeout = 500;
u32 reg;
+ int cmd_status = 0;
int susphy = false;
int ret = -EINVAL;
- trace_dwc3_gadget_ep_cmd(dep, cmd, params);
-
/*
* Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
* we're issuing an endpoint command, we must check if
@@ -258,11 +256,13 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
* We will also set SUSPHY bit to what it was before returning as stated
* by the same section on Synopsys databook.
*/
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
- susphy = true;
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ if (dwc->gadget.speed <= USB_SPEED_HIGH) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+ susphy = true;
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
}
if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
@@ -279,26 +279,21 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
}
}
- dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
- dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
- dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
- dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
+ dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
do {
- reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
+ reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
if (!(reg & DWC3_DEPCMD_CMDACT)) {
- int cmd_status = DWC3_DEPCMD_STATUS(reg);
-
- dwc3_trace(trace_dwc3_gadget,
- "Command Complete --> %d",
- cmd_status);
+ cmd_status = DWC3_DEPCMD_STATUS(reg);
switch (cmd_status) {
case 0:
ret = 0;
break;
case DEPEVT_TRANSFER_NO_RESOURCE:
- dwc3_trace(trace_dwc3_gadget, "%s: no resource available");
ret = -EINVAL;
break;
case DEPEVT_TRANSFER_BUS_EXPIRY:
@@ -313,7 +308,6 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
* give a hint to the gadget driver that this is
* the case by returning -EAGAIN.
*/
- dwc3_trace(trace_dwc3_gadget, "%s: bus expiry");
ret = -EAGAIN;
break;
default:
@@ -322,21 +316,14 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
break;
}
+ } while (--timeout);
- /*
- * We can't sleep here, because it is also called from
- * interrupt context.
- */
- timeout--;
- if (!timeout) {
- dwc3_trace(trace_dwc3_gadget,
- "Command Timed Out");
- ret = -ETIMEDOUT;
- break;
- }
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ cmd_status = -ETIMEDOUT;
+ }
- udelay(1);
- } while (1);
+ trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
if (unlikely(susphy)) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
@@ -366,7 +353,7 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
memset(&params, 0, sizeof(params));
- return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}
static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
@@ -454,7 +441,7 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
memset(&params, 0x00, sizeof(params));
cmd = DWC3_DEPCMD_DEPSTARTCFG;
- ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret)
return ret;
@@ -475,10 +462,14 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
const struct usb_endpoint_descriptor *desc,
const struct usb_ss_ep_comp_descriptor *comp_desc,
- bool ignore, bool restore)
+ bool modify, bool restore)
{
struct dwc3_gadget_ep_cmd_params params;
+ if (dev_WARN_ONCE(dwc->dev, modify && restore,
+ "Can't modify and restore\n"))
+ return -EINVAL;
+
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
@@ -487,30 +478,22 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
/* Burst size is only needed in SuperSpeed mode */
if (dwc->gadget.speed >= USB_SPEED_SUPER) {
u32 burst = dep->endpoint.maxburst;
- u32 nump;
- u32 reg;
-
- /* update NumP */
- reg = dwc3_readl(dwc->regs, DWC3_DCFG);
- nump = DWC3_DCFG_NUMP(reg);
- nump = max(nump, burst);
- reg &= ~DWC3_DCFG_NUMP_MASK;
- reg |= nump << DWC3_DCFG_NUMP_SHIFT;
- dwc3_writel(dwc->regs, DWC3_DCFG, reg);
-
params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
}
- if (ignore)
- params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
-
- if (restore) {
+ if (modify) {
+ params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
+ } else if (restore) {
params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
params.param2 |= dep->saved_state;
+ } else {
+ params.param0 |= DWC3_DEPCFG_ACTION_INIT;
}
- params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
- | DWC3_DEPCFG_XFER_NOT_READY_EN;
+ params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
+
+ if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
+ params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
@@ -541,8 +524,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
dep->interval = 1 << (desc->bInterval - 1);
}
- return dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_SETEPCONFIG, &params);
+ return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
@@ -553,8 +535,8 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
- return dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
+ return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
+ &params);
}
/**
@@ -567,7 +549,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
const struct usb_endpoint_descriptor *desc,
const struct usb_ss_ep_comp_descriptor *comp_desc,
- bool ignore, bool restore)
+ bool modify, bool restore)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
@@ -581,7 +563,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
+ ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
restore);
if (ret)
return ret;
@@ -600,38 +582,24 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
if (usb_endpoint_xfer_control(desc))
- goto out;
+ return 0;
+
+ /* Initialize the TRB ring */
+ dep->trb_dequeue = 0;
+ dep->trb_enqueue = 0;
+ memset(dep->trb_pool, 0,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
/* Link TRB. The HWO bit is never reset */
trb_st_hw = &dep->trb_pool[0];
trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
- memset(trb_link, 0, sizeof(*trb_link));
-
trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
}
-out:
- switch (usb_endpoint_type(desc)) {
- case USB_ENDPOINT_XFER_CONTROL:
- /* don't change name */
- break;
- case USB_ENDPOINT_XFER_ISOC:
- strlcat(dep->name, "-isoc", sizeof(dep->name));
- break;
- case USB_ENDPOINT_XFER_BULK:
- strlcat(dep->name, "-bulk", sizeof(dep->name));
- break;
- case USB_ENDPOINT_XFER_INT:
- strlcat(dep->name, "-int", sizeof(dep->name));
- break;
- default:
- dev_err(dwc->dev, "invalid endpoint transfer type\n");
- }
-
return 0;
}
@@ -640,15 +608,13 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
- if (!list_empty(&dep->started_list)) {
- dwc3_stop_active_transfer(dwc, dep->number, true);
+ dwc3_stop_active_transfer(dwc, dep->number, true);
- /* - giveback all requests to gadget driver */
- while (!list_empty(&dep->started_list)) {
- req = next_request(&dep->started_list);
+ /* give back all requests to the gadget driver */
+ while (!list_empty(&dep->started_list)) {
+ req = next_request(&dep->started_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
- }
+ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
}
while (!list_empty(&dep->pending_list)) {
@@ -689,10 +655,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->type = 0;
dep->flags = 0;
- snprintf(dep->name, sizeof(dep->name), "ep%d%s",
- dep->number >> 1,
- (dep->number & 1) ? "in" : "out");
-
return 0;
}
@@ -784,6 +746,8 @@ static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
req->epnum = dep->number;
req->dep = dep;
+ dep->allocated_requests++;
+
trace_dwc3_alloc_request(req);
return &req->request;
@@ -793,7 +757,9 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
struct usb_request *request)
{
struct dwc3_request *req = to_dwc3_request(request);
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ dep->allocated_requests--;
trace_dwc3_free_request(req);
kfree(req);
}
@@ -825,9 +791,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
}
dwc3_ep_inc_enq(dep);
- /* Skip the LINK-TRB */
- if (dwc3_ep_is_last_trb(dep->trb_enqueue))
- dwc3_ep_inc_enq(dep);
trb->size = DWC3_TRB_SIZE_LENGTH(length);
trb->bpl = lower_32_bits(dma);
@@ -877,137 +840,169 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
trb->ctrl |= DWC3_TRB_CTRL_HWO;
+ dep->queued_requests++;
+
trace_dwc3_prepare_trb(dep, trb);
}
-/*
- * dwc3_prepare_trbs - setup TRBs from requests
- * @dep: endpoint for which requests are being prepared
- * @starting: true if the endpoint is idle and no requests are queued.
+/**
+ * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
+ * @dep: The endpoint with the TRB ring
+ * @index: The index of the current TRB in the ring
*
- * The function goes through the requests list and sets up TRBs for the
- * transfers. The function returns once there are no more TRBs available or
- * it runs out of requests.
+ * Returns the TRB prior to the one pointed to by the index. If the
+ * index is 0, we will wrap backwards, skip the link TRB, and return
+ * the one just before that.
*/
-static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
+static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
- struct dwc3_request *req, *n;
- u32 trbs_left;
- unsigned int last_one = 0;
+ if (!index)
+ index = DWC3_TRB_NUM - 2;
+ else
+ index--;
- BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
+ return &dep->trb_pool[index];
+}
- trbs_left = dep->trb_dequeue - dep->trb_enqueue;
+static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
+{
+ struct dwc3_trb *tmp;
+ u8 trbs_left;
/*
- * If enqueue & dequeue are equal than it is either full or empty. If we
- * are starting to process requests then we are empty. Otherwise we are
- * full and don't do anything
+ * If enqueue & dequeue are equal then the ring is either full or empty.
+ *
+ * One way to know for sure is to check whether the TRB right before us
+ * has its HWO bit set: if it does, then we're definitely full and can't
+ * fit any more transfers in our ring.
*/
- if (!trbs_left) {
- if (!starting)
- return;
+ if (dep->trb_enqueue == dep->trb_dequeue) {
+ tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
+ if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+ return 0;
- trbs_left = DWC3_TRB_NUM;
+ return DWC3_TRB_NUM - 1;
}
- /* The last TRB is a link TRB, not used for xfer */
- if (trbs_left <= 1)
- return;
+ trbs_left = dep->trb_dequeue - dep->trb_enqueue;
+ trbs_left &= (DWC3_TRB_NUM - 1);
- list_for_each_entry_safe(req, n, &dep->pending_list, list) {
- unsigned length;
- dma_addr_t dma;
- last_one = false;
-
- if (req->request.num_mapped_sgs > 0) {
- struct usb_request *request = &req->request;
- struct scatterlist *sg = request->sg;
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, request->num_mapped_sgs, i) {
- unsigned chain = true;
-
- length = sg_dma_len(s);
- dma = sg_dma_address(s);
-
- if (i == (request->num_mapped_sgs - 1) ||
- sg_is_last(s)) {
- if (list_empty(&dep->pending_list))
- last_one = true;
- chain = false;
- }
+ if (dep->trb_dequeue < dep->trb_enqueue)
+ trbs_left--;
- trbs_left--;
- if (!trbs_left)
- last_one = true;
+ return trbs_left;
+}
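+/*
+ * Example of the ambiguity handled above (DWC3_TRB_NUM == 8 for
+ * illustration): after queueing 7 TRBs from an empty ring, trb_enqueue
+ * wraps around and equals trb_dequeue again, so "empty" and "full" look
+ * identical from the indices alone. The HWO bit of the previous TRB
+ * disambiguates the two: hardware still owns it only when the ring is
+ * full.
+ */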
- if (last_one)
- chain = false;
+static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ struct dwc3_request *req, unsigned int trbs_left,
+ unsigned int more_coming)
+{
+ struct usb_request *request = &req->request;
+ struct scatterlist *sg = request->sg;
+ struct scatterlist *s;
+ unsigned int last = false;
+ unsigned int length;
+ dma_addr_t dma;
+ int i;
- dwc3_prepare_one_trb(dep, req, dma, length,
- last_one, chain, i);
+ for_each_sg(sg, s, request->num_mapped_sgs, i) {
+ unsigned chain = true;
- if (last_one)
- break;
- }
+ length = sg_dma_len(s);
+ dma = sg_dma_address(s);
- if (last_one)
- break;
- } else {
- dma = req->request.dma;
- length = req->request.length;
- trbs_left--;
+ if (sg_is_last(s)) {
+ if (usb_endpoint_xfer_int(dep->endpoint.desc) ||
+ !more_coming)
+ last = true;
- if (!trbs_left)
- last_one = 1;
+ chain = false;
+ }
- /* Is this the last request? */
- if (list_is_last(&req->list, &dep->pending_list))
- last_one = 1;
+ if (!trbs_left--)
+ last = true;
- dwc3_prepare_one_trb(dep, req, dma, length,
- last_one, false, 0);
+ if (last)
+ chain = false;
- if (last_one)
- break;
- }
+ dwc3_prepare_one_trb(dep, req, dma, length,
+ last, chain, i);
+
+ if (last)
+ break;
}
}
-static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
- int start_new)
+static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
+ struct dwc3_request *req, unsigned int trbs_left,
+ unsigned int more_coming)
+{
+ unsigned int last = false;
+ unsigned int length;
+ dma_addr_t dma;
+
+ dma = req->request.dma;
+ length = req->request.length;
+
+ if (!trbs_left)
+ last = true;
+
+ /* Is this the last request? */
+ if (usb_endpoint_xfer_int(dep->endpoint.desc) || !more_coming)
+ last = true;
+
+ dwc3_prepare_one_trb(dep, req, dma, length,
+ last, false, 0);
+}
+
+/*
+ * dwc3_prepare_trbs - setup TRBs from requests
+ * @dep: endpoint for which requests are being prepared
+ *
+ * The function goes through the requests list and sets up TRBs for the
+ * transfers. The function returns once there are no more TRBs available or
+ * it runs out of requests.
+ */
+static void dwc3_prepare_trbs(struct dwc3_ep *dep)
+{
+ struct dwc3_request *req, *n;
+ unsigned int more_coming;
+ u32 trbs_left;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
+
+ trbs_left = dwc3_calc_trbs_left(dep);
+ if (!trbs_left)
+ return;
+
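+ /*
+ * The number of allocated requests not yet sitting on the TRB ring;
+ * the prepare helpers below use it as a hint that more requests may
+ * follow, so they avoid marking a TRB as last prematurely.
+ */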
+ more_coming = dep->allocated_requests - dep->queued_requests;
+
+ list_for_each_entry_safe(req, n, &dep->pending_list, list) {
+ if (req->request.num_mapped_sgs > 0)
+ dwc3_prepare_one_trb_sg(dep, req, trbs_left--,
+ more_coming);
+ else
+ dwc3_prepare_one_trb_linear(dep, req, trbs_left--,
+ more_coming);
+
+ if (!trbs_left)
+ return;
+ }
+}
+
+static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_request *req;
struct dwc3 *dwc = dep->dwc;
+ int starting;
int ret;
u32 cmd;
- if (start_new && (dep->flags & DWC3_EP_BUSY)) {
- dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name);
- return -EBUSY;
- }
-
- /*
- * If we are getting here after a short-out-packet we don't enqueue any
- * new requests as we try to set the IOC bit only on the last request.
- */
- if (start_new) {
- if (list_empty(&dep->started_list))
- dwc3_prepare_trbs(dep, start_new);
-
- /* req points to the first request which will be sent */
- req = next_request(&dep->started_list);
- } else {
- dwc3_prepare_trbs(dep, start_new);
+ starting = !(dep->flags & DWC3_EP_BUSY);
- /*
- * req points to the first request where HWO changed from 0 to 1
- */
- req = next_request(&dep->started_list);
- }
+ dwc3_prepare_trbs(dep);
+ req = next_request(&dep->started_list);
if (!req) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
return 0;
@@ -1015,16 +1010,17 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
memset(&params, 0, sizeof(params));
- if (start_new) {
+ if (starting) {
params.param0 = upper_32_bits(req->trb_dma);
params.param1 = lower_32_bits(req->trb_dma);
- cmd = DWC3_DEPCMD_STARTTRANSFER;
+ cmd = DWC3_DEPCMD_STARTTRANSFER |
+ DWC3_DEPCMD_PARAM(cmd_param);
} else {
- cmd = DWC3_DEPCMD_UPDATETRANSFER;
+ cmd = DWC3_DEPCMD_UPDATETRANSFER |
+ DWC3_DEPCMD_PARAM(dep->resource_index);
}
- cmd |= DWC3_DEPCMD_PARAM(cmd_param);
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0) {
/*
* FIXME we need to iterate over the list of requests
@@ -1039,9 +1035,8 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
dep->flags |= DWC3_EP_BUSY;
- if (start_new) {
- dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
- dep->number);
+ if (starting) {
+ dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
WARN_ON_ONCE(!dep->resource_index);
}
@@ -1064,7 +1059,7 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
/* 4 micro frames in the future */
uf = cur_uf + dep->interval * 4;
- __dwc3_gadget_kick_transfer(dep, uf, 1);
+ __dwc3_gadget_kick_transfer(dep, uf);
}
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
@@ -1085,18 +1080,20 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
if (!dep->endpoint.desc) {
dwc3_trace(trace_dwc3_gadget,
- "trying to queue request %p to disabled %s\n",
+ "trying to queue request %p to disabled %s",
&req->request, dep->endpoint.name);
return -ESHUTDOWN;
}
if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
&req->request, req->dep->name)) {
- dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'\n",
+ dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
&req->request, req->dep->name);
return -EINVAL;
}
+ pm_runtime_get(dwc->dev);
+
req->request.actual = 0;
req->request.status = -EINPROGRESS;
req->direction = dep->direction;
@@ -1131,9 +1128,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* little bit faster.
*/
if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- !usb_endpoint_xfer_int(dep->endpoint.desc) &&
- !(dep->flags & DWC3_EP_BUSY)) {
- ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+ !usb_endpoint_xfer_int(dep->endpoint.desc)) {
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
goto out;
}
@@ -1163,7 +1159,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return 0;
}
- ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
if (!ret)
dep->flags &= ~DWC3_EP_PENDING_REQUEST;
@@ -1179,8 +1175,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
(dep->flags & DWC3_EP_BUSY) &&
!(dep->flags & DWC3_EP_MISSED_ISOC)) {
WARN_ON_ONCE(!dep->resource_index);
- ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
- false);
+ ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
goto out;
}
@@ -1190,12 +1185,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* handled.
*/
if (dep->stream_capable)
- ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
out:
if (ret && ret != -EBUSY)
dwc3_trace(trace_dwc3_gadget,
- "%s: failed to kick transfers\n",
+ "%s: failed to kick transfers",
dep->name);
if (ret == -EBUSY)
ret = 0;
@@ -1215,7 +1210,7 @@ static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
struct usb_request *request;
struct usb_ep *ep = &dep->endpoint;
- dwc3_trace(trace_dwc3_gadget, "queueing ZLP\n");
+ dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
if (!request)
return -ENOMEM;
@@ -1319,23 +1314,36 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
memset(&params, 0x00, sizeof(params));
if (value) {
- if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
- (!list_empty(&dep->started_list) ||
- !list_empty(&dep->pending_list)))) {
+ struct dwc3_trb *trb;
+ unsigned transfer_in_flight;
+ unsigned started;
+
+ if (dep->number > 1)
+ trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
+ else
+ trb = &dwc->ep0_trb[dep->trb_enqueue];
+
+ transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
+ started = !list_empty(&dep->started_list);
+
+ if (!protocol && ((dep->direction && transfer_in_flight) ||
+ (!dep->direction && started))) {
dwc3_trace(trace_dwc3_gadget,
"%s: pending request, cannot halt",
dep->name);
return -EAGAIN;
}
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_SETSTALL, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
+ &params);
if (ret)
dev_err(dwc->dev, "failed to set STALL on %s\n",
dep->name);
else
dep->flags |= DWC3_EP_STALL;
} else {
ret = dwc3_send_clear_stall_ep_cmd(dep);
if (ret)
dev_err(dwc->dev, "failed to clear STALL on %s\n",
@@ -1444,8 +1452,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
speed = reg & DWC3_DSTS_CONNECTSPD;
if ((speed == DWC3_DSTS_SUPERSPEED) ||
(speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
- dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n");
- return -EINVAL;
+ dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
+ return 0;
}
link_state = DWC3_DSTS_USBLNKST(reg);
@@ -1456,7 +1464,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
break;
default:
dwc3_trace(trace_dwc3_gadget,
- "can't wakeup from '%s'\n",
+ "can't wakeup from '%s'",
dwc3_gadget_link_string(link_state));
return -EINVAL;
}
@@ -1525,6 +1533,9 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
u32 reg;
u32 timeout = 500;
+ if (pm_runtime_suspended(dwc->dev))
+ return 0;
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
if (dwc->revision <= DWC3_REVISION_187A) {
@@ -1553,18 +1564,11 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
do {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- if (is_on) {
- if (!(reg & DWC3_DSTS_DEVCTRLHLT))
- break;
- } else {
- if (reg & DWC3_DSTS_DEVCTRLHLT)
- break;
- }
- timeout--;
- if (!timeout)
- return -ETIMEDOUT;
- udelay(1);
- } while (1);
+ reg &= DWC3_DSTS_DEVCTRLHLT;
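+ /*
+ * Spin until DEVCTRLHLT reflects the requested state:
+ * cleared when starting, set when stopping.
+ */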
+ } while (--timeout && !(!is_on ^ !reg));
+
+ if (!timeout)
+ return -ETIMEDOUT;
dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
dwc->gadget_driver
@@ -1616,36 +1620,52 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
-static int dwc3_gadget_start(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+/**
+ * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
+ * @dwc: pointer to our context structure
+ *
+ * The following looks complex, but it's actually very simple. To
+ * calculate the number of packets we can burst at once on OUT
+ * transfers, we use the RxFIFO size.
+ *
+ * To calculate RxFIFO size we need two numbers:
+ * MDWIDTH = width, in bits, of the internal memory bus
+ * RAM2_DEPTH = depth, in MDWIDTH-sized words, of internal RAM2 (where
+ * the RxFIFO sits)
+ *
+ * Given these two numbers, the formula is simple:
+ *
+ * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
+ *
+ * 24 bytes is for 3x SETUP packets
+ * 16 bytes is a clock domain crossing tolerance
+ *
+ * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
+ */
+static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
{
- struct dwc3 *dwc = gadget_to_dwc(g);
- struct dwc3_ep *dep;
- unsigned long flags;
- int ret = 0;
- int irq;
- u32 reg;
+ u32 ram2_depth;
+ u32 mdwidth;
+ u32 nump;
+ u32 reg;
- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
- ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
- IRQF_SHARED, "dwc3", dwc->ev_buf);
- if (ret) {
- dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
- irq, ret);
- goto err0;
- }
+ ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
+ mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
- spin_lock_irqsave(&dwc->lock, flags);
+ nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
+ nump = min_t(u32, nump, 16);
- if (dwc->gadget_driver) {
- dev_err(dwc->dev, "%s is already bound to %s\n",
- dwc->gadget.name,
- dwc->gadget_driver->driver.name);
- ret = -EBUSY;
- goto err1;
- }
+ /* update NumP */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~DWC3_DCFG_NUMP_MASK;
+ reg |= nump << DWC3_DCFG_NUMP_SHIFT;
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
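+/*
+ * Worked example of the formula above (values illustrative, not taken
+ * from any particular SoC): with MDWIDTH = 64 bits and RAM2_DEPTH =
+ * 1024, RxFIFO = (1024 * 64 / 8) - 24 - 16 = 8152 bytes, and NUMP =
+ * 8152 / 1024 = 7 by integer division, well under the cap of 16.
+ */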
- dwc->gadget_driver = driver;
+static int __dwc3_gadget_start(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+ int ret = 0;
+ u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
@@ -1668,16 +1688,16 @@ static int dwc3_gadget_start(struct usb_gadget *g,
} else {
switch (dwc->maximum_speed) {
case USB_SPEED_LOW:
- reg |= DWC3_DSTS_LOWSPEED;
+ reg |= DWC3_DCFG_LOWSPEED;
break;
case USB_SPEED_FULL:
- reg |= DWC3_DSTS_FULLSPEED1;
+ reg |= DWC3_DCFG_FULLSPEED1;
break;
case USB_SPEED_HIGH:
- reg |= DWC3_DSTS_HIGHSPEED;
+ reg |= DWC3_DCFG_HIGHSPEED;
break;
case USB_SPEED_SUPER_PLUS:
- reg |= DWC3_DSTS_SUPERSPEED_PLUS;
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
break;
default:
dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
@@ -1701,6 +1721,8 @@ static int dwc3_gadget_start(struct usb_gadget *g,
reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+ dwc3_gadget_setup_nump(dwc);
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -1709,7 +1731,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
- goto err2;
+ goto err0;
}
dep = dwc->eps[1];
@@ -1717,7 +1739,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
- goto err3;
+ goto err1;
}
/* begin to receive SETUP packets */
@@ -1726,43 +1748,79 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc3_gadget_enable_irq(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
-
return 0;
-err3:
- __dwc3_gadget_ep_disable(dwc->eps[0]);
-
-err2:
- dwc->gadget_driver = NULL;
-
err1:
- spin_unlock_irqrestore(&dwc->lock, flags);
-
- free_irq(irq, dwc->ev_buf);
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
err0:
return ret;
}
-static int dwc3_gadget_stop(struct usb_gadget *g)
+static int dwc3_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
+ int ret = 0;
int irq;
+ irq = dwc->irq_gadget;
+ ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
+ IRQF_SHARED, "dwc3", dwc->ev_buf);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+ irq, ret);
+ goto err0;
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
+ if (dwc->gadget_driver) {
+ dev_err(dwc->dev, "%s is already bound to %s\n",
+ dwc->gadget.name,
+ dwc->gadget_driver->driver.name);
+ ret = -EBUSY;
+ goto err1;
+ }
+
+ dwc->gadget_driver = driver;
+
+ if (pm_runtime_active(dwc->dev))
+ __dwc3_gadget_start(dwc);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+
+err1:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ free_irq(irq, dwc->ev_buf);
+
+err0:
+ return ret;
+}
+
+static void __dwc3_gadget_stop(struct dwc3 *dwc)
+{
+ if (pm_runtime_suspended(dwc->dev))
+ return;
dwc3_gadget_disable_irq(dwc);
__dwc3_gadget_ep_disable(dwc->eps[0]);
__dwc3_gadget_ep_disable(dwc->eps[1]);
+}
- dwc->gadget_driver = NULL;
+static int dwc3_gadget_stop(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
+ __dwc3_gadget_stop(dwc);
+ dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
- free_irq(irq, dwc->ev_buf);
+ free_irq(dwc->irq_gadget, dwc->ev_buf);
return 0;
}
@@ -1785,7 +1843,7 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
u8 i;
for (i = 0; i < num; i++) {
- u8 epnum = (i << 1) | (!!direction);
+ u8 epnum = (i << 1) | (direction ? 1 : 0);
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (!dep)
@@ -1794,12 +1852,14 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->dwc = dwc;
dep->number = epnum;
dep->direction = !!direction;
+ dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
dwc->eps[epnum] = dep;
snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
(epnum & 1) ? "in" : "out");
dep->endpoint.name = dep->name;
+ spin_lock_init(&dep->lock);
dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
@@ -1901,6 +1961,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
unsigned int s_pkt = 0;
unsigned int trb_status;
+ dep->queued_requests--;
trace_dwc3_complete_trb(dep, trb);
if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
@@ -1921,7 +1982,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
dwc3_trace(trace_dwc3_gadget,
- "%s: incomplete IN transfer\n",
+ "%s: incomplete IN transfer",
dep->name);
/*
* If missed isoc occurred and there is
@@ -2006,6 +2067,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
break;
} while (1);
+ /*
+ * Our endpoint might get disabled by another thread during
+ * dwc3_gadget_giveback(). If that happens, we're just going to return 1
+ * early on so that the DWC3_EP_BUSY flag gets cleared.
+ */
+ if (!dep->endpoint.desc)
+ return 1;
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
list_empty(&dep->started_list)) {
if (list_empty(&dep->pending_list)) {
@@ -2023,6 +2092,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
return 1;
}
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ if ((event->status & DEPEVT_STATUS_IOC) &&
+ (trb->ctrl & DWC3_TRB_CTRL_IOC))
+ return 0;
return 1;
}
@@ -2039,7 +2112,7 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
status = -ECONNRESET;
clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
- if (clean_busy && (is_xfer_complete ||
+ if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
usb_endpoint_xfer_isoc(dep->endpoint.desc)))
dep->flags &= ~DWC3_EP_BUSY;
@@ -2068,10 +2141,18 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
dwc->u1u2 = 0;
}
+ /*
+ * Our endpoint might get disabled by another thread during
+ * dwc3_gadget_giveback(). If that happens, we're just going to return
+ * early; the DWC3_EP_BUSY flag was already cleared above.
+ */
+ if (!dep->endpoint.desc)
+ return;
+
if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
int ret;
- ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete);
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
if (!ret || ret == -EBUSY)
return;
}
@@ -2099,7 +2180,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
dwc3_trace(trace_dwc3_gadget,
- "%s is an Isochronous endpoint\n",
+ "%s is an Isochronous endpoint",
dep->name);
return;
}
@@ -2122,12 +2203,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->name, active ? "Transfer Active"
: "Transfer Not Active");
- ret = __dwc3_gadget_kick_transfer(dep, 0, !active);
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
if (!ret || ret == -EBUSY)
return;
dwc3_trace(trace_dwc3_gadget,
- "%s: failed to kick transfers\n",
+ "%s: failed to kick transfers",
dep->name);
}
@@ -2150,11 +2231,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
/* FALLTHROUGH */
default:
dwc3_trace(trace_dwc3_gadget,
- "unable to find suitable stream\n");
+ "unable to find suitable stream");
}
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
- dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun\n", dep->name);
+ dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
break;
case DWC3_DEPEVT_EPCMDCMPLT:
dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
@@ -2237,7 +2318,7 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
cmd |= DWC3_DEPCMD_CMDIOC;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
WARN_ON_ONCE(ret);
dep->resource_index = 0;
dep->flags &= ~DWC3_EP_BUSY;
@@ -2300,12 +2381,16 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
+
+ dwc->connected = false;
}
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
u32 reg;
+ dwc->connected = true;
+
/*
* WORKAROUND: DWC3 revisions <1.88a have an issue which
* would cause a missing Disconnect Event if there's a
@@ -2393,12 +2478,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_update_ram_clk_sel(dwc, speed);
switch (speed) {
- case DWC3_DCFG_SUPERSPEED_PLUS:
+ case DWC3_DSTS_SUPERSPEED_PLUS:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget.ep0->maxpacket = 512;
dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
break;
- case DWC3_DCFG_SUPERSPEED:
+ case DWC3_DSTS_SUPERSPEED:
/*
* WORKAROUND: DWC3 revisions <1.90a have an issue which
* would cause a missing USB3 Reset event.
@@ -2419,18 +2504,18 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc->gadget.ep0->maxpacket = 512;
dwc->gadget.speed = USB_SPEED_SUPER;
break;
- case DWC3_DCFG_HIGHSPEED:
+ case DWC3_DSTS_HIGHSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_HIGH;
break;
- case DWC3_DCFG_FULLSPEED2:
- case DWC3_DCFG_FULLSPEED1:
+ case DWC3_DSTS_FULLSPEED2:
+ case DWC3_DSTS_FULLSPEED1:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_FULL;
break;
- case DWC3_DCFG_LOWSPEED:
+ case DWC3_DSTS_LOWSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
dwc->gadget.ep0->maxpacket = 8;
dwc->gadget.speed = USB_SPEED_LOW;
@@ -2440,8 +2525,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
/* Enable USB2 LPM Capability */
if ((dwc->revision > DWC3_REVISION_194A) &&
- (speed != DWC3_DCFG_SUPERSPEED) &&
- (speed != DWC3_DCFG_SUPERSPEED_PLUS)) {
+ (speed != DWC3_DSTS_SUPERSPEED) &&
+ (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg |= DWC3_DCFG_LPM_CAP;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
@@ -2610,6 +2695,17 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
dwc->link_state = next;
}
+static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
+ unsigned int evtinfo)
+{
+ enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+
+ if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
+ dwc3_suspend_gadget(dwc);
+
+ dwc->link_state = next;
+}
+
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
@@ -2661,7 +2757,20 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
break;
case DWC3_DEVICE_EVENT_EOPF:
- dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
+ /* Since version 2.30a this event is reported as a suspend event */
+ if (dwc->revision < DWC3_REVISION_230A) {
+ dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
+ } else {
+ dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
+
+ /*
+ * Ignore suspend event until the gadget enters into
+ * USB_STATE_CONFIGURED state.
+ */
+ if (dwc->gadget.state >= USB_STATE_CONFIGURED)
+ dwc3_gadget_suspend_interrupt(dwc,
+ event->event_info);
+ }
break;
case DWC3_DEVICE_EVENT_SOF:
dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
@@ -2767,6 +2876,13 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
u32 count;
u32 reg;
+ if (pm_runtime_suspended(dwc->dev)) {
+ pm_runtime_get(dwc->dev);
+ disable_irq_nosync(dwc->irq_gadget);
+ dwc->pending_events = true;
+ return IRQ_HANDLED;
+ }
+
count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
count &= DWC3_GEVNTCOUNT_MASK;
if (!count)
@@ -2798,7 +2914,33 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
*/
int dwc3_gadget_init(struct dwc3 *dwc)
{
- int ret;
+ int ret, irq;
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+
+ irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq <= 0) {
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq <= 0) {
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER) {
+ dev_err(dwc->dev,
+ "missing peripheral IRQ\n");
+ }
+ if (!irq)
+ irq = -EINVAL;
+ return irq;
+ }
+ }
+ }
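+ /*
+ * Lookup order above: the dedicated "peripheral" IRQ name first,
+ * then the legacy "dwc_usb3" name, then the first IRQ resource;
+ * -EPROBE_DEFER is propagated at each step so probing can retry.
+ */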
+
+ dwc->irq_gadget = irq;
dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
&dwc->ctrl_req_addr, GFP_KERNEL);
@@ -2861,7 +3003,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
*/
if (dwc->revision < DWC3_REVISION_220A)
dwc3_trace(trace_dwc3_gadget,
- "Changing max_speed on rev %08x\n",
+ "Changing max_speed on rev %08x",
dwc->revision);
dwc->gadget.max_speed = dwc->maximum_speed;
@@ -2935,61 +3077,50 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
+ int ret;
+
if (!dwc->gadget_driver)
return 0;
- if (dwc->pullups_connected) {
- dwc3_gadget_disable_irq(dwc);
- dwc3_gadget_run_stop(dwc, true, true);
- }
-
- __dwc3_gadget_ep_disable(dwc->eps[0]);
- __dwc3_gadget_ep_disable(dwc->eps[1]);
+ ret = dwc3_gadget_run_stop(dwc, false, false);
+ if (ret < 0)
+ return ret;
- dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ dwc3_disconnect_gadget(dwc);
+ __dwc3_gadget_stop(dwc);
return 0;
}
int dwc3_gadget_resume(struct dwc3 *dwc)
{
- struct dwc3_ep *dep;
int ret;
if (!dwc->gadget_driver)
return 0;
- /* Start with SuperSpeed Default */
- dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
-
- dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
- false);
- if (ret)
+ ret = __dwc3_gadget_start(dwc);
+ if (ret < 0)
goto err0;
- dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
- false);
- if (ret)
+ ret = dwc3_gadget_run_stop(dwc, true, false);
+ if (ret < 0)
goto err1;
- /* begin to receive SETUP packets */
- dwc->ep0state = EP0_SETUP_PHASE;
- dwc3_ep0_out_start(dwc);
-
- dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
-
- if (dwc->pullups_connected) {
- dwc3_gadget_enable_irq(dwc);
- dwc3_gadget_run_stop(dwc, true, false);
- }
-
return 0;
err1:
- __dwc3_gadget_ep_disable(dwc->eps[0]);
+ __dwc3_gadget_stop(dwc);
err0:
return ret;
}
+
+void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+{
+ if (dwc->pending_events) {
+ dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
+ dwc->pending_events = false;
+ enable_irq(dwc->irq_gadget);
+ }
+}
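+/*
+ * Note: dwc3_check_event_buf() above defers any event that arrives while
+ * the device is runtime suspended (it takes a PM reference, masks the
+ * IRQ and sets dwc->pending_events); this helper is expected to run from
+ * the runtime resume path to replay those events and re-enable the IRQ.
+ */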
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index f21c0fccbebd..e4a1d974a5ae 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -95,11 +95,11 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
*
* Caller should take care of locking
*/
-static inline u32 dwc3_gadget_ep_get_transfer_index(struct dwc3 *dwc, u8 number)
+static inline u32 dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
{
u32 res_id;
- res_id = dwc3_readl(dwc->regs, DWC3_DEPCMD(number));
+ res_id = dwc3_readl(dep->regs, DWC3_DEPCMD);
return DWC3_DEPCMD_GET_RSC_IDX(res_id);
}
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index c679f63783ae..f6533c68fed1 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -16,15 +16,55 @@
*/
#include <linux/platform_device.h>
-#include <linux/usb/xhci_pdriver.h>
#include "core.h"
int dwc3_host_init(struct dwc3 *dwc)
{
+ struct property_entry props[2];
struct platform_device *xhci;
- struct usb_xhci_pdata pdata;
- int ret;
+ int ret, irq;
+ struct resource *res;
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+
+ irq = platform_get_irq_byname(dwc3_pdev, "host");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq <= 0) {
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq <= 0) {
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER) {
+ dev_err(dwc->dev,
+ "missing host IRQ\n");
+ }
+ if (!irq)
+ irq = -EINVAL;
+ return irq;
+ } else {
+ res = platform_get_resource(dwc3_pdev,
+ IORESOURCE_IRQ, 0);
+ }
+ } else {
+ res = platform_get_resource_byname(dwc3_pdev,
+ IORESOURCE_IRQ,
+ "dwc_usb3");
+ }
+
+ } else {
+ res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
+ "host");
+ }
+
+ dwc->xhci_resources[1].start = irq;
+ dwc->xhci_resources[1].end = irq;
+ dwc->xhci_resources[1].flags = res->flags;
+ dwc->xhci_resources[1].name = res->name;
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
@@ -47,14 +87,15 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err1;
}
- memset(&pdata, 0, sizeof(pdata));
-
- pdata.usb3_lpm_capable = dwc->usb3_lpm_capable;
+ memset(props, 0, sizeof(props));
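+ /*
+ * Note: a property_entry carrying just a name and no value/length acts
+ * as a boolean flag (all the xHCI driver checks is its presence), and
+ * the zeroed second entry terminates the array.
+ */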
- ret = platform_device_add_data(xhci, &pdata, sizeof(pdata));
- if (ret) {
- dev_err(dwc->dev, "couldn't add platform data to xHCI device\n");
- goto err1;
+ if (dwc->usb3_lpm_capable) {
+ props[0].name = "usb3-lpm-capable";
+ ret = platform_device_add_properties(xhci, props);
+ if (ret) {
+ dev_err(dwc->dev, "failed to add properties to xHCI\n");
+ goto err1;
+ }
}
phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index 6a79c8e66bbc..a06f9a8fecc7 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -26,7 +26,6 @@
static inline u32 dwc3_readl(void __iomem *base, u32 offset)
{
- u32 offs = offset - DWC3_GLOBALS_REGS_START;
u32 value;
/*
@@ -34,7 +33,7 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
* space, see dwc3_probe in core.c.
* However, the offsets are given starting from xHCI address space.
*/
- value = readl(base + offs);
+ value = readl(base + offset - DWC3_GLOBALS_REGS_START);
/*
* When tracing we want to make it easy to find the correct address on
@@ -49,14 +48,12 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
{
- u32 offs = offset - DWC3_GLOBALS_REGS_START;
-
/*
* We requested the mem region starting from the Globals address
* space, see dwc3_probe in core.c.
* However, the offsets are given starting from xHCI address space.
*/
- writel(value, base + offs);
+ writel(value, base + offset - DWC3_GLOBALS_REGS_START);
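+
+ /*
+ * Worked example, assuming DWC3_GLOBALS_REGS_START is 0xc100 (its
+ * value in core.h): a write to DWC3_GCTL, which the databook places
+ * at 0xc110 in the xHCI-relative map, lands at base + 0x10 here.
+ */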
/*
* When tracing we want to make it easy to find the correct address on
diff --git a/drivers/usb/dwc3/platform_data.h b/drivers/usb/dwc3/platform_data.h
deleted file mode 100644
index 8826cca5fc6f..000000000000
--- a/drivers/usb/dwc3/platform_data.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * platform_data.h - USB DWC3 Platform Data Support
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
- * Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/usb/ch9.h>
-#include <linux/usb/otg.h>
-
-struct dwc3_platform_data {
- enum usb_device_speed maximum_speed;
- enum usb_dr_mode dr_mode;
- bool usb3_lpm_capable;
-
- unsigned is_utmi_l1_suspend:1;
- u8 hird_threshold;
-
- u8 lpm_nyet_threshold;
-
- unsigned disable_scramble_quirk:1;
- unsigned has_lpm_erratum:1;
- unsigned u2exit_lfps_quirk:1;
- unsigned u2ss_inp3_quirk:1;
- unsigned req_p1p2p3_quirk:1;
- unsigned del_p1p2p3_quirk:1;
- unsigned del_phy_power_chg_quirk:1;
- unsigned lfps_filter_quirk:1;
- unsigned rx_detect_poll_quirk:1;
- unsigned dis_u3_susphy_quirk:1;
- unsigned dis_u2_susphy_quirk:1;
- unsigned dis_enblslpm_quirk:1;
- unsigned dis_rxdet_inp3_quirk:1;
-
- unsigned tx_de_emphasis_quirk:1;
- unsigned tx_de_emphasis:2;
-
- u32 fladj_value;
-
- const char *hsphy_interface;
-};
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 3ac7252f4427..d24cefd191b5 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -71,7 +71,8 @@ DECLARE_EVENT_CLASS(dwc3_log_event,
TP_fast_assign(
__entry->event = event;
),
- TP_printk("event %08x", __entry->event)
+ TP_printk("event (%08x): %s", __entry->event,
+ dwc3_decode_event(__entry->event))
);
DEFINE_EVENT(dwc3_log_event, dwc3_event,
@@ -85,21 +86,21 @@ DECLARE_EVENT_CLASS(dwc3_log_ctrl,
TP_STRUCT__entry(
__field(__u8, bRequestType)
__field(__u8, bRequest)
- __field(__le16, wValue)
- __field(__le16, wIndex)
- __field(__le16, wLength)
+ __field(__u16, wValue)
+ __field(__u16, wIndex)
+ __field(__u16, wLength)
),
TP_fast_assign(
__entry->bRequestType = ctrl->bRequestType;
__entry->bRequest = ctrl->bRequest;
- __entry->wValue = ctrl->wValue;
- __entry->wIndex = ctrl->wIndex;
- __entry->wLength = ctrl->wLength;
+ __entry->wValue = le16_to_cpu(ctrl->wValue);
+ __entry->wIndex = le16_to_cpu(ctrl->wIndex);
+ __entry->wLength = le16_to_cpu(ctrl->wLength);
),
TP_printk("bRequestType %02x bRequest %02x wValue %04x wIndex %04x wLength %d",
__entry->bRequestType, __entry->bRequest,
- le16_to_cpu(__entry->wValue), le16_to_cpu(__entry->wIndex),
- le16_to_cpu(__entry->wLength)
+ __entry->wValue, __entry->wIndex,
+ __entry->wLength
)
);
@@ -166,37 +167,41 @@ DEFINE_EVENT(dwc3_log_request, dwc3_gadget_giveback,
);
DECLARE_EVENT_CLASS(dwc3_log_generic_cmd,
- TP_PROTO(unsigned int cmd, u32 param),
- TP_ARGS(cmd, param),
+ TP_PROTO(unsigned int cmd, u32 param, int status),
+ TP_ARGS(cmd, param, status),
TP_STRUCT__entry(
__field(unsigned int, cmd)
__field(u32, param)
+ __field(int, status)
),
TP_fast_assign(
__entry->cmd = cmd;
__entry->param = param;
+ __entry->status = status;
),
- TP_printk("cmd '%s' [%d] param %08x",
+ TP_printk("cmd '%s' [%d] param %08x --> status: %s",
dwc3_gadget_generic_cmd_string(__entry->cmd),
- __entry->cmd, __entry->param
+ __entry->cmd, __entry->param,
+ dwc3_gadget_generic_cmd_status_string(__entry->status)
)
);
DEFINE_EVENT(dwc3_log_generic_cmd, dwc3_gadget_generic_cmd,
- TP_PROTO(unsigned int cmd, u32 param),
- TP_ARGS(cmd, param)
+ TP_PROTO(unsigned int cmd, u32 param, int status),
+ TP_ARGS(cmd, param, status)
);
DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
TP_PROTO(struct dwc3_ep *dep, unsigned int cmd,
- struct dwc3_gadget_ep_cmd_params *params),
- TP_ARGS(dep, cmd, params),
+ struct dwc3_gadget_ep_cmd_params *params, int cmd_status),
+ TP_ARGS(dep, cmd, params, cmd_status),
TP_STRUCT__entry(
__dynamic_array(char, name, DWC3_MSG_MAX)
__field(unsigned int, cmd)
__field(u32, param0)
__field(u32, param1)
__field(u32, param2)
+ __field(int, cmd_status)
),
TP_fast_assign(
snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
@@ -204,18 +209,20 @@ DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
__entry->param0 = params->param0;
__entry->param1 = params->param1;
__entry->param2 = params->param2;
+ __entry->cmd_status = cmd_status;
),
- TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x",
+ TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x --> status: %s",
__get_str(name), dwc3_gadget_ep_cmd_string(__entry->cmd),
__entry->cmd, __entry->param0,
- __entry->param1, __entry->param2
+ __entry->param1, __entry->param2,
+ dwc3_ep_cmd_status_string(__entry->cmd_status)
)
);
DEFINE_EVENT(dwc3_log_gadget_ep_cmd, dwc3_gadget_ep_cmd,
TP_PROTO(struct dwc3_ep *dep, unsigned int cmd,
- struct dwc3_gadget_ep_cmd_params *params),
- TP_ARGS(dep, cmd, params)
+ struct dwc3_gadget_ep_cmd_params *params, int cmd_status),
+ TP_ARGS(dep, cmd, params, cmd_status)
);
DECLARE_EVENT_CLASS(dwc3_log_trb,
@@ -224,6 +231,8 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
TP_STRUCT__entry(
__dynamic_array(char, name, DWC3_MSG_MAX)
__field(struct dwc3_trb *, trb)
+ __field(u32, allocated)
+ __field(u32, queued)
__field(u32, bpl)
__field(u32, bph)
__field(u32, size)
@@ -232,14 +241,53 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
TP_fast_assign(
snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
__entry->trb = trb;
+ __entry->allocated = dep->allocated_requests;
+ __entry->queued = dep->queued_requests;
__entry->bpl = trb->bpl;
__entry->bph = trb->bph;
__entry->size = trb->size;
__entry->ctrl = trb->ctrl;
),
- TP_printk("%s: trb %p bph %08x bpl %08x size %08x ctrl %08x",
- __get_str(name), __entry->trb, __entry->bph, __entry->bpl,
- __entry->size, __entry->ctrl
+ TP_printk("%s: %d/%d trb %p buf %08x%08x size %d ctrl %08x (%c%c%c%c:%c%c:%s)",
+ __get_str(name), __entry->queued, __entry->allocated,
+ __entry->trb, __entry->bph, __entry->bpl,
+ __entry->size, __entry->ctrl,
+ __entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
+ __entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
+ __entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
+ __entry->ctrl & DWC3_TRB_CTRL_CSP ? 'S' : 's',
+ __entry->ctrl & DWC3_TRB_CTRL_ISP_IMI ? 'S' : 's',
+ __entry->ctrl & DWC3_TRB_CTRL_IOC ? 'C' : 'c',
+ ({char *s;
+ switch (__entry->ctrl & 0x3f0) {
+ case DWC3_TRBCTL_NORMAL:
+ s = "normal";
+ break;
+ case DWC3_TRBCTL_CONTROL_SETUP:
+ s = "setup";
+ break;
+ case DWC3_TRBCTL_CONTROL_STATUS2:
+ s = "status2";
+ break;
+ case DWC3_TRBCTL_CONTROL_STATUS3:
+ s = "status3";
+ break;
+ case DWC3_TRBCTL_CONTROL_DATA:
+ s = "data";
+ break;
+ case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
+ s = "isoc-first";
+ break;
+ case DWC3_TRBCTL_ISOCHRONOUS:
+ s = "isoc";
+ break;
+ case DWC3_TRBCTL_LINK_TRB:
+ s = "link";
+ break;
+ default:
+ s = "UNKNOWN";
+ break;
+ } s; })
)
);
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 8cfc3191be50..12731e67d2c7 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -13,7 +13,7 @@
#include <linux/console.h>
#include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/usb/ch9.h>
@@ -1093,5 +1093,5 @@ static int __init kgdbdbgp_start_thread(void)
return 0;
}
-module_init(kgdbdbgp_start_thread);
+device_initcall(kgdbdbgp_start_thread);
#endif /* CONFIG_KGDB */
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 2057add439f0..3c3f31ceece7 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -114,7 +114,7 @@ config USB_GADGET_VBUS_DRAW
config USB_GADGET_STORAGE_NUM_BUFFERS
int "Number of storage pipeline buffers"
- range 2 32
+ range 2 256
default 2
help
Usually 2 buffers are enough to establish a good buffering
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index e6c0542a063b..17a6077b89a4 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -93,7 +93,7 @@ int usb_gadget_config_buf(
*cp = *config;
/* then interface/endpoint/class/vendor/... */
- len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf,
+ len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8 *)buf,
length - USB_DT_CONFIG_SIZE, desc);
if (len < 0)
return len;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index cc33d2667408..5c8429f23a89 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -130,6 +130,12 @@ struct ffs_epfile {
struct dentry *dentry;
+ /*
+ * Buffer for holding data from partial reads which may happen since
+ * we’re rounding user read requests to a multiple of a max packet size.
+ */
+ struct ffs_buffer *read_buffer; /* P: epfile->mutex */
+
char name[5];
unsigned char in; /* P: ffs->eps_lock */
@@ -138,6 +144,12 @@ struct ffs_epfile {
unsigned char _pad;
};
+struct ffs_buffer {
+ size_t length;
+ char *data;
+ char storage[];
+};
+
/* ffs_io_data structure ***************************************************/
struct ffs_io_data {
@@ -640,6 +652,49 @@ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
}
}
+static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
+{
+ ssize_t ret = copy_to_iter(data, data_len, iter);
+
+ if (likely(ret == data_len))
+ return ret;
+
+ if (unlikely(iov_iter_count(iter)))
+ return -EFAULT;
+
+ /*
+ * Dear user space developer!
+ *
+ * TL;DR: To stop getting the error message below in your kernel log,
+ * change user space code using functionfs to align read buffer sizes
+ * to a multiple of the max packet size.
+ *
+ * Some UDCs (e.g. dwc3) require request sizes to be a multiple of the
+ * max packet size. When an unaligned buffer is passed to functionfs, it
+ * internally uses a larger, aligned buffer so that such UDCs are happy.
+ *
+ * Unfortunately, this means that the host may send more data than was
+ * requested in the read(2) system call. f_fs doesn’t know what to do
+ * with that excess data, so it simply drops it.
+ *
+ * Had the buffer been aligned in the first place, no such problem
+ * would have happened.
+ *
+ * Data may be dropped only in AIO reads. Synchronous reads are handled
+ * by splitting a request into multiple parts. This splitting may still
+ * be a problem though, so it’s likely best to align the buffer
+ * regardless of whether the read is AIO or not.
+ *
+ * This only affects OUT endpoints, i.e. reading data with a read(2),
+ * aio_read(2) etc. system calls. Writing data to an IN endpoint is not
+ * affected.
+ */
+ pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
+ "Align read buffer size to max packet size to avoid the problem.\n",
+ data_len, ret);
+
+ return ret;
+}
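The comment above tells user space what to do; a minimal user-space sketch of the fix it asks for follows, assuming an already-open endpoint file descriptor ep1_fd and a 512-byte high-speed bulk wMaxPacketSize (both are illustration-only, not part of this patch):

    #include <stddef.h>
    #include <unistd.h>

    #define MAX_PKT 512  /* assumed wMaxPacketSize of the OUT endpoint */

    static ssize_t ep_read_aligned(int ep1_fd, void *buf, size_t want)
    {
            /* Round the request up to a multiple of wMaxPacketSize so
             * the request can always hold whatever the host sends. */
            size_t len = (want + MAX_PKT - 1) & ~(size_t)(MAX_PKT - 1);

            return read(ep1_fd, buf, len);
    }

The caller must, of course, size buf for the rounded-up length rather than for want.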
+
static void ffs_user_copy_worker(struct work_struct *work)
{
struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
@@ -650,9 +705,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
if (io_data->read && ret > 0) {
use_mm(io_data->mm);
- ret = copy_to_iter(io_data->buf, ret, &io_data->data);
- if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
- ret = -EFAULT;
+ ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
unuse_mm(io_data->mm);
}
@@ -680,6 +733,58 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
schedule_work(&io_data->work);
}
+/* Assumes epfile->mutex is held. */
+static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
+ struct iov_iter *iter)
+{
+ struct ffs_buffer *buf = epfile->read_buffer;
+ ssize_t ret;
+ if (!buf)
+ return 0;
+
+ ret = copy_to_iter(buf->data, buf->length, iter);
+ if (buf->length == ret) {
+ kfree(buf);
+ epfile->read_buffer = NULL;
+ } else if (unlikely(iov_iter_count(iter))) {
+ ret = -EFAULT;
+ } else {
+ buf->length -= ret;
+ buf->data += ret;
+ }
+ return ret;
+}
+
+/* Assumes epfile->mutex is held. */
+static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
+ void *data, int data_len,
+ struct iov_iter *iter)
+{
+ struct ffs_buffer *buf;
+
+ ssize_t ret = copy_to_iter(data, data_len, iter);
+ if (likely(data_len == ret))
+ return ret;
+
+ if (unlikely(iov_iter_count(iter)))
+ return -EFAULT;
+
+ /* See ffs_copy_to_iter for more context. */
+ pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
+ data_len, ret);
+
+ data_len -= ret;
+ buf = kmalloc(sizeof(*buf) + data_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ buf->length = data_len;
+ buf->data = buf->storage;
+ memcpy(buf->storage, data + ret, data_len);
+ epfile->read_buffer = buf;
+
+ return ret;
+}
+
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
{
struct ffs_epfile *epfile = file->private_data;
@@ -709,21 +814,40 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
if (halt && epfile->isoc)
return -EINVAL;
+ /* We will be using request and read_buffer */
+ ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
+ if (unlikely(ret))
+ goto error;
+
/* Allocate & copy */
if (!halt) {
+ struct usb_gadget *gadget;
+
+ /*
+ * Do we have buffered data from previous partial read? Check
+ * that for synchronous case only because we do not have
+ * facility to ‘wake up’ a pending asynchronous read and push
+ * buffered data to it which we would need to make things behave
+ * consistently.
+ */
+ if (!io_data->aio && io_data->read) {
+ ret = __ffs_epfile_read_buffered(epfile, &io_data->data);
+ if (ret)
+ goto error_mutex;
+ }
+
/*
* if we _do_ wait above, the epfile->ffs->gadget might be NULL
* before the waiting completes, so do not assign to 'gadget'
* earlier
*/
- struct usb_gadget *gadget = epfile->ffs->gadget;
- size_t copied;
+ gadget = epfile->ffs->gadget;
spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */
if (epfile->ep != ep) {
- spin_unlock_irq(&epfile->ffs->eps_lock);
- return -ESHUTDOWN;
+ ret = -ESHUTDOWN;
+ goto error_lock;
}
data_len = iov_iter_count(&io_data->data);
/*
@@ -735,22 +859,17 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
data = kmalloc(data_len, GFP_KERNEL);
- if (unlikely(!data))
- return -ENOMEM;
- if (!io_data->read) {
- copied = copy_from_iter(data, data_len, &io_data->data);
- if (copied != data_len) {
- ret = -EFAULT;
- goto error;
- }
+ if (unlikely(!data)) {
+ ret = -ENOMEM;
+ goto error_mutex;
+ }
+ if (!io_data->read &&
+ copy_from_iter(data, data_len, &io_data->data) != data_len) {
+ ret = -EFAULT;
+ goto error_mutex;
}
}
- /* We will be using request */
- ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
- if (unlikely(ret))
- goto error;
-
spin_lock_irq(&epfile->ffs->eps_lock);
if (epfile->ep != ep) {
@@ -803,18 +922,13 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
interrupted = ep->status < 0;
}
- /*
- * XXX We may end up silently droping data here. Since data_len
- * (i.e. req->length) may be bigger than len (after being
- * rounded up to maxpacketsize), we may end up with more data
- * then user space has space for.
- */
- ret = interrupted ? -EINTR : ep->status;
- if (io_data->read && ret > 0) {
- ret = copy_to_iter(data, ret, &io_data->data);
- if (!ret)
- ret = -EFAULT;
- }
+ if (interrupted)
+ ret = -EINTR;
+ else if (io_data->read && ep->status > 0)
+ ret = __ffs_epfile_read_data(epfile, data, ep->status,
+ &io_data->data);
+ else
+ ret = ep->status;
goto error_mutex;
} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
ret = -ENOMEM;
@@ -980,6 +1094,8 @@ ffs_epfile_release(struct inode *inode, struct file *file)
ENTER();
+ kfree(epfile->read_buffer);
+ epfile->read_buffer = NULL;
ffs_data_closed(epfile->ffs);
return 0;
@@ -1605,19 +1721,24 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned count = func->ffs->eps_count;
unsigned long flags;
- spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
+ if (epfile)
+ mutex_lock(&epfile->mutex);
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
++ep;
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
if (epfile) {
epfile->ep = NULL;
+ kfree(epfile->read_buffer);
+ epfile->read_buffer = NULL;
+ mutex_unlock(&epfile->mutex);
++epfile;
}
} while (--count);
- spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
static int ffs_func_eps_enable(struct ffs_function *func)
@@ -2227,8 +2348,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
{
u32 str_count, needed_count, lang_count;
struct usb_gadget_strings **stringtabs, *t;
- struct usb_string *strings, *s;
const char *data = _data;
+ struct usb_string *s;
ENTER();
@@ -2286,7 +2407,6 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
stringtabs = vla_ptr(vlabuf, d, stringtabs);
t = vla_ptr(vlabuf, d, stringtab);
s = vla_ptr(vlabuf, d, strings);
- strings = s;
}
/* For each language */
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 5c6d4d7ca605..2505117e88e8 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2655,18 +2655,6 @@ void fsg_common_put(struct fsg_common *common)
}
EXPORT_SYMBOL_GPL(fsg_common_put);
-/* check if fsg_num_buffers is within a valid range */
-static inline int fsg_num_buffers_validate(unsigned int fsg_num_buffers)
-{
-#define FSG_MAX_NUM_BUFFERS 32
-
- if (fsg_num_buffers >= 2 && fsg_num_buffers <= FSG_MAX_NUM_BUFFERS)
- return 0;
- pr_err("fsg_num_buffers %u is out of range (%d to %d)\n",
- fsg_num_buffers, 2, FSG_MAX_NUM_BUFFERS);
- return -EINVAL;
-}
-
static struct fsg_common *fsg_common_setup(struct fsg_common *common)
{
if (!common) {
@@ -2709,11 +2697,7 @@ static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
{
struct fsg_buffhd *bh, *buffhds;
- int i, rc;
-
- rc = fsg_num_buffers_validate(n);
- if (rc != 0)
- return rc;
+ int i;
buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
if (!buffhds)
@@ -3401,10 +3385,6 @@ static ssize_t fsg_opts_num_buffers_store(struct config_item *item,
if (ret)
goto end;
- ret = fsg_num_buffers_validate(num);
- if (ret)
- goto end;
-
fsg_common_set_num_buffers(opts->common, num);
ret = len;
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 3580f198df8b..6ded6345cd09 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -907,7 +907,6 @@ static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
- int status;
pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
port->port_num, tty, count);
@@ -917,7 +916,7 @@ static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
count = gs_buf_put(&port->port_write_buf, buf, count);
/* treat count == 0 as flush_chars() */
if (port->port_usb)
- status = gs_start_tx(port);
+ gs_start_tx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return count;
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index f85639ef8a8f..6da7316f8e87 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -265,7 +265,7 @@ static void *functionfs_acquire_dev(struct ffs_dev *dev)
{
if (!try_module_get(THIS_MODULE))
return ERR_PTR(-ENOENT);
-
+
return NULL;
}
@@ -275,7 +275,7 @@ static void functionfs_release_dev(struct ffs_dev *dev)
}
/*
- * The caller of this function takes ffs_lock
+ * The caller of this function takes ffs_lock
*/
static int functionfs_ready_callback(struct ffs_data *ffs)
{
@@ -294,12 +294,12 @@ static int functionfs_ready_callback(struct ffs_data *ffs)
++missing_funcs;
gfs_registered = false;
}
-
+
return ret;
}
/*
- * The caller of this function takes ffs_lock
+ * The caller of this function takes ffs_lock
*/
static void functionfs_closed_callback(struct ffs_data *ffs)
{
@@ -347,17 +347,14 @@ static int gfs_bind(struct usb_composite_dev *cdev)
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
{
- struct f_rndis_opts *rndis_opts;
-
fi_rndis = usb_get_function_instance("rndis");
if (IS_ERR(fi_rndis)) {
ret = PTR_ERR(fi_rndis);
goto error;
}
- rndis_opts = container_of(fi_rndis, struct f_rndis_opts,
- func_inst);
#ifndef CONFIG_USB_FUNCTIONFS_ETH
- net = rndis_opts->net;
+ net = container_of(fi_rndis, struct f_rndis_opts,
+ func_inst)->net;
#endif
}
#endif
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 7c289416f87d..658b8da60915 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -312,7 +312,7 @@ config USB_NET2272_DMA
If unsure, say "N" here. The driver works fine in PIO mode.
config USB_NET2280
- tristate "NetChip 228x / PLX USB338x"
+ tristate "NetChip NET228x / PLX USB3x8x"
depends on PCI
help
NetChip 2280 / 2282 is a PCI based USB peripheral controller which
@@ -322,6 +322,8 @@ config USB_NET2280
(for control transfers) and several endpoints with dedicated
functions.
+	  PLX 2380 is a PCIe version of the NET2280.
+
PLX 3380 / 3382 is a PCIe based USB peripheral controller which
supports full, high speed USB 2.0 and super speed USB 3.0
data transfers.
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index dfee53446319..98e74ed9f555 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -1,3 +1,8 @@
+# define_trace.h needs to know how to find our header
+CFLAGS_trace.o := -I$(src)
+
+udc-core-y := core.o trace.o
+
#
# USB peripheral controller drivers
#
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index 39d70b4a8958..ea03ca7ae29a 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -2340,7 +2340,6 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
struct udc_ep *ep;
struct udc_request *req;
struct udc_data_dma *td;
- unsigned dma_done;
unsigned len;
ep = &dev->ep[ep_ix];
@@ -2385,13 +2384,8 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
*/
if (use_dma_ppb_du) {
td = udc_get_last_dma_desc(req);
- if (td) {
- dma_done =
- AMD_GETBITS(td->status,
- UDC_DMA_IN_STS_BS);
- /* don't care DMA done */
+ if (td)
req->req.actual = req->req.length;
- }
} else {
/* assume all bytes transferred */
req->req.actual = req->req.length;
@@ -3417,4 +3411,3 @@ module_pci_driver(udc_pci_driver);
MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 18569de06b04..bb1f6c8f0f01 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1920,6 +1920,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
udc->errata = match->data;
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
+ if (IS_ERR(udc->pmc))
+ udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
if (udc->errata && IS_ERR(udc->pmc))
return ERR_CAST(udc->pmc);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 6a4155c4bd86..4d5e9188beae 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -57,7 +57,6 @@ static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
u32 param0, u32 param1, u32 param2)
{
u32 temp, cmd_status;
- int reset_bdc = 0;
int ret;
temp = bdc_readl(bdc->regs, BDC_CMDSC);
@@ -94,7 +93,6 @@ static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
case BDC_CMDS_INTL:
dev_err(bdc->dev, "BDC Internal error\n");
- reset_bdc = 1;
ret = -ECONNRESET;
break;
@@ -102,7 +100,6 @@ static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
dev_err(bdc->dev,
"command timedout waited for %dusec\n",
BDC_CMD_TIMEOUT);
- reset_bdc = 1;
ret = -ECONNRESET;
break;
default:
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index d6199507f861..ccaa74ab6c0e 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -81,7 +81,7 @@ static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
continue;
}
if (!bd_table->start_bd) {
- dev_dbg(bdc->dev, "bd dma pool not allocted\n");
+ dev_dbg(bdc->dev, "bd dma pool not allocated\n");
continue;
}
@@ -702,11 +702,9 @@ static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
/* Queue data stage */
static int ep0_queue_data_stage(struct bdc *bdc)
{
- struct usb_request *ep0_usb_req;
struct bdc_ep *ep;
dev_dbg(bdc->dev, "%s\n", __func__);
- ep0_usb_req = &bdc->ep0_req.usb_req;
ep = bdc->bdc_ep_array[1];
bdc->ep0_req.ep = ep;
bdc->ep0_req.usb_req.complete = NULL;
@@ -1393,10 +1391,8 @@ static int ep0_set_sel(struct bdc *bdc,
{
struct bdc_ep *ep;
u16 wLength;
- u16 wValue;
dev_dbg(bdc->dev, "%s\n", __func__);
- wValue = le16_to_cpu(setup_pkt->wValue);
wLength = le16_to_cpu(setup_pkt->wLength);
if (unlikely(wLength != 6)) {
dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
new file mode 100644
index 000000000000..ff8685ea7219
--- /dev/null
+++ b/drivers/usb/gadget/udc/core.c
@@ -0,0 +1,1523 @@
+/**
+ * core.c - Core UDC Framework
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+
+#include "trace.h"
+
+/**
+ * struct usb_udc - describes one usb device controller
+ * @driver: the gadget driver pointer. For use by the class code
+ * @dev: the child device to the actual controller
+ * @gadget: the gadget. For use by the class code
+ * @list: for use by the udc class driver
+ * @vbus: for udcs that care about vbus status, this value is the real vbus
+ * status; for udcs that do not care about vbus status, this value is
+ * always true
+ *
+ * This represents the internal data structure which is used by the UDC-class
+ * to hold information about udc driver and gadget together.
+ */
+struct usb_udc {
+ struct usb_gadget_driver *driver;
+ struct usb_gadget *gadget;
+ struct device dev;
+ struct list_head list;
+ bool vbus;
+};
+
+static struct class *udc_class;
+static LIST_HEAD(udc_list);
+static LIST_HEAD(gadget_driver_pending_list);
+static DEFINE_MUTEX(udc_lock);
+
+static int udc_bind_to_driver(struct usb_udc *udc,
+ struct usb_gadget_driver *driver);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint
+ * @ep:the endpoint being configured
+ * @maxpacket_limit:value of maximum packet size limit
+ *
+ * This function should be used only in UDC drivers to initialize endpoint
+ * (usually in probe function).
+ */
+void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
+ unsigned maxpacket_limit)
+{
+ ep->maxpacket_limit = maxpacket_limit;
+ ep->maxpacket = maxpacket_limit;
+
+ trace_usb_ep_set_maxpacket_limit(ep, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_maxpacket_limit);
+
+/**
+ * usb_ep_enable - configure endpoint, making it usable
+ * @ep:the endpoint being configured. may not be the endpoint named "ep0".
+ * drivers discover endpoints through the ep_list of a usb_gadget.
+ *
+ * When configurations are set, or when interface settings change, the driver
+ * will enable or disable the relevant endpoints. while it is enabled, an
+ * endpoint may be used for i/o until the driver receives a disconnect() from
+ * the host or until the endpoint is disabled.
+ *
+ * the ep0 implementation (which calls this routine) must ensure that the
+ * hardware capabilities of each endpoint match the descriptor provided
+ * for it. for example, an endpoint named "ep2in-bulk" would be usable
+ * for interrupt transfers as well as bulk, but it likely couldn't be used
+ * for iso transfers or for endpoint 14. some endpoints are fully
+ * configurable, with more generic names like "ep-a". (remember that for
+ * USB, "in" means "towards the USB master".)
+ *
+ * returns zero, or a negative error code.
+ */
+int usb_ep_enable(struct usb_ep *ep)
+{
+ int ret = 0;
+
+ if (ep->enabled)
+ goto out;
+
+ ret = ep->ops->enable(ep, ep->desc);
+	if (ret)
+		goto out;
+
+ ep->enabled = true;
+
+out:
+ trace_usb_ep_enable(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_enable);
+
+/**
+ * usb_ep_disable - endpoint is no longer usable
+ * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0".
+ *
+ * no other task may be using this endpoint when this is called.
+ * any pending and uncompleted requests will complete with status
+ * indicating disconnect (-ESHUTDOWN) before this call returns.
+ * gadget drivers must call usb_ep_enable() again before queueing
+ * requests to the endpoint.
+ *
+ * returns zero, or a negative error code.
+ */
+int usb_ep_disable(struct usb_ep *ep)
+{
+ int ret = 0;
+
+ if (!ep->enabled)
+ goto out;
+
+ ret = ep->ops->disable(ep);
+	if (ret)
+		goto out;
+
+ ep->enabled = false;
+
+out:
+ trace_usb_ep_disable(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_disable);
+
+/**
+ * usb_ep_alloc_request - allocate a request object to use with this endpoint
+ * @ep:the endpoint to be used with with the request
+ * @gfp_flags:GFP_* flags to use
+ *
+ * Request objects must be allocated with this call, since they normally
+ * need controller-specific setup and may even need endpoint-specific
+ * resources such as allocation of DMA descriptors.
+ * Requests may be submitted with usb_ep_queue(), and receive a single
+ * completion callback. Free requests with usb_ep_free_request(), when
+ * they are no longer needed.
+ *
+ * Returns the request, or null if one could not be allocated.
+ */
+struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+	struct usb_request *req;
+
+ req = ep->ops->alloc_request(ep, gfp_flags);
+
+ trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);
+
+ return req;
+}
+EXPORT_SYMBOL_GPL(usb_ep_alloc_request);
+
+/**
+ * usb_ep_free_request - frees a request object
+ * @ep:the endpoint associated with the request
+ * @req:the request being freed
+ *
+ * Reverses the effect of usb_ep_alloc_request().
+ * Caller guarantees the request is not queued, and that it will
+ * no longer be requeued (or otherwise used).
+ */
+void usb_ep_free_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ ep->ops->free_request(ep, req);
+ trace_usb_ep_free_request(ep, req, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_free_request);
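The pair above brackets the whole life of a request. A minimal function-driver sketch of the usual cycle, with hypothetical names (my_complete, my_send; this is an illustration, not code from this patch):

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/usb/gadget.h>

    /* Completion runs in interrupt context; free everything here. */
    static void my_complete(struct usb_ep *ep, struct usb_request *req)
    {
            /* req->status is 0 on success, e.g. -ESHUTDOWN on disable */
            kfree(req->buf);
            usb_ep_free_request(ep, req);
    }

    static int my_send(struct usb_ep *ep, const void *data, size_t len)
    {
            struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

            if (!req)
                    return -ENOMEM;
            req->buf = kmemdup(data, len, GFP_ATOMIC);
            if (!req->buf) {
                    usb_ep_free_request(ep, req);
                    return -ENOMEM;
            }
            req->length = len;
            req->complete = my_complete;
            return usb_ep_queue(ep, req, GFP_ATOMIC);
    }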
+
+/**
+ * usb_ep_queue - queues (submits) an I/O request to an endpoint.
+ * @ep:the endpoint associated with the request
+ * @req:the request being submitted
+ * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
+ * pre-allocate all necessary memory with the request.
+ *
+ * This tells the device controller to perform the specified request through
+ * that endpoint (reading or writing a buffer). When the request completes,
+ * including being canceled by usb_ep_dequeue(), the request's completion
+ * routine is called to return the request to the driver. Any endpoint
+ * (except control endpoints like ep0) may have more than one transfer
+ * request queued; they complete in FIFO order. Once a gadget driver
+ * submits a request, that request may not be examined or modified until it
+ * is given back to that driver through the completion callback.
+ *
+ * Each request is turned into one or more packets. The controller driver
+ * never merges adjacent requests into the same packet. OUT transfers
+ * will sometimes use data that's already buffered in the hardware.
+ * Drivers can rely on the fact that the first byte of the request's buffer
+ * always corresponds to the first byte of some USB packet, for both
+ * IN and OUT transfers.
+ *
+ * Bulk endpoints can queue any amount of data; the transfer is packetized
+ * automatically. The last packet will be short if the request doesn't fill it
+ * out completely. Zero length packets (ZLPs) should be avoided in portable
+ * protocols since not all usb hardware can successfully handle zero length
+ * packets. (ZLPs may be explicitly written, and may be implicitly written if
+ * the request 'zero' flag is set.) Bulk endpoints may also be used
+ * for interrupt transfers; but the reverse is not true, and some endpoints
+ * won't support every interrupt transfer. (Such as 768 byte packets.)
+ *
+ * Interrupt-only endpoints are less functional than bulk endpoints, for
+ * example by not supporting queueing or not handling buffers that are
+ * larger than the endpoint's maxpacket size. They may also treat data
+ * toggle differently.
+ *
+ * Control endpoints ... after getting a setup() callback, the driver queues
+ * one response (even if it would be zero length). That enables the
+ * status ack, after transferring data as specified in the response. Setup
+ * functions may return negative error codes to generate protocol stalls.
+ * (Note that some USB device controllers disallow protocol stall responses
+ * in some cases.) When control responses are deferred (the response is
+ * written after the setup callback returns), then usb_ep_set_halt() may be
+ * used on ep0 to trigger protocol stalls. Depending on the controller,
+ * it may not be possible to trigger a status-stage protocol stall when the
+ * data stage is over, that is, from within the response's completion
+ * routine.
+ *
+ * For periodic endpoints, like interrupt or isochronous ones, the usb host
+ * arranges to poll once per interval, and the gadget driver usually will
+ * have queued some data to transfer at that time.
+ *
+ * Returns zero, or a negative error code. Endpoints that are not enabled
+ * report errors; errors will also be
+ * reported when the usb peripheral is disconnected.
+ */
+int usb_ep_queue(struct usb_ep *ep,
+ struct usb_request *req, gfp_t gfp_flags)
+{
+ int ret = 0;
+
+ if (WARN_ON_ONCE(!ep->enabled && ep->address)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ ret = ep->ops->queue(ep, req, gfp_flags);
+
+out:
+ trace_usb_ep_queue(ep, req, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_queue);
+
+/**
+ * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
+ * @ep:the endpoint associated with the request
+ * @req:the request being canceled
+ *
+ * If the request is still active on the endpoint, it is dequeued and its
+ * completion routine is called (with status -ECONNRESET); else a negative
+ * error code is returned. This is guaranteed to happen before the call to
+ * usb_ep_dequeue() returns.
+ *
+ * Note that some hardware can't clear out write fifos (to unlink the request
+ * at the head of the queue) except as part of disconnecting from usb. Such
+ * restrictions prevent drivers from supporting configuration changes,
+ * even to configuration zero (a "chapter 9" requirement).
+ */
+int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ int ret;
+
+ ret = ep->ops->dequeue(ep, req);
+ trace_usb_ep_dequeue(ep, req, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_dequeue);
+
+/**
+ * usb_ep_set_halt - sets the endpoint halt feature.
+ * @ep: the non-isochronous endpoint being stalled
+ *
+ * Use this to stall an endpoint, perhaps as an error report.
+ * Except for control endpoints,
+ * the endpoint stays halted (will not stream any data) until the host
+ * clears this feature; drivers may need to empty the endpoint's request
+ * queue first, to make sure no inappropriate transfers happen.
+ *
+ * Note that while an endpoint CLEAR_FEATURE will be invisible to the
+ * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the
+ * current altsetting, see usb_ep_clear_halt(). When switching altsettings,
+ * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
+ *
+ * Returns zero, or a negative error code. On success, this call sets
+ * underlying hardware state that blocks data transfers.
+ * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
+ * transfer requests are still queued, or if the controller hardware
+ * (usually a FIFO) still holds bytes that the host hasn't collected.
+ */
+int usb_ep_set_halt(struct usb_ep *ep)
+{
+ int ret;
+
+ ret = ep->ops->set_halt(ep, 1);
+ trace_usb_ep_set_halt(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_halt);
+
+/**
+ * usb_ep_clear_halt - clears endpoint halt, and resets toggle
+ * @ep:the bulk or interrupt endpoint being reset
+ *
+ * Use this when responding to the standard usb "set interface" request,
+ * for endpoints that aren't reconfigured, after clearing any other state
+ * in the endpoint's i/o queue.
+ *
+ * Returns zero, or a negative error code. On success, this call clears
+ * the underlying hardware state reflecting endpoint halt and data toggle.
+ * Note that some hardware can't support this request (like pxa2xx_udc),
+ * and accordingly can't correctly implement interface altsettings.
+ */
+int usb_ep_clear_halt(struct usb_ep *ep)
+{
+ int ret;
+
+ ret = ep->ops->set_halt(ep, 0);
+ trace_usb_ep_clear_halt(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_clear_halt);
+
+/**
+ * usb_ep_set_wedge - sets the halt feature and ignores clear requests
+ * @ep: the endpoint being wedged
+ *
+ * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
+ * requests. If the gadget driver clears the halt status, it will
+ * automatically unwedge the endpoint.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_ep_set_wedge(struct usb_ep *ep)
+{
+ int ret;
+
+ if (ep->ops->set_wedge)
+ ret = ep->ops->set_wedge(ep);
+ else
+ ret = ep->ops->set_halt(ep, 1);
+
+ trace_usb_ep_set_wedge(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_wedge);
+
+/**
+ * usb_ep_fifo_status - returns number of bytes in fifo, or error
+ * @ep: the endpoint whose fifo status is being checked.
+ *
+ * FIFO endpoints may have "unclaimed data" in them in certain cases,
+ * such as after aborted transfers. Hosts may not have collected all
+ * the IN data written by the gadget driver (and reported by a request
+ * completion). The gadget driver may not have collected all the data
+ * written OUT to it by the host. Drivers that need precise handling for
+ * fault reporting or recovery may need to use this call.
+ *
+ * This returns the number of such bytes in the fifo, or a negative
+ * errno if the endpoint doesn't use a FIFO or doesn't support such
+ * precise handling.
+ */
+int usb_ep_fifo_status(struct usb_ep *ep)
+{
+ int ret;
+
+ if (ep->ops->fifo_status)
+ ret = ep->ops->fifo_status(ep);
+ else
+ ret = -EOPNOTSUPP;
+
+ trace_usb_ep_fifo_status(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_fifo_status);
+
+/**
+ * usb_ep_fifo_flush - flushes contents of a fifo
+ * @ep: the endpoint whose fifo is being flushed.
+ *
+ * This call may be used to flush the "unclaimed data" that may exist in
+ * an endpoint fifo after abnormal transaction terminations. The call
+ * must never be used except when endpoint is not being used for any
+ * protocol translation.
+ */
+void usb_ep_fifo_flush(struct usb_ep *ep)
+{
+ if (ep->ops->fifo_flush)
+ ep->ops->fifo_flush(ep);
+
+ trace_usb_ep_fifo_flush(ep, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_fifo_flush);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_gadget_frame_number - returns the current frame number
+ * @gadget: controller that reports the frame number
+ *
+ * Returns the usb frame number, normally eleven bits from a SOF packet,
+ * or negative errno if this device doesn't support this capability.
+ */
+int usb_gadget_frame_number(struct usb_gadget *gadget)
+{
+ int ret;
+
+ ret = gadget->ops->get_frame(gadget);
+
+ trace_usb_gadget_frame_number(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_frame_number);
+
+/**
+ * usb_gadget_wakeup - tries to wake up the host connected to this gadget
+ * @gadget: controller used to wake up the host
+ *
+ * Returns zero on success, else negative error code if the hardware
+ * doesn't support such attempts, or its support has not been enabled
+ * by the usb host. Drivers must return device descriptors that report
+ * their ability to support this, or hosts won't enable it.
+ *
+ * This may also try to use SRP to wake the host and start enumeration,
+ * even if OTG isn't otherwise in use. OTG devices may also start
+ * remote wakeup even when hosts don't explicitly enable it.
+ */
+int usb_gadget_wakeup(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->wakeup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->wakeup(gadget);
+
+out:
+ trace_usb_gadget_wakeup(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_wakeup);
+
+/**
+ * usb_gadget_set_selfpowered - sets the device selfpowered feature.
+ * @gadget:the device being declared as self-powered
+ *
+ * this affects the device status reported by the hardware driver
+ * to reflect that it now has a local power supply.
+ *
+ * returns zero on success, else negative errno.
+ */
+int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->set_selfpowered) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->set_selfpowered(gadget, 1);
+
+out:
+ trace_usb_gadget_set_selfpowered(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_set_selfpowered);
+
+/**
+ * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
+ * @gadget:the device being declared as bus-powered
+ *
+ * this affects the device status reported by the hardware driver.
+ * some hardware may not support bus-powered operation, in which
+ * case this feature's value can never change.
+ *
+ * returns zero on success, else negative errno.
+ */
+int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->set_selfpowered) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->set_selfpowered(gadget, 0);
+
+out:
+ trace_usb_gadget_clear_selfpowered(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_clear_selfpowered);
+
+/**
+ * usb_gadget_vbus_connect - Notify controller that VBUS is powered
+ * @gadget:The device which now has VBUS power.
+ * Context: can sleep
+ *
+ * This call is used by a driver for an external transceiver (or GPIO)
+ * that detects a VBUS power session starting. Common responses include
+ * resuming the controller, activating the D+ (or D-) pullup to let the
+ * host detect that a USB device is attached, and starting to draw power
+ * (8mA or possibly more, especially after SET_CONFIGURATION).
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_connect(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->vbus_session) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->vbus_session(gadget, 1);
+
+out:
+ trace_usb_gadget_vbus_connect(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_connect);
+
+/**
+ * usb_gadget_vbus_draw - constrain controller's VBUS power usage
+ * @gadget:The device whose VBUS usage is being described
+ * @mA:How much current to draw, in milliAmperes. This should be twice
+ * the value listed in the configuration descriptor bMaxPower field.
+ *
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume. For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ int ret = 0;
+
+ if (!gadget->ops->vbus_draw) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->vbus_draw(gadget, mA);
+ if (!ret)
+ gadget->mA = mA;
+
+out:
+ trace_usb_gadget_vbus_draw(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_draw);
+
+/**
+ * usb_gadget_vbus_disconnect - notify controller about VBUS session end
+ * @gadget:the device whose VBUS supply is being described
+ * Context: can sleep
+ *
+ * This call is used by a driver for an external transceiver (or GPIO)
+ * that detects a VBUS power session ending. Common responses include
+ * reversing everything done in usb_gadget_vbus_connect().
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->vbus_session) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->vbus_session(gadget, 0);
+
+out:
+ trace_usb_gadget_vbus_disconnect(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
+
+/**
+ * usb_gadget_connect - software-controlled connect to USB host
+ * @gadget:the peripheral being connected
+ *
+ * Enables the D+ (or potentially D-) pullup. The host will start
+ * enumerating this gadget when the pullup is active and a VBUS session
+ * is active (the link is powered). This pullup is always enabled unless
+ * usb_gadget_disconnect() has been used to disable it.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_connect(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->pullup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (gadget->deactivated) {
+ /*
+ * If gadget is deactivated we only save new state.
+ * Gadget will be connected automatically after activation.
+ */
+ gadget->connected = true;
+ goto out;
+ }
+
+ ret = gadget->ops->pullup(gadget, 1);
+ if (!ret)
+		gadget->connected = true;
+
+out:
+ trace_usb_gadget_connect(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_connect);
+
+/**
+ * usb_gadget_disconnect - software-controlled disconnect from USB host
+ * @gadget:the peripheral being disconnected
+ *
+ * Disables the D+ (or potentially D-) pullup, which the host may see
+ * as a disconnect (when a VBUS session is active). Not all systems
+ * support software pullup controls.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_disconnect(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->pullup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (gadget->deactivated) {
+ /*
+ * If gadget is deactivated we only save new state.
+ * Gadget will stay disconnected after activation.
+ */
+ gadget->connected = false;
+ goto out;
+ }
+
+ ret = gadget->ops->pullup(gadget, 0);
+ if (!ret)
+		gadget->connected = false;
+
+out:
+ trace_usb_gadget_disconnect(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
+
+/**
+ * usb_gadget_deactivate - deactivate function which is not ready to work
+ * @gadget: the peripheral being deactivated
+ *
+ * This routine may be used during the gadget driver bind() call to prevent
+ * the peripheral from ever being visible to the USB host, unless later
+ * usb_gadget_activate() is called. For example, user mode components may
+ * need to be activated before the system can talk to hosts.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_deactivate(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (gadget->deactivated)
+ goto out;
+
+ if (gadget->connected) {
+ ret = usb_gadget_disconnect(gadget);
+ if (ret)
+ goto out;
+
+ /*
+ * If gadget was being connected before deactivation, we want
+ * to reconnect it in usb_gadget_activate().
+ */
+ gadget->connected = true;
+ }
+ gadget->deactivated = true;
+
+out:
+ trace_usb_gadget_deactivate(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_deactivate);
+
+/**
+ * usb_gadget_activate - activate function which is not ready to work
+ * @gadget: the peripheral being activated
+ *
+ * This routine activates gadget which was previously deactivated with
+ * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_activate(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->deactivated)
+ goto out;
+
+ gadget->deactivated = false;
+
+ /*
+ * If gadget has been connected before deactivation, or became connected
+ * while it was being deactivated, we call usb_gadget_connect().
+ */
+ if (gadget->connected)
+ ret = usb_gadget_connect(gadget);
+
+out:
+ trace_usb_gadget_activate(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_activate);
+
+/* ------------------------------------------------------------------------- */
+
+#ifdef CONFIG_HAS_DMA
+
+int usb_gadget_map_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in)
+{
+ if (req->length == 0)
+ return 0;
+
+ if (req->num_sgs) {
+ int mapped;
+
+ mapped = dma_map_sg(dev, req->sg, req->num_sgs,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (mapped == 0) {
+ dev_err(dev, "failed to map SGs\n");
+ return -EFAULT;
+ }
+
+ req->num_mapped_sgs = mapped;
+ } else {
+ req->dma = dma_map_single(dev, req->buf, req->length,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, req->dma)) {
+ dev_err(dev, "failed to map buffer\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev);
+
+int usb_gadget_map_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in)
+{
+ return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_map_request);
+
+void usb_gadget_unmap_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in)
+{
+ if (req->length == 0)
+ return;
+
+ if (req->num_mapped_sgs) {
+ dma_unmap_sg(dev, req->sg, req->num_mapped_sgs,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ req->num_mapped_sgs = 0;
+ } else {
+ dma_unmap_single(dev, req->dma, req->length,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ }
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev);
+
+void usb_gadget_unmap_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in)
+{
+ usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
+
+#endif /* CONFIG_HAS_DMA */
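A UDC driver typically calls the map helper on entry to its ->queue() method and the unmap helper before giving the request back; a sketch under assumed names (my_udc, ep_to_my_udc are placeholders):

    static int my_ep_queue(struct usb_ep *ep, struct usb_request *req,
                           gfp_t gfp_flags)
    {
            struct my_udc *udc = ep_to_my_udc(ep);   /* assumed helper */
            int ret;

            ret = usb_gadget_map_request(&udc->gadget, req,
                                         usb_endpoint_dir_in(ep->desc));
            if (ret)
                    return ret;

            /* ... hand req->dma (or req->sg) to the controller ... */
            return 0;
    }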
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_gadget_giveback_request - give the request back to the gadget layer
+ * @ep: the endpoint to be used with the request
+ * @req: the request being given back
+ * Context: in_interrupt()
+ *
+ * This is called by device controller drivers in order to return the
+ * completed request back to the gadget layer.
+ */
+void usb_gadget_giveback_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ if (likely(req->status == 0))
+ usb_led_activity(USB_LED_EVENT_GADGET);
+
+ trace_usb_gadget_giveback_request(ep, req, 0);
+
+ req->complete(ep, req);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_giveback_request);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * gadget_find_ep_by_name - returns the ep whose name is the same as the string
+ *	passed in the second parameter, or NULL if no such endpoint is found
+ * @g: gadget whose endpoints are searched
+ * @name: name of the endpoint being searched for
+ */
+struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name)
+{
+ struct usb_ep *ep;
+
+ gadget_for_each_ep(ep, g) {
+ if (!strcmp(ep->name, name))
+ return ep;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gadget_find_ep_by_name);
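A function driver with fixed endpoint names could use this at bind time, for example (hypothetical caller, "ep1in" is an assumed name):

    struct usb_ep *ep = gadget_find_ep_by_name(gadget, "ep1in");

    if (!ep)
            return -ENODEV;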
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+ struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+ u8 type;
+ u16 max;
+ int num_req_streams = 0;
+
+ /* endpoint already claimed? */
+ if (ep->claimed)
+ return 0;
+
+ type = usb_endpoint_type(desc);
+ max = 0x7ff & usb_endpoint_maxp(desc);
+
+ if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
+ return 0;
+ if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
+ return 0;
+
+ if (max > ep->maxpacket_limit)
+ return 0;
+
+ /* "high bandwidth" works only at high speed */
+ if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
+ return 0;
+
+ switch (type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* only support ep0 for portable CONTROL traffic */
+ return 0;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!ep->caps.type_iso)
+ return 0;
+ /* ISO: limit 1023 bytes full speed, 1024 high/super speed */
+ if (!gadget_is_dualspeed(gadget) && max > 1023)
+ return 0;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (!ep->caps.type_bulk)
+ return 0;
+ if (ep_comp && gadget_is_superspeed(gadget)) {
+ /* Get the number of required streams from the
+ * EP companion descriptor and see if the EP
+ * matches it
+ */
+ num_req_streams = ep_comp->bmAttributes & 0x1f;
+ if (num_req_streams > ep->max_streams)
+ return 0;
+ }
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ /* Bulk endpoints handle interrupt transfers,
+ * except the toggle-quirky iso-synch kind
+ */
+ if (!ep->caps.type_int && !ep->caps.type_bulk)
+ return 0;
+ /* INT: limit 64 bytes full speed, 1024 high/super speed */
+ if (!gadget_is_dualspeed(gadget) && max > 64)
+ return 0;
+ break;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
+
+/* ------------------------------------------------------------------------- */
+
+static void usb_gadget_state_work(struct work_struct *work)
+{
+ struct usb_gadget *gadget = work_to_gadget(work);
+ struct usb_udc *udc = gadget->udc;
+
+ if (udc)
+ sysfs_notify(&udc->dev.kobj, NULL, "state");
+}
+
+void usb_gadget_set_state(struct usb_gadget *gadget,
+ enum usb_device_state state)
+{
+ gadget->state = state;
+ schedule_work(&gadget->work);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_set_state);
+
+/* ------------------------------------------------------------------------- */
+
+static void usb_udc_connect_control(struct usb_udc *udc)
+{
+ if (udc->vbus)
+ usb_gadget_connect(udc->gadget);
+ else
+ usb_gadget_disconnect(udc->gadget);
+}
+
+/**
+ * usb_udc_vbus_handler - updates the udc core vbus status, and tries to
+ * connect or disconnect the gadget
+ * @gadget: The gadget on which the vbus change occurs
+ * @status: The vbus status
+ *
+ * The udc driver calls it when it wants to connect or disconnect the gadget
+ * according to the vbus status.
+ */
+void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
+{
+ struct usb_udc *udc = gadget->udc;
+
+ if (udc) {
+ udc->vbus = status;
+ usb_udc_connect_control(udc);
+ }
+}
+EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
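A controller driver with VBUS sensing would call this from its session interrupt; a sketch with placeholder names (my_udc, my_read_vbus are assumptions, not part of this file):

    static irqreturn_t my_vbus_irq(int irq, void *data)
    {
            struct my_udc *udc = data;

            /* my_read_vbus() is an assumed register read returning bool */
            usb_udc_vbus_handler(&udc->gadget, my_read_vbus(udc));
            return IRQ_HANDLED;
    }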
+
+/**
+ * usb_gadget_udc_reset - notifies the udc core that a bus reset occurred
+ * @gadget: The gadget on which the bus reset occurred
+ * @driver: The gadget driver we want to notify
+ *
+ * If the udc driver has a bus reset handler, it needs to call this when a bus
+ * reset occurs. It notifies the gadget driver of the bus reset and also
+ * updates the gadget state.
+ */
+void usb_gadget_udc_reset(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ driver->reset(gadget);
+ usb_gadget_set_state(gadget, USB_STATE_DEFAULT);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+
+/**
+ * usb_gadget_udc_start - tells usb device controller to start up
+ * @udc: The UDC to be started
+ *
+ * This call is issued by the UDC Class driver when it's about
+ * to register a gadget driver to the device controller, before
+ * calling gadget driver's bind() method.
+ *
+ * It allows the controller to be powered off until strictly
+ * necessary to have it powered on.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_udc_start(struct usb_udc *udc)
+{
+ return udc->gadget->ops->udc_start(udc->gadget, udc->driver);
+}
+
+/**
+ * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
+ * @udc: The UDC to be stopped
+ *
+ * This call is issued by the UDC Class driver after calling
+ * gadget driver's unbind() method.
+ *
+ * The details are implementation specific, but it can go as
+ * far as powering off the UDC completely and disabling its data
+ * line pullups.
+ */
+static inline void usb_gadget_udc_stop(struct usb_udc *udc)
+{
+ udc->gadget->ops->udc_stop(udc->gadget);
+}
+
+/**
+ * usb_udc_release - release the usb_udc struct
+ * @dev: the dev member within usb_udc
+ *
+ * This is called by the driver core in order to free memory once the last
+ * reference is released.
+ */
+static void usb_udc_release(struct device *dev)
+{
+ struct usb_udc *udc;
+
+ udc = container_of(dev, struct usb_udc, dev);
+ dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
+ kfree(udc);
+}
+
+static const struct attribute_group *usb_udc_attr_groups[];
+
+static void usb_udc_nop_release(struct device *dev)
+{
+ dev_vdbg(dev, "%s\n", __func__);
+}
+
+/**
+ * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller driver's
+ * device.
+ * @gadget: the gadget to be added to the list.
+ * @release: a gadget release function.
+ *
+ * Returns zero on success, negative errno otherwise.
+ */
+int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
+ void (*release)(struct device *dev))
+{
+ struct usb_udc *udc;
+ struct usb_gadget_driver *driver;
+ int ret = -ENOMEM;
+
+ udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ goto err1;
+
+ dev_set_name(&gadget->dev, "gadget");
+ INIT_WORK(&gadget->work, usb_gadget_state_work);
+ gadget->dev.parent = parent;
+
+ if (release)
+ gadget->dev.release = release;
+ else
+ gadget->dev.release = usb_udc_nop_release;
+
+ ret = device_register(&gadget->dev);
+ if (ret)
+ goto err2;
+
+ device_initialize(&udc->dev);
+ udc->dev.release = usb_udc_release;
+ udc->dev.class = udc_class;
+ udc->dev.groups = usb_udc_attr_groups;
+ udc->dev.parent = parent;
+ ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
+ if (ret)
+ goto err3;
+
+ udc->gadget = gadget;
+ gadget->udc = udc;
+
+ mutex_lock(&udc_lock);
+ list_add_tail(&udc->list, &udc_list);
+
+ ret = device_add(&udc->dev);
+ if (ret)
+ goto err4;
+
+ usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
+ udc->vbus = true;
+
+ /* pick up one of pending gadget drivers */
+ list_for_each_entry(driver, &gadget_driver_pending_list, pending) {
+ if (!driver->udc_name || strcmp(driver->udc_name,
+ dev_name(&udc->dev)) == 0) {
+ ret = udc_bind_to_driver(udc, driver);
+ if (ret != -EPROBE_DEFER)
+ list_del(&driver->pending);
+ if (ret)
+ goto err4;
+ break;
+ }
+ }
+
+ mutex_unlock(&udc_lock);
+
+ return 0;
+
+err4:
+ list_del(&udc->list);
+ mutex_unlock(&udc_lock);
+
+err3:
+ put_device(&udc->dev);
+ device_del(&gadget->dev);
+
+err2:
+ put_device(&gadget->dev);
+ kfree(udc);
+
+err1:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);
+
+/**
+ * usb_get_gadget_udc_name - get the name of the first UDC controller
+ * This function returns the name of the first UDC controller in the system.
+ * Please note that this interface is useful only for legacy drivers which
+ * assume that there is only one UDC controller in the system and need to
+ * get its name before initialization. There is no guarantee that the UDC
+ * of the returned name will still be available when the gadget driver
+ * registers itself.
+ *
+ * Returns pointer to string with UDC controller name on success, NULL
+ * otherwise. Caller should kfree() returned string.
+ */
+char *usb_get_gadget_udc_name(void)
+{
+ struct usb_udc *udc;
+ char *name = NULL;
+
+ /* For now we take the first available UDC */
+ mutex_lock(&udc_lock);
+ list_for_each_entry(udc, &udc_list, list) {
+ if (!udc->driver) {
+ name = kstrdup(udc->gadget->name, GFP_KERNEL);
+ break;
+ }
+ }
+ mutex_unlock(&udc_lock);
+ return name;
+}
+EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name);
+
+/**
+ * usb_add_gadget_udc - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller
+ * driver's device.
+ * @gadget: the gadget to be added to the list
+ *
+ * Returns zero on success, negative errno otherwise.
+ */
+int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
+{
+ return usb_add_gadget_udc_release(parent, gadget, NULL);
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
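Registration from a controller driver's probe path then reduces to filling in the gadget and calling the helper; a sketch (the my_udc structure and my_udc_ops are placeholders):

    static int my_udc_probe(struct platform_device *pdev)
    {
            struct my_udc *udc;

            udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
            if (!udc)
                    return -ENOMEM;

            udc->gadget.ops = &my_udc_ops;
            udc->gadget.max_speed = USB_SPEED_HIGH;
            udc->gadget.name = "my_udc";

            platform_set_drvdata(pdev, udc);
            return usb_add_gadget_udc(&pdev->dev, &udc->gadget);
    }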
+
+static void usb_gadget_remove_driver(struct usb_udc *udc)
+{
+ dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
+ udc->driver->function);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+
+ usb_gadget_disconnect(udc->gadget);
+ udc->driver->disconnect(udc->gadget);
+ udc->driver->unbind(udc->gadget);
+ usb_gadget_udc_stop(udc);
+
+ udc->driver = NULL;
+ udc->dev.driver = NULL;
+ udc->gadget->dev.driver = NULL;
+}
+
+/**
+ * usb_del_gadget_udc - deletes @udc from udc_list
+ * @gadget: the gadget to be removed.
+ *
+ * This will call usb_gadget_unregister_driver() if
+ * the @udc is still busy.
+ */
+void usb_del_gadget_udc(struct usb_gadget *gadget)
+{
+ struct usb_udc *udc = gadget->udc;
+
+ if (!udc)
+ return;
+
+ dev_vdbg(gadget->dev.parent, "unregistering gadget\n");
+
+ mutex_lock(&udc_lock);
+ list_del(&udc->list);
+
+ if (udc->driver) {
+ struct usb_gadget_driver *driver = udc->driver;
+
+ usb_gadget_remove_driver(udc);
+ list_add(&driver->pending, &gadget_driver_pending_list);
+ }
+ mutex_unlock(&udc_lock);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+ flush_work(&gadget->work);
+ device_unregister(&udc->dev);
+ device_unregister(&gadget->dev);
+}
+EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
+
+/* ------------------------------------------------------------------------- */
+
+static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
+{
+ int ret;
+
+ dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
+ driver->function);
+
+ udc->driver = driver;
+ udc->dev.driver = &driver->driver;
+ udc->gadget->dev.driver = &driver->driver;
+
+ ret = driver->bind(udc->gadget, driver);
+ if (ret)
+ goto err1;
+ ret = usb_gadget_udc_start(udc);
+ if (ret) {
+ driver->unbind(udc->gadget);
+ goto err1;
+ }
+ usb_udc_connect_control(udc);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ return 0;
+err1:
+ if (ret != -EISNAM)
+ dev_err(&udc->dev, "failed to start %s: %d\n",
+ udc->driver->function, ret);
+ udc->driver = NULL;
+ udc->dev.driver = NULL;
+ udc->gadget->dev.driver = NULL;
+ return ret;
+}
+
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
+{
+ struct usb_udc *udc = NULL;
+ int ret = -ENODEV;
+
+ if (!driver || !driver->bind || !driver->setup)
+ return -EINVAL;
+
+ mutex_lock(&udc_lock);
+ if (driver->udc_name) {
+ list_for_each_entry(udc, &udc_list, list) {
+ ret = strcmp(driver->udc_name, dev_name(&udc->dev));
+ if (!ret)
+ break;
+ }
+ if (!ret && !udc->driver)
+ goto found;
+ } else {
+ list_for_each_entry(udc, &udc_list, list) {
+ /* For now we take the first one */
+ if (!udc->driver)
+ goto found;
+ }
+ }
+
+ if (!driver->match_existing_only) {
+ list_add_tail(&driver->pending, &gadget_driver_pending_list);
+ pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
+ driver->function);
+ ret = 0;
+ }
+
+ mutex_unlock(&udc_lock);
+ return ret;
+found:
+ ret = udc_bind_to_driver(udc, driver);
+ mutex_unlock(&udc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct usb_udc *udc = NULL;
+ int ret = -ENODEV;
+
+ if (!driver || !driver->unbind)
+ return -EINVAL;
+
+ mutex_lock(&udc_lock);
+ list_for_each_entry(udc, &udc_list, list)
+ if (udc->driver == driver) {
+ usb_gadget_remove_driver(udc);
+ usb_gadget_set_state(udc->gadget,
+ USB_STATE_NOTATTACHED);
+ ret = 0;
+ break;
+ }
+
+ if (ret) {
+ list_del(&driver->pending);
+ ret = 0;
+ }
+ mutex_unlock(&udc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t usb_udc_srp_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+
+ if (sysfs_streq(buf, "1"))
+ usb_gadget_wakeup(udc->gadget);
+
+ return n;
+}
+static DEVICE_ATTR(srp, S_IWUSR, NULL, usb_udc_srp_store);
+
+static ssize_t usb_udc_softconn_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+
+ if (!udc->driver) {
+ dev_err(dev, "soft-connect without a gadget driver\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (sysfs_streq(buf, "connect")) {
+ usb_gadget_udc_start(udc);
+ usb_gadget_connect(udc->gadget);
+ } else if (sysfs_streq(buf, "disconnect")) {
+ usb_gadget_disconnect(udc->gadget);
+ udc->driver->disconnect(udc->gadget);
+ usb_gadget_udc_stop(udc);
+ } else {
+ dev_err(dev, "unsupported command '%s'\n", buf);
+ return -EINVAL;
+ }
+
+ return n;
+}
+static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ struct usb_gadget *gadget = udc->gadget;
+
+ return sprintf(buf, "%s\n", usb_state_string(gadget->state));
+}
+static DEVICE_ATTR_RO(state);
+
+#define USB_UDC_SPEED_ATTR(name, param) \
+ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
+ return snprintf(buf, PAGE_SIZE, "%s\n", \
+ usb_speed_string(udc->gadget->param)); \
+} \
+static DEVICE_ATTR_RO(name)
+
+static USB_UDC_SPEED_ATTR(current_speed, speed);
+static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
+
+#define USB_UDC_ATTR(name) \
+ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
+ struct usb_gadget *gadget = udc->gadget; \
+ \
+ return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
+} \
+static DEVICE_ATTR_RO(name)
+
+static USB_UDC_ATTR(is_otg);
+static USB_UDC_ATTR(is_a_peripheral);
+static USB_UDC_ATTR(b_hnp_enable);
+static USB_UDC_ATTR(a_hnp_support);
+static USB_UDC_ATTR(a_alt_hnp_support);
+static USB_UDC_ATTR(is_selfpowered);
+
+static struct attribute *usb_udc_attrs[] = {
+ &dev_attr_srp.attr,
+ &dev_attr_soft_connect.attr,
+ &dev_attr_state.attr,
+ &dev_attr_current_speed.attr,
+ &dev_attr_maximum_speed.attr,
+
+ &dev_attr_is_otg.attr,
+ &dev_attr_is_a_peripheral.attr,
+ &dev_attr_b_hnp_enable.attr,
+ &dev_attr_a_hnp_support.attr,
+ &dev_attr_a_alt_hnp_support.attr,
+ &dev_attr_is_selfpowered.attr,
+ NULL,
+};
+
+static const struct attribute_group usb_udc_attr_group = {
+ .attrs = usb_udc_attrs,
+};
+
+static const struct attribute_group *usb_udc_attr_groups[] = {
+ &usb_udc_attr_group,
+ NULL,
+};
+
+static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ int ret;
+
+ ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
+ if (ret) {
+ dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
+ return ret;
+ }
+
+ if (udc->driver) {
+ ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
+ udc->driver->function);
+ if (ret) {
+ dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int __init usb_udc_init(void)
+{
+ udc_class = class_create(THIS_MODULE, "udc");
+ if (IS_ERR(udc_class)) {
+ pr_err("failed to create udc class --> %ld\n",
+ PTR_ERR(udc_class));
+ return PTR_ERR(udc_class);
+ }
+
+ udc_class->dev_uevent = usb_udc_uevent;
+ return 0;
+}
+subsys_initcall(usb_udc_init);
+
+static void __exit usb_udc_exit(void)
+{
+ class_destroy(udc_class);
+}
+module_exit(usb_udc_exit);
+
+MODULE_DESCRIPTION("UDC Framework");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index dde44450dfa9..77d07904f932 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -647,12 +647,10 @@ static int dummy_disable(struct usb_ep *_ep)
static struct usb_request *dummy_alloc_request(struct usb_ep *_ep,
gfp_t mem_flags)
{
- struct dummy_ep *ep;
struct dummy_request *req;
if (!_ep)
return NULL;
- ep = usb_ep_to_dummy_ep(_ep);
req = kzalloc(sizeof(*req), mem_flags);
if (!req)
@@ -2444,9 +2442,6 @@ static int dummy_start(struct usb_hcd *hcd)
static void dummy_stop(struct usb_hcd *hcd)
{
- struct dummy *dum;
-
- dum = hcd_to_dummy_hcd(hcd)->dum;
device_remove_file(dummy_dev(hcd_to_dummy_hcd(hcd)), &dev_attr_urbs);
dev_info(dummy_dev(hcd_to_dummy_hcd(hcd)), "stopped\n");
}
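This hunk and several similar ones below (m66592, mv_u3d, mv_udc, net2272, udc-xilinx) are mechanical dead-store cleanups: locals assigned but never read are deleted, and the m66592/r8a66597 set_halt() error gotos collapse into an else-if chain. The only behavioral wrinkle is in the two Renesas interrupt handlers, which stop reading the NRDYSTS/NRDYENB registers entirely since the values were unused.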
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index b1cfa96cc88f..6e977dc22570 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1199,8 +1199,6 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
struct m66592 *m66592 = _m66592;
u16 intsts0;
u16 intenb0;
- u16 brdysts, nrdysts, bempsts;
- u16 brdyenb, nrdyenb, bempenb;
u16 savepipe;
u16 mask0;
@@ -1224,12 +1222,10 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
mask0 = intsts0 & intenb0;
if (mask0) {
- brdysts = m66592_read(m66592, M66592_BRDYSTS);
- nrdysts = m66592_read(m66592, M66592_NRDYSTS);
- bempsts = m66592_read(m66592, M66592_BEMPSTS);
- brdyenb = m66592_read(m66592, M66592_BRDYENB);
- nrdyenb = m66592_read(m66592, M66592_NRDYENB);
- bempenb = m66592_read(m66592, M66592_BEMPENB);
+ u16 brdysts = m66592_read(m66592, M66592_BRDYSTS);
+ u16 bempsts = m66592_read(m66592, M66592_BEMPSTS);
+ u16 brdyenb = m66592_read(m66592, M66592_BRDYENB);
+ u16 bempenb = m66592_read(m66592, M66592_BEMPENB);
if (mask0 & M66592_VBINT) {
m66592_write(m66592, 0xffff & ~M66592_VBINT,
@@ -1408,28 +1404,20 @@ static int m66592_dequeue(struct usb_ep *_ep, struct usb_request *_req)
static int m66592_set_halt(struct usb_ep *_ep, int value)
{
- struct m66592_ep *ep;
- struct m66592_request *req;
+ struct m66592_ep *ep = container_of(_ep, struct m66592_ep, ep);
unsigned long flags;
int ret = 0;
- ep = container_of(_ep, struct m66592_ep, ep);
- req = list_entry(ep->queue.next, struct m66592_request, queue);
-
spin_lock_irqsave(&ep->m66592->lock, flags);
if (!list_empty(&ep->queue)) {
ret = -EAGAIN;
- goto out;
- }
- if (value) {
+ } else if (value) {
ep->busy = 1;
pipe_stall(ep->m66592, ep->pipenum);
} else {
ep->busy = 0;
pipe_stop(ep->m66592, ep->pipenum);
}
-
-out:
spin_unlock_irqrestore(&ep->m66592->lock, flags);
return ret;
}
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index dafe74eb9ade..b9e19a591322 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -119,18 +119,14 @@ static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
struct mv_u3d_req *curr_req)
{
struct mv_u3d_trb *curr_trb;
- dma_addr_t cur_deq_lo;
- struct mv_u3d_ep_context *curr_ep_context;
- int trb_complete, actual, remaining_length = 0;
+ int actual, remaining_length = 0;
int direction, ep_num;
int retval = 0;
u32 tmp, status, length;
- curr_ep_context = &u3d->ep_context[index];
direction = index % 2;
ep_num = index / 2;
- trb_complete = 0;
actual = curr_req->req.length;
while (!list_empty(&curr_req->trb_list)) {
@@ -143,15 +139,10 @@ static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
}
curr_trb->trb_hw->ctrl.own = 0;
- if (direction == MV_U3D_EP_DIR_OUT) {
+ if (direction == MV_U3D_EP_DIR_OUT)
tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
- cur_deq_lo =
- ioread32(&u3d->vuc_regs->rxst[ep_num].curdeqlo);
- } else {
+ else
tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
- cur_deq_lo =
- ioread32(&u3d->vuc_regs->txst[ep_num].curdeqlo);
- }
status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
@@ -527,7 +518,6 @@ static int mv_u3d_ep_enable(struct usb_ep *_ep,
{
struct mv_u3d *u3d;
struct mv_u3d_ep *ep;
- struct mv_u3d_ep_context *ep_context;
u16 max = 0;
unsigned maxburst = 0;
u32 epxcr, direction;
@@ -548,9 +538,6 @@ static int mv_u3d_ep_enable(struct usb_ep *_ep,
_ep->maxburst = 1;
maxburst = _ep->maxburst;
- /* Get the endpoint context address */
- ep_context = (struct mv_u3d_ep_context *)ep->ep_context;
-
/* Set the max burst size */
switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_BULK:
@@ -633,7 +620,6 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
{
struct mv_u3d *u3d;
struct mv_u3d_ep *ep;
- struct mv_u3d_ep_context *ep_context;
u32 epxcr, direction;
unsigned long flags;
@@ -646,9 +632,6 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
u3d = ep->u3d;
- /* Get the endpoint context address */
- ep_context = ep->ep_context;
-
direction = mv_u3d_ep_dir(ep);
/* nuke all pending requests (does flush) */
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 81b6229c7805..ce73b3552269 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -129,7 +129,7 @@ static int process_ep_req(struct mv_udc *udc, int index,
{
struct mv_dtd *curr_dtd;
struct mv_dqh *curr_dqh;
- int td_complete, actual, remaining_length;
+ int actual, remaining_length;
int i, direction;
int retval = 0;
u32 errors;
@@ -139,7 +139,6 @@ static int process_ep_req(struct mv_udc *udc, int index,
direction = index % 2;
curr_dtd = curr_req->head;
- td_complete = 0;
actual = curr_req->req.length;
for (i = 0; i < curr_req->dtd_count; i++) {
@@ -412,11 +411,8 @@ static int req_to_dtd(struct mv_req *req)
unsigned count;
int is_last, is_first = 1;
struct mv_dtd *dtd, *last_dtd = NULL;
- struct mv_udc *udc;
dma_addr_t dma;
- udc = req->ep->udc;
-
do {
dtd = build_dtd(req, &count, &dma, &is_last);
if (dtd == NULL)
@@ -567,7 +563,7 @@ static int mv_ep_disable(struct usb_ep *_ep)
struct mv_udc *udc;
struct mv_ep *ep;
struct mv_dqh *dqh;
- u32 bit_pos, epctrlx, direction;
+ u32 epctrlx, direction;
unsigned long flags;
ep = container_of(_ep, struct mv_ep, ep);
@@ -582,7 +578,6 @@ static int mv_ep_disable(struct usb_ep *_ep)
spin_lock_irqsave(&udc->lock, flags);
direction = ep_dir(ep);
- bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
/* Reset the max packet length and the interrupt on Setup */
dqh->max_packet_length = 0;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 18f5ebd447b8..7c6113432093 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -329,12 +329,10 @@ static int net2272_disable(struct usb_ep *_ep)
static struct usb_request *
net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
- struct net2272_ep *ep;
struct net2272_request *req;
if (!_ep)
return NULL;
- ep = container_of(_ep, struct net2272_ep, ep);
req = kzalloc(sizeof(*req), gfp_flags);
if (!req)
@@ -348,10 +346,8 @@ net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
static void
net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
- struct net2272_ep *ep;
struct net2272_request *req;
- ep = container_of(_ep, struct net2272_ep, ep);
if (!_ep || !_req)
return;
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index c894b94b234b..614ab951a4ae 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -211,7 +211,7 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
goto print_err;
}
- if (dev->quirks & PLX_SUPERSPEED) {
+ if (dev->quirks & PLX_PCIE) {
if ((desc->bEndpointAddress & 0x0f) >= 0x0c) {
ret = -EDOM;
goto print_err;
@@ -245,7 +245,7 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
/* set type, direction, address; reset fifo counters */
writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
- if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) {
+ if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
tmp = readl(&ep->cfg->ep_cfg);
/* If USB ep number doesn't match hardware ep number */
if ((tmp & 0xf) != usb_endpoint_num(desc)) {
@@ -316,7 +316,7 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
}
- if (dev->quirks & PLX_SUPERSPEED)
+ if (dev->quirks & PLX_PCIE)
ep_clear_seqnum(ep);
writel(tmp, &ep->cfg->ep_cfg);
@@ -527,7 +527,7 @@ static int net2280_disable(struct usb_ep *_ep)
spin_lock_irqsave(&ep->dev->lock, flags);
nuke(ep);
- if (ep->dev->quirks & PLX_SUPERSPEED)
+ if (ep->dev->quirks & PLX_PCIE)
ep_reset_338x(ep->dev->regs, ep);
else
ep_reset_228x(ep->dev->regs, ep);
@@ -862,7 +862,7 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
writel(readl(&dma->dmastat), &dma->dmastat);
writel(td_dma, &dma->dmadesc);
- if (ep->dev->quirks & PLX_SUPERSPEED)
+ if (ep->dev->quirks & PLX_PCIE)
dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
writel(dmactl, &dma->dmactl);
@@ -1046,7 +1046,7 @@ net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
/* kickstart this i/o queue? */
if (list_empty(&ep->queue) && !ep->stopped &&
- !((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
+ !((dev->quirks & PLX_PCIE) && ep->dma &&
(readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
/* use DMA if the endpoint supports it, else pio */
@@ -1169,7 +1169,7 @@ static void scan_dma_completions(struct net2280_ep *ep)
break;
} else if (!ep->is_in &&
(req->req.length % ep->ep.maxpacket) &&
- !(ep->dev->quirks & PLX_SUPERSPEED)) {
+ !(ep->dev->quirks & PLX_PCIE)) {
tmp = readl(&ep->regs->ep_stat);
/* AVOID TROUBLE HERE by not issuing short reads from
@@ -1367,7 +1367,7 @@ net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
ep->wedged = 1;
} else {
clear_halt(ep);
- if (ep->dev->quirks & PLX_SUPERSPEED &&
+ if (ep->dev->quirks & PLX_PCIE &&
!list_empty(&ep->queue) && ep->td_dma)
restart_dma(ep);
ep->wedged = 0;
@@ -2394,7 +2394,7 @@ static int net2280_start(struct usb_gadget *_gadget,
*/
net2280_led_active(dev, 1);
- if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
+ if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
defect7374_enable_data_eps_zero(dev);
ep0_start(dev);
@@ -3063,7 +3063,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
}
ep->stopped = 0;
dev->protocol_stall = 0;
- if (!(dev->quirks & PLX_SUPERSPEED)) {
+ if (!(dev->quirks & PLX_PCIE)) {
if (ep->dev->quirks & PLX_2280)
tmp = BIT(FIFO_OVERFLOW) |
BIT(FIFO_UNDERFLOW);
@@ -3090,7 +3090,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
cpu_to_le32s(&u.raw[0]);
cpu_to_le32s(&u.raw[1]);
- if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
+ if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
defect7374_workaround(dev, u.r);
tmp = 0;
@@ -3173,7 +3173,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
} else {
ep_vdbg(dev, "%s clear halt\n", e->ep.name);
clear_halt(e);
- if ((ep->dev->quirks & PLX_SUPERSPEED) &&
+ if ((ep->dev->quirks & PLX_PCIE) &&
!list_empty(&e->queue) && e->td_dma)
restart_dma(e);
}
@@ -3195,7 +3195,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
if (e->ep.name == ep0name)
goto do_stall;
set_halt(e);
- if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
+ if ((dev->quirks & PLX_PCIE) && e->dma)
abort_dma(e);
allow_status(ep);
ep_vdbg(dev, "%s set halt\n", ep->ep.name);
@@ -3234,7 +3234,7 @@ do_stall:
#undef w_length
next_endpoints:
- if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) {
+ if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
u32 mask = (BIT(ENDPOINT_0_INTERRUPT) |
USB3380_IRQSTAT0_EP_INTR_MASK_IN |
USB3380_IRQSTAT0_EP_INTR_MASK_OUT);
@@ -3399,7 +3399,7 @@ __acquires(dev->lock)
writel(tmp, &dma->dmastat);
/* dma sync*/
- if (dev->quirks & PLX_SUPERSPEED) {
+ if (dev->quirks & PLX_PCIE) {
u32 r_dmacount = readl(&dma->dmacount);
if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
@@ -3468,7 +3468,7 @@ static irqreturn_t net2280_irq(int irq, void *_dev)
/* control requests and PIO */
handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
- if (dev->quirks & PLX_SUPERSPEED) {
+ if (dev->quirks & PLX_PCIE) {
/* re-enable interrupt to trigger any possible new interrupt */
u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
@@ -3513,7 +3513,7 @@ static void net2280_remove(struct pci_dev *pdev)
}
if (dev->got_irq)
free_irq(pdev->irq, dev);
- if (dev->quirks & PLX_SUPERSPEED)
+ if (dev->quirks & PLX_PCIE)
pci_disable_msi(pdev);
if (dev->regs)
iounmap(dev->regs);
@@ -3593,7 +3593,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
- if (dev->quirks & PLX_SUPERSPEED) {
+ if (dev->quirks & PLX_PCIE) {
u32 fsmvalue;
u32 usbstat;
dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
@@ -3637,7 +3637,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto done;
}
- if (dev->quirks & PLX_SUPERSPEED)
+ if (dev->quirks & PLX_PCIE)
if (pci_enable_msi(pdev))
ep_err(dev, "Failed to enable MSI mode\n");
@@ -3755,10 +3755,19 @@ static const struct pci_device_id pci_ids[] = { {
.class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x2380,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = PLX_PCIE,
+ },
+ {
+ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_PLX,
.device = 0x3380,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = PLX_SUPERSPEED,
+ .driver_data = PLX_PCIE | PLX_SUPERSPEED,
},
{
.class = PCI_CLASS_SERIAL_USB_DEVICE,
@@ -3767,7 +3776,7 @@ static const struct pci_device_id pci_ids[] = { {
.device = 0x3382,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = PLX_SUPERSPEED,
+ .driver_data = PLX_PCIE | PLX_SUPERSPEED,
},
{ /* end: all zeroes */ }
};
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 0d32052bf16f..2736a95751c3 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -47,6 +47,7 @@ set_idx_reg(struct net2280_regs __iomem *regs, u32 index, u32 value)
#define PLX_LEGACY BIT(0)
#define PLX_2280 BIT(1)
#define PLX_SUPERSPEED BIT(2)
+#define PLX_PCIE BIT(3)
#define REG_DIAG 0x0
#define RETRY_COUNTER 16
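This quirk split drives all of the net2280.c changes above: PLX_PCIE now marks every PCIe-attached part the driver handles, so the PCIe-specific workarounds that used to key off PLX_SUPERSPEED move to the new flag, leaving PLX_SUPERSPEED to mean what its name says. The new 0x2380 device ID (presumably the USB 2.0-only USB2380) therefore gets PLX_PCIE alone, while the USB3380/USB3382 entries carry both flags.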
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index ebc51ec5790a..a97da645c1b9 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1477,11 +1477,11 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
req->dma_mapped = 0;
}
ep->halted = 1;
- spin_lock(&dev->lock);
+ spin_unlock(&dev->lock);
if (!ep->in)
pch_udc_ep_clear_rrdy(ep);
usb_gadget_giveback_request(&ep->ep, &req->req);
- spin_unlock(&dev->lock);
+ spin_lock(&dev->lock);
ep->halted = halted;
}
@@ -1984,9 +1984,8 @@ static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
if (ep->num == PCH_UDC_EP0)
ep->dev->stall = 1;
pch_udc_ep_set_stall(ep);
- pch_udc_enable_ep_interrupts(ep->dev,
- PCH_UDC_EPINT(ep->in,
- ep->num));
+ pch_udc_enable_ep_interrupts(
+ ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
} else {
pch_udc_ep_clear_stall(ep);
}
@@ -2451,16 +2450,11 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
*/
static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
{
- struct pch_udc_ep *ep;
- struct pch_udc_request *req;
-
- ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
- if (!list_empty(&ep->queue)) {
- req = list_entry(ep->queue.next, struct pch_udc_request, queue);
- pch_udc_enable_ep_interrupts(ep->dev,
- PCH_UDC_EPINT(ep->in, ep->num));
- pch_udc_ep_clear_nak(ep);
- }
+ struct pch_udc_ep *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
+ if (list_empty(&ep->queue))
+ return;
+ pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+ pch_udc_ep_clear_nak(ep);
}
/**
@@ -2573,9 +2567,9 @@ static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
empty_req_queue(ep);
}
if (dev->driver) {
- spin_lock(&dev->lock);
- usb_gadget_udc_reset(&dev->gadget, dev->driver);
spin_unlock(&dev->lock);
+ usb_gadget_udc_reset(&dev->gadget, dev->driver);
+ spin_lock(&dev->lock);
}
}
@@ -2654,9 +2648,9 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
dev->ep[i].halted = 0;
}
dev->stall = 0;
- spin_lock(&dev->lock);
- dev->driver->setup(&dev->gadget, &dev->setup_data);
spin_unlock(&dev->lock);
+ dev->driver->setup(&dev->gadget, &dev->setup_data);
+ spin_lock(&dev->lock);
}
/**
@@ -2691,9 +2685,9 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
dev->stall = 0;
/* call gadget zero with setup data received */
- spin_lock(&dev->lock);
- dev->driver->setup(&dev->gadget, &dev->setup_data);
spin_unlock(&dev->lock);
+ dev->driver->setup(&dev->gadget, &dev->setup_data);
+ spin_lock(&dev->lock);
}
/**
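The four pch_udc hunks above correct the same inverted locking pattern: dev->lock is already held on these paths, so the driver must drop it, not take it again, around calls into the gadget driver (usb_gadget_giveback_request(), usb_gadget_udc_reset() and the ->setup() callback), since those callbacks may re-enter the UDC and retake the lock.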
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 001a3b74a993..ad140aa00132 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -1825,13 +1825,10 @@ fail:
* Disables all udc endpoints (even control endpoint), report disconnect to
* the gadget user.
*/
-static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
+static void stop_activity(struct pxa_udc *udc)
{
int i;
- /* don't disconnect drivers more than once */
- if (udc->gadget.speed == USB_SPEED_UNKNOWN)
- driver = NULL;
udc->gadget.speed = USB_SPEED_UNKNOWN;
for (i = 0; i < NR_USB_ENDPOINTS; i++)
@@ -1848,7 +1845,7 @@ static int pxa27x_udc_stop(struct usb_gadget *g)
{
struct pxa_udc *udc = to_pxa(g);
- stop_activity(udc, NULL);
+ stop_activity(udc);
udc_disable(udc);
udc->driver = NULL;
@@ -2296,7 +2293,7 @@ static void irq_udc_reset(struct pxa_udc *udc)
if ((udccr & UDCCR_UDA) == 0) {
dev_dbg(udc->dev, "USB reset start\n");
- stop_activity(udc, udc->driver);
+ stop_activity(udc);
}
udc->gadget.speed = USB_SPEED_FULL;
memset(&udc->stats, 0, sizeof udc->stats);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 8b300e6da7fc..f2c8862093a2 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1464,8 +1464,6 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
struct r8a66597 *r8a66597 = _r8a66597;
u16 intsts0;
u16 intenb0;
- u16 brdysts, nrdysts, bempsts;
- u16 brdyenb, nrdyenb, bempenb;
u16 savepipe;
u16 mask0;
@@ -1481,12 +1479,10 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
mask0 = intsts0 & intenb0;
if (mask0) {
- brdysts = r8a66597_read(r8a66597, BRDYSTS);
- nrdysts = r8a66597_read(r8a66597, NRDYSTS);
- bempsts = r8a66597_read(r8a66597, BEMPSTS);
- brdyenb = r8a66597_read(r8a66597, BRDYENB);
- nrdyenb = r8a66597_read(r8a66597, NRDYENB);
- bempenb = r8a66597_read(r8a66597, BEMPENB);
+ u16 brdysts = r8a66597_read(r8a66597, BRDYSTS);
+ u16 bempsts = r8a66597_read(r8a66597, BEMPSTS);
+ u16 brdyenb = r8a66597_read(r8a66597, BRDYENB);
+ u16 bempenb = r8a66597_read(r8a66597, BEMPENB);
if (mask0 & VBINT) {
r8a66597_write(r8a66597, 0xffff & ~VBINT,
@@ -1658,20 +1654,14 @@ static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
static int r8a66597_set_halt(struct usb_ep *_ep, int value)
{
- struct r8a66597_ep *ep;
- struct r8a66597_request *req;
+ struct r8a66597_ep *ep = container_of(_ep, struct r8a66597_ep, ep);
unsigned long flags;
int ret = 0;
- ep = container_of(_ep, struct r8a66597_ep, ep);
- req = get_request_from_ep(ep);
-
spin_lock_irqsave(&ep->r8a66597->lock, flags);
if (!list_empty(&ep->queue)) {
ret = -EAGAIN;
- goto out;
- }
- if (value) {
+ } else if (value) {
ep->busy = 1;
pipe_stall(ep->r8a66597, ep->pipenum);
} else {
@@ -1679,8 +1669,6 @@ static int r8a66597_set_halt(struct usb_ep *_ep, int value)
ep->wedge = 0;
pipe_stop(ep->r8a66597, ep->pipenum);
}
-
-out:
spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
return ret;
}
diff --git a/drivers/usb/gadget/udc/trace.c b/drivers/usb/gadget/udc/trace.c
new file mode 100644
index 000000000000..8c551ab91ad8
--- /dev/null
+++ b/drivers/usb/gadget/udc/trace.c
@@ -0,0 +1,18 @@
+/**
+ * trace.c - USB Gadget Framework Trace Support
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Felipe Balbi <felipe.balbi@linux.intel.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/usb/gadget/udc/trace.h b/drivers/usb/gadget/udc/trace.h
new file mode 100644
index 000000000000..da29874b5366
--- /dev/null
+++ b/drivers/usb/gadget/udc/trace.h
@@ -0,0 +1,298 @@
+/**
+ * trace.h - USB Gadget Framework Trace Support
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Felipe Balbi <felipe.balbi@linux.intel.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gadget
+
+#if !defined(__UDC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __UDC_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <asm/byteorder.h>
+#include <linux/usb/gadget.h>
+
+DECLARE_EVENT_CLASS(udc_log_gadget,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret),
+ TP_STRUCT__entry(
+ __field(enum usb_device_speed, speed)
+ __field(enum usb_device_speed, max_speed)
+ __field(enum usb_device_state, state)
+ __field(unsigned, mA)
+ __field(unsigned, sg_supported)
+ __field(unsigned, is_otg)
+ __field(unsigned, is_a_peripheral)
+ __field(unsigned, b_hnp_enable)
+ __field(unsigned, a_hnp_support)
+ __field(unsigned, hnp_polling_support)
+ __field(unsigned, host_request_flag)
+ __field(unsigned, quirk_ep_out_aligned_size)
+ __field(unsigned, quirk_altset_not_supp)
+ __field(unsigned, quirk_stall_not_supp)
+ __field(unsigned, quirk_zlp_not_supp)
+ __field(unsigned, is_selfpowered)
+ __field(unsigned, deactivated)
+ __field(unsigned, connected)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->speed = g->speed;
+ __entry->max_speed = g->max_speed;
+ __entry->state = g->state;
+ __entry->mA = g->mA;
+ __entry->sg_supported = g->sg_supported;
+ __entry->is_otg = g->is_otg;
+ __entry->is_a_peripheral = g->is_a_peripheral;
+ __entry->b_hnp_enable = g->b_hnp_enable;
+ __entry->a_hnp_support = g->a_hnp_support;
+ __entry->hnp_polling_support = g->hnp_polling_support;
+ __entry->host_request_flag = g->host_request_flag;
+ __entry->quirk_ep_out_aligned_size = g->quirk_ep_out_aligned_size;
+ __entry->quirk_altset_not_supp = g->quirk_altset_not_supp;
+ __entry->quirk_stall_not_supp = g->quirk_stall_not_supp;
+ __entry->quirk_zlp_not_supp = g->quirk_zlp_not_supp;
+ __entry->is_selfpowered = g->is_selfpowered;
+ __entry->deactivated = g->deactivated;
+ __entry->connected = g->connected;
+ __entry->ret = ret;
+ ),
+ TP_printk("speed %d/%d state %d %dmA [%s%s%s%s%s%s%s%s%s%s%s%s%s%s] --> %d",
+ __entry->speed, __entry->max_speed, __entry->state, __entry->mA,
+ __entry->sg_supported ? "sg:" : "",
+ __entry->is_otg ? "OTG:" : "",
+ __entry->is_a_peripheral ? "a_peripheral:" : "",
+ __entry->b_hnp_enable ? "b_hnp:" : "",
+ __entry->a_hnp_support ? "a_hnp:" : "",
+ __entry->hnp_polling_support ? "hnp_poll:" : "",
+ __entry->host_request_flag ? "hostreq:" : "",
+ __entry->quirk_ep_out_aligned_size ? "out_aligned:" : "",
+ __entry->quirk_altset_not_supp ? "no_altset:" : "",
+ __entry->quirk_stall_not_supp ? "no_stall:" : "",
+ __entry->quirk_zlp_not_supp ? "no_zlp:" : "",
+ __entry->is_selfpowered ? "self-powered:" : "bus-powered:",
+ __entry->deactivated ? "deactivated:" : "activated:",
+ __entry->connected ? "connected" : "disconnected",
+ __entry->ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_frame_number,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_wakeup,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_set_selfpowered,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_clear_selfpowered,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_connect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_draw,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_disconnect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_connect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_disconnect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_deactivate,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_activate,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DECLARE_EVENT_CLASS(udc_log_ep,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret),
+ TP_STRUCT__entry(
+ __dynamic_array(char, name, UDC_TRACE_STR_MAX)
+ __field(unsigned, maxpacket)
+ __field(unsigned, maxpacket_limit)
+ __field(unsigned, max_streams)
+ __field(unsigned, mult)
+ __field(unsigned, maxburst)
+ __field(u8, address)
+ __field(bool, claimed)
+ __field(bool, enabled)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name);
+ __entry->maxpacket = ep->maxpacket;
+ __entry->maxpacket_limit = ep->maxpacket_limit;
+ __entry->max_streams = ep->max_streams;
+ __entry->mult = ep->mult;
+ __entry->maxburst = ep->maxburst;
+ __entry->address = ep->address;
+ __entry->claimed = ep->claimed;
+ __entry->enabled = ep->enabled;
+ __entry->ret = ret;
+ ),
+ TP_printk("%s: mps %d/%d streams %d mult %d burst %d addr %02x %s%s --> %d",
+ __get_str(name), __entry->maxpacket, __entry->maxpacket_limit,
+ __entry->max_streams, __entry->mult, __entry->maxburst,
+ __entry->address, __entry->claimed ? "claimed:" : "released:",
+ __entry->enabled ? "enabled" : "disabled", __entry->ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_maxpacket_limit,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_enable,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_disable,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_halt,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_clear_halt,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_wedge,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_fifo_status,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_fifo_flush,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DECLARE_EVENT_CLASS(udc_log_req,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret),
+ TP_STRUCT__entry(
+ __dynamic_array(char, name, UDC_TRACE_STR_MAX)
+ __field(unsigned, length)
+ __field(unsigned, actual)
+ __field(unsigned, num_sgs)
+ __field(unsigned, num_mapped_sgs)
+ __field(unsigned, stream_id)
+ __field(unsigned, no_interrupt)
+ __field(unsigned, zero)
+ __field(unsigned, short_not_ok)
+ __field(int, status)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name);
+ __entry->length = req->length;
+ __entry->actual = req->actual;
+ __entry->num_sgs = req->num_sgs;
+ __entry->num_mapped_sgs = req->num_mapped_sgs;
+ __entry->stream_id = req->stream_id;
+ __entry->no_interrupt = req->no_interrupt;
+ __entry->zero = req->zero;
+ __entry->short_not_ok = req->short_not_ok;
+ __entry->status = req->status;
+ __entry->ret = ret;
+ ),
+ TP_printk("%s: length %d/%d sgs %d/%d stream %d %s%s%s status %d --> %d",
+ __get_str(name), __entry->actual, __entry->length,
+ __entry->num_mapped_sgs, __entry->num_sgs, __entry->stream_id,
+ __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "i" : "I",
+ __entry->status, __entry->ret
+ )
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_alloc_request,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_free_request,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_queue,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_dequeue,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_gadget_giveback_request,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+#endif /* __UDC_TRACE_H */
+
+/* this must stay outside the header guard: define_trace.h re-includes this file */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
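Each DEFINE_EVENT() above generates a trace_<event>() helper, and TRACE_SYSTEM places the events under events/gadget/ in tracefs. A sketch of the intended call pattern in the reworked core.c; the wrapper shown is illustrative and the real usb_ep_enable() does more bookkeeping:

#include "trace.h"

int usb_ep_enable(struct usb_ep *ep)
{
	int ret;

	ret = ep->ops->enable(ep, ep->desc);
	if (!ret)
		ep->enabled = true;

	trace_usb_ep_enable(ep, ret);	/* records the ep fields plus ret */

	return ret;
}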
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
deleted file mode 100644
index e1b2dcebdc2e..000000000000
--- a/drivers/usb/gadget/udc/udc-core.c
+++ /dev/null
@@ -1,800 +0,0 @@
-/**
- * udc.c - Core UDC Framework
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/err.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb.h>
-
-/**
- * struct usb_udc - describes one usb device controller
- * @driver - the gadget driver pointer. For use by the class code
- * @dev - the child device to the actual controller
- * @gadget - the gadget. For use by the class code
- * @list - for use by the udc class driver
- * @vbus - for udcs who care about vbus status, this value is real vbus status;
- * for udcs who do not care about vbus status, this value is always true
- *
- * This represents the internal data structure which is used by the UDC-class
- * to hold information about udc driver and gadget together.
- */
-struct usb_udc {
- struct usb_gadget_driver *driver;
- struct usb_gadget *gadget;
- struct device dev;
- struct list_head list;
- bool vbus;
-};
-
-static struct class *udc_class;
-static LIST_HEAD(udc_list);
-static LIST_HEAD(gadget_driver_pending_list);
-static DEFINE_MUTEX(udc_lock);
-
-static int udc_bind_to_driver(struct usb_udc *udc,
- struct usb_gadget_driver *driver);
-
-/* ------------------------------------------------------------------------- */
-
-#ifdef CONFIG_HAS_DMA
-
-int usb_gadget_map_request_by_dev(struct device *dev,
- struct usb_request *req, int is_in)
-{
- if (req->length == 0)
- return 0;
-
- if (req->num_sgs) {
- int mapped;
-
- mapped = dma_map_sg(dev, req->sg, req->num_sgs,
- is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (mapped == 0) {
- dev_err(dev, "failed to map SGs\n");
- return -EFAULT;
- }
-
- req->num_mapped_sgs = mapped;
- } else {
- req->dma = dma_map_single(dev, req->buf, req->length,
- is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
- if (dma_mapping_error(dev, req->dma)) {
- dev_err(dev, "failed to map buffer\n");
- return -EFAULT;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev);
-
-int usb_gadget_map_request(struct usb_gadget *gadget,
- struct usb_request *req, int is_in)
-{
- return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in);
-}
-EXPORT_SYMBOL_GPL(usb_gadget_map_request);
-
-void usb_gadget_unmap_request_by_dev(struct device *dev,
- struct usb_request *req, int is_in)
-{
- if (req->length == 0)
- return;
-
- if (req->num_mapped_sgs) {
- dma_unmap_sg(dev, req->sg, req->num_mapped_sgs,
- is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
- req->num_mapped_sgs = 0;
- } else {
- dma_unmap_single(dev, req->dma, req->length,
- is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- }
-}
-EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev);
-
-void usb_gadget_unmap_request(struct usb_gadget *gadget,
- struct usb_request *req, int is_in)
-{
- usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in);
-}
-EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
-
-#endif /* CONFIG_HAS_DMA */
-
-/* ------------------------------------------------------------------------- */
-
-/**
- * usb_gadget_giveback_request - give the request back to the gadget layer
- * Context: in_interrupt()
- *
- * This is called by device controller drivers in order to return the
- * completed request back to the gadget layer.
- */
-void usb_gadget_giveback_request(struct usb_ep *ep,
- struct usb_request *req)
-{
- if (likely(req->status == 0))
- usb_led_activity(USB_LED_EVENT_GADGET);
-
- req->complete(ep, req);
-}
-EXPORT_SYMBOL_GPL(usb_gadget_giveback_request);
-
-/* ------------------------------------------------------------------------- */
-
-/**
- * gadget_find_ep_by_name - returns ep whose name is the same as sting passed
- * in second parameter or NULL if searched endpoint not found
- * @g: controller to check for quirk
- * @name: name of searched endpoint
- */
-struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name)
-{
- struct usb_ep *ep;
-
- gadget_for_each_ep(ep, g) {
- if (!strcmp(ep->name, name))
- return ep;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(gadget_find_ep_by_name);
-
-/* ------------------------------------------------------------------------- */
-
-int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
- struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
- struct usb_ss_ep_comp_descriptor *ep_comp)
-{
- u8 type;
- u16 max;
- int num_req_streams = 0;
-
- /* endpoint already claimed? */
- if (ep->claimed)
- return 0;
-
- type = usb_endpoint_type(desc);
- max = 0x7ff & usb_endpoint_maxp(desc);
-
- if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
- return 0;
- if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
- return 0;
-
- if (max > ep->maxpacket_limit)
- return 0;
-
- /* "high bandwidth" works only at high speed */
- if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
- return 0;
-
- switch (type) {
- case USB_ENDPOINT_XFER_CONTROL:
- /* only support ep0 for portable CONTROL traffic */
- return 0;
- case USB_ENDPOINT_XFER_ISOC:
- if (!ep->caps.type_iso)
- return 0;
- /* ISO: limit 1023 bytes full speed, 1024 high/super speed */
- if (!gadget_is_dualspeed(gadget) && max > 1023)
- return 0;
- break;
- case USB_ENDPOINT_XFER_BULK:
- if (!ep->caps.type_bulk)
- return 0;
- if (ep_comp && gadget_is_superspeed(gadget)) {
- /* Get the number of required streams from the
- * EP companion descriptor and see if the EP
- * matches it
- */
- num_req_streams = ep_comp->bmAttributes & 0x1f;
- if (num_req_streams > ep->max_streams)
- return 0;
- }
- break;
- case USB_ENDPOINT_XFER_INT:
- /* Bulk endpoints handle interrupt transfers,
- * except the toggle-quirky iso-synch kind
- */
- if (!ep->caps.type_int && !ep->caps.type_bulk)
- return 0;
- /* INT: limit 64 bytes full speed, 1024 high/super speed */
- if (!gadget_is_dualspeed(gadget) && max > 64)
- return 0;
- break;
- }
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
-
-/* ------------------------------------------------------------------------- */
-
-static void usb_gadget_state_work(struct work_struct *work)
-{
- struct usb_gadget *gadget = work_to_gadget(work);
- struct usb_udc *udc = gadget->udc;
-
- if (udc)
- sysfs_notify(&udc->dev.kobj, NULL, "state");
-}
-
-void usb_gadget_set_state(struct usb_gadget *gadget,
- enum usb_device_state state)
-{
- gadget->state = state;
- schedule_work(&gadget->work);
-}
-EXPORT_SYMBOL_GPL(usb_gadget_set_state);
-
-/* ------------------------------------------------------------------------- */
-
-static void usb_udc_connect_control(struct usb_udc *udc)
-{
- if (udc->vbus)
- usb_gadget_connect(udc->gadget);
- else
- usb_gadget_disconnect(udc->gadget);
-}
-
-/**
- * usb_udc_vbus_handler - updates the udc core vbus status, and try to
- * connect or disconnect gadget
- * @gadget: The gadget which vbus change occurs
- * @status: The vbus status
- *
- * The udc driver calls it when it wants to connect or disconnect gadget
- * according to vbus status.
- */
-void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
-{
- struct usb_udc *udc = gadget->udc;
-
- if (udc) {
- udc->vbus = status;
- usb_udc_connect_control(udc);
- }
-}
-EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
-
-/**
- * usb_gadget_udc_reset - notifies the udc core that bus reset occurs
- * @gadget: The gadget which bus reset occurs
- * @driver: The gadget driver we want to notify
- *
- * If the udc driver has bus reset handler, it needs to call this when the bus
- * reset occurs, it notifies the gadget driver that the bus reset occurs as
- * well as updates gadget state.
- */
-void usb_gadget_udc_reset(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
-{
- driver->reset(gadget);
- usb_gadget_set_state(gadget, USB_STATE_DEFAULT);
-}
-EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
-
-/**
- * usb_gadget_udc_start - tells usb device controller to start up
- * @udc: The UDC to be started
- *
- * This call is issued by the UDC Class driver when it's about
- * to register a gadget driver to the device controller, before
- * calling gadget driver's bind() method.
- *
- * It allows the controller to be powered off until strictly
- * necessary to have it powered on.
- *
- * Returns zero on success, else negative errno.
- */
-static inline int usb_gadget_udc_start(struct usb_udc *udc)
-{
- return udc->gadget->ops->udc_start(udc->gadget, udc->driver);
-}
-
-/**
- * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
- * @gadget: The device we want to stop activity
- * @driver: The driver to unbind from @gadget
- *
- * This call is issued by the UDC Class driver after calling
- * gadget driver's unbind() method.
- *
- * The details are implementation specific, but it can go as
- * far as powering off UDC completely and disable its data
- * line pullups.
- */
-static inline void usb_gadget_udc_stop(struct usb_udc *udc)
-{
- udc->gadget->ops->udc_stop(udc->gadget);
-}
-
-/**
- * usb_udc_release - release the usb_udc struct
- * @dev: the dev member within usb_udc
- *
- * This is called by driver's core in order to free memory once the last
- * reference is released.
- */
-static void usb_udc_release(struct device *dev)
-{
- struct usb_udc *udc;
-
- udc = container_of(dev, struct usb_udc, dev);
- dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
- kfree(udc);
-}
-
-static const struct attribute_group *usb_udc_attr_groups[];
-
-static void usb_udc_nop_release(struct device *dev)
-{
- dev_vdbg(dev, "%s\n", __func__);
-}
-
-/**
- * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list
- * @parent: the parent device to this udc. Usually the controller driver's
- * device.
- * @gadget: the gadget to be added to the list.
- * @release: a gadget release function.
- *
- * Returns zero on success, negative errno otherwise.
- */
-int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
- void (*release)(struct device *dev))
-{
- struct usb_udc *udc;
- struct usb_gadget_driver *driver;
- int ret = -ENOMEM;
-
- udc = kzalloc(sizeof(*udc), GFP_KERNEL);
- if (!udc)
- goto err1;
-
- dev_set_name(&gadget->dev, "gadget");
- INIT_WORK(&gadget->work, usb_gadget_state_work);
- gadget->dev.parent = parent;
-
- if (release)
- gadget->dev.release = release;
- else
- gadget->dev.release = usb_udc_nop_release;
-
- ret = device_register(&gadget->dev);
- if (ret)
- goto err2;
-
- device_initialize(&udc->dev);
- udc->dev.release = usb_udc_release;
- udc->dev.class = udc_class;
- udc->dev.groups = usb_udc_attr_groups;
- udc->dev.parent = parent;
- ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
- if (ret)
- goto err3;
-
- udc->gadget = gadget;
- gadget->udc = udc;
-
- mutex_lock(&udc_lock);
- list_add_tail(&udc->list, &udc_list);
-
- ret = device_add(&udc->dev);
- if (ret)
- goto err4;
-
- usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
- udc->vbus = true;
-
- /* pick up one of pending gadget drivers */
- list_for_each_entry(driver, &gadget_driver_pending_list, pending) {
- if (!driver->udc_name || strcmp(driver->udc_name,
- dev_name(&udc->dev)) == 0) {
- ret = udc_bind_to_driver(udc, driver);
- if (ret != -EPROBE_DEFER)
- list_del(&driver->pending);
- if (ret)
- goto err4;
- break;
- }
- }
-
- mutex_unlock(&udc_lock);
-
- return 0;
-
-err4:
- list_del(&udc->list);
- mutex_unlock(&udc_lock);
-
-err3:
- put_device(&udc->dev);
- device_del(&gadget->dev);
-
-err2:
- put_device(&gadget->dev);
- kfree(udc);
-
-err1:
- return ret;
-}
-EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);
-
-/**
- * usb_get_gadget_udc_name - get the name of the first UDC controller
- * This functions returns the name of the first UDC controller in the system.
- * Please note that this interface is usefull only for legacy drivers which
- * assume that there is only one UDC controller in the system and they need to
- * get its name before initialization. There is no guarantee that the UDC
- * of the returned name will be still available, when gadget driver registers
- * itself.
- *
- * Returns pointer to string with UDC controller name on success, NULL
- * otherwise. Caller should kfree() returned string.
- */
-char *usb_get_gadget_udc_name(void)
-{
- struct usb_udc *udc;
- char *name = NULL;
-
- /* For now we take the first available UDC */
- mutex_lock(&udc_lock);
- list_for_each_entry(udc, &udc_list, list) {
- if (!udc->driver) {
- name = kstrdup(udc->gadget->name, GFP_KERNEL);
- break;
- }
- }
- mutex_unlock(&udc_lock);
- return name;
-}
-EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name);
-
-/**
- * usb_add_gadget_udc - adds a new gadget to the udc class driver list
- * @parent: the parent device to this udc. Usually the controller
- * driver's device.
- * @gadget: the gadget to be added to the list
- *
- * Returns zero on success, negative errno otherwise.
- */
-int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
-{
- return usb_add_gadget_udc_release(parent, gadget, NULL);
-}
-EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
-
-static void usb_gadget_remove_driver(struct usb_udc *udc)
-{
- dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
- udc->driver->function);
-
- kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
-
- usb_gadget_disconnect(udc->gadget);
- udc->driver->disconnect(udc->gadget);
- udc->driver->unbind(udc->gadget);
- usb_gadget_udc_stop(udc);
-
- udc->driver = NULL;
- udc->dev.driver = NULL;
- udc->gadget->dev.driver = NULL;
-}
-
-/**
- * usb_del_gadget_udc - deletes @udc from udc_list
- * @gadget: the gadget to be removed.
- *
- * This, will call usb_gadget_unregister_driver() if
- * the @udc is still busy.
- */
-void usb_del_gadget_udc(struct usb_gadget *gadget)
-{
- struct usb_udc *udc = gadget->udc;
-
- if (!udc)
- return;
-
- dev_vdbg(gadget->dev.parent, "unregistering gadget\n");
-
- mutex_lock(&udc_lock);
- list_del(&udc->list);
-
- if (udc->driver) {
- struct usb_gadget_driver *driver = udc->driver;
-
- usb_gadget_remove_driver(udc);
- list_add(&driver->pending, &gadget_driver_pending_list);
- }
- mutex_unlock(&udc_lock);
-
- kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
- flush_work(&gadget->work);
- device_unregister(&udc->dev);
- device_unregister(&gadget->dev);
-}
-EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
-
-/* ------------------------------------------------------------------------- */
-
-static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
-{
- int ret;
-
- dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
- driver->function);
-
- udc->driver = driver;
- udc->dev.driver = &driver->driver;
- udc->gadget->dev.driver = &driver->driver;
-
- ret = driver->bind(udc->gadget, driver);
- if (ret)
- goto err1;
- ret = usb_gadget_udc_start(udc);
- if (ret) {
- driver->unbind(udc->gadget);
- goto err1;
- }
- usb_udc_connect_control(udc);
-
- kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
- return 0;
-err1:
- if (ret != -EISNAM)
- dev_err(&udc->dev, "failed to start %s: %d\n",
- udc->driver->function, ret);
- udc->driver = NULL;
- udc->dev.driver = NULL;
- udc->gadget->dev.driver = NULL;
- return ret;
-}
-
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
-{
- struct usb_udc *udc = NULL;
- int ret = -ENODEV;
-
- if (!driver || !driver->bind || !driver->setup)
- return -EINVAL;
-
- mutex_lock(&udc_lock);
- if (driver->udc_name) {
- list_for_each_entry(udc, &udc_list, list) {
- ret = strcmp(driver->udc_name, dev_name(&udc->dev));
- if (!ret)
- break;
- }
- if (!ret && !udc->driver)
- goto found;
- } else {
- list_for_each_entry(udc, &udc_list, list) {
- /* For now we take the first one */
- if (!udc->driver)
- goto found;
- }
- }
-
- if (!driver->match_existing_only) {
- list_add_tail(&driver->pending, &gadget_driver_pending_list);
- pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
- driver->function);
- ret = 0;
- }
-
- mutex_unlock(&udc_lock);
- return ret;
-found:
- ret = udc_bind_to_driver(udc, driver);
- mutex_unlock(&udc_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);
-
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
-{
- struct usb_udc *udc = NULL;
- int ret = -ENODEV;
-
- if (!driver || !driver->unbind)
- return -EINVAL;
-
- mutex_lock(&udc_lock);
- list_for_each_entry(udc, &udc_list, list)
- if (udc->driver == driver) {
- usb_gadget_remove_driver(udc);
- usb_gadget_set_state(udc->gadget,
- USB_STATE_NOTATTACHED);
- ret = 0;
- break;
- }
-
- if (ret) {
- list_del(&driver->pending);
- ret = 0;
- }
- mutex_unlock(&udc_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
-
-/* ------------------------------------------------------------------------- */
-
-static ssize_t usb_udc_srp_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t n)
-{
- struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
-
- if (sysfs_streq(buf, "1"))
- usb_gadget_wakeup(udc->gadget);
-
- return n;
-}
-static DEVICE_ATTR(srp, S_IWUSR, NULL, usb_udc_srp_store);
-
-static ssize_t usb_udc_softconn_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t n)
-{
- struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
-
- if (!udc->driver) {
- dev_err(dev, "soft-connect without a gadget driver\n");
- return -EOPNOTSUPP;
- }
-
- if (sysfs_streq(buf, "connect")) {
- usb_gadget_udc_start(udc);
- usb_gadget_connect(udc->gadget);
- } else if (sysfs_streq(buf, "disconnect")) {
- usb_gadget_disconnect(udc->gadget);
- udc->driver->disconnect(udc->gadget);
- usb_gadget_udc_stop(udc);
- } else {
- dev_err(dev, "unsupported command '%s'\n", buf);
- return -EINVAL;
- }
-
- return n;
-}
-static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
-
-static ssize_t state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
- struct usb_gadget *gadget = udc->gadget;
-
- return sprintf(buf, "%s\n", usb_state_string(gadget->state));
-}
-static DEVICE_ATTR_RO(state);
-
-#define USB_UDC_SPEED_ATTR(name, param) \
-ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
- return snprintf(buf, PAGE_SIZE, "%s\n", \
- usb_speed_string(udc->gadget->param)); \
-} \
-static DEVICE_ATTR_RO(name)
-
-static USB_UDC_SPEED_ATTR(current_speed, speed);
-static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
-
-#define USB_UDC_ATTR(name) \
-ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
- struct usb_gadget *gadget = udc->gadget; \
- \
- return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
-} \
-static DEVICE_ATTR_RO(name)
-
-static USB_UDC_ATTR(is_otg);
-static USB_UDC_ATTR(is_a_peripheral);
-static USB_UDC_ATTR(b_hnp_enable);
-static USB_UDC_ATTR(a_hnp_support);
-static USB_UDC_ATTR(a_alt_hnp_support);
-static USB_UDC_ATTR(is_selfpowered);
-
-static struct attribute *usb_udc_attrs[] = {
- &dev_attr_srp.attr,
- &dev_attr_soft_connect.attr,
- &dev_attr_state.attr,
- &dev_attr_current_speed.attr,
- &dev_attr_maximum_speed.attr,
-
- &dev_attr_is_otg.attr,
- &dev_attr_is_a_peripheral.attr,
- &dev_attr_b_hnp_enable.attr,
- &dev_attr_a_hnp_support.attr,
- &dev_attr_a_alt_hnp_support.attr,
- &dev_attr_is_selfpowered.attr,
- NULL,
-};
-
-static const struct attribute_group usb_udc_attr_group = {
- .attrs = usb_udc_attrs,
-};
-
-static const struct attribute_group *usb_udc_attr_groups[] = {
- &usb_udc_attr_group,
- NULL,
-};
-
-static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
- int ret;
-
- ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
- if (ret) {
- dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
- return ret;
- }
-
- if (udc->driver) {
- ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
- udc->driver->function);
- if (ret) {
- dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
- return ret;
- }
- }
-
- return 0;
-}
-
-static int __init usb_udc_init(void)
-{
- udc_class = class_create(THIS_MODULE, "udc");
- if (IS_ERR(udc_class)) {
- pr_err("failed to create udc class --> %ld\n",
- PTR_ERR(udc_class));
- return PTR_ERR(udc_class);
- }
-
- udc_class->dev_uevent = usb_udc_uevent;
- return 0;
-}
-subsys_initcall(usb_udc_init);
-
-static void __exit usb_udc_exit(void)
-{
- class_destroy(udc_class);
-}
-module_exit(usb_udc_exit);
-
-MODULE_DESCRIPTION("UDC Framework");
-MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 1cbb0ac6b182..f8bf290f1894 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2055,7 +2055,6 @@ static int xudc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct resource *res;
struct xusb_udc *udc;
- struct xusb_ep *ep0;
int irq;
int ret;
u32 ier;
@@ -2119,8 +2118,6 @@ static int xudc_probe(struct platform_device *pdev)
xudc_eps_init(udc);
- ep0 = &udc->ep[0];
-
/* Set device address to 0.*/
udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d8f5674809e8..2e710a4cca52 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -180,7 +180,7 @@ config USB_EHCI_MXC
config USB_EHCI_HCD_OMAP
tristate "EHCI support for OMAP3 and later chips"
depends on ARCH_OMAP
- select NOP_USB_XCEIV
+ depends on NOP_USB_XCEIV
default y
---help---
Enables support for the on-chip EHCI controller on
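Switching from select to depends on is presumably meant to stop forcing NOP_USB_XCEIV built-in (or tripping unmet-dependency warnings) when the PHY driver is configured as a module; EHCI-OMAP now simply follows however that option is set.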
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 1757ebb471b6..6816b8c371d0 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -39,11 +39,12 @@
#define DRIVER_DESC "EHCI generic platform driver"
#define EHCI_MAX_CLKS 3
+#define EHCI_MAX_RSTS 3
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
struct ehci_platform_priv {
struct clk *clks[EHCI_MAX_CLKS];
- struct reset_control *rst;
+ struct reset_control *rsts[EHCI_MAX_RSTS];
struct phy **phys;
int num_phys;
bool reset_on_resume;
@@ -149,7 +150,7 @@ static int ehci_platform_probe(struct platform_device *dev)
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv;
struct ehci_hcd *ehci;
- int err, irq, phy_num, clk = 0;
+ int err, irq, phy_num, clk = 0, rst;
if (usb_disabled())
return -ENODEV;
@@ -234,16 +235,20 @@ static int ehci_platform_probe(struct platform_device *dev)
}
}
- priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
- if (IS_ERR(priv->rst)) {
- err = PTR_ERR(priv->rst);
- if (err == -EPROBE_DEFER)
- goto err_put_clks;
- priv->rst = NULL;
- } else {
- err = reset_control_deassert(priv->rst);
+ for (rst = 0; rst < EHCI_MAX_RSTS; rst++) {
+ priv->rsts[rst] = devm_reset_control_get_shared_by_index(
+ &dev->dev, rst);
+ if (IS_ERR(priv->rsts[rst])) {
+ err = PTR_ERR(priv->rsts[rst]);
+ if (err == -EPROBE_DEFER)
+ goto err_reset;
+ priv->rsts[rst] = NULL;
+ break;
+ }
+
+ err = reset_control_deassert(priv->rsts[rst]);
if (err)
- goto err_put_clks;
+ goto err_reset;
}
if (pdata->big_endian_desc)
@@ -300,8 +305,8 @@ err_power:
if (pdata->power_off)
pdata->power_off(dev);
err_reset:
- if (priv->rst)
- reset_control_assert(priv->rst);
+ while (--rst >= 0)
+ reset_control_assert(priv->rsts[rst]);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
@@ -319,15 +324,15 @@ static int ehci_platform_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
- int clk;
+ int clk, rst;
usb_remove_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
- if (priv->rst)
- reset_control_assert(priv->rst);
+ for (rst = 0; rst < EHCI_MAX_RSTS && priv->rsts[rst]; rst++)
+ reset_control_assert(priv->rsts[rst]);
for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
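Here, and in ohci-platform below, the single optional reset becomes an array of up to three, each obtained with devm_reset_control_get_shared_by_index(); the shared variant presumably accommodates SoCs where several controllers sit behind one reset line. Any lookup failure other than -EPROBE_DEFER is treated as "no more resets", and both the probe error path and remove walk the array so that only resets which were actually deasserted get asserted again.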
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 0449235d4f22..1700908b84ef 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *ohci)
setup_timer(&ohci->io_watchdog, io_watchdog_func,
(unsigned long) ohci);
- set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index ae1c988da146..898b74086c12 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -33,11 +33,12 @@
#define DRIVER_DESC "OHCI generic platform driver"
#define OHCI_MAX_CLKS 3
+#define OHCI_MAX_RESETS 2
#define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
struct ohci_platform_priv {
struct clk *clks[OHCI_MAX_CLKS];
- struct reset_control *rst;
+ struct reset_control *resets[OHCI_MAX_RESETS];
struct phy **phys;
int num_phys;
};
@@ -117,7 +118,7 @@ static int ohci_platform_probe(struct platform_device *dev)
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv;
struct ohci_hcd *ohci;
- int err, irq, phy_num, clk = 0;
+ int err, irq, phy_num, clk = 0, rst = 0;
if (usb_disabled())
return -ENODEV;
@@ -195,19 +196,21 @@ static int ohci_platform_probe(struct platform_device *dev)
break;
}
}
-
- }
-
- priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
- if (IS_ERR(priv->rst)) {
- err = PTR_ERR(priv->rst);
- if (err == -EPROBE_DEFER)
- goto err_put_clks;
- priv->rst = NULL;
- } else {
- err = reset_control_deassert(priv->rst);
- if (err)
- goto err_put_clks;
+ for (rst = 0; rst < OHCI_MAX_RESETS; rst++) {
+ priv->resets[rst] =
+ devm_reset_control_get_shared_by_index(
+ &dev->dev, rst);
+ if (IS_ERR(priv->resets[rst])) {
+ err = PTR_ERR(priv->resets[rst]);
+ if (err == -EPROBE_DEFER)
+ goto err_reset;
+ priv->resets[rst] = NULL;
+ break;
+ }
+ err = reset_control_deassert(priv->resets[rst]);
+ if (err)
+ goto err_reset;
+ }
}
if (pdata->big_endian_desc)
@@ -265,8 +268,8 @@ err_power:
if (pdata->power_off)
pdata->power_off(dev);
err_reset:
- if (priv->rst)
- reset_control_assert(priv->rst);
+ while (--rst >= 0)
+ reset_control_assert(priv->resets[rst]);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
@@ -284,15 +287,15 @@ static int ohci_platform_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
- int clk;
+ int clk, rst;
usb_remove_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
- if (priv->rst)
- reset_control_assert(priv->rst);
+ for (rst = 0; rst < OHCI_MAX_RESETS && priv->resets[rst]; rst++)
+ reset_control_assert(priv->resets[rst]);
for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
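
This OHCI change mirrors the EHCI one above, and the switch to shared controls is presumably the point of the pair: a shared reset line is deasserted when the first consumer asks and re-asserted only after every consumer has asserted it again, so EHCI and OHCI blocks hanging off the same lines can probe and unbind in either order without yanking each other back into reset.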
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bad0d1f9a41d..6afe32381209 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -37,7 +37,9 @@
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
- unsigned int cycle_state, gfp_t flags)
+ unsigned int cycle_state,
+ unsigned int max_packet,
+ gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
@@ -53,6 +55,14 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
return NULL;
}
+ if (max_packet) {
+ seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+ if (!seg->bounce_buf) {
+ dma_pool_free(xhci->segment_pool, seg->trbs, dma);
+ kfree(seg);
+ return NULL;
+ }
+ }
/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
@@ -70,6 +80,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
seg->trbs = NULL;
}
+ kfree(seg->bounce_buf);
kfree(seg);
}
@@ -317,11 +328,11 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring,
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment **first, struct xhci_segment **last,
unsigned int num_segs, unsigned int cycle_state,
- enum xhci_ring_type type, gfp_t flags)
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_segment *prev;
- prev = xhci_segment_alloc(xhci, cycle_state, flags);
+ prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
if (!prev)
return -ENOMEM;
num_segs--;
@@ -330,7 +341,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
while (num_segs > 0) {
struct xhci_segment *next;
- next = xhci_segment_alloc(xhci, cycle_state, flags);
+ next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
if (!next) {
prev = *first;
while (prev) {
@@ -360,7 +371,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
*/
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
unsigned int num_segs, unsigned int cycle_state,
- enum xhci_ring_type type, gfp_t flags)
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_ring *ring;
int ret;
@@ -370,13 +381,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
return NULL;
ring->num_segs = num_segs;
+ ring->bounce_buf_len = max_packet;
INIT_LIST_HEAD(&ring->td_list);
ring->type = type;
if (num_segs == 0)
return ring;
ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
- &ring->last_seg, num_segs, cycle_state, type, flags);
+ &ring->last_seg, num_segs, cycle_state, type,
+ max_packet, flags);
if (ret)
goto fail;
@@ -470,7 +483,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->num_segs : num_segs_needed;
ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
- num_segs, ring->cycle_state, ring->type, flags);
+ num_segs, ring->cycle_state, ring->type,
+ ring->bounce_buf_len, flags);
if (ret)
return -ENOMEM;
@@ -652,7 +666,8 @@ struct xhci_ring *xhci_stream_id_to_ring(
*/
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
- unsigned int num_streams, gfp_t mem_flags)
+ unsigned int num_streams,
+ unsigned int max_packet, gfp_t mem_flags)
{
struct xhci_stream_info *stream_info;
u32 cur_stream;
@@ -704,9 +719,11 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
* and add their segment DMA addresses to the radix tree.
* Stream 0 is reserved.
*/
+
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
- xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
+ xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
+ mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
@@ -1003,7 +1020,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
}
/* Allocate endpoint 0 ring */
- dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
if (!dev->eps[0].ring)
goto fail;
@@ -1434,22 +1451,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
return -EINVAL;
ring_type = usb_endpoint_type(&ep->desc);
- /* Set up the endpoint ring */
- virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 2, 1, ring_type, mem_flags);
- if (!virt_dev->eps[ep_index].new_ring) {
- /* Attempt to use the ring cache */
- if (virt_dev->num_rings_cached == 0)
- return -ENOMEM;
- virt_dev->num_rings_cached--;
- virt_dev->eps[ep_index].new_ring =
- virt_dev->ring_cache[virt_dev->num_rings_cached];
- virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
- xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
- 1, ring_type);
- }
- virt_dev->eps[ep_index].skip = false;
- ep_ring = virt_dev->eps[ep_index].new_ring;
/*
* Get values to fill the endpoint context, mostly from ep descriptor.
@@ -1479,6 +1480,23 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
mult = 0;
+ /* Set up the endpoint ring */
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ if (!virt_dev->eps[ep_index].new_ring) {
+ /* Attempt to use the ring cache */
+ if (virt_dev->num_rings_cached == 0)
+ return -ENOMEM;
+ virt_dev->num_rings_cached--;
+ virt_dev->eps[ep_index].new_ring =
+ virt_dev->ring_cache[virt_dev->num_rings_cached];
+ virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+ xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+ 1, ring_type);
+ }
+ virt_dev->eps[ep_index].skip = false;
+ ep_ring = virt_dev->eps[ep_index].new_ring;
+
/* Fill the endpoint context */
ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
EP_INTERVAL(interval) |
@@ -2409,7 +2427,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
goto fail;
/* Set up the command ring to have one segments for now. */
- xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2454,7 +2472,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
*/
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
- flags);
+ 0, flags);
if (!xhci->event_ring)
goto fail;
if (xhci_check_trb_in_td_math(xhci) < 0)
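
Throughout xhci-mem.c the new max_packet argument rides along from xhci_ring_alloc() down to each segment, with zero meaning "no bounce buffer". A sketch of that decision in isolation (hypothetical helper; GFP_DMA as in the patch):

#include <linux/slab.h>

static void *example_alloc_bounce(unsigned int max_packet, gfp_t flags)
{
	/* 0 = no TD-fragment alignment needed (ep0, command, event rings) */
	if (!max_packet)
		return NULL;
	return kzalloc(max_packet, flags | GFP_DMA);
}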
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 1f3f981fe7f8..ed56bf9ed885 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -18,7 +18,6 @@
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
#include <linux/slab.h>
-#include <linux/usb/xhci_pdriver.h>
#include <linux/acpi.h>
#include "xhci.h"
@@ -138,8 +137,6 @@ MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
static int xhci_plat_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
- struct usb_xhci_pdata *pdata = dev_get_platdata(&pdev->dev);
const struct of_device_id *match;
const struct hc_driver *driver;
struct xhci_hcd *xhci;
@@ -202,7 +199,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
}
xhci = hcd_to_xhci(hcd);
- match = of_match_node(usb_xhci_of_match, node);
+ match = of_match_node(usb_xhci_of_match, pdev->dev.of_node);
if (match) {
const struct xhci_plat_priv *priv_match = match->data;
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -223,8 +220,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto disable_clk;
}
- if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
- (pdata && pdata->usb3_lpm_capable))
+ if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
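
Dropping the platform-data path works because device_property_read_bool() consults the device's firmware node, so one call covers DT, ACPI _DSD and built-in properties alike. A hedged fragment (hypothetical function name):

#include <linux/property.h>
#include <linux/platform_device.h>

static bool example_lpm_capable(struct platform_device *pdev)
{
	/* true if the firmware node carries "usb3-lpm-capable" */
	return device_property_read_bool(&pdev->dev, "usb3-lpm-capable");
}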
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d7d502578d79..918e0c739b79 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -66,6 +66,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
@@ -88,36 +89,25 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
return seg->dma + (segment_offset * sizeof(*trb));
}
-/* Does this link TRB point to the first segment in a ring,
- * or was the previous TRB the last TRB on the last segment in the ERST?
- */
-static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
- struct xhci_segment *seg, union xhci_trb *trb)
+static bool trb_is_link(union xhci_trb *trb)
{
- if (ring == xhci->event_ring)
- return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
- (seg->next == xhci->event_ring->first_seg);
- else
- return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+ return TRB_TYPE_LINK_LE32(trb->link.control);
}
-/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
- * segment? I.e. would the updated event TRB pointer step off the end of the
- * event seg?
- */
-static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
- struct xhci_segment *seg, union xhci_trb *trb)
+static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
- if (ring == xhci->event_ring)
- return trb == &seg->trbs[TRBS_PER_SEGMENT];
- else
- return TRB_TYPE_LINK_LE32(trb->link.control);
+ return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}
-static int enqueue_is_link_trb(struct xhci_ring *ring)
+static bool last_trb_on_ring(struct xhci_ring *ring,
+ struct xhci_segment *seg, union xhci_trb *trb)
{
- struct xhci_link_trb *link = &ring->enqueue->link;
- return TRB_TYPE_LINK_LE32(link->control);
+ return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+}
+
+static bool link_trb_toggles_cycle(union xhci_trb *trb)
+{
+ return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}
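
Where last_trb()/last_trb_on_last_seg() decided internally whether a ring was the event ring, the new predicates are orthogonal and the callers spell the distinction out. An illustrative (not verbatim) combination:

	bool done_with_seg;

	if (ring->type == TYPE_EVENT)
		/* event segments carry no link TRB; test for the last slot */
		done_with_seg = last_trb_on_seg(ring->deq_seg, ring->dequeue);
	else
		/* all other rings end each segment with a link TRB */
		done_with_seg = trb_is_link(ring->dequeue);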
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -129,7 +119,7 @@ static void next_trb(struct xhci_hcd *xhci,
struct xhci_segment **seg,
union xhci_trb **trb)
{
- if (last_trb(xhci, ring, *seg, *trb)) {
+ if (trb_is_link(*trb)) {
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
@@ -145,32 +135,29 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
ring->deq_updates++;
- /*
- * If this is not event ring, and the dequeue pointer
- * is not on a link TRB, there is one more usable TRB
- */
- if (ring->type != TYPE_EVENT &&
- !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
- ring->num_trbs_free++;
-
- do {
- /*
- * Update the dequeue pointer further if that was a link TRB or
- * we're at the end of an event ring segment (which doesn't have
- * link TRBS)
- */
- if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
- if (ring->type == TYPE_EVENT &&
- last_trb_on_last_seg(xhci, ring,
- ring->deq_seg, ring->dequeue)) {
- ring->cycle_state ^= 1;
- }
- ring->deq_seg = ring->deq_seg->next;
- ring->dequeue = ring->deq_seg->trbs;
- } else {
+ /* event ring doesn't have link trbs, check for last trb */
+ if (ring->type == TYPE_EVENT) {
+ if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
ring->dequeue++;
+ return;
}
- } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
+ if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
+ ring->cycle_state ^= 1;
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ return;
+ }
+
+ /* All other rings have link trbs */
+ if (!trb_is_link(ring->dequeue)) {
+ ring->dequeue++;
+ ring->num_trbs_free++;
+ }
+ while (trb_is_link(ring->dequeue)) {
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ }
+ return;
}
/*
@@ -198,50 +185,42 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
/* If this is not event ring, there is one less usable TRB */
- if (ring->type != TYPE_EVENT &&
- !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
+ if (!trb_is_link(ring->enqueue))
ring->num_trbs_free--;
next = ++(ring->enqueue);
ring->enq_updates++;
- /* Update the dequeue pointer further if that was a link TRB or we're at
- * the end of an event ring segment (which doesn't have link TRBS)
- */
- while (last_trb(xhci, ring, ring->enq_seg, next)) {
- if (ring->type != TYPE_EVENT) {
- /*
- * If the caller doesn't plan on enqueueing more
- * TDs before ringing the doorbell, then we
- * don't want to give the link TRB to the
- * hardware just yet. We'll give the link TRB
- * back in prepare_ring() just before we enqueue
- * the TD at the top of the ring.
- */
- if (!chain && !more_trbs_coming)
- break;
+ /* Update the dequeue pointer further if that was a link TRB */
+ while (trb_is_link(next)) {
- /* If we're not dealing with 0.95 hardware or
- * isoc rings on AMD 0.96 host,
- * carry over the chain bit of the previous TRB
- * (which may mean the chain bit is cleared).
- */
- if (!(ring->type == TYPE_ISOC &&
- (xhci->quirks & XHCI_AMD_0x96_HOST))
- && !xhci_link_trb_quirk(xhci)) {
- next->link.control &=
- cpu_to_le32(~TRB_CHAIN);
- next->link.control |=
- cpu_to_le32(chain);
- }
- /* Give this link TRB to the hardware */
- wmb();
- next->link.control ^= cpu_to_le32(TRB_CYCLE);
+ /*
+ * If the caller doesn't plan on enqueueing more TDs before
+ * ringing the doorbell, then we don't want to give the link TRB
+ * to the hardware just yet. We'll give the link TRB back in
+ * prepare_ring() just before we enqueue the TD at the top of
+ * the ring.
+ */
+ if (!chain && !more_trbs_coming)
+ break;
- /* Toggle the cycle bit after the last ring segment. */
- if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
- ring->cycle_state ^= 1;
- }
+ /* If we're not dealing with 0.95 hardware or isoc rings on
+ * AMD 0.96 host, carry over the chain bit of the previous TRB
+ * (which may mean the chain bit is cleared).
+ */
+ if (!(ring->type == TYPE_ISOC &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
+ !xhci_link_trb_quirk(xhci)) {
+ next->link.control &= cpu_to_le32(~TRB_CHAIN);
+ next->link.control |= cpu_to_le32(chain);
}
+ /* Give this link TRB to the hardware */
+ wmb();
+ next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (link_trb_toggles_cycle(next))
+ ring->cycle_state ^= 1;
+
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
@@ -626,6 +605,31 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
}
}
+void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ struct xhci_td *td)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct xhci_segment *seg = td->bounce_seg;
+ struct urb *urb = td->urb;
+
+ if (!seg || !urb)
+ return;
+
+ if (usb_urb_dir_out(urb)) {
+ dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+ DMA_TO_DEVICE);
+ return;
+ }
+
+	/* for IN transfers we need to copy the data from bounce to sg */
+ sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
+ seg->bounce_len, seg->bounce_offs);
+ dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+ DMA_FROM_DEVICE);
+ seg->bounce_len = 0;
+ seg->bounce_offs = 0;
+}
+
/*
* When we get a command completion for a Stop Endpoint Command, we need to
* unlink any cancelled TDs from the ring. There are two ways to do that:
@@ -745,6 +749,9 @@ remove_finished_td:
/* Doesn't matter what we pass for status, since the core will
* just overwrite it (because the URB has been unlinked).
*/
+ ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+ if (ep_ring && cur_td->bounce_seg)
+ xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
xhci_giveback_urb_in_irq(xhci, cur_td, 0);
/* Stop processing the cancelled list if the watchdog timer is
@@ -767,6 +774,9 @@ static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
list_del_init(&cur_td->td_list);
if (!list_empty(&cur_td->cancelled_td_list))
list_del_init(&cur_td->cancelled_td_list);
+
+ if (cur_td->bounce_seg)
+ xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
@@ -917,7 +927,7 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
* the dequeue pointer one segment further, or we'll jump off
* the segment into la-la-land.
*/
- if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+ if (trb_is_link(ep_ring->dequeue)) {
ep_ring->deq_seg = ep_ring->deq_seg->next;
ep_ring->dequeue = ep_ring->deq_seg->trbs;
}
@@ -926,8 +936,7 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
/* We have more usable TRBs */
ep_ring->num_trbs_free++;
ep_ring->dequeue++;
- if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
- ep_ring->dequeue)) {
+ if (trb_is_link(ep_ring->dequeue)) {
if (ep_ring->dequeue ==
dev->eps[ep_index].queued_deq_ptr)
break;
@@ -1865,6 +1874,10 @@ td_cleanup:
urb = td->urb;
urb_priv = urb->hcpriv;
+ /* if a bounce buffer was used to align this td then unmap it */
+ if (td->bounce_seg)
+ xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
+
/* Do one last check of the actual transfer length.
* If the host controller said we transferred more data than the buffer
* length, urb->actual_length will be a very big number (since it's
@@ -2865,36 +2878,29 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
}
- if (enqueue_is_link_trb(ep_ring)) {
- struct xhci_ring *ring = ep_ring;
- union xhci_trb *next;
-
- next = ring->enqueue;
+ while (trb_is_link(ep_ring->enqueue)) {
+ /* If we're not dealing with 0.95 hardware or isoc rings
+ * on AMD 0.96 host, clear the chain bit.
+ */
+ if (!xhci_link_trb_quirk(xhci) &&
+ !(ep_ring->type == TYPE_ISOC &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST)))
+ ep_ring->enqueue->link.control &=
+ cpu_to_le32(~TRB_CHAIN);
+ else
+ ep_ring->enqueue->link.control |=
+ cpu_to_le32(TRB_CHAIN);
- while (last_trb(xhci, ring, ring->enq_seg, next)) {
- /* If we're not dealing with 0.95 hardware or isoc rings
- * on AMD 0.96 host, clear the chain bit.
- */
- if (!xhci_link_trb_quirk(xhci) &&
- !(ring->type == TYPE_ISOC &&
- (xhci->quirks & XHCI_AMD_0x96_HOST)))
- next->link.control &= cpu_to_le32(~TRB_CHAIN);
- else
- next->link.control |= cpu_to_le32(TRB_CHAIN);
+ wmb();
+ ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
- wmb();
- next->link.control ^= cpu_to_le32(TRB_CYCLE);
+ /* Toggle the cycle bit after the last ring segment. */
+ if (link_trb_toggles_cycle(ep_ring->enqueue))
+ ep_ring->cycle_state ^= 1;
- /* Toggle the cycle bit after the last ring segment. */
- if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
- ring->cycle_state ^= 1;
- }
- ring->enq_seg = ring->enq_seg->next;
- ring->enqueue = ring->enq_seg->trbs;
- next = ring->enqueue;
- }
+ ep_ring->enq_seg = ep_ring->enq_seg->next;
+ ep_ring->enqueue = ep_ring->enq_seg->trbs;
}
-
return 0;
}
@@ -3092,7 +3098,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
*/
static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
int trb_buff_len, unsigned int td_total_len,
- struct urb *urb, unsigned int num_trbs_left)
+ struct urb *urb, bool more_trbs_coming)
{
u32 maxp, total_packet_count;
@@ -3101,7 +3107,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
return ((td_total_len - transferred) >> 10);
/* One TRB with a zero-length data packet. */
- if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
+ if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
trb_buff_len == td_total_len)
return 0;
@@ -3116,37 +3122,103 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
+
+static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
+ u32 *trb_buff_len, struct xhci_segment *seg)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ unsigned int unalign;
+ unsigned int max_pkt;
+ u32 new_buff_len;
+
+ max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+ /* we got lucky, last normal TRB data on segment is packet aligned */
+ if (unalign == 0)
+ return 0;
+
+ xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
+ unalign, *trb_buff_len);
+
+	/* is the last normal TRB alignable by splitting it? */
+ if (*trb_buff_len > unalign) {
+ *trb_buff_len -= unalign;
+ xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
+ return 0;
+ }
+
+ /*
+	 * We want enqd_len + trb_buff_len to sum up to a number which is
+	 * divisible by the endpoint's wMaxPacketSize. IOW:
+ * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+ */
+ new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+ if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
+ new_buff_len = (urb->transfer_buffer_length - enqd_len);
+
+	/* create a max_pkt sized bounce buffer pointed to by the last TRB */
+ if (usb_urb_dir_out(urb)) {
+ sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_TO_DEVICE);
+ } else {
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_FROM_DEVICE);
+ }
+
+ if (dma_mapping_error(dev, seg->bounce_dma)) {
+ /* try without aligning. Some host controllers survive */
+ xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
+ return 0;
+ }
+ *trb_buff_len = new_buff_len;
+ seg->bounce_len = new_buff_len;
+ seg->bounce_offs = enqd_len;
+
+ xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
+
+ return 1;
+}
+
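
A worked example with made-up numbers shows both branches of xhci_align_td(): with max_pkt = 512 and enqd_len = 1000, a trb_buff_len of 200 gives unalign = (1000 + 200) % 512 = 176; since 200 > 176 the TRB is shrunk by 176 to 24 bytes, so the 1024 bytes enqueued end exactly on a packet boundary. With trb_buff_len = 20 instead, unalign = 508 and the TRB cannot be split, so new_buff_len = 512 - (1000 % 512) = 24 bytes (capped at what remains of the URB) are staged through the segment's bounce buffer.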
/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
- struct xhci_ring *ep_ring;
+ struct xhci_ring *ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
struct xhci_generic_trb *start_trb;
struct scatterlist *sg = NULL;
- bool more_trbs_coming;
- bool zero_length_needed;
- unsigned int num_trbs, last_trb_num, i;
+ bool more_trbs_coming = true;
+ bool need_zero_pkt = false;
+ bool first_trb = true;
+ unsigned int num_trbs;
unsigned int start_cycle, num_sgs = 0;
- unsigned int running_total, block_len, trb_buff_len;
- unsigned int full_len;
- int ret;
+ unsigned int enqd_len, block_len, trb_buff_len, full_len;
+ int sent_len, ret;
u32 field, length_field, remainder;
- u64 addr;
+ u64 addr, send_addr;
- ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
- if (!ep_ring)
+ ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ring)
return -EINVAL;
+ full_len = urb->transfer_buffer_length;
/* If we have scatter/gather list, we use it. */
if (urb->num_sgs) {
num_sgs = urb->num_mapped_sgs;
sg = urb->sg;
+ addr = (u64) sg_dma_address(sg);
+ block_len = sg_dma_len(sg);
num_trbs = count_sg_trbs_needed(urb);
- } else
+ } else {
num_trbs = count_trbs_needed(urb);
-
+ addr = (u64) urb->transfer_dma;
+ block_len = full_len;
+ }
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
@@ -3155,20 +3227,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
urb_priv = urb->hcpriv;
- last_trb_num = num_trbs - 1;
-
/* Deal with URB_ZERO_PACKET - need one more td/trb */
- zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
- urb_priv->length == 2;
- if (zero_length_needed) {
- num_trbs++;
- xhci_dbg(xhci, "Creating zero length td.\n");
- ret = prepare_transfer(xhci, xhci->devs[slot_id],
- ep_index, urb->stream_id,
- 1, urb, 1, mem_flags);
- if (unlikely(ret < 0))
- return ret;
- }
+ if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
+ need_zero_pkt = true;
td = urb_priv->td[0];
@@ -3177,102 +3238,97 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
* until we've finished creating all the other TRBs. The ring's cycle
* state may change as we enqueue the other TRBs, so save it too.
*/
- start_trb = &ep_ring->enqueue->generic;
- start_cycle = ep_ring->cycle_state;
-
- full_len = urb->transfer_buffer_length;
- running_total = 0;
- block_len = 0;
+ start_trb = &ring->enqueue->generic;
+ start_cycle = ring->cycle_state;
+ send_addr = addr;
/* Queue the TRBs, even if they are zero-length */
- for (i = 0; i < num_trbs; i++) {
+ for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
field = TRB_TYPE(TRB_NORMAL);
- if (block_len == 0) {
- /* A new contiguous block. */
- if (sg) {
- addr = (u64) sg_dma_address(sg);
- block_len = sg_dma_len(sg);
- } else {
- addr = (u64) urb->transfer_dma;
- block_len = full_len;
- }
- /* TRB buffer should not cross 64KB boundaries */
- trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
- trb_buff_len = min_t(unsigned int,
- trb_buff_len,
- block_len);
- } else {
- /* Further through the contiguous block. */
- trb_buff_len = block_len;
- if (trb_buff_len > TRB_MAX_BUFF_SIZE)
- trb_buff_len = TRB_MAX_BUFF_SIZE;
- }
+ /* TRB buffer should not cross 64KB boundaries */
+ trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+ trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
- if (running_total + trb_buff_len > full_len)
- trb_buff_len = full_len - running_total;
+ if (enqd_len + trb_buff_len > full_len)
+ trb_buff_len = full_len - enqd_len;
/* Don't change the cycle bit of the first TRB until later */
- if (i == 0) {
+ if (first_trb) {
+ first_trb = false;
if (start_cycle == 0)
field |= TRB_CYCLE;
} else
- field |= ep_ring->cycle_state;
+ field |= ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
- if (i < last_trb_num) {
+ if (enqd_len + trb_buff_len < full_len) {
field |= TRB_CHAIN;
- } else {
- field |= TRB_IOC;
- if (i == last_trb_num)
- td->last_trb = ep_ring->enqueue;
- else if (zero_length_needed) {
- trb_buff_len = 0;
- urb_priv->td[1]->last_trb = ep_ring->enqueue;
+ if (trb_is_link(ring->enqueue + 1)) {
+ if (xhci_align_td(xhci, urb, enqd_len,
+ &trb_buff_len,
+ ring->enq_seg)) {
+ send_addr = ring->enq_seg->bounce_dma;
+ /* assuming TD won't span 2 segs */
+ td->bounce_seg = ring->enq_seg;
+ }
}
}
+ if (enqd_len + trb_buff_len >= full_len) {
+ field &= ~TRB_CHAIN;
+ field |= TRB_IOC;
+ more_trbs_coming = false;
+ td->last_trb = ring->enqueue;
+ }
/* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
/* Set the TRB length, TD size, and interrupter fields. */
- remainder = xhci_td_remainder(xhci, running_total,
- trb_buff_len, full_len,
- urb, num_trbs - i - 1);
+ remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
+ full_len, urb, more_trbs_coming);
length_field = TRB_LEN(trb_buff_len) |
TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
- if (i < num_trbs - 1)
- more_trbs_coming = true;
- else
- more_trbs_coming = false;
- queue_trb(xhci, ep_ring, more_trbs_coming,
- lower_32_bits(addr),
- upper_32_bits(addr),
+ queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
+ lower_32_bits(send_addr),
+ upper_32_bits(send_addr),
length_field,
field);
- running_total += trb_buff_len;
addr += trb_buff_len;
- block_len -= trb_buff_len;
-
- if (sg) {
- if (block_len == 0) {
- /* New sg entry */
- --num_sgs;
- if (num_sgs == 0)
- break;
+ sent_len = trb_buff_len;
+
+ while (sg && sent_len >= block_len) {
+ /* New sg entry */
+ --num_sgs;
+ sent_len -= block_len;
+ if (num_sgs != 0) {
sg = sg_next(sg);
+ block_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ addr += sent_len;
}
}
+ block_len -= sent_len;
+ send_addr = addr;
+ }
+
+ if (need_zero_pkt) {
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+ 1, urb, 1, mem_flags);
+ urb_priv->td[1]->last_trb = ring->enqueue;
+ field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
+ queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
}
- check_trb_math(urb, running_total);
+ check_trb_math(urb, enqd_len);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
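
Note the scatterlist bookkeeping at the bottom of the new loop: because a bounce-aligned TRB can cover more bytes than remain in the current sg entry (sg_pcopy_to_buffer() gathers across entries), sent_len may reach or exceed block_len more than once, so the inner while walks as many sg entries as needed and re-bases addr sent_len bytes into the final one.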
@@ -3666,7 +3722,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Set the TRB length, TD size, & interrupter fields. */
remainder = xhci_td_remainder(xhci, running_total,
trb_buff_len, td_len,
- urb, trbs_per_td - j - 1);
+ urb, more_trbs_coming);
length_field = TRB_LEN(trb_buff_len) |
TRB_INTR_TARGET(0);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f2f9518c53ab..01d96c9b3a75 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -490,8 +490,6 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
- set_timer_slack(&xhci->comp_mode_recovery_timer,
- msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
add_timer(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance mode recovery timer initialized");
@@ -3139,6 +3137,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_input_control_ctx *ctrl_ctx;
unsigned int ep_index;
unsigned int num_stream_ctxs;
+ unsigned int max_packet;
unsigned long flags;
u32 changed_ep_bitmask = 0;
@@ -3212,9 +3211,11 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc));
vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
num_stream_ctxs,
- num_streams, mem_flags);
+ num_streams,
+ max_packet, mem_flags);
if (!vdev->eps[ep_index].stream_info)
goto cleanup;
/* Set maxPstreams in endpoint context and update deq ptr to
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index b0b8d0f8791a..b2c1dc5dc0f3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1347,6 +1347,11 @@ struct xhci_segment {
/* private to HCD */
struct xhci_segment *next;
dma_addr_t dma;
+	/* Max packet sized bounce buffer for td-fragment alignment */
+ dma_addr_t bounce_dma;
+ void *bounce_buf;
+ unsigned int bounce_offs;
+ unsigned int bounce_len;
};
struct xhci_td {
@@ -1356,6 +1361,7 @@ struct xhci_td {
struct xhci_segment *start_seg;
union xhci_trb *first_trb;
union xhci_trb *last_trb;
+ struct xhci_segment *bounce_seg;
/* actual_length of the URB has already been set */
bool urb_length_set;
};
@@ -1405,6 +1411,7 @@ struct xhci_ring {
unsigned int num_segs;
unsigned int num_trbs_free;
unsigned int num_trbs_free_temp;
+ unsigned int bounce_buf_len;
enum xhci_ring_type type;
bool last_td_was_short;
struct radix_tree_root *trb_address_map;
@@ -1807,7 +1814,8 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
unsigned int ep_index);
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
- unsigned int num_streams, gfp_t flags);
+ unsigned int num_streams,
+ unsigned int max_packet, gfp_t flags);
void xhci_free_stream_info(struct xhci_hcd *xhci,
struct xhci_stream_info *stream_info);
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
diff --git a/drivers/usb/image/microtek.h b/drivers/usb/image/microtek.h
index ccce318f20a0..7e32ae787136 100644
--- a/drivers/usb/image/microtek.h
+++ b/drivers/usb/image/microtek.h
@@ -13,11 +13,11 @@ typedef void (*mts_scsi_cmnd_callback)(struct scsi_cmnd *);
struct mts_transfer_context
{
- struct mts_desc* instance;
+ struct mts_desc *instance;
mts_scsi_cmnd_callback final_callback;
struct scsi_cmnd *srb;
- void* data;
+ void *data;
unsigned data_length;
int data_pipe;
int fragment;
@@ -38,7 +38,7 @@ struct mts_desc {
u8 ep_response;
u8 ep_image;
- struct Scsi_Host * host;
+ struct Scsi_Host *host;
struct urb *urb;
struct mts_transfer_context context;
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index e9e5ae521fa6..6e705971d637 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -260,11 +260,12 @@ config USB_CHAOSKEY
tristate "ChaosKey random number generator driver support"
depends on HW_RANDOM
help
- Say Y here if you want to connect an AltusMetrum ChaosKey to
- your computer's USB port. The ChaosKey is a hardware random
- number generator which hooks into the kernel entropy pool to
- ensure a large supply of entropy for /dev/random and
- /dev/urandom and also provides direct access via /dev/chaoskeyX
+ Say Y here if you want to connect an AltusMetrum ChaosKey or
+ Araneus Alea I to your computer's USB port. These devices
+ are hardware random number generators which hook into the
+ kernel entropy pool to ensure a large supply of entropy for
+	  /dev/random and /dev/urandom and also provide direct access
+	  via /dev/chaoskeyX.
To compile this driver as a module, choose M here: the
module will be called chaoskey.
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 76350e4ee807..6ddd08a32777 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -55,9 +55,13 @@ MODULE_LICENSE("GPL");
#define CHAOSKEY_VENDOR_ID 0x1d50 /* OpenMoko */
#define CHAOSKEY_PRODUCT_ID 0x60c6 /* ChaosKey */
+#define ALEA_VENDOR_ID 0x12d8 /* Araneus */
+#define ALEA_PRODUCT_ID 0x0001 /* Alea I */
+
#define CHAOSKEY_BUF_LEN 64 /* max size of USB full speed packet */
-#define NAK_TIMEOUT (HZ) /* stall/wait timeout for device */
+#define NAK_TIMEOUT (HZ) /* normal stall/wait timeout */
+#define ALEA_FIRST_TIMEOUT (HZ*3) /* first stall/wait timeout for Alea */
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define USB_CHAOSKEY_MINOR_BASE 0
@@ -69,6 +73,7 @@ MODULE_LICENSE("GPL");
static const struct usb_device_id chaoskey_table[] = {
{ USB_DEVICE(CHAOSKEY_VENDOR_ID, CHAOSKEY_PRODUCT_ID) },
+ { USB_DEVICE(ALEA_VENDOR_ID, ALEA_PRODUCT_ID) },
{ },
};
MODULE_DEVICE_TABLE(usb, chaoskey_table);
@@ -84,6 +89,7 @@ struct chaoskey {
int open; /* open count */
bool present; /* device not disconnected */
bool reading; /* ongoing IO */
+ bool reads_started; /* track first read for Alea */
int size; /* size of buf */
int valid; /* bytes of buf read */
int used; /* bytes of buf consumed */
@@ -188,6 +194,9 @@ static int chaoskey_probe(struct usb_interface *interface,
dev->in_ep = in_ep;
+ if (udev->descriptor.idVendor != ALEA_VENDOR_ID)
+ dev->reads_started = 1;
+
dev->size = size;
dev->present = 1;
@@ -357,6 +366,7 @@ static int _chaoskey_fill(struct chaoskey *dev)
{
DEFINE_WAIT(wait);
int result;
+ bool started;
usb_dbg(dev->interface, "fill");
@@ -389,10 +399,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
goto out;
}
+ /* The first read on the Alea takes a little under 2 seconds.
+ * Reads after the first read take only a few microseconds
+ * though. Presumably the entropy-generating circuit needs
+ * time to ramp up. So, we wait longer on the first read.
+ */
+ started = dev->reads_started;
+ dev->reads_started = true;
result = wait_event_interruptible_timeout(
dev->wait_q,
!dev->reading,
- NAK_TIMEOUT);
+			(started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT));
if (result < 0)
goto out;
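
A compact restatement of the first-read policy (hypothetical helper over the same fields): probe pre-sets reads_started for every non-Alea device, so only the Alea's first fill ever sees the extended timeout.

static long chaoskey_fill_timeout(struct chaoskey *dev)
{
	bool started = dev->reads_started;

	/* all later fills fall back to the short NAK timeout */
	dev->reads_started = true;
	return started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT;
}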
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 15666ad7c772..02abfcdfbf7b 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -1285,18 +1285,22 @@ int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 *data)
}
int sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
- u32 dest, int length, size_t *bytes_written)
+ u32 dest, int length)
{
+ size_t dummy;
+
return sisusb_write_mem_bulk(sisusb, dest, src, length,
- NULL, 0, bytes_written);
+ NULL, 0, &dummy);
}
#ifdef SISUSBENDIANTEST
-int sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest,
- u32 src, int length, size_t *bytes_written)
+static int sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest,
+ u32 src, int length)
{
+ size_t dummy;
+
return sisusb_read_mem_bulk(sisusb, src, dest, length,
- NULL, bytes_written);
+ NULL, &dummy);
}
#endif
#endif
@@ -1306,16 +1310,14 @@ static void sisusb_testreadwrite(struct sisusb_usb_data *sisusb)
{
static char srcbuffer[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
char destbuffer[10];
- size_t dummy;
int i, j;
- sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7, &dummy);
+ sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7);
for (i = 1; i <= 7; i++) {
dev_dbg(&sisusb->sisusb_dev->dev,
"sisusb: rwtest %d bytes\n", i);
- sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase,
- i, &dummy);
+ sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase, i);
for (j = 0; j < i; j++) {
dev_dbg(&sisusb->sisusb_dev->dev,
"rwtest read[%d] = %x\n",
@@ -2276,7 +2278,6 @@ int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
const struct font_desc *myfont;
u8 *tempbuf;
u16 *tempbufb;
- size_t written;
static const char bootstring[] =
"SiSUSB VGA text console, (C) 2005 Thomas Winischhofer.";
static const char bootlogo[] = "(o_ //\\ V_/_";
@@ -2343,18 +2344,15 @@ int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
*(tempbufb++) = 0x0700 | bootstring[i++];
ret |= sisusb_copy_memory(sisusb, tempbuf,
- sisusb->vrambase, 8192, &written);
+ sisusb->vrambase, 8192);
vfree(tempbuf);
}
} else if (sisusb->scrbuf) {
-
ret |= sisusb_copy_memory(sisusb, (char *)sisusb->scrbuf,
- sisusb->vrambase, sisusb->scrbuf_size,
- &written);
-
+ sisusb->vrambase, sisusb->scrbuf_size);
}
if (sisusb->sisusb_cursor_size_from >= 0 &&
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index afa853209f1d..460cebf322e3 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -370,7 +370,6 @@ static void
sisusbcon_putc(struct vc_data *c, int ch, int y, int x)
{
struct sisusb_usb_data *sisusb;
- ssize_t written;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
@@ -384,7 +383,7 @@ sisusbcon_putc(struct vc_data *c, int ch, int y, int x)
sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
- (long)SISUSB_HADDR(x, y), 2, &written);
+ (long)SISUSB_HADDR(x, y), 2);
mutex_unlock(&sisusb->lock);
}
@@ -395,7 +394,6 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s,
int count, int y, int x)
{
struct sisusb_usb_data *sisusb;
- ssize_t written;
u16 *dest;
int i;
@@ -420,7 +418,7 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s,
}
sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
- (long)SISUSB_HADDR(x, y), count * 2, &written);
+ (long)SISUSB_HADDR(x, y), count * 2);
mutex_unlock(&sisusb->lock);
}
@@ -431,7 +429,6 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width)
{
struct sisusb_usb_data *sisusb;
u16 eattr = c->vc_video_erase_char;
- ssize_t written;
int i, length, cols;
u16 *dest;
@@ -475,41 +472,7 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width)
sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(x, y),
- (long)SISUSB_HADDR(x, y), length, &written);
-
- mutex_unlock(&sisusb->lock);
-}
-
-/* Interface routine */
-static void
-sisusbcon_bmove(struct vc_data *c, int sy, int sx,
- int dy, int dx, int height, int width)
-{
- struct sisusb_usb_data *sisusb;
- ssize_t written;
- int cols, length;
-
- if (width <= 0 || height <= 0)
- return;
-
- sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
- if (!sisusb)
- return;
-
- /* sisusb->lock is down */
-
- cols = sisusb->sisusb_num_columns;
-
- if (sisusb_is_inactive(c, sisusb)) {
- mutex_unlock(&sisusb->lock);
- return;
- }
-
- length = ((height * cols) - dx - (cols - width - dx)) * 2;
-
-
- sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(dx, dy),
- (long)SISUSB_HADDR(dx, dy), length, &written);
+ (long)SISUSB_HADDR(x, y), length);
mutex_unlock(&sisusb->lock);
}
@@ -519,7 +482,6 @@ static int
sisusbcon_switch(struct vc_data *c)
{
struct sisusb_usb_data *sisusb;
- ssize_t written;
int length;
/* Return value 0 means we have fully restored screen,
@@ -559,7 +521,7 @@ sisusbcon_switch(struct vc_data *c)
sisusb_copy_memory(sisusb, (unsigned char *)c->vc_origin,
(long)SISUSB_HADDR(0, 0),
- length, &written);
+ length);
mutex_unlock(&sisusb->lock);
@@ -600,7 +562,7 @@ sisusbcon_save_screen(struct vc_data *c)
}
/* interface routine */
-static int
+static void
sisusbcon_set_palette(struct vc_data *c, const unsigned char *table)
{
struct sisusb_usb_data *sisusb;
@@ -608,18 +570,18 @@ sisusbcon_set_palette(struct vc_data *c, const unsigned char *table)
/* Return value not used by vt */
- if (!CON_IS_VISIBLE(c))
- return -EINVAL;
+ if (!con_is_visible(c))
+ return;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
- return -EINVAL;
+ return;
/* sisusb->lock is down */
if (sisusb_is_inactive(c, sisusb)) {
mutex_unlock(&sisusb->lock);
- return -EINVAL;
+ return;
}
for (i = j = 0; i < 16; i++) {
@@ -634,8 +596,6 @@ sisusbcon_set_palette(struct vc_data *c, const unsigned char *table)
}
mutex_unlock(&sisusb->lock);
-
- return 0;
}
/* interface routine */
@@ -644,7 +604,6 @@ sisusbcon_blank(struct vc_data *c, int blank, int mode_switch)
{
struct sisusb_usb_data *sisusb;
u8 sr1, cr17, pmreg, cr63;
- ssize_t written;
int ret = 0;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
@@ -672,7 +631,7 @@ sisusbcon_blank(struct vc_data *c, int blank, int mode_switch)
(unsigned char *)c->vc_origin,
(u32)(sisusb->vrambase +
(c->vc_origin - sisusb->scrbuf)),
- c->vc_screenbuf_size, &written);
+ c->vc_screenbuf_size);
sisusb->con_blanked = 1;
ret = 1;
break;
@@ -723,24 +682,22 @@ sisusbcon_blank(struct vc_data *c, int blank, int mode_switch)
}
/* interface routine */
-static int
+static void
sisusbcon_scrolldelta(struct vc_data *c, int lines)
{
struct sisusb_usb_data *sisusb;
int margin = c->vc_size_row * 4;
int ul, we, p, st;
- /* The return value does not seem to be used */
-
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
- return 0;
+ return;
/* sisusb->lock is down */
if (sisusb_is_inactive(c, sisusb)) {
mutex_unlock(&sisusb->lock);
- return 0;
+ return;
}
if (!lines) /* Turn scrollback off */
@@ -780,8 +737,6 @@ sisusbcon_scrolldelta(struct vc_data *c, int lines)
sisusbcon_set_start_address(sisusb, c);
mutex_unlock(&sisusb->lock);
-
- return 1;
}
/* Interface routine */
@@ -860,7 +815,6 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
int cols = sisusb->sisusb_num_columns;
int length = ((b - t) * cols) * 2;
u16 eattr = c->vc_video_erase_char;
- ssize_t written;
/* sisusb->lock is down */
@@ -890,7 +844,7 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
}
sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(0, t),
- (long)SISUSB_HADDR(0, t), length, &written);
+ (long)SISUSB_HADDR(0, t), length);
mutex_unlock(&sisusb->lock);
@@ -903,7 +857,6 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
{
struct sisusb_usb_data *sisusb;
u16 eattr = c->vc_video_erase_char;
- ssize_t written;
int copyall = 0;
unsigned long oldorigin;
unsigned int delta = lines * c->vc_size_row;
@@ -996,18 +949,18 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
sisusb_copy_memory(sisusb,
(char *)c->vc_origin,
(u32)(sisusb->vrambase + originoffset),
- c->vc_screenbuf_size, &written);
+ c->vc_screenbuf_size);
else if (dir == SM_UP)
sisusb_copy_memory(sisusb,
(char *)c->vc_origin + c->vc_screenbuf_size - delta,
(u32)sisusb->vrambase + originoffset +
c->vc_screenbuf_size - delta,
- delta, &written);
+ delta);
else
sisusb_copy_memory(sisusb,
(char *)c->vc_origin,
(u32)(sisusb->vrambase + originoffset),
- delta, &written);
+ delta);
c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
c->vc_visible_origin = c->vc_origin;
@@ -1273,7 +1226,7 @@ sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
struct vc_data *vc = vc_cons[i].d;
if (vc && vc->vc_sw == &sisusb_con) {
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
vc->vc_sw->con_cursor(vc, CM_DRAW);
}
vc->vc_font.height = fh;
@@ -1385,7 +1338,6 @@ static const struct consw sisusb_con = {
.con_putcs = sisusbcon_putcs,
.con_cursor = sisusbcon_cursor,
.con_scroll = sisusbcon_scroll,
- .con_bmove = sisusbcon_bmove,
.con_switch = sisusbcon_switch,
.con_blank = sisusbcon_blank,
.con_font_set = sisusbcon_font_set,
@@ -1433,15 +1385,12 @@ static const struct consw sisusb_dummy_con = {
.con_putcs = SISUSBCONDUMMY,
.con_cursor = SISUSBCONDUMMY,
.con_scroll = SISUSBCONDUMMY,
- .con_bmove = SISUSBCONDUMMY,
.con_switch = SISUSBCONDUMMY,
.con_blank = SISUSBCONDUMMY,
.con_font_set = SISUSBCONDUMMY,
.con_font_get = SISUSBCONDUMMY,
.con_font_default = SISUSBCONDUMMY,
.con_font_copy = SISUSBCONDUMMY,
- .con_set_palette = SISUSBCONDUMMY,
- .con_scrolldelta = SISUSBCONDUMMY,
};
int
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
index c46ce42d4489..e79a616f0d26 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
@@ -828,7 +828,7 @@ void sisusb_delete(struct kref *kref);
int sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data);
int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 * data);
int sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
- u32 dest, int length, size_t * bytes_written);
+ u32 dest, int length);
int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init);
int sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
u8 * arg, int cmapsz, int ch512, int dorecalc,
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index b45cb77c0744..8e7737d7ac0a 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -330,6 +330,17 @@ static int usb3503_i2c_probe(struct i2c_client *i2c,
return usb3503_probe(hub);
}
+static int usb3503_i2c_remove(struct i2c_client *i2c)
+{
+ struct usb3503 *hub;
+
+ hub = i2c_get_clientdata(i2c);
+ if (hub->clk)
+ clk_disable_unprepare(hub->clk);
+
+ return 0;
+}
+
static int usb3503_platform_probe(struct platform_device *pdev)
{
struct usb3503 *hub;
@@ -338,10 +349,22 @@ static int usb3503_platform_probe(struct platform_device *pdev)
if (!hub)
return -ENOMEM;
hub->dev = &pdev->dev;
+ platform_set_drvdata(pdev, hub);
return usb3503_probe(hub);
}
+static int usb3503_platform_remove(struct platform_device *pdev)
+{
+ struct usb3503 *hub;
+
+ hub = platform_get_drvdata(pdev);
+ if (hub->clk)
+ clk_disable_unprepare(hub->clk);
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int usb3503_i2c_suspend(struct device *dev)
{
@@ -395,6 +418,7 @@ static struct i2c_driver usb3503_i2c_driver = {
.of_match_table = of_match_ptr(usb3503_of_match),
},
.probe = usb3503_i2c_probe,
+ .remove = usb3503_i2c_remove,
.id_table = usb3503_id,
};
@@ -404,6 +428,7 @@ static struct platform_driver usb3503_platform_driver = {
.of_match_table = of_match_ptr(usb3503_of_match),
},
.probe = usb3503_platform_probe,
+ .remove = usb3503_platform_remove,
};
static int __init usb3503_init(void)
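
Both new remove callbacks balance the clock reference taken while probing, and the added platform_set_drvdata() is what lets usb3503_platform_remove() find the hub again; the I2C path evidently stored its clientdata already, since usb3503_i2c_remove() retrieves it without further changes.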
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index f95befe18cc1..689d42aba8a9 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -2,9 +2,12 @@
# for USB OTG silicon based on Mentor Graphics INVENTRA designs
#
+# define_trace.h needs to know how to find our header
+CFLAGS_musb_trace.o := -I$(src)
+
obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
-musb_hdrc-y := musb_core.o
+musb_hdrc-y := musb_core.o musb_trace.o
musb_hdrc-$(CONFIG_USB_MUSB_HOST)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_virthub.o musb_host.o
musb_hdrc-$(CONFIG_USB_MUSB_GADGET)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_gadget_ep0.o musb_gadget.o
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index cc134109b056..1ae48e64e975 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -14,6 +14,7 @@
#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"
+#include "davinci.h"
/* CPPI DMA status 7-mar-2006:
@@ -232,7 +233,7 @@ static void cppi_controller_stop(struct cppi *controller)
musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
- dev_dbg(musb->controller, "Tearing down RX and TX Channels\n");
+ musb_dbg(musb, "Tearing down RX and TX Channels");
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
/* FIXME restructure of txdma to use bds like rxdma */
controller->tx[i].last_processed = NULL;
@@ -297,13 +298,13 @@ cppi_channel_allocate(struct dma_controller *c,
*/
if (transmit) {
if (index >= ARRAY_SIZE(controller->tx)) {
- dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'T', index);
+ musb_dbg(musb, "no %cX%d CPPI channel", 'T', index);
return NULL;
}
cppi_ch = controller->tx + index;
} else {
if (index >= ARRAY_SIZE(controller->rx)) {
- dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'R', index);
+ musb_dbg(musb, "no %cX%d CPPI channel", 'R', index);
return NULL;
}
cppi_ch = controller->rx + index;
@@ -314,13 +315,13 @@ cppi_channel_allocate(struct dma_controller *c,
* with the other DMA engine too
*/
if (cppi_ch->hw_ep)
- dev_dbg(musb->controller, "re-allocating DMA%d %cX channel %p\n",
+ musb_dbg(musb, "re-allocating DMA%d %cX channel %p",
index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep;
cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
cppi_ch->channel.max_len = 0x7fffffff;
- dev_dbg(musb->controller, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
+ musb_dbg(musb, "Allocate CPPI%d %cX", index, transmit ? 'T' : 'R');
return &cppi_ch->channel;
}
@@ -335,8 +336,8 @@ static void cppi_channel_release(struct dma_channel *channel)
c = container_of(channel, struct cppi_channel, channel);
tibase = c->controller->tibase;
if (!c->hw_ep)
- dev_dbg(c->controller->musb->controller,
- "releasing idle DMA channel %p\n", c);
+ musb_dbg(c->controller->musb,
+ "releasing idle DMA channel %p", c);
else if (!c->transmit)
core_rxirq_enable(tibase, c->index + 1);
@@ -354,11 +355,10 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
musb_ep_select(base, c->index + 1);
- dev_dbg(c->controller->musb->controller,
+ musb_dbg(c->controller->musb,
"RX DMA%d%s: %d left, csr %04x, "
"%08x H%08x S%08x C%08x, "
- "B%08x L%08x %08x .. %08x"
- "\n",
+ "B%08x L%08x %08x .. %08x",
c->index, tag,
musb_readl(c->controller->tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
@@ -385,11 +385,10 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
musb_ep_select(base, c->index + 1);
- dev_dbg(c->controller->musb->controller,
+ musb_dbg(c->controller->musb,
"TX DMA%d%s: csr %04x, "
"H%08x S%08x C%08x %08x, "
- "F%08x L%08x .. %08x"
- "\n",
+ "F%08x L%08x .. %08x",
c->index, tag,
musb_readw(c->hw_ep->regs, MUSB_TXCSR),
@@ -590,7 +589,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
length = min(n_bds * maxpacket, length);
}
- dev_dbg(musb->controller, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
+ musb_dbg(musb, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u",
tx->index,
maxpacket,
rndis ? "rndis" : "transparent",
@@ -647,7 +646,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
bd->hw_options |= CPPI_ZERO_SET;
}
- dev_dbg(musb->controller, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
+ musb_dbg(musb, "TXBD %p: nxt %08x buf %08x len %04x opt %08x",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
@@ -813,8 +812,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
length = min(n_bds * maxpacket, length);
- dev_dbg(musb->controller, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
- "dma 0x%llx len %u %u/%u\n",
+ musb_dbg(musb, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
+ "dma 0x%llx len %u %u/%u",
rx->index, maxpacket,
onepacket
? (is_rndis ? "rndis" : "onepacket")
@@ -924,7 +923,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff;
if (i < (2 + n_bds)) {
- dev_dbg(musb->controller, "bufcnt%d underrun - %d (for %d)\n",
+ musb_dbg(musb, "bufcnt%d underrun - %d (for %d)",
rx->index, i, n_bds);
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
@@ -973,7 +972,7 @@ static int cppi_channel_program(struct dma_channel *ch,
/* WARN_ON(1); */
break;
case MUSB_DMA_STATUS_UNKNOWN:
- dev_dbg(musb->controller, "%cX DMA%d not allocated!\n",
+ musb_dbg(musb, "%cX DMA%d not allocated!",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* FALLTHROUGH */
@@ -1029,8 +1028,8 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
if (!completed && (bd->hw_options & CPPI_OWN_SET))
break;
- dev_dbg(musb->controller, "C/RXBD %llx: nxt %08x buf %08x "
- "off.len %08x opt.len %08x (%d)\n",
+ musb_dbg(musb, "C/RXBD %llx: nxt %08x buf %08x "
+ "off.len %08x opt.len %08x (%d)",
(unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options,
rx->channel.actual_len);
@@ -1051,7 +1050,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
* CPPI ignores those BDs even though OWN is still set.
*/
completed = true;
- dev_dbg(musb->controller, "rx short %d/%d (%d)\n",
+ musb_dbg(musb, "rx short %d/%d (%d)",
len, bd->buflen,
rx->channel.actual_len);
}
@@ -1101,7 +1100,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
musb_ep_select(cppi->mregs, rx->index + 1);
csr = musb_readw(regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_DMAENAB) {
- dev_dbg(musb->controller, "list%d %p/%p, last %llx%s, csr %04x\n",
+ musb_dbg(musb, "list%d %p/%p, last %llx%s, csr %04x",
rx->index,
rx->head, rx->tail,
rx->last_processed
@@ -1164,7 +1163,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- dev_dbg(musb->controller, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
+ musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);
/* process TX channels */
for (index = 0; tx; tx = tx >> 1, index++) {
@@ -1192,7 +1191,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
* that needs to be acknowledged.
*/
if (NULL == bd) {
- dev_dbg(musb->controller, "null BD\n");
+ musb_dbg(musb, "null BD");
musb_writel(&tx_ram->tx_complete, 0, 0);
continue;
}
@@ -1207,7 +1206,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
if (bd->hw_options & CPPI_OWN_SET)
break;
- dev_dbg(musb->controller, "C/TXBD %p n %x b %x off %x opt %x\n",
+ musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
index 59bf949e589b..7fdfb71a8f09 100644
--- a/drivers/usb/musb/cppi_dma.h
+++ b/drivers/usb/musb/cppi_dma.h
@@ -7,17 +7,10 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
-#include "musb_dma.h"
#include "musb_core.h"
-
-
-/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers
- * would seem to be shared with the TUSB6020 (over VLYNQ).
- */
-
-#include "davinci.h"
-
+#include "musb_dma.h"
/* CPPI RX/TX state RAM */
@@ -131,4 +124,24 @@ struct cppi {
/* CPPI IRQ handler */
extern irqreturn_t cppi_interrupt(int, void *);
+struct cppi41_dma_channel {
+ struct dma_channel channel;
+ struct cppi41_dma_controller *controller;
+ struct musb_hw_ep *hw_ep;
+ struct dma_chan *dc;
+ dma_cookie_t cookie;
+ u8 port_num;
+ u8 is_tx;
+ u8 is_allocated;
+ u8 usb_toggle;
+
+ dma_addr_t buf_addr;
+ u32 total_len;
+ u32 prog_len;
+ u32 transferred;
+ u32 packet_sz;
+ struct list_head tx_check;
+ int tx_zlp;
+};
+
#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index f824336def5c..74fc3069cb42 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -102,6 +102,7 @@
#include <linux/usb.h>
#include "musb_core.h"
+#include "musb_trace.h"
#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
@@ -258,31 +259,43 @@ static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
{
- return __raw_readb(addr + offset);
+ u8 data = __raw_readb(addr + offset);
+
+ trace_musb_readb(__builtin_return_address(0), addr, offset, data);
+ return data;
}
static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
{
+ trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
__raw_writeb(data, addr + offset);
}
static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
{
- return __raw_readw(addr + offset);
+ u16 data = __raw_readw(addr + offset);
+
+ trace_musb_readw(__builtin_return_address(0), addr, offset, data);
+ return data;
}
static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
{
+ trace_musb_writew(__builtin_return_address(0), addr, offset, data);
__raw_writew(data, addr + offset);
}
static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
{
- return __raw_readl(addr + offset);
+ u32 data = __raw_readl(addr + offset);
+
+ trace_musb_readl(__builtin_return_address(0), addr, offset, data);
+ return data;
}
static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
{
+ trace_musb_writel(__builtin_return_address(0), addr, offset, data);
__raw_writel(data, addr + offset);
}
@@ -461,20 +474,21 @@ static void musb_otg_timer_func(unsigned long data)
spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->otg->state) {
case OTG_STATE_B_WAIT_ACON:
- dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n");
+ musb_dbg(musb,
+ "HNP: b_wait_acon timeout; back to b_peripheral");
musb_g_disconnect(musb);
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
musb->is_active = 0;
break;
case OTG_STATE_A_SUSPEND:
case OTG_STATE_A_WAIT_BCON:
- dev_dbg(musb->controller, "HNP: %s timeout\n",
+ musb_dbg(musb, "HNP: %s timeout",
usb_otg_state_string(musb->xceiv->otg->state));
musb_platform_set_vbus(musb, 0);
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
break;
default:
- dev_dbg(musb->controller, "HNP: Unhandled mode %s\n",
+ musb_dbg(musb, "HNP: Unhandled mode %s",
usb_otg_state_string(musb->xceiv->otg->state));
}
spin_unlock_irqrestore(&musb->lock, flags);
@@ -489,17 +503,17 @@ void musb_hnp_stop(struct musb *musb)
void __iomem *mbase = musb->mregs;
u8 reg;
- dev_dbg(musb->controller, "HNP: stop from %s\n",
+ musb_dbg(musb, "HNP: stop from %s",
usb_otg_state_string(musb->xceiv->otg->state));
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_PERIPHERAL:
musb_g_disconnect(musb);
- dev_dbg(musb->controller, "HNP: back to %s\n",
+ musb_dbg(musb, "HNP: back to %s",
usb_otg_state_string(musb->xceiv->otg->state));
break;
case OTG_STATE_B_HOST:
- dev_dbg(musb->controller, "HNP: Disabling HR\n");
+ musb_dbg(musb, "HNP: Disabling HR");
if (hcd)
hcd->self.is_b_host = 0;
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -510,7 +524,7 @@ void musb_hnp_stop(struct musb *musb)
/* REVISIT: Start SESSION_REQUEST here? */
break;
default:
- dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n",
+ musb_dbg(musb, "HNP: Stopping in unknown state %s",
usb_otg_state_string(musb->xceiv->otg->state));
}
@@ -541,8 +555,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
{
irqreturn_t handled = IRQ_NONE;
- dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl,
- int_usb);
+ musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
/* in host mode, the peripheral may issue remote wakeup.
* in peripheral mode, the host may resume the link.
@@ -550,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
*/
if (int_usb & MUSB_INTR_RESUME) {
handled = IRQ_HANDLED;
- dev_dbg(musb->controller, "RESUME (%s)\n",
+ musb_dbg(musb, "RESUME (%s)",
usb_otg_state_string(musb->xceiv->otg->state));
if (devctl & MUSB_DEVCTL_HM) {
@@ -619,11 +632,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
&& (devctl & MUSB_DEVCTL_BDEVICE)) {
- dev_dbg(musb->controller, "SessReq while on B state\n");
+ musb_dbg(musb, "SessReq while on B state");
return IRQ_HANDLED;
}
- dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n",
+ musb_dbg(musb, "SESSION_REQUEST (%s)",
usb_otg_state_string(musb->xceiv->otg->state));
/* IRQ arrives from ID pin sense or (later, if VBUS power
@@ -714,7 +727,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
}
if (int_usb & MUSB_INTR_SUSPEND) {
- dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x\n",
+ musb_dbg(musb, "SUSPEND (%s) devctl %02x",
usb_otg_state_string(musb->xceiv->otg->state), devctl);
handled = IRQ_HANDLED;
@@ -743,7 +756,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->is_active = musb->g.b_hnp_enable;
if (musb->is_active) {
musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
- dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
+ musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
mod_timer(&musb->otg_timer, jiffies
+ msecs_to_jiffies(
OTG_TIME_B_ASE0_BRST));
@@ -760,7 +773,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
break;
case OTG_STATE_B_HOST:
/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
- dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n");
+ musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
break;
default:
/* "should not happen" */
@@ -797,14 +810,14 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
switch (musb->xceiv->otg->state) {
case OTG_STATE_B_PERIPHERAL:
if (int_usb & MUSB_INTR_SUSPEND) {
- dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
+ musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
int_usb &= ~MUSB_INTR_SUSPEND;
goto b_host;
} else
- dev_dbg(musb->controller, "CONNECT as b_peripheral???\n");
+ musb_dbg(musb, "CONNECT as b_peripheral???");
break;
case OTG_STATE_B_WAIT_ACON:
- dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n");
+ musb_dbg(musb, "HNP: CONNECT, now b_host");
b_host:
musb->xceiv->otg->state = OTG_STATE_B_HOST;
if (musb->hcd)
@@ -823,12 +836,12 @@ b_host:
musb_host_poke_root_hub(musb);
- dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
+ musb_dbg(musb, "CONNECT (%s) devctl %02x",
usb_otg_state_string(musb->xceiv->otg->state), devctl);
}
if (int_usb & MUSB_INTR_DISCONNECT) {
- dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n",
+ musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
usb_otg_state_string(musb->xceiv->otg->state),
MUSB_MODE(musb), devctl);
handled = IRQ_HANDLED;
@@ -891,7 +904,7 @@ b_host:
if (is_host_active(musb))
musb_recover_from_babble(musb);
} else {
- dev_dbg(musb->controller, "BUS RESET as %s\n",
+ musb_dbg(musb, "BUS RESET as %s",
usb_otg_state_string(musb->xceiv->otg->state));
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
@@ -899,7 +912,7 @@ b_host:
/* FALLTHROUGH */
case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
/* never use invalid T(a_wait_bcon) */
- dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n",
+ musb_dbg(musb, "HNP: in %s, %d msec timeout",
usb_otg_state_string(musb->xceiv->otg->state),
TA_WAIT_BCON(musb));
mod_timer(&musb->otg_timer, jiffies
@@ -910,7 +923,7 @@ b_host:
musb_g_reset(musb);
break;
case OTG_STATE_B_WAIT_ACON:
- dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n",
+ musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
usb_otg_state_string(musb->xceiv->otg->state));
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
musb_g_reset(musb);
@@ -922,7 +935,7 @@ b_host:
musb_g_reset(musb);
break;
default:
- dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n",
+ musb_dbg(musb, "Unhandled BUS RESET as %s",
usb_otg_state_string(musb->xceiv->otg->state));
}
}
@@ -1030,7 +1043,7 @@ void musb_start(struct musb *musb)
u8 devctl = musb_readb(regs, MUSB_DEVCTL);
u8 power;
- dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
+ musb_dbg(musb, "<== devctl %02x", devctl);
musb_enable_interrupts(musb);
musb_writeb(regs, MUSB_TESTMODE, 0);
@@ -1078,7 +1091,7 @@ void musb_stop(struct musb *musb)
/* stop IRQs, timers, ... */
musb_platform_disable(musb);
musb_generic_disable(musb);
- dev_dbg(musb->controller, "HDRC disabled\n");
+ musb_dbg(musb, "HDRC disabled");
/* FIXME
* - mark host and/or peripheral drivers unusable/inactive
@@ -1391,7 +1404,7 @@ static int ep_config_from_hw(struct musb *musb)
void __iomem *mbase = musb->mregs;
int ret = 0;
- dev_dbg(musb->controller, "<== static silicon ep config\n");
+ musb_dbg(musb, "<== static silicon ep config");
/* FIXME pick up ep0 maxpacket size */
@@ -1532,8 +1545,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
hw_ep->tx_reinit = 1;
if (hw_ep->max_packet_sz_tx) {
- dev_dbg(musb->controller,
- "%s: hw_ep %d%s, %smax %d\n",
+ musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
musb_driver_name, i,
hw_ep->is_shared_fifo ? "shared" : "tx",
hw_ep->tx_double_buffered
@@ -1541,8 +1553,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
hw_ep->max_packet_sz_tx);
}
if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
- dev_dbg(musb->controller,
- "%s: hw_ep %d%s, %smax %d\n",
+ musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
musb_driver_name, i,
"rx",
hw_ep->rx_double_buffered
@@ -1550,7 +1561,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
hw_ep->max_packet_sz_rx);
}
if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
- dev_dbg(musb->controller, "hw_ep %d not configured\n", i);
+ musb_dbg(musb, "hw_ep %d not configured", i);
}
return 0;
@@ -1577,9 +1588,7 @@ irqreturn_t musb_interrupt(struct musb *musb)
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- dev_dbg(musb->controller, "** IRQ %s usb%04x tx%04x rx%04x\n",
- is_host_active(musb) ? "host" : "peripheral",
- musb->int_usb, musb->int_tx, musb->int_rx);
+ trace_musb_isr(musb);
/**
* According to Mentor Graphics' documentation, flowchart on page 98,
@@ -1976,7 +1985,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
* Fail when the board needs a feature that's not enabled.
*/
if (!plat) {
- dev_dbg(dev, "no platform_data?\n");
+ dev_err(dev, "no platform_data?\n");
status = -ENODEV;
goto fail0;
}
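
The new musb_trace.h itself is outside this hunk set, so the events behind trace_musb_readb()/trace_musb_writeb() and trace_musb_isr() are not visible here. A minimal sketch of how such register events could be declared with the standard TRACE_EVENT machinery -- the class name, field layout, and format string below are illustrative assumptions, not copied from the real header:

/* sketch only; the usual define_trace.h boilerplate is elided */
#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(musb_regb,
	TP_PROTO(void *caller, const void __iomem *addr,
		 unsigned int offset, u8 data),
	TP_ARGS(caller, addr, offset, data),
	TP_STRUCT__entry(
		__field(void *, caller)
		__field(const void __iomem *, addr)
		__field(unsigned int, offset)
		__field(u8, data)
	),
	TP_fast_assign(
		__entry->caller = caller;
		__entry->addr = addr;
		__entry->offset = offset;
		__entry->data = data;
	),
	/* %pS resolves the caller captured via __builtin_return_address(0) */
	TP_printk("%pS: %p + %04x: %02x", __entry->caller,
		  __entry->addr, __entry->offset, __entry->data)
);

/* one event per accessor, so reads and writes can be enabled separately */
DEFINE_EVENT(musb_regb, musb_readb,
	TP_PROTO(void *caller, const void __iomem *addr,
		 unsigned int offset, u8 data),
	TP_ARGS(caller, addr, offset, data));

DEFINE_EVENT(musb_regb, musb_writeb,
	TP_PROTO(void *caller, const void __iomem *addr,
		 unsigned int offset, u8 data),
	TP_ARGS(caller, addr, offset, data));
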
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index e499b862a946..d4d7c56b48c7 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -5,7 +5,9 @@
#include <linux/platform_device.h>
#include <linux/of.h>
+#include "cppi_dma.h"
#include "musb_core.h"
+#include "musb_trace.h"
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
@@ -22,26 +24,6 @@
#define USB_CTRL_AUTOREQ 0xd0
#define USB_TDOWN 0xd8
-struct cppi41_dma_channel {
- struct dma_channel channel;
- struct cppi41_dma_controller *controller;
- struct musb_hw_ep *hw_ep;
- struct dma_chan *dc;
- dma_cookie_t cookie;
- u8 port_num;
- u8 is_tx;
- u8 is_allocated;
- u8 usb_toggle;
-
- dma_addr_t buf_addr;
- u32 total_len;
- u32 prog_len;
- u32 transferred;
- u32 packet_sz;
- struct list_head tx_check;
- int tx_zlp;
-};
-
#define MUSB_DMA_NUM_CHANNELS 15
struct cppi41_dma_controller {
@@ -96,8 +78,8 @@ static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
if (!toggle && toggle == cppi41_channel->usb_toggle) {
csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
- dev_dbg(cppi41_channel->controller->musb->controller,
- "Restoring DATA1 toggle.\n");
+ musb_dbg(cppi41_channel->controller->musb,
+ "Restoring DATA1 toggle.");
}
cppi41_channel->usb_toggle = toggle;
@@ -145,6 +127,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
musb_writew(epio, MUSB_TXCSR, csr);
}
+
+ trace_musb_cppi41_done(cppi41_channel);
musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
} else {
/* next iteration, reload */
@@ -173,6 +157,7 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
dma_desc->callback = cppi41_dma_callback;
dma_desc->callback_param = &cppi41_channel->channel;
cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
+ trace_musb_cppi41_cont(cppi41_channel);
dma_async_issue_pending(dc);
if (!cppi41_channel->is_tx) {
@@ -240,10 +225,7 @@ static void cppi41_dma_callback(void *private_data)
transferred = cppi41_channel->prog_len - txstate.residue;
cppi41_channel->transferred += transferred;
- dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
- hw_ep->epnum, cppi41_channel->transferred,
- cppi41_channel->total_len);
-
+ trace_musb_cppi41_gb(cppi41_channel);
update_rx_toggle(cppi41_channel);
if (cppi41_channel->transferred == cppi41_channel->total_len ||
@@ -374,12 +356,6 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
struct musb *musb = cppi41_channel->controller->musb;
unsigned use_gen_rndis = 0;
- dev_dbg(musb->controller,
- "configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
- cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
- packet_sz, mode, (unsigned long long) dma_addr,
- len, cppi41_channel->is_tx);
-
cppi41_channel->buf_addr = dma_addr;
cppi41_channel->total_len = len;
cppi41_channel->transferred = 0;
@@ -431,6 +407,8 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
cppi41_channel->channel.rx_packet_done = false;
+ trace_musb_cppi41_config(cppi41_channel);
+
save_rx_toggle(cppi41_channel);
dma_async_issue_pending(dc);
return true;
@@ -461,6 +439,7 @@ static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
cppi41_channel->hw_ep = hw_ep;
cppi41_channel->is_allocated = 1;
+ trace_musb_cppi41_alloc(cppi41_channel);
return &cppi41_channel->channel;
}
@@ -468,6 +447,7 @@ static void cppi41_dma_channel_release(struct dma_channel *channel)
{
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ trace_musb_cppi41_free(cppi41_channel);
if (cppi41_channel->is_allocated) {
cppi41_channel->is_allocated = 0;
channel->status = MUSB_DMA_STATUS_FREE;
@@ -537,8 +517,7 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
u16 csr;
is_tx = cppi41_channel->is_tx;
- dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
- cppi41_channel->port_num, is_tx);
+ trace_musb_cppi41_abort(cppi41_channel);
if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
return 0;
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 27ba8f799462..9a78877a8afe 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -42,6 +42,8 @@
#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
+void musb_dbg(struct musb *musb, const char *fmt, ...);
+
#ifdef CONFIG_DEBUG_FS
int musb_init_debugfs(struct musb *musb);
void musb_exit_debugfs(struct musb *musb);
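
Only the prototype of musb_dbg() is added here; its body lives elsewhere in the series. One plausible definition, assuming a trace_musb_log() event that takes a struct va_format (the event name is an assumption), which would also explain why every converted format string in this patch drops its trailing "\n" -- the trace ring buffer terminates each record itself:

void musb_dbg(struct musb *musb, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	/* hand the still-unformatted arguments to the tracepoint */
	trace_musb_log(musb, &vaf);

	va_end(args);
}
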
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index eeb7d9ecf7df..2537179636db 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -52,30 +52,6 @@
static const struct of_device_id musb_dsps_of_match[];
/**
- * avoid using musb_readx()/musb_writex() as glue layer should not be
- * dependent on musb core layer symbols.
- */
-static inline u8 dsps_readb(const void __iomem *addr, unsigned offset)
-{
- return __raw_readb(addr + offset);
-}
-
-static inline u32 dsps_readl(const void __iomem *addr, unsigned offset)
-{
- return __raw_readl(addr + offset);
-}
-
-static inline void dsps_writeb(void __iomem *addr, unsigned offset, u8 data)
-{
- __raw_writeb(data, addr + offset);
-}
-
-static inline void dsps_writel(void __iomem *addr, unsigned offset, u32 data)
-{
- __raw_writel(data, addr + offset);
-}
-
-/**
* DSPS musb wrapper register offset.
* FIXME: This should be expanded to have all the wrapper registers from TI DSPS
* musb ips.
@@ -223,8 +199,8 @@ static void dsps_musb_enable(struct musb *musb)
((musb->epmask & wrp->rxep_mask) << wrp->rxep_shift);
coremask = (wrp->usb_bitmap & ~MUSB_INTR_SOF);
- dsps_writel(reg_base, wrp->epintr_set, epmask);
- dsps_writel(reg_base, wrp->coreintr_set, coremask);
+ musb_writel(reg_base, wrp->epintr_set, epmask);
+ musb_writel(reg_base, wrp->coreintr_set, coremask);
/* start polling for ID change in dual-role idle mode */
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
@@ -244,10 +220,10 @@ static void dsps_musb_disable(struct musb *musb)
const struct dsps_musb_wrapper *wrp = glue->wrp;
void __iomem *reg_base = musb->ctrl_base;
- dsps_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
- dsps_writel(reg_base, wrp->epintr_clear,
+ musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
+ musb_writel(reg_base, wrp->epintr_clear,
wrp->txep_bitmap | wrp->rxep_bitmap);
- dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
}
static void otg_timer(unsigned long _musb)
@@ -265,14 +241,14 @@ static void otg_timer(unsigned long _musb)
* We poll because DSPS IP's won't expose several OTG-critical
* status change events (from the transceiver) otherwise.
*/
- devctl = dsps_readb(mregs, MUSB_DEVCTL);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
usb_otg_state_string(musb->xceiv->otg->state));
spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_BCON:
- dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
/* fall */
@@ -286,13 +262,13 @@ static void otg_timer(unsigned long _musb)
MUSB_HST_MODE(musb);
}
if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
- dsps_writeb(mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+ musb_writeb(mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
mod_timer(&glue->timer, jiffies +
msecs_to_jiffies(wrp->poll_timeout));
break;
case OTG_STATE_A_WAIT_VFALL:
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
- dsps_writel(musb->ctrl_base, wrp->coreintr_set,
+ musb_writel(musb->ctrl_base, wrp->coreintr_set,
MUSB_INTR_VBUSERROR << wrp->usb_shift);
break;
default:
@@ -315,29 +291,29 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
spin_lock_irqsave(&musb->lock, flags);
/* Get endpoint interrupts */
- epintr = dsps_readl(reg_base, wrp->epintr_status);
+ epintr = musb_readl(reg_base, wrp->epintr_status);
musb->int_rx = (epintr & wrp->rxep_bitmap) >> wrp->rxep_shift;
musb->int_tx = (epintr & wrp->txep_bitmap) >> wrp->txep_shift;
if (epintr)
- dsps_writel(reg_base, wrp->epintr_status, epintr);
+ musb_writel(reg_base, wrp->epintr_status, epintr);
/* Get usb core interrupts */
- usbintr = dsps_readl(reg_base, wrp->coreintr_status);
+ usbintr = musb_readl(reg_base, wrp->coreintr_status);
if (!usbintr && !epintr)
goto out;
musb->int_usb = (usbintr & wrp->usb_bitmap) >> wrp->usb_shift;
if (usbintr)
- dsps_writel(reg_base, wrp->coreintr_status, usbintr);
+ musb_writel(reg_base, wrp->coreintr_status, usbintr);
dev_dbg(musb->controller, "usbintr (%x) epintr(%x)\n",
usbintr, epintr);
if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) {
- int drvvbus = dsps_readl(reg_base, wrp->status);
+ int drvvbus = musb_readl(reg_base, wrp->status);
void __iomem *mregs = musb->mregs;
- u8 devctl = dsps_readb(mregs, MUSB_DEVCTL);
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
int err;
err = musb->int_usb & MUSB_INTR_VBUSERROR;
@@ -442,7 +418,7 @@ static int dsps_musb_init(struct musb *musb)
musb->phy = devm_phy_get(dev->parent, "usb2-phy");
/* Returns zero if e.g. not clocked */
- rev = dsps_readl(reg_base, wrp->revision);
+ rev = musb_readl(reg_base, wrp->revision);
if (!rev)
return -ENODEV;
@@ -463,14 +439,14 @@ static int dsps_musb_init(struct musb *musb)
setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
/* Reset the musb */
- dsps_writel(reg_base, wrp->control, (1 << wrp->reset));
+ musb_writel(reg_base, wrp->control, (1 << wrp->reset));
musb->isr = dsps_interrupt;
/* reset the otgdisable bit, needed for host mode to work */
- val = dsps_readl(reg_base, wrp->phy_utmi);
+ val = musb_readl(reg_base, wrp->phy_utmi);
val &= ~(1 << wrp->otg_disable);
- dsps_writel(musb->ctrl_base, wrp->phy_utmi, val);
+ musb_writel(musb->ctrl_base, wrp->phy_utmi, val);
/*
* Check whether the dsps version has babble control enabled.
@@ -478,11 +454,11 @@ static int dsps_musb_init(struct musb *musb)
* If MUSB_BABBLE_CTL returns 0x4 then we have the babble control
* logic enabled.
*/
- val = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+ val = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
if (val & MUSB_BABBLE_RCV_DISABLE) {
glue->sw_babble_enabled = true;
val |= MUSB_BABBLE_SW_SESSION_CTRL;
- dsps_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
+ musb_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
}
return dsps_musb_dbg_init(musb, glue);
@@ -510,7 +486,7 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
void __iomem *ctrl_base = musb->ctrl_base;
u32 reg;
- reg = dsps_readl(ctrl_base, wrp->mode);
+ reg = musb_readl(ctrl_base, wrp->mode);
switch (mode) {
case MUSB_HOST:
@@ -523,8 +499,8 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
*/
reg |= (1 << wrp->iddig_mux);
- dsps_writel(ctrl_base, wrp->mode, reg);
- dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
+ musb_writel(ctrl_base, wrp->mode, reg);
+ musb_writel(ctrl_base, wrp->phy_utmi, 0x02);
break;
case MUSB_PERIPHERAL:
reg |= (1 << wrp->iddig);
@@ -536,10 +512,10 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
*/
reg |= (1 << wrp->iddig_mux);
- dsps_writel(ctrl_base, wrp->mode, reg);
+ musb_writel(ctrl_base, wrp->mode, reg);
break;
case MUSB_OTG:
- dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
+ musb_writel(ctrl_base, wrp->phy_utmi, 0x02);
break;
default:
dev_err(glue->dev, "unsupported mode %d\n", mode);
@@ -554,7 +530,7 @@ static bool dsps_sw_babble_control(struct musb *musb)
u8 babble_ctl;
bool session_restart = false;
- babble_ctl = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+ babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
dev_dbg(musb->controller, "babble: MUSB_BABBLE_CTL value %x\n",
babble_ctl);
/*
@@ -571,14 +547,14 @@ static bool dsps_sw_babble_control(struct musb *musb)
* babble is due to noise, then set transmit idle (d7 bit)
* to resume normal operation
*/
- babble_ctl = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+ babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
babble_ctl |= MUSB_BABBLE_FORCE_TXIDLE;
- dsps_writeb(musb->mregs, MUSB_BABBLE_CTL, babble_ctl);
+ musb_writeb(musb->mregs, MUSB_BABBLE_CTL, babble_ctl);
/* wait till line monitor flag cleared */
dev_dbg(musb->controller, "Set TXIDLE, wait J to clear\n");
do {
- babble_ctl = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+ babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
udelay(1);
} while ((babble_ctl & MUSB_BABBLE_STUCK_J) && timeout--);
@@ -896,13 +872,13 @@ static int dsps_suspend(struct device *dev)
return 0;
mbase = musb->ctrl_base;
- glue->context.control = dsps_readl(mbase, wrp->control);
- glue->context.epintr = dsps_readl(mbase, wrp->epintr_set);
- glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set);
- glue->context.phy_utmi = dsps_readl(mbase, wrp->phy_utmi);
- glue->context.mode = dsps_readl(mbase, wrp->mode);
- glue->context.tx_mode = dsps_readl(mbase, wrp->tx_mode);
- glue->context.rx_mode = dsps_readl(mbase, wrp->rx_mode);
+ glue->context.control = musb_readl(mbase, wrp->control);
+ glue->context.epintr = musb_readl(mbase, wrp->epintr_set);
+ glue->context.coreintr = musb_readl(mbase, wrp->coreintr_set);
+ glue->context.phy_utmi = musb_readl(mbase, wrp->phy_utmi);
+ glue->context.mode = musb_readl(mbase, wrp->mode);
+ glue->context.tx_mode = musb_readl(mbase, wrp->tx_mode);
+ glue->context.rx_mode = musb_readl(mbase, wrp->rx_mode);
return 0;
}
@@ -918,13 +894,13 @@ static int dsps_resume(struct device *dev)
return 0;
mbase = musb->ctrl_base;
- dsps_writel(mbase, wrp->control, glue->context.control);
- dsps_writel(mbase, wrp->epintr_set, glue->context.epintr);
- dsps_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
- dsps_writel(mbase, wrp->phy_utmi, glue->context.phy_utmi);
- dsps_writel(mbase, wrp->mode, glue->context.mode);
- dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
- dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
+ musb_writel(mbase, wrp->control, glue->context.control);
+ musb_writel(mbase, wrp->epintr_set, glue->context.epintr);
+ musb_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
+ musb_writel(mbase, wrp->phy_utmi, glue->context.phy_utmi);
+ musb_writel(mbase, wrp->mode, glue->context.mode);
+ musb_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
+ musb_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
mod_timer(&glue->timer, jiffies +
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index af2a3a7addf9..6d1e975e9605 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -44,6 +44,7 @@
#include <linux/slab.h>
#include "musb_core.h"
+#include "musb_trace.h"
/* ----------------------------------------------------------------------- */
@@ -167,15 +168,7 @@ __acquires(ep->musb->lock)
if (!dma_mapping_error(&musb->g.dev, request->dma))
unmap_dma_buffer(req, musb);
- if (request->status == 0)
- dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
- ep->end_point.name, request,
- req->request.actual, req->request.length);
- else
- dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
- ep->end_point.name, request,
- req->request.actual, req->request.length,
- request->status);
+ trace_musb_req_gb(req);
usb_gadget_giveback_request(&req->ep->end_point, &req->request);
spin_lock(&musb->lock);
ep->busy = busy;
@@ -217,8 +210,7 @@ static void nuke(struct musb_ep *ep, const int status)
}
value = c->channel_abort(ep->dma);
- dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
- ep->name, value);
+ musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
c->channel_release(ep->dma);
ep->dma = NULL;
}
@@ -266,14 +258,14 @@ static void txstate(struct musb *musb, struct musb_request *req)
/* Check if EP is disabled */
if (!musb_ep->desc) {
- dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
+ musb_dbg(musb, "ep:%s disabled - ignore request",
musb_ep->end_point.name);
return;
}
/* we shouldn't get here while DMA is active ... but we do ... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
- dev_dbg(musb->controller, "dma pending...\n");
+ musb_dbg(musb, "dma pending...");
return;
}
@@ -285,18 +277,18 @@ static void txstate(struct musb *musb, struct musb_request *req)
(int)(request->length - request->actual));
if (csr & MUSB_TXCSR_TXPKTRDY) {
- dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
+ musb_dbg(musb, "%s old packet still ready , txcsr %03x",
musb_ep->end_point.name, csr);
return;
}
if (csr & MUSB_TXCSR_P_SENDSTALL) {
- dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
+ musb_dbg(musb, "%s stalling, txcsr %03x",
musb_ep->end_point.name, csr);
return;
}
- dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
+ musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
epnum, musb_ep->packet_sz, fifo_count,
csr);
@@ -424,7 +416,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
}
/* host may already have the data when this message shows... */
- dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
+ musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
musb_ep->end_point.name, use_dma ? "dma" : "pio",
request->actual, request->length,
musb_readw(epio, MUSB_TXCSR),
@@ -450,8 +442,9 @@ void musb_g_tx(struct musb *musb, u8 epnum)
req = next_request(musb_ep);
request = &req->request;
+ trace_musb_req_tx(req);
csr = musb_readw(epio, MUSB_TXCSR);
- dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
+ musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
dma = is_dma_capable() ? musb_ep->dma : NULL;
@@ -480,7 +473,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
* SHOULD NOT HAPPEN... has with CPPI though, after
* changing SENDSTALL (and other cases); harmless?
*/
- dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
+ musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
return;
}
@@ -497,7 +490,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
/* Ensure writebuffer is empty. */
csr = musb_readw(epio, MUSB_TXCSR);
request->actual += musb_ep->dma->actual_len;
- dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
+ musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
epnum, csr, musb_ep->dma->actual_len, request);
}
@@ -524,7 +517,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
if (csr & MUSB_TXCSR_TXPKTRDY)
return;
- dev_dbg(musb->controller, "sending zero pkt\n");
musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
| MUSB_TXCSR_TXPKTRDY);
request->zero = 0;
@@ -543,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
musb_ep_select(mbase, epnum);
req = musb_ep->desc ? next_request(musb_ep) : NULL;
if (!req) {
- dev_dbg(musb->controller, "%s idle now\n",
+ musb_dbg(musb, "%s idle now",
musb_ep->end_point.name);
return;
}
@@ -579,19 +571,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
/* Check if EP is disabled */
if (!musb_ep->desc) {
- dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
+ musb_dbg(musb, "ep:%s disabled - ignore request",
musb_ep->end_point.name);
return;
}
/* We shouldn't get here while DMA is active, but we do... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
- dev_dbg(musb->controller, "DMA pending...\n");
+ musb_dbg(musb, "DMA pending...");
return;
}
if (csr & MUSB_RXCSR_P_SENDSTALL) {
- dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
+ musb_dbg(musb, "%s stalling, RXCSR %04x",
musb_ep->end_point.name, csr);
return;
}
@@ -766,7 +758,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
}
len = request->length - request->actual;
- dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+ musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
musb_ep->end_point.name,
fifo_count, len,
musb_ep->packet_sz);
@@ -849,12 +841,13 @@ void musb_g_rx(struct musb *musb, u8 epnum)
if (!req)
return;
+ trace_musb_req_rx(req);
request = &req->request;
csr = musb_readw(epio, MUSB_RXCSR);
dma = is_dma_capable() ? musb_ep->dma : NULL;
- dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
+ musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
csr, dma ? " (dma)" : "", request);
if (csr & MUSB_RXCSR_P_SENTSTALL) {
@@ -869,18 +862,18 @@ void musb_g_rx(struct musb *musb, u8 epnum)
csr &= ~MUSB_RXCSR_P_OVERRUN;
musb_writew(epio, MUSB_RXCSR, csr);
- dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
+ musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
if (request->status == -EINPROGRESS)
request->status = -EOVERFLOW;
}
if (csr & MUSB_RXCSR_INCOMPRX) {
/* REVISIT not necessarily an error */
- dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
+ musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
}
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
/* "should not happen"; likely RXPKTRDY pending for DMA */
- dev_dbg(musb->controller, "%s busy, csr %04x\n",
+ musb_dbg(musb, "%s busy, csr %04x",
musb_ep->end_point.name, csr);
return;
}
@@ -894,11 +887,6 @@ void musb_g_rx(struct musb *musb, u8 epnum)
request->actual += musb_ep->dma->actual_len;
- dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
- epnum, csr,
- musb_readw(epio, MUSB_RXCSR),
- musb_ep->dma->actual_len, request);
-
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
defined(CONFIG_USB_UX500_DMA)
/* Autoclear doesn't clear RxPktRdy for short packets */
@@ -996,7 +984,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
ok = musb->hb_iso_rx;
if (!ok) {
- dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
+ musb_dbg(musb, "no support for high bandwidth ISO");
goto fail;
}
musb_ep->hb_mult = (tmp >> 11) & 3;
@@ -1019,7 +1007,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
if (tmp > hw_ep->max_packet_sz_tx) {
- dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
+ musb_dbg(musb, "packet size beyond hardware FIFO size");
goto fail;
}
@@ -1062,7 +1050,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
if (tmp > hw_ep->max_packet_sz_rx) {
- dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
+ musb_dbg(musb, "packet size beyond hardware FIFO size");
goto fail;
}
@@ -1174,7 +1162,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
spin_unlock_irqrestore(&(musb->lock), flags);
- dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
+ musb_dbg(musb, "%s", musb_ep->end_point.name);
return status;
}
@@ -1186,19 +1174,17 @@ static int musb_gadget_disable(struct usb_ep *ep)
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
- struct musb *musb = musb_ep->musb;
struct musb_request *request = NULL;
request = kzalloc(sizeof *request, gfp_flags);
- if (!request) {
- dev_dbg(musb->controller, "not enough memory\n");
+ if (!request)
return NULL;
- }
request->request.dma = DMA_ADDR_INVALID;
request->epnum = musb_ep->current_epnum;
request->ep = musb_ep;
+ trace_musb_req_alloc(request);
return &request->request;
}
@@ -1208,7 +1194,10 @@ struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
*/
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
- kfree(to_musb_request(req));
+ struct musb_request *request = to_musb_request(req);
+
+ trace_musb_req_free(request);
+ kfree(request);
}
static LIST_HEAD(buffers);
@@ -1225,10 +1214,7 @@ struct free_record {
*/
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
- dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
- req->tx ? "TX/IN" : "RX/OUT",
- &req->request, req->request.length, req->epnum);
-
+ trace_musb_req_start(req);
musb_ep_select(musb->mregs, req->epnum);
if (req->tx)
txstate(musb, req);
@@ -1259,7 +1245,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
if (request->ep != musb_ep)
return -EINVAL;
- dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
+ trace_musb_req_enq(request);
/* request is mine now... */
request->request.actual = 0;
@@ -1273,7 +1259,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
/* don't queue if the ep is down */
if (!musb_ep->desc) {
- dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
+ musb_dbg(musb, "req %p queued to %s while ep %s",
req, ep->name, "disabled");
status = -ESHUTDOWN;
unmap_dma_buffer(request, musb);
@@ -1301,9 +1287,11 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
int status = 0;
struct musb *musb = musb_ep->musb;
- if (!ep || !request || to_musb_request(request)->ep != musb_ep)
+ if (!ep || !request || req->ep != musb_ep)
return -EINVAL;
+ trace_musb_req_deq(req);
+
spin_lock_irqsave(&musb->lock, flags);
list_for_each_entry(r, &musb_ep->req_list, list) {
@@ -1311,7 +1299,8 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
break;
}
if (r != req) {
- dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
+ dev_err(musb->controller, "request %p not queued to %s\n",
+ request, ep->name);
status = -EINVAL;
goto done;
}
@@ -1377,7 +1366,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
request = next_request(musb_ep);
if (value) {
if (request) {
- dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
+ musb_dbg(musb, "request in progress, cannot halt %s",
ep->name);
status = -EAGAIN;
goto done;
@@ -1386,7 +1375,8 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
if (musb_ep->is_in) {
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
- dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
+ musb_dbg(musb, "FIFO busy, cannot halt %s",
+ ep->name);
status = -EAGAIN;
goto done;
}
@@ -1395,7 +1385,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
musb_ep->wedged = 0;
/* set/clear the stall and toggle bits */
- dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
+ musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
if (musb_ep->is_in) {
csr = musb_readw(epio, MUSB_TXCSR);
csr |= MUSB_TXCSR_P_WZC_BITS
@@ -1422,7 +1412,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
/* maybe start the first request in the queue */
if (!musb_ep->busy && !value && request) {
- dev_dbg(musb->controller, "restarting the request\n");
+ musb_dbg(musb, "restarting the request");
musb_ep_restart(musb, request);
}
@@ -1558,7 +1548,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
case OTG_STATE_B_IDLE:
/* Start SRP ... OTG not required. */
devctl = musb_readb(mregs, MUSB_DEVCTL);
- dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
+ musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
devctl |= MUSB_DEVCTL_SESSION;
musb_writeb(mregs, MUSB_DEVCTL, devctl);
devctl = musb_readb(mregs, MUSB_DEVCTL);
@@ -1586,7 +1576,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
status = 0;
goto done;
default:
- dev_dbg(musb->controller, "Unhandled wake: %s\n",
+ musb_dbg(musb, "Unhandled wake: %s",
usb_otg_state_string(musb->xceiv->otg->state));
goto done;
}
@@ -1596,7 +1586,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
power = musb_readb(mregs, MUSB_POWER);
power |= MUSB_POWER_RESUME;
musb_writeb(mregs, MUSB_POWER, power);
- dev_dbg(musb->controller, "issue wakeup\n");
+ musb_dbg(musb, "issue wakeup");
/* FIXME do this next chunk in a timer callback, no udelay */
mdelay(2);
@@ -1628,7 +1618,7 @@ static void musb_pullup(struct musb *musb, int is_on)
/* FIXME if on, HdrcStart; if off, HdrcStop */
- dev_dbg(musb->controller, "gadget D+ pullup %s\n",
+ musb_dbg(musb, "gadget D+ pullup %s",
is_on ? "on" : "off");
musb_writeb(musb->mregs, MUSB_POWER, power);
}
@@ -1636,7 +1626,7 @@ static void musb_pullup(struct musb *musb, int is_on)
#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
- dev_dbg(musb->controller, "<= %s =>\n", __func__);
+ musb_dbg(musb, "<= %s =>\n", __func__);
/*
* FIXME iff driver's softconnect flag is set (as it is during probe,
@@ -2011,7 +2001,7 @@ void musb_g_suspend(struct musb *musb)
u8 devctl;
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- dev_dbg(musb->controller, "devctl %02x\n", devctl);
+ musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);
switch (musb->xceiv->otg->state) {
case OTG_STATE_B_IDLE:
@@ -2030,7 +2020,7 @@ void musb_g_suspend(struct musb *musb)
/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
* A_PERIPHERAL may need care too
*/
- WARNING("unhandled SUSPEND transition (%s)\n",
+ WARNING("unhandled SUSPEND transition (%s)",
usb_otg_state_string(musb->xceiv->otg->state));
}
}
@@ -2047,7 +2037,7 @@ void musb_g_disconnect(struct musb *musb)
void __iomem *mregs = musb->mregs;
u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
- dev_dbg(musb->controller, "devctl %02x\n", devctl);
+ musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);
/* clear HR */
musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
@@ -2064,7 +2054,7 @@ void musb_g_disconnect(struct musb *musb)
switch (musb->xceiv->otg->state) {
default:
- dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
+ musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
usb_otg_state_string(musb->xceiv->otg->state));
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
@@ -2094,7 +2084,7 @@ __acquires(musb->lock)
u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
u8 power;
- dev_dbg(musb->controller, "<== %s driver '%s'\n",
+ musb_dbg(musb, "<== %s driver '%s'",
(devctl & MUSB_DEVCTL_BDEVICE)
? "B-Device" : "A-Device",
musb->gadget_driver
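
The give-back/enqueue/dequeue messages deleted above are replaced by trace_musb_req_*() calls. A sketch of a request event class that would preserve the same information as the removed "done request %p, %d/%d" dev_dbg -- again with illustrative, not authoritative, field choices:

DECLARE_EVENT_CLASS(musb_req,
	TP_PROTO(struct musb_request *req),
	TP_ARGS(req),
	TP_STRUCT__entry(
		__string(name, req->ep->end_point.name)
		__field(struct usb_request *, request)
		__field(int, status)
		__field(unsigned int, actual)
		__field(unsigned int, length)
	),
	TP_fast_assign(
		__assign_str(name, req->ep->end_point.name);
		__entry->request = &req->request;
		__entry->status = req->request.status;
		__entry->actual = req->request.actual;
		__entry->length = req->request.length;
	),
	TP_printk("%s request %p, %u/%u, status %d", __get_str(name),
		  __entry->request, __entry->actual, __entry->length,
		  __entry->status)
);

DEFINE_EVENT(musb_req, musb_req_gb,
	TP_PROTO(struct musb_request *req), TP_ARGS(req));
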
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 10d30afe4a3c..844a309fe895 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -206,7 +206,7 @@ static inline void musb_try_b_hnp_enable(struct musb *musb)
void __iomem *mbase = musb->mregs;
u8 devctl;
- dev_dbg(musb->controller, "HNP: Setting HR\n");
+ musb_dbg(musb, "HNP: Setting HR");
devctl = musb_readb(mbase, MUSB_DEVCTL);
musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
}
@@ -303,7 +303,7 @@ __acquires(musb->lock)
/* Maybe start the first request in the queue */
request = next_request(musb_ep);
if (!musb_ep->busy && request) {
- dev_dbg(musb->controller, "restarting the request\n");
+ musb_dbg(musb, "restarting the request");
musb_ep_restart(musb, request);
}
@@ -550,7 +550,7 @@ static void ep0_txstate(struct musb *musb)
if (!req) {
/* WARN_ON(1); */
- dev_dbg(musb->controller, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
+ musb_dbg(musb, "odd; csr0 %04x", musb_readw(regs, MUSB_CSR0));
return;
}
@@ -607,7 +607,7 @@ musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
/* NOTE: earlier 2.6 versions changed setup packets to host
* order, but now USB packets always stay in USB byte order.
*/
- dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n",
+ musb_dbg(musb, "SETUP req%02x.%02x v%04x i%04x l%d",
req->bRequestType,
req->bRequest,
le16_to_cpu(req->wValue),
@@ -675,7 +675,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
csr = musb_readw(regs, MUSB_CSR0);
len = musb_readb(regs, MUSB_COUNT0);
- dev_dbg(musb->controller, "csr %04x, count %d, ep0stage %s\n",
+ musb_dbg(musb, "csr %04x, count %d, ep0stage %s",
csr, len, decode_ep0stage(musb->ep0_state));
if (csr & MUSB_CSR0_P_DATAEND) {
@@ -752,7 +752,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
/* enter test mode if needed (exit by reset) */
else if (musb->test_mode) {
- dev_dbg(musb->controller, "entering TESTMODE\n");
+ musb_dbg(musb, "entering TESTMODE");
if (MUSB_TEST_PACKET == musb->test_mode_nr)
musb_load_testpacket(musb);
@@ -864,7 +864,7 @@ setup:
break;
}
- dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n",
+ musb_dbg(musb, "handled %d, csr %04x, ep0stage %s",
handled, csr,
decode_ep0stage(musb->ep0_state));
@@ -881,7 +881,7 @@ setup:
if (handled < 0) {
musb_ep_select(mbase, 0);
stall:
- dev_dbg(musb->controller, "stall (%d)\n", handled);
+ musb_dbg(musb, "stall (%d)", handled);
musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
musb->ep0_state = MUSB_EP0_STAGE_IDLE;
finish:
@@ -961,7 +961,7 @@ musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
status = 0;
break;
default:
- dev_dbg(musb->controller, "ep0 request queued in state %d\n",
+ musb_dbg(musb, "ep0 request queued in state %d",
musb->ep0_state);
status = -EINVAL;
goto cleanup;
@@ -970,7 +970,7 @@ musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
/* add request to the list */
list_add_tail(&req->list, &ep->req_list);
- dev_dbg(musb->controller, "queue to %s (%s), length=%d\n",
+ musb_dbg(musb, "queue to %s (%s), length=%d",
ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
req->request.length);
@@ -1063,7 +1063,7 @@ static int musb_g_ep0_halt(struct usb_ep *e, int value)
musb->ackpend = 0;
break;
default:
- dev_dbg(musb->controller, "ep0 can't halt in state %d\n", musb->ep0_state);
+ musb_dbg(musb, "ep0 can't halt in state %d", musb->ep0_state);
status = -EINVAL;
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index d227a71d85e1..53bc4ceefe89 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -44,6 +44,7 @@
#include "musb_core.h"
#include "musb_host.h"
+#include "musb_trace.h"
/* MUSB HOST status 22-mar-2006
*
@@ -131,7 +132,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
* I found using a usb-ethernet device and running iperf
* (client on AM335x) has very high chance to trigger it.
*
- * Better to turn on dev_dbg() in musb_cleanup_urb() with
+ * Better to turn on musb_dbg() in musb_cleanup_urb() with
* CPPI enabled to see the issue when aborting the tx channel.
*/
if (dev_WARN_ONCE(musb->controller, retries-- < 1,
@@ -225,8 +226,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
void *buf = urb->transfer_buffer;
u32 offset = 0;
struct musb_hw_ep *hw_ep = qh->hw_ep;
- unsigned pipe = urb->pipe;
- u8 address = usb_pipedevice(pipe);
int epnum = hw_ep->epnum;
/* initialize software qh state */
@@ -254,16 +253,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
len = urb->transfer_buffer_length - urb->actual_length;
}
- dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
- qh, urb, address, qh->epnum,
- is_in ? "in" : "out",
- ({char *s; switch (qh->type) {
- case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
- case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
- case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
- default: s = "-intr"; break;
- } s; }),
- epnum, buf + offset, len);
+ trace_musb_urb_start(musb, urb);
/* Configure endpoint */
musb_ep_set_qh(hw_ep, is_in, qh);
@@ -277,7 +267,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
switch (qh->type) {
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
- dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
+ musb_dbg(musb, "check whether there's still time for periodic Tx");
frame = musb_readw(mbase, MUSB_FRAME);
/* FIXME this doesn't implement that scheduling policy ...
* or handle framecounter wrapping
@@ -291,7 +281,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
} else {
qh->frame = urb->start_frame;
/* enable SOF interrupt so we can count down */
- dev_dbg(musb->controller, "SOF for %d\n", epnum);
+ musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
@@ -299,7 +289,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
break;
default:
start:
- dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
+ musb_dbg(musb, "Start TX%d %s", epnum,
hw_ep->tx_channel ? "dma" : "pio");
if (!hw_ep->tx_channel)
@@ -314,14 +304,7 @@ static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
- dev_dbg(musb->controller,
- "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
- urb, urb->complete, status,
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- urb->actual_length, urb->transfer_buffer_length
- );
+ trace_musb_urb_gb(musb, urb);
usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
spin_unlock(&musb->lock);
@@ -441,7 +424,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
* for RX, until we have a test case to understand the behavior of TX.
*/
if ((!status || !is_in) && qh && qh->is_ready) {
- dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
+ musb_dbg(musb, "... next ep%d %cX urb %p",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
}
@@ -486,7 +469,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
/* musb_ep_select(mbase, epnum); */
rx_count = musb_readw(epio, MUSB_RXCOUNT);
- dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+ musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
urb->transfer_buffer, qh->offset,
urb->transfer_buffer_length);
@@ -508,7 +491,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
status = -EOVERFLOW;
urb->error_count++;
}
- dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
+ musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
do_flush = 1;
} else
length = rx_count;
@@ -526,7 +509,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
if (rx_count > length) {
if (urb->status == -EINPROGRESS)
urb->status = -EOVERFLOW;
- dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
+ musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
do_flush = 1;
} else
length = rx_count;
@@ -750,8 +733,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
u8 use_dma = 1;
u16 csr;
- dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
- "h_addr%02x h_port%02x bytes %d\n",
+ musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
+ "h_addr%02x h_port%02x bytes %d",
is_out ? "-->" : "<--",
epnum, urb, urb->dev->speed,
qh->addr_reg, qh->epnum, is_out ? "out" : "in",
@@ -969,7 +952,7 @@ finish:
}
csr |= MUSB_RXCSR_H_REQPKT;
- dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
+ musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
}
@@ -1085,15 +1068,15 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
request = (struct usb_ctrlrequest *) urb->setup_packet;
if (!request->wLength) {
- dev_dbg(musb->controller, "start no-DATA\n");
+ musb_dbg(musb, "start no-DATA");
break;
} else if (request->bRequestType & USB_DIR_IN) {
- dev_dbg(musb->controller, "start IN-DATA\n");
+ musb_dbg(musb, "start IN-DATA");
musb->ep0_stage = MUSB_EP0_IN;
more = true;
break;
} else {
- dev_dbg(musb->controller, "start OUT-DATA\n");
+ musb_dbg(musb, "start OUT-DATA");
musb->ep0_stage = MUSB_EP0_OUT;
more = true;
}
@@ -1105,7 +1088,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
if (fifo_count) {
fifo_dest = (u8 *) (urb->transfer_buffer
+ urb->actual_length);
- dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
+ musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
fifo_count,
(fifo_count == 1) ? "" : "s",
fifo_dest);
@@ -1150,7 +1133,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
? musb_readb(epio, MUSB_COUNT0)
: 0;
- dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+ musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
csr, qh, len, urb, musb->ep0_stage);
/* if we just did status stage, we are done */
@@ -1161,15 +1144,15 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
/* prepare status */
if (csr & MUSB_CSR0_H_RXSTALL) {
- dev_dbg(musb->controller, "STALLING ENDPOINT\n");
+ musb_dbg(musb, "STALLING ENDPOINT");
status = -EPIPE;
} else if (csr & MUSB_CSR0_H_ERROR) {
- dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
+ musb_dbg(musb, "no response, csr0 %04x", csr);
status = -EPROTO;
} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
- dev_dbg(musb->controller, "control NAK timeout\n");
+ musb_dbg(musb, "control NAK timeout");
/* NOTE: this code path would be a good place to PAUSE a
* control transfer, if another one is queued, so that
@@ -1184,7 +1167,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
}
if (status) {
- dev_dbg(musb->controller, "aborting\n");
+ musb_dbg(musb, "aborting");
retval = IRQ_HANDLED;
if (urb)
urb->status = status;
@@ -1237,7 +1220,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
- dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
+ musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
}
musb_writew(epio, MUSB_CSR0, csr);
@@ -1291,38 +1274,37 @@ void musb_host_tx(struct musb *musb, u8 epnum)
/* with CPPI, DMA sometimes triggers "extra" irqs */
if (!urb) {
- dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
return;
}
pipe = urb->pipe;
dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
- dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
+ trace_musb_urb_tx(musb, urb);
+ musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
dma ? ", dma" : "");
/* check for errors */
if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
/* dma was disabled, fifo flushed */
- dev_dbg(musb->controller, "TX end %d stall\n", epnum);
+ musb_dbg(musb, "TX end %d stall", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
/* (NON-ISO) dma was disabled, fifo flushed */
- dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
+ musb_dbg(musb, "TX 3strikes on ep=%d", epnum);
status = -ETIMEDOUT;
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
&& !list_is_singular(&musb->out_bulk)) {
- dev_dbg(musb->controller,
- "NAK timeout on TX%d ep\n", epnum);
+ musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
musb_bulk_nak_timeout(musb, hw_ep, 0);
} else {
- dev_dbg(musb->controller,
- "TX end=%d device not responding\n", epnum);
+ musb_dbg(musb, "TX ep%d device not responding", epnum);
/* NOTE: this code path would be a good place to PAUSE a
* transfer, if there's some other (nonperiodic) tx urb
* that could use this fifo. (dma complicates it...)
@@ -1368,7 +1350,7 @@ done:
/* second cppi case */
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
- dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
return;
}
@@ -1427,8 +1409,9 @@ done:
* FIFO mode too...
*/
if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
- dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
- "CSR %04x\n", tx_csr);
+ musb_dbg(musb,
+ "DMA complete but FIFO not empty, CSR %04x",
+ tx_csr);
return;
}
}
@@ -1494,7 +1477,7 @@ done:
return;
}
} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
- dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
+ musb_dbg(musb, "not complete, but DMA enabled?");
return;
}
@@ -1723,7 +1706,7 @@ static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
d_status = -EOVERFLOW;
urb->error_count++;
}
- dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
+ musb_dbg(musb, "** OVERFLOW %d into %d",
rx_count, d->length);
length = d->length;
@@ -1847,28 +1830,26 @@ void musb_host_rx(struct musb *musb, u8 epnum)
* usbtest #11 (unlinks) triggers it regularly, sometimes
* with fifo full. (Only with DMA??)
*/
- dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
- musb_readw(epio, MUSB_RXCOUNT));
+ musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
+ epnum, val, musb_readw(epio, MUSB_RXCOUNT));
musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
return;
}
pipe = urb->pipe;
- dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
- epnum, rx_csr, urb->actual_length,
- dma ? dma->actual_len : 0);
+ trace_musb_urb_rx(musb, urb);
/* check for errors, concurrent stall & unlink is not really
* handled yet! */
if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
- dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
+ musb_dbg(musb, "RX end %d STALL", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
- dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
+ musb_dbg(musb, "end %d RX proto error", epnum);
status = -EPROTO;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
@@ -1879,7 +1860,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
- dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
+ musb_dbg(musb, "RX end %d NAK timeout", epnum);
/* NOTE: NAKing is *NOT* an error, so we want to
* continue. Except ... if there's a request for
@@ -1902,12 +1883,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
goto finish;
} else {
- dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
+ musb_dbg(musb, "RX end %d ISO data error", epnum);
/* packet error reported later */
iso_err = true;
}
} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
- dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
+ musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
epnum);
status = -EPROTO;
}
@@ -1952,7 +1933,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
done = true;
}
- dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
+ musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
xfer_len, dma ? ", dma" : "");
rx_csr &= ~MUSB_RXCSR_H_REQPKT;
@@ -1973,8 +1954,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
musb_dma_cppi41(musb)) {
done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
- dev_dbg(hw_ep->musb->controller,
- "ep %d dma %s, rxcsr %04x, rxcount %d\n",
+ musb_dbg(hw_ep->musb,
+ "ep %d dma %s, rxcsr %04x, rxcount %d",
epnum, done ? "off" : "reset",
musb_readw(epio, MUSB_RXCSR),
musb_readw(epio, MUSB_RXCOUNT));
@@ -2001,8 +1982,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* we are expecting IN packets */
if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
musb_dma_cppi41(musb)) && dma) {
- dev_dbg(hw_ep->musb->controller,
- "RX%d count %d, buffer 0x%llx len %d/%d\n",
+ musb_dbg(hw_ep->musb,
+ "RX%d count %d, buffer 0x%llx len %d/%d",
epnum, musb_readw(epio, MUSB_RXCOUNT),
(unsigned long long) urb->transfer_dma
+ urb->actual_length,
@@ -2054,7 +2035,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
done = musb_host_packet_rx(musb, urb,
epnum, iso_err);
}
- dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
+ musb_dbg(musb, "read %spacket", done ? "last " : "");
}
}
@@ -2178,7 +2159,7 @@ static int musb_schedule(
idle = 1;
qh->mux = 0;
hw_ep = musb->endpoints + best_end;
- dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
+ musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
success:
if (head) {
idle = list_empty(head);
@@ -2210,6 +2191,8 @@ static int musb_urb_enqueue(
if (!is_host_active(musb) || !musb->is_active)
return -ENODEV;
+ trace_musb_urb_enq(musb, urb);
+
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_link_urb_to_ep(hcd, urb);
qh = ret ? NULL : hep->hcpriv;
@@ -2400,8 +2383,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
dma = is_in ? ep->rx_channel : ep->tx_channel;
if (dma) {
status = ep->musb->dma_controller->channel_abort(dma);
- dev_dbg(musb->controller,
- "abort %cX%d DMA for urb %p --> %d\n",
+ musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
is_in ? 'R' : 'T', ep->epnum,
urb, status);
urb->actual_length += dma->actual_len;
@@ -2447,10 +2429,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
int is_in = usb_pipein(urb->pipe);
int ret;
- dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe),
- is_in ? "in" : "out");
+ trace_musb_urb_deq(musb, urb);
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
diff --git a/drivers/usb/musb/musb_trace.c b/drivers/usb/musb/musb_trace.c
new file mode 100644
index 000000000000..70973d901a21
--- /dev/null
+++ b/drivers/usb/musb/musb_trace.c
@@ -0,0 +1,33 @@
+/*
+ * musb_trace.c - MUSB Controller Trace Support
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Bin Liu <b-liu@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "musb_trace.h"
+
+void musb_dbg(struct musb *musb, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ trace_musb_log(musb, &vaf);
+
+ va_end(args);
+}
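The helper above funnels every formatted debug string into a single tracepoint, which is what lets the hunks above drop dev_dbg(). As a reference, a minimal sketch of a converted call site (the wrapper function here is illustrative, not part of the patch):

/* Sketch: musb_dbg() replaces dev_dbg(musb->controller, ...);
 * the trailing '\n' is dropped because the tracepoint stores
 * the string as-is. */
static void example_report_csr(struct musb *musb, u8 epnum, u16 csr)
{
	musb_dbg(musb, "EP%d status, csr %04x", epnum, csr);
}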
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
new file mode 100644
index 000000000000..f031c9e74322
--- /dev/null
+++ b/drivers/usb/musb/musb_trace.h
@@ -0,0 +1,371 @@
+/*
+ * musb_trace.h - MUSB Controller Trace Support
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Bin Liu <b-liu@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM musb
+
+#if !defined(__MUSB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MUSB_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/usb.h>
+#include "musb_core.h"
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+#include "cppi_dma.h"
+#endif
+
+#define MUSB_MSG_MAX 500
+
+TRACE_EVENT(musb_log,
+ TP_PROTO(struct musb *musb, struct va_format *vaf),
+ TP_ARGS(musb, vaf),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __dynamic_array(char, msg, MUSB_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ vsnprintf(__get_str(msg), MUSB_MSG_MAX, vaf->fmt, *vaf->va);
+ ),
+ TP_printk("%s: %s", __get_str(name), __get_str(msg))
+);
+
+DECLARE_EVENT_CLASS(musb_regb,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_ARGS(caller, addr, offset, data),
+ TP_STRUCT__entry(
+ __field(void *, caller)
+ __field(const void *, addr)
+ __field(unsigned int, offset)
+ __field(u8, data)
+ ),
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->data = data;
+ ),
+ TP_printk("%pS: %p + %04x: %02x",
+ __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regb, musb_readb,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regb, musb_writeb,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DECLARE_EVENT_CLASS(musb_regw,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_ARGS(caller, addr, offset, data),
+ TP_STRUCT__entry(
+ __field(void *, caller)
+ __field(const void *, addr)
+ __field(unsigned int, offset)
+ __field(u16, data)
+ ),
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->data = data;
+ ),
+ TP_printk("%pS: %p + %04x: %04x",
+ __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regw, musb_readw,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regw, musb_writew,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DECLARE_EVENT_CLASS(musb_regl,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_ARGS(caller, addr, offset, data),
+ TP_STRUCT__entry(
+ __field(void *, caller)
+ __field(const void *, addr)
+ __field(unsigned int, offset)
+ __field(u32, data)
+ ),
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->data = data;
+ ),
+ TP_printk("%pS: %p + %04x: %08x",
+ __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regl, musb_readl,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regl, musb_writel,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+TRACE_EVENT(musb_isr,
+ TP_PROTO(struct musb *musb),
+ TP_ARGS(musb),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __field(u8, int_usb)
+ __field(u16, int_tx)
+ __field(u16, int_rx)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ __entry->int_usb = musb->int_usb;
+ __entry->int_tx = musb->int_tx;
+ __entry->int_rx = musb->int_rx;
+ ),
+ TP_printk("%s: usb %02x, tx %04x, rx %04x",
+ __get_str(name), __entry->int_usb,
+ __entry->int_tx, __entry->int_rx
+ )
+);
+
+DECLARE_EVENT_CLASS(musb_urb,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __field(struct urb *, urb)
+ __field(unsigned int, pipe)
+ __field(int, status)
+ __field(unsigned int, flag)
+ __field(u32, buf_len)
+ __field(u32, actual_len)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ __entry->urb = urb;
+ __entry->pipe = urb->pipe;
+ __entry->status = urb->status;
+ __entry->flag = urb->transfer_flags;
+ __entry->buf_len = urb->transfer_buffer_length;
+ __entry->actual_len = urb->actual_length;
+ ),
+ TP_printk("%s: %p, dev%d ep%d%s, flag 0x%x, len %d/%d, status %d",
+ __get_str(name), __entry->urb,
+ usb_pipedevice(__entry->pipe),
+ usb_pipeendpoint(__entry->pipe),
+ usb_pipein(__entry->pipe) ? "in" : "out",
+ __entry->flag,
+ __entry->actual_len, __entry->buf_len,
+ __entry->status
+ )
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_start,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_gb,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_rx,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_tx,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_enq,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_deq,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DECLARE_EVENT_CLASS(musb_req,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(struct usb_request *, req)
+ __field(u8, is_tx)
+ __field(u8, epnum)
+ __field(int, status)
+ __field(unsigned int, buf_len)
+ __field(unsigned int, actual_len)
+ __field(unsigned int, zero)
+ __field(unsigned int, short_not_ok)
+ __field(unsigned int, no_interrupt)
+ ),
+ TP_fast_assign(
+ __entry->req = &req->request;
+ __entry->is_tx = req->tx;
+ __entry->epnum = req->epnum;
+ __entry->status = req->request.status;
+ __entry->buf_len = req->request.length;
+ __entry->actual_len = req->request.actual;
+ __entry->zero = req->request.zero;
+ __entry->short_not_ok = req->request.short_not_ok;
+ __entry->no_interrupt = req->request.no_interrupt;
+ ),
+ TP_printk("%p, ep%d %s, %s%s%s, len %d/%d, status %d",
+ __entry->req, __entry->epnum,
+ __entry->is_tx ? "tx/IN" : "rx/OUT",
+ __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "I" : "i",
+ __entry->actual_len, __entry->buf_len,
+ __entry->status
+ )
+);
+
+DEFINE_EVENT(musb_req, musb_req_gb,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_tx,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_rx,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_alloc,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_free,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_start,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_enq,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_deq,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+DECLARE_EVENT_CLASS(musb_cppi41,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch),
+ TP_STRUCT__entry(
+ __field(struct cppi41_dma_channel *, ch)
+ __string(name, dev_name(ch->hw_ep->musb->controller))
+ __field(u8, hwep)
+ __field(u8, port)
+ __field(u8, is_tx)
+ __field(u32, len)
+ __field(u32, prog_len)
+ __field(u32, xferred)
+ ),
+ TP_fast_assign(
+ __entry->ch = ch;
+ __assign_str(name, dev_name(ch->hw_ep->musb->controller));
+ __entry->hwep = ch->hw_ep->epnum;
+ __entry->port = ch->port_num;
+ __entry->is_tx = ch->is_tx;
+ __entry->len = ch->total_len;
+ __entry->prog_len = ch->prog_len;
+ __entry->xferred = ch->transferred;
+ ),
+ TP_printk("%s: %p, hwep%d ch%d%s, prog_len %d, len %d/%d",
+ __get_str(name), __entry->ch, __entry->hwep,
+ __entry->port, __entry->is_tx ? "tx" : "rx",
+ __entry->prog_len, __entry->xferred, __entry->len
+ )
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_done,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_gb,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_config,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_cont,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_alloc,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_abort,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_free,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+#endif /* CONFIG_USB_TI_CPPI41_DMA */
+
+#endif /* __MUSB_TRACE_H */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE musb_trace
+
+#include <trace/define_trace.h>
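The musb_regb/regw/regl event classes take the caller's return address as a cookie so each logged register access can be attributed. A sketch of a byte-read accessor instrumented this way, assuming the actual hook-up lives in musb_core's I/O ops (that wiring is outside this diff):

/* Hypothetical instrumented accessor; matches the musb_regb
 * prototype (caller, base, offset, data). */
static u8 traced_readb(const void __iomem *addr, unsigned int offset)
{
	u8 data = __raw_readb(addr + offset);

	trace_musb_readb(__builtin_return_address(0), addr, offset, data);
	return data;
}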
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 92d5f718659b..192248f974ec 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -55,8 +55,7 @@ void musb_host_finish_resume(struct work_struct *work)
power = musb_readb(musb->mregs, MUSB_POWER);
power &= ~MUSB_POWER_RESUME;
- dev_dbg(musb->controller, "root port resume stopped, power %02x\n",
- power);
+ musb_dbg(musb, "root port resume stopped, power %02x", power);
musb_writeb(musb->mregs, MUSB_POWER, power);
/*
@@ -104,7 +103,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
break;
}
- dev_dbg(musb->controller, "Root port suspended, power %02x\n", power);
+ musb_dbg(musb, "Root port suspended, power %02x", power);
musb->port1_status |= USB_PORT_STAT_SUSPEND;
switch (musb->xceiv->otg->state) {
@@ -123,7 +122,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
musb_platform_try_idle(musb, 0);
break;
default:
- dev_dbg(musb->controller, "bogus rh suspend? %s\n",
+ musb_dbg(musb, "bogus rh suspend? %s",
usb_otg_state_string(musb->xceiv->otg->state));
}
} else if (power & MUSB_POWER_SUSPENDM) {
@@ -131,7 +130,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
power |= MUSB_POWER_RESUME;
musb_writeb(mbase, MUSB_POWER, power);
- dev_dbg(musb->controller, "Root port resuming, power %02x\n", power);
+ musb_dbg(musb, "Root port resuming, power %02x", power);
/* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
@@ -146,7 +145,7 @@ void musb_port_reset(struct musb *musb, bool do_reset)
void __iomem *mbase = musb->mregs;
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) {
- dev_dbg(musb->controller, "HNP: Returning from HNP; no hub reset from b_idle\n");
+ musb_dbg(musb, "HNP: Returning from HNP; no hub reset from b_idle");
musb->port1_status &= ~USB_PORT_STAT_RESET;
return;
}
@@ -194,7 +193,7 @@ void musb_port_reset(struct musb *musb, bool do_reset)
schedule_delayed_work(&musb->deassert_reset_work,
msecs_to_jiffies(50));
} else {
- dev_dbg(musb->controller, "root port reset stopped\n");
+ musb_dbg(musb, "root port reset stopped");
musb_platform_pre_root_reset_end(musb);
musb_writeb(mbase, MUSB_POWER,
power & ~MUSB_POWER_RESET);
@@ -202,7 +201,7 @@ void musb_port_reset(struct musb *musb, bool do_reset)
power = musb_readb(mbase, MUSB_POWER);
if (power & MUSB_POWER_HSMODE) {
- dev_dbg(musb->controller, "high-speed device connected\n");
+ musb_dbg(musb, "high-speed device connected");
musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
}
@@ -242,7 +241,7 @@ void musb_root_disconnect(struct musb *musb)
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
break;
default:
- dev_dbg(musb->controller, "host disconnect (%s)\n",
+ musb_dbg(musb, "host disconnect (%s)",
usb_otg_state_string(musb->xceiv->otg->state));
}
}
@@ -337,7 +336,7 @@ int musb_hub_control(
default:
goto error;
}
- dev_dbg(musb->controller, "clear feature %d\n", wValue);
+ musb_dbg(musb, "clear feature %d", wValue);
musb->port1_status &= ~(1 << wValue);
break;
case GetHubDescriptor:
@@ -372,8 +371,7 @@ int musb_hub_control(
(__le32 *) buf);
/* port change status is more interesting */
- dev_dbg(musb->controller, "port status %08x\n",
- musb->port1_status);
+ musb_dbg(musb, "port status %08x", musb->port1_status);
break;
case SetPortFeature:
if ((wIndex & 0xff) != 1)
@@ -443,7 +441,7 @@ int musb_hub_control(
default:
goto error;
}
- dev_dbg(musb->controller, "set feature %d\n", wValue);
+ musb_dbg(musb, "set feature %d", wValue);
musb->port1_status |= 1 << wValue;
break;
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 8abfe4ec62fb..3620073da58c 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -117,7 +117,7 @@ static void configure_channel(struct dma_channel *channel,
u8 bchannel = musb_channel->idx;
u16 csr = 0;
- dev_dbg(musb->controller, "%p, pkt_sz %d, addr %pad, len %d, mode %d\n",
+ musb_dbg(musb, "%p, pkt_sz %d, addr %pad, len %d, mode %d",
channel, packet_sz, &dma_addr, len, mode);
if (mode) {
@@ -152,7 +152,7 @@ static int dma_channel_program(struct dma_channel *channel,
struct musb_dma_controller *controller = musb_channel->controller;
struct musb *musb = controller->private_data;
- dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d\n",
+ musb_dbg(musb, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d",
musb_channel->epnum,
musb_channel->transmit ? "Tx" : "Rx",
packet_sz, &dma_addr, len, mode);
@@ -266,7 +266,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
#endif
if (!int_hsdma) {
- dev_dbg(musb->controller, "spurious DMA irq\n");
+ musb_dbg(musb, "spurious DMA irq");
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
musb_channel = (struct musb_dma_channel *)
@@ -280,7 +280,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
}
}
- dev_dbg(musb->controller, "int_hsdma = 0x%x\n", int_hsdma);
+ musb_dbg(musb, "int_hsdma = 0x%x", int_hsdma);
if (!int_hsdma)
goto done;
@@ -307,7 +307,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
channel->actual_len = addr
- musb_channel->start_addr;
- dev_dbg(musb->controller, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n",
+ musb_dbg(musb, "ch %p, 0x%x -> 0x%x (%zu / %d) %s",
channel, musb_channel->start_addr,
addr, channel->actual_len,
musb_channel->len,
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 76500515dd8b..c6ee16660572 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -256,12 +256,10 @@ static int sunxi_musb_init(struct musb *musb)
writeb(SUNXI_MUSB_VEND0_PIO_MODE, musb->mregs + SUNXI_MUSB_VEND0);
/* Register notifier before calling phy_init() */
- if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) {
- ret = extcon_register_notifier(glue->extcon, EXTCON_USB_HOST,
- &glue->host_nb);
- if (ret)
- goto error_reset_assert;
- }
+ ret = extcon_register_notifier(glue->extcon, EXTCON_USB_HOST,
+ &glue->host_nb);
+ if (ret)
+ goto error_reset_assert;
ret = phy_init(glue->phy);
if (ret)
@@ -275,9 +273,8 @@ static int sunxi_musb_init(struct musb *musb)
return 0;
error_unregister_notifier:
- if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
- extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
- &glue->host_nb);
+ extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
+ &glue->host_nb);
error_reset_assert:
if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
reset_control_assert(glue->rst);
@@ -301,9 +298,8 @@ static int sunxi_musb_exit(struct musb *musb)
phy_exit(glue->phy);
- if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
- extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
- &glue->host_nb);
+ extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
+ &glue->host_nb);
if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
reset_control_assert(glue->rst);
@@ -315,25 +311,6 @@ static int sunxi_musb_exit(struct musb *musb)
return 0;
}
-static int sunxi_set_mode(struct musb *musb, u8 mode)
-{
- struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
- int ret;
-
- if (mode == MUSB_HOST) {
- ret = phy_power_on(glue->phy);
- if (ret)
- return ret;
-
- set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
- /* Stop musb work from turning vbus off again */
- set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
- musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
- }
-
- return 0;
-}
-
static void sunxi_musb_enable(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
@@ -354,13 +331,13 @@ static void sunxi_musb_disable(struct musb *musb)
clear_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags);
}
-struct dma_controller *sunxi_musb_dma_controller_create(struct musb *musb,
- void __iomem *base)
+static struct dma_controller *
+sunxi_musb_dma_controller_create(struct musb *musb, void __iomem *base)
{
return NULL;
}
-void sunxi_musb_dma_controller_destroy(struct dma_controller *c)
+static void sunxi_musb_dma_controller_destroy(struct dma_controller *c)
{
}
@@ -582,7 +559,6 @@ static const struct musb_platform_ops sunxi_musb_ops = {
.exit = sunxi_musb_exit,
.enable = sunxi_musb_enable,
.disable = sunxi_musb_disable,
- .set_mode = sunxi_set_mode,
.fifo_offset = sunxi_musb_fifo_offset,
.ep_offset = sunxi_musb_ep_offset,
.busctl_offset = sunxi_musb_busctl_offset,
@@ -638,10 +614,6 @@ static int sunxi_musb_probe(struct platform_device *pdev)
return -EINVAL;
}
- glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue)
- return -ENOMEM;
-
memset(&pdata, 0, sizeof(pdata));
switch (usb_get_dr_mode(&pdev->dev)) {
#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_HOST
@@ -649,15 +621,13 @@ static int sunxi_musb_probe(struct platform_device *pdev)
pdata.mode = MUSB_PORT_MODE_HOST;
break;
#endif
+#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_GADGET
+ case USB_DR_MODE_PERIPHERAL:
+ pdata.mode = MUSB_PORT_MODE_GADGET;
+ break;
+#endif
#ifdef CONFIG_USB_MUSB_DUAL_ROLE
case USB_DR_MODE_OTG:
- glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
- if (IS_ERR(glue->extcon)) {
- if (PTR_ERR(glue->extcon) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "Invalid or missing extcon\n");
- return PTR_ERR(glue->extcon);
- }
pdata.mode = MUSB_PORT_MODE_DUAL_ROLE;
break;
#endif
@@ -668,6 +638,10 @@ static int sunxi_musb_probe(struct platform_device *pdev)
pdata.platform_ops = &sunxi_musb_ops;
pdata.config = &sunxi_musb_hdrc_config;
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
glue->dev = &pdev->dev;
INIT_WORK(&glue->work, sunxi_musb_work);
glue->host_nb.notifier_call = sunxi_musb_host_notifier;
@@ -701,6 +675,14 @@ static int sunxi_musb_probe(struct platform_device *pdev)
}
}
+ glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
+ if (IS_ERR(glue->extcon)) {
+ if (PTR_ERR(glue->extcon) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "Invalid or missing extcon\n");
+ return PTR_ERR(glue->extcon);
+ }
+
glue->phy = devm_phy_get(&pdev->dev, "usb");
if (IS_ERR(glue->phy)) {
if (PTR_ERR(glue->phy) == -EPROBE_DEFER)
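The probe reordering above follows the standard -EPROBE_DEFER pattern: resolve external dependencies such as the extcon device before the rest of probe, defer quietly, and only log genuine failures. Distilled into an illustrative helper (the function name is an assumption):

/* Defer-or-fail idiom used by the probe code above. */
static struct extcon_dev *get_extcon_or_defer(struct device *dev)
{
	struct extcon_dev *edev = extcon_get_edev_by_phandle(dev, 0);

	if (IS_ERR(edev) && PTR_ERR(edev) != -EPROBE_DEFER)
		dev_err(dev, "Invalid or missing extcon\n");
	/* caller propagates PTR_ERR(edev), including -EPROBE_DEFER */
	return edev;
}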
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index c6904742e2aa..b9c409a18faa 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,6 +21,7 @@ config AB8500_USB
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
Enable this to support Freescale USB OTG transceiver.
@@ -29,6 +30,7 @@ config ISP1301_OMAP
tristate "Philips ISP1301 with OMAP OTG"
depends on I2C && ARCH_OMAP_OTG
depends on USB
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
If you say yes here you get support for the Philips ISP1301
@@ -43,7 +45,7 @@ config ISP1301_OMAP
config KEYSTONE_USB_PHY
tristate "Keystone USB PHY Driver"
depends on ARCH_KEYSTONE || COMPILE_TEST
- select NOP_USB_XCEIV
+ depends on NOP_USB_XCEIV
help
Enable this to support Keystone USB phy. This driver provides
interface to interact with USB 2.0 and USB 3.0 PHY that is part
@@ -51,6 +53,7 @@ config KEYSTONE_USB_PHY
config NOP_USB_XCEIV
tristate "NOP USB Transceiver Driver"
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, NOP can't be built-in
select USB_PHY
help
This driver is to be used by all the USB transceivers which are either
@@ -63,9 +66,9 @@ config AM335X_CONTROL_USB
config AM335X_PHY_USB
tristate "AM335x USB PHY Driver"
depends on ARM || COMPILE_TEST
+ depends on NOP_USB_XCEIV
select USB_PHY
select AM335X_CONTROL_USB
- select NOP_USB_XCEIV
select USB_COMMON
help
This driver provides PHY support for the USB PHY that is part of the
@@ -92,6 +95,7 @@ config TWL6030_USB
config USB_GPIO_VBUS
tristate "GPIO based peripheral-only VBUS sensing 'transceiver'"
depends on GPIOLIB || COMPILE_TEST
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
Provides simple GPIO VBUS sensing for controllers with an
@@ -112,6 +116,7 @@ config OMAP_OTG
config TAHVO_USB
tristate "Tahvo USB transceiver driver"
depends on MFD_RETU && EXTCON
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
Enable this to support USB transceiver on Tahvo. This is used
@@ -140,6 +145,7 @@ config USB_ISP1301
config USB_MSM_OTG
tristate "Qualcomm on-chip USB OTG controller support"
depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
depends on RESET_CONTROLLER
depends on EXTCON
select USB_PHY
@@ -169,6 +175,7 @@ config USB_QCOM_8X16_PHY
config USB_MV_OTG
tristate "Marvell USB OTG support"
depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
Say Y here if you want to build Marvell USB OTG transceiver
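The recurring `depends on USB_GADGET || !USB_GADGET` line looks like a tautology but is a standard Kconfig idiom: when USB_GADGET=m the expression evaluates to m, capping the dependent symbol at m so built-in code can never reference gadget module symbols. A hypothetical symbol showing the resulting truth table:

config EXAMPLE_PHY                  # illustrative symbol, not in the tree
	tristate "Example transceiver"
	depends on USB_GADGET || !USB_GADGET
	# USB_GADGET=y -> expression is y, EXAMPLE_PHY may be y/m/n
	# USB_GADGET=m -> expression is m, EXAMPLE_PHY limited to m/n
	# USB_GADGET=n -> !USB_GADGET is y, EXAMPLE_PHY may be y/m/n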
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index a262a4343f29..7e5aece769da 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -54,7 +54,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
return am_phy->id;
}
- am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node);
+ am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL);
if (ret)
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 72b387d592c2..8a34759727bb 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/extcon.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -35,6 +36,7 @@
#include <linux/of_device.h>
#include <linux/reboot.h>
#include <linux/reset.h>
+#include <linux/types.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
@@ -42,10 +45,183 @@
#include <linux/usb/ulpi.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>
-#include <linux/usb/msm_hsusb.h>
#include <linux/usb/msm_hsusb_hw.h>
#include <linux/regulator/consumer.h>
+/**
+ * OTG control
+ *
+ * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in host
+ * only configuration.
+ * OTG_PHY_CONTROL	Id/VBUS notifications come from USB PHY.
+ * OTG_PMIC_CONTROL	Id/VBUS notifications come from PMIC hardware.
+ * OTG_USER_CONTROL	Id/VBUS notifications come from user via sysfs.
+ *
+ */
+enum otg_control_type {
+ OTG_NO_CONTROL = 0,
+ OTG_PHY_CONTROL,
+ OTG_PMIC_CONTROL,
+ OTG_USER_CONTROL,
+};
+
+/**
+ * PHY used in
+ *
+ * INVALID_PHY Unsupported PHY
+ * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY
+ * SNPS_28NM_INTEGRATED_PHY	Synopsys 28nm integrated PHY
+ *
+ */
+enum msm_usb_phy_type {
+ INVALID_PHY = 0,
+ CI_45NM_INTEGRATED_PHY,
+ SNPS_28NM_INTEGRATED_PHY,
+};
+
+#define IDEV_CHG_MAX 1500
+#define IUNIT 100
+
+/**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * process is not yet started.
+ * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
+ * between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
+ * between DCP and CDP).
+ * USB_CHG_STATE_DETECTED USB charger type is determined.
+ *
+ */
+enum usb_chg_state {
+ USB_CHG_STATE_UNDEFINED = 0,
+ USB_CHG_STATE_WAIT_FOR_DCD,
+ USB_CHG_STATE_DCD_DONE,
+ USB_CHG_STATE_PRIMARY_DONE,
+ USB_CHG_STATE_SECONDARY_DONE,
+ USB_CHG_STATE_DETECTED,
+};
+
+/**
+ * USB charger types
+ *
+ * USB_INVALID_CHARGER Invalid USB charger.
+ * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
+ * on USB2.0 compliant host/hub.
+ * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
+ * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
+ * IDEV_CHG_MAX can be drawn irrespective of USB state.
+ *
+ */
+enum usb_chg_type {
+ USB_INVALID_CHARGER = 0,
+ USB_SDP_CHARGER,
+ USB_DCP_CHARGER,
+ USB_CDP_CHARGER,
+};
+
+/**
+ * struct msm_otg_platform_data - platform device data
+ * for msm_otg driver.
+ * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
+ * "do not overwrite default vaule at this address".
+ * @phy_init_sz: PHY configuration sequence size.
+ * @vbus_power: VBUS power on/off routine.
+ * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
+ * @mode: Supported mode (OTG/peripheral/host).
+ * @otg_control: OTG switch controlled by user/Id pin
+ */
+struct msm_otg_platform_data {
+ int *phy_init_seq;
+ int phy_init_sz;
+ void (*vbus_power)(bool on);
+ unsigned power_budget;
+ enum usb_dr_mode mode;
+ enum otg_control_type otg_control;
+ enum msm_usb_phy_type phy_type;
+ void (*setup_gpio)(enum usb_otg_state state);
+};
+
+/**
+ * struct msm_usb_cable - structure for external connector cable
+ * state tracking
+ * @nb: hold event notification callback
+ * @conn: used for notification registration
+ */
+struct msm_usb_cable {
+ struct notifier_block nb;
+ struct extcon_dev *extcon;
+};
+
+/**
+ * struct msm_otg: OTG driver data. Shared by HCD and DCD.
+ * @otg: USB OTG Transceiver structure.
+ * @pdata: otg device platform data.
+ * @irq: IRQ number assigned for HSUSB controller.
+ * @clk: clock struct of usb_hs_clk.
+ * @pclk: clock struct of usb_hs_pclk.
+ * @core_clk: clock struct of usb_hs_core_clk.
+ * @regs: ioremapped register base address.
+ * @inputs: OTG state machine inputs (Id, SessValid etc.).
+ * @sm_work: OTG state machine work.
+ * @in_lpm: indicates low power mode (LPM) state.
+ * @async_int: Async interrupt arrived.
+ * @cur_power: The amount of mA available from downstream port.
+ * @chg_work: Charger detection work.
+ * @chg_state: The state of charger detection process.
+ * @chg_type: The type of charger attached.
+ * @dcd_retries: The retry count used to track Data contact
+ * detection process.
+ * @manual_pullup: true if VBUS is not routed to USB controller/phy
+ * and controller driver therefore enables pull-up explicitly before
+ * starting controller using usbcmd run/stop bit.
+ * @vbus: VBUS signal state tracking, using extcon framework
+ * @id: ID signal state tracking, using extcon framework
+ * @switch_gpio: Descriptor for GPIO used to control external Dual
+ * SPDT USB Switch.
+ * @reboot: Used to inform the driver to route USB D+/D- line to Device
+ * connector
+ */
+struct msm_otg {
+ struct usb_phy phy;
+ struct msm_otg_platform_data *pdata;
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
+ struct clk *core_clk;
+ void __iomem *regs;
+#define ID 0
+#define B_SESS_VLD 1
+ unsigned long inputs;
+ struct work_struct sm_work;
+ atomic_t in_lpm;
+ int async_int;
+ unsigned cur_power;
+ int phy_number;
+ struct delayed_work chg_work;
+ enum usb_chg_state chg_state;
+ enum usb_chg_type chg_type;
+ u8 dcd_retries;
+ struct regulator *v3p3;
+ struct regulator *v1p8;
+ struct regulator *vddcx;
+
+ struct reset_control *phy_rst;
+ struct reset_control *link_rst;
+ int vdd_levels[3];
+
+ bool manual_pullup;
+
+ struct msm_usb_cable vbus;
+ struct msm_usb_cable id;
+
+ struct gpio_desc *switch_gpio;
+ struct notifier_block reboot;
+};
+
#define MSM_USB_BASE (motg->regs)
#define DRIVER_NAME "msm_otg"
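struct msm_usb_cable pairs each notifier_block with its extcon_dev so the VBUS and ID cables can be registered symmetrically. A sketch of the expected wiring for the VBUS side (the callback name is an assumption; the registration code is outside this hunk):

/* Hypothetical VBUS cable hook-up. */
motg->vbus.extcon = ext_vbus;
motg->vbus.nb.notifier_call = msm_otg_vbus_notifier;
ret = extcon_register_notifier(ext_vbus, EXTCON_USB, &motg->vbus.nb);
if (ret)
	return ret;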
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index c4bf2de6d14e..6f6d2a7fd5a0 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -148,7 +148,7 @@ static int omap_otg_remove(struct platform_device *pdev)
struct otg_device *otg_dev = platform_get_drvdata(pdev);
struct extcon_dev *edev = otg_dev->extcon;
- extcon_unregister_notifier(edev, EXTCON_USB_HOST,&otg_dev->id_nb);
+ extcon_unregister_notifier(edev, EXTCON_USB_HOST, &otg_dev->id_nb);
extcon_unregister_notifier(edev, EXTCON_USB, &otg_dev->vbus_nb);
return 0;
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index baeb7d23bf24..8fbbc2d32371 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -697,7 +697,7 @@ probe_end_fifo_exit:
probe_end_pipe_exit:
usbhs_pipe_remove(priv);
- dev_info(&pdev->dev, "probe failed\n");
+ dev_info(&pdev->dev, "probe failed (%d)\n", ret);
return ret;
}
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 7be4e7d57ace..280ed5ff021b 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -810,20 +810,27 @@ static void xfer_work(struct work_struct *work)
{
struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
struct usbhs_pipe *pipe = pkt->pipe;
- struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
+ struct usbhs_fifo *fifo;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct dma_async_tx_descriptor *desc;
- struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
+ struct dma_chan *chan;
struct device *dev = usbhs_priv_to_dev(priv);
enum dma_transfer_direction dir;
+ unsigned long flags;
+ usbhs_lock(priv, flags);
+ fifo = usbhs_pipe_to_fifo(pipe);
+ if (!fifo)
+ goto xfer_work_end;
+
+ chan = usbhsf_dma_chan_get(fifo, pkt);
dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
pkt->trans, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
- return;
+ goto xfer_work_end;
desc->callback = usbhsf_dma_complete;
desc->callback_param = pipe;
@@ -831,7 +838,7 @@ static void xfer_work(struct work_struct *work)
pkt->cookie = dmaengine_submit(desc);
if (pkt->cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n");
- return;
+ goto xfer_work_end;
}
dev_dbg(dev, " %s %d (%d/ %d)\n",
@@ -842,6 +849,9 @@ static void xfer_work(struct work_struct *work)
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
dma_async_issue_pending(chan);
usbhs_pipe_enable(pipe);
+
+xfer_work_end:
+ usbhs_unlock(priv, flags);
}
/*
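The fifo.c change closes a race: xfer_work() runs from a workqueue and could dereference a FIFO that usbhsg_ep_disable() had already torn down. The fix re-reads the shared state under the driver lock and funnels every exit through the unlock. The shape of the guard, reduced to a fragment:

/* Re-check shared state under the lock before using it. */
usbhs_lock(priv, flags);
fifo = usbhs_pipe_to_fifo(pipe);
if (!fifo)			/* pipe torn down behind our back */
	goto xfer_work_end;
/* ... prepare and kick the DMA transfer ... */
xfer_work_end:
	usbhs_unlock(priv, flags);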
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 30345c2d01be..50f3363cc382 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -585,6 +585,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct usbhs_pipe *pipe;
int ret = -EIO;
+ unsigned long flags;
+
+ usbhs_lock(priv, flags);
/*
* if it already has a pipe,
@@ -593,7 +596,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
if (uep->pipe) {
usbhs_pipe_clear(uep->pipe);
usbhs_pipe_sequence_data0(uep->pipe);
- return 0;
+ ret = 0;
+ goto usbhsg_ep_enable_end;
}
pipe = usbhs_pipe_malloc(priv,
@@ -621,6 +625,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
ret = 0;
}
+usbhsg_ep_enable_end:
+ usbhs_unlock(priv, flags);
+
return ret;
}
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index 38b01f2aeeb0..1d70add926f0 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -23,7 +23,7 @@
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
#define UGCTRL2_USB0SEL_OTG 0x00000030
-void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
+static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
{
iowrite32(data, priv->base + reg);
}
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index c7508cbce3ce..9f490375ac92 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -245,7 +245,7 @@ enum usbip_side {
#define USBIP_EH_RESET (1 << 2)
#define USBIP_EH_UNUSABLE (1 << 3)
-#define SDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
+#define SDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
#define SDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define SDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define SDEV_EVENT_ERROR_SUBMIT (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 99397fa1e3f0..0f98f2c7475f 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -40,7 +40,7 @@ int get_gadget_descs(struct vudc *udc)
struct usb_ctrlrequest req;
int ret;
- if (!udc || !udc->driver || !udc->pullup)
+ if (!udc->driver || !udc->pullup)
return -EINVAL;
req.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index 0efc52f11ad0..9269d5685239 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -64,14 +64,11 @@ const struct consw dummy_con = {
.con_putcs = DUMMY,
.con_cursor = DUMMY,
.con_scroll = DUMMY,
- .con_bmove = DUMMY,
.con_switch = DUMMY,
.con_blank = DUMMY,
.con_font_set = DUMMY,
.con_font_get = DUMMY,
.con_font_default = DUMMY,
.con_font_copy = DUMMY,
- .con_set_palette = DUMMY,
- .con_scrolldelta = DUMMY,
};
EXPORT_SYMBOL_GPL(dummy_con);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index afd3301ac40c..b87f5cfdaea5 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -170,8 +170,7 @@ static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
-static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
-static int fbcon_scrolldelta(struct vc_data *vc, int lines);
+static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
/*
* Internal routines
@@ -381,7 +380,7 @@ static void fb_flashcursor(struct work_struct *work)
if (ops && ops->currcon != -1)
vc = vc_cons[ops->currcon].d;
- if (!vc || !CON_IS_VISIBLE(vc) ||
+ if (!vc || !con_is_visible(vc) ||
registered_fb[con2fb_map[vc->vc_num]] != info ||
vc->vc_deccm != 1) {
console_unlock();
@@ -619,7 +618,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
erase,
vc->vc_size_row * logo_lines);
- if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) {
+ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
update_screen(vc);
}
@@ -1113,7 +1112,7 @@ static void fbcon_init(struct vc_data *vc, int init)
*
* We need to do it in fbcon_init() to prevent screen corruption.
*/
- if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) {
+ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
if (info->fbops->fb_set_par &&
!(ops->flags & FBCON_FLAGS_INIT)) {
ret = info->fbops->fb_set_par(info);
@@ -1193,7 +1192,7 @@ static void fbcon_deinit(struct vc_data *vc)
if (!ops)
goto finished;
- if (CON_IS_VISIBLE(vc))
+ if (con_is_visible(vc))
fbcon_del_cursor_timer(info);
ops->flags &= ~FBCON_FLAGS_INIT;
@@ -1398,7 +1397,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
rows /= vc->vc_font.height;
vc_resize(vc, cols, rows);
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
update_screen(vc);
if (softback_buf)
fbcon_update_softback(vc);
@@ -2146,7 +2145,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
return -EINVAL;
DPRINTK("resize now %ix%i\n", var.xres, var.yres);
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
var.activate = FB_ACTIVATE_NOW |
FB_ACTIVATE_FORCE;
fb_set_var(info, &var);
@@ -2449,7 +2448,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
int cnt;
char *old_data = NULL;
- if (CON_IS_VISIBLE(vc) && softback_lines)
+ if (con_is_visible(vc) && softback_lines)
fbcon_set_origin(vc);
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
@@ -2530,9 +2529,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
cols /= w;
rows /= h;
vc_resize(vc, cols, rows);
- if (CON_IS_VISIBLE(vc) && softback_buf)
+ if (con_is_visible(vc) && softback_buf)
fbcon_update_softback(vc);
- } else if (CON_IS_VISIBLE(vc)
+ } else if (con_is_visible(vc)
&& vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
update_screen(vc);
@@ -2652,17 +2651,17 @@ static struct fb_cmap palette_cmap = {
0, 16, palette_red, palette_green, palette_blue, NULL
};
-static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
+static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
int i, j, k, depth;
u8 val;
if (fbcon_is_inactive(vc, info))
- return -EINVAL;
+ return;
- if (!CON_IS_VISIBLE(vc))
- return 0;
+ if (!con_is_visible(vc))
+ return;
depth = fb_get_color_depth(&info->var, &info->fix);
if (depth > 3) {
@@ -2684,7 +2683,7 @@ static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
} else
fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
- return fb_set_cmap(&palette_cmap, info);
+ fb_set_cmap(&palette_cmap, info);
}
static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
@@ -2765,7 +2764,7 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
}
}
-static int fbcon_scrolldelta(struct vc_data *vc, int lines)
+static void fbcon_scrolldelta(struct vc_data *vc, int lines)
{
struct fb_info *info = registered_fb[con2fb_map[fg_console]];
struct fbcon_ops *ops = info->fbcon_par;
@@ -2774,9 +2773,9 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
if (softback_top) {
if (vc->vc_num != fg_console)
- return 0;
+ return;
if (vc->vc_mode != KD_TEXT || !lines)
- return 0;
+ return;
if (logo_shown >= 0) {
struct vc_data *conp2 = vc_cons[logo_shown].d;
@@ -2809,11 +2808,11 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
fbcon_redraw_softback(vc, disp, lines);
fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
- return 0;
+ return;
}
if (!scrollback_phys_max)
- return -ENOSYS;
+ return;
scrollback_old = scrollback_current;
scrollback_current -= lines;
@@ -2822,10 +2821,10 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
else if (scrollback_current > scrollback_max)
scrollback_current = scrollback_max;
if (scrollback_current == scrollback_old)
- return 0;
+ return;
if (fbcon_is_inactive(vc, info))
- return 0;
+ return;
fbcon_cursor(vc, CM_ERASE);
@@ -2852,7 +2851,6 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
if (!scrollback_current)
fbcon_cursor(vc, CM_DRAW);
- return 0;
}
static int fbcon_set_origin(struct vc_data *vc)
@@ -2904,7 +2902,7 @@ static void fbcon_modechanged(struct fb_info *info)
p = &fb_display[vc->vc_num];
set_blitting_type(vc, info);
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
var_to_display(p, &info->var, info);
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
@@ -2943,7 +2941,7 @@ static void fbcon_set_all_vcs(struct fb_info *info)
registered_fb[con2fb_map[i]] != info)
continue;
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
fg = i;
continue;
}
@@ -3182,7 +3180,7 @@ static void fbcon_fb_blanked(struct fb_info *info, int blank)
registered_fb[con2fb_map[ops->currcon]] != info)
return;
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
if (blank)
do_blank_screen(0);
else
@@ -3336,7 +3334,6 @@ static const struct consw fb_con = {
.con_putcs = fbcon_putcs,
.con_cursor = fbcon_cursor,
.con_scroll = fbcon_scroll,
- .con_bmove = fbcon_bmove,
.con_switch = fbcon_switch,
.con_blank = fbcon_blank,
.con_font_set = fbcon_set_font,
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index 8edc062536a8..bacbb044d77c 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -444,48 +444,11 @@ static void mdacon_clear(struct vc_data *c, int y, int x,
}
}
-static void mdacon_bmove(struct vc_data *c, int sy, int sx,
- int dy, int dx, int height, int width)
-{
- u16 *src, *dest;
-
- if (width <= 0 || height <= 0)
- return;
-
- if (sx==0 && dx==0 && width==mda_num_columns) {
- scr_memmovew(MDA_ADDR(0,dy), MDA_ADDR(0,sy), height*width*2);
-
- } else if (dy < sy || (dy == sy && dx < sx)) {
- src = MDA_ADDR(sx, sy);
- dest = MDA_ADDR(dx, dy);
-
- for (; height > 0; height--) {
- scr_memmovew(dest, src, width*2);
- src += mda_num_columns;
- dest += mda_num_columns;
- }
- } else {
- src = MDA_ADDR(sx, sy+height-1);
- dest = MDA_ADDR(dx, dy+height-1);
-
- for (; height > 0; height--) {
- scr_memmovew(dest, src, width*2);
- src -= mda_num_columns;
- dest -= mda_num_columns;
- }
- }
-}
-
static int mdacon_switch(struct vc_data *c)
{
return 1; /* redrawing needed */
}
-static int mdacon_set_palette(struct vc_data *c, const unsigned char *table)
-{
- return -EINVAL;
-}
-
static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
{
if (mda_type == TYPE_MDA) {
@@ -505,11 +468,6 @@ static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
}
}
-static int mdacon_scrolldelta(struct vc_data *c, int lines)
-{
- return 0;
-}
-
static void mdacon_cursor(struct vc_data *c, int mode)
{
if (mode == CM_ERASE) {
@@ -574,11 +532,8 @@ static const struct consw mda_con = {
.con_putcs = mdacon_putcs,
.con_cursor = mdacon_cursor,
.con_scroll = mdacon_scroll,
- .con_bmove = mdacon_bmove,
.con_switch = mdacon_switch,
.con_blank = mdacon_blank,
- .con_set_palette = mdacon_set_palette,
- .con_scrolldelta = mdacon_scrolldelta,
.con_build_attr = mdacon_build_attr,
.con_invert_region = mdacon_invert_region,
};
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index 0553dfe684ef..e3b9521e4ec3 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -574,17 +574,6 @@ static int newport_font_set(struct vc_data *vc, struct console_font *font, unsig
return newport_set_font(vc->vc_num, font);
}
-static int newport_set_palette(struct vc_data *vc, const unsigned char *table)
-{
- return -EINVAL;
-}
-
-static int newport_scrolldelta(struct vc_data *vc, int lines)
-{
- /* there is (nearly) no off-screen memory, so we can't scroll back */
- return 0;
-}
-
static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
int lines)
{
@@ -684,34 +673,6 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
return 1;
}
-static void newport_bmove(struct vc_data *vc, int sy, int sx, int dy,
- int dx, int h, int w)
-{
- short xs, ys, xe, ye, xoffs, yoffs;
-
- xs = sx << 3;
- xe = ((sx + w) << 3) - 1;
- /*
- * as bmove is only used to move stuff around in the same line
- * (h == 1), we don't care about wrap arounds caused by topscan != 0
- */
- ys = ((sy << 4) + topscan) & 0x3ff;
- ye = (((sy + h) << 4) - 1 + topscan) & 0x3ff;
- xoffs = (dx - sx) << 3;
- yoffs = (dy - sy) << 4;
- if (xoffs > 0) {
- /* move to the right, exchange starting points */
- swap(xe, xs);
- }
- newport_wait(npregs);
- npregs->set.drawmode0 = (NPORT_DMODE0_S2S | NPORT_DMODE0_BLOCK |
- NPORT_DMODE0_DOSETUP | NPORT_DMODE0_STOPX
- | NPORT_DMODE0_STOPY);
- npregs->set.xystarti = (xs << 16) | ys;
- npregs->set.xyendi = (xe << 16) | ye;
- npregs->go.xymove = (xoffs << 16) | yoffs;
-}
-
static int newport_dummy(struct vc_data *c)
{
return 0;
@@ -729,13 +690,10 @@ const struct consw newport_con = {
.con_putcs = newport_putcs,
.con_cursor = newport_cursor,
.con_scroll = newport_scroll,
- .con_bmove = newport_bmove,
.con_switch = newport_switch,
.con_blank = newport_blank,
.con_font_set = newport_font_set,
.con_font_default = newport_font_default,
- .con_set_palette = newport_set_palette,
- .con_scrolldelta = newport_scrolldelta,
.con_set_origin = DUMMY,
.con_save_screen = DUMMY
};
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index e440c2d9fe7c..3a10ac19598f 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -79,11 +79,6 @@ static const char *sticon_startup(void)
return "STI console";
}
-static int sticon_set_palette(struct vc_data *c, const unsigned char *table)
-{
- return -EINVAL;
-}
-
static void sticon_putc(struct vc_data *conp, int c, int ypos, int xpos)
{
int redraw_cursor = 0;
@@ -182,22 +177,6 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
return 0;
}
-static void sticon_bmove(struct vc_data *conp, int sy, int sx,
- int dy, int dx, int height, int width)
-{
- if (!width || !height)
- return;
-#if 0
- if (((sy <= p->cursor_y) && (p->cursor_y < sy+height) &&
- (sx <= p->cursor_x) && (p->cursor_x < sx+width)) ||
- ((dy <= p->cursor_y) && (p->cursor_y < dy+height) &&
- (dx <= p->cursor_x) && (p->cursor_x < dx+width)))
- sticon_cursor(p, CM_ERASE /*|CM_SOFTBACK*/);
-#endif
-
- sti_bmove(sticon_sti, sy, sx, dy, dx, height, width);
-}
-
static void sticon_init(struct vc_data *c, int init)
{
struct sti_struct *sti = sticon_sti;
@@ -256,11 +235,6 @@ static int sticon_blank(struct vc_data *c, int blank, int mode_switch)
return 1;
}
-static int sticon_scrolldelta(struct vc_data *conp, int lines)
-{
- return 0;
-}
-
static u16 *sticon_screen_pos(struct vc_data *conp, int offset)
{
int line;
@@ -355,11 +329,8 @@ static const struct consw sti_con = {
.con_putcs = sticon_putcs,
.con_cursor = sticon_cursor,
.con_scroll = sticon_scroll,
- .con_bmove = sticon_bmove,
.con_switch = sticon_switch,
.con_blank = sticon_blank,
- .con_set_palette = sticon_set_palette,
- .con_scrolldelta = sticon_scrolldelta,
.con_set_origin = sticon_set_origin,
.con_save_screen = sticon_save_screen,
.con_build_attr = sticon_build_attr,
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 8bf911002cba..11576611a974 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -80,7 +80,7 @@ static void vgacon_deinit(struct vc_data *c);
static void vgacon_cursor(struct vc_data *c, int mode);
static int vgacon_switch(struct vc_data *c);
static int vgacon_blank(struct vc_data *c, int blank, int mode_switch);
-static int vgacon_scrolldelta(struct vc_data *c, int lines);
+static void vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
static void vgacon_save_screen(struct vc_data *c);
static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
@@ -248,18 +248,18 @@ static void vgacon_restore_screen(struct vc_data *c)
}
}
-static int vgacon_scrolldelta(struct vc_data *c, int lines)
+static void vgacon_scrolldelta(struct vc_data *c, int lines)
{
int start, end, count, soff;
if (!lines) {
c->vc_visible_origin = c->vc_origin;
vga_set_mem_top(c);
- return 1;
+ return;
}
if (!vgacon_scrollback)
- return 1;
+ return;
if (!vgacon_scrollback_save) {
vgacon_cursor(c, CM_ERASE);
@@ -320,8 +320,6 @@ static int vgacon_scrolldelta(struct vc_data *c, int lines)
scr_memcpyw(d, s, diff * c->vc_size_row);
} else
vgacon_cursor(c, CM_MOVE);
-
- return 1;
}
#else
#define vgacon_scrollback_startup(...) do { } while (0)
@@ -334,7 +332,7 @@ static void vgacon_restore_screen(struct vc_data *c)
vgacon_scrolldelta(c, 0);
}
-static int vgacon_scrolldelta(struct vc_data *c, int lines)
+static void vgacon_scrolldelta(struct vc_data *c, int lines)
{
if (!lines) /* Turn scrollback off */
c->vc_visible_origin = c->vc_origin;
@@ -362,7 +360,6 @@ static int vgacon_scrolldelta(struct vc_data *c, int lines)
c->vc_visible_origin = vga_vram_base + (p + ul) % we;
}
vga_set_mem_top(c);
- return 1;
}
#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
@@ -592,7 +589,7 @@ static void vgacon_init(struct vc_data *c, int init)
static void vgacon_deinit(struct vc_data *c)
{
/* When closing the active console, reset video origin */
- if (CON_IS_VISIBLE(c)) {
+ if (con_is_visible(c)) {
c->vc_visible_origin = vga_vram_base;
vga_set_mem_top(c);
}
@@ -859,16 +856,13 @@ static void vga_set_palette(struct vc_data *vc, const unsigned char *table)
}
}
-static int vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
+static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
{
#ifdef CAN_LOAD_PALETTE
if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked
- || !CON_IS_VISIBLE(vc))
- return -EINVAL;
+ || !con_is_visible(vc))
+ return;
vga_set_palette(vc, table);
- return 0;
-#else
- return -EINVAL;
#endif
}
@@ -1254,7 +1248,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
struct vc_data *c = vc_cons[i].d;
if (c && c->vc_sw == &vga_con) {
- if (CON_IS_VISIBLE(c)) {
+ if (con_is_visible(c)) {
/* void size to cause regs to be rewritten */
cursor_size_lastfrom = 0;
cursor_size_lastto = 0;
@@ -1318,7 +1312,7 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
return success */
return (user) ? 0 : -EINVAL;
- if (CON_IS_VISIBLE(c) && !vga_is_gfx) /* who knows */
+ if (con_is_visible(c) && !vga_is_gfx) /* who knows */
vgacon_doresize(c, width, height);
return 0;
}
@@ -1427,7 +1421,6 @@ const struct consw vga_con = {
.con_putcs = DUMMY,
.con_cursor = vgacon_cursor,
.con_scroll = vgacon_scroll,
- .con_bmove = DUMMY,
.con_switch = vgacon_switch,
.con_blank = vgacon_blank,
.con_font_set = vgacon_font_set,
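All the console-driver hunks above follow one pattern: con_bmove is dropped, and con_set_palette/con_scrolldelta become optional void hooks, so stubs that existed only to return an error code can be deleted outright. Assuming the matching vt core change (not part of this diff), call sites reduce to a NULL check:

/* Sketch of an optional-callback call site in the vt core. */
if (vc->vc_sw->con_scrolldelta)
	vc->vc_sw->con_scrolldelta(vc, lines);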
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 076970a54f89..4ce10bcca18b 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -423,36 +423,7 @@ upload:
return 0;
}
-static int __init check_prereq(void)
-{
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- if (!xen_initial_domain())
- return -ENODEV;
-
- if (!acpi_gbl_FADT.smi_command)
- return -ENODEV;
-
- if (c->x86_vendor == X86_VENDOR_INTEL) {
- if (!cpu_has(c, X86_FEATURE_EST))
- return -ENODEV;
- return 0;
- }
- if (c->x86_vendor == X86_VENDOR_AMD) {
- /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
- * as we get compile warnings for the static functions.
- */
-#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
-#define USE_HW_PSTATE 0x00000080
- u32 eax, ebx, ecx, edx;
- cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
- if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
- return -ENODEV;
- return 0;
- }
- return -ENODEV;
-}
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;
@@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
static int __init xen_acpi_processor_init(void)
{
unsigned int i;
- int rc = check_prereq();
+ int rc;
- if (rc)
- return rc;
+ if (!xen_initial_domain())
+ return -ENODEV;
nr_acpi_bits = get_max_acpi_id() + 1;
acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index cacf30d14747..7487971f9f78 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
rc = -ENOMEM;
goto out;
}
+ } else {
+ list_for_each_entry(trans, &u->transactions, list)
+ if (trans->handle.id == u->u.msg.tx_id)
+ break;
+ if (&trans->list == &u->transactions)
+ return -ESRCH;
}
reply = xenbus_dev_request_and_reply(&u->u.msg);
if (IS_ERR(reply)) {
- kfree(trans);
+ if (msg_type == XS_TRANSACTION_START)
+ kfree(trans);
rc = PTR_ERR(reply);
goto out;
}
@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
list_add(&trans->list, &u->transactions);
}
} else if (u->u.msg.type == XS_TRANSACTION_END) {
- list_for_each_entry(trans, &u->transactions, list)
- if (trans->handle.id == u->u.msg.tx_id)
- break;
- BUG_ON(&trans->list == &u->transactions);
list_del(&trans->list);
-
kfree(trans);
}
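The xenbus change replaces a BUG_ON() with a graceful -ESRCH by validating the transaction id before the request is sent. The sentinel test relies on list_for_each_entry() leaving the cursor at the list head when nothing matched, a common but easily misread idiom:

/* If the loop finds no match, 'trans' points at the head sentinel. */
list_for_each_entry(trans, &u->transactions, list)
	if (trans->handle.id == u->u.msg.tx_id)
		break;
if (&trans->list == &u->transactions)	/* ran off the end: no match */
	return -ESRCH;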
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 374b12af8812..22f7cd711c57 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -232,10 +232,10 @@ static void transaction_resume(void)
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
void *ret;
- struct xsd_sockmsg req_msg = *msg;
+ enum xsd_sockmsg_type type = msg->type;
int err;
- if (req_msg.type == XS_TRANSACTION_START)
+ if (type == XS_TRANSACTION_START)
transaction_start();
mutex_lock(&xs_state.request_mutex);
@@ -249,12 +249,8 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
mutex_unlock(&xs_state.request_mutex);
- if (IS_ERR(ret))
- return ret;
-
if ((msg->type == XS_TRANSACTION_END) ||
- ((req_msg.type == XS_TRANSACTION_START) &&
- (msg->type == XS_ERROR)))
+ ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
transaction_end();
return ret;