Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_adxl.c192
-rw-r--r--drivers/acpi/nfit/core.c297
-rw-r--r--drivers/acpi/nfit/intel.h38
-rw-r--r--drivers/acpi/nfit/nfit.h21
-rw-r--r--drivers/acpi/pci_root.c17
-rw-r--r--drivers/acpi/property.c97
-rw-r--r--drivers/acpi/x86/apple.c2
-rw-r--r--drivers/android/Kconfig2
-rw-r--r--drivers/android/binder.c489
-rw-r--r--drivers/android/binder_trace.h36
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/base/component.c6
-rw-r--r--drivers/base/devres.c36
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/platform-msi.c14
-rw-r--r--drivers/block/cryptoloop.c22
-rw-r--r--drivers/block/null_blk.h11
-rw-r--r--drivers/block/null_blk_main.c30
-rw-r--r--drivers/block/null_blk_zoned.c57
-rw-r--r--drivers/block/rsxx/core.c2
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/block/xen-blkfront.c3
-rw-r--r--drivers/block/z2ram.c3
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c27
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/cdrom/gdrom.c8
-rw-r--r--drivers/char/hw_random/core.c4
-rw-r--r--drivers/char/random.c24
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm-dev-common.c147
-rw-r--r--drivers/char/tpm/tpm-dev.c11
-rw-r--r--drivers/char/tpm/tpm-dev.h18
-rw-r--r--drivers/char/tpm/tpm-interface.c30
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm2-cmd.c4
-rw-r--r--drivers/char/tpm/tpmrm-dev.c15
-rw-r--r--drivers/char/tpm/xen-tpmfront.c2
-rw-r--r--drivers/clk/mvebu/clk-cpu.c4
-rw-r--r--drivers/clocksource/Makefile26
-rw-r--r--drivers/clocksource/asm9260_timer.c2
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c20
-rw-r--r--drivers/clocksource/pxa_timer.c6
-rw-r--r--drivers/clocksource/renesas-ostm.c11
-rw-r--r--drivers/clocksource/riscv_timer.c12
-rw-r--r--drivers/clocksource/sh_cmt.c106
-rw-r--r--drivers/clocksource/sh_mtu2.c10
-rw-r--r--drivers/clocksource/sh_tmu.c10
-rw-r--r--drivers/clocksource/timer-armada-370-xp.c (renamed from drivers/clocksource/time-armada-370-xp.c)0
-rw-r--r--drivers/clocksource/timer-cadence-ttc.c (renamed from drivers/clocksource/cadence_ttc_timer.c)2
-rw-r--r--drivers/clocksource/timer-efm32.c (renamed from drivers/clocksource/time-efm32.c)0
-rw-r--r--drivers/clocksource/timer-fsl-ftm.c (renamed from drivers/clocksource/fsl_ftm_timer.c)0
-rw-r--r--drivers/clocksource/timer-integrator-ap.c2
-rw-r--r--drivers/clocksource/timer-lpc32xx.c (renamed from drivers/clocksource/time-lpc32xx.c)0
-rw-r--r--drivers/clocksource/timer-orion.c (renamed from drivers/clocksource/time-orion.c)8
-rw-r--r--drivers/clocksource/timer-owl.c (renamed from drivers/clocksource/owl-timer.c)0
-rw-r--r--drivers/clocksource/timer-pistachio.c (renamed from drivers/clocksource/time-pistachio.c)0
-rw-r--r--drivers/clocksource/timer-qcom.c (renamed from drivers/clocksource/qcom-timer.c)0
-rw-r--r--drivers/clocksource/timer-sp804.c2
-rw-r--r--drivers/clocksource/timer-versatile.c (renamed from drivers/clocksource/versatile.c)0
-rw-r--r--drivers/clocksource/timer-vf-pit.c (renamed from drivers/clocksource/vf_pit_timer.c)0
-rw-r--r--drivers/clocksource/timer-vt8500.c (renamed from drivers/clocksource/vt8500_timer.c)0
-rw-r--r--drivers/clocksource/timer-zevio.c (renamed from drivers/clocksource/zevio-timer.c)8
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/atmel-aes.c5
-rw-r--r--drivers/crypto/atmel-authenc.h13
-rw-r--r--drivers/crypto/atmel-ecc.c11
-rw-r--r--drivers/crypto/atmel-ecc.h14
-rw-r--r--drivers/crypto/atmel-sha.c5
-rw-r--r--drivers/crypto/atmel-tdes.c5
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c20
-rw-r--r--drivers/crypto/caam/Kconfig57
-rw-r--r--drivers/crypto/caam/Makefile10
-rw-r--r--drivers/crypto/caam/caamalg.c728
-rw-r--r--drivers/crypto/caam/caamalg_desc.c143
-rw-r--r--drivers/crypto/caam/caamalg_desc.h28
-rw-r--r--drivers/crypto/caam/caamalg_qi.c627
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c5165
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h223
-rw-r--r--drivers/crypto/caam/caamhash.c80
-rw-r--r--drivers/crypto/caam/caamhash_desc.c80
-rw-r--r--drivers/crypto/caam/caamhash_desc.h21
-rw-r--r--drivers/crypto/caam/caampkc.c1
-rw-r--r--drivers/crypto/caam/caamrng.c1
-rw-r--r--drivers/crypto/caam/compat.h2
-rw-r--r--drivers/crypto/caam/ctrl.c1
-rw-r--r--drivers/crypto/caam/dpseci.c426
-rw-r--r--drivers/crypto/caam/dpseci.h333
-rw-r--r--drivers/crypto/caam/dpseci_cmd.h149
-rw-r--r--drivers/crypto/caam/error.c79
-rw-r--r--drivers/crypto/caam/error.h6
-rw-r--r--drivers/crypto/caam/jr.c1
-rw-r--r--drivers/crypto/caam/qi.c43
-rw-r--r--drivers/crypto/caam/qi.h3
-rw-r--r--drivers/crypto/caam/regs.h30
-rw-r--r--drivers/crypto/caam/sg_sw_qm.h29
-rw-r--r--drivers/crypto/caam/sg_sw_qm2.h30
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c20
-rw-r--r--drivers/crypto/cavium/nitrox/Makefile3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_common.h19
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_csr.h111
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_debugfs.c115
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h162
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c71
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.h23
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.c337
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.h10
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c98
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c203
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c49
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_sriov.c151
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c13
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h2
-rw-r--r--drivers/crypto/ccp/psp-dev.c47
-rw-r--r--drivers/crypto/ccp/sp-platform.c53
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h6
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c30
-rw-r--r--drivers/crypto/chelsio/chcr_core.c2
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h2
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c7
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c3
-rw-r--r--drivers/crypto/mxs-dcp.c142
-rw-r--r--drivers/crypto/omap-aes.c17
-rw-r--r--drivers/crypto/omap-aes.h2
-rw-r--r--drivers/crypto/picoxcell_crypto.c21
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c1
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c60
-rw-r--r--drivers/crypto/qce/ablkcipher.c13
-rw-r--r--drivers/crypto/qce/cipher.h2
-rw-r--r--drivers/crypto/s5p-sss.c113
-rw-r--r--drivers/crypto/sahara.c31
-rw-r--r--drivers/crypto/vmx/aes_cbc.c22
-rw-r--r--drivers/crypto/vmx/aes_ctr.c18
-rw-r--r--drivers/crypto/vmx/aes_xts.c18
-rw-r--r--drivers/dma/Kconfig13
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/at_hdmac.c2
-rw-r--r--drivers/dma/at_xdmac.c4
-rw-r--r--drivers/dma/bcm2835-dma.c8
-rw-r--r--drivers/dma/coh901318.c28
-rw-r--r--drivers/dma/dma-jz4740.c21
-rw-r--r--drivers/dma/dma-jz4780.c289
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c4
-rw-r--r--drivers/dma/dw/core.c5
-rw-r--r--drivers/dma/dw/platform.c2
-rw-r--r--drivers/dma/ep93xx_dma.c21
-rw-r--r--drivers/dma/fsl-edma-common.c626
-rw-r--r--drivers/dma/fsl-edma-common.h233
-rw-r--r--drivers/dma/fsl-edma.c729
-rw-r--r--drivers/dma/fsldma.c4
-rw-r--r--drivers/dma/hsu/hsu.c4
-rw-r--r--drivers/dma/idma64.c9
-rw-r--r--drivers/dma/imx-dma.c20
-rw-r--r--drivers/dma/ioat/init.c23
-rw-r--r--drivers/dma/k3dma.c36
-rw-r--r--drivers/dma/mcf-edma.c317
-rw-r--r--drivers/dma/mmp_tdma.c29
-rw-r--r--drivers/dma/mv_xor.c4
-rw-r--r--drivers/dma/mxs-dma.c3
-rw-r--r--drivers/dma/nbpfaxi.c9
-rw-r--r--drivers/dma/owl-dma.c283
-rw-r--r--drivers/dma/ppc4xx/adma.c2
-rw-r--r--drivers/dma/pxa_dma.c5
-rw-r--r--drivers/dma/sh/rcar-dmac.c3
-rw-r--r--drivers/dma/sh/shdma-arm.h5
-rw-r--r--drivers/dma/sh/shdma-base.c5
-rw-r--r--drivers/dma/sh/shdma-of.c5
-rw-r--r--drivers/dma/sh/shdma-r8a73a4.c5
-rw-r--r--drivers/dma/sh/shdma.h6
-rw-r--r--drivers/dma/sh/shdmac.c6
-rw-r--r--drivers/dma/sh/sudmac.c5
-rw-r--r--drivers/dma/sh/usb-dmac.c5
-rw-r--r--drivers/dma/sprd-dma.c81
-rw-r--r--drivers/dma/st_fdma.c7
-rw-r--r--drivers/dma/ste_dma40.c14
-rw-r--r--drivers/dma/stm32-dma.c20
-rw-r--r--drivers/dma/stm32-mdma.c4
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/edac/altera_edac.c667
-rw-r--r--drivers/edac/altera_edac.h73
-rw-r--r--drivers/edac/amd64_edac.c24
-rw-r--r--drivers/edac/amd64_edac.h3
-rw-r--r--drivers/edac/cpc925_edac.c20
-rw-r--r--drivers/edac/ghes_edac.c23
-rw-r--r--drivers/edac/i3200_edac.c2
-rw-r--r--drivers/edac/i7core_edac.c5
-rw-r--r--drivers/edac/mce_amd.c4
-rw-r--r--drivers/edac/sb_edac.c204
-rw-r--r--drivers/edac/skx_edac.c7
-rw-r--r--drivers/edac/thunderx_edac.c4
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c60
-rw-r--r--drivers/extcon/extcon-intel-int3496.c12
-rw-r--r--drivers/extcon/extcon-max14577.c24
-rw-r--r--drivers/extcon/extcon-max77693.c22
-rw-r--r--drivers/extcon/extcon-max77843.c19
-rw-r--r--drivers/extcon/extcon-max8997.c22
-rw-r--r--drivers/extcon/extcon.c15
-rw-r--r--drivers/firmware/google/Kconfig32
-rw-r--r--drivers/firmware/google/Makefile2
-rw-r--r--drivers/firmware/google/coreboot_table-acpi.c88
-rw-r--r--drivers/firmware/google/coreboot_table-of.c82
-rw-r--r--drivers/firmware/google/coreboot_table.c126
-rw-r--r--drivers/firmware/google/coreboot_table.h6
-rw-r--r--drivers/firmware/google/gsmi.c122
-rw-r--r--drivers/firmware/google/vpd.c2
-rw-r--r--drivers/firmware/scpi_pm_domain.c2
-rw-r--r--drivers/fpga/altera-cvp.c8
-rw-r--r--drivers/fpga/altera-fpga2sdram.c8
-rw-r--r--drivers/fpga/altera-freeze-bridge.c13
-rw-r--r--drivers/fpga/altera-hps2fpga.c7
-rw-r--r--drivers/fpga/altera-pr-ip-core.c9
-rw-r--r--drivers/fpga/altera-ps-spi.c11
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c2
-rw-r--r--drivers/fpga/dfl-fme-br.c11
-rw-r--r--drivers/fpga/dfl-fme-mgr.c13
-rw-r--r--drivers/fpga/dfl-fme-region.c6
-rw-r--r--drivers/fpga/dfl.c6
-rw-r--r--drivers/fpga/fpga-bridge.c68
-rw-r--r--drivers/fpga/fpga-mgr.c64
-rw-r--r--drivers/fpga/fpga-region.c65
-rw-r--r--drivers/fpga/ice40-spi.c10
-rw-r--r--drivers/fpga/machxo2-spi.c11
-rw-r--r--drivers/fpga/of-fpga-region.c6
-rw-r--r--drivers/fpga/socfpga-a10.c5
-rw-r--r--drivers/fpga/socfpga.c10
-rw-r--r--drivers/fpga/ts73xx-fpga.c11
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c4
-rw-r--r--drivers/fpga/xilinx-spi.c12
-rw-r--r--drivers/fpga/zynq-fpga.c5
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c5
-rw-r--r--drivers/hid/Kconfig16
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-bigbenff.c414
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-cougar.c66
-rw-r--r--drivers/hid/hid-elan.c2
-rw-r--r--drivers/hid/hid-google-hammer.c413
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-input.c64
-rw-r--r--drivers/hid/hid-logitech-hidpp.c309
-rw-r--r--drivers/hid/hid-magicmouse.c142
-rw-r--r--drivers/hid/hid-microsoft.c141
-rw-r--r--drivers/hid/hid-multitouch.c72
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/i2c-hid/Makefile3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c (renamed from drivers/hid/i2c-hid/i2c-hid.c)60
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c376
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h20
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c32
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c75
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c41
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c52
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.h5
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client-buffers.c49
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c24
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h5
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h2
-rw-r--r--drivers/hid/wacom_wac.c19
-rw-r--r--drivers/hv/channel.c300
-rw-r--r--drivers/hv/channel_mgmt.c54
-rw-r--r--drivers/hv/hv.c15
-rw-r--r--drivers/hv/hv_balloon.c2
-rw-r--r--drivers/hv/hv_kvp.c14
-rw-r--r--drivers/hv/ring_buffer.c1
-rw-r--r--drivers/hv/vmbus_drv.c118
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-dynamic-replicator.c81
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c183
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c132
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h26
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c58
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c93
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c198
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c385
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c6
-rw-r--r--drivers/hwtracing/coresight/coresight.c184
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c262
-rw-r--r--drivers/hwtracing/stm/Kconfig29
-rw-r--r--drivers/hwtracing/stm/Makefile6
-rw-r--r--drivers/hwtracing/stm/core.c292
-rw-r--r--drivers/hwtracing/stm/heartbeat.c2
-rw-r--r--drivers/hwtracing/stm/p_basic.c48
-rw-r--r--drivers/hwtracing/stm/p_sys-t.c382
-rw-r--r--drivers/hwtracing/stm/policy.c147
-rw-r--r--drivers/hwtracing/stm/stm.h56
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c12
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/addr.c406
-rw-r--r--drivers/infiniband/core/cache.c79
-rw-r--r--drivers/infiniband/core/cm.c9
-rw-r--r--drivers/infiniband/core/cma.c251
-rw-r--r--drivers/infiniband/core/cma_configfs.c2
-rw-r--r--drivers/infiniband/core/core_priv.h12
-rw-r--r--drivers/infiniband/core/cq.c10
-rw-r--r--drivers/infiniband/core/device.c264
-rw-r--r--drivers/infiniband/core/fmr_pool.c5
-rw-r--r--drivers/infiniband/core/iwcm.c2
-rw-r--r--drivers/infiniband/core/mad.c80
-rw-r--r--drivers/infiniband/core/mad_priv.h2
-rw-r--r--drivers/infiniband/core/netlink.c4
-rw-r--r--drivers/infiniband/core/nldev.c37
-rw-r--r--drivers/infiniband/core/rdma_core.c56
-rw-r--r--drivers/infiniband/core/rdma_core.h1
-rw-r--r--drivers/infiniband/core/restrack.c30
-rw-r--r--drivers/infiniband/core/rw.c11
-rw-r--r--drivers/infiniband/core/sa.h8
-rw-r--r--drivers/infiniband/core/sa_query.c70
-rw-r--r--drivers/infiniband/core/security.c7
-rw-r--r--drivers/infiniband/core/sysfs.c101
-rw-r--r--drivers/infiniband/core/umem.c125
-rw-r--r--drivers/infiniband/core/umem_odp.c621
-rw-r--r--drivers/infiniband/core/user_mad.c13
-rw-r--r--drivers/infiniband/core/uverbs.h15
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c43
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c140
-rw-r--r--drivers/infiniband/core/uverbs_main.c340
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c7
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c12
-rw-r--r--drivers/infiniband/core/verbs.c19
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c11
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c125
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c134
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c88
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c29
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c77
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h5
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c55
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c50
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c20
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h2
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile42
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c486
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h71
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h4
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h48
-rw-r--r--drivers/infiniband/hw/hfi1/init.c113
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.c94
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h192
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c4
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c363
-rw-r--r--drivers/infiniband/hw/hfi1/msix.h64
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c75
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c8
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c100
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h31
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c24
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c382
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c56
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h21
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c69
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h3
-rw-r--r--drivers/infiniband/hw/hfi1/trace_iowait.h54
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c14
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c22
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c137
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h20
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c251
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h35
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h11
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c12
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_sdma.c21
-rw-r--r--drivers/infiniband/hw/hns/Kconfig1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h45
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c629
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h96
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c123
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c212
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c41
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c73
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig1
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c20
-rw-r--r--drivers/infiniband/hw/mlx4/main.c182
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h5
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c8
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c129
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h14
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c3
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c358
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c393
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c510
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c9
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h98
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c14
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c123
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c491
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c44
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c4
-rw-r--r--drivers/infiniband/hw/nes/nes.c3
-rw-r--r--drivers/infiniband/hw/nes/nes.h9
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c63
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c74
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c3
-rw-r--r--drivers/infiniband/hw/qedr/main.c73
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h2
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c4
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c5
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c17
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c342
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c101
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c17
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c47
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h15
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.c3
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c39
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c74
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c16
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c91
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c46
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/Kconfig2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c677
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h42
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c39
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c35
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c49
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c55
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c36
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c18
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c9
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c3
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c19
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c28
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c22
-rw-r--r--drivers/iommu/Kconfig21
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/amd_iommu_init.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c140
-rw-r--r--drivers/iommu/arm-smmu.c106
-rw-r--r--drivers/iommu/dma-iommu.c55
-rw-r--r--drivers/iommu/fsl_pamu.c2
-rw-r--r--drivers/iommu/fsl_pamu_domain.c119
-rw-r--r--drivers/iommu/intel-iommu-debugfs.c314
-rw-r--r--drivers/iommu/intel-iommu.c32
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c11
-rw-r--r--drivers/iommu/io-pgtable-arm.c23
-rw-r--r--drivers/iommu/io-pgtable.h5
-rw-r--r--drivers/iommu/iommu.c58
-rw-r--r--drivers/iommu/iova.c22
-rw-r--r--drivers/iommu/ipmmu-vmsa.c5
-rw-r--r--drivers/iommu/of_iommu.c25
-rw-r--r--drivers/irqchip/Kconfig3
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c249
-rw-r--r--drivers/irqchip/irq-gic-v3.c85
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c253
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c507
-rw-r--r--drivers/irqchip/irq-sifive-plic.c10
-rw-r--r--drivers/irqchip/qcom-pdc.c1
-rw-r--r--drivers/macintosh/adb-iop.c50
-rw-r--r--drivers/macintosh/adb.c8
-rw-r--r--drivers/macintosh/adbhid.c53
-rw-r--r--drivers/macintosh/macio_asic.c8
-rw-r--r--drivers/macintosh/macio_sysfs.c8
-rw-r--r--drivers/macintosh/via-cuda.c35
-rw-r--r--drivers/macintosh/via-macii.c352
-rw-r--r--drivers/macintosh/via-pmu.c33
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c25
-rw-r--r--drivers/md/Kconfig11
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-core.h10
-rw-r--r--drivers/md/dm-crypt.c15
-rw-r--r--drivers/md/dm-flakey.c30
-rw-r--r--drivers/md/dm-integrity.c23
-rw-r--r--drivers/md/dm-ioctl.c18
-rw-r--r--drivers/md/dm-linear.c35
-rw-r--r--drivers/md/dm-mpath.c26
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-rq.c316
-rw-r--r--drivers/md/dm-rq.h4
-rw-r--r--drivers/md/dm-sysfs.c3
-rw-r--r--drivers/md/dm-table.c56
-rw-r--r--drivers/md/dm-thin.c8
-rw-r--r--drivers/md/dm-verity-fec.c5
-rw-r--r--drivers/md/dm-writecache.c5
-rw-r--r--drivers/md/dm-zoned-metadata.c80
-rw-r--r--drivers/md/dm-zoned-target.c23
-rw-r--r--drivers/md/dm.c194
-rw-r--r--drivers/md/dm.h1
-rw-r--r--drivers/md/md-bitmap.c9
-rw-r--r--drivers/md/md-cluster.c234
-rw-r--r--drivers/md/md-cluster.h2
-rw-r--r--drivers/md/md.c113
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/raid1.c1
-rw-r--r--drivers/md/raid10.c109
-rw-r--r--drivers/md/raid5-cache.c2
-rw-r--r--drivers/md/raid5.c12
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c5
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c5
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c5
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h2
-rw-r--r--drivers/message/fusion/mptbase.c12
-rw-r--r--drivers/message/fusion/mptsas.c8
-rw-r--r--drivers/mfd/Kconfig26
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/adp5520.c2
-rw-r--r--drivers/mfd/arizona-core.c10
-rw-r--r--drivers/mfd/at91-usart.c72
-rw-r--r--drivers/mfd/cros_ec.c3
-rw-r--r--drivers/mfd/cros_ec_dev.c1
-rw-r--r--drivers/mfd/intel_msic.c49
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c56
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c5
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c5
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c25
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h12
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c78
-rw-r--r--drivers/mfd/madera-core.c33
-rw-r--r--drivers/mfd/max14577.c28
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/max77686.c32
-rw-r--r--drivers/mfd/max77693.c34
-rw-r--r--drivers/mfd/max77843.c19
-rw-r--r--drivers/mfd/max8997-irq.c30
-rw-r--r--drivers/mfd/max8997.c40
-rw-r--r--drivers/mfd/max8998-irq.c18
-rw-r--r--drivers/mfd/max8998.c28
-rw-r--r--drivers/mfd/mc13xxx-core.c3
-rw-r--r--drivers/mfd/menelaus.c13
-rw-r--r--drivers/mfd/motorola-cpcap.c51
-rw-r--r--drivers/mfd/sec-core.c16
-rw-r--r--drivers/mfd/sec-irq.c24
-rw-r--r--drivers/mfd/ti-lmu.c91
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c14
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c2
-rw-r--r--drivers/misc/ad525x_dpot-spi.c2
-rw-r--r--drivers/misc/ad525x_dpot.c6
-rw-r--r--drivers/misc/apds990x.c1
-rw-r--r--drivers/misc/bh1770glc.c3
-rw-r--r--drivers/misc/cxl/flash.c4
-rw-r--r--drivers/misc/cxl/guest.c2
-rw-r--r--drivers/misc/echo/echo.c2
-rw-r--r--drivers/misc/eeprom/Kconfig11
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at25.c13
-rw-r--r--drivers/misc/eeprom/ee1004.c281
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c19
-rw-r--r--drivers/misc/genwqe/card_base.c1
-rw-r--r--drivers/misc/genwqe/card_ddcb.c1
-rw-r--r--drivers/misc/genwqe/card_utils.c15
-rw-r--r--drivers/misc/kgdbts.c16
-rw-r--r--drivers/misc/lkdtm/usercopy.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c1
-rw-r--r--drivers/misc/mei/main.c4
-rw-r--r--drivers/misc/mic/scif/scif_dma.c9
-rw-r--r--drivers/misc/mic/scif/scif_fence.c2
-rw-r--r--drivers/misc/ocxl/config.c4
-rw-r--r--drivers/misc/sgi-gru/grukservices.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_channel.c6
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c3
-rw-r--r--drivers/misc/sgi-xp/xpc_sn2.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c2
-rw-r--r--drivers/misc/sram.c6
-rw-r--r--drivers/misc/vmw_balloon.c1802
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_resource.c3
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c7
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c20
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/ethernet/sfc/efx.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c12
-rw-r--r--drivers/net/ppp/ppp_mppe.c27
-rw-r--r--drivers/nfc/nfcmrvl/uart.c5
-rw-r--r--drivers/nvdimm/bus.c20
-rw-r--r--drivers/nvdimm/dimm.c6
-rw-r--r--drivers/nvdimm/dimm_devs.c60
-rw-r--r--drivers/nvdimm/label.c144
-rw-r--r--drivers/nvdimm/label.h4
-rw-r--r--drivers/nvdimm/namespace_devs.c1
-rw-r--r--drivers/nvdimm/nd-core.h1
-rw-r--r--drivers/nvdimm/nd.h2
-rw-r--r--drivers/nvdimm/pfn_devs.c61
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/nvdimm/region_devs.c11
-rw-r--r--drivers/nvme/host/core.c4
-rw-r--r--drivers/nvme/host/nvme.h1
-rw-r--r--drivers/nvme/host/pci.c98
-rw-r--r--drivers/nvme/target/configfs.c47
-rw-r--r--drivers/nvme/target/core.c180
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c3
-rw-r--r--drivers/nvme/target/nvmet.h17
-rw-r--r--drivers/nvme/target/rdma.c22
-rw-r--r--drivers/nvmem/core.c533
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c7
-rw-r--r--drivers/nvmem/mxs-ocotp.c4
-rw-r--r--drivers/nvmem/sunxi_sid.c22
-rw-r--r--drivers/of/base.c149
-rw-r--r--drivers/of/device.c5
-rw-r--r--drivers/of/irq.c5
-rw-r--r--drivers/of/of_mdio.c12
-rw-r--r--drivers/of/of_numa.c19
-rw-r--r--drivers/of/of_private.h8
-rw-r--r--drivers/of/overlay.c4
-rw-r--r--drivers/of/platform.c8
-rw-r--r--drivers/of/unittest-data/overlay_15.dts4
-rw-r--r--drivers/of/unittest-data/tests-overlay.dtsi4
-rw-r--r--drivers/of/unittest.c29
-rw-r--r--drivers/pci/Kconfig20
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/access.c4
-rw-r--r--drivers/pci/controller/Kconfig4
-rw-r--r--drivers/pci/controller/dwc/Makefile2
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c11
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c176
-rw-r--r--drivers/pci/controller/dwc/pci-keystone-dw.c484
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c788
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.h57
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h4
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c56
-rw-r--r--drivers/pci/controller/pci-aardvark.c129
-rw-r--r--drivers/pci/controller/pci-host-common.c8
-rw-r--r--drivers/pci/controller/pci-mvebu.c384
-rw-r--r--drivers/pci/controller/pcie-cadence-ep.c13
-rw-r--r--drivers/pci/controller/pcie-cadence-host.c7
-rw-r--r--drivers/pci/controller/pcie-cadence.c20
-rw-r--r--drivers/pci/controller/pcie-iproc.c8
-rw-r--r--drivers/pci/controller/pcie-mediatek.c321
-rw-r--r--drivers/pci/controller/pcie-mobiveil.c7
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c9
-rw-r--r--drivers/pci/controller/pcie-xilinx.c7
-rw-r--r--drivers/pci/controller/vmd.c2
-rw-r--r--drivers/pci/hotplug/TODO74
-rw-r--r--drivers/pci/hotplug/acpiphp.h10
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c36
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h11
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c105
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c6
-rw-r--r--drivers/pci/hotplug/cpqphp.h9
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c61
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c31
-rw-r--r--drivers/pci/hotplug/ibmphp.h9
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c121
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c70
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c53
-rw-r--r--drivers/pci/hotplug/pciehp.h133
-rw-r--r--drivers/pci/hotplug/pciehp_core.c168
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c263
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c184
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c41
-rw-r--r--drivers/pci/hotplug/pnv_php.c40
-rw-r--r--drivers/pci/hotplug/rpaphp.h10
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c20
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c11
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c22
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c44
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c63
-rw-r--r--drivers/pci/hotplug/shpchp.h8
-rw-r--r--drivers/pci/hotplug/shpchp_core.c48
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c21
-rw-r--r--drivers/pci/iov.c3
-rw-r--r--drivers/pci/msi.c9
-rw-r--r--drivers/pci/of.c101
-rw-r--r--drivers/pci/p2pdma.c805
-rw-r--r--drivers/pci/pci-acpi.c63
-rw-r--r--drivers/pci/pci-bridge-emul.c408
-rw-r--r--drivers/pci/pci-bridge-emul.h124
-rw-r--r--drivers/pci/pci.c112
-rw-r--r--drivers/pci/pci.h78
-rw-r--r--drivers/pci/pcie/Kconfig4
-rw-r--r--drivers/pci/pcie/aer.c239
-rw-r--r--drivers/pci/pcie/aer_inject.c96
-rw-r--r--drivers/pci/pcie/aspm.c4
-rw-r--r--drivers/pci/pcie/dpc.c72
-rw-r--r--drivers/pci/pcie/err.c281
-rw-r--r--drivers/pci/pcie/pme.c30
-rw-r--r--drivers/pci/pcie/portdrv.h32
-rw-r--r--drivers/pci/pcie/portdrv_core.c21
-rw-r--r--drivers/pci/pcie/portdrv_pci.c31
-rw-r--r--drivers/pci/probe.c24
-rw-r--r--drivers/pci/quirks.c96
-rw-r--r--drivers/pci/remove.c4
-rw-r--r--drivers/pci/setup-bus.c28
-rw-r--r--drivers/pci/slot.c3
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/Makefile2
-rw-r--r--drivers/phy/broadcom/Kconfig3
-rw-r--r--drivers/phy/broadcom/phy-bcm-cygnus-pcie.c4
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c74
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c4
-rw-r--r--drivers/phy/cadence/Kconfig10
-rw-r--r--drivers/phy/cadence/Makefile1
-rw-r--r--drivers/phy/cadence/phy-cadence-dp.c541
-rw-r--r--drivers/phy/lantiq/phy-lantiq-rcu-usb2.c5
-rw-r--r--drivers/phy/marvell/Kconfig11
-rw-r--r--drivers/phy/marvell/Makefile1
-rw-r--r--drivers/phy/marvell/phy-berlin-sata.c6
-rw-r--r--drivers/phy/marvell/phy-pxa-usb.c345
-rw-r--r--drivers/phy/qualcomm/Kconfig17
-rw-r--r--drivers/phy/qualcomm/Makefile4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c222
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h15
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-i.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs.c50
-rw-r--r--drivers/phy/renesas/Kconfig1
-rw-r--r--drivers/phy/renesas/Makefile1
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c5
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c86
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb3.c5
-rw-r--r--drivers/phy/rockchip/Kconfig8
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c1277
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c8
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c8
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usb.c145
-rw-r--r--drivers/phy/socionext/Kconfig34
-rw-r--r--drivers/phy/socionext/Makefile8
-rw-r--r--drivers/phy/socionext/phy-uniphier-pcie.c240
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb2.c244
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3hs.c422
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3ss.c349
-rw-r--r--drivers/phy/tegra/xusb.c4
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c29
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c399
-rw-r--r--drivers/platform/goldfish/goldfish_pipe_qemu.h98
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/asus-wmi.c39
-rw-r--r--drivers/platform/x86/eeepc-laptop.c43
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c27
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c119
-rw-r--r--drivers/power/reset/qcom-pon.c1
-rw-r--r--drivers/power/reset/rmobile-reset.c5
-rw-r--r--drivers/power/supply/Kconfig7
-rw-r--r--drivers/power/supply/Makefile1
-rw-r--r--drivers/power/supply/ab8500_fg.c52
-rw-r--r--drivers/power/supply/bq25890_charger.c62
-rw-r--r--drivers/power/supply/bq27xxx_battery.c9
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c2
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c117
-rw-r--r--drivers/power/supply/ds2780_battery.c2
-rw-r--r--drivers/power/supply/ds2781_battery.c2
-rw-r--r--drivers/power/supply/ds2782_battery.c2
-rw-r--r--drivers/power/supply/max14577_charger.c22
-rw-r--r--drivers/power/supply/max17040_battery.c18
-rw-r--r--drivers/power/supply/max17042_battery.c32
-rw-r--r--drivers/power/supply/max77693_charger.c22
-rw-r--r--drivers/power/supply/max8925_power.c1
-rw-r--r--drivers/power/supply/max8997_charger.c26
-rw-r--r--drivers/power/supply/max8998_charger.c28
-rw-r--r--drivers/power/supply/power_supply_sysfs.c3
-rw-r--r--drivers/power/supply/sc2731_charger.c504
-rw-r--r--drivers/power/supply/twl4030_charger.c35
-rw-r--r--drivers/reset/reset-imx7.c1
-rw-r--r--drivers/s390/crypto/Makefile4
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c157
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c939
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h88
-rw-r--r--drivers/s390/net/ism_drv.c4
-rw-r--r--drivers/scsi/3w-9xxx.c50
-rw-r--r--drivers/scsi/3w-sas.c38
-rw-r--r--drivers/scsi/3w-xxxx.c20
-rw-r--r--drivers/scsi/3w-xxxx.h1
-rw-r--r--drivers/scsi/53c700.h2
-rw-r--r--drivers/scsi/BusLogic.c36
-rw-r--r--drivers/scsi/FlashPoint.c6
-rw-r--r--drivers/scsi/Kconfig35
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/NCR5380.c167
-rw-r--r--drivers/scsi/NCR5380.h2
-rw-r--r--drivers/scsi/a100u2w.c20
-rw-r--r--drivers/scsi/aacraid/aachba.c7
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aacraid/linit.c4
-rw-r--r--drivers/scsi/advansys.c4
-rw-r--r--drivers/scsi/aic7xxx/aic7770.c6
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h6
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c44
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c8
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h6
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_93cx6.c6
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c41
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.c7
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.h4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c9
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c5
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c46
-rw-r--r--drivers/scsi/am53c974.c54
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c29
-rw-r--r--drivers/scsi/atp870u.c6
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c10
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c15
-rw-r--r--drivers/scsi/be2iscsi/be_main.c75
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c27
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h2
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c108
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h9
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c3
-rw-r--r--drivers/scsi/csiostor/csio_init.c8
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c6
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c12
-rw-r--r--drivers/scsi/csiostor/csio_wr.c17
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c154
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h3
-rw-r--r--drivers/scsi/dc395x.c191
-rw-r--r--drivers/scsi/esp_scsi.c286
-rw-r--r--drivers/scsi/esp_scsi.h38
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c41
-rw-r--r--drivers/scsi/fnic/fnic_main.c19
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c99
-rw-r--r--drivers/scsi/fnic/vnic_dev.c26
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c161
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c15
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c15
-rw-r--r--drivers/scsi/hpsa.c148
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c1
-rw-r--r--drivers/scsi/ips.c81
-rw-r--r--drivers/scsi/isci/host.c8
-rw-r--r--drivers/scsi/isci/host.h2
-rw-r--r--drivers/scsi/isci/request.c4
-rw-r--r--drivers/scsi/isci/task.c4
-rw-r--r--drivers/scsi/iscsi_tcp.c3
-rw-r--r--drivers/scsi/jazz_esp.c30
-rw-r--r--drivers/scsi/libfc/fc_fcp.c6
-rw-r--r--drivers/scsi/libfc/fc_rport.c22
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c22
-rw-r--r--drivers/scsi/lpfc/lpfc.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c111
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c344
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h36
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h45
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c111
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c310
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c14
-rw-r--r--drivers/scsi/mac_esp.c217
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c117
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c153
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c16
-rw-r--r--drivers/scsi/mesh.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c1189
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c89
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c527
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c1488
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c355
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c101
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c70
-rw-r--r--drivers/scsi/mvsas/mv_init.c21
-rw-r--r--drivers/scsi/mvsas/mv_sas.c12
-rw-r--r--drivers/scsi/mvumi.c89
-rw-r--r--drivers/scsi/myrb.c3656
-rw-r--r--drivers/scsi/myrb.h958
-rw-r--r--drivers/scsi/myrs.c3268
-rw-r--r--drivers/scsi/myrs.h1134
-rw-r--r--drivers/scsi/nsp32.c18
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h8
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c31
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c31
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c49
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h3
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c119
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h9
-rw-r--r--drivers/scsi/qedf/qedf_main.c8
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla1280.c26
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c587
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h32
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c536
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c412
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h23
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c43
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c84
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c319
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c542
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c51
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c10
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c26
-rw-r--r--drivers/scsi/raid_class.c4
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_lib.c5
-rw-r--r--drivers/scsi/scsi_transport_sas.c2
-rw-r--r--drivers/scsi/sd.c15
-rw-r--r--drivers/scsi/sd.h15
-rw-r--r--drivers/scsi/sd_zbc.c501
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c100
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c11
-rw-r--r--drivers/scsi/snic/snic_disc.c7
-rw-r--r--drivers/scsi/snic/snic_io.c25
-rw-r--r--drivers/scsi/snic/snic_main.c24
-rw-r--r--drivers/scsi/snic/snic_scsi.c15
-rw-r--r--drivers/scsi/snic/vnic_dev.c29
-rw-r--r--drivers/scsi/sun3x_esp.c30
-rw-r--r--drivers/scsi/sun_esp.c61
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c15
-rw-r--r--drivers/scsi/ufs/Kconfig19
-rw-r--r--drivers/scsi/ufs/Makefile3
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c82
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h5
-rw-r--r--drivers/scsi/ufs/ufs.h94
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c210
-rw-r--r--drivers/scsi/ufs/ufs_bsg.h23
-rw-r--r--drivers/scsi/ufs/ufshcd.c431
-rw-r--r--drivers/scsi/ufs/ufshcd.h12
-rw-r--r--drivers/scsi/ufs/ufshci.h25
-rw-r--r--drivers/scsi/vmw_pvscsi.c77
-rw-r--r--drivers/scsi/zorro_esp.c290
-rw-r--r--drivers/slimbus/core.c37
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c34
-rw-r--r--drivers/soc/dove/pmu.c8
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c58
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c2
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c4
-rw-r--r--drivers/soc/qcom/apr.c2
-rw-r--r--drivers/soc/rockchip/pm_domains.c44
-rw-r--r--drivers/soc/tegra/pmc.c12
-rw-r--r--drivers/soc/ti/knav_dma.c8
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c8
-rw-r--r--drivers/soundwire/bus.c6
-rw-r--r--drivers/soundwire/bus.h4
-rw-r--r--drivers/soundwire/intel.c68
-rw-r--r--drivers/soundwire/intel_init.c2
-rw-r--r--drivers/soundwire/stream.c488
-rw-r--r--drivers/spi/Kconfig8
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-at91-usart.c432
-rw-r--r--drivers/staging/erofs/namei.c19
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c34
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c28
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c34
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c26
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c23
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c15
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c44
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h1
-rw-r--r--drivers/target/target_core_iblock.c58
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_sbc.c23
-rw-r--r--drivers/target/target_core_transport.c19
-rw-r--r--drivers/target/target_core_xcopy.c3
-rw-r--r--drivers/tc/tc.c8
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/armada_thermal.c4
-rw-r--r--drivers/thermal/da9062-thermal.c4
-rw-r--r--drivers/thermal/hisi_thermal.c249
-rw-r--r--drivers/thermal/imx_thermal.c31
-rw-r--r--drivers/thermal/of-thermal.c152
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c158
-rw-r--r--drivers/thermal/qcom/tsens-8916.c12
-rw-r--r--drivers/thermal/qcom/tsens-8960.c41
-rw-r--r--drivers/thermal/qcom/tsens-8974.c12
-rw-r--r--drivers/thermal/qcom/tsens-common.c62
-rw-r--r--drivers/thermal/qcom/tsens-v2.c8
-rw-r--r--drivers/thermal/qcom/tsens.c19
-rw-r--r--drivers/thermal/qcom/tsens.h23
-rw-r--r--drivers/thermal/qoriq_thermal.c5
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c2
-rw-r--r--drivers/thermal/rcar_thermal.c11
-rw-r--r--drivers/thermal/st/Kconfig14
-rw-r--r--drivers/thermal/st/Makefile1
-rw-r--r--drivers/thermal/st/stm_thermal.c760
-rw-r--r--drivers/thunderbolt/cap.c3
-rw-r--r--drivers/thunderbolt/ctl.c12
-rw-r--r--drivers/thunderbolt/ctl.h3
-rw-r--r--drivers/thunderbolt/dma_port.c5
-rw-r--r--drivers/thunderbolt/dma_port.h5
-rw-r--r--drivers/thunderbolt/domain.c7
-rw-r--r--drivers/thunderbolt/eeprom.c5
-rw-r--r--drivers/thunderbolt/icm.c5
-rw-r--r--drivers/thunderbolt/nhi.c33
-rw-r--r--drivers/thunderbolt/nhi.h3
-rw-r--r--drivers/thunderbolt/nhi_regs.h1
-rw-r--r--drivers/thunderbolt/path.c26
-rw-r--r--drivers/thunderbolt/property.c5
-rw-r--r--drivers/thunderbolt/switch.c71
-rw-r--r--drivers/thunderbolt/tb.c10
-rw-r--r--drivers/thunderbolt/tb.h9
-rw-r--r--drivers/thunderbolt/tb_msgs.h5
-rw-r--r--drivers/thunderbolt/tb_regs.h3
-rw-r--r--drivers/thunderbolt/xdomain.c5
-rw-r--r--drivers/tty/serial/Kconfig1
-rw-r--r--drivers/tty/serial/atmel_serial.c42
-rw-r--r--drivers/uio/uio.c35
-rw-r--r--drivers/uio/uio_dmem_genirq.c3
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c3
-rw-r--r--drivers/uio/uio_hv_generic.c116
-rw-r--r--drivers/uio/uio_pdrv_genirq.c3
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c19
-rw-r--r--drivers/usb/chipidea/core.c19
-rw-r--r--drivers/usb/chipidea/host.c9
-rw-r--r--drivers/usb/chipidea/otg.c9
-rw-r--r--drivers/usb/chipidea/otg.h3
-rw-r--r--drivers/usb/chipidea/udc.c9
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c4
-rw-r--r--drivers/usb/class/usbtmc.c1583
-rw-r--r--drivers/usb/core/buffer.c8
-rw-r--r--drivers/usb/core/driver.c3
-rw-r--r--drivers/usb/core/generic.c27
-rw-r--r--drivers/usb/core/hcd.c14
-rw-r--r--drivers/usb/core/hub.c42
-rw-r--r--drivers/usb/core/phy.c7
-rw-r--r--drivers/usb/core/port.c10
-rw-r--r--drivers/usb/dwc2/core.h29
-rw-r--r--drivers/usb/dwc2/debugfs.c1
-rw-r--r--drivers/usb/dwc2/gadget.c121
-rw-r--r--drivers/usb/dwc2/hcd.c48
-rw-r--r--drivers/usb/dwc2/hw.h15
-rw-r--r--drivers/usb/dwc2/params.c7
-rw-r--r--drivers/usb/dwc2/platform.c8
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c212
-rw-r--r--drivers/usb/dwc3/gadget.c29
-rw-r--r--drivers/usb/early/xhci-dbc.c3
-rw-r--r--drivers/usb/gadget/function/f_uac2.c216
-rw-r--r--drivers/usb/gadget/function/f_uvc.c57
-rw-r--r--drivers/usb/gadget/function/u_uvc.h3
-rw-r--r--drivers/usb/gadget/function/uvc.h16
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c1168
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c4
-rw-r--r--drivers/usb/gadget/function/uvc_video.c48
-rw-r--r--drivers/usb/gadget/function/uvc_video.h2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c8
-rw-r--r--drivers/usb/gadget/udc/core.c9
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c36
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.c3
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c14
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-hcd.c11
-rw-r--r--drivers/usb/host/ehci-mv.c181
-rw-r--r--drivers/usb/host/ehci-q.c4
-rw-r--r--drivers/usb/host/ehci-timer.c2
-rw-r--r--drivers/usb/host/ehci.h4
-rw-r--r--drivers/usb/host/fotg210-hcd.c50
-rw-r--r--drivers/usb/host/fotg210.h7
-rw-r--r--drivers/usb/host/ohci-at91.c2
-rw-r--r--drivers/usb/host/pci-quirks.c12
-rw-r--r--drivers/usb/host/xhci-hub.c5
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c429
-rw-r--r--drivers/usb/host/xhci-mtk.h23
-rw-r--r--drivers/usb/host/xhci-pci.c24
-rw-r--r--drivers/usb/host/xhci-plat.c3
-rw-r--r--drivers/usb/host/xhci-ring.c20
-rw-r--r--drivers/usb/host/xhci-tegra.c144
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/appledisplay.c7
-rw-r--r--drivers/usb/misc/iowarrior.c4
-rw-r--r--drivers/usb/misc/trancevibrator.c4
-rw-r--r--drivers/usb/mtu3/mtu3_core.c4
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c22
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c8
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.c113
-rw-r--r--drivers/usb/renesas_usbhs/common.h5
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c27
-rw-r--r--drivers/usb/serial/cypress_m8.c7
-rw-r--r--drivers/usb/serial/ftdi_sio.c391
-rw-r--r--drivers/usb/serial/ftdi_sio.h28
-rw-r--r--drivers/usb/storage/Kconfig23
-rw-r--r--drivers/usb/storage/isd200.c2
-rw-r--r--drivers/usb/typec/Kconfig45
-rw-r--r--drivers/usb/typec/Makefile6
-rw-r--r--drivers/usb/typec/class.c40
-rw-r--r--drivers/usb/typec/fusb302/Kconfig7
-rw-r--r--drivers/usb/typec/fusb302/Makefile2
-rw-r--r--drivers/usb/typec/tcpm/Kconfig52
-rw-r--r--drivers/usb/typec/tcpm/Makefile7
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c (renamed from drivers/usb/typec/fusb302/fusb302.c)75
-rw-r--r--drivers/usb/typec/tcpm/fusb302_reg.h (renamed from drivers/usb/typec/fusb302/fusb302_reg.h)0
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c (renamed from drivers/usb/typec/tcpci.c)0
-rw-r--r--drivers/usb/typec/tcpm/tcpci.h (renamed from drivers/usb/typec/tcpci.h)0
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c (renamed from drivers/usb/typec/tcpci_rt1711h.c)0
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c (renamed from drivers/usb/typec/tcpm.c)17
-rw-r--r--drivers/usb/typec/tcpm/wcove.c (renamed from drivers/usb/typec/typec_wcove.c)0
-rw-r--r--drivers/usb/usbip/vudc_main.c10
-rw-r--r--drivers/usb/wusbcore/crypto.c16
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c6
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c23
-rw-r--r--drivers/video/backlight/Kconfig16
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/adp5520_bl.c2
-rw-r--r--drivers/video/backlight/adp8860_bl.c2
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/ld9040.c811
-rw-r--r--drivers/video/backlight/ld9040_gamma.h202
-rw-r--r--drivers/video/backlight/lm3639_bl.c6
-rw-r--r--drivers/video/backlight/pwm_bl.c81
-rw-r--r--drivers/video/backlight/s6e63m0.c857
-rw-r--r--drivers/video/backlight/s6e63m0_gamma.h266
-rw-r--r--drivers/video/fbdev/chipsfb.c3
-rw-r--r--drivers/video/fbdev/controlfb.c5
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c2
-rw-r--r--drivers/video/fbdev/platinumfb.c5
-rw-r--r--drivers/video/fbdev/valkyriefb.c12
-rw-r--r--drivers/vme/vme.c1
-rw-r--r--drivers/w1/masters/omap_hdq.c2
-rw-r--r--drivers/w1/slaves/w1_ds2438.c66
1212 files changed, 63021 insertions, 28456 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 9705fc986da9..365e6c1a729e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -492,6 +492,9 @@ config ACPI_EXTLOG
driver adds support for that functionality with corresponding
tracepoint which carries that information to userspace.
+config ACPI_ADXL
+ bool
+
menuconfig PMIC_OPREGION
bool "PMIC (Power Management Integrated Circuit) operation region support"
help
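
The new ACPI_ADXL entry above is a blind symbol: it has no prompt and no help
text, so it cannot be enabled from menuconfig. It is only built when a driver
that needs the address translation service selects it. A minimal sketch of
what such a consumer's Kconfig entry might look like (EDAC_EXAMPLE_ADXL is a
hypothetical name for illustration, not a symbol from this series):

    config EDAC_EXAMPLE_ADXL
    	tristate "Example memory-controller driver using ADXL"
    	depends on ACPI && EDAC
    	select ACPI_ADXL
    	help
    	  Hypothetical consumer. Selecting ACPI_ADXL builds acpi_adxl.o
    	  into the ACPI core so that adxl_decode() is available.
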
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 6d59aa109a91..edc039313cd6 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,6 +61,9 @@ acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
+# Address translation
+acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
+
# These are (potentially) separate modules
# IPMI may be used by other drivers, so it has to initialise before them
diff --git a/drivers/acpi/acpi_adxl.c b/drivers/acpi/acpi_adxl.c
new file mode 100644
index 000000000000..13c8f7b50c46
--- /dev/null
+++ b/drivers/acpi/acpi_adxl.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Address translation interface via ACPI DSM.
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Specification for this interface is available at:
+ *
+ * https://cdrdv2.intel.com/v1/dl/getContent/603354
+ */
+
+#include <linux/acpi.h>
+#include <linux/adxl.h>
+
+#define ADXL_REVISION 0x1
+#define ADXL_IDX_GET_ADDR_PARAMS 0x1
+#define ADXL_IDX_FORWARD_TRANSLATE 0x2
+#define ACPI_ADXL_PATH "\\_SB.ADXL"
+
+/*
+ * The specification doesn't provide a limit on how many
+ * components are in a memory address. But since we allocate
+ * memory based on the number the BIOS tells us, we should
+ * defend against insane values.
+ */
+#define ADXL_MAX_COMPONENTS 500
+
+#undef pr_fmt
+#define pr_fmt(fmt) "ADXL: " fmt
+
+static acpi_handle handle;
+static union acpi_object *params;
+static const guid_t adxl_guid =
+ GUID_INIT(0xAA3C050A, 0x7EA4, 0x4C1F,
+ 0xAF, 0xDA, 0x12, 0x67, 0xDF, 0xD3, 0xD4, 0x8D);
+
+static int adxl_count;
+static char **adxl_component_names;
+
+static union acpi_object *adxl_dsm(int cmd, union acpi_object argv[])
+{
+ union acpi_object *obj, *o;
+
+ obj = acpi_evaluate_dsm_typed(handle, &adxl_guid, ADXL_REVISION,
+ cmd, argv, ACPI_TYPE_PACKAGE);
+ if (!obj) {
+ pr_info("DSM call failed for cmd=%d\n", cmd);
+ return NULL;
+ }
+
+ if (obj->package.count != 2) {
+ pr_info("Bad pkg count %d\n", obj->package.count);
+ goto err;
+ }
+
+ o = obj->package.elements;
+ if (o->type != ACPI_TYPE_INTEGER) {
+ pr_info("Bad 1st element type %d\n", o->type);
+ goto err;
+ }
+ if (o->integer.value) {
+ pr_info("Bad ret val %llu\n", o->integer.value);
+ goto err;
+ }
+
+ o = obj->package.elements + 1;
+ if (o->type != ACPI_TYPE_PACKAGE) {
+ pr_info("Bad 2nd element type %d\n", o->type);
+ goto err;
+ }
+ return obj;
+
+err:
+ ACPI_FREE(obj);
+ return NULL;
+}
+
+/**
+ * adxl_get_component_names - get list of memory component names
+ * Returns NULL terminated list of string names
+ *
+ * Give the caller a pointer to the list of memory component names
+ * e.g. { "SystemAddress", "ProcessorSocketId", "ChannelId", ... NULL }
+ * Caller should count how many strings there are in order to allocate a buffer
+ * for the return from adxl_decode().
+ */
+const char * const *adxl_get_component_names(void)
+{
+ return (const char * const *)adxl_component_names;
+}
+EXPORT_SYMBOL_GPL(adxl_get_component_names);
+
+/**
+ * adxl_decode - ask BIOS to decode a system address to memory address
+ * @addr: the address to decode
+ * @component_values: pointer to array of values for each component
+ * Returns 0 on success, negative error code otherwise
+ *
+ * The index of each value returned in the array matches the index of
+ * each component name returned by adxl_get_component_names().
+ * Components that are not defined for this address translation (e.g.
+ * mirror channel number for a non-mirrored address) are set to ~0ull.
+ */
+int adxl_decode(u64 addr, u64 component_values[])
+{
+ union acpi_object argv4[2], *results, *r;
+ int i, cnt;
+
+ if (!adxl_component_names)
+ return -EOPNOTSUPP;
+
+ argv4[0].type = ACPI_TYPE_PACKAGE;
+ argv4[0].package.count = 1;
+ argv4[0].package.elements = &argv4[1];
+ argv4[1].integer.type = ACPI_TYPE_INTEGER;
+ argv4[1].integer.value = addr;
+
+ results = adxl_dsm(ADXL_IDX_FORWARD_TRANSLATE, argv4);
+ if (!results)
+ return -EINVAL;
+
+ r = results->package.elements + 1;
+ cnt = r->package.count;
+ if (cnt != adxl_count) {
+ ACPI_FREE(results);
+ return -EINVAL;
+ }
+ r = r->package.elements;
+
+ for (i = 0; i < cnt; i++)
+ component_values[i] = r[i].integer.value;
+
+ ACPI_FREE(results);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adxl_decode);
+
+static int __init adxl_init(void)
+{
+ char *path = ACPI_ADXL_PATH;
+ union acpi_object *p;
+ acpi_status status;
+ int i;
+
+ status = acpi_get_handle(NULL, path, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("No ACPI handle for path %s\n", path);
+ return -ENODEV;
+ }
+
+ if (!acpi_has_method(handle, "_DSM")) {
+ pr_info("No DSM method\n");
+ return -ENODEV;
+ }
+
+ if (!acpi_check_dsm(handle, &adxl_guid, ADXL_REVISION,
+ ADXL_IDX_GET_ADDR_PARAMS |
+ ADXL_IDX_FORWARD_TRANSLATE)) {
+ pr_info("DSM method does not support forward translate\n");
+ return -ENODEV;
+ }
+
+ params = adxl_dsm(ADXL_IDX_GET_ADDR_PARAMS, NULL);
+ if (!params) {
+ pr_info("Failed to get component names\n");
+ return -ENODEV;
+ }
+
+ p = params->package.elements + 1;
+ adxl_count = p->package.count;
+ if (adxl_count > ADXL_MAX_COMPONENTS) {
+ pr_info("Insane number of address component names %d\n", adxl_count);
+ ACPI_FREE(params);
+ return -ENODEV;
+ }
+ p = p->package.elements;
+
+ /*
+ * Allocate one extra for NULL termination.
+ */
+ adxl_component_names = kcalloc(adxl_count + 1, sizeof(char *), GFP_KERNEL);
+ if (!adxl_component_names) {
+ ACPI_FREE(params);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adxl_count; i++)
+ adxl_component_names[i] = p[i].string.pointer;
+
+ return 0;
+}
+subsys_initcall(adxl_init);
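A minimal consumer sketch for the new ADXL interface (the function name and error handling are illustrative, not part of the patch; it assumes <linux/adxl.h> and <linux/slab.h>): count the NULL-terminated name list, size a values buffer to match, then decode.

/* Hypothetical caller -- illustrative only */
static int example_adxl_decode(u64 addr)
{
	const char * const *names = adxl_get_component_names();
	u64 *values;
	int i, n = 0, rc;

	if (!names)
		return -EOPNOTSUPP;
	while (names[n])	/* list is NULL terminated */
		n++;
	values = kcalloc(n, sizeof(*values), GFP_KERNEL);
	if (!values)
		return -ENOMEM;
	rc = adxl_decode(addr, values);
	if (rc == 0)
		for (i = 0; i < n; i++)
			pr_info("%s: 0x%llx\n", names[i], values[i]);
	kfree(values);
	return rc;
}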
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b072cfc5f20e..f8c638f3c946 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "nfit.h"
+#include "intel.h"
/*
* For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
@@ -191,18 +192,20 @@ static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd
* In the _LSI, _LSR, _LSW case the locked status is
* communicated via the read/write commands
*/
- if (nfit_mem->has_lsr)
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
break;
if (status >> 16 & ND_CONFIG_LOCKED)
return -EACCES;
break;
case ND_CMD_GET_CONFIG_DATA:
- if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+ && status == ACPI_LABELS_LOCKED)
return -EACCES;
break;
case ND_CMD_SET_CONFIG_DATA:
- if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
+ if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
+ && status == ACPI_LABELS_LOCKED)
return -EACCES;
break;
default:
@@ -480,14 +483,16 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
min_t(u32, 256, in_buf.buffer.length), true);
/* call the BIOS, prefer the named methods over _DSM if available */
- if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
+ if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
+ && test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
out_obj = acpi_label_info(handle);
- else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
+ else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
+ && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
struct nd_cmd_get_config_data_hdr *p = buf;
out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
- && nfit_mem->has_lsw) {
+ && test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
struct nd_cmd_set_config_hdr *p = buf;
out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
@@ -1547,7 +1552,12 @@ static DEVICE_ATTR_RO(dsm_mask);
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u16 flags = to_nfit_memdev(dev)->flags;
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ u16 flags = __to_nfit_memdev(nfit_mem)->flags;
+
+ if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
+ flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
return sprintf(buf, "%s%s%s%s%s%s%s\n",
flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
@@ -1578,6 +1588,16 @@ static ssize_t id_show(struct device *dev,
}
static DEVICE_ATTR_RO(id);
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+ return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
+}
+static DEVICE_ATTR_RO(dirty_shutdown);
+
static struct attribute *acpi_nfit_dimm_attributes[] = {
&dev_attr_handle.attr,
&dev_attr_phys_id.attr,
@@ -1595,6 +1615,7 @@ static struct attribute *acpi_nfit_dimm_attributes[] = {
&dev_attr_id.attr,
&dev_attr_family.attr,
&dev_attr_dsm_mask.attr,
+ &dev_attr_dirty_shutdown.attr,
NULL,
};
@@ -1603,6 +1624,7 @@ static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
if (!to_nfit_dcr(dev)) {
/* Without a dcr only the memdev attributes can be surfaced */
@@ -1616,6 +1638,11 @@ static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
return 0;
+
+ if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
+ && a == &dev_attr_dirty_shutdown.attr)
+ return 0;
+
return a->mode;
}
@@ -1694,6 +1721,56 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
return false;
}
+__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
+{
+ struct nd_intel_smart smart = { 0 };
+ union acpi_object in_buf = {
+ .type = ACPI_TYPE_BUFFER,
+ .buffer.pointer = (char *) &smart,
+ .buffer.length = sizeof(smart),
+ };
+ union acpi_object in_obj = {
+ .type = ACPI_TYPE_PACKAGE,
+ .package.count = 1,
+ .package.elements = &in_buf,
+ };
+ const u8 func = ND_INTEL_SMART;
+ const guid_t *guid = to_nfit_uuid(nfit_mem->family);
+ u8 revid = nfit_dsm_revid(nfit_mem->family, func);
+ struct acpi_device *adev = nfit_mem->adev;
+ acpi_handle handle = adev->handle;
+ union acpi_object *out_obj;
+
+ if ((nfit_mem->dsm_mask & (1 << func)) == 0)
+ return;
+
+ out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
+ if (!out_obj)
+ return;
+
+ if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
+ if (smart.shutdown_state)
+ set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
+ }
+
+ if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
+ set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
+ nfit_mem->dirty_shutdown = smart.shutdown_count;
+ }
+ ACPI_FREE(out_obj);
+}
+
+static void populate_shutdown_status(struct nfit_mem *nfit_mem)
+{
+ /*
+ * For DIMMs that provide a dynamic facility to retrieve a
+ * dirty-shutdown status and/or a dirty-shutdown count, cache
+ * these values in nfit_mem.
+ */
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+ nfit_intel_shutdown_status(nfit_mem);
+}
+
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, u32 device_handle)
{
@@ -1708,8 +1785,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
nfit_mem->family = NVDIMM_FAMILY_INTEL;
adev = to_acpi_dev(acpi_desc);
- if (!adev)
+ if (!adev) {
+ /* unit test case */
+ populate_shutdown_status(nfit_mem);
return 0;
+ }
adev_dimm = acpi_find_child_device(adev, device_handle, false);
nfit_mem->adev = adev_dimm;
@@ -1784,14 +1864,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
- nfit_mem->has_lsr = true;
+ set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
}
- if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+ && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
- nfit_mem->has_lsw = true;
+ set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
}
+ populate_shutdown_status(nfit_mem);
+
return 0;
}
@@ -1878,11 +1961,11 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
}
- if (nfit_mem->has_lsr) {
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
}
- if (nfit_mem->has_lsw)
+ if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
@@ -2466,7 +2549,8 @@ static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
return cmd_rc;
}
-static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
+static int ars_start(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
{
int rc;
int cmd_rc;
@@ -2477,7 +2561,7 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa
memset(&ars_start, 0, sizeof(ars_start));
ars_start.address = spa->address;
ars_start.length = spa->length;
- if (test_bit(ARS_SHORT, &nfit_spa->ars_state))
+ if (req_type == ARS_REQ_SHORT)
ars_start.flags = ND_ARS_RETURN_PREV_DATA;
if (nfit_spa_type(spa) == NFIT_SPA_PM)
ars_start.type = ND_ARS_PERSISTENT;
@@ -2534,6 +2618,15 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
struct nd_region *nd_region = nfit_spa->nd_region;
struct device *dev;
+ lockdep_assert_held(&acpi_desc->init_mutex);
+ /*
+ * Only advance the ARS state for ARS runs initiated by the
+ * kernel; ignore ARS results from BIOS-initiated runs for scrub
+ * completion tracking.
+ */
+ if (acpi_desc->scrub_spa != nfit_spa)
+ return;
+
if ((ars_status->address >= spa->address && ars_status->address
< spa->address + spa->length)
|| (ars_status->address < spa->address)) {
@@ -2553,28 +2646,13 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
} else
return;
- if (test_bit(ARS_DONE, &nfit_spa->ars_state))
- return;
-
- if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state))
- return;
-
+ acpi_desc->scrub_spa = NULL;
if (nd_region) {
dev = nd_region_dev(nd_region);
nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
} else
dev = acpi_desc->dev;
-
- dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index,
- test_bit(ARS_SHORT, &nfit_spa->ars_state)
- ? "short" : "long");
- clear_bit(ARS_SHORT, &nfit_spa->ars_state);
- if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) {
- set_bit(ARS_SHORT, &nfit_spa->ars_state);
- set_bit(ARS_REQ, &nfit_spa->ars_state);
- dev_dbg(dev, "ARS: processing scrub request received while in progress\n");
- } else
- set_bit(ARS_DONE, &nfit_spa->ars_state);
+ dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
}
static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
@@ -2855,46 +2933,55 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
return 0;
}
-static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa,
- int *query_rc)
+static int ars_register(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa)
{
- int rc = *query_rc;
+ int rc;
- if (no_init_ars)
+ if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
return acpi_nfit_register_region(acpi_desc, nfit_spa);
- set_bit(ARS_REQ, &nfit_spa->ars_state);
- set_bit(ARS_SHORT, &nfit_spa->ars_state);
+ set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
+ set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
- switch (rc) {
+ switch (acpi_nfit_query_poison(acpi_desc)) {
case 0:
case -EAGAIN:
- rc = ars_start(acpi_desc, nfit_spa);
- if (rc == -EBUSY) {
- *query_rc = rc;
+ rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
+ /* shouldn't happen, try again later */
+ if (rc == -EBUSY)
break;
- } else if (rc == 0) {
- rc = acpi_nfit_query_poison(acpi_desc);
- } else {
+ if (rc) {
set_bit(ARS_FAILED, &nfit_spa->ars_state);
break;
}
- if (rc == -EAGAIN)
- clear_bit(ARS_SHORT, &nfit_spa->ars_state);
- else if (rc == 0)
- ars_complete(acpi_desc, nfit_spa);
+ clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
+ rc = acpi_nfit_query_poison(acpi_desc);
+ if (rc)
+ break;
+ acpi_desc->scrub_spa = nfit_spa;
+ ars_complete(acpi_desc, nfit_spa);
+ /*
+ * If ars_complete() says we didn't complete the
+ * short scrub, we'll try again with a long
+ * request.
+ */
+ acpi_desc->scrub_spa = NULL;
break;
case -EBUSY:
+ case -ENOMEM:
case -ENOSPC:
+ /*
+ * BIOS was using ARS, wait for it to complete (or
+ * resources to become available) and then perform our
+ * own scrubs.
+ */
break;
default:
set_bit(ARS_FAILED, &nfit_spa->ars_state);
break;
}
- if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state))
- set_bit(ARS_REQ, &nfit_spa->ars_state);
-
return acpi_nfit_register_region(acpi_desc, nfit_spa);
}
@@ -2916,6 +3003,8 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
struct device *dev = acpi_desc->dev;
struct nfit_spa *nfit_spa;
+ lockdep_assert_held(&acpi_desc->init_mutex);
+
if (acpi_desc->cancel)
return 0;
@@ -2939,21 +3028,49 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
ars_complete_all(acpi_desc);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ enum nfit_ars_state req_type;
+ int rc;
+
if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
continue;
- if (test_bit(ARS_REQ, &nfit_spa->ars_state)) {
- int rc = ars_start(acpi_desc, nfit_spa);
-
- clear_bit(ARS_DONE, &nfit_spa->ars_state);
- dev = nd_region_dev(nfit_spa->nd_region);
- dev_dbg(dev, "ARS: range %d ARS start (%d)\n",
- nfit_spa->spa->range_index, rc);
- if (rc == 0 || rc == -EBUSY)
- return 1;
- dev_err(dev, "ARS: range %d ARS failed (%d)\n",
- nfit_spa->spa->range_index, rc);
- set_bit(ARS_FAILED, &nfit_spa->ars_state);
+
+ /* prefer short ARS requests first */
+ if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
+ req_type = ARS_REQ_SHORT;
+ else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
+ req_type = ARS_REQ_LONG;
+ else
+ continue;
+ rc = ars_start(acpi_desc, nfit_spa, req_type);
+
+ dev = nd_region_dev(nfit_spa->nd_region);
+ dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
+ nfit_spa->spa->range_index,
+ req_type == ARS_REQ_SHORT ? "short" : "long",
+ rc);
+ /*
+ * Hmm, we raced someone else starting ARS? Try again in
+ * a bit.
+ */
+ if (rc == -EBUSY)
+ return 1;
+ if (rc == 0) {
+ dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
+ "scrub start while range %d active\n",
+ acpi_desc->scrub_spa->spa->range_index);
+ clear_bit(req_type, &nfit_spa->ars_state);
+ acpi_desc->scrub_spa = nfit_spa;
+ /*
+ * Consider this spa last for future scrub
+ * requests
+ */
+ list_move_tail(&nfit_spa->list, &acpi_desc->spas);
+ return 1;
}
+
+ dev_err(dev, "ARS: range %d ARS failed (%d)\n",
+ nfit_spa->spa->range_index, rc);
+ set_bit(ARS_FAILED, &nfit_spa->ars_state);
}
return 0;
}
@@ -3009,6 +3126,7 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
struct nd_cmd_ars_cap ars_cap;
int rc;
+ set_bit(ARS_FAILED, &nfit_spa->ars_state);
memset(&ars_cap, 0, sizeof(ars_cap));
rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
if (rc < 0)
@@ -3025,16 +3143,14 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
clear_bit(ARS_FAILED, &nfit_spa->ars_state);
- set_bit(ARS_REQ, &nfit_spa->ars_state);
}
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_spa *nfit_spa;
- int rc, query_rc;
+ int rc;
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
- set_bit(ARS_FAILED, &nfit_spa->ars_state);
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
case NFIT_SPA_PM:
@@ -3043,20 +3159,12 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
}
}
- /*
- * Reap any results that might be pending before starting new
- * short requests.
- */
- query_rc = acpi_nfit_query_poison(acpi_desc);
- if (query_rc == 0)
- ars_complete_all(acpi_desc);
-
list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
case NFIT_SPA_PM:
/* register regions and kick off initial ARS run */
- rc = ars_register(acpi_desc, nfit_spa, &query_rc);
+ rc = ars_register(acpi_desc, nfit_spa);
if (rc)
return rc;
break;
@@ -3233,6 +3341,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct nfit_spa *nfit_spa;
+ int rc = 0;
if (nvdimm)
return 0;
@@ -3242,16 +3352,24 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
/*
* The kernel and userspace may race to initiate a scrub, but
* the scrub thread is prepared to lose that initial race. It
- * just needs guarantees that any ars it initiates are not
- * interrupted by any intervening start reqeusts from userspace.
+ * just needs guarantees that any ARS it initiates are not
+ * interrupted by any intervening start requests from userspace.
*/
- if (work_busy(&acpi_desc->dwork.work))
- return -EBUSY;
+ mutex_lock(&acpi_desc->init_mutex);
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ if (acpi_desc->scrub_spa
+ || test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)
+ || test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) {
+ rc = -EBUSY;
+ break;
+ }
+ mutex_unlock(&acpi_desc->init_mutex);
- return 0;
+ return rc;
}
-int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
+ enum nfit_ars_state req_type)
{
struct device *dev = acpi_desc->dev;
int scheduled = 0, busy = 0;
@@ -3271,14 +3389,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
continue;
- if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) {
+ if (test_and_set_bit(req_type, &nfit_spa->ars_state))
busy++;
- set_bit(ARS_REQ_REDO, &nfit_spa->ars_state);
- } else {
- if (test_bit(ARS_SHORT, &flags))
- set_bit(ARS_SHORT, &nfit_spa->ars_state);
+ else
scheduled++;
- }
}
if (scheduled) {
sched_ars(acpi_desc);
@@ -3464,10 +3578,11 @@ static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
- unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
- 0 : 1 << ARS_SHORT;
- acpi_nfit_ars_rescan(acpi_desc, flags);
+ if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
+ acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
+ else
+ acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
}
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h
new file mode 100644
index 000000000000..86746312381f
--- /dev/null
+++ b/drivers/acpi/nfit/intel.h
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ * Intel specific definitions for NVDIMM Firmware Interface Table - NFIT
+ */
+#ifndef _NFIT_INTEL_H_
+#define _NFIT_INTEL_H_
+
+#define ND_INTEL_SMART 1
+
+#define ND_INTEL_SMART_SHUTDOWN_COUNT_VALID (1 << 5)
+#define ND_INTEL_SMART_SHUTDOWN_VALID (1 << 10)
+
+struct nd_intel_smart {
+ u32 status;
+ union {
+ struct {
+ u32 flags;
+ u8 reserved0[4];
+ u8 health;
+ u8 spares;
+ u8 life_used;
+ u8 alarm_flags;
+ u16 media_temperature;
+ u16 ctrl_temperature;
+ u32 shutdown_count;
+ u8 ait_status;
+ u16 pmic_temperature;
+ u8 reserved1[8];
+ u8 shutdown_state;
+ u32 vendor_size;
+ u8 vendor_data[92];
+ } __packed;
+ u8 data[128];
+ };
+} __packed;
+
+#endif
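A short sketch of how the validity bits in this payload gate the corresponding fields, mirroring the nfit_intel_shutdown_status() logic in core.c above (the helper name is illustrative):

/* Illustrative helper -- a field is meaningful only if its valid bit is set */
static bool example_dirty_shutdown(const struct nd_intel_smart *smart)
{
	return (smart->flags & ND_INTEL_SMART_SHUTDOWN_VALID) &&
	       smart->shutdown_state;
}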
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index d1274ea2d251..df0f6b8407e7 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -118,10 +118,8 @@ enum nfit_dimm_notifiers {
};
enum nfit_ars_state {
- ARS_REQ,
- ARS_REQ_REDO,
- ARS_DONE,
- ARS_SHORT,
+ ARS_REQ_SHORT,
+ ARS_REQ_LONG,
ARS_FAILED,
};
@@ -159,6 +157,13 @@ struct nfit_memdev {
struct acpi_nfit_memory_map memdev[0];
};
+enum nfit_mem_flags {
+ NFIT_MEM_LSR,
+ NFIT_MEM_LSW,
+ NFIT_MEM_DIRTY,
+ NFIT_MEM_DIRTY_COUNT,
+};
+
/* assembled tables for a given dimm/memory-device */
struct nfit_mem {
struct nvdimm *nvdimm;
@@ -178,9 +183,9 @@ struct nfit_mem {
struct acpi_nfit_desc *acpi_desc;
struct resource *flush_wpq;
unsigned long dsm_mask;
+ unsigned long flags;
+ u32 dirty_shutdown;
int family;
- bool has_lsr;
- bool has_lsw;
};
struct acpi_nfit_desc {
@@ -198,6 +203,7 @@ struct acpi_nfit_desc {
struct device *dev;
u8 ars_start_flags;
struct nd_cmd_ars_status *ars_status;
+ struct nfit_spa *scrub_spa;
struct delayed_work dwork;
struct list_head list;
struct kernfs_node *scrub_count_state;
@@ -252,7 +258,8 @@ struct nfit_blk {
extern struct list_head acpi_descs;
extern struct mutex acpi_desc_lock;
-int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags);
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
+ enum nfit_ars_state req_type);
#ifdef CONFIG_X86_MCE
void nfit_mce_register(void);
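With the reworked enum, callers name the scrub type explicitly instead of passing a flags word; acpi_nfit_uc_error_notify() in core.c above uses both forms. A minimal sketch:

/* Illustrative calls -- per ars_start(), ARS_REQ_SHORT asks the BIOS to
 * return previous results (ND_ARS_RETURN_PREV_DATA), while ARS_REQ_LONG
 * requests a full media scrub */
acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);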
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 7433035ded95..707aafc7c2aa 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -421,7 +421,8 @@ out:
}
EXPORT_SYMBOL(acpi_pci_osc_control_set);
-static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
+static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
+ bool is_pcie)
{
u32 support, control, requested;
acpi_status status;
@@ -455,9 +456,15 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
decode_osc_support(root, "OS supports", support);
status = acpi_pci_osc_support(root, support);
if (ACPI_FAILURE(status)) {
- dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
- acpi_format_exception(status));
*no_aspm = 1;
+
+ /* _OSC is optional for PCI host bridges */
+ if ((status == AE_NOT_FOUND) && !is_pcie)
+ return;
+
+ dev_info(&device->dev, "_OSC failed (%s)%s\n",
+ acpi_format_exception(status),
+ pcie_aspm_support_enabled() ? "; disabling ASPM" : "");
return;
}
@@ -533,6 +540,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_handle handle = device->handle;
int no_aspm = 0;
bool hotadd = system_state == SYSTEM_RUNNING;
+ bool is_pcie;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -590,7 +598,8 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
- negotiate_os_control(root, &no_aspm);
+ is_pcie = strcmp(acpi_device_hid(device), "PNP0A08") == 0;
+ negotiate_os_control(root, &no_aspm, is_pcie);
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 693cf05b0cc4..8c7c4583b52d 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -24,11 +24,15 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
acpi_object_type type,
const union acpi_object **obj);
-/* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
-static const guid_t prp_guid =
+static const guid_t prp_guids[] = {
+ /* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
- 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01);
-/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
+ 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01),
+ /* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */
+ GUID_INIT(0x6211e2c0, 0x58a3, 0x4af3,
+ 0x90, 0xe1, 0x92, 0x7a, 0x4e, 0x0c, 0x55, 0xa4),
+};
+
static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
@@ -56,6 +60,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
dn->name = link->package.elements[0].string.pointer;
dn->fwnode.ops = &acpi_data_fwnode_ops;
dn->parent = parent;
+ INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
result = acpi_extract_properties(desc, &dn->data);
@@ -288,6 +293,35 @@ static void acpi_init_of_compatible(struct acpi_device *adev)
adev->flags.of_compatible_ok = 1;
}
+static bool acpi_is_property_guid(const guid_t *guid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prp_guids); i++) {
+ if (guid_equal(guid, &prp_guids[i]))
+ return true;
+ }
+
+ return false;
+}
+
+struct acpi_device_properties *
+acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
+ const union acpi_object *properties)
+{
+ struct acpi_device_properties *props;
+
+ props = kzalloc(sizeof(*props), GFP_KERNEL);
+ if (props) {
+ INIT_LIST_HEAD(&props->list);
+ props->guid = guid;
+ props->properties = properties;
+ list_add_tail(&props->list, &data->properties);
+ }
+
+ return props;
+}
+
static bool acpi_extract_properties(const union acpi_object *desc,
struct acpi_device_data *data)
{
@@ -312,7 +346,7 @@ static bool acpi_extract_properties(const union acpi_object *desc,
properties->type != ACPI_TYPE_PACKAGE)
break;
- if (!guid_equal((guid_t *)guid->buffer.pointer, &prp_guid))
+ if (!acpi_is_property_guid((guid_t *)guid->buffer.pointer))
continue;
/*
@@ -320,13 +354,13 @@ static bool acpi_extract_properties(const union acpi_object *desc,
* package immediately following it.
*/
if (!acpi_properties_format_valid(properties))
- break;
+ continue;
- data->properties = properties;
- return true;
+ acpi_data_add_props(data, (const guid_t *)guid->buffer.pointer,
+ properties);
}
- return false;
+ return !list_empty(&data->properties);
}
void acpi_init_properties(struct acpi_device *adev)
@@ -336,6 +370,7 @@ void acpi_init_properties(struct acpi_device *adev)
acpi_status status;
bool acpi_of = false;
+ INIT_LIST_HEAD(&adev->data.properties);
INIT_LIST_HEAD(&adev->data.subnodes);
if (!adev->handle)
@@ -398,11 +433,16 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
void acpi_free_properties(struct acpi_device *adev)
{
+ struct acpi_device_properties *props, *tmp;
+
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
adev->data.pointer = NULL;
- adev->data.properties = NULL;
+ list_for_each_entry_safe(props, tmp, &adev->data.properties, list) {
+ list_del(&props->list);
+ kfree(props);
+ }
}
/**
@@ -427,32 +467,37 @@ static int acpi_data_get_property(const struct acpi_device_data *data,
const char *name, acpi_object_type type,
const union acpi_object **obj)
{
- const union acpi_object *properties;
- int i;
+ const struct acpi_device_properties *props;
if (!data || !name)
return -EINVAL;
- if (!data->pointer || !data->properties)
+ if (!data->pointer || list_empty(&data->properties))
return -EINVAL;
- properties = data->properties;
- for (i = 0; i < properties->package.count; i++) {
- const union acpi_object *propname, *propvalue;
- const union acpi_object *property;
+ list_for_each_entry(props, &data->properties, list) {
+ const union acpi_object *properties;
+ unsigned int i;
- property = &properties->package.elements[i];
+ properties = props->properties;
+ for (i = 0; i < properties->package.count; i++) {
+ const union acpi_object *propname, *propvalue;
+ const union acpi_object *property;
- propname = &property->package.elements[0];
- propvalue = &property->package.elements[1];
+ property = &properties->package.elements[i];
- if (!strcmp(name, propname->string.pointer)) {
- if (type != ACPI_TYPE_ANY && propvalue->type != type)
- return -EPROTO;
- if (obj)
- *obj = propvalue;
+ propname = &property->package.elements[0];
+ propvalue = &property->package.elements[1];
- return 0;
+ if (!strcmp(name, propname->string.pointer)) {
+ if (type != ACPI_TYPE_ANY &&
+ propvalue->type != type)
+ return -EPROTO;
+ if (obj)
+ *obj = propvalue;
+
+ return 0;
+ }
}
}
return -EINVAL;
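Since acpi_data_get_property() now walks a list of per-GUID property sets, a property published under the hotplug-in-D3 GUID resolves through the same lookup as plain _DSD properties. A sketch, as if written inside property.c where the static lookup is visible (the property name is only an example, not taken from this patch):

static bool example_d3_hotplug(struct acpi_device *adev)
{
	const union acpi_object *obj;

	/* hypothetical property name, for illustration */
	if (acpi_data_get_property(&adev->data, "HotPlugSupportInD3",
				   ACPI_TYPE_INTEGER, &obj))
		return false;
	return obj->integer.value == 1;
}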
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
index bb1984f6c9fe..b7c98ff82d78 100644
--- a/drivers/acpi/x86/apple.c
+++ b/drivers/acpi/x86/apple.c
@@ -132,8 +132,8 @@ void acpi_extract_apple_properties(struct acpi_device *adev)
}
WARN_ON(free_space != (void *)newprops + newsize);
- adev->data.properties = newprops;
adev->data.pointer = newprops;
+ acpi_data_add_props(&adev->data, &apple_prp_guid, newprops);
out_free:
ACPI_FREE(props);
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 432e9ad77070..51e8250d113f 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,7 +10,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
- depends on MMU
+ depends on MMU && !CPU_CACHE_VIVT
default n
---help---
Binder is used in Android for both communication between processes,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d58763b6b009..cb30a524d16d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -71,6 +71,7 @@
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
+#include <linux/syscalls.h>
#include <uapi/linux/android/binder.h>
@@ -457,9 +458,8 @@ struct binder_ref {
};
enum binder_deferred_state {
- BINDER_DEFERRED_PUT_FILES = 0x01,
- BINDER_DEFERRED_FLUSH = 0x02,
- BINDER_DEFERRED_RELEASE = 0x04,
+ BINDER_DEFERRED_FLUSH = 0x01,
+ BINDER_DEFERRED_RELEASE = 0x02,
};
/**
@@ -480,9 +480,6 @@ enum binder_deferred_state {
* (invariant after initialized)
* @tsk task_struct for group_leader of process
* (invariant after initialized)
- * @files files_struct for process
- * (protected by @files_lock)
- * @files_lock mutex to protect @files
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -527,8 +524,6 @@ struct binder_proc {
struct list_head waiting_threads;
int pid;
struct task_struct *tsk;
- struct files_struct *files;
- struct mutex files_lock;
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
@@ -611,6 +606,23 @@ struct binder_thread {
bool is_dead;
};
+/**
+ * struct binder_txn_fd_fixup - transaction fd fixup list element
+ * @fixup_entry: list entry
+ * @file: struct file to be associated with new fd
+ * @offset: offset in buffer data to this fixup
+ *
+ * List element for fd fixups in a transaction. Since file
+ * descriptors need to be allocated in the context of the
+ * target process, we pass each fd to be processed in this
+ * struct.
+ */
+struct binder_txn_fd_fixup {
+ struct list_head fixup_entry;
+ struct file *file;
+ size_t offset;
+};
+
struct binder_transaction {
int debug_id;
struct binder_work work;
@@ -628,6 +640,7 @@ struct binder_transaction {
long priority;
long saved_priority;
kuid_t sender_euid;
+ struct list_head fd_fixups;
/**
* @lock: protects @from, @to_proc, and @to_thread
*
@@ -822,6 +835,7 @@ static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
struct binder_work *work)
{
+ WARN_ON(!list_empty(&thread->waiting_thread_node));
binder_enqueue_work_ilocked(work, &thread->todo);
}
@@ -839,6 +853,7 @@ static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
struct binder_work *work)
{
+ WARN_ON(!list_empty(&thread->waiting_thread_node));
binder_enqueue_work_ilocked(work, &thread->todo);
thread->process_todo = true;
}
@@ -920,66 +935,6 @@ static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
-static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
-{
- unsigned long rlim_cur;
- unsigned long irqs;
- int ret;
-
- mutex_lock(&proc->files_lock);
- if (proc->files == NULL) {
- ret = -ESRCH;
- goto err;
- }
- if (!lock_task_sighand(proc->tsk, &irqs)) {
- ret = -EMFILE;
- goto err;
- }
- rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
- unlock_task_sighand(proc->tsk, &irqs);
-
- ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
-err:
- mutex_unlock(&proc->files_lock);
- return ret;
-}
-
-/*
- * copied from fd_install
- */
-static void task_fd_install(
- struct binder_proc *proc, unsigned int fd, struct file *file)
-{
- mutex_lock(&proc->files_lock);
- if (proc->files)
- __fd_install(proc->files, fd, file);
- mutex_unlock(&proc->files_lock);
-}
-
-/*
- * copied from sys_close
- */
-static long task_close_fd(struct binder_proc *proc, unsigned int fd)
-{
- int retval;
-
- mutex_lock(&proc->files_lock);
- if (proc->files == NULL) {
- retval = -ESRCH;
- goto err;
- }
- retval = __close_fd(proc->files, fd);
- /* can't restart close syscall because file table entry was cleared */
- if (unlikely(retval == -ERESTARTSYS ||
- retval == -ERESTARTNOINTR ||
- retval == -ERESTARTNOHAND ||
- retval == -ERESTART_RESTARTBLOCK))
- retval = -EINTR;
-err:
- mutex_unlock(&proc->files_lock);
- return retval;
-}
-
static bool binder_has_work_ilocked(struct binder_thread *thread,
bool do_proc_work)
{
@@ -1270,19 +1225,12 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
+ struct binder_thread *thread = container_of(target_list,
+ struct binder_thread, todo);
binder_dequeue_work_ilocked(&node->work);
- /*
- * Note: this function is the only place where we queue
- * directly to a thread->todo without using the
- * corresponding binder_enqueue_thread_work() helper
- * functions; in this case it's ok to not set the
- * process_todo flag, since we know this node work will
- * always be followed by other work that starts queue
- * processing: in case of synchronous transactions, a
- * BR_REPLY or BR_ERROR; in case of oneway
- * transactions, a BR_TRANSACTION_COMPLETE.
- */
- binder_enqueue_work_ilocked(&node->work, target_list);
+ BUG_ON(&thread->todo != target_list);
+ binder_enqueue_deferred_thread_work_ilocked(thread,
+ &node->work);
}
} else {
if (!internal)
@@ -1958,10 +1906,32 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
return NULL;
}
+/**
+ * binder_free_txn_fixups() - free unprocessed fd fixups
+ * @t: binder transaction for t->from
+ *
+ * If the transaction is being torn down prior to being
+ * processed by the target process, free all of the
+ * fd fixups and fput the file structs. It is safe to
+ * call this function after the fixups have been
+ * processed -- in that case, the list will be empty.
+ */
+static void binder_free_txn_fixups(struct binder_transaction *t)
+{
+ struct binder_txn_fd_fixup *fixup, *tmp;
+
+ list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
+ fput(fixup->file);
+ list_del(&fixup->fixup_entry);
+ kfree(fixup);
+ }
+}
+
static void binder_free_transaction(struct binder_transaction *t)
{
if (t->buffer)
t->buffer->transaction = NULL;
+ binder_free_txn_fixups(t);
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
@@ -2262,12 +2232,17 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break;
case BINDER_TYPE_FD: {
- struct binder_fd_object *fp = to_binder_fd_object(hdr);
-
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d\n", fp->fd);
- if (failed_at)
- task_close_fd(proc, fp->fd);
+ /*
+ * No need to close the file here since user-space
+ * closes it for successfully delivered
+ * transactions. For transactions that weren't
+ * delivered, the new fd was never allocated so
+ * there is no need to close and the fput on the
+ * file is done when the transaction is torn
+ * down.
+ */
+ WARN_ON(failed_at &&
+ proc->tsk == current->group_leader);
} break;
case BINDER_TYPE_PTR:
/*
@@ -2283,6 +2258,15 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
size_t fd_index;
binder_size_t fd_buf_size;
+ if (proc->tsk != current->group_leader) {
+ /*
+ * Nothing to do if running in sender context
+ * The fd fixups have not been applied so no
+ * fds need to be closed.
+ */
+ continue;
+ }
+
fda = to_binder_fd_array_object(hdr);
parent = binder_validate_ptr(buffer, fda->parent,
off_start,
@@ -2315,7 +2299,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
}
fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
- task_close_fd(proc, fd_array[fd_index]);
+ ksys_close(fd_array[fd_index]);
} break;
default:
pr_err("transaction release %d bad object type %x\n",
@@ -2447,17 +2431,18 @@ done:
return ret;
}
-static int binder_translate_fd(int fd,
+static int binder_translate_fd(u32 *fdp,
struct binder_transaction *t,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
{
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
- int target_fd;
+ struct binder_txn_fd_fixup *fixup;
struct file *file;
- int ret;
+ int ret = 0;
bool target_allows_fd;
+ int fd = *fdp;
if (in_reply_to)
target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
@@ -2485,19 +2470,24 @@ static int binder_translate_fd(int fd,
goto err_security;
}
- target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
- if (target_fd < 0) {
+ /*
+ * Add fixup record for this transaction. The allocation
+ * of the fd in the target needs to be done from a
+ * target thread.
+ */
+ fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
+ if (!fixup) {
ret = -ENOMEM;
- goto err_get_unused_fd;
+ goto err_alloc;
}
- task_fd_install(target_proc, target_fd, file);
- trace_binder_transaction_fd(t, fd, target_fd);
- binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
- fd, target_fd);
+ fixup->file = file;
+ fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
+ trace_binder_transaction_fd_send(t, fd, fixup->offset);
+ list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
- return target_fd;
+ return ret;
-err_get_unused_fd:
+err_alloc:
err_security:
fput(file);
err_fget:
@@ -2511,8 +2501,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
{
- binder_size_t fdi, fd_buf_size, num_installed_fds;
- int target_fd;
+ binder_size_t fdi, fd_buf_size;
uintptr_t parent_buffer;
u32 *fd_array;
struct binder_proc *proc = thread->proc;
@@ -2544,23 +2533,12 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
return -EINVAL;
}
for (fdi = 0; fdi < fda->num_fds; fdi++) {
- target_fd = binder_translate_fd(fd_array[fdi], t, thread,
+ int ret = binder_translate_fd(&fd_array[fdi], t, thread,
in_reply_to);
- if (target_fd < 0)
- goto err_translate_fd_failed;
- fd_array[fdi] = target_fd;
+ if (ret < 0)
+ return ret;
}
return 0;
-
-err_translate_fd_failed:
- /*
- * Failed to allocate fd or security error, free fds
- * installed so far.
- */
- num_installed_fds = fdi;
- for (fdi = 0; fdi < num_installed_fds; fdi++)
- task_close_fd(target_proc, fd_array[fdi]);
- return target_fd;
}
static int binder_fixup_parent(struct binder_transaction *t,
@@ -2723,6 +2701,7 @@ static void binder_transaction(struct binder_proc *proc,
{
int ret;
struct binder_transaction *t;
+ struct binder_work *w;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
@@ -2864,6 +2843,29 @@ static void binder_transaction(struct binder_proc *proc,
goto err_invalid_target_handle;
}
binder_inner_proc_lock(proc);
+
+ w = list_first_entry_or_null(&thread->todo,
+ struct binder_work, entry);
+ if (!(tr->flags & TF_ONE_WAY) && w &&
+ w->type == BINDER_WORK_TRANSACTION) {
+ /*
+ * Do not allow new outgoing transaction from a
+ * thread that has a transaction at the head of
+ * its todo list. Only need to check the head
+ * because binder_select_thread_ilocked picks a
+ * thread from proc->waiting_threads to enqueue
+ * the transaction, and nothing is queued to the
+ * todo list while the thread is on waiting_threads.
+ */
+ binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
+ proc->pid, thread->pid);
+ binder_inner_proc_unlock(proc);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
+ goto err_bad_todo_list;
+ }
+
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
@@ -2911,6 +2913,7 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_alloc_t_failed;
}
+ INIT_LIST_HEAD(&t->fd_fixups);
binder_stats_created(BINDER_STAT_TRANSACTION);
spin_lock_init(&t->lock);
@@ -3066,17 +3069,16 @@ static void binder_transaction(struct binder_proc *proc,
case BINDER_TYPE_FD: {
struct binder_fd_object *fp = to_binder_fd_object(hdr);
- int target_fd = binder_translate_fd(fp->fd, t, thread,
- in_reply_to);
+ int ret = binder_translate_fd(&fp->fd, t, thread,
+ in_reply_to);
- if (target_fd < 0) {
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- return_error_param = target_fd;
+ return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
- fp->fd = target_fd;
} break;
case BINDER_TYPE_FDA: {
struct binder_fd_array_object *fda =
@@ -3233,6 +3235,7 @@ err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
+ binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
if (target_node)
@@ -3247,6 +3250,7 @@ err_alloc_tcomplete_failed:
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
+err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
@@ -3294,6 +3298,47 @@ err_invalid_target_handle:
}
}
+/**
+ * binder_free_buf() - free the specified buffer
+ * @proc: binder proc that owns buffer
+ * @buffer: buffer to be freed
+ *
+ * If the buffer is for an async transaction, enqueue the next async
+ * transaction from the node.
+ *
+ * Cleanup buffer and free it.
+ */
+static void
+binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+{
+ if (buffer->transaction) {
+ buffer->transaction->buffer = NULL;
+ buffer->transaction = NULL;
+ }
+ if (buffer->async_transaction && buffer->target_node) {
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w) {
+ buf_node->has_async_transaction = false;
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_node_inner_unlock(buf_node);
+ }
+ trace_binder_transaction_buffer_release(buffer);
+ binder_transaction_buffer_release(proc, buffer, NULL);
+ binder_alloc_free_buf(&proc->alloc, buffer);
+}
+
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
@@ -3480,33 +3525,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
-
- if (buffer->transaction) {
- buffer->transaction->buffer = NULL;
- buffer->transaction = NULL;
- }
- if (buffer->async_transaction && buffer->target_node) {
- struct binder_node *buf_node;
- struct binder_work *w;
-
- buf_node = buffer->target_node;
- binder_node_inner_lock(buf_node);
- BUG_ON(!buf_node->has_async_transaction);
- BUG_ON(buf_node->proc != proc);
- w = binder_dequeue_work_head_ilocked(
- &buf_node->async_todo);
- if (!w) {
- buf_node->has_async_transaction = false;
- } else {
- binder_enqueue_work_ilocked(
- w, &proc->todo);
- binder_wakeup_proc_ilocked(proc);
- }
- binder_node_inner_unlock(buf_node);
- }
- trace_binder_transaction_buffer_release(buffer);
- binder_transaction_buffer_release(proc, buffer, NULL);
- binder_alloc_free_buf(&proc->alloc, buffer);
+ binder_free_buf(proc, buffer);
break;
}
@@ -3829,6 +3848,76 @@ static int binder_wait_for_work(struct binder_thread *thread,
return ret;
}
+/**
+ * binder_apply_fd_fixups() - finish fd translation
+ * @t: binder transaction with list of fd fixups
+ *
+ * Now that we are in the context of the transaction target
+ * process, we can allocate and install fds. Process the
+ * list of fds to translate and fixup the buffer with the
+ * new fds.
+ *
+ * If we fail to allocate an fd, then free the resources by
+ * fput'ing files that have not been processed and ksys_close'ing
+ * any fds that have already been allocated.
+ */
+static int binder_apply_fd_fixups(struct binder_transaction *t)
+{
+ struct binder_txn_fd_fixup *fixup, *tmp;
+ int ret = 0;
+
+ list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ u32 *fdp;
+
+ if (fd < 0) {
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "failed fd fixup txn %d fd %d\n",
+ t->debug_id, fd);
+ ret = -ENOMEM;
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "fd fixup txn %d fd %d\n",
+ t->debug_id, fd);
+ trace_binder_transaction_fd_recv(t, fd, fixup->offset);
+ fd_install(fd, fixup->file);
+ fixup->file = NULL;
+ fdp = (u32 *)(t->buffer->data + fixup->offset);
+ /*
+ * This store can cause problems for CPUs with a
+ * VIVT cache (eg ARMv5) since the cache cannot
+ * detect virtual aliases to the same physical cacheline.
+ * To support VIVT, this address and the user-space VA
+ * would both need to be flushed. Since this kernel
+ * VA is not constructed via page_to_virt(), we can't
+ * use flush_dcache_page() on it, so we'd have to use
+ * an internal function. If devices with VIVT ever
+ * need to run Android, we'll either need to go back
+ * to patching the translated fd from the sender side
+ * (using the non-standard kernel functions), or rework
+ * how the kernel uses the buffer to use page_to_virt()
+ * addresses instead of allocating in our own vm area.
+ *
+ * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
+ */
+ *fdp = fd;
+ }
+ list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
+ if (fixup->file) {
+ fput(fixup->file);
+ } else if (ret) {
+ u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
+
+ ksys_close(*fdp);
+ }
+ list_del(&fixup->fixup_entry);
+ kfree(fixup);
+ }
+
+ return ret;
+}
+
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
@@ -4110,6 +4199,34 @@ retry:
tr.sender_pid = 0;
}
+ ret = binder_apply_fd_fixups(t);
+ if (ret) {
+ struct binder_buffer *buffer = t->buffer;
+ bool oneway = !!(t->flags & TF_ONE_WAY);
+ int tid = t->debug_id;
+
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
+ buffer->transaction = NULL;
+ binder_cleanup_transaction(t, "fd fixups failed",
+ BR_FAILED_REPLY);
+ binder_free_buf(proc, buffer);
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
+ proc->pid, thread->pid,
+ oneway ? "async " :
+ (cmd == BR_REPLY ? "reply " : ""),
+ tid, BR_FAILED_REPLY, ret, __LINE__);
+ if (cmd == BR_REPLY) {
+ cmd = BR_FAILED_REPLY;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ binder_stat_br(proc, thread, cmd);
+ break;
+ }
+ continue;
+ }
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)
@@ -4544,6 +4661,42 @@ out:
return ret;
}
+static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
+ struct binder_node_info_for_ref *info)
+{
+ struct binder_node *node;
+ struct binder_context *context = proc->context;
+ __u32 handle = info->handle;
+
+ if (info->strong_count || info->weak_count || info->reserved1 ||
+ info->reserved2 || info->reserved3) {
+ binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
+ proc->pid);
+ return -EINVAL;
+ }
+
+ /* This ioctl may only be used by the context manager */
+ mutex_lock(&context->context_mgr_node_lock);
+ if (!context->binder_context_mgr_node ||
+ context->binder_context_mgr_node->proc != proc) {
+ mutex_unlock(&context->context_mgr_node_lock);
+ return -EPERM;
+ }
+ mutex_unlock(&context->context_mgr_node_lock);
+
+ node = binder_get_node_from_ref(proc, handle, true, NULL);
+ if (!node)
+ return -EINVAL;
+
+ info->strong_count = node->local_strong_refs +
+ node->internal_strong_refs;
+ info->weak_count = node->local_weak_refs;
+
+ binder_put_node(node);
+
+ return 0;
+}
+
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
struct binder_node_debug_info *info)
{
@@ -4638,6 +4791,25 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
}
+ case BINDER_GET_NODE_INFO_FOR_REF: {
+ struct binder_node_info_for_ref info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_node_info_for_ref(proc, &info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ break;
+ }
case BINDER_GET_NODE_DEBUG_INFO: {
struct binder_node_debug_info info;
@@ -4693,7 +4865,6 @@ static void binder_vma_close(struct vm_area_struct *vma)
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
binder_alloc_vma_close(&proc->alloc);
- binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
@@ -4739,9 +4910,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
if (ret)
return ret;
- mutex_lock(&proc->files_lock);
- proc->files = get_files_struct(current);
- mutex_unlock(&proc->files_lock);
return 0;
err_bad_arg:
@@ -4765,7 +4933,6 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
- mutex_init(&proc->files_lock);
INIT_LIST_HEAD(&proc->todo);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4915,8 +5082,6 @@ static void binder_deferred_release(struct binder_proc *proc)
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->files);
-
mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
mutex_unlock(&binder_procs_lock);
@@ -4998,7 +5163,6 @@ static void binder_deferred_release(struct binder_proc *proc)
static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
- struct files_struct *files;
int defer;
@@ -5016,23 +5180,11 @@ static void binder_deferred_func(struct work_struct *work)
}
mutex_unlock(&binder_deferred_lock);
- files = NULL;
- if (defer & BINDER_DEFERRED_PUT_FILES) {
- mutex_lock(&proc->files_lock);
- files = proc->files;
- if (files)
- proc->files = NULL;
- mutex_unlock(&proc->files_lock);
- }
-
if (defer & BINDER_DEFERRED_FLUSH)
binder_deferred_flush(proc);
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
-
- if (files)
- put_files_struct(files);
} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
@@ -5667,12 +5819,11 @@ static int __init binder_init(void)
* Copy the module_parameter string, because we don't want to
* tokenize it in-place.
*/
- device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+ device_names = kstrdup(binder_devices_param, GFP_KERNEL);
if (!device_names) {
ret = -ENOMEM;
goto err_alloc_device_names_failed;
}
- strcpy(device_names, binder_devices_param);
device_tmp = device_names;
while ((device_name = strsep(&device_tmp, ","))) {
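For orientation, fd handling in this file now splits into two phases plus a teardown path. A comment-only outline of the flow as implemented above (a summary, not new code):

/*
 * sender context (binder_transaction):
 *	binder_translate_fd(&fp->fd, ...)   - fget() the file and queue a
 *	                                      binder_txn_fd_fixup; no fd yet
 * target context (binder_thread_read):
 *	binder_apply_fd_fixups(t)           - get_unused_fd_flags(O_CLOEXEC),
 *	                                      fd_install(), patch fd into buffer
 * teardown before delivery:
 *	binder_free_txn_fixups(t)           - fput() any unprocessed files
 */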
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 588eb3ec3507..14de7ac57a34 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -223,22 +223,40 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
__entry->dest_ref_debug_id, __entry->dest_ref_desc)
);
-TRACE_EVENT(binder_transaction_fd,
- TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd),
- TP_ARGS(t, src_fd, dest_fd),
+TRACE_EVENT(binder_transaction_fd_send,
+ TP_PROTO(struct binder_transaction *t, int fd, size_t offset),
+ TP_ARGS(t, fd, offset),
TP_STRUCT__entry(
__field(int, debug_id)
- __field(int, src_fd)
- __field(int, dest_fd)
+ __field(int, fd)
+ __field(size_t, offset)
+ ),
+ TP_fast_assign(
+ __entry->debug_id = t->debug_id;
+ __entry->fd = fd;
+ __entry->offset = offset;
+ ),
+ TP_printk("transaction=%d src_fd=%d offset=%zu",
+ __entry->debug_id, __entry->fd, __entry->offset)
+);
+
+TRACE_EVENT(binder_transaction_fd_recv,
+ TP_PROTO(struct binder_transaction *t, int fd, size_t offset),
+ TP_ARGS(t, fd, offset),
+
+ TP_STRUCT__entry(
+ __field(int, debug_id)
+ __field(int, fd)
+ __field(size_t, offset)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->src_fd = src_fd;
- __entry->dest_fd = dest_fd;
+ __entry->fd = fd;
+ __entry->offset = offset;
),
- TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d",
- __entry->debug_id, __entry->src_fd, __entry->dest_fd)
+ TP_printk("transaction=%d dest_fd=%d offset=%zu",
+ __entry->debug_id, __entry->fd, __entry->offset)
);
DECLARE_EVENT_CLASS(binder_buffer_class,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a9dd4ea7467d..6e594644cb1d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4553,6 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
+ { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 9b6d7930d1c7..e0bcf9b2dab0 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -873,7 +873,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* like others but it will lock up the whole machine HARD if
* 65536 byte PRD entry is fed. Reduce maximum segment size.
*/
- rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
+ rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512);
if (rc) {
dev_err(&pdev->dev, "failed to set the maximum segment size\n");
return rc;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 8946dfee4768..e8d676fad0c9 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -536,9 +536,9 @@ int component_bind_all(struct device *master_dev, void *data)
}
if (ret != 0) {
- for (; i--; )
- if (!master->match->compare[i].duplicate) {
- c = master->match->compare[i].component;
+ for (; i > 0; i--)
+ if (!master->match->compare[i - 1].duplicate) {
+ c = master->match->compare[i - 1].component;
component_unbind(c, master, data);
}
}
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index f98a097e73f2..4aaf00d2098b 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -11,6 +11,8 @@
#include <linux/slab.h>
#include <linux/percpu.h>
+#include <asm/sections.h>
+
#include "base.h"
struct devres_node {
@@ -823,6 +825,28 @@ char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
EXPORT_SYMBOL_GPL(devm_kstrdup);
/**
+ * devm_kstrdup_const - resource managed conditional string duplication
+ * @dev: device for which to duplicate the string
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Strings allocated by devm_kstrdup_const will be automatically freed when
+ * the associated device is detached.
+ *
+ * RETURNS:
+ * Source string if it is in the .rodata section, otherwise it falls back
+ * to devm_kstrdup.
+ */
+const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)s))
+ return s;
+
+ return devm_kstrdup(dev, s, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup_const);
+
+/**
* devm_kvasprintf - Allocate resource managed space and format a string
* into that.
* @dev: Device to allocate memory for
@@ -885,11 +909,19 @@ EXPORT_SYMBOL_GPL(devm_kasprintf);
*
* Free memory allocated with devm_kmalloc().
*/
-void devm_kfree(struct device *dev, void *p)
+void devm_kfree(struct device *dev, const void *p)
{
int rc;
- rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
+ /*
+ * Special case: pointer to a string in .rodata returned by
+ * devm_kstrdup_const().
+ */
+ if (unlikely(is_kernel_rodata((unsigned long)p)))
+ return;
+
+ rc = devres_destroy(dev, devm_kmalloc_release,
+ devm_kmalloc_match, (void *)p);
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
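Taken together, the const-duplication helper and the devm_kfree() rodata check form a symmetric pair: callers can free whatever devm_kstrdup_const() returned without caring where it came from. A minimal usage sketch (device and string are illustrative):

/* Hedged sketch: the literal sits in .rodata, so no copy is made. */
static int sketch_probe(struct device *dev)
{
	const char *name = devm_kstrdup_const(dev, "default-name",
					      GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	/* safe for both .rodata and devm-allocated pointers */
	devm_kfree(dev, name);
	return 0;
}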
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index f7768077e817..b93fc862d365 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -252,7 +252,7 @@ static int dev_rmdir(const char *name)
static int delete_path(const char *nodepath)
{
- const char *path;
+ char *path;
int err = 0;
path = kstrdup(nodepath, GFP_KERNEL);
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 60d6cc618f1c..f39a920496fb 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -321,11 +321,12 @@ void *platform_msi_get_host_data(struct irq_domain *domain)
* Returns an irqdomain for @nvec interrupts
*/
struct irq_domain *
-platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data)
+__platform_msi_create_device_domain(struct device *dev,
+ unsigned int nvec,
+ bool is_tree,
+ irq_write_msi_msg_t write_msi_msg,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
struct platform_msi_priv_data *data;
struct irq_domain *domain;
@@ -336,7 +337,8 @@ platform_msi_create_device_domain(struct device *dev,
return NULL;
data->host_data = host_data;
- domain = irq_domain_create_hierarchy(dev->msi_domain, 0, nvec,
+ domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
+ is_tree ? 0 : nvec,
dev->fwnode, ops, data);
if (!domain)
goto free_priv;
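Renaming the function with a leading __ and an is_tree flag suggests thin wrappers in the header (not part of this hunk); passing size 0 to irq_domain_create_hierarchy() selects a radix-tree revmap instead of a linear one. A hedged sketch of what such wrappers could look like:

/* Hedged sketch: assumed wrappers; the real ones would live in
 * include/linux/msi.h, which this hunk does not show.
 */
#define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)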
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 7033a4beda66..254ee7d54e91 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -45,7 +45,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
char cms[LO_NAME_SIZE]; /* cipher-mode string */
char *mode;
char *cmsp = cms; /* c-m string pointer */
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
/* encryption breaks for non sector aligned offsets */
@@ -80,13 +80,13 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
*cmsp++ = ')';
*cmsp = 0;
- tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_sync_skcipher(cms, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
- err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
-
+ err = crypto_sync_skcipher_setkey(tfm, info->lo_encrypt_key,
+ info->lo_encrypt_key_size);
+
if (err != 0)
goto out_free_tfm;
@@ -94,7 +94,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
return 0;
out_free_tfm:
- crypto_free_skcipher(tfm);
+ crypto_free_sync_skcipher(tfm);
out:
return err;
@@ -109,8 +109,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
{
- struct crypto_skcipher *tfm = lo->key_data;
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ struct crypto_sync_skcipher *tfm = lo->key_data;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg_out;
struct scatterlist sg_in;
@@ -119,7 +119,7 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
unsigned in_offs, out_offs;
int err;
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
@@ -175,9 +175,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
static int
cryptoloop_release(struct loop_device *lo)
{
- struct crypto_skcipher *tfm = lo->key_data;
+ struct crypto_sync_skcipher *tfm = lo->key_data;
if (tfm != NULL) {
- crypto_free_skcipher(tfm);
+ crypto_free_sync_skcipher(tfm);
lo->key_data = NULL;
return 0;
}
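crypto_sync_skcipher wraps a transform that is guaranteed synchronous, which is what makes SYNC_SKCIPHER_REQUEST_ON_STACK safe to use. A self-contained sketch of the allocate/setkey/encrypt/free cycle (algorithm name and buffers are illustrative):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Hedged sketch: one-shot CBC-AES encrypt using the sync API. */
static int sketch_sync_encrypt(const u8 *key, unsigned int keylen,
			       u8 *iv, struct scatterlist *sg,
			       unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, sg, sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}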
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index 34e0030f0592..7685df43f1ef 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -87,7 +87,9 @@ struct nullb {
#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+int null_zone_report(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask);
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors);
void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
@@ -97,10 +99,11 @@ static inline int null_zone_init(struct nullb_device *dev)
return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
-static inline blk_status_t null_zone_report(struct nullb *nullb,
- struct bio *bio)
+static inline int null_zone_report(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones,
+ unsigned int *nr_zones, gfp_t gfp_mask)
{
- return BLK_STS_NOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors)
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index e94591021682..09339203dfba 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1129,34 +1129,12 @@ static void null_restart_queue_async(struct nullb *nullb)
blk_mq_start_stopped_hw_queues(q, true);
}
-static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
-{
- struct nullb_device *dev = cmd->nq->dev;
-
- if (dev->queue_mode == NULL_Q_BIO) {
- if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
- cmd->error = null_zone_report(nullb, cmd->bio);
- return true;
- }
- } else {
- if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
- cmd->error = null_zone_report(nullb, cmd->rq->bio);
- return true;
- }
- }
-
- return false;
-}
-
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
int err = 0;
- if (cmd_report_zone(nullb, cmd))
- goto out;
-
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
struct request *rq = cmd->rq;
@@ -1443,6 +1421,7 @@ static const struct block_device_operations null_fops = {
.owner = THIS_MODULE,
.open = null_open,
.release = null_release,
+ .report_zones = null_zone_report,
};
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
@@ -1549,6 +1528,13 @@ static int null_gendisk_register(struct nullb *nullb)
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ if (nullb->dev->zoned) {
+ int ret = blk_revalidate_disk_zones(disk);
+
+ if (ret != 0)
+ return ret;
+ }
+
add_disk(disk);
return 0;
}
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 7c6b86d98700..c0b0e4a3fa8f 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -48,54 +48,27 @@ void null_zone_exit(struct nullb_device *dev)
kvfree(dev->zones);
}
-static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
- unsigned int zno, unsigned int nr_zones)
+int null_zone_report(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
{
- struct blk_zone_report_hdr *hdr = NULL;
- struct bio_vec bvec;
- struct bvec_iter iter;
- void *addr;
- unsigned int zones_to_cpy;
-
- bio_for_each_segment(bvec, bio, iter) {
- addr = kmap_atomic(bvec.bv_page);
-
- zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
-
- if (!hdr) {
- hdr = (struct blk_zone_report_hdr *)addr;
- hdr->nr_zones = nr_zones;
- zones_to_cpy--;
- addr += sizeof(struct blk_zone_report_hdr);
- }
-
- zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones);
-
- memcpy(addr, &dev->zones[zno],
- zones_to_cpy * sizeof(struct blk_zone));
-
- kunmap_atomic(addr);
+ struct nullb *nullb = disk->private_data;
+ struct nullb_device *dev = nullb->dev;
+ unsigned int zno, nrz = 0;
- nr_zones -= zones_to_cpy;
- zno += zones_to_cpy;
+ if (!dev->zoned)
+ /* Not a zoned null device */
+ return -EOPNOTSUPP;
- if (!nr_zones)
- break;
+ zno = null_zone_no(dev, sector);
+ if (zno < dev->nr_zones) {
+ nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
+ memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone));
}
-}
-blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
-{
- struct nullb_device *dev = nullb->dev;
- unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
- unsigned int nr_zones = dev->nr_zones - zno;
- unsigned int max_zones;
+ *nr_zones = nrz;
- max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
- nr_zones = min_t(unsigned int, nr_zones, max_zones);
- null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
-
- return BLK_STS_OK;
+ return 0;
}
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
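With REQ_OP_ZONE_REPORT gone from the command path, zone reports arrive through the new block_device_operations ->report_zones() method; nr_zones is in/out, capping the request on entry and returning the fill count on exit. A hedged sketch of a caller (the real dispatch is the block layer's blkdev_report_zones()):

/* Hedged sketch: simplified caller shape; illustrative only. */
static int sketch_report(struct gendisk *disk, sector_t sector)
{
	struct blk_zone zones[16];
	unsigned int nr_zones = ARRAY_SIZE(zones);
	int ret;

	ret = disk->fops->report_zones(disk, sector, zones, &nr_zones,
				       GFP_KERNEL);
	if (ret)
		return ret;

	return nr_zones;	/* entries actually filled in */
}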
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 639051502181..0cf4509d575c 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -780,7 +780,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
goto failed_enable;
pci_set_master(dev);
- pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
+ dma_set_max_seg_size(&dev->dev, RSXX_HW_BLK_SIZE);
st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
if (st) {
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 7c5fc6942f32..2459dcc04b1c 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -3175,7 +3175,7 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
@@ -3364,7 +3364,7 @@ static int skd_pci_resume(struct pci_dev *pdev)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
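The added assignments matter: without them rc still holds the 64-bit failure code, so a successful 32-bit fallback was still reported as an error. The intended pattern, as a standalone sketch:

/* Hedged sketch: try 64-bit DMA first, fall back to 32-bit, and fail
 * only when neither mask can be set.
 */
static int sketch_set_dma_masks(struct device *dev)
{
	int rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	if (rc)
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return rc;
}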
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9eea83ae01c6..56452cabce5b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2493,6 +2493,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
+ if (!info)
+ return 0;
+
blkif_free(info, 0);
mutex_lock(&info->mutex);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 1106c076fa4b..600430685e28 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -191,8 +191,7 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
vfree(vmalloc (size));
}
- vaddr = (unsigned long) __ioremap (paddr, size,
- _PAGE_WRITETHRU);
+ vaddr = (unsigned long)ioremap_wt(paddr, size);
#else
vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 5d8266c6571f..f0404c6d1ff4 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -127,6 +127,16 @@ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static int fsl_mc_dma_configure(struct device *dev)
+{
+ struct device *dma_dev = dev;
+
+ while (dev_is_fsl_mc(dma_dev))
+ dma_dev = dma_dev->parent;
+
+ return of_dma_configure(dev, dma_dev->of_node, 0);
+}
+
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -148,6 +158,7 @@ struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
+ .dma_configure = fsl_mc_dma_configure,
.dev_groups = fsl_mc_dev_groups,
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
@@ -188,6 +199,10 @@ struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
+struct device_type fsl_mc_bus_dpseci_type = {
+ .name = "fsl_mc_bus_dpseci"
+};
+
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
@@ -203,6 +218,7 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
{ &fsl_mc_bus_dpmcp_type, "dpmcp" },
{ &fsl_mc_bus_dpmac_type, "dpmac" },
{ &fsl_mc_bus_dprtc_type, "dprtc" },
+ { &fsl_mc_bus_dpseci_type, "dpseci" },
{ NULL, NULL }
};
int i;
@@ -616,6 +632,7 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
mc_dev->icid = parent_mc_dev->icid;
mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+ mc_dev->dev.coherent_dma_mask = mc_dev->dma_mask;
dev_set_msi_domain(&mc_dev->dev,
dev_get_msi_domain(&parent_mc_dev->dev));
}
@@ -633,10 +650,6 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
goto error_cleanup_dev;
}
- /* Objects are coherent, unless 'no shareability' flag set. */
- if (!(obj_desc->flags & FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY))
- arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
-
/*
* The device-specific probe callback will get invoked by device_add()
*/
@@ -693,8 +706,8 @@ static int parse_mc_ranges(struct device *dev,
*ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
if (!(*ranges_start) || !ranges_len) {
dev_warn(dev,
- "missing or empty ranges property for device tree node '%s'\n",
- mc_node->name);
+ "missing or empty ranges property for device tree node '%pOFn'\n",
+ mc_node);
return 0;
}
@@ -717,7 +730,7 @@ static int parse_mc_ranges(struct device *dev,
tuple_len = range_tuple_cell_count * sizeof(__be32);
if (ranges_len % tuple_len != 0) {
- dev_err(dev, "malformed ranges property '%s'\n", mc_node->name);
+ dev_err(dev, "malformed ranges property '%pOFn'\n", mc_node);
return -EINVAL;
}
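fsl-mc devices are discovered at runtime and carry no OF node of their own, so the new dma_configure hook walks up to the first non-fsl-mc ancestor and borrows its node, replacing the removed arch_setup_dma_ops() call. The driver core invokes the hook at probe time, roughly like this (a hedged sketch; see drivers/base/dd.c for the real sequence):

/* Hedged sketch: simplified shape of the probe-time call. */
static int sketch_probe_dma_setup(struct device *dev)
{
	if (dev->bus->dma_configure) {
		int ret = dev->bus->dma_configure(dev);

		if (ret)
			return ret;
	}
	return 0;	/* the driver's own probe() runs next */
}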
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 70db4d5638a6..5b2a11a88951 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1229,7 +1229,7 @@ mbus_parse_ranges(struct device_node *node,
tuple_len = (*cell_count) * sizeof(__be32);
if (ranges_len % tuple_len) {
- pr_warn("malformed ranges entry '%s'\n", node->name);
+ pr_warn("malformed ranges entry '%pOFn'\n", node);
return -EINVAL;
}
return 0;
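These conversions track the effort to retire direct device_node->name access in favor of the %pOFn printk specifier, which takes the node pointer itself; %pOF prints the full path instead. A minimal sketch:

#include <linux/of.h>

/* Hedged sketch: %pOFn prints the node's name, %pOF its full path. */
static void sketch_warn_node(struct device_node *np)
{
	pr_warn("malformed ranges entry '%pOFn' (full path %pOF)\n",
		np, np);
}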
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 757e85b81879..a5b8afe3609c 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -327,15 +327,15 @@ static int get_entry_track(int track)
static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
struct cdrom_multisession *ms_info)
{
- int fentry, lentry, track, data, tocuse, err;
+ int fentry, lentry, track, data, err;
+
if (!gd.toc)
return -ENOMEM;
- tocuse = 1;
+
/* Check if GD-ROM */
err = gdrom_readtoc_cmd(gd.toc, 1);
/* Not a GD-ROM so check if standard CD-ROM */
if (err) {
- tocuse = 0;
err = gdrom_readtoc_cmd(gd.toc, 0);
if (err) {
pr_info("Could not get CD table of contents\n");
@@ -794,7 +794,7 @@ static int probe_gdrom(struct platform_device *devptr)
gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
if (IS_ERR(gd.gdrom_rq)) {
- rc = PTR_ERR(gd.gdrom_rq);
+ err = PTR_ERR(gd.gdrom_rq);
gd.gdrom_rq = NULL;
goto probe_fail_requestq;
}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index aaf9e5afaad4..95be7228f327 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -44,10 +44,10 @@ static unsigned short default_quality; /* = 0; default to "off" */
module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
- "current hwrng entropy estimation per mill");
+ "current hwrng entropy estimation per 1024 bits of input");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
- "default entropy content of hwrng per mill");
+ "default entropy content of hwrng per 1024 bits of input");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
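The reworded descriptions reflect the units the core actually uses: quality is entropy per 1024 bits of input, so 1024 means a fully trusted source and 0 means no entropy credit. The credit arithmetic, as a hedged sketch:

/* Hedged sketch: bits of entropy credited for a read of the given
 * size; mirrors how the hwrng core scales by quality/1024.
 */
static inline size_t sketch_entropy_bits(size_t bytes, unsigned short quality)
{
	return bytes * 8 * quality / 1024;
}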
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c75b6cdf0053..2eb70e76ed35 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -433,9 +433,9 @@ static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
- __u32 out[CHACHA20_BLOCK_WORDS]);
+ __u8 out[CHACHA20_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -926,7 +926,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u32 block[CHACHA20_BLOCK_WORDS];
+ __u8 block[CHACHA20_BLOCK_SIZE];
__u32 key[8];
} buf;
@@ -973,7 +973,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u32 out[CHACHA20_BLOCK_WORDS])
+ __u8 out[CHACHA20_BLOCK_SIZE])
{
unsigned long v, flags;
@@ -990,7 +990,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
+static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
{
struct crng_state *crng = NULL;
@@ -1008,7 +1008,7 @@ static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
unsigned long flags;
__u32 *s, *d;
@@ -1020,14 +1020,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = &tmp[used / sizeof(__u32)];
+ s = (__u32 *) &tmp[used];
d = &crng->state[4];
for (i=0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
struct crng_state *crng = NULL;
@@ -1043,7 +1043,7 @@ static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u32 tmp[CHACHA20_BLOCK_WORDS];
+ __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1622,7 +1622,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u32 tmp[CHACHA20_BLOCK_WORDS];
+ __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -2248,7 +2248,7 @@ u64 get_random_u64(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((__u32 *)batch->entropy_u64);
+ extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
@@ -2278,7 +2278,7 @@ u32 get_random_u32(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng(batch->entropy_u32);
+ extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
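Switching the ChaCha20 block buffers from __u32 arrays to byte arrays makes the output size explicit in bytes; callers that still consume words keep 4-byte alignment and cast, as the batched-entropy hunks do. A hedged sketch of that internal pattern (extract_crng() is static to random.c):

/* Hedged sketch: word-oriented consumer of the byte-based API. */
static void sketch_extract_words(__u32 *out, unsigned int nwords)
{
	__u8 block[CHACHA20_BLOCK_SIZE] __aligned(4);

	extract_crng(block);
	memcpy(out, block, min_t(size_t, nwords * sizeof(__u32),
				 sizeof(block)));
	memzero_explicit(block, sizeof(block));
}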
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 18c81cbe4704..536e55d3919f 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -5,7 +5,7 @@
menuconfig TCG_TPM
tristate "TPM Hardware Support"
depends on HAS_IOMEM
- select SECURITYFS
+ imply SECURITYFS
select CRYPTO
select CRYPTO_HASH_INFO
---help---
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index e4a04b2d3c32..99b5133a9d05 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -17,11 +17,36 @@
* License.
*
*/
+#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/workqueue.h>
#include "tpm.h"
#include "tpm-dev.h"
+static struct workqueue_struct *tpm_dev_wq;
+static DEFINE_MUTEX(tpm_dev_wq_lock);
+
+static void tpm_async_work(struct work_struct *work)
+{
+ struct file_priv *priv =
+ container_of(work, struct file_priv, async_work);
+ ssize_t ret;
+
+ mutex_lock(&priv->buffer_mutex);
+ priv->command_enqueued = false;
+ ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer), 0);
+
+ tpm_put_ops(priv->chip);
+ if (ret > 0) {
+ priv->data_pending = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ }
+ mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
+}
+
static void user_reader_timeout(struct timer_list *t)
{
struct file_priv *priv = from_timer(priv, t, user_read_timer);
@@ -29,27 +54,32 @@ static void user_reader_timeout(struct timer_list *t)
pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
task_tgid_nr(current));
- schedule_work(&priv->work);
+ schedule_work(&priv->timeout_work);
}
-static void timeout_work(struct work_struct *work)
+static void tpm_timeout_work(struct work_struct *work)
{
- struct file_priv *priv = container_of(work, struct file_priv, work);
+ struct file_priv *priv = container_of(work, struct file_priv,
+ timeout_work);
mutex_lock(&priv->buffer_mutex);
priv->data_pending = 0;
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
}
void tpm_common_open(struct file *file, struct tpm_chip *chip,
- struct file_priv *priv)
+ struct file_priv *priv, struct tpm_space *space)
{
priv->chip = chip;
+ priv->space = space;
+
mutex_init(&priv->buffer_mutex);
timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
- INIT_WORK(&priv->work, timeout_work);
-
+ INIT_WORK(&priv->timeout_work, tpm_timeout_work);
+ INIT_WORK(&priv->async_work, tpm_async_work);
+ init_waitqueue_head(&priv->async_wait);
file->private_data = priv;
}
@@ -61,15 +91,17 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
int rc;
del_singleshot_timer_sync(&priv->user_read_timer);
- flush_work(&priv->work);
+ flush_work(&priv->timeout_work);
mutex_lock(&priv->buffer_mutex);
if (priv->data_pending) {
ret_size = min_t(ssize_t, size, priv->data_pending);
- rc = copy_to_user(buf, priv->data_buffer, ret_size);
- memset(priv->data_buffer, 0, priv->data_pending);
- if (rc)
- ret_size = -EFAULT;
+ if (ret_size > 0) {
+ rc = copy_to_user(buf, priv->data_buffer, ret_size);
+ memset(priv->data_buffer, 0, priv->data_pending);
+ if (rc)
+ ret_size = -EFAULT;
+ }
priv->data_pending = 0;
}
@@ -79,13 +111,12 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
}
ssize_t tpm_common_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off, struct tpm_space *space)
+ size_t size, loff_t *off)
{
struct file_priv *priv = file->private_data;
- size_t in_size = size;
- ssize_t out_size;
+ int ret = 0;
- if (in_size > TPM_BUFSIZE)
+ if (size > TPM_BUFSIZE)
return -E2BIG;
mutex_lock(&priv->buffer_mutex);
@@ -94,21 +125,20 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
* tpm_read or a user_read_timer timeout. This also prevents split
* buffered writes from blocking here.
*/
- if (priv->data_pending != 0) {
- mutex_unlock(&priv->buffer_mutex);
- return -EBUSY;
+ if (priv->data_pending != 0 || priv->command_enqueued) {
+ ret = -EBUSY;
+ goto out;
}
- if (copy_from_user
- (priv->data_buffer, (void __user *) buf, in_size)) {
- mutex_unlock(&priv->buffer_mutex);
- return -EFAULT;
+ if (copy_from_user(priv->data_buffer, buf, size)) {
+ ret = -EFAULT;
+ goto out;
}
- if (in_size < 6 ||
- in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
- mutex_unlock(&priv->buffer_mutex);
- return -EINVAL;
+ if (size < 6 ||
+ size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
+ ret = -EINVAL;
+ goto out;
}
/* atomic tpm command send and result receive. We only hold the ops
@@ -116,25 +146,50 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
* the char dev is held open.
*/
if (tpm_try_get_ops(priv->chip)) {
- mutex_unlock(&priv->buffer_mutex);
- return -EPIPE;
+ ret = -EPIPE;
+ goto out;
}
- out_size = tpm_transmit(priv->chip, space, priv->data_buffer,
- sizeof(priv->data_buffer), 0);
- tpm_put_ops(priv->chip);
- if (out_size < 0) {
+	/*
+	 * If in nonblocking mode, schedule an async job to send
+	 * the command and return the size.
+	 * In case of error the error code will be returned in
+	 * the subsequent read call.
+	 */
+ if (file->f_flags & O_NONBLOCK) {
+ priv->command_enqueued = true;
+ queue_work(tpm_dev_wq, &priv->async_work);
mutex_unlock(&priv->buffer_mutex);
- return out_size;
+ return size;
}
- priv->data_pending = out_size;
+ ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer), 0);
+ tpm_put_ops(priv->chip);
+
+ if (ret > 0) {
+ priv->data_pending = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ ret = size;
+ }
+out:
mutex_unlock(&priv->buffer_mutex);
+ return ret;
+}
+
+__poll_t tpm_common_poll(struct file *file, poll_table *wait)
+{
+ struct file_priv *priv = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &priv->async_wait, wait);
- /* Set a timeout by which the reader must come claim the result */
- mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ if (priv->data_pending)
+ mask = EPOLLIN | EPOLLRDNORM;
+ else
+ mask = EPOLLOUT | EPOLLWRNORM;
- return in_size;
+ return mask;
}
/*
@@ -142,8 +197,24 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
*/
void tpm_common_release(struct file *file, struct file_priv *priv)
{
+ flush_work(&priv->async_work);
del_singleshot_timer_sync(&priv->user_read_timer);
- flush_work(&priv->work);
+ flush_work(&priv->timeout_work);
file->private_data = NULL;
priv->data_pending = 0;
}
+
+int __init tpm_dev_common_init(void)
+{
+ tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+
+ return !tpm_dev_wq ? -ENOMEM : 0;
+}
+
+void __exit tpm_dev_common_exit(void)
+{
+ if (tpm_dev_wq) {
+ destroy_workqueue(tpm_dev_wq);
+ tpm_dev_wq = NULL;
+ }
+}
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index ebd74ab5abef..32f9738f1cb2 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -39,7 +39,7 @@ static int tpm_open(struct inode *inode, struct file *file)
if (priv == NULL)
goto out;
- tpm_common_open(file, chip, priv);
+ tpm_common_open(file, chip, priv, NULL);
return 0;
@@ -48,12 +48,6 @@ static int tpm_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
-static ssize_t tpm_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off)
-{
- return tpm_common_write(file, buf, size, off, NULL);
-}
-
/*
* Called on file close
*/
@@ -73,6 +67,7 @@ const struct file_operations tpm_fops = {
.llseek = no_llseek,
.open = tpm_open,
.read = tpm_common_read,
- .write = tpm_write,
+ .write = tpm_common_write,
+ .poll = tpm_common_poll,
.release = tpm_release,
};
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
index b24cfb4d3ee1..a126b575cb8c 100644
--- a/drivers/char/tpm/tpm-dev.h
+++ b/drivers/char/tpm/tpm-dev.h
@@ -2,27 +2,33 @@
#ifndef _TPM_DEV_H
#define _TPM_DEV_H
+#include <linux/poll.h>
#include "tpm.h"
struct file_priv {
struct tpm_chip *chip;
+ struct tpm_space *space;
- /* Data passed to and from the tpm via the read/write calls */
- size_t data_pending;
+ /* Holds the amount of data passed or an error code from async op */
+ ssize_t data_pending;
struct mutex buffer_mutex;
struct timer_list user_read_timer; /* user needs to claim result */
- struct work_struct work;
+ struct work_struct timeout_work;
+ struct work_struct async_work;
+ wait_queue_head_t async_wait;
+ bool command_enqueued;
u8 data_buffer[TPM_BUFSIZE];
};
void tpm_common_open(struct file *file, struct tpm_chip *chip,
- struct file_priv *priv);
+ struct file_priv *priv, struct tpm_space *space);
ssize_t tpm_common_read(struct file *file, char __user *buf,
size_t size, loff_t *off);
ssize_t tpm_common_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off, struct tpm_space *space);
-void tpm_common_release(struct file *file, struct file_priv *priv);
+ size_t size, loff_t *off);
+__poll_t tpm_common_poll(struct file *file, poll_table *wait);
+void tpm_common_release(struct file *file, struct file_priv *priv);
#endif
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1a803b0cf980..129f640424b7 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -663,7 +663,8 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
return len;
err = be32_to_cpu(header->return_code);
- if (err != 0 && desc)
+ if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
+ && desc)
dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
if (err)
@@ -1321,7 +1322,8 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
}
rlength = be32_to_cpu(tpm_cmd.header.out.length);
- if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
+ if (rlength < TPM_HEADER_SIZE +
+ offsetof(struct tpm_getrandom_out, rng_data) +
recd) {
total = -EFAULT;
break;
@@ -1407,19 +1409,32 @@ static int __init tpm_init(void)
tpmrm_class = class_create(THIS_MODULE, "tpmrm");
if (IS_ERR(tpmrm_class)) {
pr_err("couldn't create tpmrm class\n");
- class_destroy(tpm_class);
- return PTR_ERR(tpmrm_class);
+ rc = PTR_ERR(tpmrm_class);
+ goto out_destroy_tpm_class;
}
rc = alloc_chrdev_region(&tpm_devt, 0, 2*TPM_NUM_DEVICES, "tpm");
if (rc < 0) {
pr_err("tpm: failed to allocate char dev region\n");
- class_destroy(tpmrm_class);
- class_destroy(tpm_class);
- return rc;
+ goto out_destroy_tpmrm_class;
+ }
+
+ rc = tpm_dev_common_init();
+ if (rc) {
+		pr_err("tpm: failed to allocate tpm device workqueue\n");
+ goto out_unreg_chrdev;
}
return 0;
+
+out_unreg_chrdev:
+ unregister_chrdev_region(tpm_devt, 2 * TPM_NUM_DEVICES);
+out_destroy_tpmrm_class:
+ class_destroy(tpmrm_class);
+out_destroy_tpm_class:
+ class_destroy(tpm_class);
+
+ return rc;
}
static void __exit tpm_exit(void)
@@ -1428,6 +1443,7 @@ static void __exit tpm_exit(void)
class_destroy(tpm_class);
class_destroy(tpmrm_class);
unregister_chrdev_region(tpm_devt, 2*TPM_NUM_DEVICES);
+ tpm_dev_common_exit();
}
subsys_initcall(tpm_init);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index f3501d05264f..f20dc8ece348 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -604,4 +604,6 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
int tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
+int tpm_dev_common_init(void);
+void tpm_dev_common_exit(void);
#endif
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c31b490bd41d..3acf4fd4e5a5 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -329,7 +329,9 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
&buf.data[TPM_HEADER_SIZE];
recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
if (tpm_buf_length(&buf) <
- offsetof(struct tpm2_get_random_out, buffer) + recd) {
+ TPM_HEADER_SIZE +
+ offsetof(struct tpm2_get_random_out, buffer) +
+ recd) {
err = -EFAULT;
goto out;
}
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
index 1a0e97a5da5a..0c751a79bbed 100644
--- a/drivers/char/tpm/tpmrm-dev.c
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -28,7 +28,7 @@ static int tpmrm_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- tpm_common_open(file, chip, &priv->priv);
+ tpm_common_open(file, chip, &priv->priv, &priv->space);
return 0;
}
@@ -45,21 +45,12 @@ static int tpmrm_release(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t tpmrm_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off)
-{
- struct file_priv *fpriv = file->private_data;
- struct tpmrm_priv *priv = container_of(fpriv, struct tpmrm_priv, priv);
-
- return tpm_common_write(file, buf, size, off, &priv->space);
-}
-
const struct file_operations tpmrm_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.open = tpmrm_open,
.read = tpm_common_read,
- .write = tpmrm_write,
+ .write = tpm_common_write,
+ .poll = tpm_common_poll,
.release = tpmrm_release,
};
-
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 911475d36800..b150f87f38f5 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -264,7 +264,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
return -ENOMEM;
}
- rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref);
+ rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
if (rv < 0)
return rv;
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 072aa38374ce..3045067448fb 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -183,7 +183,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
__func__);
- for_each_node_by_type(dn, "cpu")
+ for_each_of_cpu_node(dn)
ncpus++;
cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
@@ -194,7 +194,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
if (WARN_ON(!clks))
goto clks_out;
- for_each_node_by_type(dn, "cpu") {
+ for_each_of_cpu_node(dn) {
struct clk_init_data init;
struct clk *clk;
char *clk_name = kzalloc(5, GFP_KERNEL);
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index db51b2427e8a..e33b21d3f9d8 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -23,8 +23,8 @@ obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
obj-$(CONFIG_ROCKCHIP_TIMER) += rockchip_timer.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
-obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
-obj-$(CONFIG_ORION_TIMER) += time-orion.o
+obj-$(CONFIG_ARMADA_370_XP_TIMER) += timer-armada-370-xp.o
+obj-$(CONFIG_ORION_TIMER) += timer-orion.o
obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
@@ -36,25 +36,25 @@ obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o
obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o
-obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
-obj-$(CONFIG_NSPIRE_TIMER) += zevio-timer.o
+obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o
+obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
-obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
-obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
+obj-$(CONFIG_CADENCE_TTC_TIMER) += timer-cadence-ttc.o
+obj-$(CONFIG_CLKSRC_EFM32) += timer-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
-obj-$(CONFIG_CLKSRC_LPC32XX) += time-lpc32xx.o
+obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o
obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
-obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
-obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
-obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
+obj-$(CONFIG_FSL_FTM_TIMER) += timer-fsl-ftm.o
+obj-$(CONFIG_VF_PIT_TIMER) += timer-vf-pit.o
+obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o
obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
-obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
+obj-$(CONFIG_CLKSRC_PISTACHIO) += timer-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
-obj-$(CONFIG_OWL_TIMER) += owl-timer.o
+obj-$(CONFIG_OWL_TIMER) += timer-owl.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
@@ -66,7 +66,7 @@ obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
-obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
+obj-$(CONFIG_CLKSRC_VERSATILE) += timer-versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 38cd2feb87c4..fbaee04fd1d9 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -193,7 +193,7 @@ static int __init asm9260_timer_init(struct device_node *np)
priv.base = of_io_request_and_map(np, 0, np->name);
if (IS_ERR(priv.base)) {
- pr_err("%s: unable to map resource\n", np->name);
+ pr_err("%pOFn: unable to map resource\n", np);
return PTR_ERR(priv.base);
}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 69866cd8f4bb..db410acd8964 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -22,6 +22,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/sched_clock.h>
static void __init timer_get_base_and_rate(struct device_node *np,
@@ -29,11 +30,22 @@ static void __init timer_get_base_and_rate(struct device_node *np,
{
struct clk *timer_clk;
struct clk *pclk;
+ struct reset_control *rstc;
*base = of_iomap(np, 0);
if (!*base)
- panic("Unable to map regs for %s", np->name);
+ panic("Unable to map regs for %pOFn", np);
+
+ /*
+ * Reset the timer if the reset control is available, wiping
+	 * out any state the firmware may have left it in
+ */
+ rstc = of_reset_control_get(np, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
+ }
/*
	 * Not all implementations use a peripheral clock, so don't panic
@@ -42,8 +54,8 @@ static void __init timer_get_base_and_rate(struct device_node *np,
pclk = of_clk_get_by_name(np, "pclk");
if (!IS_ERR(pclk))
if (clk_prepare_enable(pclk))
- pr_warn("pclk for %s is present, but could not be activated\n",
- np->name);
+ pr_warn("pclk for %pOFn is present, but could not be activated\n",
+ np);
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk))
@@ -57,7 +69,7 @@ static void __init timer_get_base_and_rate(struct device_node *np,
try_clock_freq:
if (of_property_read_u32(np, "clock-freq", rate) &&
of_property_read_u32(np, "clock-frequency", rate))
- panic("No clock nor clock-frequency property for %s", np->name);
+ panic("No clock nor clock-frequency property for %pOFn", np);
}
static void __init add_clockevent(struct device_node *event_timer)
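of_reset_control_get() returns an error pointer when the DT describes no reset line, so gating on IS_ERR() keeps the reset strictly optional. A hedged sketch of the same pattern for a driver that can unbind, which would additionally release the handle (the __init path above keeps it for the kernel's lifetime):

#include <linux/reset.h>

/* Hedged sketch: pulse an optional reset, then drop the reference. */
static void sketch_optional_reset(struct device_node *np)
{
	struct reset_control *rstc = of_reset_control_get(np, NULL);

	if (IS_ERR(rstc))
		return;		/* no reset described: nothing to do */

	reset_control_assert(rstc);
	reset_control_deassert(rstc);
	reset_control_put(rstc);
}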
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 08cd6eaf3795..395837938301 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -191,13 +191,13 @@ static int __init pxa_timer_dt_init(struct device_node *np)
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
if (!timer_base) {
- pr_err("%s: unable to map resource\n", np->name);
+ pr_err("%pOFn: unable to map resource\n", np);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_crit("%s: unable to get clk\n", np->name);
+ pr_crit("%pOFn: unable to get clk\n", np);
return PTR_ERR(clk);
}
@@ -210,7 +210,7 @@ static int __init pxa_timer_dt_init(struct device_node *np)
/* we are only interested in OS-timer0 irq */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
- pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
+ pr_crit("%pOFn: unable to parse OS-timer0 irq\n", np);
return -EINVAL;
}
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 6cffd7c6001a..61d5f3b539ce 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Timer Support - OSTM
*
* Copyright (C) 2017 Renesas Electronics America, Inc.
* Copyright (C) 2017 Chris Brandt
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/of_address.h>
diff --git a/drivers/clocksource/riscv_timer.c b/drivers/clocksource/riscv_timer.c
index 4e8b347e43e2..084e97dc10ed 100644
--- a/drivers/clocksource/riscv_timer.c
+++ b/drivers/clocksource/riscv_timer.c
@@ -8,6 +8,7 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
+#include <asm/smp.h>
#include <asm/sbi.h>
/*
@@ -84,13 +85,16 @@ void riscv_timer_interrupt(void)
static int __init riscv_timer_init_dt(struct device_node *n)
{
- int cpu_id = riscv_of_processor_hart(n), error;
+ int cpuid, hartid, error;
struct clocksource *cs;
- if (cpu_id != smp_processor_id())
+ hartid = riscv_of_processor_hartid(n);
+ cpuid = riscv_hartid_to_cpuid(hartid);
+
+ if (cpuid != smp_processor_id())
return 0;
- cs = per_cpu_ptr(&riscv_clocksource, cpu_id);
+ cs = per_cpu_ptr(&riscv_clocksource, cpuid);
clocksource_register_hz(cs, riscv_timebase);
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
@@ -98,7 +102,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
if (error)
pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
- error, cpu_id);
+ error, cpuid);
return error;
}
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index bbbf37c471a3..55d3e03f2cd4 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - CMT
*
* Copyright (C) 2008 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -78,18 +70,17 @@ struct sh_cmt_info {
unsigned int channels_mask;
unsigned long width; /* 16 or 32 bit version of hardware block */
- unsigned long overflow_bit;
- unsigned long clear_bits;
+ u32 overflow_bit;
+ u32 clear_bits;
/* callbacks for CMSTR and CMCSR access */
- unsigned long (*read_control)(void __iomem *base, unsigned long offs);
+ u32 (*read_control)(void __iomem *base, unsigned long offs);
void (*write_control)(void __iomem *base, unsigned long offs,
- unsigned long value);
+ u32 value);
/* callbacks for CMCNT and CMCOR access */
- unsigned long (*read_count)(void __iomem *base, unsigned long offs);
- void (*write_count)(void __iomem *base, unsigned long offs,
- unsigned long value);
+ u32 (*read_count)(void __iomem *base, unsigned long offs);
+ void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};
struct sh_cmt_channel {
@@ -103,13 +94,13 @@ struct sh_cmt_channel {
unsigned int timer_bit;
unsigned long flags;
- unsigned long match_value;
- unsigned long next_match_value;
- unsigned long max_match_value;
+ u32 match_value;
+ u32 next_match_value;
+ u32 max_match_value;
raw_spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
- unsigned long total_cycles;
+ u64 total_cycles;
bool cs_enabled;
};
@@ -160,24 +151,22 @@ struct sh_cmt_device {
#define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK (7 << 0)
-static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
+static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
{
return ioread16(base + (offs << 1));
}
-static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
+static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
{
return ioread32(base + (offs << 2));
}
-static void sh_cmt_write16(void __iomem *base, unsigned long offs,
- unsigned long value)
+static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
{
iowrite16(value, base + (offs << 1));
}
-static void sh_cmt_write32(void __iomem *base, unsigned long offs,
- unsigned long value)
+static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
{
iowrite32(value, base + (offs << 2));
}
@@ -242,7 +231,7 @@ static const struct sh_cmt_info sh_cmt_info[] = {
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
-static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
+static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
if (ch->iostart)
return ch->cmt->info->read_control(ch->iostart, 0);
@@ -250,8 +239,7 @@ static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}
-static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
{
if (ch->iostart)
ch->cmt->info->write_control(ch->iostart, 0, value);
@@ -259,39 +247,35 @@ static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}
-static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
+static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}
-static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}
-static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
+static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}
-static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}
-static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}
-static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
- int *has_wrapped)
+static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
- unsigned long v1, v2, v3;
- int o1, o2;
+ u32 v1, v2, v3;
+ u32 o1, o2;
o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
@@ -311,7 +295,8 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
- unsigned long flags, value;
+ unsigned long flags;
+ u32 value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&ch->cmt->lock, flags);
@@ -418,11 +403,11 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch)
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
int absolute)
{
- unsigned long new_match;
- unsigned long value = ch->next_match_value;
- unsigned long delay = 0;
- unsigned long now = 0;
- int has_wrapped;
+ u32 value = ch->next_match_value;
+ u32 new_match;
+ u32 delay = 0;
+ u32 now = 0;
+ u32 has_wrapped;
now = sh_cmt_get_counter(ch, &has_wrapped);
ch->flags |= FLAG_REPROGRAM; /* force reprogram */
@@ -619,9 +604,10 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
- unsigned long flags, raw;
- unsigned long value;
- int has_wrapped;
+ unsigned long flags;
+ u32 has_wrapped;
+ u64 value;
+ u32 raw;
raw_spin_lock_irqsave(&ch->lock, flags);
value = ch->total_cycles;
@@ -694,7 +680,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
cs->disable = sh_cmt_clocksource_disable;
cs->suspend = sh_cmt_clocksource_suspend;
cs->resume = sh_cmt_clocksource_resume;
- cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
+ cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
@@ -941,8 +927,22 @@ static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
.compatible = "renesas,cmt-48-gen2",
.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
},
- { .compatible = "renesas,rcar-gen2-cmt0", .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] },
- { .compatible = "renesas,rcar-gen2-cmt1", .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] },
+ {
+ .compatible = "renesas,rcar-gen2-cmt0",
+ .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
+ },
+ {
+ .compatible = "renesas,rcar-gen2-cmt1",
+ .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
+ },
+ {
+ .compatible = "renesas,rcar-gen3-cmt0",
+ .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
+ },
+ {
+ .compatible = "renesas,rcar-gen3-cmt1",
+ .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 6812e099b6a3..354b27d14a19 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - MTU2
*
* Copyright (C) 2009 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index c74a6c543ca2..49f1c805fc95 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - TMU
*
* Copyright (C) 2009 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index edf1a46269f1..edf1a46269f1 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/timer-cadence-ttc.c
index 29d51755e18b..b33402980b6f 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -535,7 +535,7 @@ static int __init ttc_timer_init(struct device_node *timer)
if (ret)
return ret;
- pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
+ pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq);
return 0;
}
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/timer-efm32.c
index 257e810ec1ad..257e810ec1ad 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/timer-efm32.c
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/timer-fsl-ftm.c
index 846d18daf893..846d18daf893 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/timer-fsl-ftm.c
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 62d24690ba02..76e526f58620 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -190,7 +190,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
- pr_err("No clock for %s\n", node->name);
+ pr_err("No clock for %pOFn\n", node);
return PTR_ERR(clk);
}
clk_prepare_enable(clk);
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/timer-lpc32xx.c
index d51a62a79ef7..d51a62a79ef7 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/timer-lpc32xx.c
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/timer-orion.c
index 12202067fe4b..7d487107e3cd 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/timer-orion.c
@@ -129,13 +129,13 @@ static int __init orion_timer_init(struct device_node *np)
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
if (!timer_base) {
- pr_err("%s: unable to map resource\n", np->name);
+ pr_err("%pOFn: unable to map resource\n", np);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_err("%s: unable to get clk\n", np->name);
+ pr_err("%pOFn: unable to get clk\n", np);
return PTR_ERR(clk);
}
@@ -148,7 +148,7 @@ static int __init orion_timer_init(struct device_node *np)
/* we are only interested in timer1 irq */
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0) {
- pr_err("%s: unable to parse timer1 irq\n", np->name);
+ pr_err("%pOFn: unable to parse timer1 irq\n", np);
return -EINVAL;
}
@@ -174,7 +174,7 @@ static int __init orion_timer_init(struct device_node *np)
/* setup timer1 as clockevent timer */
ret = setup_irq(irq, &orion_clkevt_irq);
if (ret) {
- pr_err("%s: unable to setup irq\n", np->name);
+ pr_err("%pOFn: unable to setup irq\n", np);
return ret;
}
diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/timer-owl.c
index ea00a5e8f95d..ea00a5e8f95d 100644
--- a/drivers/clocksource/owl-timer.c
+++ b/drivers/clocksource/timer-owl.c
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/timer-pistachio.c
index a2dd85d0c1d7..a2dd85d0c1d7 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/timer-pistachio.c
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/timer-qcom.c
index 89816f89ff3f..89816f89ff3f 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/timer-qcom.c
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index e01222ea888f..052b230ca312 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -249,7 +249,7 @@ static int __init sp804_of_init(struct device_node *np)
if (of_clk_get_parent_count(np) == 3) {
clk2 = of_clk_get(np, 1);
if (IS_ERR(clk2)) {
- pr_err("sp804: %s clock not found: %d\n", np->name,
+ pr_err("sp804: %pOFn clock not found: %d\n", np,
(int)PTR_ERR(clk2));
clk2 = NULL;
}
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/timer-versatile.c
index 39725d38aede..39725d38aede 100644
--- a/drivers/clocksource/versatile.c
+++ b/drivers/clocksource/timer-versatile.c
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/timer-vf-pit.c
index 0f92089ec08c..0f92089ec08c 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/timer-vf-pit.c
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/timer-vt8500.c
index e0f7489cfc8e..e0f7489cfc8e 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/timer-vt8500.c
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/timer-zevio.c
index f74689334f7c..6127e8062a71 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/timer-zevio.c
@@ -148,12 +148,12 @@ static int __init zevio_timer_add(struct device_node *node)
of_address_to_resource(node, 0, &res);
scnprintf(timer->clocksource_name, sizeof(timer->clocksource_name),
- "%llx.%s_clocksource",
- (unsigned long long)res.start, node->name);
+ "%llx.%pOFn_clocksource",
+ (unsigned long long)res.start, node);
scnprintf(timer->clockevent_name, sizeof(timer->clockevent_name),
- "%llx.%s_clockevent",
- (unsigned long long)res.start, node->name);
+ "%llx.%pOFn_clockevent",
+ (unsigned long long)res.start, node);
if (timer->interrupt_regs && irqnr) {
timer->clkevt.name = timer->clockevent_name;
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c23396f32c8a..8e7e225d2446 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 801aeab5ab1e..2b7af44c7b85 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-aes.c driver.
*/
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index 2a60d1224143..cbd37a2edada 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* API for Atmel Secure Protocol Layers Improved Performances (SPLIP)
*
@@ -5,18 +6,6 @@
*
* Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 74f083f45e97..ba00e4563ca0 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Microchip / Atmel ECC (I2C) driver.
*
* Copyright (c) 2017, Microchip Technology Inc.
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/bitrev.h>
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
index 25232c8abcc2..643a3b947338 100644
--- a/drivers/crypto/atmel-ecc.h
+++ b/drivers/crypto/atmel-ecc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, Microchip Technology Inc.
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
*/
#ifndef __ATMEL_ECC_H__
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8a19df2fba6a..ab0cfe748931 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-sham.c drivers.
*/
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 97b0423efa7f..438e1ffb2ec0 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-aes.c drivers.
*/
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 7f07a5085e9b..f3442c2bdbdc 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -330,7 +330,7 @@ struct artpec6_cryptotfm_context {
size_t key_length;
u32 key_md;
int crypto_type;
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
struct artpec6_crypto_aead_hw_ctx {
@@ -1199,15 +1199,15 @@ artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
pr_debug("counter %x will overflow (nblks %u), falling back\n",
counter, counter + nblks);
- ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
- ctx->key_length);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
+ ctx->key_length);
if (ret)
return ret;
{
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -1561,10 +1561,9 @@ static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
- 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback =
+ crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);
@@ -1605,7 +1604,7 @@ static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
artpec6_crypto_aes_exit(tfm);
}
@@ -3174,7 +3173,6 @@ static struct platform_driver artpec6_crypto_driver = {
.remove = artpec6_crypto_remove,
.driver = {
.name = "artpec6-crypto",
- .owner = THIS_MODULE,
.of_match_table = artpec6_crypto_of_match,
},
};
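
The artpec6 change swaps the CTR fallback over to the crypto_sync_skcipher wrapper introduced for on-stack requests: SYNC_SKCIPHER_REQUEST_ON_STACK() only has a bounded size when the underlying transform is synchronous, which the sync API guarantees by construction (note that CRYPTO_ALG_ASYNC is no longer masked in at allocation time). A hedged sketch of the fallback pattern, where the helper name and its argument list are illustrative rather than artpec6 code:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative-only: run one synchronous fallback encryption. */
static int fallback_encrypt(struct crypto_sync_skcipher *fallback,
			    struct scatterlist *src, struct scatterlist *dst,
			    unsigned int len, u8 *iv)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, 0, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst, len, iv);
	ret = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe on-stack request state */
	return ret;
}

The transform itself would come from crypto_alloc_sync_skcipher() and be keyed with crypto_sync_skcipher_setkey(), mirroring the init and setkey hunks above.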
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 1eb852765469..c4b1cade55c1 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,7 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_FSL_CAAM_COMMON
+ tristate
+
config CRYPTO_DEV_FSL_CAAM
- tristate "Freescale CAAM-Multicore driver backend"
+ tristate "Freescale CAAM-Multicore platform driver backend"
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
select SOC_BUS
+ select CRYPTO_DEV_FSL_CAAM_COMMON
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -12,9 +17,16 @@ config CRYPTO_DEV_FSL_CAAM
To compile this driver as a module, choose M here: the module
will be called caam.
+if CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+ bool "Enable debug output in CAAM driver"
+ help
+ Selecting this will enable printing of various debug
+ information in the CAAM driver.
+
config CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
- depends on CRYPTO_DEV_FSL_CAAM
default y
help
Enables the driver module for Job Rings which are part of
@@ -25,9 +37,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
To compile this driver as a module, choose M here: the module
will be called caam_jr.
+if CRYPTO_DEV_FSL_CAAM_JR
+
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
- depends on CRYPTO_DEV_FSL_CAAM_JR
range 2 9
default "9"
help
@@ -45,7 +58,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
- depends on CRYPTO_DEV_FSL_CAAM_JR
help
Enable the Job Ring's interrupt coalescing feature.
@@ -75,7 +87,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -90,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
tristate "Queue Interface as Crypto API backend"
- depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
+ depends on FSL_DPAA && NET
default y
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
@@ -107,7 +118,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -119,7 +129,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_PKC_API
tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RSA
help
@@ -131,7 +140,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -142,13 +150,32 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
-config CRYPTO_DEV_FSL_CAAM_DEBUG
- bool "Enable debug output in CAAM driver"
- depends on CRYPTO_DEV_FSL_CAAM
+endif # CRYPTO_DEV_FSL_CAAM_JR
+
+endif # CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_DPAA2_CAAM
+ tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
+ depends on FSL_MC_DPIO
+ depends on NETDEVICES
+ select CRYPTO_DEV_FSL_CAAM_COMMON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ select CRYPTO_AEAD
+ select CRYPTO_HASH
help
- Selecting this will enable printing of various debug
- information in the CAAM driver.
+ CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+ It handles DPSECI DPAA2 objects that sit on the Management Complex
+ (MC) fsl-mc bus.
+
+ To compile this as a module, choose M here: the module
+ will be called dpaa2_caam.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
+ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+ def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index cb652ee7dfc8..7bbfd06a11ff 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
ccflags-y := -DDEBUG
endif
+ccflags-y += -DVERSION=\"\"
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o error.o
+caam_jr-objs := jr.o key_gen.o
caam_pkc-y := caampkc.o pkc_desc.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
caam-objs += qi.o
endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
+
+dpaa2_caam-y := caamalg_qi2.o dpseci.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ec40f991e6c6..869f092432de 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,8 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
* Based on talitos crypto API driver.
*
@@ -81,8 +82,6 @@
#define debug(format, arg...)
#endif
-static struct list_head alg_list;
-
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
@@ -96,17 +95,21 @@ struct caam_aead_alg {
bool registered;
};
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/*
* per-session context
*/
struct caam_ctx {
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
- dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
enum dma_data_direction dir;
struct device *jrdev;
@@ -648,20 +651,20 @@ static int rfc4543_setkey(struct crypto_aead *aead,
return rfc4543_set_sh_desc(aead);
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+ skcipher);
struct device *jrdev = ctx->jrdev;
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode &&
- (strstr(alg_name, "rfc3686") != NULL));
+ const bool is_rfc3686 = alg->caam.rfc3686;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
@@ -689,40 +692,32 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* ablkcipher_encrypt shared descriptor */
+ /* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
- /* ablkcipher_decrypt shared descriptor */
+ /* skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
- /* ablkcipher_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
- cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), ctx->dir);
-
return 0;
}
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(ablkcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
@@ -731,15 +726,15 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* xts_ablkcipher_encrypt shared descriptor */
+ /* xts_skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
- /* xts_ablkcipher_decrypt shared descriptor */
+ /* xts_skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
@@ -765,22 +760,20 @@ struct aead_edesc {
};
/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
- * @iv_dir: DMA mapping direction for IV
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
* and IV
*/
-struct ablkcipher_edesc {
+struct skcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
- enum dma_data_direction iv_dir;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -790,8 +783,7 @@ struct ablkcipher_edesc {
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents,
- dma_addr_t iv_dma, int ivsize,
- enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
int sec4_sg_bytes)
{
if (dst != src) {
@@ -803,7 +795,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
@@ -814,20 +806,19 @@ static void aead_unmap(struct device *dev,
struct aead_request *req)
{
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+ edesc->src_nents, edesc->dst_nents, 0, 0,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
-static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst,
edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->iv_dir,
+ edesc->iv_dma, ivsize,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
@@ -881,87 +872,74 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
aead_request_complete(req, err);
}
-static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ablkcipher_request *req = context;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
#endif
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
ivsize, 0);
- /* In case initial IV was generated, copy it in GIVCIPHER request */
- if (edesc->iv_dir == DMA_FROM_DEVICE) {
- u8 *iv;
- struct skcipher_givcrypt_request *greq;
-
- greq = container_of(req, struct skcipher_givcrypt_request,
- creq);
- iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
- edesc->sec4_sg_bytes;
- memcpy(greq->giv, iv, ivsize);
- }
-
kfree(edesc);
- ablkcipher_request_complete(req, err);
+ skcipher_request_complete(req, err);
}
-static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ablkcipher_request *req = context;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
#ifdef DEBUG
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
#endif
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
- ablkcipher_request_complete(req, err);
+ skcipher_request_complete(req, err);
}
/*
@@ -1103,34 +1081,38 @@ static void init_authenc_job(struct aead_request *req,
}
/*
- * Fill in ablkcipher job descriptor
+ * Fill in skcipher job descriptor
*/
-static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void init_skcipher_job(struct skcipher_request *req,
+ struct skcipher_edesc *edesc,
+ const bool encrypt)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
+ u32 *sh_desc;
u32 out_options = 0;
- dma_addr_t dst_dma;
+ dma_addr_t dst_dma, ptr;
int len;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
- pr_err("asked=%d, nbytes%d\n",
- (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+	pr_err("asked=%d, cryptlen=%d\n",
+ (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
#endif
caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
LDST_SGF);
if (likely(req->src == req->dst)) {
@@ -1145,48 +1127,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
out_options = LDST_SGF;
}
}
- append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
-}
-
-/*
- * Fill in ablkcipher givencrypt job descriptor
- */
-static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- u32 *desc = edesc->hw_desc;
- u32 in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
- if (edesc->src_nents == 1) {
- src_dma = sg_dma_address(req->src);
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
- in_options = LDST_SGF;
- }
- append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
-
- dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
- sizeof(struct sec4_sg_entry);
- append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
}
/*
@@ -1275,7 +1216,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
GFP_DMA | flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1476,35 +1417,35 @@ static int aead_decrypt(struct aead_request *req)
}
/*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
+ * allocate and map the skcipher extended descriptor for skcipher
*/
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, int desc_bytes)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ int desc_bytes)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_edesc *edesc;
dma_addr_t iv_dma;
u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(src_nents);
}
if (req->dst != req->src) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (unlikely(dst_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(dst_nents);
}
}
@@ -1546,7 +1487,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1555,17 +1496,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
desc_bytes);
- edesc->iv_dir = DMA_TO_DEVICE;
/* Make sure IV is located in a DMAable area */
iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1583,7 +1523,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+ iv_dma, ivsize, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1591,7 +1531,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->iv_dma = iv_dma;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
sec4_sg_bytes, 1);
#endif
@@ -1599,362 +1539,187 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
return edesc;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+ init_skcipher_job(req, edesc, true);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
ivsize, 0);
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+ init_skcipher_job(req, edesc, false);
desc = edesc->hw_desc;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-/*
- * allocate and map the ablkcipher extended descriptor
- * for ablkcipher givencrypt
- */
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
- struct skcipher_givcrypt_request *greq,
- int desc_bytes)
-{
- struct ablkcipher_request *req = &greq->creq;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
- u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
-
- src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (unlikely(src_nents < 0)) {
- dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
- return ERR_PTR(src_nents);
- }
-
- if (likely(req->src == req->dst)) {
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!mapped_src_nents)) {
- dev_err(jrdev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = src_nents;
- mapped_dst_nents = src_nents;
- } else {
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
- DMA_TO_DEVICE);
- if (unlikely(!mapped_src_nents)) {
- dev_err(jrdev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- if (unlikely(dst_nents < 0)) {
- dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
- return ERR_PTR(dst_nents);
- }
-
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
- }
- }
-
- sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
- dst_sg_idx = sec4_sg_ents;
- sec4_sg_ents += 1 + mapped_dst_nents;
-
- /*
- * allocate space for base edesc and hw desc commands, link tables, IV
- */
- sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
- GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
- desc_bytes);
- edesc->iv_dir = DMA_FROM_DEVICE;
-
- /* Make sure IV is located in a DMAable area */
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- if (mapped_src_nents > 1)
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
- 0);
-
- dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
- dst_sg_idx + 1, 0);
-
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
- }
- edesc->iv_dma = iv_dma;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
- sec4_sg_bytes, 1);
-#endif
-
- return edesc;
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *jrdev = ctx->jrdev;
- u32 *desc;
- int ret = 0;
-
- /* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* Create and submit job descriptor*/
- init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
- edesc, req);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
-
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ablkcipher_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
-
- return ret;
-}
-
-#define template_aead template_u.aead
-#define template_ablkcipher template_u.ablkcipher
-struct caam_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- } template_u;
- u32 class1_alg_type;
- u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
- /* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
},
{
- .name = "xts(aes)",
- .driver_name = "xts-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = xts_ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
};
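
Structurally, driver_algs[] no longer holds a home-grown caam_alg_template that must be copied into a kmalloc'ed crypto_alg at registration time; each entry is a caam_skcipher_alg that embeds the generic skcipher_alg next to the CAAM-specific parameters, and the driver recovers its wrapper with container_of() (as skcipher_setkey() above does). A generic sketch of the idiom, with hypothetical foo_* names standing in for the driver's:

#include <crypto/skcipher.h>
#include <linux/kernel.h>

/* Hypothetical driver template embedding private data in the alg. */
struct foo_skcipher_alg {
	struct skcipher_alg skcipher;
	u32 alg_type;			/* driver-private, rides along */
};

static int foo_cra_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct foo_skcipher_alg *foo =
		container_of(alg, struct foo_skcipher_alg, skcipher);

	/* Private data is reachable without a global table walk. */
	pr_debug("%s: alg_type %#x\n", __func__, foo->alg_type);
	return 0;
}

This is also what lets caam_algapi_exit() below unregister entries in place via t_alg->registered instead of maintaining the old alg_list.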
@@ -3239,12 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
},
};
-struct caam_crypto_alg {
- struct crypto_alg crypto_alg;
- struct list_head entry;
- struct caam_alg_entry caam;
-};
-
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
@@ -3276,8 +3035,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
ctx->sh_desc_enc_dma = dma_addr;
ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
sh_desc_dec);
- ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
- sh_desc_givenc);
ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
/* copy descriptor header template value */
@@ -3287,14 +3044,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
return 0;
}
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg =
- container_of(alg, struct caam_crypto_alg, crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
- return caam_init_common(ctx, &caam_alg->caam, false);
+ return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+ false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3316,9 +3073,9 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_jr_free(ctx->jrdev);
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_tfm_ctx(tfm));
+ caam_exit_common(crypto_skcipher_ctx(tfm));
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3328,8 +3085,6 @@ static void caam_aead_exit(struct crypto_aead *tfm)
static void __exit caam_algapi_exit(void)
{
-
- struct caam_crypto_alg *t_alg, *n;
int i;
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -3339,57 +3094,25 @@ static void __exit caam_algapi_exit(void)
crypto_unregister_aead(&t_alg->aead);
}
- if (!alg_list.next)
- return;
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
- *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct caam_crypto_alg *t_alg;
- struct crypto_alg *alg;
+ struct skcipher_alg *alg = &t_alg->skcipher;
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- pr_err("failed to allocate t_alg\n");
- return ERR_PTR(-ENOMEM);
- }
-
- alg = &t_alg->crypto_alg;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
- alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_cra_init;
- alg->cra_exit = caam_cra_exit;
- alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_blocksize = template->blocksize;
- alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- switch (template->type) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- alg->cra_type = &crypto_givcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- }
-
- t_alg->caam.class1_alg_type = template->class1_alg_type;
- t_alg->caam.class2_alg_type = template->class2_alg_type;
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- return t_alg;
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -3441,8 +3164,6 @@ static int __init caam_algapi_init(void)
return -ENODEV;
- INIT_LIST_HEAD(&alg_list);
-
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3458,9 +3179,8 @@ static int __init caam_algapi_init(void)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- struct caam_crypto_alg *t_alg;
- struct caam_alg_template *alg = driver_algs + i;
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
/* Skip DES algorithms if not supported by device */
if (!des_inst &&
@@ -3477,26 +3197,20 @@ static int __init caam_algapi_init(void)
* on LP devices.
*/
if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
- if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+ if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_XTS)
continue;
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n", alg->driver_name);
- continue;
- }
+ caam_skcipher_alg_init(t_alg);
- err = crypto_register_alg(&t_alg->crypto_alg);
+ err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
+ t_alg->skcipher.base.cra_driver_name);
continue;
}
- list_add_tail(&t_alg->entry, &alg_list);
+ t_alg->registered = true;
registered = true;
}
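
From the caller's side, the net effect of the caamalg.c conversion is that the driver is now reached through the skcipher API, so requests carry req->iv and req->cryptlen rather than the old ablkcipher req->info and req->nbytes. A hedged end-to-end sketch, assuming only that some provider (this driver included) registers "cbc(aes)"; error paths are trimmed for brevity:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative-only synchronous wrapper around the async skcipher API. */
static int example_cbc_aes_encrypt(struct scatterlist *src,
				   struct scatterlist *dst, unsigned int len,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (!ret) {
		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (req) {
			skcipher_request_set_callback(req,
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					crypto_req_done, &wait);
			/* len and iv become req->cryptlen and req->iv. */
			skcipher_request_set_crypt(req, src, dst, len, iv);
			ret = crypto_wait_req(crypto_skcipher_encrypt(req),
					      &wait);
			skcipher_request_free(req);
		} else {
			ret = -ENOMEM;
		}
	}
	crypto_free_skcipher(tfm);
	return ret;
}

After crypto_skcipher_encrypt() completes, iv holds the last ciphertext block; that is the contract the IV copy-back in skcipher_encrypt_done() above upholds, and CTS chaining depends on it.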
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index a408edd84f34..1a6f0da14106 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*/
#include "compat.h"
@@ -1212,11 +1213,8 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
+/* For skcipher encrypt and decrypt, read from req->src and write to req->dst */
+static inline void skcipher_append_src_dst(u32 *desc)
{
append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1226,7 +1224,7 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
}
/**
- * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1235,9 +1233,9 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
*/
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
@@ -1280,18 +1278,18 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
OP_ALG_ENCRYPT);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+ "skcipher enc shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
/**
- * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * cnstr_shdsc_skcipher_decap - skcipher decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1300,9 +1298,9 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
*/
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
@@ -1348,105 +1346,23 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
-}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
-
-/**
- * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
- * with HW-generated initialization vector.
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC.
- * @ivsize: initialization vector size
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- */
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
-{
- u32 *key_jump_cmd, geniv;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
- (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ "skcipher dec shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
/**
- * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
- * descriptor
+ * cnstr_shdsc_xts_skcipher_encap - xts skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
*/
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
u32 *key_jump_cmd;
@@ -1481,24 +1397,23 @@ void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
OP_ALG_ENCRYPT);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+ "xts skcipher enc shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
/**
- * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
- * descriptor
+ * cnstr_shdsc_xts_skcipher_decap - xts skcipher decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
*/
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
u32 *key_jump_cmd;
@@ -1532,15 +1447,15 @@ void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+ "xts skcipher dec shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM descriptor support");
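
For reference, the constructors keep their signatures across the rename, so call sites change only in name. A minimal caller sketch (illustrative, assuming the caam_ctx layout used by caamalg_qi.c below, i.e. sh_desc_enc/sh_desc_dec buffers plus an alginfo cdata):

static void rebuild_skcipher_shdescs(struct caam_ctx *ctx, unsigned int ivsize,
				     bool is_rfc3686, u32 ctx1_iv_off)
{
	/* Rebuild both shared descriptors after a key change, exactly as
	 * the setkey paths below do.
	 */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
}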
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index a917af5776ce..1315c8f6f951 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
*
* Copyright 2016 NXP
*/
@@ -42,10 +42,10 @@
#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \
20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
@@ -96,20 +96,16 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
-
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
#endif /* _CAAMALG_DESC_H_ */
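
Taking CAAM_CMD_SZ as sizeof(u32) (its definition in desc_constr.h), the renamed bounds work out to:

	DESC_SKCIPHER_ENC_LEN = (3 + 20) * 4 = 92 bytes
	DESC_SKCIPHER_DEC_LEN = (3 + 15) * 4 = 72 bytes

both well inside the 64-word (256-byte) descriptor buffer the hardware imposes.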
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index d7aa7d7ff102..23c9fc4975f8 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -1,9 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale FSL CAAM support for crypto API over QI backend.
* Based on caamalg.c
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2018 NXP
*/
#include "compat.h"
@@ -43,6 +44,12 @@ struct caam_aead_alg {
bool registered;
};
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/*
* per-session context
*/
@@ -50,7 +57,6 @@ struct caam_ctx {
struct device *jrdev;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
enum dma_data_direction dir;
@@ -589,18 +595,19 @@ static int rfc4543_setkey(struct crypto_aead *aead,
return 0;
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+ skcipher);
struct device *jrdev = ctx->jrdev;
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+ const bool is_rfc3686 = alg->caam.rfc3686;
int ret = 0;
#ifdef DEBUG
@@ -629,13 +636,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
- is_rfc3686, ctx1_iv_off);
- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
- is_rfc3686, ctx1_iv_off);
- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
- ivsize, is_rfc3686, ctx1_iv_off);
+ /* skcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+ cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
@@ -656,25 +661,16 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
}
}
- if (ctx->drv_ctx[GIVENCRYPT]) {
- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
- ctx->sh_desc_givenc);
- if (ret) {
- dev_err(jrdev, "driver givenc context update failed\n");
- goto badkey;
- }
- }
-
return ret;
badkey:
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
int ret = 0;
@@ -687,9 +683,9 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* xts ablkcipher encrypt, decrypt shared descriptors */
- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+ /* xts skcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
@@ -712,7 +708,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return ret;
badkey:
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -741,7 +737,7 @@ struct aead_edesc {
};
/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
@@ -750,7 +746,7 @@ struct aead_edesc {
* @drv_req: driver-specific request structure
* @sgt: the h/w link table, followed by IV
*/
-struct ablkcipher_edesc {
+struct skcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
@@ -781,10 +777,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
if (type == ENCRYPT)
desc = ctx->sh_desc_enc;
- else if (type == DECRYPT)
+ else /* (type == DECRYPT) */
desc = ctx->sh_desc_dec;
- else /* (type == GIVENCRYPT) */
- desc = ctx->sh_desc_givenc;
cpu = smp_processor_id();
drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
@@ -803,8 +797,7 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- enum optype op_type, dma_addr_t qm_sg_dma,
- int qm_sg_bytes)
+ dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
@@ -815,9 +808,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize,
- op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
@@ -830,21 +821,18 @@ static void aead_unmap(struct device *dev,
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
-static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}
static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -902,9 +890,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int in_len, out_len;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- drv_ctx = get_drv_ctx(ctx, op_type);
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
return (struct aead_edesc *)drv_ctx;
@@ -994,7 +981,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1009,7 +996,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0, 0);
+ dst_nents, 0, 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1028,7 +1015,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
dev_err(qidev, "unable to map assoclen\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1051,7 +1038,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(qidev, "unable to map S/G table\n");
dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1138,14 +1125,14 @@ static int ipsec_gcm_decrypt(struct aead_request *req)
return aead_crypt(req, false);
}
-static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
- struct ablkcipher_edesc *edesc;
- struct ablkcipher_request *req = drv_req->app_ctx;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct skcipher_request *req = drv_req->app_ctx;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = caam_ctx->qidev;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
#ifdef DEBUG
dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
@@ -1158,72 +1145,60 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
#endif
- ablkcipher_unmap(qidev, edesc, req);
-
- /* In case initial IV was generated, copy it in GIVCIPHER request */
- if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
- u8 *iv;
- struct skcipher_givcrypt_request *greq;
-
- greq = container_of(req, struct skcipher_givcrypt_request,
- creq);
- iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
- memcpy(greq->giv, iv, ivsize);
- }
+ skcipher_unmap(qidev, edesc, req);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+ if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
ivsize, ivsize, 0);
qi_cache_free(edesc);
- ablkcipher_request_complete(req, status);
+ skcipher_request_complete(req, status);
}
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, bool encrypt)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ bool encrypt)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_edesc *edesc;
dma_addr_t iv_dma;
u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- drv_ctx = get_drv_ctx(ctx, op_type);
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- return (struct ablkcipher_edesc *)drv_ctx;
+ return (struct skcipher_edesc *)drv_ctx;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(src_nents);
}
if (unlikely(req->src != req->dst)) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (unlikely(dst_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(dst_nents);
}
@@ -1255,12 +1230,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1269,20 +1244,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
/* Make sure IV is located in a DMAable area */
sg_table = &edesc->sgt[0];
iv = (u8 *)(sg_table + qm_sg_ents);
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1292,7 +1267,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->iv_dma = iv_dma;
edesc->qm_sg_bytes = qm_sg_bytes;
edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = ablkcipher_done;
+ edesc->drv_req.cbk = skcipher_done;
edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
@@ -1307,7 +1282,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1315,348 +1290,172 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
fd_sgt = &edesc->drv_req.fd_sgt[0];
dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
- ivsize + req->nbytes, 0);
+ ivsize + req->cryptlen, 0);
if (req->src == req->dst) {
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
- sizeof(*sg_table), req->nbytes, 0);
+ sizeof(*sg_table), req->cryptlen, 0);
} else if (mapped_dst_nents > 1) {
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), req->nbytes, 0);
+ sizeof(*sg_table), req->cryptlen, 0);
} else {
dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
- req->nbytes, 0);
- }
-
- return edesc;
-}
-
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
- struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *qidev = ctx->qidev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
- u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- struct qm_sg_entry *sg_table, *fd_sgt;
- int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
- struct caam_drv_ctx *drv_ctx;
-
- drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- return (struct ablkcipher_edesc *)drv_ctx;
-
- src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (unlikely(src_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
- return ERR_PTR(src_nents);
- }
-
- if (unlikely(req->src != req->dst)) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- if (unlikely(dst_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
- return ERR_PTR(dst_nents);
- }
-
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- DMA_TO_DEVICE);
- if (unlikely(!mapped_src_nents)) {
- dev_err(qidev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(qidev, "unable to map destination\n");
- dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
- }
- } else {
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!mapped_src_nents)) {
- dev_err(qidev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = src_nents;
- mapped_dst_nents = src_nents;
- }
-
- qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
- dst_sg_idx = qm_sg_ents;
-
- qm_sg_ents += 1 + mapped_dst_nents;
- qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
- ivsize > CAAM_QI_MEMCACHE_SIZE)) {
- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
- qm_sg_ents, ivsize);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- /* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_alloc(GFP_DMA | flags);
- if (!edesc) {
- dev_err(qidev, "could not allocate extended descriptor\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- /* Make sure IV is located in a DMAable area */
- sg_table = &edesc->sgt[0];
- iv = (u8 *)(sg_table + qm_sg_ents);
- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
- edesc->iv_dma = iv_dma;
- edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = ablkcipher_done;
- edesc->drv_req.drv_ctx = drv_ctx;
-
- if (mapped_src_nents > 1)
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
-
- dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
- 0);
-
- edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
- DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
- dev_err(qidev, "unable to map S/G table\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, GIVENCRYPT, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
+ req->cryptlen, 0);
}
- fd_sgt = &edesc->drv_req.fd_sgt[0];
-
- if (mapped_src_nents > 1)
- dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
- 0);
- else
- dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
- req->nbytes, 0);
-
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), ivsize + req->nbytes, 0);
-
return edesc;
}
-static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int ret;
if (unlikely(caam_congested))
return -EAGAIN;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, encrypt);
+ edesc = skcipher_edesc_alloc(req, encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
if (!encrypt)
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
ivsize, ivsize, 0);
ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(ctx->qidev, edesc, req);
+ skcipher_unmap(ctx->qidev, edesc, req);
qi_cache_free(edesc);
}
return ret;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- return ablkcipher_crypt(req, true);
+ return skcipher_crypt(req, true);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- return ablkcipher_crypt(req, false);
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ret;
-
- if (unlikely(caam_congested))
- return -EAGAIN;
-
- /* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ablkcipher_unmap(ctx->qidev, edesc, req);
- qi_cache_free(edesc);
- }
-
- return ret;
+ return skcipher_crypt(req, false);
}
-#define template_ablkcipher template_u.ablkcipher
-struct caam_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- } template_u;
- u32 class1_alg_type;
- u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
- /* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam-qi",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam-qi",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam-qi",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam-qi",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam-qi",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "xts(aes)",
- .driver_name = "xts-aes-caam-qi",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = xts_ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
};
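
The entries above are plain struct skcipher_alg templates; once registered they are reached through the generic skcipher API like any other provider. A minimal synchronous caller sketch (illustrative only, buffer setup elided; crypto_wait_req()/crypto_req_done() are the stock helpers from linux/crypto.h):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   struct scatterlist *src,
				   struct scatterlist *dst,
				   unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Highest-priority cbc(aes) provider; with this driver loaded on a
	 * QorIQ part that would typically be cbc-aes-caam-qi.
	 */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	/* The driver returns -EINPROGRESS; crypto_wait_req() blocks until
	 * the completion callback (here: skcipher_done) fires.
	 */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}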
@@ -2528,12 +2327,6 @@ static struct caam_aead_alg driver_aeads[] = {
},
};
-struct caam_crypto_alg {
- struct list_head entry;
- struct crypto_alg crypto_alg;
- struct caam_alg_entry caam;
-};
-
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
@@ -2572,19 +2365,18 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
spin_lock_init(&ctx->lock);
ctx->drv_ctx[ENCRYPT] = NULL;
ctx->drv_ctx[DECRYPT] = NULL;
- ctx->drv_ctx[GIVENCRYPT] = NULL;
return 0;
}
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
- crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
- return caam_init_common(ctx, &caam_alg->caam, false);
+ return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+ false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2602,16 +2394,15 @@ static void caam_exit_common(struct caam_ctx *ctx)
{
caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
- caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_tfm_ctx(tfm));
+ caam_exit_common(crypto_skcipher_ctx(tfm));
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2619,10 +2410,8 @@ static void caam_aead_exit(struct crypto_aead *tfm)
caam_exit_common(crypto_aead_ctx(tfm));
}
-static struct list_head alg_list;
static void __exit caam_qi_algapi_exit(void)
{
- struct caam_crypto_alg *t_alg, *n;
int i;
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -2632,55 +2421,25 @@ static void __exit caam_qi_algapi_exit(void)
crypto_unregister_aead(&t_alg->aead);
}
- if (!alg_list.next)
- return;
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
- *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct caam_crypto_alg *t_alg;
- struct crypto_alg *alg;
-
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg)
- return ERR_PTR(-ENOMEM);
+ struct skcipher_alg *alg = &t_alg->skcipher;
- alg = &t_alg->crypto_alg;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
- alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_cra_init;
- alg->cra_exit = caam_cra_exit;
- alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_blocksize = template->blocksize;
- alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- switch (template->type) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- alg->cra_type = &crypto_givcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- }
-
- t_alg->caam.class1_alg_type = template->class1_alg_type;
- t_alg->caam.class2_alg_type = template->class2_alg_type;
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- return t_alg;
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -2734,8 +2493,6 @@ static int __init caam_qi_algapi_init(void)
return -ENODEV;
}
- INIT_LIST_HEAD(&alg_list);
-
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
@@ -2751,9 +2508,8 @@ static int __init caam_qi_algapi_init(void)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- struct caam_crypto_alg *t_alg;
- struct caam_alg_template *alg = driver_algs + i;
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
/* Skip DES algorithms if not supported by device */
if (!des_inst &&
@@ -2765,23 +2521,16 @@ static int __init caam_qi_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
- dev_warn(priv->qidev, "%s alg allocation failed\n",
- alg->driver_name);
- continue;
- }
+ caam_skcipher_alg_init(t_alg);
- err = crypto_register_alg(&t_alg->crypto_alg);
+ err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
dev_warn(priv->qidev, "%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
+ t_alg->skcipher.base.cra_driver_name);
continue;
}
- list_add_tail(&t_alg->entry, &alg_list);
+ t_alg->registered = true;
registered = true;
}
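
One consequence of dropping the givencrypt path, visible throughout the hunks above: each session now keeps only two lazily created driver contexts instead of three. A sketch of the assumed shape (the enum lives in the QI backend header, not in this file):

enum optype {
	ENCRYPT,
	DECRYPT,	/* GIVENCRYPT is gone after this patch */
	NUM_OP
};

/* in struct caam_ctx: created on first use by get_drv_ctx() */
struct caam_drv_ctx *drv_ctx[NUM_OP];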
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
new file mode 100644
index 000000000000..7d8ac0222fa3
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -0,0 +1,5165 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "caamalg_qi2.h"
+#include "dpseci_cmd.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "sg_sw_qm2.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
+#include "caamhash_desc.h"
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+
+#define CAAM_CRA_PRIORITY 2000
+
+/* max key: AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + max split key size */
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+
+#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+#endif
+
+/*
+ * This is a cache of buffers, from which users of the CAAM QI driver
+ * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
+ * NOTE: A more elegant solution would be to have some headroom in the frames
+ *       being processed. The dpaa2-eth driver could add that, but it would
+ *       be a problem for userspace applications, which cannot know of this
+ *       limitation. So for now, this will do.
+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks here.
+ */
+static struct kmem_cache *qi_cache;
+
+struct caam_alg_entry {
+ struct device *dev;
+ int class1_alg_type;
+ int class2_alg_type;
+ bool rfc3686;
+ bool geniv;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+/**
+ * struct caam_ctx - per-session context
+ * @flc: Flow Contexts array
+ * @key: [authentication key], encryption key
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @key_dma: I/O virtual address of the key
+ * @dir: DMA direction for mapping key and Flow Contexts
+ * @dev: dpseci device
+ * @adata: authentication algorithm details
+ * @cdata: encryption algorithm details
+ * @authsize: authentication tag (a.k.a. ICV / MAC) size
+ */
+struct caam_ctx {
+ struct caam_flc flc[NUM_OP];
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t flc_dma[NUM_OP];
+ dma_addr_t key_dma;
+ enum dma_data_direction dir;
+ struct device *dev;
+ struct alginfo adata;
+ struct alginfo cdata;
+ unsigned int authsize;
+};
+
+static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
+ iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
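
When the DPSECI object sits behind an SMMU, the addresses carried in frame descriptors on the response path are I/O virtual addresses, so they have to be translated back before the driver can touch the memory they name. A hypothetical response-path snippet (dpaa2_fd_get_addr() is the stock accessor from soc/fsl/dpaa2-fd.h):

static void *fd_to_edesc(struct dpaa2_caam_priv *priv,
			 const struct dpaa2_fd *fd)
{
	/* The FD holds an IOVA; convert it to a CPU-addressable pointer */
	return dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
}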
+
+/*
+ * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
+ *
+ * Allocate data on the hotpath. Instead of using kzalloc, one can use the
+ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
+ * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
+ * hosting 16 SG entries.
+ *
+ * @flags - flags that would be used for the equivalent kzalloc(..) call
+ *
+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
+ */
+static inline void *qi_cache_zalloc(gfp_t flags)
+{
+ return kmem_cache_zalloc(qi_cache, flags);
+}
+
+/*
+ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
+ *
+ * @obj - buffer previously allocated by qi_cache_zalloc
+ *
+ * No checking is done; the call is passed straight through to
+ * kmem_cache_free(...)
+ */
+static inline void qi_cache_free(void *obj)
+{
+ kmem_cache_free(qi_cache, obj);
+}
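
The backing kmem_cache is created once at probe time; that path is outside this excerpt, so the following is an assumed sketch (cache name and flags are guesses consistent with the rest of the driver):

static int example_qi_cache_init(void)
{
	/* One cache, CAAM_QI_MEMCACHE_SIZE bytes per object, DMA-able */
	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, SLAB_CACHE_DMA, NULL);
	return qi_cache ? 0 : -ENOMEM;
}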
+
+static struct caam_request *to_caam_req(struct crypto_async_request *areq)
+{
+ switch (crypto_tfm_alg_type(areq->tfm)) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return skcipher_request_ctx(skcipher_request_cast(areq));
+ case CRYPTO_ALG_TYPE_AEAD:
+ return aead_request_ctx(container_of(areq, struct aead_request,
+ base));
+ case CRYPTO_ALG_TYPE_AHASH:
+ return ahash_request_ctx(ahash_request_cast(areq));
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
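
The switch above is only safe because every tfm type served by this driver reserves a struct caam_request at offset 0 of its request context. A sketch of the assumed pairing on the skcipher side (the aead and ahash init paths would do the equivalent):

static int example_skcipher_cra_init(struct crypto_skcipher *tfm)
{
	/* Request context begins with (here: is exactly) a caam_request */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
	return 0;
}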
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents,
+ int dst_nents, dma_addr_t iv_dma, int ivsize,
+ dma_addr_t qm_sg_dma, int qm_sg_bytes)
+{
+ if (dst != src) {
+ if (src_nents)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
+
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
+ if (qm_sg_bytes)
+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ struct caam_flc *flc;
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ u32 *nonce = NULL;
+ unsigned int data_len[2];
+ u32 inl_mask;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+	 * AES-CTR needs to load the IV into the CONTEXT1 register
+	 * at an offset of 128 bits (16 bytes):
+	 * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ /* aead_encrypt shared descriptor */
+ if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
+ DESC_QI_AEAD_ENC_LEN) +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+
+ if (alg->caam.geniv)
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686,
+ nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
+ else
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true, priv->sec_attr.era);
+
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
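
Worked example for the RFC3686 branch above: with CTR_RFC3686_NONCE_SIZE = 4, the IV lands at ctx1_iv_off = 16 + 4 = 20, so the upper 128 bits of CONTEXT1 hold {nonce[4] | IV[8] | counter[4]}, matching the CONTEXT1[255:128] = {NONCE, IV, COUNTER} layout noted in the comment.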
+
+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+ dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+
+ ctx->cdata.keylen = keys.enckeylen;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return aead_set_sh_desc(aead);
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
+ return -EINVAL;
+}
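
crypto_authenc_extractkeys() above unpacks the standard authenc key blob: an rtattr header carrying the encryption key length, followed by the authentication key and then the encryption key. A packing sketch for the other side (assumes the format from crypto/authenc.h; buf is presumed large enough):

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

static unsigned int example_pack_authenc_key(u8 *buf,
					     const u8 *authkey, unsigned int alen,
					     const u8 *enckey, unsigned int elen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(elen);

	/* auth key first, then enc key, exactly as extractkeys expects */
	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, alen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + alen, enckey, elen);

	return RTA_SPACE(sizeof(*param)) + alen + elen;
}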
+
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_request *req_ctx = aead_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+ unsigned int authsize = ctx->authsize;
+ int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct dpaa2_sg_entry *sg_table;
+
+ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize :
+ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : (-authsize)));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+ ivsize = crypto_aead_ivsize(aead);
+
+ /*
+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
+ */
+ qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+ CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_nents, ivsize);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (ivsize) {
+ u8 *iv = (u8 *)(sg_table + qm_sg_nents);
+
+ /* Make sure IV is located in a DMAable area */
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
+ dev_err(dev, "unable to map assoclen\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+ qm_sg_index++;
+ if (ivsize) {
+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+ qm_sg_index++;
+ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ out_len = req->assoclen + req->cryptlen +
+ (encrypt ? ctx->authsize : (-ctx->authsize));
+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, in_len);
+
+ if (req->dst == req->src) {
+ if (mapped_src_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
+ (1 + !!ivsize) * sizeof(*sg_table));
+ }
+ } else if (mapped_dst_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
+ sizeof(*sg_table));
+ }
+
+ dpaa2_fl_set_len(out_fle, out_len);
+
+ return edesc;
+}
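
Summary of the qm_sg table this function builds (illustrative; indices follow the code above):

	entry 0			assoclen word (4 bytes, mapped separately)
	entry 1			IV (present only when ivsize != 0)
	following entries	mapped req->src segments
	from qm_sg_index	mapped req->dst segments (only when dst != src
				and dst has more than one segment)

which is also why in_len = 4 + ivsize + req->assoclen + req->cryptlen.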
+
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES GCM encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
+
+ return gcm_set_sh_desc(aead);
+}
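
The inline-key decision above is pure arithmetic: rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - keylen, where CAAM_DESC_BYTES_MAX is the 64-word (256-byte) descriptor buffer named in the comments. The key is inlined only while the shared descriptor body still fits in what remains; otherwise the descriptor carries a pointer (key_dma) instead.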
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+	/*
+	 * RFC4106 encrypt shared descriptor:
+	 * the Job Descriptor and the Shared Descriptor must together
+	 * fit into the 64-word Descriptor h/w Buffer
+	 */
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+	/*
+	 * RFC4106 decrypt shared descriptor:
+	 * the Job Descriptor and the Shared Descriptor must together
+	 * fit into the 64-word Descriptor h/w Buffer
+	 */
+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
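+	/* the key must carry at least the 4-byte nonce salt */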
+ if (keylen < 4)
+ return -EINVAL;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ return rfc4106_set_sh_desc(aead);
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+	/*
+	 * RFC4543 encrypt shared descriptor:
+	 * the Job Descriptor and the Shared Descriptor must together
+	 * fit into the 64-word Descriptor h/w Buffer
+	 */
+ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+	/*
+	 * RFC4543 decrypt shared descriptor:
+	 * the Job Descriptor and the Shared Descriptor must together
+	 * fit into the 64-word Descriptor h/w Buffer
+	 */
+ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ return rfc4543_set_sh_desc(aead);
+}
+
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher),
+ struct caam_skcipher_alg, skcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+	/*
+	 * AES-CTR needs to load the IV into the CONTEXT1 register
+	 * at an offset of 128 bits (16 bytes):
+	 * CONTEXT1[255:128] = IV
+	 */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* skcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* skcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+
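+	/* XTS consumes two equal-size AES keys, hence twice the AES key length */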
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ dev_err(dev, "key size mismatch\n");
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* xts_skcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* xts_skcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_request *req_ctx = skcipher_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct skcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ u8 *iv;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->cryptlen);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->cryptlen);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
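+	/*
+	 * Input S/G table layout: entry 0 holds the IV, entries 1..n the
+	 * source buffers; a scattered destination gets its own entries
+	 * starting at dst_sg_idx.
+	 */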
+ qm_sg_ents = 1 + mapped_src_nents;
+ dst_sg_idx = qm_sg_ents;
+
+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Make sure IV is located in a DMAable area */
+ sg_table = &edesc->sgt[0];
+ iv = (u8 *)(sg_table + qm_sg_ents);
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ dst_sg_idx, 0);
+
+ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
+ dpaa2_fl_set_len(out_fle, req->cryptlen);
+
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+
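+	/*
+	 * In-place operation reuses the input table minus its leading IV
+	 * entry; otherwise the output is the destination buffer or table.
+	 */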
+ if (req->src == req->dst) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
+ sizeof(*sg_table));
+ } else if (mapped_dst_nents > 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ }
+
+ return edesc;
+}
+
+static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
+static void aead_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static void aead_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+		/*
+		 * Verify that the hardware ICV (authentication tag) check
+		 * passed; if not, report -EBADMSG to the caller.
+		 */
+ if ((status & JRSTA_CCBERR_ERRID_MASK) ==
+ JRSTA_CCBERR_ERRID_ICVCHK)
+ ecode = -EBADMSG;
+ else
+ ecode = -EIO;
+ }
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->cbk = aead_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
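+	/*
+	 * -EINPROGRESS (or -EBUSY for a backloggable request) means the
+	 * request was queued; anything else requires cleanup here.
+	 */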
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->cbk = aead_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
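+/*
+ * RFC4106/RFC4543 associated data starts with the 8-byte ESP header
+ * (SPI + 32-bit sequence number), so shorter assoclen values are invalid.
+ */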
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_encrypt(req);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_decrypt(req);
+}
+
+static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct skcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+ skcipher_unmap(ctx->dev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
+ ivsize, 0);
+
+ qi_cache_free(edesc);
+ skcipher_request_complete(req, ecode);
+}
+
+static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct skcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ skcipher_request_complete(req, ecode);
+}
+
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = skcipher_edesc_alloc(req);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->cbk = skcipher_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = skcipher_edesc_alloc(req);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block.
+ */
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
+ ivsize, 0);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->cbk = skcipher_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
+{
+ dma_addr_t dma_addr;
+ int i;
+
+ /* copy descriptor header template value */
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+ ctx->dev = caam->dev;
+ ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
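+	/*
+	 * Map the flow contexts and the key with a single DMA mapping: they
+	 * sit contiguously in struct caam_ctx up to the flc_dma member,
+	 * hence the offsetof() used as the mapping size.
+	 */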
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
+ offsetof(struct caam_ctx, flc_dma),
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map key, shared descriptors\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < NUM_OP; i++)
+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+ ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
+
+ return 0;
+}
+
+static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
+{
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
+}
+
+static int caam_cra_init_aead(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ aead);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
+ alg->setkey == aead_setkey);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
+ offsetof(struct caam_ctx, flc_dma), ctx->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void caam_cra_exit(struct crypto_skcipher *tfm)
+{
+ caam_exit_common(crypto_skcipher_ctx(tfm));
+}
+
+static void caam_cra_exit_aead(struct crypto_aead *tfm)
+{
+ caam_exit_common(crypto_aead_ctx(tfm));
+}
+
+static struct caam_skcipher_alg driver_algs[] = {
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ }
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ }
+ },
+ /* single-pass ipsec_esp descriptor */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-desi-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(md5),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha1),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha224),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha256),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha384),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha512),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+};
+
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
+{
+ struct skcipher_alg *alg = &t_alg->skcipher;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_skcipher;
+ alg->exit = caam_cra_exit;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+ struct aead_alg *alg = &t_alg->aead;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_aead;
+ alg->exit = caam_cra_exit_aead;
+}
+
+/* max hash key is max split key size */
+#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
+
+#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
+/* caam context sizes for hashes: running digest + 8-byte message length */
+#define HASH_MSG_LEN 8
+#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
+
+enum hash_optype {
+ UPDATE = 0,
+ UPDATE_FIRST,
+ FINALIZE,
+ DIGEST,
+ HASH_NUM_OP
+};
+
+/**
+ * caam_hash_ctx - ahash per-session context
+ * @flc: Flow Contexts array
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @dev: dpseci device
+ * @ctx_len: size of Context Register
+ * @adata: hashing algorithm details
+ */
+struct caam_hash_ctx {
+ struct caam_flc flc[HASH_NUM_OP];
+ dma_addr_t flc_dma[HASH_NUM_OP];
+ struct device *dev;
+ int ctx_len;
+ struct alginfo adata;
+};
+
+/* ahash state */
+struct caam_hash_state {
+ struct caam_request caam_req;
+ dma_addr_t buf_dma;
+ dma_addr_t ctx_dma;
+ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_0;
+ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_1;
+ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int current_buf;
+};
+
+struct caam_export_state {
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
+ u8 caam_ctx[MAX_CTX_LEN];
+ int buflen;
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+};
+
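+/*
+ * The ahash state double-buffers leftover input: while one buffer may be
+ * DMA-mapped for an in-flight job, new bytes accumulate in the other;
+ * switch_buf() flips the roles.
+ */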
+static inline void switch_buf(struct caam_hash_state *state)
+{
+ state->current_buf ^= 1;
+}
+
+static inline u8 *current_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_1 : state->buf_0;
+}
+
+static inline u8 *alt_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_0 : state->buf_1;
+}
+
+static inline int *current_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_1 : &state->buflen_0;
+}
+
+static inline int *alt_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_0 : &state->buflen_1;
+}
+
+/* Map current buffer in state (if length > 0) and put it in link table */
+static inline int buf_map_to_qm_sg(struct device *dev,
+ struct dpaa2_sg_entry *qm_sg,
+ struct caam_hash_state *state)
+{
+ int buflen = *current_buflen(state);
+
+ if (!buflen)
+ return 0;
+
+ state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, state->buf_dma)) {
+ dev_err(dev, "unable to map buf\n");
+ state->buf_dma = 0;
+ return -ENOMEM;
+ }
+
+ dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
+
+ return 0;
+}
+
+/* Map state->caam_ctx, and add it to link table */
+static inline int ctx_map_to_qm_sg(struct device *dev,
+ struct caam_hash_state *state, int ctx_len,
+ struct dpaa2_sg_entry *qm_sg, u32 flag)
+{
+ state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
+ if (dma_mapping_error(dev, state->ctx_dma)) {
+ dev_err(dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ return -ENOMEM;
+ }
+
+ dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
+
+ return 0;
+}
+
+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
+ struct caam_flc *flc;
+ u32 *desc;
+
+ /* ahash_update shared descriptor */
+ flc = &ctx->flc[UPDATE];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_update_first shared descriptor */
+ flc = &ctx->flc[UPDATE_FIRST];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_final shared descriptor */
+ flc = &ctx->flc[FINALIZE];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_digest shared descriptor */
+ flc = &ctx->flc[DIGEST];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ return 0;
+}
+
+struct split_key_sh_result {
+ struct completion completion;
+ int err;
+ struct device *dev;
+};
+
+static void split_key_sh_done(void *cbk_ctx, u32 err)
+{
+ struct split_key_sh_result *res = cbk_ctx;
+
+ dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+
+ if (err)
+ caam_qi2_strstatus(res->dev, err);
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+/* Digest the key if it is too large, i.e. larger than the block size */
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 *keylen, u8 *key_out, u32 digestsize)
+{
+ struct caam_request *req_ctx;
+ u32 *desc;
+ struct split_key_sh_result result;
+ dma_addr_t src_dma, dst_dma;
+ struct caam_flc *flc;
+ dma_addr_t flc_dma;
+ int ret = -ENOMEM;
+ struct dpaa2_fl_entry *in_fle, *out_fle;
+
+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
+ if (!req_ctx)
+ return -ENOMEM;
+
+ in_fle = &req_ctx->fd_flt[1];
+ out_fle = &req_ctx->fd_flt[0];
+
+ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
+ if (!flc)
+ goto err_flc;
+
+ src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, src_dma)) {
+ dev_err(ctx->dev, "unable to map key input memory\n");
+ goto err_src_dma;
+ }
+ dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, dst_dma)) {
+ dev_err(ctx->dev, "unable to map key output memory\n");
+ goto err_dst_dma;
+ }
+
+ desc = flc->sh_desc;
+
+ init_sh_desc(desc, 0);
+
+ /* descriptor to perform unkeyed hash on key_in */
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
+ OP_ALG_AS_INITFINAL);
+ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, flc_dma)) {
+ dev_err(ctx->dev, "unable to map shared descriptor\n");
+ goto err_flc_dma;
+ }
+
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, src_dma);
+ dpaa2_fl_set_len(in_fle, *keylen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ result.err = 0;
+ init_completion(&result.completion);
+ result.dev = ctx->dev;
+
+ req_ctx->flc = flc;
+ req_ctx->flc_dma = flc_dma;
+ req_ctx->cbk = split_key_sh_done;
+ req_ctx->ctx = &result;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS) {
+ /* in progress */
+ wait_for_completion(&result.completion);
+ ret = result.err;
+ print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+ digestsize, 1);
+ }
+
+ dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
+ DMA_TO_DEVICE);
+err_flc_dma:
+ dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
+err_dst_dma:
+ dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
+err_src_dma:
+ kfree(flc);
+err_flc:
+ kfree(req_ctx);
+
+ *keylen = digestsize;
+
+ return ret;
+}
+
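+/*
+ * As in the HMAC construction (RFC 2104), a key longer than the block size
+ * is first digested down to digestsize bytes; the resulting key is then
+ * referenced inline from the shared descriptors.
+ */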
+static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ int ret;
+ u8 *hashed_key = NULL;
+
+ dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
+
+ if (keylen > blocksize) {
+ hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
+ GFP_KERNEL | GFP_DMA);
+ if (!hashed_key)
+ return -ENOMEM;
+ ret = hash_digest_key(ctx, key, &keylen, hashed_key,
+ digestsize);
+ if (ret)
+ goto bad_free_key;
+ key = hashed_key;
+ }
+
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ ctx->adata.key_virt = key;
+ ctx->adata.key_inline = true;
+
+ ret = ahash_set_sh_desc(ahash);
+ kfree(hashed_key);
+ return ret;
+bad_free_key:
+ kfree(hashed_key);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (edesc->src_nents)
+ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+ if (edesc->dst_dma)
+ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+
+ if (edesc->qm_sg_bytes)
+ dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+
+ if (state->buf_dma) {
+ dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ DMA_TO_DEVICE);
+ state->buf_dma = 0;
+ }
+}
+
+static inline void ahash_unmap_ctx(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len,
+ u32 flag)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (state->ctx_dma) {
+ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ state->ctx_dma = 0;
+ }
+ ahash_unmap(dev, edesc, req, dst_len);
+}
+
+static void ahash_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_bi(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ switch_buf(state);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ crypto_ahash_digestsize(ahash), 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ switch_buf(state);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ crypto_ahash_digestsize(ahash), 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
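+/*
+ * Hash as many full blocks as possible through the UPDATE descriptor,
+ * carrying the running digest in state->caam_ctx; residue that does not
+ * fill a block is copied into the alternate buffer for the next call.
+ */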
+static int ahash_update_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state), last_buflen;
+ int in_len = *buflen + req->nbytes, to_hash;
+ int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ last_buflen = *next_buflen;
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - (*next_buflen));
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_src_index = 1 + (*buflen ? 1 : 0);
+ qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
+ sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ if (mapped_nents) {
+ sg_to_qm_sg_last(req->src, mapped_nents,
+ sg_table + qm_sg_src_index, 0);
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+ } else {
+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
+ true);
+ }
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE];
+ req_ctx->cbk = ahash_done_bi;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = last_buflen;
+ }
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_final_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, qm_sg_src_index;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc)
+ return -ENOMEM;
+
+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
+ qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[FINALIZE];
+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
+ req_ctx->cbk = ahash_done_ctx_src;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, qm_sg_src_index;
+ int src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
+ qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[FINALIZE];
+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
+ req_ctx->cbk = ahash_done_ctx_src;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
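+/* One-shot hash of the whole request via the INIT+FINAL digest descriptor */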
+static int ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = -ENOMEM;
+
+ state->buf_dma = 0;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to map source for DMA\n");
+ return ret;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ edesc->src_nents = src_nents;
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+
+ if (mapped_nents > 1) {
+ int qm_sg_bytes;
+ struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
+
+ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ goto unmap;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ goto unmap;
+ }
+
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_final_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int buflen = *current_buflen(state);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = -ENOMEM;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc)
+ return ret;
+
+ state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+ dev_err(ctx->dev, "unable to map src\n");
+ goto unmap;
+ }
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ goto unmap;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, state->buf_dma);
+ dpaa2_fl_set_len(in_fle, buflen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
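+/*
+ * First update of a stream: there is no running context yet, so use the
+ * UPDATE_FIRST (init) descriptor and, on success, switch the state handlers
+ * over to the context-carrying variants.
+ */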
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
+ int in_len = *buflen + req->nbytes, to_hash;
+ int qm_sg_bytes, src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - *next_buflen);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+ ctx->ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, to_hash);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+ req_ctx->cbk = ahash_done_ctx_dst;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = 0;
+ }
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+ if (ret)
+ goto unmap;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap;
+
+ return ret;
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
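+/*
+ * Very first update after init: hash directly from req->src with the
+ * UPDATE_FIRST descriptor; if less than one block is available, just
+ * buffer the data and stay on the no-context path.
+ */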
+static int ahash_update_first(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
+ int to_hash;
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+ 1);
+ to_hash = req->nbytes - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - (*next_buflen));
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to map source for DMA\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ sg_table = &edesc->sgt[0];
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, to_hash);
+
+ if (mapped_nents > 1) {
+ int qm_sg_bytes;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+ *next_buflen, 0);
+
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+ ctx->ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+ req_ctx->cbk = ahash_done_ctx_dst;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags &
+ CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else if (*next_buflen) {
+ state->update = ahash_update_no_ctx;
+ state->finup = ahash_finup_no_ctx;
+ state->final = ahash_final_no_ctx;
+ scatterwalk_map_and_copy(next_buf, req->src, 0,
+ req->nbytes, 0);
+ switch_buf(state);
+ }
+
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_first(struct ahash_request *req)
+{
+ return ahash_digest(req);
+}
+
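+/*
+ * The entry points below dispatch through function pointers in the request
+ * state, so the update/finup/final implementation can change as the stream
+ * progresses (first update vs. context-carrying vs. buffered-only paths).
+ */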
+static int ahash_init(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ state->update = ahash_update_first;
+ state->finup = ahash_finup_first;
+ state->final = ahash_final_no_ctx;
+
+ state->ctx_dma = 0;
+ state->current_buf = 0;
+ state->buf_dma = 0;
+ state->buflen_0 = 0;
+ state->buflen_1 = 0;
+
+ return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->final(req);
+}
+
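+/* Export/import serialize the active buffer, running context and handlers */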
+static int ahash_export(struct ahash_request *req, void *out)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_export_state *export = out;
+ int len;
+ u8 *buf;
+
+ if (state->current_buf) {
+ buf = state->buf_1;
+ len = state->buflen_1;
+ } else {
+ buf = state->buf_0;
+ len = state->buflen_0;
+ }
+
+ memcpy(export->buf, buf, len);
+ memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
+ export->buflen = len;
+ export->update = state->update;
+ export->final = state->final;
+ export->finup = state->finup;
+
+ return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ const struct caam_export_state *export = in;
+
+ memset(state, 0, sizeof(*state));
+ memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
+ state->buflen_0 = export->buflen;
+ state->update = export->update;
+ state->final = export->final;
+ state->finup = export->finup;
+
+ return 0;
+}
+
+struct caam_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ struct ahash_alg template_ahash;
+ u32 alg_type;
+};
+
+/* ahash descriptors */
+static struct caam_hash_template driver_hash[] = {
+ {
+ .name = "sha1",
+ .driver_name = "sha1-caam-qi2",
+ .hmac_name = "hmac(sha1)",
+ .hmac_driver_name = "hmac-sha1-caam-qi2",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA1,
+ }, {
+ .name = "sha224",
+ .driver_name = "sha224-caam-qi2",
+ .hmac_name = "hmac(sha224)",
+ .hmac_driver_name = "hmac-sha224-caam-qi2",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA224,
+ }, {
+ .name = "sha256",
+ .driver_name = "sha256-caam-qi2",
+ .hmac_name = "hmac(sha256)",
+ .hmac_driver_name = "hmac-sha256-caam-qi2",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA256,
+ }, {
+ .name = "sha384",
+ .driver_name = "sha384-caam-qi2",
+ .hmac_name = "hmac(sha384)",
+ .hmac_driver_name = "hmac-sha384-caam-qi2",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA384,
+ }, {
+ .name = "sha512",
+ .driver_name = "sha512-caam-qi2",
+ .hmac_name = "hmac(sha512)",
+ .hmac_driver_name = "hmac-sha512-caam-qi2",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA512,
+ }, {
+ .name = "md5",
+ .driver_name = "md5-caam-qi2",
+ .hmac_name = "hmac(md5)",
+ .hmac_driver_name = "hmac-md5-caam-qi2",
+ .blocksize = MD5_BLOCK_WORDS * 4,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_MD5,
+ }
+};
+
+struct caam_hash_alg {
+ struct list_head entry;
+ struct device *dev;
+ int alg_type;
+ struct ahash_alg ahash_alg;
+};
+
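+/*
+ * Per-tfm init: map the flow context array once (with DMA_ATTR_SKIP_CPU_SYNC,
+ * since ahash_set_sh_desc() syncs explicitly after building the descriptors)
+ * and derive the running context length for this algorithm.
+ */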
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_alg *base = tfm->__crt_alg;
+ struct hash_alg_common *halg =
+ container_of(base, struct hash_alg_common, base);
+ struct ahash_alg *alg =
+ container_of(halg, struct ahash_alg, halg);
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ /*
+ * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
+ * SHA-224 and SHA-384 are truncated hashes, so their running digests
+ * are the full SHA-256 (32 byte) and SHA-512 (64 byte) state sizes.
+ */
+ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+ HASH_MSG_LEN + 32,
+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+ HASH_MSG_LEN + 64,
+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ dma_addr_t dma_addr;
+ int i;
+
+ ctx->dev = caam_hash->dev;
+
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map shared descriptors\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < HASH_NUM_OP; i++)
+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+
+ /* copy descriptor header template value */
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct caam_hash_state));
+
+ return ahash_set_sh_desc(ahash);
+}
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
+ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
+ struct caam_hash_template *template, bool keyed)
+{
+ struct caam_hash_alg *t_alg;
+ struct ahash_alg *halg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ t_alg->ahash_alg = template->template_ahash;
+ halg = &t_alg->ahash_alg;
+ alg = &halg->halg.base;
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_driver_name);
+ } else {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ t_alg->ahash_alg.setkey = NULL;
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_hash_cra_init;
+ alg->cra_exit = caam_hash_cra_exit;
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
+
+ t_alg->alg_type = template->alg_type;
+ t_alg->dev = dev;
+
+ return t_alg;
+}
+
+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+
+ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
+ napi_schedule_irqoff(&ppriv->napi);
+}
+
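+/*
+ * Set up one DPIO notification context and one dequeue store per online CPU,
+ * bounded by the number of queue pairs; defer probing if no affine DPIO is
+ * available yet.
+ */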
+static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_io_notification_ctx *nctx;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->priv = priv;
+ nctx = &ppriv->nctx;
+ nctx->is_cdan = 0;
+ nctx->id = ppriv->rsp_fqid;
+ nctx->desired_cpu = cpu;
+ nctx->cb = dpaa2_caam_fqdan_cb;
+
+ /* Register notification callbacks */
+ err = dpaa2_io_service_register(NULL, nctx);
+ if (unlikely(err)) {
+ dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
+ nctx->cb = NULL;
+ /*
+ * If no affine DPIO for this core, there's probably
+ * none available for next cores either. Signal we want
+ * to retry later, in case the DPIO devices weren't
+ * probed yet.
+ */
+ err = -EPROBE_DEFER;
+ goto err;
+ }
+
+ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
+ dev);
+ if (unlikely(!ppriv->store)) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ err = -ENOMEM;
+ goto err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err:
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->nctx.cb)
+ break;
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ }
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->store)
+ break;
+ dpaa2_io_store_destroy(ppriv->store);
+ }
+
+ return err;
+}
+
+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ dpaa2_io_store_destroy(ppriv->store);
+
+ if (++i == priv->num_pairs)
+ return;
+ }
+}
+
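+/* Point each Rx (response) queue at the affine DPIO of its CPU */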
+static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
+{
+ struct dpseci_rx_queue_cfg rx_queue_cfg;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err = 0, i = 0, cpu;
+
+ /* Configure Rx queues */
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+
+ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
+ DPSECI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.order_preservation_en = 0;
+ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+ /*
+ * Rx priority (WQ) doesn't really matter, since we use
+ * pull mode, i.e. volatile dequeues from specific FQs
+ */
+ rx_queue_cfg.dest_cfg.priority = 0;
+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+
+ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &rx_queue_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
+ err);
+ return err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return err;
+}
+
+static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+
+ if (!priv->cscn_mem)
+ return;
+
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ kfree(priv->cscn_mem);
+}
+
+static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+
+ dpaa2_dpseci_congestion_free(priv);
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
+ const struct dpaa2_fd *fd)
+{
+ struct caam_request *req;
+ u32 fd_err;
+
+ if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
+ dev_err(priv->dev, "Only Frame List FD format is supported!\n");
+ return;
+ }
+
+ fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
+ if (unlikely(fd_err))
+ dev_err(priv->dev, "FD error: %08x\n", fd_err);
+
+ /*
+ * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
+ * in FD[ERR] or FD[FRC].
+ */
+ req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
+ dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
+}
+
+static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ int err;
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err == -EBUSY);
+
+ if (unlikely(err))
+ dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
+
+ return err;
+}
+
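+/* Consume all frames currently in the store; returns the number processed */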
+static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ struct dpaa2_dq *dq;
+ int cleaned = 0, is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
+ if (unlikely(!dq)) {
+ if (unlikely(!is_last)) {
+ dev_dbg(ppriv->priv->dev,
+ "FQ %d returned no valid frames\n",
+ ppriv->rsp_fqid);
+ /*
+ * MUST retry until we get some sort of
+ * valid response token (be it "empty dequeue"
+ * or a valid frame).
+ */
+ continue;
+ }
+ break;
+ }
+
+ /* Process FD */
+ dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
+ cleaned++;
+ } while (!is_last);
+
+ return cleaned;
+}
+
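+/*
+ * NAPI poll: drain the response FQ in store-sized batches until the budget
+ * is (nearly) exhausted, then complete and re-arm notifications.
+ */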
+static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct dpaa2_caam_priv *priv;
+ int err, cleaned = 0, store_cleaned;
+
+ ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
+ priv = ppriv->priv;
+
+ if (unlikely(dpaa2_caam_pull_fq(ppriv)))
+ return 0;
+
+ do {
+ store_cleaned = dpaa2_caam_store_consume(ppriv);
+ cleaned += store_cleaned;
+
+ if (store_cleaned == 0 ||
+ cleaned > budget - DPAA2_CAAM_STORE_SIZE)
+ break;
+
+ /* Try to dequeue some more */
+ err = dpaa2_caam_pull_fq(ppriv);
+ if (unlikely(err))
+ break;
+ } while (1);
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
+ err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
+ if (unlikely(err))
+ dev_err(priv->dev, "Notification rearm failed: %d\n",
+ err);
+ }
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
+ u16 token)
+{
+ struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
+ struct device *dev = priv->dev;
+ int err;
+
+ /*
+ * Congestion group feature supported starting with DPSECI API v5.1
+ * and only when object has been created with this capability.
+ */
+ if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
+ !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
+ return 0;
+
+ priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
+ GFP_KERNEL | GFP_DMA);
+ if (!priv->cscn_mem)
+ return -ENOMEM;
+
+ priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, priv->cscn_dma)) {
+ dev_err(dev, "Error mapping CSCN memory area\n");
+ err = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
+ cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
+ cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
+ cong_notif_cfg.message_ctx = (uintptr_t)priv;
+ cong_notif_cfg.message_iova = priv->cscn_dma;
+ cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
+ DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
+ DPSECI_CGN_MODE_COHERENT_WRITE;
+
+ err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
+ &cong_notif_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_congestion_notification failed\n");
+ goto err_set_cong;
+ }
+
+ return 0;
+
+err_set_cong:
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+err_dma_map:
+ kfree(priv->cscn_mem);
+
+ return err;
+}
+
+static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_caam_priv *priv;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, cpu;
+ u8 i;
+
+ priv = dev_get_drvdata(dev);
+
+ priv->dev = dev;
+ priv->dpsec_id = ls_dev->obj_desc.id;
+
+ /* Get a handle for the DPSECI this interface is associated with */
+ err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_open() failed: %d\n", err);
+ goto err_open;
+ }
+
+ err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
+ &priv->minor_ver);
+ if (err) {
+ dev_err(dev, "dpseci_get_api_version() failed\n");
+ goto err_get_vers;
+ }
+
+ dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
+
+ err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->dpseci_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_attributes() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->sec_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_sec_attr() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "setup_congestion() failed\n");
+ goto err_get_vers;
+ }
+
+ priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
+ priv->dpseci_attr.num_tx_queues);
+ if (priv->num_pairs > num_online_cpus()) {
+ dev_warn(dev, "%d queues won't be used\n",
+ priv->num_pairs - num_online_cpus());
+ priv->num_pairs = num_online_cpus();
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
+ err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->rx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_rx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
+ err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->tx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_tx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
+ priv->rx_queue_attr[i].fqid,
+ priv->tx_queue_attr[i].fqid);
+
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+ ppriv->prio = i;
+
+ ppriv->net_dev.dev = *dev;
+ INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
+ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err_get_rx_queue:
+ dpaa2_dpseci_congestion_free(priv);
+err_get_vers:
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+err_open:
+ return err;
+}
+
+static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_enable(&ppriv->napi);
+ }
+
+ return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ int i, err = 0, enabled;
+
+ err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_disable() failed\n");
+ return err;
+ }
+
+ err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
+ if (err) {
+ dev_err(dev, "dpseci_is_enabled() failed\n");
+ return err;
+ }
+
+ dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_disable(&ppriv->napi);
+ netif_napi_del(&ppriv->napi);
+ }
+
+ return 0;
+}
+
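+/* Registered ahash algorithms, tracked so they can be unregistered later */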
+static struct list_head hash_list;
+
+static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i, err = 0;
+ bool registered = false;
+
+ /*
+ * There is no way to get CAAM endianness - there is no direct register
+ * space access and MC f/w does not provide this attribute.
+ * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
+ * property.
+ */
+ caam_little_end = true;
+
+ caam_imx = false;
+
+ dev = &dpseci_dev->dev;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ priv->domain = iommu_get_domain_for_dev(dev);
+
+ qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
+ 0, SLAB_CACHE_DMA, NULL);
+ if (!qi_cache) {
+ dev_err(dev, "Can't allocate SEC cache\n");
+ return -ENOMEM;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+ if (err) {
+ dev_err(dev, "dma_set_mask_and_coherent() failed\n");
+ goto err_dma_mask;
+ }
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "MC portal allocation failed\n");
+
+ goto err_dma_mask;
+ }
+
+ priv->ppriv = alloc_percpu(*priv->ppriv);
+ if (!priv->ppriv) {
+ dev_err(dev, "alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto err_alloc_ppriv;
+ }
+
+ /* DPSECI initialization */
+ err = dpaa2_dpseci_setup(dpseci_dev);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_setup() failed\n");
+ goto err_dpseci_setup;
+ }
+
+ /* DPIO */
+ err = dpaa2_dpseci_dpio_setup(priv);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ /* DPSECI binding to DPIO */
+ err = dpaa2_dpseci_bind(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_bind() failed\n");
+ goto err_bind;
+ }
+
+ /* DPSECI enable */
+ err = dpaa2_dpseci_enable(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_enable() failed\n");
+ goto err_bind;
+ }
+
+ /* register crypto algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ (alg_sel == OP_ALG_ALGSEL_3DES ||
+ alg_sel == OP_ALG_ALGSEL_DES))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ alg_sel == OP_ALG_ALGSEL_AES)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_skcipher_alg_init(t_alg);
+
+ err = crypto_register_skcipher(&t_alg->skcipher);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->skcipher.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
+ c1_alg_sel == OP_ALG_ALGSEL_DES))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ c1_alg_sel == OP_ALG_ALGSEL_AES)
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD not supported by device.
+ */
+ if (!priv->sec_attr.md_acc_num && c2_alg_sel)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_aead_alg_init(t_alg);
+
+ err = crypto_register_aead(&t_alg->aead);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->aead.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+ if (registered)
+ dev_info(dev, "algorithms registered in /proc/crypto\n");
+
+ /* register hash algorithms the device supports */
+ INIT_LIST_HEAD(&hash_list);
+
+ /*
+ * Skip registration of any hashing algorithms if MD block
+ * is not present.
+ */
+ if (!priv->sec_attr.md_acc_num)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+ struct caam_hash_alg *t_alg;
+ struct caam_hash_template *alg = driver_hash + i;
+
+ /* register hmac version */
+ t_alg = caam_hash_alloc(dev, alg, true);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s hash alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name,
+ err);
+ kfree(t_alg);
+ } else {
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+
+ /* register unkeyed version */
+ t_alg = caam_hash_alloc(dev, alg, false);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name,
+ err);
+ kfree(t_alg);
+ } else {
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+ }
+ if (!list_empty(&hash_list))
+ dev_info(dev, "hash algorithms registered in /proc/crypto\n");
+
+ return err;
+
+err_bind:
+ dpaa2_dpseci_dpio_free(priv);
+err_dpio_setup:
+ dpaa2_dpseci_free(priv);
+err_dpseci_setup:
+ free_percpu(priv->ppriv);
+err_alloc_ppriv:
+ fsl_mc_portal_free(priv->mc_io);
+err_dma_mask:
+ kmem_cache_destroy(qi_cache);
+
+ return err;
+}
+
+static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+
+ if (t_alg->registered)
+ crypto_unregister_aead(&t_alg->aead);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
+ }
+
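+ /* hash_list.next is NULL unless INIT_LIST_HEAD() ran during probe */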
+ if (hash_list.next) {
+ struct caam_hash_alg *t_hash_alg, *p;
+
+ list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
+ kfree(t_hash_alg);
+ }
+ }
+
+ dpaa2_dpseci_disable(priv);
+ dpaa2_dpseci_dpio_free(priv);
+ dpaa2_dpseci_free(priv);
+ free_percpu(priv->ppriv);
+ fsl_mc_portal_free(priv->mc_io);
+ kmem_cache_destroy(qi_cache);
+
+ return 0;
+}
+
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
+{
+ struct dpaa2_fd fd;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ int err = 0, i, id;
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (priv->cscn_mem) {
+ dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
+ DPAA2_CSCN_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+ dev_dbg_ratelimited(dev, "Dropping request\n");
+ return -EBUSY;
+ }
+ }
+
+ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
+
+ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, req->fd_flt_dma)) {
+ dev_err(dev, "DMA mapping error for QI enqueue request\n");
+ return -EIO; /* nothing mapped yet, so skip the err_out unmap */
+ }
+
+ memset(&fd, 0, sizeof(fd));
+ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
+ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
+ dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
+ dpaa2_fd_set_flc(&fd, req->flc_dma);
+
+ /*
+ * There is no guarantee that preemption is disabled here,
+ * thus take action.
+ */
+ preempt_disable();
+ id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
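+ /* Retry a busy enqueue up to 2 * num_tx_queues times before giving up */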
+ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
+ err = dpaa2_io_service_enqueue_fq(NULL,
+ priv->tx_queue_attr[id].fqid,
+ &fd);
+ if (err != -EBUSY)
+ break;
+ }
+ preempt_enable();
+
+ if (unlikely(err)) {
+ dev_err(dev, "Error enqueuing frame: %d\n", err);
+ goto err_out;
+ }
+
+ return -EINPROGRESS;
+
+err_out:
+ dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ return -EIO;
+}
+EXPORT_SYMBOL(dpaa2_caam_enqueue);
+
+static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpseci",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_caam_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_caam_probe,
+ .remove = dpaa2_caam_remove,
+ .match_id_table = dpaa2_caam_match_id_table
+};
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
+
+module_fsl_mc_driver(dpaa2_caam_driver);
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
new file mode 100644
index 000000000000..9823bdefd029
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _CAAMALG_QI2_H_
+#define _CAAMALG_QI2_H_
+
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+#include <linux/threads.h>
+#include "dpseci.h"
+#include "desc_constr.h"
+
+#define DPAA2_CAAM_STORE_SIZE 16
+/* NAPI weight *must* be a multiple of the store size. */
+#define DPAA2_CAAM_NAPI_WEIGHT 64
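+/* e.g. 64 == 4 * DPAA2_CAAM_STORE_SIZE, i.e. the NAPI budget spans 4 full stores */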
+
+/* The congestion entrance threshold was chosen so that on LS2088
+ * we support the maximum throughput for the available memory
+ */
+#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
+#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
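+/* i.e. enter congestion at 128 MiB of enqueued data, exit once below ~115 MiB (90%) */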
+
+/**
+ * dpaa2_caam_priv - driver private data
+ * @dpsec_id: DPSECI object unique ID
+ * @major_ver: DPSECI major version
+ * @minor_ver: DPSECI minor version
+ * @dpseci_attr: DPSECI attributes
+ * @sec_attr: SEC engine attributes
+ * @rx_queue_attr: array of Rx queue attributes
+ * @tx_queue_attr: array of Tx queue attributes
+ * @cscn_mem: pointer to memory region containing the congestion SCN
+ * it's size is larger than to accommodate alignment
+ * @cscn_mem_aligned: pointer to congestion SCN; it is computed as
+ * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
+ * @cscn_dma: dma address used by the QMAN to write CSCN messages
+ * @dev: device associated with the DPSECI object
+ * @mc_io: pointer to MC portal's I/O object
+ * @domain: IOMMU domain
+ * @ppriv: per CPU pointers to private data
+ */
+struct dpaa2_caam_priv {
+ int dpsec_id;
+
+ u16 major_ver;
+ u16 minor_ver;
+
+ struct dpseci_attr dpseci_attr;
+ struct dpseci_sec_attr sec_attr;
+ struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
+ struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
+ int num_pairs;
+
+ /* congestion */
+ void *cscn_mem;
+ void *cscn_mem_aligned;
+ dma_addr_t cscn_dma;
+
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ struct iommu_domain *domain;
+
+ struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
+};
+
+/**
+ * dpaa2_caam_priv_per_cpu - per CPU private data
+ * @napi: napi structure
+ * @net_dev: netdev used by napi
+ * @req_fqid: (virtual) request (Tx / enqueue) FQID
+ * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
+ * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
+ * @nctx: notification context of response FQ
+ * @store: where dequeued frames are stored
+ * @priv: backpointer to dpaa2_caam_priv
+ */
+struct dpaa2_caam_priv_per_cpu {
+ struct napi_struct napi;
+ struct net_device net_dev;
+ int req_fqid;
+ int rsp_fqid;
+ int prio;
+ struct dpaa2_io_notification_ctx nctx;
+ struct dpaa2_io_store *store;
+ struct dpaa2_caam_priv *priv;
+};
+
+/*
+ * The CAAM QI hardware constructs a job descriptor which points
+ * to shared descriptor (as pointed by context_a of FQ to CAAM).
+ * When the job descriptor is executed by deco, the whole job
+ * descriptor together with shared descriptor gets loaded in
+ * deco buffer which is 64 words long (each 32-bit).
+ *
+ * The job descriptor constructed by QI hardware has layout:
+ *
+ * HEADER (1 word)
+ * Shdesc ptr (1 or 2 words)
+ * SEQ_OUT_PTR (1 word)
+ * Out ptr (1 or 2 words)
+ * Out length (1 word)
+ * SEQ_IN_PTR (1 word)
+ * In ptr (1 or 2 words)
+ * In length (1 word)
+ *
+ * The shdesc ptr is used to fetch shared descriptor contents
+ * into deco buffer.
+ *
+ * Apart from shdesc contents, the total number of words that
+ * get loaded in the deco buffer is '8' or '11'. The remaining words
+ * in the deco buffer can be used for storing the shared descriptor.
+ */
+#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
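+/*
+ * Worked example, assuming 64-bit CAAM pointers: CAAM_DESC_BYTES_MAX covers
+ * 64 words and DESC_JOB_IO_LEN covers the 11 words of the QI-built job
+ * descriptor, so MAX_SDLEN works out to 64 - 11 = 53 words available for
+ * the shared descriptor.
+ */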
+
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 512
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @sgt: the h/w link table, followed by IV
+ */
+struct aead_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ unsigned int assoclen;
+ dma_addr_t assoclen_dma;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * skcipher_edesc - s/w-extended skcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @sgt: the h/w link table, followed by IV
+ */
+struct skcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @dst_dma: I/O virtual address of req->result
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @src_nents: number of segments in input scatterlist
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @sgt: pointer to h/w link table
+ */
+struct ahash_edesc {
+ dma_addr_t dst_dma;
+ dma_addr_t qm_sg_dma;
+ int src_nents;
+ int qm_sg_bytes;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/**
+ * caam_flc - Flow Context (FLC)
+ * @flc: Flow Context options
+ * @sh_desc: Shared Descriptor
+ */
+struct caam_flc {
+ u32 flc[16];
+ u32 sh_desc[MAX_SDLEN];
+} ____cacheline_aligned;
+
+enum optype {
+ ENCRYPT = 0,
+ DECRYPT,
+ NUM_OP
+};
+
+/**
+ * caam_request - the request structure the driver application should fill when
+ * submitting a job to the driver.
+ * @fd_flt: Frame list table defining input and output
+ * fd_flt[0] - FLE pointing to output buffer
+ * fd_flt[1] - FLE pointing to input buffer
+ * @fd_flt_dma: DMA address for the frame list table
+ * @flc: Flow Context
+ * @flc_dma: I/O virtual address of Flow Context
+ * @cbk: Callback function to invoke when job is completed
+ * @ctx: arbitrary context attached to the request by the application
+ * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
+ */
+struct caam_request {
+ struct dpaa2_fl_entry fd_flt[2];
+ dma_addr_t fd_flt_dma;
+ struct caam_flc *flc;
+ dma_addr_t flc_dma;
+ void (*cbk)(void *ctx, u32 err);
+ void *ctx;
+ void *edesc;
+};
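+
+/*
+ * A minimal submission sketch (hypothetical caller, error handling elided):
+ * fill fd_flt[0] / fd_flt[1] via dpaa2_fl_set_addr() and dpaa2_fl_set_len(),
+ * set flc, flc_dma, cbk and ctx, then call dpaa2_caam_enqueue(dev, req);
+ * -EINPROGRESS means the frame was accepted and cbk will run on completion.
+ */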
+
+/**
+ * dpaa2_caam_enqueue() - enqueue a crypto request
+ * @dev: device associated with the DPSECI object
+ * @req: pointer to caam_request
+ */
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
+
+#endif /* _CAAMALG_QI2_H_ */
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 43975ab5f09c..46924affa0bd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
@@ -62,6 +63,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamhash_desc.h"
#define CAAM_CRA_PRIORITY 3000
@@ -71,14 +73,6 @@
#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
-/* length of descriptors text */
-#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-
#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -235,60 +229,6 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
return 0;
}
-/*
- * For ahash update, final and finup (import_ctx = true)
- * import context, read and write to seqout
- * For ahash firsts and digest (import_ctx = false)
- * read and write to seqout
- */
-static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx,
- int era)
-{
- u32 op = ctx->adata.algtype;
- u32 *skip_key_load;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Append key if it has been set; ahash update excluded */
- if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
- /* Skip key loading if already shared */
- skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- if (era < 6)
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_proto_dkp(desc, &ctx->adata);
-
- set_jump_tgt_here(desc, skip_key_load);
-
- op |= OP_ALG_AAI_HMAC_PRECOMP;
- }
-
- /* If needed, import context from software */
- if (import_ctx)
- append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
- /*
- * Load from buf and/or src and write to req->result or state->context
- * Calculate remaining bytes to read
- */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
-
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -301,8 +241,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -313,8 +253,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -325,8 +265,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -337,8 +277,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
new file mode 100644
index 000000000000..a12f7959a2c3
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Shared descriptors for ahash algorithms
+ *
+ * Copyright 2017 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamhash_desc.h"
+
+/**
+ * cnstr_shdsc_ahash - ahash shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ * SHA256, SHA384, SHA512}.
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
+ * @digestsize: algorithm's digest size
+ * @ctx_len: size of Context Register
+ * @import_ctx: true if previous Context Register needs to be restored
+ * must be true for ahash update and final
+ * must be false for ahash first and digest
+ * @era: SEC Era
+ */
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era)
+{
+ u32 op = adata->algtype;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Append key if it has been set; ahash update excluded */
+ if (state != OP_ALG_AS_UPDATE && adata->keylen) {
+ u32 *skip_key_load;
+
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ if (era < 6)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_proto_dkp(desc, adata);
+
+ set_jump_tgt_here(desc, skip_key_load);
+
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
+
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
+ */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+}
+EXPORT_SYMBOL(cnstr_shdsc_ahash);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
+MODULE_AUTHOR("NXP Semiconductors");
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
new file mode 100644
index 000000000000..631fc1ac312c
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Shared descriptors for ahash algorithms
+ *
+ * Copyright 2017 NXP
+ */
+
+#ifndef _CAAMHASH_DESC_H_
+#define _CAAMHASH_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era);
+
+#endif /* _CAAMHASH_DESC_H_ */
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index f26d62e5533a..4fc209cbbeab 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index fde07d4ff019..4318b0aa6fb9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for hw_random
*
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 1c71e0cd5098..9604ff7a335e 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
@@ -39,6 +40,7 @@
#include <crypto/authenc.h>
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 538c01f428c1..3fc793193821 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* * CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
new file mode 100644
index 000000000000..8a68531ded0b
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+#include "dpseci.h"
+#include "dpseci_cmd.h"
+
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpseci_id: DPSECI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an already created
+ * object; an object may have been declared statically in the DPL
+ * or created dynamically.
+ * This function returns a unique authentication token, associated with the
+ * specific object ID and the specific MC portal; this token must be used in all
+ * subsequent commands for this specific object.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_open *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
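+/*
+ * Open/close usage sketch (hypothetical caller, error handling elided):
+ *
+ *   u16 token;
+ *
+ *   err = dpseci_open(mc_io, 0, dpseci_id, &token);
+ *   ...issue other dpseci_*() commands using 'token'...
+ *   err = dpseci_close(mc_io, 0, token);
+ */
+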
+/**
+ * dpseci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * After this function is called, no further operations are allowed on the
+ * object without opening a new control session.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_is_enabled *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
+ *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_attributes *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_tx_queues = rsp_params->num_tx_queues;
+ attr->num_rx_queues = rsp_params->num_rx_queues;
+ attr->options = le32_to_cpu(rsp_params->options);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
+ * Rx queues identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ cmd_params->queue = queue;
+ dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
+ cfg->order_preservation_en);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
+ attr->dest_cfg.priority = cmd_params->priority;
+ attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
+ DEST_TYPE);
+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
+ attr->fqid = le32_to_cpu(cmd_params->fqid);
+ attr->order_preservation_en =
+ dpseci_get_field(cmd_params->order_preservation_en,
+ ORDER_PRESERVATION);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->priority = rsp_params->priority;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned SEC attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_attr *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
+ attr->major_rev = rsp_params->major_rev;
+ attr->minor_rev = rsp_params->minor_rev;
+ attr->era = rsp_params->era;
+ attr->deco_num = rsp_params->deco_num;
+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
+ attr->crc_acc_num = rsp_params->crc_acc_num;
+ attr->pk_acc_num = rsp_params->pk_acc_num;
+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
+ attr->rng_acc_num = rsp_params->rng_acc_num;
+ attr->md_acc_num = rsp_params->md_acc_num;
+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
+ attr->des_acc_num = rsp_params->des_acc_num;
+ attr->aes_acc_num = rsp_params->aes_acc_num;
+ attr->ccha_acc_num = rsp_params->ccha_acc_num;
+ attr->ptha_acc_num = rsp_params->ptha_acc_num;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path sec API
+ * @minor_ver: Minor version of data path sec API
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_congestion_notification() - Set congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_congestion_notification() - Get congestion group notification
+ * configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->dest_cfg.priority = rsp_params->priority;
+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
+ CGN_DEST_TYPE);
+ cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+
+ return 0;
+}
diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
new file mode 100644
index 000000000000..4550e134d166
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+#ifndef _DPSECI_H_
+#define _DPSECI_H_
+
+/*
+ * Data Path SEC Interface API
+ * Contains initialization APIs and runtime control APIs for DPSECI
+ */
+
+struct fsl_mc_io;
+
+/**
+ * General DPSECI macros
+ */
+
+/**
+ * Maximum number of Tx/Rx queues per DPSECI object
+ */
+#define DPSECI_MAX_QUEUE_NUM 16
+
+/**
+ * All queues considered; see dpseci_set_rx_queue()
+ */
+#define DPSECI_ALL_QUEUES (u8)(-1)
+
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token);
+
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/**
+ * Enable the Congestion Group support
+ */
+#define DPSECI_OPT_HAS_CG 0x000020
+
+/**
+ * struct dpseci_cfg - Structure representing DPSECI configuration
+ * @options: Any combination of the following flags:
+ * DPSECI_OPT_HAS_CG
+ * @num_tx_queues: num of queues towards the SEC
+ * @num_rx_queues: num of queues back from the SEC
+ * @priorities: Priorities for the SEC hardware processing;
+ * each place in the array is the priority of the tx queue
+ * towards the SEC;
+ * valid priorities are configured with values 1-8;
+ */
+struct dpseci_cfg {
+ u32 options;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 priorities[DPSECI_MAX_QUEUE_NUM];
+};
+
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en);
+
+/**
+ * struct dpseci_attr - Structure representing DPSECI attributes
+ * @id: DPSECI object ID
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @options: any combination of the following flags:
+ * DPSECI_OPT_HAS_CG
+ */
+struct dpseci_attr {
+ int id;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u32 options;
+};
+
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr);
+
+/**
+ * enum dpseci_dest - DPSECI destination types
+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue from
+ * the queue only after notification is received
+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpseci_dest {
+ DPSECI_DEST_NONE = 0,
+ DPSECI_DEST_DPIO,
+ DPSECI_DEST_DPCON
+};
+
+/**
+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that channel;
+ * not relevant for 'DPSECI_DEST_NONE' option
+ */
+struct dpseci_dest_cfg {
+ enum dpseci_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/**
+ * DPSECI queue modification options
+ */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPSECI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Select to modify the queue's order preservation
+ */
+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
+
+/**
+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ * @order_preservation_en: order preservation configuration for the rx queue
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
+ * in 'options'
+ * @dest_cfg: Queue destination parameters; valid only if
+ * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpseci_rx_queue_cfg {
+ u32 options;
+ int order_preservation_en;
+ u64 user_ctx;
+ struct dpseci_dest_cfg dest_cfg;
+};
+
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @order_preservation_en: Status of the order preservation configuration on the
+ * queue
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpseci_rx_queue_attr {
+ u64 user_ctx;
+ int order_preservation_en;
+ struct dpseci_dest_cfg dest_cfg;
+ u32 fqid;
+};
+
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr);
+
+/**
+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
+ * @priority: SEC hardware processing priority for the queue
+ */
+struct dpseci_tx_queue_attr {
+ u32 fqid;
+ u8 priority;
+};
+
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr);
+
+/**
+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
+ * hardware accelerator
+ * @ip_id: ID for SEC
+ * @major_rev: Major revision number for SEC
+ * @minor_rev: Minor revision number for SEC
+ * @era: SEC Era
+ * @deco_num: The number of copies of the DECO that are implemented in this
+ * version of SEC
+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
+ * version of SEC
+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
+ * version of SEC
+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC
+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC
+ * @crc_acc_num: The number of copies of the CRC module that are implemented in
+ * this version of SEC
+ * @pk_acc_num: The number of copies of the Public Key module that are
+ * implemented in this version of SEC
+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
+ * implemented in this version of SEC
+ * @rng_acc_num: The number of copies of the Random Number Generator that are
+ * implemented in this version of SEC
+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
+ * implemented in this version of SEC
+ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
+ * in this version of SEC
+ * @des_acc_num: The number of copies of the DES module that are implemented in
+ * this version of SEC
+ * @aes_acc_num: The number of copies of the AES module that are implemented in
+ * this version of SEC
+ * @ccha_acc_num: The number of copies of the ChaCha20 module that are
+ * implemented in this version of SEC.
+ * @ptha_acc_num: The number of copies of the Poly1305 module that are
+ * implemented in this version of SEC.
+ */
+struct dpseci_sec_attr {
+ u16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+ u8 ccha_acc_num;
+ u8 ptha_acc_num;
+};
+
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr);
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+/**
+ * enum dpseci_congestion_unit - DPSECI congestion units
+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpseci_congestion_unit {
+ DPSECI_CONGESTION_UNIT_BYTES = 0,
+ DPSECI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
+
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
+
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
+ */
+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
+ * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
+ * (if enabled)
+ */
+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpseci_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state.
+ * Set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned
+ * @dest_cfg: CSCN can be sent to either the DPIO or the DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
+ * values
+ */
+struct dpseci_congestion_notification_cfg {
+ enum dpseci_congestion_unit units;
+ u32 threshold_entry;
+ u32 threshold_exit;
+ u64 message_ctx;
+ u64 message_iova;
+ struct dpseci_dest_cfg dest_cfg;
+ u16 notification_mode;
+};
+
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg);
+
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg);
+
+#endif /* _DPSECI_H_ */
diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
new file mode 100644
index 000000000000..6ab77ead6e3d
--- /dev/null
+++ b/drivers/crypto/caam/dpseci_cmd.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _DPSECI_CMD_H_
+#define _DPSECI_CMD_H_
+
+/* DPSECI Version */
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 3
+
+#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
+#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
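+/* e.g. DPSECI_VERSION == DPSECI_VER(5, 3) == 0x00050003 */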
+
+/* Command versioning */
+#define DPSECI_CMD_BASE_VERSION 1
+#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_ID_OFFSET 4
+
+#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
+ DPSECI_CMD_BASE_VERSION)
+
+#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
+ DPSECI_CMD_BASE_VERSION_V2)
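+/* e.g. DPSECI_CMDID_CLOSE == DPSECI_CMD_V1(0x800) == 0x8001 */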
+
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
+#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
+#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
+
+#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
+#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
+#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
+
+#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
+#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
+#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSECI_MASK(field) \
+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
+ DPSECI_##field##_SHIFT)
+
+#define dpseci_set_field(var, field, val) \
+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
+
+#define dpseci_get_field(var, field) \
+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
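+/*
+ * e.g. with DPSECI_DEST_TYPE_SHIFT = 0 and DPSECI_DEST_TYPE_SIZE = 4 (below),
+ * dpseci_get_field(var, DEST_TYPE) reads bits 3:0 of 'var'.
+ */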
+
+struct dpseci_cmd_open {
+ __le32 dpseci_id;
+};
+
+#define DPSECI_ENABLE_SHIFT 0
+#define DPSECI_ENABLE_SIZE 1
+
+struct dpseci_rsp_is_enabled {
+ u8 is_enabled;
+};
+
+struct dpseci_rsp_get_attributes {
+ __le32 id;
+ __le32 pad0;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 pad1[6];
+ __le32 options;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+
+#define DPSECI_ORDER_PRESERVATION_SHIFT 0
+#define DPSECI_ORDER_PRESERVATION_SIZE 1
+
+struct dpseci_cmd_queue {
+ __le32 dest_id;
+ u8 priority;
+ u8 queue;
+ u8 dest_type;
+ u8 pad;
+ __le64 user_ctx;
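+ /* 'options' is written on SET_RX_QUEUE; 'fqid' is read back on GET_RX_QUEUE */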
+ union {
+ __le32 options;
+ __le32 fqid;
+ };
+ u8 order_preservation_en;
+};
+
+struct dpseci_rsp_get_tx_queue {
+ __le32 pad;
+ __le32 fqid;
+ u8 priority;
+};
+
+struct dpseci_rsp_get_sec_attr {
+ __le16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 pad0[3];
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 pad1;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pad2;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 pad3;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+ u8 ccha_acc_num;
+ u8 ptha_acc_num;
+};
+
+struct dpseci_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+#define DPSECI_CGN_DEST_TYPE_SHIFT 0
+#define DPSECI_CGN_DEST_TYPE_SIZE 4
+#define DPSECI_CGN_UNITS_SHIFT 4
+#define DPSECI_CGN_UNITS_SIZE 2
+
+struct dpseci_cmd_congestion_notification {
+ __le32 dest_id;
+ __le16 notification_mode;
+ u8 priority;
+ u8 options;
+ __le64 message_iova;
+ __le64 message_ctx;
+ __le32 threshold_entry;
+ __le32 threshold_exit;
+};
+
+#endif /* _DPSECI_CMD_H_ */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 8da88beb1abb..7e8d690f2827 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -108,6 +108,54 @@ static const struct {
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
+static const struct {
+ u8 value;
+ const char *error_text;
+} qi_error_list[] = {
+ { 0x1F, "Job terminated by FQ or ICID flush" },
+ { 0x20, "FD format error"},
+ { 0x21, "FD command format error"},
+ { 0x23, "FL format error"},
+ { 0x25, "CRJD specified in FD, but not enabled in FLC"},
+ { 0x30, "Max. buffer size too small"},
+ { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
+ { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
+ { 0x33, "Size over/underflow (allocate mode)"},
+ { 0x34, "Size over/underflow (reuse mode)"},
+ { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
+ { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
+ { 0x41, "SBC frame format not supported (allocate mode)"},
+ { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
+ { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
+ { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
+ { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
+ { 0x46, "Annotation length exceeds offset (reuse mode)"},
+ { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
+ { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
+ { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
+ { 0x51, "Unsupported IF reuse mode"},
+ { 0x52, "Unsupported FL use mode"},
+ { 0x53, "Unsupported RJD use mode"},
+ { 0x54, "Unsupported inline descriptor use mode"},
+ { 0xC0, "Table buffer pool 0 depletion"},
+ { 0xC1, "Table buffer pool 1 depletion"},
+ { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
+ { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
+ { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
+ { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
+ { 0xD0, "FLC read error"},
+ { 0xD1, "FL read error"},
+ { 0xD2, "FL write error"},
+ { 0xD3, "OF SGT write error"},
+ { 0xD4, "PTA read error"},
+ { 0xD5, "PTA write error"},
+ { 0xD6, "OF SGT F-bit write error"},
+ { 0xD7, "ASA write error"},
+ { 0xE1, "FLC[ICR]=0 ICID error"},
+ { 0xE2, "FLC[ICR]=1 ICID error"},
+ { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
+};
+
static const char * const cha_id_list[] = {
"",
"AES",
@@ -236,6 +284,27 @@ static void report_deco_status(struct device *jrdev, const u32 status,
status, error, idx_str, idx, err_str, err_err_code);
}
+static void report_qi_status(struct device *qidev, const u32 status,
+ const char *error)
+{
+ u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
+ const char *err_str = "unidentified error value 0x";
+ char err_err_code[3] = { 0 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
+ if (qi_error_list[i].value == err_id)
+ break;
+
+ if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
+ err_str = qi_error_list[i].error_text;
+ else
+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+ dev_err(qidev, "%08x: %s: %s%s\n",
+ status, error, err_str, err_err_code);
+}
+
static void report_jr_status(struct device *jrdev, const u32 status,
const char *error)
{
@@ -250,7 +319,7 @@ static void report_cond_code_status(struct device *jrdev, const u32 status,
status, error, __func__);
}
-void caam_jr_strstatus(struct device *jrdev, u32 status)
+void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
{
static const struct stat_src {
void (*report_ssed)(struct device *jrdev, const u32 status,
@@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
- { NULL, "Queue Manager Interface" },
+ { report_qi_status, "Queue Manager Interface" },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
{ NULL, NULL },
@@ -288,4 +357,8 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
else
dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
-EXPORT_SYMBOL(caam_jr_strstatus);
+EXPORT_SYMBOL(caam_strstatus);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM error reporting");
+MODULE_AUTHOR("Freescale Semiconductor");
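
With the QI slot of the status_src[] table now populated with report_qi_status(),
Queue Manager Interface error codes are decoded through the same table-driven path
as CCB, DECO and Job Ring errors. A sketch of the dispatch inside caam_strstatus()
(JRSTA_SSRC_SHIFT comes from regs.h; the context lines above show the function's tail):

    u32 ssrc = status >> JRSTA_SSRC_SHIFT;
    const char *error = status_src[ssrc].error;

    if (status_src[ssrc].report_ssed)
            status_src[ssrc].report_ssed(jrdev, status, error);
    else if (error)
            dev_err(jrdev, "%d: %s\n", ssrc, error);
    else
            dev_err(jrdev, "%d: unknown error source\n", ssrc);
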
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 5aa332bac4b0..67ea94079837 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -8,7 +8,11 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
-void caam_jr_strstatus(struct device *jrdev, u32 status);
+
+void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
+
+#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
+#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
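
Existing job-ring callers keep calling caam_jr_strstatus() unchanged, while DPAA2
(QI v2) completion paths added later in this series report through the same decoder
via the new wrapper. A usage sketch:

    /* job-ring completion callback, unchanged: */
    if (status)
            caam_jr_strstatus(jrdev, status);

    /* DPAA2 / QI v2 completion path: */
    if (status)
            caam_qi2_strstatus(qidev, status);
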
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index acdd72016ffe..d50085a03597 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* CAAM/SEC 4.x transport/backend driver
* JobR backend functionality
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 67f7f8c42c93..b84e6c8b1e13 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -84,13 +84,6 @@ static u64 times_congested;
#endif
/*
- * CPU from where the module initialised. This is required because QMan driver
- * requires CGRs to be removed from same CPU from where they were originally
- * allocated.
- */
-static int mod_init_cpu;
-
-/*
 * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
* doing malloc on the hotpath.
@@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
-int caam_qi_shutdown(struct device *qidev)
+void caam_qi_shutdown(struct device *qidev)
{
- int i, ret;
+ int i;
struct caam_qi_priv *priv = dev_get_drvdata(qidev);
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
for_each_cpu(i, cpus) {
struct napi_struct *irqtask;
@@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev)
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
}
- /*
- * QMan driver requires CGRs to be deleted from same CPU from where they
- * were instantiated. Hence we get the module removal execute from the
- * same CPU from where it was originally inserted.
- */
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
- ret = qman_delete_cgr(&priv->cgr);
- if (ret)
- dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
- else
- qman_release_cgrid(priv->cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr);
+ qman_release_cgrid(priv->cgr.cgrid);
kmem_cache_destroy(qi_cache);
- /* Now that we're done with the CGRs, restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
-
platform_device_unregister(priv->qi_pdev);
- return ret;
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
static struct platform_device_info qi_pdev_info = {
.name = "caam_qi",
.id = PLATFORM_DEVID_NONE
};
- /*
- * QMAN requires CGRs to be removed from same CPU+portal from where it
- * was originally allocated. Hence we need to note down the
- * initialisation CPU and use the same CPU for module exit.
- * We select the first CPU to from the list of portal owning CPUs.
- * Then we pin module init to this CPU.
- */
- mod_init_cpu = cpumask_first(cpus);
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
qi_pdev_info.parent = ctrldev;
qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
qi_pdev = platform_device_register_full(&qi_pdev_info);
@@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
return -ENOMEM;
}
- /* Done with the CGRs; restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
&times_congested, &caam_fops_u64_ro);
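
qman_delete_cgr_safe(), provided by the DPAA QMan driver, internally arranges for
the CGR deletion to run on the CPU that originally allocated it, so the module no
longer has to record mod_init_cpu or rewrite current->cpus_allowed around init and
exit. The teardown reduces to the two calls from the hunk above:

    qman_delete_cgr_safe(&priv->cgr);
    qman_release_cgrid(priv->cgr.cgrid);

Removing the fallible qman_delete_cgr() call is also what lets caam_qi_shutdown()
become void, as the qi.h hunk below reflects.
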
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 357b69f57072..f93c9c7ed430 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -62,7 +62,6 @@ typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
enum optype {
ENCRYPT,
DECRYPT,
- GIVENCRYPT,
NUM_OP
};
@@ -174,7 +173,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
int caam_qi_init(struct platform_device *pdev);
-int caam_qi_shutdown(struct device *dev);
+void caam_qi_shutdown(struct device *dev);
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 4fb91ba39c36..457815f965c0 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -70,22 +70,22 @@
extern bool caam_little_end;
extern bool caam_imx;
-#define caam_to_cpu(len) \
-static inline u##len caam##len ## _to_cpu(u##len val) \
-{ \
- if (caam_little_end) \
- return le##len ## _to_cpu(val); \
- else \
- return be##len ## _to_cpu(val); \
+#define caam_to_cpu(len) \
+static inline u##len caam##len ## _to_cpu(u##len val) \
+{ \
+ if (caam_little_end) \
+ return le##len ## _to_cpu((__force __le##len)val); \
+ else \
+ return be##len ## _to_cpu((__force __be##len)val); \
}
-#define cpu_to_caam(len) \
-static inline u##len cpu_to_caam##len(u##len val) \
-{ \
- if (caam_little_end) \
- return cpu_to_le##len(val); \
- else \
- return cpu_to_be##len(val); \
+#define cpu_to_caam(len) \
+static inline u##len cpu_to_caam##len(u##len val) \
+{ \
+ if (caam_little_end) \
+ return (__force u##len)cpu_to_le##len(val); \
+ else \
+ return (__force u##len)cpu_to_be##len(val); \
}
caam_to_cpu(16)
@@ -633,6 +633,8 @@ struct caam_job_ring {
#define JRSTA_DECOERR_INVSIGN 0x86
#define JRSTA_DECOERR_DSASIGN 0x87
+#define JRSTA_QIERR_ERROR_MASK 0x00ff
+
#define JRSTA_CCBERR_JUMP 0x08000000
#define JRSTA_CCBERR_INDEX_MASK 0xff00
#define JRSTA_CCBERR_INDEX_SHIFT 8
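
The __force casts only affect sparse's __bitwise type checking; the generated code
is unchanged. For reference, caam_to_cpu(16) now expands to:

    static inline u16 caam16_to_cpu(u16 val)
    {
            if (caam_little_end)
                    return le16_to_cpu((__force __le16)val);
            else
                    return be16_to_cpu((__force __be16)val);
    }
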
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index d000b4df745f..b3e1aaaeffea 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SG_SW_QM_H
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
index b5b4c12179df..c9378402a5f8 100644
--- a/drivers/crypto/caam/sg_sw_qm2.h
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -1,35 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2015-2016 Freescale Semiconductor, Inc.
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the names of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SG_SW_QM2_H_
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index b0ba4331944b..ca549c5dc08e 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -308,21 +308,11 @@ void do_request_cleanup(struct cpt_vf *cptvf,
}
}
- if (info->scatter_components)
- kzfree(info->scatter_components);
-
- if (info->gather_components)
- kzfree(info->gather_components);
-
- if (info->out_buffer)
- kzfree(info->out_buffer);
-
- if (info->in_buffer)
- kzfree(info->in_buffer);
-
- if (info->completion_addr)
- kzfree((void *)info->completion_addr);
-
+ kzfree(info->scatter_components);
+ kzfree(info->gather_components);
+ kzfree(info->out_buffer);
+ kzfree(info->in_buffer);
+ kzfree((void *)info->completion_addr);
kzfree(info);
}
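
kzfree(), like kfree(), is a no-op when passed a NULL pointer, so each conditional
guard removed above was redundant. For example:

    if (info->out_buffer)           /* redundant NULL check */
            kzfree(info->out_buffer);

    kzfree(info->out_buffer);       /* equivalent */
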
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile
index 45b7379e8e30..e12954791673 100644
--- a/drivers/crypto/cavium/nitrox/Makefile
+++ b/drivers/crypto/cavium/nitrox/Makefile
@@ -7,3 +7,6 @@ n5pf-objs := nitrox_main.o \
nitrox_hal.o \
nitrox_reqmgr.o \
nitrox_algs.o
+
+n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o
+n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h
index 312f72801af6..863143a8336b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_common.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_common.h
@@ -12,32 +12,15 @@ void crypto_free_context(void *ctx);
struct nitrox_device *nitrox_get_first_device(void);
void nitrox_put_device(struct nitrox_device *ndev);
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev);
-int nitrox_pf_init_isr(struct nitrox_device *ndev);
-
int nitrox_common_sw_init(struct nitrox_device *ndev);
void nitrox_common_sw_cleanup(struct nitrox_device *ndev);
-void pkt_slc_resp_handler(unsigned long data);
+void pkt_slc_resp_tasklet(unsigned long data);
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t cb,
struct skcipher_request *skreq);
void backlog_qflush_work(struct work_struct *work);
-void nitrox_config_emu_unit(struct nitrox_device *ndev);
-void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
-void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
-void nitrox_config_vfmode(struct nitrox_device *ndev, int mode);
-void nitrox_config_nps_unit(struct nitrox_device *ndev);
-void nitrox_config_pom_unit(struct nitrox_device *ndev);
-void nitrox_config_rand_unit(struct nitrox_device *ndev);
-void nitrox_config_efl_unit(struct nitrox_device *ndev);
-void nitrox_config_bmi_unit(struct nitrox_device *ndev);
-void nitrox_config_bmo_unit(struct nitrox_device *ndev);
-void nitrox_config_lbc_unit(struct nitrox_device *ndev);
-void invalidate_lbc(struct nitrox_device *ndev);
-void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
-void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
#endif /* __NITROX_COMMON_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
index 9dcb7fdbe0a7..1ad27b1a87c5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_csr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -7,9 +7,16 @@
/* EMU clusters */
#define NR_CLUSTERS 4
+/* Maximum cores per cluster; varies based on part name */
#define AE_CORES_PER_CLUSTER 20
#define SE_CORES_PER_CLUSTER 16
+#define AE_MAX_CORES (AE_CORES_PER_CLUSTER * NR_CLUSTERS)
+#define SE_MAX_CORES (SE_CORES_PER_CLUSTER * NR_CLUSTERS)
+#define ZIP_MAX_CORES 5
+
/* BIST registers */
#define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000))
#define UCD_BIST_STATUS 0x12C0070
@@ -111,6 +118,9 @@
#define LBC_ELM_VF65_128_INT 0x120C000
#define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000
+#define RST_BOOT 0x10C1600
+#define FUS_DAT1 0x10C1408
+
/* PEM registers */
#define PEM0_INT 0x1080428
@@ -1082,4 +1092,105 @@ union lbc_inval_status {
} s;
};
+/**
+ * struct rst_boot: RST Boot Register
+ * @jtcsrdis: when set, internal CSR access via JTAG TAP controller
+ * is disabled
+ * @jt_tst_mode: JTAG test mode
+ * @io_supply: I/O power supply setting based on IO_VDD_SELECT pin:
+ * 0x1 = 1.8V
+ * 0x2 = 2.5V
+ * 0x4 = 3.3V
+ * All other values are reserved
+ * @pnr_mul: clock multiplier
+ * @lboot: last boot cause mask, resets only with PLL_DC_OK
+ * @rboot: determines whether core 0 remains in reset after a
+ *   chip cold, warm, or soft reset
+ * @rboot_pin: read only access to REMOTE_BOOT pin
+ */
+union rst_boot {
+ u64 value;
+ struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+ u64 raz_63 : 1;
+ u64 jtcsrdis : 1;
+ u64 raz_59_61 : 3;
+ u64 jt_tst_mode : 1;
+ u64 raz_40_57 : 18;
+ u64 io_supply : 3;
+ u64 raz_30_36 : 7;
+ u64 pnr_mul : 6;
+ u64 raz_12_23 : 12;
+ u64 lboot : 10;
+ u64 rboot : 1;
+ u64 rboot_pin : 1;
+#else
+ u64 rboot_pin : 1;
+ u64 rboot : 1;
+ u64 lboot : 10;
+ u64 raz_12_23 : 12;
+ u64 pnr_mul : 6;
+ u64 raz_30_36 : 7;
+ u64 io_supply : 3;
+ u64 raz_40_57 : 18;
+ u64 jt_tst_mode : 1;
+ u64 raz_59_61 : 3;
+ u64 jtcsrdis : 1;
+ u64 raz_63 : 1;
+#endif
+ };
+};
+
+/**
+ * struct fus_dat1: Fuse Data 1 Register
+ * @pll_mul: main clock PLL multiplier hardware limit
+ * @pll_half_dis: main clock PLL control
+ * @efus_lck: efuse lockdown
+ * @zip_info: ZIP information
+ * @bar2_sz_conf: when zero, BAR2 size conforms to
+ * PCIe specification
+ * @efus_ign: efuse ignore
+ * @nozip: ZIP disable
+ * @pll_alt_matrix: select alternate PLL matrix
+ * @pll_bwadj_denom: select CLKF denominator for
+ * BWADJ value
+ * @chip_id: chip ID
+ */
+union fus_dat1 {
+ u64 value;
+ struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+ u64 raz_57_63 : 7;
+ u64 pll_mul : 3;
+ u64 pll_half_dis : 1;
+ u64 raz_43_52 : 10;
+ u64 efus_lck : 3;
+ u64 raz_26_39 : 14;
+ u64 zip_info : 5;
+ u64 bar2_sz_conf : 1;
+ u64 efus_ign : 1;
+ u64 nozip : 1;
+ u64 raz_11_17 : 7;
+ u64 pll_alt_matrix : 1;
+ u64 pll_bwadj_denom : 2;
+ u64 chip_id : 8;
+#else
+ u64 chip_id : 8;
+ u64 pll_bwadj_denom : 2;
+ u64 pll_alt_matrix : 1;
+ u64 raz_11_17 : 7;
+ u64 nozip : 1;
+ u64 efus_ign : 1;
+ u64 bar2_sz_conf : 1;
+ u64 zip_info : 5;
+ u64 raz_26_39 : 14;
+ u64 efus_lck : 3;
+ u64 raz_43_52 : 10;
+ u64 pll_half_dis : 1;
+ u64 pll_mul : 3;
+ u64 raz_57_63 : 7;
+#endif
+ };
+};
+
#endif /* __NITROX_CSR_H */
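
The bitfield layouts are mirrored for both endiannesses, so the anonymous struct
members can be read directly after loading the raw register value. A minimal sketch
of decoding the core clock from RST_BOOT (PLL_REF_CLK is defined as 50 in
nitrox_hal.c later in this patch):

    union rst_boot rst_boot;

    rst_boot.value = nitrox_read_csr(ndev, RST_BOOT);
    /* e.g. pnr_mul == 13 gives (13 + 3) * 50 = 800 MHz */
    ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;
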
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
new file mode 100644
index 000000000000..5f3cd5fafe04
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include "nitrox_csr.h"
+#include "nitrox_dev.h"
+
+static int firmware_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
+ return 0;
+}
+
+static int firmware_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, firmware_show, inode->i_private);
+}
+
+static const struct file_operations firmware_fops = {
+ .owner = THIS_MODULE,
+ .open = firmware_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int device_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "NITROX [%d]\n", ndev->idx);
+ seq_printf(s, " Part Name: %s\n", ndev->hw.partname);
+ seq_printf(s, " Frequency: %d MHz\n", ndev->hw.freq);
+ seq_printf(s, " Device ID: 0x%0x\n", ndev->hw.device_id);
+ seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id);
+ seq_printf(s, " Cores: [AE=%u SE=%u ZIP=%u]\n",
+ ndev->hw.ae_cores, ndev->hw.se_cores, ndev->hw.zip_cores);
+
+ return 0;
+}
+
+static int nitrox_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, device_show, inode->i_private);
+}
+
+static const struct file_operations nitrox_fops = {
+ .owner = THIS_MODULE,
+ .open = nitrox_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int stats_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "NITROX [%d] Request Statistics\n", ndev->idx);
+ seq_printf(s, " Posted: %llu\n",
+ (u64)atomic64_read(&ndev->stats.posted));
+ seq_printf(s, " Completed: %llu\n",
+ (u64)atomic64_read(&ndev->stats.completed));
+ seq_printf(s, " Dropped: %llu\n",
+ (u64)atomic64_read(&ndev->stats.dropped));
+
+ return 0;
+}
+
+static int nitrox_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stats_show, inode->i_private);
+}
+
+static const struct file_operations nitrox_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = nitrox_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{
+ debugfs_remove_recursive(ndev->debugfs_dir);
+ ndev->debugfs_dir = NULL;
+}
+
+int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+ struct dentry *dir, *f;
+
+ dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ ndev->debugfs_dir = dir;
+ f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
+ if (!f)
+ goto err;
+ f = debugfs_create_file("device", 0400, dir, ndev, &nitrox_fops);
+ if (!f)
+ goto err;
+ f = debugfs_create_file("stats", 0400, dir, ndev, &nitrox_stats_fops);
+ if (!f)
+ goto err;
+
+ return 0;
+
+err:
+ nitrox_debugfs_exit(ndev);
+ return -ENODEV;
+}
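
The three files land under the module's debugfs directory, e.g.
/sys/kernel/debug/<KBUILD_MODNAME>/{firmware,device,stats}. The expected pairing at
the call sites, sketched (the cleanup label is illustrative):

    /* in nitrox_probe(): */
    err = nitrox_debugfs_init(ndev);
    if (err)
            goto cleanup;

    /* in nitrox_remove() and on the probe error path: */
    nitrox_debugfs_exit(ndev);
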
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index af596455b420..283e252385fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -5,92 +5,123 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/if.h>
#define VERSION_LEN 32
+/**
+ * struct nitrox_cmdq - NITROX command queue
+ * @cmd_qlock: command queue lock
+ * @resp_qlock: response queue lock
+ * @backlog_qlock: backlog queue lock
+ * @ndev: NITROX device
+ * @response_head: submitted request list
+ * @backlog_head: backlog queue
+ * @dbell_csr_addr: doorbell register address for this queue
+ * @compl_cnt_csr_addr: completion count register address of the slc port
+ * @base: command queue base address
+ * @dma: dma address of the base
+ * @pending_count: requests pending at the device
+ * @backlog_count: backlog request count
+ * @write_idx: next write index for the command queue
+ * @instr_size: command size
+ * @qno: command queue number
+ * @qsize: command queue size
+ * @unalign_base: unaligned base address
+ * @unalign_dma: unaligned dma address
+ */
struct nitrox_cmdq {
- /* command queue lock */
- spinlock_t cmdq_lock;
- /* response list lock */
- spinlock_t response_lock;
- /* backlog list lock */
- spinlock_t backlog_lock;
-
- /* request submitted to chip, in progress */
+ spinlock_t cmd_qlock;
+ spinlock_t resp_qlock;
+ spinlock_t backlog_qlock;
+
+ struct nitrox_device *ndev;
struct list_head response_head;
- /* hw queue full, hold in backlog list */
struct list_head backlog_head;
- /* doorbell address */
u8 __iomem *dbell_csr_addr;
- /* base address of the queue */
- u8 *head;
+ u8 __iomem *compl_cnt_csr_addr;
+ u8 *base;
+ dma_addr_t dma;
- struct nitrox_device *ndev;
- /* flush pending backlog commands */
struct work_struct backlog_qflush;
- /* requests posted waiting for completion */
atomic_t pending_count;
- /* requests in backlog queues */
atomic_t backlog_count;
int write_idx;
- /* command size 32B/64B */
u8 instr_size;
u8 qno;
u32 qsize;
- /* unaligned addresses */
- u8 *head_unaligned;
- dma_addr_t dma_unaligned;
- /* dma address of the base */
- dma_addr_t dma;
+ u8 *unalign_base;
+ dma_addr_t unalign_dma;
};
+/**
+ * struct nitrox_hw - NITROX hardware information
+ * @partname: part name, e.g. CNN55xxx-xxx
+ * @fw_name: firmware version
+ * @freq: NITROX frequency
+ * @vendor_id: vendor ID
+ * @device_id: device ID
+ * @revision_id: revision ID
+ * @se_cores: number of symmetric cores
+ * @ae_cores: number of asymmetric cores
+ * @zip_cores: number of zip cores
+ */
struct nitrox_hw {
- /* firmware version */
+ char partname[IFNAMSIZ * 2];
char fw_name[VERSION_LEN];
+ int freq;
u16 vendor_id;
u16 device_id;
u8 revision_id;
- /* CNN55XX cores */
u8 se_cores;
u8 ae_cores;
u8 zip_cores;
};
-#define MAX_MSIX_VECTOR_NAME 20
-/**
- * vectors for queues (64 AE, 64 SE and 64 ZIP) and
- * error condition/mailbox.
- */
-#define MAX_MSIX_VECTORS 192
-
-struct nitrox_msix {
- struct msix_entry *entries;
- char **names;
- DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS);
- u32 nr_entries;
+struct nitrox_stats {
+ atomic64_t posted;
+ atomic64_t completed;
+ atomic64_t dropped;
};
-struct bh_data {
- /* slc port completion count address */
- u8 __iomem *completion_cnt_csr_addr;
+#define IRQ_NAMESZ 32
+
+struct nitrox_q_vector {
+ char name[IRQ_NAMESZ];
+ bool valid;
+ int ring;
+ struct tasklet_struct resp_tasklet;
+ union {
+ struct nitrox_cmdq *cmdq;
+ struct nitrox_device *ndev;
+ };
+};
- struct nitrox_cmdq *cmdq;
- struct tasklet_struct resp_handler;
+/*
+ * NITROX Device states
+ */
+enum ndev_state {
+ __NDEV_NOT_READY,
+ __NDEV_READY,
+ __NDEV_IN_RESET,
};
-struct nitrox_bh {
- struct bh_data *slc;
+/* NITROX support modes for VF(s) */
+enum vf_mode {
+ __NDEV_MODE_PF,
+ __NDEV_MODE_VF16,
+ __NDEV_MODE_VF32,
+ __NDEV_MODE_VF64,
+ __NDEV_MODE_VF128,
};
-/* NITROX-V driver state */
-#define NITROX_UCODE_LOADED 0
-#define NITROX_READY 1
+#define __NDEV_SRIOV_BIT 0
/* command queue size */
#define DEFAULT_CMD_QLEN 2048
@@ -98,7 +129,6 @@ struct nitrox_bh {
#define CMD_TIMEOUT 2000
#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))
-#define PF_MODE 0
#define NITROX_CSR_ADDR(ndev, offset) \
((ndev)->bar_addr + (offset))
@@ -108,17 +138,18 @@ struct nitrox_bh {
* @list: pointer to linked list of devices
* @bar_addr: iomap address
* @pdev: PCI device information
- * @status: NITROX status
+ * @state: NITROX device state
+ * @flags: flags to indicate the device features
* @timeout: Request timeout in jiffies
* @refcnt: Device usage count
* @idx: device index (0..N)
* @node: NUMA node id attached
* @qlen: Command queue length
* @nr_queues: Number of command queues
+ * @mode: Device mode PF/VF
* @ctx_pool: DMA pool for crypto context
- * @pkt_cmdqs: SE Command queues
- * @msix: MSI-X information
- * @bh: post processing work
+ * @pkt_inq: Packet input rings
+ * @qvec: MSI-X queue vectors information
* @hw: hardware information
* @debugfs_dir: debugfs directory
*/
@@ -128,7 +159,8 @@ struct nitrox_device {
u8 __iomem *bar_addr;
struct pci_dev *pdev;
- unsigned long status;
+ atomic_t state;
+ unsigned long flags;
unsigned long timeout;
refcount_t refcnt;
@@ -136,13 +168,16 @@ struct nitrox_device {
int node;
u16 qlen;
u16 nr_queues;
+ int num_vfs;
+ enum vf_mode mode;
struct dma_pool *ctx_pool;
- struct nitrox_cmdq *pkt_cmdqs;
+ struct nitrox_cmdq *pkt_inq;
- struct nitrox_msix msix;
- struct nitrox_bh bh;
+ struct nitrox_q_vector *qvec;
+ int num_vecs;
+ struct nitrox_stats stats;
struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *debugfs_dir;
@@ -173,9 +208,22 @@ static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
writeq(value, (ndev->bar_addr + offset));
}
-static inline int nitrox_ready(struct nitrox_device *ndev)
+static inline bool nitrox_ready(struct nitrox_device *ndev)
{
- return test_bit(NITROX_READY, &ndev->status);
+ return atomic_read(&ndev->state) == __NDEV_READY;
}
+#ifdef CONFIG_DEBUG_FS
+int nitrox_debugfs_init(struct nitrox_device *ndev);
+void nitrox_debugfs_exit(struct nitrox_device *ndev);
+#else
+static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+ return 0;
+}
+
+static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{ }
+#endif
+
#endif /* __NITROX_DEV_H */
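
The anonymous union in struct nitrox_q_vector lets the same vector carry either a
command queue (for per-ring vectors) or the whole device (for the core-interrupt
vector); which member is valid depends on which handler the vector was registered
with. A sketch of the two consumers:

    /* per-ring ISR: registered with qvec->cmdq set */
    struct nitrox_q_vector *qvec = data;
    struct nitrox_cmdq *cmdq = qvec->cmdq;

    /* core-interrupt tasklet: the same storage holds the device */
    struct nitrox_device *ndev = qvec->ndev;
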
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index ab4ccf2f9e77..a9b82387cf53 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -4,6 +4,8 @@
#include "nitrox_dev.h"
#include "nitrox_csr.h"
+#define PLL_REF_CLK 50
+
/**
* emu_enable_cores - Enable EMU cluster cores.
* @ndev: N5 device
@@ -117,7 +119,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
int i;
for (i = 0; i < ndev->nr_queues; i++) {
- struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+ struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
union nps_pkt_in_instr_rsize pkt_in_rsize;
u64 offset;
@@ -256,7 +258,7 @@ void nitrox_config_nps_unit(struct nitrox_device *ndev)
/* disable ILK interface */
core_gbl_vfcfg.value = 0;
core_gbl_vfcfg.s.ilk_disable = 1;
- core_gbl_vfcfg.s.cfg = PF_MODE;
+ core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
/* config input and solicit ports */
nitrox_config_pkt_input_rings(ndev);
@@ -400,3 +402,68 @@ void nitrox_config_lbc_unit(struct nitrox_device *ndev)
offset = LBC_ELM_VF65_128_INT_ENA_W1S;
nitrox_write_csr(ndev, offset, (~0ULL));
}
+
+void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
+{
+ union nps_core_gbl_vfcfg vfcfg;
+
+ vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
+ vfcfg.s.cfg = mode & 0x7;
+
+ nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
+}
+
+void nitrox_get_hwinfo(struct nitrox_device *ndev)
+{
+ union emu_fuse_map emu_fuse;
+ union rst_boot rst_boot;
+ union fus_dat1 fus_dat1;
+ unsigned char name[IFNAMSIZ * 2] = {};
+ int i, dead_cores;
+ u64 offset;
+
+ /* get core frequency */
+ offset = RST_BOOT;
+ rst_boot.value = nitrox_read_csr(ndev, offset);
+ ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;
+
+ for (i = 0; i < NR_CLUSTERS; i++) {
+ offset = EMU_FUSE_MAPX(i);
+ emu_fuse.value = nitrox_read_csr(ndev, offset);
+ if (emu_fuse.s.valid) {
+ dead_cores = hweight32(emu_fuse.s.ae_fuse);
+ ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
+ dead_cores = hweight16(emu_fuse.s.se_fuse);
+ ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
+ }
+ }
+ /* find zip hardware availability */
+ offset = FUS_DAT1;
+ fus_dat1.value = nitrox_read_csr(ndev, offset);
+ if (!fus_dat1.nozip) {
+ dead_cores = hweight8(fus_dat1.zip_info);
+ ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
+ }
+
+	/* determine the part name CNN55<cores>-<freq><pincount>-<rev> */
+ if (ndev->hw.ae_cores == AE_MAX_CORES) {
+ switch (ndev->hw.se_cores) {
+ case SE_MAX_CORES:
+ i = snprintf(name, sizeof(name), "CNN5560");
+ break;
+ case 40:
+ i = snprintf(name, sizeof(name), "CNN5560s");
+ break;
+ }
+ } else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
+ i = snprintf(name, sizeof(name), "CNN5530");
+ } else {
+ i = snprintf(name, sizeof(name), "CNN5560i");
+ }
+
+ snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
+ ndev->hw.freq, ndev->hw.revision_id);
+
+ /* copy partname */
+ strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
+}
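
A worked example of the derivation above, assuming a fully fused part:

    /* All 4 clusters valid with no fused-off cores, pnr_mul == 13, rev 1:
     *   ae_cores = 4 * 20 = 80  (== AE_MAX_CORES)
     *   se_cores = 4 * 16 = 64  (== SE_MAX_CORES)
     *   freq     = (13 + 3) * 50 = 800 MHz
     *   partname = "CNN5560-800BG676-1.1"
     */
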
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h
new file mode 100644
index 000000000000..489ee64c119e
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_HAL_H
+#define __NITROX_HAL_H
+
+#include "nitrox_dev.h"
+
+void nitrox_config_emu_unit(struct nitrox_device *ndev);
+void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
+void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
+void nitrox_config_nps_unit(struct nitrox_device *ndev);
+void nitrox_config_pom_unit(struct nitrox_device *ndev);
+void nitrox_config_rand_unit(struct nitrox_device *ndev);
+void nitrox_config_efl_unit(struct nitrox_device *ndev);
+void nitrox_config_bmi_unit(struct nitrox_device *ndev);
+void nitrox_config_bmo_unit(struct nitrox_device *ndev);
+void nitrox_config_lbc_unit(struct nitrox_device *ndev);
+void invalidate_lbc(struct nitrox_device *ndev);
+void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
+void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
+void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
+void nitrox_get_hwinfo(struct nitrox_device *ndev);
+
+#endif /* __NITROX_HAL_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index ee0d70ba25d5..88a77b8fb3fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -6,9 +6,16 @@
#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
+#include "nitrox_hal.h"
+/*
+ * One vector for each type of ring
+ * - NPS packet ring, AQMQ ring and ZQMQ ring
+ */
#define NR_RING_VECTORS 3
-#define NPS_CORE_INT_ACTIVE_ENTRY 192
+/* base entry for packet ring/port */
+#define PKT_RING_MSIX_BASE 0
+#define NON_RING_MSIX_BASE 192
/**
* nps_pkt_slc_isr - IRQ handler for NPS solicit port
@@ -17,13 +24,14 @@
*/
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
- struct bh_data *slc = data;
- union nps_pkt_slc_cnts pkt_slc_cnts;
+ struct nitrox_q_vector *qvec = data;
+ union nps_pkt_slc_cnts slc_cnts;
+ struct nitrox_cmdq *cmdq = qvec->cmdq;
- pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
+ slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
/* New packet on SLC output port */
- if (pkt_slc_cnts.s.slc_int)
- tasklet_hi_schedule(&slc->resp_handler);
+ if (slc_cnts.s.slc_int)
+ tasklet_hi_schedule(&qvec->resp_tasklet);
return IRQ_HANDLED;
}
@@ -190,165 +198,92 @@ static void clear_bmi_err_intr(struct nitrox_device *ndev)
dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
}
+static void nps_core_int_tasklet(unsigned long data)
+{
+ struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+ struct nitrox_device *ndev = qvec->ndev;
+
+	/* if PF mode, do queue recovery */
+	if (ndev->mode == __NDEV_MODE_PF) {
+	} else {
+		/*
+		 * if VF(s) are enabled, communicate the error
+		 * information to the VF(s)
+		 */
+	}
+}
+
/**
- * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
- * @ndev: NITROX device
+ * nps_core_int_isr - interrupt handler for NITROX errors and
+ * mailbox communication
*/
-static void clear_nps_core_int_active(struct nitrox_device *ndev)
+static irqreturn_t nps_core_int_isr(int irq, void *data)
{
- union nps_core_int_active core_int_active;
+ struct nitrox_device *ndev = data;
+ union nps_core_int_active core_int;
- core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
+ core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
- if (core_int_active.s.nps_core)
+ if (core_int.s.nps_core)
clear_nps_core_err_intr(ndev);
- if (core_int_active.s.nps_pkt)
+ if (core_int.s.nps_pkt)
clear_nps_pkt_err_intr(ndev);
- if (core_int_active.s.pom)
+ if (core_int.s.pom)
clear_pom_err_intr(ndev);
- if (core_int_active.s.pem)
+ if (core_int.s.pem)
clear_pem_err_intr(ndev);
- if (core_int_active.s.lbc)
+ if (core_int.s.lbc)
clear_lbc_err_intr(ndev);
- if (core_int_active.s.efl)
+ if (core_int.s.efl)
clear_efl_err_intr(ndev);
- if (core_int_active.s.bmi)
+ if (core_int.s.bmi)
clear_bmi_err_intr(ndev);
/* If more work callback the ISR, set resend */
- core_int_active.s.resend = 1;
- nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
-}
-
-static irqreturn_t nps_core_int_isr(int irq, void *data)
-{
- struct nitrox_device *ndev = data;
-
- clear_nps_core_int_active(ndev);
+ core_int.s.resend = 1;
+ nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
return IRQ_HANDLED;
}
-static int nitrox_enable_msix(struct nitrox_device *ndev)
+void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
- struct msix_entry *entries;
- char **names;
- int i, nr_entries, ret;
-
- /*
- * PF MSI-X vectors
- *
- * Entry 0: NPS PKT ring 0
- * Entry 1: AQMQ ring 0
- * Entry 2: ZQM ring 0
- * Entry 3: NPS PKT ring 1
- * Entry 4: AQMQ ring 1
- * Entry 5: ZQM ring 1
- * ....
- * Entry 192: NPS_CORE_INT_ACTIVE
- */
- nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
- entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
- GFP_KERNEL, ndev->node);
- if (!entries)
- return -ENOMEM;
-
- names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
- if (!names) {
- kfree(entries);
- return -ENOMEM;
- }
-
- /* fill entires */
- for (i = 0; i < (nr_entries - 1); i++)
- entries[i].entry = i;
-
- entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;
-
- for (i = 0; i < nr_entries; i++) {
- *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
- if (!(*(names + i))) {
- ret = -ENOMEM;
- goto msix_fail;
- }
- }
- ndev->msix.entries = entries;
- ndev->msix.names = names;
- ndev->msix.nr_entries = nr_entries;
-
- ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
- ndev->msix.nr_entries);
- if (ret) {
- dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
- ret);
- goto msix_fail;
- }
- return 0;
-
-msix_fail:
- for (i = 0; i < nr_entries; i++)
- kfree(*(names + i));
-
- kfree(entries);
- kfree(names);
- return ret;
-}
-
-static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
-{
- int i;
-
- if (!ndev->bh.slc)
- return;
-
- for (i = 0; i < ndev->nr_queues; i++) {
- struct bh_data *bh = &ndev->bh.slc[i];
-
- tasklet_disable(&bh->resp_handler);
- tasklet_kill(&bh->resp_handler);
- }
- kfree(ndev->bh.slc);
- ndev->bh.slc = NULL;
-}
-
-static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
-{
- u32 size;
+ struct pci_dev *pdev = ndev->pdev;
int i;
- size = ndev->nr_queues * sizeof(struct bh_data);
- ndev->bh.slc = kzalloc(size, GFP_KERNEL);
- if (!ndev->bh.slc)
- return -ENOMEM;
+ for (i = 0; i < ndev->num_vecs; i++) {
+ struct nitrox_q_vector *qvec;
+ int vec;
- for (i = 0; i < ndev->nr_queues; i++) {
- struct bh_data *bh = &ndev->bh.slc[i];
- u64 offset;
+ qvec = ndev->qvec + i;
+ if (!qvec->valid)
+ continue;
- offset = NPS_PKT_SLC_CNTSX(i);
- /* pre calculate completion count address */
- bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
- bh->cmdq = &ndev->pkt_cmdqs[i];
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ irq_set_affinity_hint(vec, NULL);
+ free_irq(vec, qvec);
- tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
- (unsigned long)bh);
+ tasklet_disable(&qvec->resp_tasklet);
+ tasklet_kill(&qvec->resp_tasklet);
+ qvec->valid = false;
}
-
- return 0;
+ kfree(ndev->qvec);
+ pci_free_irq_vectors(pdev);
}
-static int nitrox_request_irqs(struct nitrox_device *ndev)
+int nitrox_register_interrupts(struct nitrox_device *ndev)
{
struct pci_dev *pdev = ndev->pdev;
- struct msix_entry *msix_ent = ndev->msix.entries;
- int nr_ring_vectors, i = 0, ring, cpu, ret;
- char *name;
+ struct nitrox_q_vector *qvec;
+ int nr_vecs, vec, cpu;
+ int ret, i;
/*
* PF MSI-X vectors
@@ -357,112 +292,76 @@ static int nitrox_request_irqs(struct nitrox_device *ndev)
* Entry 1: AQMQ ring 0
* Entry 2: ZQM ring 0
* Entry 3: NPS PKT ring 1
+ * Entry 4: AQMQ ring 1
+ * Entry 5: ZQM ring 1
* ....
* Entry 192: NPS_CORE_INT_ACTIVE
*/
- nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;
-
- /* request irq for pkt ring/ports only */
- while (i < nr_ring_vectors) {
- name = *(ndev->msix.names + i);
- ring = (i / NR_RING_VECTORS);
- snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
- ndev->idx, ring);
+ nr_vecs = pci_msix_vec_count(pdev);
- ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
- name, &ndev->bh.slc[ring]);
- if (ret) {
- dev_err(&pdev->dev, "failed to get irq %d for %s\n",
- msix_ent[i].vector, name);
- return ret;
- }
- cpu = ring % num_online_cpus();
- irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));
-
- set_bit(i, ndev->msix.irqs);
- i += NR_RING_VECTORS;
- }
-
- /* Request IRQ for NPS_CORE_INT_ACTIVE */
- name = *(ndev->msix.names + i);
- snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
- ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
- if (ret) {
- dev_err(&pdev->dev, "failed to get irq %d for %s\n",
- msix_ent[i].vector, name);
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
return ret;
}
- set_bit(i, ndev->msix.irqs);
+ ndev->num_vecs = nr_vecs;
- return 0;
-}
-
-static void nitrox_disable_msix(struct nitrox_device *ndev)
-{
- struct msix_entry *msix_ent = ndev->msix.entries;
- char **names = ndev->msix.names;
- int i = 0, ring, nr_ring_vectors;
-
- nr_ring_vectors = ndev->msix.nr_entries - 1;
-
- /* clear pkt ring irqs */
- while (i < nr_ring_vectors) {
- if (test_and_clear_bit(i, ndev->msix.irqs)) {
- ring = (i / NR_RING_VECTORS);
- irq_set_affinity_hint(msix_ent[i].vector, NULL);
- free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
- }
- i += NR_RING_VECTORS;
+ ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
+ if (!ndev->qvec) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
}
- irq_set_affinity_hint(msix_ent[i].vector, NULL);
- free_irq(msix_ent[i].vector, ndev);
- clear_bit(i, ndev->msix.irqs);
- kfree(ndev->msix.entries);
- for (i = 0; i < ndev->msix.nr_entries; i++)
- kfree(*(names + i));
+ /* request irqs for packet rings/ports */
+ for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
+ qvec = &ndev->qvec[i];
- kfree(names);
- pci_disable_msix(ndev->pdev);
-}
-
-/**
- * nitrox_pf_cleanup_isr: Cleanup PF MSI-X and IRQ
- * @ndev: NITROX device
- */
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
-{
- nitrox_disable_msix(ndev);
- nitrox_cleanup_pkt_slc_bh(ndev);
-}
+ qvec->ring = i / NR_RING_VECTORS;
+ if (qvec->ring >= ndev->nr_queues)
+ break;
-/**
- * nitrox_init_isr - Initialize PF MSI-X vectors and IRQ
- * @ndev: NITROX device
- *
- * Return: 0 on success, a negative value on failure.
- */
-int nitrox_pf_init_isr(struct nitrox_device *ndev)
-{
- int err;
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
+ if (ret) {
+ dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
+ qvec->ring);
+ goto irq_fail;
+ }
+ cpu = qvec->ring % num_online_cpus();
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
- err = nitrox_setup_pkt_slc_bh(ndev);
- if (err)
- return err;
+ tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
+ (unsigned long)qvec);
+ qvec->cmdq = &ndev->pkt_inq[qvec->ring];
+ qvec->valid = true;
+ }
- err = nitrox_enable_msix(ndev);
- if (err)
- goto msix_fail;
+ /* request irqs for non ring vectors */
+ i = NON_RING_MSIX_BASE;
+ qvec = &ndev->qvec[i];
- err = nitrox_request_irqs(ndev);
- if (err)
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
+ if (ret) {
+ dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
goto irq_fail;
+ }
+ cpu = num_online_cpus();
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
+
+ tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
+ (unsigned long)qvec);
+ qvec->ndev = ndev;
+ qvec->valid = true;
return 0;
irq_fail:
- nitrox_disable_msix(ndev);
-msix_fail:
- nitrox_cleanup_pkt_slc_bh(ndev);
- return err;
+ nitrox_unregister_interrupts(ndev);
+ return ret;
}
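
With NR_RING_VECTORS == 3, packet-ring vectors occupy MSI-X entries 0, 3, 6, ...
(the AQMQ and ZQMQ entries of each ring are left unused by the PF driver), and
entry 192 is reserved for NPS_CORE_INT_ACTIVE. The loop above recovers the ring
number from the entry index; the inverse mapping, as a sketch:

    /* MSI-X entry for packet ring r; pci_irq_vector() maps it to a Linux IRQ */
    int entry = PKT_RING_MSIX_BASE + ring * NR_RING_VECTORS;
    int irq = pci_irq_vector(pdev, entry);
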
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h
new file mode 100644
index 000000000000..63418a6cc52c
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_ISR_H
+#define __NITROX_ISR_H
+
+#include "nitrox_dev.h"
+
+int nitrox_register_interrupts(struct nitrox_device *ndev);
+void nitrox_unregister_interrupts(struct nitrox_device *ndev);
+
+#endif /* __NITROX_ISR_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4d31df07777f..2260efa42308 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -17,30 +17,27 @@
#define CRYPTO_CTX_SIZE 256
-/* command queue alignments */
-#define PKT_IN_ALIGN 16
+/* packet input ring alignments */
+#define PKTIN_Q_ALIGN_BYTES 16
-static int cmdq_common_init(struct nitrox_cmdq *cmdq)
+static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
struct nitrox_device *ndev = cmdq->ndev;
- u32 qsize;
-
- qsize = (ndev->qlen) * cmdq->instr_size;
- cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
- (qsize + PKT_IN_ALIGN),
- &cmdq->dma_unaligned,
- GFP_KERNEL);
- if (!cmdq->head_unaligned)
+
+ cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
+ cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
+ &cmdq->unalign_dma,
+ GFP_KERNEL);
+ if (!cmdq->unalign_base)
return -ENOMEM;
- cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
- cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
- cmdq->qsize = (qsize + PKT_IN_ALIGN);
+ cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
+ cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
cmdq->write_idx = 0;
- spin_lock_init(&cmdq->response_lock);
- spin_lock_init(&cmdq->cmdq_lock);
- spin_lock_init(&cmdq->backlog_lock);
+ spin_lock_init(&cmdq->cmd_qlock);
+ spin_lock_init(&cmdq->resp_qlock);
+ spin_lock_init(&cmdq->backlog_qlock);
INIT_LIST_HEAD(&cmdq->response_head);
INIT_LIST_HEAD(&cmdq->backlog_head);
@@ -51,68 +48,83 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
return 0;
}
-static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
+static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
+{
+ cmdq->write_idx = 0;
+ atomic_set(&cmdq->pending_count, 0);
+ atomic_set(&cmdq->backlog_count, 0);
+}
+
+static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = cmdq->ndev;
+ if (!cmdq->unalign_base)
+ return;
+
cancel_work_sync(&cmdq->backlog_qflush);
dma_free_coherent(DEV(ndev), cmdq->qsize,
- cmdq->head_unaligned, cmdq->dma_unaligned);
-
- atomic_set(&cmdq->pending_count, 0);
- atomic_set(&cmdq->backlog_count, 0);
+ cmdq->unalign_base, cmdq->unalign_dma);
+ nitrox_cmdq_reset(cmdq);
cmdq->dbell_csr_addr = NULL;
- cmdq->head = NULL;
+ cmdq->compl_cnt_csr_addr = NULL;
+ cmdq->unalign_base = NULL;
+ cmdq->base = NULL;
+ cmdq->unalign_dma = 0;
cmdq->dma = 0;
cmdq->qsize = 0;
cmdq->instr_size = 0;
}
-static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
+static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{
int i;
for (i = 0; i < ndev->nr_queues; i++) {
- struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+ struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
- cmdq_common_cleanup(cmdq);
+ nitrox_cmdq_cleanup(cmdq);
}
- kfree(ndev->pkt_cmdqs);
- ndev->pkt_cmdqs = NULL;
+ kfree(ndev->pkt_inq);
+ ndev->pkt_inq = NULL;
}
-static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
+static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
- int i, err, size;
+ int i, err;
- size = ndev->nr_queues * sizeof(struct nitrox_cmdq);
- ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL);
- if (!ndev->pkt_cmdqs)
+ ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
+ sizeof(struct nitrox_cmdq),
+ GFP_KERNEL, ndev->node);
+ if (!ndev->pkt_inq)
return -ENOMEM;
for (i = 0; i < ndev->nr_queues; i++) {
struct nitrox_cmdq *cmdq;
u64 offset;
- cmdq = &ndev->pkt_cmdqs[i];
+ cmdq = &ndev->pkt_inq[i];
cmdq->ndev = ndev;
cmdq->qno = i;
cmdq->instr_size = sizeof(struct nps_pkt_instr);
+ /* packet input ring doorbell address */
offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
- /* SE ring doorbell address for this queue */
cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+ /* packet solicit port completion count address */
+ offset = NPS_PKT_SLC_CNTSX(i);
+ cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
- err = cmdq_common_init(cmdq);
+ err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
if (err)
- goto pkt_cmdq_fail;
+ goto pktq_fail;
}
return 0;
-pkt_cmdq_fail:
- nitrox_cleanup_pkt_cmdqs(ndev);
+pktq_fail:
+ nitrox_free_pktin_queues(ndev);
return err;
}
@@ -122,7 +134,7 @@ static int create_crypto_dma_pool(struct nitrox_device *ndev)
/* Crypto context pool, 16 byte aligned */
size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
- ndev->ctx_pool = dma_pool_create("crypto-context",
+ ndev->ctx_pool = dma_pool_create("nitrox-context",
DEV(ndev), size, 16, 0);
if (!ndev->ctx_pool)
return -ENOMEM;
@@ -149,7 +161,7 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
void *vaddr;
dma_addr_t dma;
- vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
+ vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
if (!vaddr)
return NULL;
@@ -194,7 +206,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
if (err)
return err;
- err = nitrox_init_pkt_cmdqs(ndev);
+ err = nitrox_alloc_pktin_queues(ndev);
if (err)
destroy_crypto_dma_pool(ndev);
@@ -207,6 +219,6 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
*/
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
- nitrox_cleanup_pkt_cmdqs(ndev);
+ nitrox_free_pktin_queues(ndev);
destroy_crypto_dma_pool(ndev);
}
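
Worked example of the new alignment scheme in nitrox_cmdq_init(), assuming a
16-byte alignment request:

    /* Suppose dma_zalloc_coherent() returned:
     *   unalign_dma  = 0x1008, unalign_base = B
     * then:
     *   dma  = PTR_ALIGN(0x1008, 16)   = 0x1010
     *   base = B + (0x1010 - 0x1008)   = B + 8
     * which keeps the CPU and device views at the same offset.
     */
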
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index fee7cb2ce747..6595c95af9f1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -11,13 +11,15 @@
#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_csr.h"
+#include "nitrox_hal.h"
+#include "nitrox_isr.h"
#define CNN55XX_DEV_ID 0x12
#define MAX_PF_QUEUES 64
#define UCODE_HLEN 48
#define SE_GROUP 0
-#define DRIVER_VERSION "1.0"
+#define DRIVER_VERSION "1.1"
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW FW_DIR "cnn55xx_se.fw"
@@ -42,6 +44,15 @@ static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
+#ifdef CONFIG_PCI_IOV
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
+#else
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ return 0;
+}
+#endif
+
/**
* struct ucode - Firmware Header
* @id: microcode ID
@@ -136,9 +147,6 @@ static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name)
write_to_ucd_unit(ndev, ucode);
release_firmware(fw);
- set_bit(NITROX_UCODE_LOADED, &ndev->status);
- /* barrier to sync with other cpus */
- smp_mb__after_atomic();
return 0;
}
@@ -210,7 +218,7 @@ void nitrox_put_device(struct nitrox_device *ndev)
smp_mb__after_atomic();
}
-static int nitrox_reset_device(struct pci_dev *pdev)
+static int nitrox_device_flr(struct pci_dev *pdev)
{
int pos = 0;
@@ -220,15 +228,10 @@ static int nitrox_reset_device(struct pci_dev *pdev)
return -ENOMEM;
}
- pos = pci_pcie_cap(pdev);
- if (!pos)
- return -ENOTTY;
+	/* check FLR support */
+ if (pcie_has_flr(pdev))
+ pcie_flr(pdev);
- if (!pci_wait_for_pending_transaction(pdev))
- dev_err(&pdev->dev, "waiting for pending transaction\n");
-
- pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
- msleep(100);
pci_restore_state(pdev);
return 0;
@@ -242,7 +245,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
if (err)
return err;
- err = nitrox_pf_init_isr(ndev);
+ err = nitrox_register_interrupts(ndev);
if (err)
nitrox_common_sw_cleanup(ndev);
@@ -251,7 +254,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
- nitrox_pf_cleanup_isr(ndev);
+ nitrox_unregister_interrupts(ndev);
nitrox_common_sw_cleanup(ndev);
}
@@ -284,26 +287,6 @@ static int nitrox_bist_check(struct nitrox_device *ndev)
return 0;
}
-static void nitrox_get_hwinfo(struct nitrox_device *ndev)
-{
- union emu_fuse_map emu_fuse;
- u64 offset;
- int i;
-
- for (i = 0; i < NR_CLUSTERS; i++) {
- u8 dead_cores;
-
- offset = EMU_FUSE_MAPX(i);
- emu_fuse.value = nitrox_read_csr(ndev, offset);
- if (emu_fuse.s.valid) {
- dead_cores = hweight32(emu_fuse.s.ae_fuse);
- ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
- dead_cores = hweight16(emu_fuse.s.se_fuse);
- ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
- }
- }
-}
-
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
int err;
@@ -336,135 +319,6 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev)
return 0;
}
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static int registers_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
- u64 offset;
-
- /* NPS DMA stats */
- offset = NPS_STATS_PKT_DMA_RD_CNT;
- seq_printf(s, "NPS_STATS_PKT_DMA_RD_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
- offset = NPS_STATS_PKT_DMA_WR_CNT;
- seq_printf(s, "NPS_STATS_PKT_DMA_WR_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
-
- /* BMI/BMO stats */
- offset = BMI_NPS_PKT_CNT;
- seq_printf(s, "BMI_NPS_PKT_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
- offset = BMO_NPS_SLC_PKT_CNT;
- seq_printf(s, "BMO_NPS_PKT_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
-
- return 0;
-}
-
-static int registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, registers_show, inode->i_private);
-}
-
-static const struct file_operations register_fops = {
- .owner = THIS_MODULE,
- .open = registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int firmware_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
-
- seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
- return 0;
-}
-
-static int firmware_open(struct inode *inode, struct file *file)
-{
- return single_open(file, firmware_show, inode->i_private);
-}
-
-static const struct file_operations firmware_fops = {
- .owner = THIS_MODULE,
- .open = firmware_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int nitrox_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
-
- seq_printf(s, "NITROX-5 [idx: %d]\n", ndev->idx);
- seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id);
- seq_printf(s, " Cores [AE: %u SE: %u]\n",
- ndev->hw.ae_cores, ndev->hw.se_cores);
- seq_printf(s, " Number of Queues: %u\n", ndev->nr_queues);
- seq_printf(s, " Queue length: %u\n", ndev->qlen);
- seq_printf(s, " Node: %u\n", ndev->node);
-
- return 0;
-}
-
-static int nitrox_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nitrox_show, inode->i_private);
-}
-
-static const struct file_operations nitrox_fops = {
- .owner = THIS_MODULE,
- .open = nitrox_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void nitrox_debugfs_exit(struct nitrox_device *ndev)
-{
- debugfs_remove_recursive(ndev->debugfs_dir);
- ndev->debugfs_dir = NULL;
-}
-
-static int nitrox_debugfs_init(struct nitrox_device *ndev)
-{
- struct dentry *dir, *f;
-
- dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!dir)
- return -ENOMEM;
-
- ndev->debugfs_dir = dir;
- f = debugfs_create_file("counters", 0400, dir, ndev, &register_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("nitrox", 0400, dir, ndev, &nitrox_fops);
- if (!f)
- goto err;
-
- return 0;
-
-err:
- nitrox_debugfs_exit(ndev);
- return -ENODEV;
-}
-#else
-static int nitrox_debugfs_init(struct nitrox_device *ndev)
-{
- return 0;
-}
-
-static void nitrox_debugfs_exit(struct nitrox_device *ndev)
-{
-}
-#endif
-
/**
* nitrox_probe - NITROX Initialization function.
* @pdev: PCI device information struct
@@ -487,7 +341,7 @@ static int nitrox_probe(struct pci_dev *pdev,
return err;
/* do FLR */
- err = nitrox_reset_device(pdev);
+ err = nitrox_device_flr(pdev);
if (err) {
dev_err(&pdev->dev, "FLR failed\n");
pci_disable_device(pdev);
@@ -555,7 +409,12 @@ static int nitrox_probe(struct pci_dev *pdev,
if (err)
goto pf_hw_fail;
- set_bit(NITROX_READY, &ndev->status);
+ /* clear the statistics */
+ atomic64_set(&ndev->stats.posted, 0);
+ atomic64_set(&ndev->stats.completed, 0);
+ atomic64_set(&ndev->stats.dropped, 0);
+
+ atomic_set(&ndev->state, __NDEV_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
@@ -567,7 +426,7 @@ static int nitrox_probe(struct pci_dev *pdev,
crypto_fail:
nitrox_debugfs_exit(ndev);
- clear_bit(NITROX_READY, &ndev->status);
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
pf_hw_fail:
@@ -602,11 +461,16 @@ static void nitrox_remove(struct pci_dev *pdev)
dev_info(DEV(ndev), "Removing Device %x:%x\n",
ndev->hw.vendor_id, ndev->hw.device_id);
- clear_bit(NITROX_READY, &ndev->status);
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
nitrox_remove_from_devlist(ndev);
+
+#ifdef CONFIG_PCI_IOV
+ /* disable SR-IOV */
+ nitrox_sriov_configure(pdev, 0);
+#endif
nitrox_crypto_unregister();
nitrox_debugfs_exit(ndev);
nitrox_pf_sw_cleanup(ndev);
@@ -632,6 +496,9 @@ static struct pci_driver nitrox_driver = {
.probe = nitrox_probe,
.remove = nitrox_remove,
.shutdown = nitrox_shutdown,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = nitrox_sriov_configure,
+#endif
};
module_pci_driver(nitrox_driver);
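
The probe and remove hunks above retire the NITROX_READY bit in ndev->status in favour of an atomic state word. A minimal sketch of the readiness test consumers now perform (the helper name is illustrative; the real accessor lives in the driver's headers, not in this patch):

static inline bool nitrox_ready(struct nitrox_device *ndev)
{
	return atomic_read(&ndev->state) == __NDEV_READY;
}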
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4a362fc22f62..3987cd84c033 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -382,11 +382,11 @@ static inline void backlog_list_add(struct nitrox_softreq *sr,
{
INIT_LIST_HEAD(&sr->backlog);
- spin_lock_bh(&cmdq->backlog_lock);
+ spin_lock_bh(&cmdq->backlog_qlock);
list_add_tail(&sr->backlog, &cmdq->backlog_head);
atomic_inc(&cmdq->backlog_count);
atomic_set(&sr->status, REQ_BACKLOG);
- spin_unlock_bh(&cmdq->backlog_lock);
+ spin_unlock_bh(&cmdq->backlog_qlock);
}
static inline void response_list_add(struct nitrox_softreq *sr,
@@ -394,17 +394,17 @@ static inline void response_list_add(struct nitrox_softreq *sr,
{
INIT_LIST_HEAD(&sr->response);
- spin_lock_bh(&cmdq->response_lock);
+ spin_lock_bh(&cmdq->resp_qlock);
list_add_tail(&sr->response, &cmdq->response_head);
- spin_unlock_bh(&cmdq->response_lock);
+ spin_unlock_bh(&cmdq->resp_qlock);
}
static inline void response_list_del(struct nitrox_softreq *sr,
struct nitrox_cmdq *cmdq)
{
- spin_lock_bh(&cmdq->response_lock);
+ spin_lock_bh(&cmdq->resp_qlock);
list_del(&sr->response);
- spin_unlock_bh(&cmdq->response_lock);
+ spin_unlock_bh(&cmdq->resp_qlock);
}
static struct nitrox_softreq *
@@ -439,11 +439,11 @@ static void post_se_instr(struct nitrox_softreq *sr,
int idx;
u8 *ent;
- spin_lock_bh(&cmdq->cmdq_lock);
+ spin_lock_bh(&cmdq->cmd_qlock);
idx = cmdq->write_idx;
/* copy the instruction */
- ent = cmdq->head + (idx * cmdq->instr_size);
+ ent = cmdq->base + (idx * cmdq->instr_size);
memcpy(ent, &sr->instr, cmdq->instr_size);
atomic_set(&sr->status, REQ_POSTED);
@@ -459,7 +459,10 @@ static void post_se_instr(struct nitrox_softreq *sr,
cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
- spin_unlock_bh(&cmdq->cmdq_lock);
+ spin_unlock_bh(&cmdq->cmd_qlock);
+
+ /* increment the posted command count */
+ atomic64_inc(&ndev->stats.posted);
}
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
@@ -471,7 +474,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
if (!atomic_read(&cmdq->backlog_count))
return 0;
- spin_lock_bh(&cmdq->backlog_lock);
+ spin_lock_bh(&cmdq->backlog_qlock);
list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
struct skcipher_request *skreq;
@@ -494,7 +497,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
/* backlog requests are posted, wakeup with -EINPROGRESS */
skcipher_request_complete(skreq, -EINPROGRESS);
}
- spin_unlock_bh(&cmdq->backlog_lock);
+ spin_unlock_bh(&cmdq->backlog_qlock);
return ret;
}
@@ -508,8 +511,11 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
post_backlog_cmds(cmdq);
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
- if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ /* increment drop count */
+ atomic64_inc(&ndev->stats.dropped);
return -ENOSPC;
+ }
/* add to backlog list */
backlog_list_add(sr, cmdq);
return -EBUSY;
@@ -572,7 +578,7 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
/* select the queue */
qno = smp_processor_id() % ndev->nr_queues;
- sr->cmdq = &ndev->pkt_cmdqs[qno];
+ sr->cmdq = &ndev->pkt_inq[qno];
/*
* 64-Byte Instruction Format
@@ -694,6 +700,7 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
READ_ONCE(sr->resp.orh));
}
atomic_dec(&cmdq->pending_count);
+ atomic64_inc(&ndev->stats.completed);
/* sync with other cpus */
smp_mb__after_atomic();
/* remove from response list */
@@ -714,18 +721,18 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
}
/**
- * pkt_slc_resp_handler - post processing of SE responses
+ * pkt_slc_resp_tasklet - post processing of SE responses
*/
-void pkt_slc_resp_handler(unsigned long data)
+void pkt_slc_resp_tasklet(unsigned long data)
{
- struct bh_data *bh = (void *)(uintptr_t)(data);
- struct nitrox_cmdq *cmdq = bh->cmdq;
- union nps_pkt_slc_cnts pkt_slc_cnts;
+ struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+ struct nitrox_cmdq *cmdq = qvec->cmdq;
+ union nps_pkt_slc_cnts slc_cnts;
/* read completion count */
- pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
+ slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
/* resend the interrupt if more work to do */
- pkt_slc_cnts.s.resend = 1;
+ slc_cnts.s.resend = 1;
process_response_list(cmdq);
@@ -733,7 +740,7 @@ void pkt_slc_resp_handler(unsigned long data)
* clear the interrupt with resend bit enabled,
* MSI-X interrupt generates if Completion count > Threshold
*/
- writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
+ writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
/* order the writes */
mmiowb();
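
The posted/completed/dropped counters this file now maintains are plain atomic64_t statistics, cleared in nitrox_probe() above. A hedged sketch of a reader, assuming a seq_file consumer like the driver's other debugfs entries (the routine name is invented for illustration):

static int nitrox_stats_show(struct seq_file *s, void *v)
{
	struct nitrox_device *ndev = s->private;

	seq_printf(s, "posted: %lld\n",
		   (s64)atomic64_read(&ndev->stats.posted));
	seq_printf(s, "completed: %lld\n",
		   (s64)atomic64_read(&ndev->stats.completed));
	seq_printf(s, "dropped: %lld\n",
		   (s64)atomic64_read(&ndev->stats.dropped));
	return 0;
}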
diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
new file mode 100644
index 000000000000..30c0aa874583
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_hal.h"
+#include "nitrox_common.h"
+#include "nitrox_isr.h"
+
+static inline bool num_vfs_valid(int num_vfs)
+{
+ bool valid = false;
+
+ switch (num_vfs) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ valid = true;
+ break;
+ }
+
+ return valid;
+}
+
+static inline enum vf_mode num_vfs_to_mode(int num_vfs)
+{
+ enum vf_mode mode = 0;
+
+ switch (num_vfs) {
+ case 0:
+ mode = __NDEV_MODE_PF;
+ break;
+ case 16:
+ mode = __NDEV_MODE_VF16;
+ break;
+ case 32:
+ mode = __NDEV_MODE_VF32;
+ break;
+ case 64:
+ mode = __NDEV_MODE_VF64;
+ break;
+ case 128:
+ mode = __NDEV_MODE_VF128;
+ break;
+ }
+
+ return mode;
+}
+
+static void pf_sriov_cleanup(struct nitrox_device *ndev)
+{
+ /* PF has no queues in SR-IOV mode */
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
+ /* unregister crypto algorithms */
+ nitrox_crypto_unregister();
+
+ /* cleanup PF resources */
+ nitrox_unregister_interrupts(ndev);
+ nitrox_common_sw_cleanup(ndev);
+}
+
+static int pf_sriov_init(struct nitrox_device *ndev)
+{
+ int err;
+
+ /* allocate resources for PF */
+ err = nitrox_common_sw_init(ndev);
+ if (err)
+ return err;
+
+ err = nitrox_register_interrupts(ndev);
+ if (err) {
+ nitrox_common_sw_cleanup(ndev);
+ return err;
+ }
+
+ /* configure the packet queues */
+ nitrox_config_pkt_input_rings(ndev);
+ nitrox_config_pkt_solicit_ports(ndev);
+
+ /* set device to ready state */
+ atomic_set(&ndev->state, __NDEV_READY);
+
+ /* register crypto algorithms */
+ return nitrox_crypto_register();
+}
+
+static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct nitrox_device *ndev = pci_get_drvdata(pdev);
+ int err;
+
+ if (!num_vfs_valid(num_vfs)) {
+ dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs);
+ return -EINVAL;
+ }
+
+ if (pci_num_vf(pdev) == num_vfs)
+ return num_vfs;
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err);
+ return err;
+ }
+ dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);
+
+ ndev->num_vfs = num_vfs;
+ ndev->mode = num_vfs_to_mode(num_vfs);
+ /* set bit in flags */
+ set_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+
+ /* cleanup PF resources */
+ pf_sriov_cleanup(ndev);
+
+ config_nps_core_vfcfg_mode(ndev, ndev->mode);
+
+ return num_vfs;
+}
+
+static int nitrox_sriov_disable(struct pci_dev *pdev)
+{
+ struct nitrox_device *ndev = pci_get_drvdata(pdev);
+
+ if (!test_bit(__NDEV_SRIOV_BIT, &ndev->flags))
+ return 0;
+
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(DEV(ndev), "VFs are attached to VM. Can't disable SR-IOV\n");
+ return -EPERM;
+ }
+ pci_disable_sriov(pdev);
+ /* clear bit in flags */
+ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+
+ ndev->num_vfs = 0;
+ ndev->mode = __NDEV_MODE_PF;
+
+ config_nps_core_vfcfg_mode(ndev, ndev->mode);
+
+ return pf_sriov_init(ndev);
+}
+
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (!num_vfs)
+ return nitrox_sriov_disable(pdev);
+
+ return nitrox_sriov_enable(pdev, num_vfs);
+}
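
nitrox_sriov_configure() is normally invoked by the PCI core when userspace writes the device's sriov_numvfs sysfs attribute; only 16, 32, 64 and 128 VFs are accepted, and enabling VFs tears down the PF's own crypto queues. A minimal sketch of the round trip, calling the entry point directly for illustration only (the function name is invented):

static int nitrox_sriov_roundtrip(struct pci_dev *pdev)
{
	int ret;

	ret = nitrox_sriov_configure(pdev, 64);	/* PF -> __NDEV_MODE_VF64 */
	if (ret < 0)
		return ret;

	return nitrox_sriov_configure(pdev, 0);	/* back to __NDEV_MODE_PF */
}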
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 94b5bcf5b628..ca4630b8395f 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -102,7 +102,7 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
- return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
+ return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
@@ -151,12 +151,13 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
(ctx->u.aes.key_len != AES_KEYSIZE_256))
fallback = 1;
if (fallback) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq,
+ ctx->u.aes.tfm_skcipher);
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
*/
- skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -203,12 +204,12 @@ static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *fallback_tfm;
+ struct crypto_sync_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
ctx->u.aes.key_len = 0;
- fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
+ fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
@@ -226,7 +227,7 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
+ crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
}
static int ccp_register_aes_xts_alg(struct list_head *head,
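
This file is the first of several below converted from crypto_skcipher to crypto_sync_skcipher fallbacks. The sync variant is what makes SYNC_SKCIPHER_REQUEST_ON_STACK() safe: a request allocated on the stack must never complete asynchronously. The distilled pattern, as a sketch with an invented function name and error handling trimmed:

static int xts_fallback_encrypt(struct crypto_sync_skcipher *fallback,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int len, u8 *iv, u32 flags)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
	int ret;

	skcipher_request_set_sync_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst, len, iv);
	ret = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe request state off the stack */
	return ret;
}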
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index b9fd090c46c2..28819e11db96 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -88,7 +88,7 @@ static inline struct ccp_crypto_ahash_alg *
/***** AES related defines *****/
struct ccp_aes_ctx {
/* Fallback cipher for XTS with unsupported unit sizes */
- struct crypto_skcipher *tfm_skcipher;
+ struct crypto_sync_skcipher *tfm_skcipher;
/* Cipher used to generate CMAC K1/K2 keys */
struct crypto_cipher *tfm_cipher;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 72790d88236d..d64a78ccc03e 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -31,8 +31,9 @@
((psp_master->api_major) >= _maj && \
(psp_master->api_minor) >= _min)
-#define DEVICE_NAME "sev"
-#define SEV_FW_FILE "amd/sev.fw"
+#define DEVICE_NAME "sev"
+#define SEV_FW_FILE "amd/sev.fw"
+#define SEV_FW_NAME_SIZE 64
static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
@@ -423,7 +424,7 @@ EXPORT_SYMBOL_GPL(psp_copy_user_blob);
static int sev_get_api_version(void)
{
struct sev_user_data_status *status;
- int error, ret;
+ int error = 0, ret;
status = &psp_master->status_cmd_buf;
ret = sev_platform_status(status, &error);
@@ -440,6 +441,41 @@ static int sev_get_api_version(void)
return 0;
}
+static int sev_get_firmware(struct device *dev,
+ const struct firmware **firmware)
+{
+ char fw_name_specific[SEV_FW_NAME_SIZE];
+ char fw_name_subset[SEV_FW_NAME_SIZE];
+
+ snprintf(fw_name_specific, sizeof(fw_name_specific),
+ "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ snprintf(fw_name_subset, sizeof(fw_name_subset),
+ "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
+ boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
+
+ /* Check for SEV FW for a particular model.
+ * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
+ *
+ * or
+ *
+ * Check for SEV FW common to a subset of models.
+ * Ex. amd_sev_fam17h_model0xh.sbin for
+ * Family 17h Model 00h -- Family 17h Model 0Fh
+ *
+ * or
+ *
+ * Fall-back to using generic name: sev.fw
+ */
+ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ return 0;
+
+ return -ENOENT;
+}
+
/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
@@ -449,9 +485,10 @@ static int sev_update_firmware(struct device *dev)
struct page *p;
u64 data_size;
- ret = request_firmware(&firmware, SEV_FW_FILE, dev);
- if (ret < 0)
+ if (sev_get_firmware(dev, &firmware) == -ENOENT) {
+ dev_dbg(dev, "No SEV firmware file present\n");
return -1;
+ }
/*
* SEV FW expects the physical address given to it to be 32
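
Worked example of the lookup order sev_get_firmware() implements, for a Family 17h (boot_cpu_data.x86 == 0x17) Model 08h part:

	amd/amd_sev_fam17h_model08h.sbin	/* exact model */
	amd/amd_sev_fam17h_model0xh.sbin	/* models 00h-0Fh subset */
	amd/sev.fw				/* generic fallback */

The first file firmware_request_nowarn() finds wins; only if all three are absent does sev_update_firmware() skip the update.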
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 71734f254fd1..b75dc7db2d4a 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -33,8 +33,31 @@ struct sp_platform {
unsigned int irq_count;
};
-static const struct acpi_device_id sp_acpi_match[];
-static const struct of_device_id sp_of_match[];
+static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 0,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv3_platform,
+#endif
+ },
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id sp_acpi_match[] = {
+ { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id sp_of_match[] = {
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sp_of_match);
+#endif
static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
{
@@ -201,32 +224,6 @@ static int sp_platform_resume(struct platform_device *pdev)
}
#endif
-static const struct sp_dev_vdata dev_vdata[] = {
- {
- .bar = 0,
-#ifdef CONFIG_CRYPTO_DEV_SP_CCP
- .ccp_vdata = &ccpv3_platform,
-#endif
- },
-};
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id sp_acpi_match[] = {
- { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id sp_of_match[] = {
- { .compatible = "amd,ccp-seattle-v1a",
- .data = (const void *)&dev_vdata[0] },
- { },
-};
-MODULE_DEVICE_TABLE(of, sp_of_match);
-#endif
-
static struct platform_driver sp_platform_driver = {
.driver = {
.name = "ccp",
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index a091ae57f902..45985b955d2c 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -449,8 +449,7 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
* @pdesc: pointer HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
-static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
- enum drv_cipher_mode mode)
+static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
}
@@ -461,8 +460,7 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
* @pdesc: pointer HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
-static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
- enum drv_crypto_direction mode)
+static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
}
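
The two prototypes above are relaxed from specific enum types to int because other call sites in the driver feed values from different enums (hash hw modes, for example) into the same descriptor fields; keeping the narrow enum parameter types makes those implicit enum-to-enum conversions warn on stricter compilers.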
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 010bbf607797..db203f8be429 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -673,7 +673,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
return min(srclen, dstlen);
}
-static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
u32 flags,
struct scatterlist *src,
struct scatterlist *dst,
@@ -683,9 +683,9 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
{
int err;
- SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
- skcipher_request_set_tfm(subreq, cipher);
+ skcipher_request_set_sync_tfm(subreq, cipher);
skcipher_request_set_callback(subreq, flags, NULL, NULL);
skcipher_request_set_crypt(subreq, src, dst,
nbytes, iv);
@@ -856,13 +856,14 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
- crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+ crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
+ cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |=
- crypto_skcipher_get_flags(ablkctx->sw_cipher) &
+ crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
CRYPTO_TFM_RES_MASK;
return err;
}
@@ -1337,8 +1338,7 @@ static int chcr_device_init(struct chcr_context *ctx)
}
ctx->dev = u_ctx->dev;
adap = padap(ctx->dev);
- ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
- adap->vres.ncrypto_fc);
+ ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
spin_lock(&ctx->dev->lock_chcr_dev);
@@ -1369,8 +1369,8 @@ static int chcr_cra_init(struct crypto_tfm *tfm)
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
@@ -1399,8 +1399,8 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm)
/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
* cannot be used as fallback in chcr_handle_cipher_response
*/
- ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
@@ -1415,7 +1415,7 @@ static void chcr_cra_exit(struct crypto_tfm *tfm)
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- crypto_free_skcipher(ablkctx->sw_cipher);
+ crypto_free_sync_skcipher(ablkctx->sw_cipher);
if (ablkctx->aes_generic)
crypto_free_cipher(ablkctx->aes_generic);
}
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 62249d4ed373..2c472e3c6aeb 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -43,7 +43,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
static struct cxgb4_uld_info chcr_uld_info = {
.name = DRV_MODULE_NAME,
.nrxq = MAX_ULD_QSETS,
- .ntxq = MAX_ULD_QSETS,
+ /* Max ntxq will be derived from the fw config file */
.rxq_size = 1024,
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
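
With .ntxq no longer hardwired to MAX_ULD_QSETS, cxgb4 derives the ULD TX queue count from the firmware configuration file, and chcr_device_init() above simply reads the result back through u_ctx->lldi.ntxq instead of recomputing it from nrxq and vres.ncrypto_fc.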
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 0d2c70c344f3..d37ef41f9ebe 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -170,7 +170,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
}
struct ablk_ctx {
- struct crypto_skcipher *sw_cipher;
+ struct crypto_sync_skcipher *sw_cipher;
struct crypto_cipher *aes_generic;
__be32 key_ctx_hdr;
unsigned int enckey_len;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 0997e166ea57..20209e29f814 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -234,8 +234,7 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
return;
out:
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
}
static void release_tcp_port(struct sock *sk)
@@ -406,12 +405,10 @@ static int wait_for_states(struct sock *sk, unsigned int states)
int chtls_disconnect(struct sock *sk, int flags)
{
- struct chtls_sock *csk;
struct tcp_sock *tp;
int err;
tp = tcp_sk(sk);
- csk = rcu_dereference_sk_user_data(sk);
chtls_purge_recv_queue(sk);
chtls_purge_receive_queue(sk);
chtls_purge_write_queue(sk);
@@ -1014,7 +1011,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
- const struct tcphdr *tcph;
struct inet_sock *newinet;
const struct iphdr *iph;
struct net_device *ndev;
@@ -1036,7 +1032,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (!dst)
goto free_sk;
- tcph = (struct tcphdr *)(iph + 1);
n = dst_neigh_lookup(dst, &iph->saddr);
if (!n)
goto free_sk;
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index f59b044ebd25..f472c51abe56 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -272,8 +272,7 @@ static void chtls_free_uld(struct chtls_dev *cdev)
for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
kfree_skb(cdev->rspq_skb_cache[i]);
kfree(cdev->lldi);
- if (cdev->askb)
- kfree_skb(cdev->askb);
+ kfree_skb(cdev->askb);
kfree(cdev);
}
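
Both chtls hunks above lean on kfree_skb() being NULL-safe (it returns early for a NULL skb), which is why the removed if (skb) and if (cdev->askb) guards were redundant.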
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 56bd28174f52..4e6ff32f8a7e 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -28,9 +28,24 @@
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
+#define DCP_SHA_PAY_SZ 64
#define DCP_ALIGNMENT 64
+/*
+ * Null hashes to align with hw behavior on imx6sl and imx6ull;
+ * the bytes are flipped for consistency with the hw output
+ */
+static const uint8_t sha1_null_hash[] =
+ "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
+ "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
+
+static const uint8_t sha256_null_hash[] =
+ "\x55\xb8\x52\x78\x1b\x99\x95\xa4"
+ "\x4c\x93\x9b\x64\xe4\x41\xae\x27"
+ "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
+ "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
+
/* DCP DMA descriptor. */
struct dcp_dma_desc {
uint32_t next_cmd_addr;
@@ -48,6 +63,7 @@ struct dcp_coherent_block {
uint8_t aes_in_buf[DCP_BUF_SZ];
uint8_t aes_out_buf[DCP_BUF_SZ];
uint8_t sha_in_buf[DCP_BUF_SZ];
+ uint8_t sha_out_buf[DCP_SHA_PAY_SZ];
uint8_t aes_key[2 * AES_KEYSIZE_128];
@@ -84,7 +100,7 @@ struct dcp_async_ctx {
unsigned int hot:1;
/* Crypto-specific context */
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
};
@@ -99,6 +115,11 @@ struct dcp_sha_req_ctx {
unsigned int fini:1;
};
+struct dcp_export_state {
+ struct dcp_sha_req_ctx req_ctx;
+ struct dcp_async_ctx async_ctx;
+};
+
/*
* There can even be only one instance of the MXS DCP due to the
* design of Linux Crypto API.
@@ -209,6 +230,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
DCP_BUF_SZ, DMA_FROM_DEVICE);
+ if (actx->fill % AES_BLOCK_SIZE) {
+ dev_err(sdcp->dev, "Invalid block size!\n");
+ ret = -EINVAL;
+ goto aes_done_run;
+ }
+
/* Fill in the DMA descriptor. */
desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
MXS_DCP_CONTROL0_INTERRUPT |
@@ -238,6 +265,7 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
ret = mxs_dcp_start_dma(actx);
+aes_done_run:
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -264,13 +292,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
uint32_t dst_off = 0;
+ uint32_t last_out_len = 0;
uint8_t *key = sdcp->coh->aes_key;
int ret = 0;
int split = 0;
- unsigned int i, len, clen, rem = 0;
+ unsigned int i, len, clen, rem = 0, tlen = 0;
int init = 0;
+ bool limit_hit = false;
actx->fill = 0;
@@ -289,6 +319,11 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
for_each_sg(req->src, src, nents, i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
+ tlen += len;
+ limit_hit = tlen > req->nbytes;
+
+ if (limit_hit)
+ len = req->nbytes - (tlen - len);
do {
if (actx->fill + len > out_off)
@@ -305,13 +340,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
* If we filled the buffer or this is the last SG,
* submit the buffer.
*/
- if (actx->fill == out_off || sg_is_last(src)) {
+ if (actx->fill == out_off || sg_is_last(src) ||
+ limit_hit) {
ret = mxs_dcp_run_aes(actx, req, init);
if (ret)
return ret;
init = 0;
out_tmp = out_buf;
+ last_out_len = actx->fill;
while (dst && actx->fill) {
if (!split) {
dst_buf = sg_virt(dst);
@@ -334,6 +371,19 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
}
}
} while (len);
+
+ if (limit_hit)
+ break;
+ }
+
+ /* Copy the IV for CBC for chaining */
+ if (!rctx->ecb) {
+ if (rctx->enc)
+ memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
+ AES_BLOCK_SIZE);
+ else
+ memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
+ AES_BLOCK_SIZE);
}
return ret;
@@ -380,10 +430,10 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->nbytes, req->info);
@@ -464,16 +514,16 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
* but is supported by in-kernel software implementation, we use
* software fallback.
*/
- crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(actx->fallback,
+ crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(actx->fallback, key, len);
+ ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
if (!ret)
return 0;
tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
+ tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
CRYPTO_TFM_RES_MASK;
return ret;
@@ -482,11 +532,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *blk;
+ struct crypto_sync_skcipher *blk;
- blk = crypto_alloc_skcipher(name, 0, flags);
+ blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
@@ -499,7 +548,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(actx->fallback);
+ crypto_free_sync_skcipher(actx->fallback);
}
/*
@@ -513,8 +562,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
-
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
dma_addr_t digest_phys = 0;
@@ -536,10 +583,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
desc->payload = 0;
desc->status = 0;
+ /*
+ * Align driver with hw behavior when generating null hashes
+ */
+ if (rctx->init && rctx->fini && desc->size == 0) {
+ struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+ const uint8_t *sha_buf =
+ (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
+ sha1_null_hash : sha256_null_hash;
+ memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
+ ret = 0;
+ goto done_run;
+ }
+
/* Set HASH_TERM bit for last transfer block. */
if (rctx->fini) {
- digest_phys = dma_map_single(sdcp->dev, req->result,
- halg->digestsize, DMA_FROM_DEVICE);
+ digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
+ DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys;
}
@@ -547,9 +607,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
ret = mxs_dcp_start_dma(actx);
if (rctx->fini)
- dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
+ dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
DMA_FROM_DEVICE);
+done_run:
dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
return ret;
@@ -567,6 +628,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf;
+ uint8_t *out_buf = sdcp->coh->sha_out_buf;
uint8_t *src_buf;
@@ -621,11 +683,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
actx->fill = 0;
- /* For some reason, the result is flipped. */
- for (i = 0; i < halg->digestsize / 2; i++) {
- swap(req->result[i],
- req->result[halg->digestsize - i - 1]);
- }
+ /* The result comes back byte-reversed; flip it while copying out */
+ for (i = 0; i < halg->digestsize; i++)
+ req->result[i] = out_buf[halg->digestsize - i - 1];
}
return 0;
@@ -766,14 +826,32 @@ static int dcp_sha_digest(struct ahash_request *req)
return dcp_sha_finup(req);
}
-static int dcp_sha_noimport(struct ahash_request *req, const void *in)
+static int dcp_sha_import(struct ahash_request *req, const void *in)
{
- return -ENOSYS;
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+ const struct dcp_export_state *export = in;
+
+ memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
+ memset(actx, 0, sizeof(struct dcp_async_ctx));
+ memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
+ memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
+
+ return 0;
}
-static int dcp_sha_noexport(struct ahash_request *req, void *out)
+static int dcp_sha_export(struct ahash_request *req, void *out)
{
- return -ENOSYS;
+ struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
+ struct dcp_export_state *export = out;
+
+ memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
+ memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
+
+ return 0;
}
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
@@ -846,10 +924,11 @@ static struct ahash_alg dcp_sha1_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
- .import = dcp_sha_noimport,
- .export = dcp_sha_noexport,
+ .import = dcp_sha_import,
+ .export = dcp_sha_export,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct dcp_export_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-dcp",
@@ -872,10 +951,11 @@ static struct ahash_alg dcp_sha256_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
- .import = dcp_sha_noimport,
- .export = dcp_sha_noexport,
+ .import = dcp_sha_import,
+ .export = dcp_sha_export,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct dcp_export_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-dcp",
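
With real import/export callbacks and a statesize, the DCP hashes can take part in the standard ahash save/restore flow. A minimal sketch of that round trip (the function name is invented; both requests are assumed to use the same dcp tfm):

static int dcp_sha_clone_state(struct ahash_request *src_req,
			       struct ahash_request *dst_req)
{
	char state[sizeof(struct dcp_export_state)];
	int err;

	err = crypto_ahash_export(src_req, state);	/* snapshot partial hash */
	if (err)
		return err;

	return crypto_ahash_import(dst_req, state);	/* resume elsewhere */
}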
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9019f6b67986..a553ffddb11b 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -522,9 +522,9 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
!!(mode & FLAGS_CBC));
if (req->nbytes < aes_fallback_sz) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL,
NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -564,11 +564,11 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
return 0;
@@ -613,11 +613,10 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *blk;
+ struct crypto_sync_skcipher *blk;
- blk = crypto_alloc_skcipher(name, 0, flags);
+ blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
@@ -667,7 +666,7 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm)
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback)
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index fc3b46a85809..7e02920ef6f8 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -101,7 +101,7 @@ struct omap_aes_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct crypto_skcipher *ctr;
};
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 321d5e2ac833..a28f1d18fe01 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -171,7 +171,7 @@ struct spacc_ablk_ctx {
* The fallback cipher. If the operation can't be done in hardware,
* fallback to a software version.
*/
- struct crypto_skcipher *sw_cipher;
+ struct crypto_sync_skcipher *sw_cipher;
};
/* AEAD cipher context. */
@@ -799,17 +799,17 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* Set the fallback transform to use the same request flags as
* the hardware transform.
*/
- crypto_skcipher_clear_flags(ctx->sw_cipher,
+ crypto_sync_skcipher_clear_flags(ctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->sw_cipher,
+ crypto_sync_skcipher_set_flags(ctx->sw_cipher,
cipher->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
+ err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |=
- crypto_skcipher_get_flags(ctx->sw_cipher) &
+ crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
CRYPTO_TFM_RES_MASK;
if (err)
@@ -914,7 +914,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
struct crypto_tfm *old_tfm =
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
int err;
/*
@@ -922,7 +922,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
* the ciphering has completed, put the old transform back into the
* request.
*/
- skcipher_request_set_tfm(subreq, ctx->sw_cipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->nbytes, req->info);
@@ -1020,9 +1020,8 @@ static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->sw_cipher = crypto_alloc_skcipher(
- alg->cra_name, 0, CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->sw_cipher = crypto_alloc_sync_skcipher(
+ alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
alg->cra_name);
@@ -1041,7 +1040,7 @@ static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->sw_cipher);
+ crypto_free_sync_skcipher(ctx->sw_cipher);
}
static int spacc_ablk_encrypt(struct ablkcipher_request *req)
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 9225d060e18f..f5e960d23a7a 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -198,7 +198,6 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
pr_err("QAT: Can't find acceleration device\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1138e41d6805..d2698299896f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -113,6 +113,13 @@ struct qat_alg_aead_ctx {
struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
struct qat_crypto_instance *inst;
+ union {
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ struct sha512_state sha512;
+ };
+ char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
+ char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_ablkcipher_ctx {
@@ -148,37 +155,32 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
unsigned int auth_keylen)
{
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
- struct sha1_state sha1;
- struct sha256_state sha256;
- struct sha512_state sha512;
int block_size = crypto_shash_blocksize(ctx->hash_tfm);
int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- char ipad[block_size];
- char opad[block_size];
__be32 *hash_state_out;
__be64 *hash512_state_out;
int i, offset;
- memset(ipad, 0, block_size);
- memset(opad, 0, block_size);
+ memset(ctx->ipad, 0, block_size);
+ memset(ctx->opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
shash->flags = 0x0;
if (auth_keylen > block_size) {
int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, ipad);
+ auth_keylen, ctx->ipad);
if (ret)
return ret;
- memcpy(opad, ipad, digest_size);
+ memcpy(ctx->opad, ctx->ipad, digest_size);
} else {
- memcpy(ipad, auth_key, auth_keylen);
- memcpy(opad, auth_key, auth_keylen);
+ memcpy(ctx->ipad, auth_key, auth_keylen);
+ memcpy(ctx->opad, auth_key, auth_keylen);
}
for (i = 0; i < block_size; i++) {
- char *ipad_ptr = ipad + i;
- char *opad_ptr = opad + i;
+ char *ipad_ptr = ctx->ipad + i;
+ char *opad_ptr = ctx->opad + i;
*ipad_ptr ^= HMAC_IPAD_VALUE;
*opad_ptr ^= HMAC_OPAD_VALUE;
}
@@ -186,7 +188,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
if (crypto_shash_init(shash))
return -EFAULT;
- if (crypto_shash_update(shash, ipad, block_size))
+ if (crypto_shash_update(shash, ctx->ipad, block_size))
return -EFAULT;
hash_state_out = (__be32 *)hash->sha.state1;
@@ -194,22 +196,22 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &sha1))
+ if (crypto_shash_export(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha1.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &sha256))
+ if (crypto_shash_export(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha256.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &sha512))
+ if (crypto_shash_export(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
break;
default:
return -EFAULT;
@@ -218,7 +220,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
if (crypto_shash_init(shash))
return -EFAULT;
- if (crypto_shash_update(shash, opad, block_size))
+ if (crypto_shash_update(shash, ctx->opad, block_size))
return -EFAULT;
offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
@@ -227,28 +229,28 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &sha1))
+ if (crypto_shash_export(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha1.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &sha256))
+ if (crypto_shash_export(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha256.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &sha512))
+ if (crypto_shash_export(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
break;
default:
return -EFAULT;
}
- memzero_explicit(ipad, block_size);
- memzero_explicit(opad, block_size);
+ memzero_explicit(ctx->ipad, block_size);
+ memzero_explicit(ctx->opad, block_size);
return 0;
}
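
Moving ipad/opad and the hash-state union from the stack into qat_alg_aead_ctx removes variable-length arrays: block_size is only known at run time, while SHA512_BLOCK_SIZE statically bounds every algorithm this driver precomputes for:

	/* fixed bounds now backing the pads */
	SHA1_BLOCK_SIZE   =  64
	SHA256_BLOCK_SIZE =  64
	SHA512_BLOCK_SIZE = 128		/* array size; covers the other two */

The memzero_explicit() calls at the end keep the old behaviour of scrubbing the key-derived pads, now inside the long-lived context.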
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index ea4d96bf47e8..585e1cab9ae3 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -189,7 +189,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
fallback:
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
ctx->enc_keylen = keylen;
return ret;
@@ -212,9 +212,9 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -245,9 +245,8 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
memset(ctx, 0, sizeof(*ctx));
tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
- ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
}
@@ -255,7 +254,7 @@ static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
}
struct qce_ablkcipher_def {
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 2b0278bb6e92..ee055bfe98a0 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -22,7 +22,7 @@
struct qce_cipher_ctx {
u8 enc_key[QCE_MAX_KEY_SIZE];
unsigned int enc_keylen;
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
/**
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index faa282074e5a..0064be0e3941 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -249,8 +249,8 @@ struct s5p_aes_reqctx {
struct s5p_aes_ctx {
struct s5p_aes_dev *dev;
- uint8_t aes_key[AES_MAX_KEY_SIZE];
- uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
+ u8 aes_key[AES_MAX_KEY_SIZE];
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
int keylen;
};
@@ -475,9 +475,9 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
}
/* Calls the completion. Cannot be called with dev->lock hold. */
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_aes_complete(struct ablkcipher_request *req, int err)
{
- dev->req->base.complete(&dev->req->base, err);
+ req->base.complete(&req->base, err);
}
static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -491,7 +491,7 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev)
}
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
- struct scatterlist **dst)
+ struct scatterlist **dst)
{
void *pages;
int len;
@@ -518,46 +518,28 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
- int err;
-
- if (!sg->length) {
- err = -EINVAL;
- goto exit;
- }
+ if (!sg->length)
+ return -EINVAL;
- err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
- if (!err) {
- err = -ENOMEM;
- goto exit;
- }
+ if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
+ return -ENOMEM;
dev->sg_dst = sg;
- err = 0;
-exit:
- return err;
+ return 0;
}
static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
- int err;
-
- if (!sg->length) {
- err = -EINVAL;
- goto exit;
- }
+ if (!sg->length)
+ return -EINVAL;
- err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
- if (!err) {
- err = -ENOMEM;
- goto exit;
- }
+ if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
+ return -ENOMEM;
dev->sg_src = sg;
- err = 0;
-exit:
- return err;
+ return 0;
}
/*
@@ -655,14 +637,14 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
+ struct ablkcipher_request *req;
int err_dma_tx = 0;
int err_dma_rx = 0;
int err_dma_hx = 0;
bool tx_end = false;
bool hx_end = false;
unsigned long flags;
- uint32_t status;
- u32 st_bits;
+ u32 status, st_bits;
int err;
spin_lock_irqsave(&dev->lock, flags);
@@ -727,7 +709,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, 0);
+ s5p_aes_complete(dev->req, 0);
/* Device is still busy */
tasklet_schedule(&dev->tasklet);
} else {
@@ -752,11 +734,12 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
error:
s5p_sg_done(dev);
dev->busy = false;
+ req = dev->req;
if (err_dma_hx == 1)
s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, err);
+ s5p_aes_complete(req, err);
hash_irq_end:
/*
@@ -1830,7 +1813,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
};
static void s5p_set_aes(struct s5p_aes_dev *dev,
- const uint8_t *key, const uint8_t *iv,
+ const u8 *key, const u8 *iv, const u8 *ctr,
unsigned int keylen)
{
void __iomem *keystart;
@@ -1838,6 +1821,9 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
if (iv)
memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+ if (ctr)
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10);
+
if (keylen == AES_KEYSIZE_256)
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
else if (keylen == AES_KEYSIZE_192)
@@ -1887,7 +1873,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev,
}
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct ablkcipher_request *req)
{
struct scatterlist *sg;
int err;
@@ -1916,11 +1902,12 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
struct ablkcipher_request *req = dev->req;
- uint32_t aes_control;
+ u32 aes_control;
unsigned long flags;
int err;
- u8 *iv;
+ u8 *iv, *ctr;
+ /* This sets bit [13:12] to 00, which selects 128-bit counter */
aes_control = SSS_AES_KEY_CHANGE_MODE;
if (mode & FLAGS_AES_DECRYPT)
aes_control |= SSS_AES_MODE_DECRYPT;
@@ -1928,11 +1915,14 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
iv = req->info;
+ ctr = NULL;
} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
- iv = req->info;
+ iv = NULL;
+ ctr = req->info;
} else {
iv = NULL; /* AES_ECB */
+ ctr = NULL;
}
if (dev->ctx->keylen == AES_KEYSIZE_192)
@@ -1964,7 +1954,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
goto outdata_error;
SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
- s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
+ s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
s5p_set_dma_indata(dev, dev->sg_src);
s5p_set_dma_outdata(dev, dev->sg_dst);
@@ -1983,7 +1973,7 @@ indata_error:
s5p_sg_done(dev);
dev->busy = false;
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, err);
+ s5p_aes_complete(req, err);
}
static void s5p_tasklet_cb(unsigned long data)
@@ -2024,7 +2014,7 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
err = ablkcipher_enqueue_request(&dev->queue, req);
if (dev->busy) {
spin_unlock_irqrestore(&dev->lock, flags);
- goto exit;
+ return err;
}
dev->busy = true;
@@ -2032,7 +2022,6 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
tasklet_schedule(&dev->tasklet);
-exit:
return err;
}
@@ -2043,7 +2032,8 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct s5p_aes_dev *dev = ctx->dev;
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
+ ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
return -EINVAL;
}
@@ -2054,7 +2044,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
}
static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
- const uint8_t *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2090,6 +2080,11 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}
+static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
+{
+ return s5p_aes_crypt(req, FLAGS_AES_CTR);
+}
+
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2144,6 +2139,28 @@ static struct crypto_alg algs[] = {
.decrypt = s5p_aes_cbc_decrypt,
}
},
+ {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-s5p",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_ctr_crypt,
+ .decrypt = s5p_aes_ctr_crypt,
+ }
+ },
};
static int s5p_aes_probe(struct platform_device *pdev)
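
A note on the new "ctr-aes-s5p" algorithm: CTR turns AES into a stream cipher, so s5p_aes_crypt() above skips the AES_BLOCK_SIZE alignment check for FLAGS_AES_CTR, the IV is loaded into the counter registers (SSS_REG_AES_CNT_DATA) rather than the IV registers, and encrypt and decrypt are the same operation, which is why both ops point at s5p_aes_ctr_crypt().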
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index e7540a5b8197..bbf166a97ad3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -149,7 +149,7 @@ struct sahara_ctx {
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
struct sahara_aes_reqctx {
@@ -621,14 +621,14 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback.
*/
- crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
+ tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
CRYPTO_TFM_RES_MASK;
return ret;
}
@@ -666,9 +666,9 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -688,9 +688,9 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -710,9 +710,9 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -732,9 +732,9 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -752,8 +752,7 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
const char *name = crypto_tfm_alg_name(tfm);
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->fallback = crypto_alloc_skcipher(name, 0,
- CRYPTO_ALG_ASYNC |
+ ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("Error allocating fallback algo %s\n", name);
@@ -769,7 +768,7 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
}
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index b71895871be3..c5c5ff82b52e 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -32,7 +32,7 @@
#include "aesp8-ppc.h"
struct p8_aes_cbc_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
};
@@ -40,11 +40,11 @@ struct p8_aes_cbc_ctx {
static int p8_aes_cbc_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
@@ -53,7 +53,7 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -66,7 +66,7 @@ static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -86,7 +86,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -100,8 +100,8 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_encrypt(req);
@@ -139,8 +139,8 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_decrypt(req);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index cd777c75291d..8a2fe092cb8e 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -32,18 +32,18 @@
#include "aesp8-ppc.h"
struct p8_aes_ctr_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
};
static int p8_aes_ctr_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -51,7 +51,7 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -64,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -83,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -119,8 +119,8 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_encrypt(req);
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index e9954a7d4694..ecd64e5cc5bb 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -33,7 +33,7 @@
#include "aesp8-ppc.h"
struct p8_aes_xts_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
struct aes_key tweak_key;
@@ -42,11 +42,11 @@ struct p8_aes_xts_ctx {
static int p8_aes_xts_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -54,7 +54,7 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -67,7 +67,7 @@ static void p8_aes_xts_exit(struct crypto_tfm *tfm)
struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -92,7 +92,7 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -109,8 +109,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
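
The conversions above all follow one pattern: the fallback tfm becomes a crypto_sync_skcipher, CRYPTO_ALG_ASYNC is dropped from the allocation mask (a sync tfm is synchronous by construction), and the on-stack request is built with SYNC_SKCIPHER_REQUEST_ON_STACK plus skcipher_request_set_sync_tfm. A minimal sketch of that pattern, with hypothetical example_* names that are not part of the patch:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_ctx {
	struct crypto_sync_skcipher *fallback;
};

static int example_init_fallback(struct example_ctx *ctx, const char *name)
{
	/* No CRYPTO_ALG_ASYNC mask needed: sync tfms never go async. */
	ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

static int example_encrypt_fallback(struct example_ctx *ctx,
				    struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int nbytes, void *iv, u32 flags)
{
	/* Request lives on the stack; sized for the sync tfm. */
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int err;

	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);
	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);
	return err;
}
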
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index dacf3f42426d..de511db021cc 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -143,7 +143,7 @@ config DMA_JZ4740
config DMA_JZ4780
tristate "JZ4780 DMA support"
- depends on MACH_JZ4780 || COMPILE_TEST
+ depends on MIPS || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -321,6 +321,17 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
+config MCF_EDMA
+ tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
+ depends on M5441x || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the Freescale ColdFire eDMA engine, 64-channel
+ implementation that performs complex data transfers with
+ minimal intervention from a host processor.
+ This module can be found on Freescale ColdFire mcf5441x SoCs.
+
config MMP_PDMA
bool "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c91702d88b95..7fcc4d8e336d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -31,7 +31,8 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
+obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 75f38d19fcbe..7cbac6e8c113 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1320,7 +1320,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
if (unlikely(!is_slave_direction(direction)))
goto err_out;
- if (sconfig->direction == DMA_MEM_TO_DEV)
+ if (direction == DMA_MEM_TO_DEV)
reg_width = convert_buswidth(sconfig->dst_addr_width);
else
reg_width = convert_buswidth(sconfig->src_addr_width);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4bf72561667c..4e557684f792 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1600,7 +1600,7 @@ static void at_xdmac_tasklet(unsigned long data)
if (atchan->status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
- spin_lock_bh(&atchan->lock);
+ spin_lock(&atchan->lock);
desc = list_first_entry(&atchan->xfers_list,
struct at_xdmac_desc,
xfer_node);
@@ -1610,7 +1610,7 @@ static void at_xdmac_tasklet(unsigned long data)
txd = &desc->tx_dma_desc;
at_xdmac_remove_xfer(atchan, desc);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock(&atchan->lock);
if (!at_xdmac_chan_is_cyclic(atchan)) {
dma_cookie_complete(txd);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 847f84a41a69..cad55ab80d41 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -778,14 +778,6 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
- if ((cfg->direction == DMA_DEV_TO_MEM &&
- cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- (cfg->direction == DMA_MEM_TO_DEV &&
- cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- !is_slave_direction(cfg->direction)) {
- return -EINVAL;
- }
-
c->cfg = *cfg;
return 0;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index da74fd74636b..eebaba3d9e78 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1306,6 +1306,7 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
+ struct dma_slave_config config;
u32 addr;
u32 ctrl;
@@ -1402,6 +1403,10 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
return container_of(chan, struct coh901318_chan, chan);
}
+static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
+ struct dma_slave_config *config,
+ enum dma_transfer_direction direction);
+
static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
@@ -2360,6 +2365,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (lli == NULL)
goto err_dma_alloc;
+ coh901318_dma_set_runtimeconfig(chan, &cohc->config, direction);
+
/* initiate allocated lli list */
ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
cohc->addr,
@@ -2499,7 +2506,8 @@ static const struct burst_table burst_sizes[] = {
};
static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
- struct dma_slave_config *config)
+ struct dma_slave_config *config,
+ enum dma_transfer_direction direction)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
dma_addr_t addr;
@@ -2509,11 +2517,11 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
int i = 0;
/* We only support mem to per or per to mem transfers */
- if (config->direction == DMA_DEV_TO_MEM) {
+ if (direction == DMA_DEV_TO_MEM) {
addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
- } else if (config->direction == DMA_MEM_TO_DEV) {
+ } else if (direction == DMA_MEM_TO_DEV) {
addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
@@ -2579,6 +2587,16 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
return 0;
}
+static int coh901318_dma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+
+ memcpy(&cohc->config, config, sizeof(*config));
+
+ return 0;
+}
+
static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
{
@@ -2684,7 +2702,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
base->dma_slave.device_tx_status = coh901318_tx_status;
base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_config = coh901318_dma_set_runtimeconfig;
+ base->dma_slave.device_config = coh901318_dma_slave_config;
base->dma_slave.device_pause = coh901318_pause;
base->dma_slave.device_resume = coh901318_resume;
base->dma_slave.device_terminate_all = coh901318_terminate_all;
@@ -2707,7 +2725,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
base->dma_memcpy.device_tx_status = coh901318_tx_status;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig;
+ base->dma_memcpy.device_config = coh901318_dma_slave_config;
base->dma_memcpy.device_pause = coh901318_pause;
base->dma_memcpy.device_resume = coh901318_resume;
base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
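
The coh901318 change above, like the jz4740 and ep93xx changes further down, splits direction handling out of the device_config callback: dma_slave_config.direction is deprecated, so device_config now only caches the config, and the prep callbacks apply it using the direction they were given explicitly. A minimal sketch of the split, with hypothetical foo_* names:

#include <linux/dmaengine.h>
#include <linux/string.h>

struct foo_chan {
	struct dma_chan chan;
	struct dma_slave_config config;	/* cached, applied at prep time */
};

#define to_foo_chan(c) container_of(c, struct foo_chan, chan)

/* device_config callback: just cache; the direction is not known here. */
static int foo_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct foo_chan *fchan = to_foo_chan(chan);

	memcpy(&fchan->config, config, sizeof(*config));
	return 0;
}

/*
 * Called from prep_slave_sg()/prep_dma_cyclic(), which know the real
 * transfer direction for this descriptor.
 */
static int foo_config_write(struct foo_chan *fchan,
			    enum dma_transfer_direction dir)
{
	struct dma_slave_config *cfg = &fchan->config;
	dma_addr_t addr;

	if (dir == DMA_DEV_TO_MEM)
		addr = cfg->src_addr;
	else if (dir == DMA_MEM_TO_DEV)
		addr = cfg->dst_addr;
	else
		return -EINVAL;

	/* ...program the hardware from addr, addr_width, maxburst... */
	(void)addr;
	return 0;
}
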
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index afd5e10f8927..5253e3c0dc04 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -113,6 +113,7 @@ struct jz4740_dma_desc {
struct jz4740_dmaengine_chan {
struct virt_dma_chan vchan;
unsigned int id;
+ struct dma_slave_config config;
dma_addr_t fifo_addr;
unsigned int transfer_shift;
@@ -203,8 +204,9 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
}
-static int jz4740_dma_slave_config(struct dma_chan *c,
- struct dma_slave_config *config)
+static int jz4740_dma_slave_config_write(struct dma_chan *c,
+ struct dma_slave_config *config,
+ enum dma_transfer_direction direction)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -214,7 +216,7 @@ static int jz4740_dma_slave_config(struct dma_chan *c,
enum jz4740_dma_flags flags;
uint32_t cmd;
- switch (config->direction) {
+ switch (direction) {
case DMA_MEM_TO_DEV:
flags = JZ4740_DMA_SRC_AUTOINC;
transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
@@ -265,6 +267,15 @@ static int jz4740_dma_slave_config(struct dma_chan *c,
return 0;
}
+static int jz4740_dma_slave_config(struct dma_chan *c,
+ struct dma_slave_config *config)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+
+ memcpy(&chan->config, config, sizeof(*config));
+ return 0;
+}
+
static int jz4740_dma_terminate_all(struct dma_chan *c)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
@@ -407,6 +418,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
desc->direction = direction;
desc->cyclic = false;
+ jz4740_dma_slave_config_write(c, &chan->config, direction);
+
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
@@ -438,6 +451,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
desc->direction = direction;
desc->cyclic = true;
+ jz4740_dma_slave_config_write(c, &chan->config, direction);
+
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 85820a2d69d4..a8b6225faa12 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -23,33 +24,35 @@
#include "dmaengine.h"
#include "virt-dma.h"
-#define JZ_DMA_NR_CHANNELS 32
-
/* Global registers. */
-#define JZ_DMA_REG_DMAC 0x1000
-#define JZ_DMA_REG_DIRQP 0x1004
-#define JZ_DMA_REG_DDR 0x1008
-#define JZ_DMA_REG_DDRS 0x100c
-#define JZ_DMA_REG_DMACP 0x101c
-#define JZ_DMA_REG_DSIRQP 0x1020
-#define JZ_DMA_REG_DSIRQM 0x1024
-#define JZ_DMA_REG_DCIRQP 0x1028
-#define JZ_DMA_REG_DCIRQM 0x102c
+#define JZ_DMA_REG_DMAC 0x00
+#define JZ_DMA_REG_DIRQP 0x04
+#define JZ_DMA_REG_DDR 0x08
+#define JZ_DMA_REG_DDRS 0x0c
+#define JZ_DMA_REG_DCKE 0x10
+#define JZ_DMA_REG_DCKES 0x14
+#define JZ_DMA_REG_DCKEC 0x18
+#define JZ_DMA_REG_DMACP 0x1c
+#define JZ_DMA_REG_DSIRQP 0x20
+#define JZ_DMA_REG_DSIRQM 0x24
+#define JZ_DMA_REG_DCIRQP 0x28
+#define JZ_DMA_REG_DCIRQM 0x2c
/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n) (n * 0x20)
-#define JZ_DMA_REG_DSA(n) (0x00 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DTA(n) (0x04 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DTC(n) (0x08 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DRT(n) (0x0c + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DCS(n) (0x10 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DCM(n) (0x14 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DDA(n) (0x18 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DSD(n) (0x1c + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DSA 0x00
+#define JZ_DMA_REG_DTA 0x04
+#define JZ_DMA_REG_DTC 0x08
+#define JZ_DMA_REG_DRT 0x0c
+#define JZ_DMA_REG_DCS 0x10
+#define JZ_DMA_REG_DCM 0x14
+#define JZ_DMA_REG_DDA 0x18
+#define JZ_DMA_REG_DSD 0x1c
#define JZ_DMA_DMAC_DMAE BIT(0)
#define JZ_DMA_DMAC_AR BIT(2)
#define JZ_DMA_DMAC_HLT BIT(3)
+#define JZ_DMA_DMAC_FAIC BIT(27)
#define JZ_DMA_DMAC_FMSC BIT(31)
#define JZ_DMA_DRT_AUTO 0x8
@@ -86,6 +89,14 @@
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+#define JZ4780_DMA_CTRL_OFFSET 0x1000
+
+/* macros for use with jz4780_dma_soc_data.flags */
+#define JZ_SOC_DATA_ALLOW_LEGACY_DT BIT(0)
+#define JZ_SOC_DATA_PROGRAMMABLE_DMA BIT(1)
+#define JZ_SOC_DATA_PER_CHAN_PM BIT(2)
+#define JZ_SOC_DATA_NO_DCKES_DCKEC BIT(3)
+
/**
* struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
* @dcm: value for the DCM (channel command) register
@@ -94,17 +105,12 @@
* @dtc: transfer count (number of blocks of the transfer size specified in DCM
* to transfer) in the low 24 bits, offset of the next descriptor from the
* descriptor base address in the upper 8 bits.
- * @sd: target/source stride difference (in stride transfer mode).
- * @drt: request type
*/
struct jz4780_dma_hwdesc {
uint32_t dcm;
uint32_t dsa;
uint32_t dta;
uint32_t dtc;
- uint32_t sd;
- uint32_t drt;
- uint32_t reserved[2];
};
/* Size of allocations for hardware descriptor blocks. */
@@ -135,14 +141,22 @@ struct jz4780_dma_chan {
unsigned int curr_hwdesc;
};
+struct jz4780_dma_soc_data {
+ unsigned int nb_channels;
+ unsigned int transfer_ord_max;
+ unsigned long flags;
+};
+
struct jz4780_dma_dev {
struct dma_device dma_device;
- void __iomem *base;
+ void __iomem *chn_base;
+ void __iomem *ctrl_base;
struct clk *clk;
unsigned int irq;
+ const struct jz4780_dma_soc_data *soc_data;
uint32_t chan_reserved;
- struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
+ struct jz4780_dma_chan chan[];
};
struct jz4780_dma_filter_data {
@@ -169,16 +183,51 @@ static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
dma_device);
}
-static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
+static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
+ unsigned int chn, unsigned int reg)
+{
+ return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
+}
+
+static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
+ unsigned int chn, unsigned int reg, uint32_t val)
+{
+ writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
+}
+
+static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
unsigned int reg)
{
- return readl(jzdma->base + reg);
+ return readl(jzdma->ctrl_base + reg);
}
-static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
+static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
unsigned int reg, uint32_t val)
{
- writel(val, jzdma->base + reg);
+ writel(val, jzdma->ctrl_base + reg);
+}
+
+static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
+ unsigned int chn)
+{
+ if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
+ unsigned int reg;
+
+ if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
+ reg = JZ_DMA_REG_DCKE;
+ else
+ reg = JZ_DMA_REG_DCKES;
+
+ jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
+ }
+}
+
+static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
+ unsigned int chn)
+{
+ if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
+ !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}
static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
@@ -215,8 +264,10 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(desc);
}
-static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
+static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
+ unsigned long val, uint32_t *shift)
{
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
int ord = ffs(val) - 1;
/*
@@ -228,8 +279,8 @@ static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
*/
if (ord == 3)
ord = 2;
- else if (ord > 7)
- ord = 7;
+ else if (ord > jzdma->soc_data->transfer_ord_max)
+ ord = jzdma->soc_data->transfer_ord_max;
*shift = ord;
@@ -262,7 +313,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
desc->dcm = JZ_DMA_DCM_SAI;
desc->dsa = addr;
desc->dta = config->dst_addr;
- desc->drt = jzchan->transfer_type;
width = config->dst_addr_width;
maxburst = config->dst_maxburst;
@@ -270,7 +320,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
desc->dcm = JZ_DMA_DCM_DAI;
desc->dsa = config->src_addr;
desc->dta = addr;
- desc->drt = jzchan->transfer_type;
width = config->src_addr_width;
maxburst = config->src_maxburst;
@@ -283,7 +332,7 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
* divisible by the transfer size, and we must not use more than the
* maximum burst specified by the user.
*/
- tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
+ tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
&jzchan->transfer_shift);
switch (width) {
@@ -412,12 +461,13 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
if (!desc)
return NULL;
- tsz = jz4780_dma_transfer_size(dest | src | len,
+ tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
&jzchan->transfer_shift);
+ jzchan->transfer_type = JZ_DMA_DRT_AUTO;
+
desc->desc[0].dsa = src;
desc->desc[0].dta = dest;
- desc->desc[0].drt = JZ_DMA_DRT_AUTO;
desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
tsz << JZ_DMA_DCM_TSZ_SHIFT |
JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
@@ -472,18 +522,34 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
}
- /* Use 8-word descriptors. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);
+ /* Enable the channel's clock. */
+ jz4780_dma_chan_enable(jzdma, jzchan->id);
+
+ /* Use 4-word descriptors. */
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
+
+ /* Set transfer type. */
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
+ jzchan->transfer_type);
+
+ /*
+ * Set the transfer count. This is redundant for a descriptor-driven
+ * transfer. However, there can be a delay between the transfer start
+ * time and when the DTCn register contains the new transfer count.
+ * Setting it explicitly ensures residue is computed correctly at all times.
+ */
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
+ jzchan->desc->desc[jzchan->curr_hwdesc].dtc);
/* Write descriptor address and initiate descriptor fetch. */
desc_phys = jzchan->desc->desc_phys +
(jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
/* Enable the channel. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
- JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
+ JZ_DMA_DCS_CTE);
}
static void jz4780_dma_issue_pending(struct dma_chan *chan)
@@ -509,12 +575,14 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&jzchan->vchan.lock, flags);
/* Clear the DMA status and stop the transfer. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
if (jzchan->desc) {
vchan_terminate_vdesc(&jzchan->desc->vdesc);
jzchan->desc = NULL;
}
+ jz4780_dma_chan_disable(jzdma, jzchan->id);
+
vchan_get_all_descriptors(&jzchan->vchan, &head);
spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
@@ -526,8 +594,10 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
static void jz4780_dma_synchronize(struct dma_chan *chan)
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
vchan_synchronize(&jzchan->vchan);
+ jz4780_dma_chan_disable(jzdma, jzchan->id);
}
static int jz4780_dma_config(struct dma_chan *chan,
@@ -549,21 +619,17 @@ static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
struct jz4780_dma_desc *desc, unsigned int next_sg)
{
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
- unsigned int residue, count;
+ unsigned int count = 0;
unsigned int i;
- residue = 0;
-
for (i = next_sg; i < desc->count; i++)
- residue += desc->desc[i].dtc << jzchan->transfer_shift;
+ count += desc->desc[i].dtc & GENMASK(23, 0);
- if (next_sg != 0) {
- count = jz4780_dma_readl(jzdma,
- JZ_DMA_REG_DTC(jzchan->id));
- residue += count << jzchan->transfer_shift;
- }
+ if (next_sg != 0)
+ count += jz4780_dma_chn_readl(jzdma, jzchan->id,
+ JZ_DMA_REG_DTC);
- return residue;
+ return count << jzchan->transfer_shift;
}
static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
@@ -573,6 +639,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
struct virt_dma_desc *vdesc;
enum dma_status status;
unsigned long flags;
+ unsigned long residue = 0;
status = dma_cookie_status(chan, cookie, txstate);
if ((status == DMA_COMPLETE) || (txstate == NULL))
@@ -583,13 +650,13 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
vdesc = vchan_find_desc(&jzchan->vchan, cookie);
if (vdesc) {
/* On the issued list, so hasn't been processed yet */
- txstate->residue = jz4780_dma_desc_residue(jzchan,
+ residue = jz4780_dma_desc_residue(jzchan,
to_jz4780_dma_desc(vdesc), 0);
} else if (cookie == jzchan->desc->vdesc.tx.cookie) {
- txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
- (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
- } else
- txstate->residue = 0;
+ residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
+ jzchan->curr_hwdesc + 1);
+ }
+ dma_set_residue(txstate, residue);
if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
&& jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
@@ -606,8 +673,8 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
spin_lock(&jzchan->vchan.lock);
- dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+ dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
if (dcs & JZ_DMA_DCS_AR) {
dev_warn(&jzchan->vchan.chan.dev->device,
@@ -646,9 +713,9 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
uint32_t pending, dmac;
int i;
- pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);
+ pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
- for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+ for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
if (!(pending & (1<<i)))
continue;
@@ -656,12 +723,12 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
}
/* Clear halt and address error status of all channels. */
- dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
+ dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
/* Clear interrupt pending status. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
return IRQ_HANDLED;
}
@@ -728,7 +795,7 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
data.channel = dma_spec->args[1];
if (data.channel > -1) {
- if (data.channel >= JZ_DMA_NR_CHANNELS) {
+ if (data.channel >= jzdma->soc_data->nb_channels) {
dev_err(jzdma->dma_device.dev,
"device requested non-existent channel %u\n",
data.channel);
@@ -755,16 +822,29 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
static int jz4780_dma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct jz4780_dma_soc_data *soc_data;
struct jz4780_dma_dev *jzdma;
struct jz4780_dma_chan *jzchan;
struct dma_device *dd;
struct resource *res;
int i, ret;
- jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
+ if (!dev->of_node) {
+ dev_err(dev, "This driver must be probed from devicetree\n");
+ return -EINVAL;
+ }
+
+ soc_data = device_get_match_data(dev);
+ if (!soc_data)
+ return -EINVAL;
+
+ jzdma = devm_kzalloc(dev, sizeof(*jzdma)
+ + sizeof(*jzdma->chan) * soc_data->nb_channels,
+ GFP_KERNEL);
if (!jzdma)
return -ENOMEM;
+ jzdma->soc_data = soc_data;
platform_set_drvdata(pdev, jzdma);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -773,9 +853,26 @@ static int jz4780_dma_probe(struct platform_device *pdev)
return -EINVAL;
}
- jzdma->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(jzdma->base))
- return PTR_ERR(jzdma->base);
+ jzdma->chn_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(jzdma->chn_base))
+ return PTR_ERR(jzdma->chn_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ jzdma->ctrl_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(jzdma->ctrl_base))
+ return PTR_ERR(jzdma->ctrl_base);
+ } else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
+ /*
+ * On JZ4780, if the second memory resource was not supplied,
+ * assume we're using an old devicetree, and calculate the
+ * offset to the control registers.
+ */
+ jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
+ } else {
+ dev_err(dev, "failed to get I/O memory\n");
+ return -EINVAL;
+ }
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
@@ -833,13 +930,15 @@ static int jz4780_dma_probe(struct platform_device *pdev)
* Also set the FMSC bit - it increases MSC performance, so it makes
* little sense not to enable it.
*/
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
- JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
+ JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);
+
+ if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);
INIT_LIST_HEAD(&dd->channels);
- for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+ for (i = 0; i < soc_data->nb_channels; i++) {
jzchan = &jzdma->chan[i];
jzchan->id = i;
@@ -847,7 +946,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzchan->vchan.desc_free = jz4780_dma_desc_free;
}
- ret = dma_async_device_register(dd);
+ ret = dmaenginem_async_device_register(dd);
if (ret) {
dev_err(dev, "failed to register device\n");
goto err_disable_clk;
@@ -858,15 +957,12 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzdma);
if (ret) {
dev_err(dev, "failed to register OF DMA controller\n");
- goto err_unregister_dev;
+ goto err_disable_clk;
}
dev_info(dev, "JZ4780 DMA controller initialised\n");
return 0;
-err_unregister_dev:
- dma_async_device_unregister(dd);
-
err_disable_clk:
clk_disable_unprepare(jzdma->clk);
@@ -884,15 +980,40 @@ static int jz4780_dma_remove(struct platform_device *pdev)
free_irq(jzdma->irq, jzdma);
- for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
+ for (i = 0; i < jzdma->soc_data->nb_channels; i++)
tasklet_kill(&jzdma->chan[i].vchan.task);
- dma_async_device_unregister(&jzdma->dma_device);
return 0;
}
+static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
+ .nb_channels = 6,
+ .transfer_ord_max = 5,
+};
+
+static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
+ .nb_channels = 6,
+ .transfer_ord_max = 5,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
+ .nb_channels = 6,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
+ .nb_channels = 32,
+ .transfer_ord_max = 7,
+ .flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
static const struct of_device_id jz4780_dma_dt_match[] = {
- { .compatible = "ingenic,jz4780-dma", .data = NULL },
+ { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
+ { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
+ { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
+ { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
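
The jz4780 probe now looks up per-SoC parameters through the of_device_id .data pointer and sizes the channel array at runtime with a flexible array member, replacing the fixed JZ_DMA_NR_CHANNELS. A minimal sketch of that lookup and allocation, with hypothetical foo_* names:

#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>

struct foo_chan {
	unsigned int id;
};

struct foo_soc_data {
	unsigned int nb_channels;
};

struct foo_dev {
	const struct foo_soc_data *soc_data;
	struct foo_chan chan[];		/* flexible array, sized at probe */
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_soc_data *soc_data;
	struct foo_dev *foo;

	/* Returns .data from the matching of_device_id entry. */
	soc_data = device_get_match_data(&pdev->dev);
	if (!soc_data)
		return -EINVAL;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo)
			   + sizeof(*foo->chan) * soc_data->nb_channels,
			   GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->soc_data = soc_data;
	return 0;
}
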
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index c4eb55e3011c..b2ac1d2c5b86 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -934,7 +934,7 @@ static int dw_probe(struct platform_device *pdev)
pm_runtime_put(chip->dev);
- ret = dma_async_device_register(&dw->dma);
+ ret = dmaenginem_async_device_register(&dw->dma);
if (ret)
goto err_pm_disable;
@@ -977,8 +977,6 @@ static int dw_remove(struct platform_device *pdev)
tasklet_kill(&chan->vc.task);
}
- dma_async_device_unregister(&dw->dma);
-
return 0;
}
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index f43e6dafe446..d0c3e50b39fb 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -886,12 +886,7 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
*/
u32 s = dw->pdata->is_idma32 ? 1 : 2;
- /* Check if chan will be configured for slave transfers */
- if (!is_slave_direction(sconfig->direction))
- return -EINVAL;
-
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
- dwc->direction = sconfig->direction;
sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index f62dd0944908..f01b2c173fa6 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -284,6 +284,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "INTL9C60", 0 },
+ { "80862286", 0 },
+ { "808622C0", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index a15592383d4e..f674eb5fbbef 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -109,6 +109,9 @@
#define DMA_MAX_CHAN_DESCRIPTORS 32
struct ep93xx_dma_engine;
+static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *config);
/**
* struct ep93xx_dma_desc - EP93xx specific transaction descriptor
@@ -180,6 +183,7 @@ struct ep93xx_dma_chan {
struct list_head free_list;
u32 runtime_addr;
u32 runtime_ctrl;
+ struct dma_slave_config slave_config;
};
/**
@@ -1051,6 +1055,8 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
+ ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
+
first = NULL;
for_each_sg(sgl, sg, sg_len, i) {
size_t len = sg_dma_len(sg);
@@ -1136,6 +1142,8 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
return NULL;
}
+ ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
+
/* Split the buffer into period size chunks */
first = NULL;
for (offset = 0; offset < buf_len; offset += period_len) {
@@ -1227,6 +1235,17 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+ memcpy(&edmac->slave_config, config, sizeof(*config));
+
+ return 0;
+}
+
+static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *config)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
enum dma_slave_buswidth width;
unsigned long flags;
u32 addr, ctrl;
@@ -1234,7 +1253,7 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan,
if (!edmac->edma->m2m)
return -EINVAL;
- switch (config->direction) {
+ switch (dir) {
case DMA_DEV_TO_MEM:
width = config->src_addr_width;
addr = config->src_addr;
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
new file mode 100644
index 000000000000..8876c4c1bb2c
--- /dev/null
+++ b/drivers/dma/fsl-edma-common.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
+// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
+
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "fsl-edma-common.h"
+
+#define EDMA_CR 0x00
+#define EDMA_ES 0x04
+#define EDMA_ERQ 0x0C
+#define EDMA_EEI 0x14
+#define EDMA_SERQ 0x1B
+#define EDMA_CERQ 0x1A
+#define EDMA_SEEI 0x19
+#define EDMA_CEEI 0x18
+#define EDMA_CINT 0x1F
+#define EDMA_CERR 0x1E
+#define EDMA_SSRT 0x1D
+#define EDMA_CDNE 0x1C
+#define EDMA_INTR 0x24
+#define EDMA_ERR 0x2C
+
+#define EDMA64_ERQH 0x08
+#define EDMA64_EEIH 0x10
+#define EDMA64_SERQ 0x18
+#define EDMA64_CERQ 0x19
+#define EDMA64_SEEI 0x1a
+#define EDMA64_CEEI 0x1b
+#define EDMA64_CINT 0x1c
+#define EDMA64_CERR 0x1d
+#define EDMA64_SSRT 0x1e
+#define EDMA64_CDNE 0x1f
+#define EDMA64_INTH 0x20
+#define EDMA64_INTL 0x24
+#define EDMA64_ERRH 0x28
+#define EDMA64_ERRL 0x2c
+
+#define EDMA_TCD 0x1000
+
+static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ if (fsl_chan->edma->version == v1) {
+ edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
+ edma_writeb(fsl_chan->edma, ch, regs->serq);
+ } else {
+ /* ColdFire is big-endian and natively accesses
+ * big-endian I/O peripherals.
+ */
+ iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
+ iowrite8(ch, regs->serq);
+ }
+}
+
+void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ if (fsl_chan->edma->version == v1) {
+ edma_writeb(fsl_chan->edma, ch, regs->cerq);
+ edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
+ } else {
+ /* ColdFire is big-endian and natively accesses
+ * big-endian I/O peripherals.
+ */
+ iowrite8(ch, regs->cerq);
+ iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
+ }
+}
+EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
+
+void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
+ unsigned int slot, bool enable)
+{
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+ void __iomem *muxaddr;
+ unsigned int chans_per_mux, ch_off;
+
+ chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
+ ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+ muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
+ slot = EDMAMUX_CHCFG_SOURCE(slot);
+
+ if (enable)
+ iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
+ else
+ iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
+
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+{
+ switch (addr_width) {
+ case 1:
+ return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
+ case 2:
+ return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
+ case 4:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ case 8:
+ return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
+ default:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ }
+}
+
+void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct fsl_edma_desc *fsl_desc;
+ int i;
+
+ fsl_desc = to_fsl_edma_desc(vdesc);
+ for (i = 0; i < fsl_desc->n_tcds; i++)
+ dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
+
+int fsl_edma_terminate_all(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma_disable_request(fsl_chan);
+ fsl_chan->edesc = NULL;
+ fsl_chan->idle = true;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
+
+int fsl_edma_pause(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma_disable_request(fsl_chan);
+ fsl_chan->status = DMA_PAUSED;
+ fsl_chan->idle = true;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_pause);
+
+int fsl_edma_resume(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_resume);
+
+int fsl_edma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+ memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
+
+static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
+ struct virt_dma_desc *vdesc, bool in_progress)
+{
+ struct fsl_edma_desc *edesc = fsl_chan->edesc;
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+ enum dma_transfer_direction dir = edesc->dirn;
+ dma_addr_t cur_addr, dma_addr;
+ size_t len, size;
+ int i;
+
+ /* calculate the total size in this desc */
+ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
+ len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+
+ if (!in_progress)
+ return len;
+
+ if (dir == DMA_MEM_TO_DEV)
+ cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
+ else
+ cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
+
+ /* figure out which TCDs have finished and calculate the residue */
+ for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+ size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ if (dir == DMA_MEM_TO_DEV)
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
+ else
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
+
+ len -= size;
+ if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
+ len += dma_addr + size - cur_addr;
+ break;
+ }
+ }
+
+ return len;
+}
+
+enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ if (!txstate)
+ return fsl_chan->status;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
+ if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
+ txstate->residue =
+ fsl_edma_desc_residue(fsl_chan, vdesc, true);
+ else if (vdesc)
+ txstate->residue =
+ fsl_edma_desc_residue(fsl_chan, vdesc, false);
+ else
+ txstate->residue = 0;
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ return fsl_chan->status;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
+
+static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd)
+{
+ struct fsl_edma_engine *edma = fsl_chan->edma;
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ /*
+ * TCD parameters are stored in struct fsl_edma_hw_tcd in little
+ * endian format. However, the TCD registers must be loaded in big- or
+ * little-endian order according to the endianness of the eDMA engine model.
+ */
+ edma_writew(edma, 0, &regs->tcd[ch].csr);
+ edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
+ edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
+
+ edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
+ edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
+
+ edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
+ edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
+
+ edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
+ edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
+ edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
+
+ edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
+ &regs->tcd[ch].dlast_sga);
+
+ edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
+}
+
+static inline
+void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+ u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+ u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+ bool disable_req, bool enable_sg)
+{
+ u16 csr = 0;
+
+ /*
+ * eDMA hardware SGs require the TCDs to be stored in little
+ * endian format irrespective of the register endianness.
+ * So we store the values in little endian in memory and let
+ * fsl_edma_set_tcd_regs() do the swap.
+ */
+ tcd->saddr = cpu_to_le32(src);
+ tcd->daddr = cpu_to_le32(dst);
+
+ tcd->attr = cpu_to_le16(attr);
+
+ tcd->soff = cpu_to_le16(soff);
+
+ tcd->nbytes = cpu_to_le32(nbytes);
+ tcd->slast = cpu_to_le32(slast);
+
+ tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
+ tcd->doff = cpu_to_le16(doff);
+
+ tcd->dlast_sga = cpu_to_le32(dlast_sga);
+
+ tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
+ if (major_int)
+ csr |= EDMA_TCD_CSR_INT_MAJOR;
+
+ if (disable_req)
+ csr |= EDMA_TCD_CSR_D_REQ;
+
+ if (enable_sg)
+ csr |= EDMA_TCD_CSR_E_SG;
+
+ tcd->csr = cpu_to_le16(csr);
+}
+
+static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
+ int sg_len)
+{
+ struct fsl_edma_desc *fsl_desc;
+ int i;
+
+ fsl_desc = kzalloc(sizeof(*fsl_desc) +
+ sizeof(struct fsl_edma_sw_tcd) *
+ sg_len, GFP_NOWAIT);
+ if (!fsl_desc)
+ return NULL;
+
+ fsl_desc->echan = fsl_chan;
+ fsl_desc->n_tcds = sg_len;
+ for (i = 0; i < sg_len; i++) {
+ fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
+ GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
+ if (!fsl_desc->tcd[i].vtcd)
+ goto err;
+ }
+ return fsl_desc;
+
+err:
+ while (--i >= 0)
+ dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+ return NULL;
+}
+
+struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct fsl_edma_desc *fsl_desc;
+ dma_addr_t dma_buf_next;
+ int sg_len, i;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ sg_len = buf_len / period_len;
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = true;
+ fsl_desc->dirn = direction;
+
+ dma_buf_next = dma_addr;
+ if (direction == DMA_MEM_TO_DEV) {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+ nbytes = fsl_chan->cfg.dst_addr_width *
+ fsl_chan->cfg.dst_maxburst;
+ } else {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+ nbytes = fsl_chan->cfg.src_addr_width *
+ fsl_chan->cfg.src_maxburst;
+ }
+
+ iter = period_len / nbytes;
+
+ for (i = 0; i < sg_len; i++) {
+ if (dma_buf_next >= dma_addr + buf_len)
+ dma_buf_next = dma_addr;
+
+ /* get next sg's physical address */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = dma_buf_next;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = fsl_chan->cfg.dst_addr_width;
+ doff = 0;
+ } else {
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = dma_buf_next;
+ soff = 0;
+ doff = fsl_chan->cfg.src_addr_width;
+ }
+
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+ fsl_chan->attr, soff, nbytes, 0, iter,
+ iter, doff, last_sg, true, false, true);
+ dma_buf_next += period_len;
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
+
+struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct fsl_edma_desc *fsl_desc;
+ struct scatterlist *sg;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+ int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = false;
+ fsl_desc->dirn = direction;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+ nbytes = fsl_chan->cfg.dst_addr_width *
+ fsl_chan->cfg.dst_maxburst;
+ } else {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+ nbytes = fsl_chan->cfg.src_addr_width *
+ fsl_chan->cfg.src_maxburst;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ /* get next sg's physical address */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = sg_dma_address(sg);
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = fsl_chan->cfg.dst_addr_width;
+ doff = 0;
+ } else {
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = sg_dma_address(sg);
+ soff = 0;
+ doff = fsl_chan->cfg.src_addr_width;
+ }
+
+ iter = sg_dma_len(sg) / nbytes;
+ if (i < sg_len - 1) {
+ last_sg = fsl_desc->tcd[(i + 1)].ptcd;
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->attr, soff,
+ nbytes, 0, iter, iter, doff, last_sg,
+ false, false, true);
+ } else {
+ last_sg = 0;
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->attr, soff,
+ nbytes, 0, iter, iter, doff, last_sg,
+ true, true, false);
+ }
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
+
+void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
+{
+ struct virt_dma_desc *vdesc;
+
+ vdesc = vchan_next_desc(&fsl_chan->vchan);
+ if (!vdesc)
+ return;
+ fsl_chan->edesc = to_fsl_edma_desc(vdesc);
+ fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
+ fsl_edma_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
+
+void fsl_edma_issue_pending(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+
+ if (unlikely(fsl_chan->pm_state != RUNNING)) {
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ /* cannot submit due to suspend */
+ return;
+ }
+
+ if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
+
+int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+ fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
+ sizeof(struct fsl_edma_hw_tcd),
+ 32, 0);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
+
+void fsl_edma_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma_disable_request(fsl_chan);
+ fsl_edma_chan_mux(fsl_chan, 0, false);
+ fsl_chan->edesc = NULL;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ dma_pool_destroy(fsl_chan->tcd_pool);
+ fsl_chan->tcd_pool = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
+
+void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
+{
+ struct fsl_edma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&chan->vchan.chan.device_node);
+ tasklet_kill(&chan->vchan.task);
+ }
+}
+EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
+
+/*
+ * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
+ * the register offsets differ from those of the 64-channel ColdFire
+ * mcf5441x eDMA (here called "v2").
+ *
+ * This function sets up the register offsets according to the declared
+ * version, so it must be called in xxx_edma_probe() just after the
+ * edma "version" and "membase" fields have been set appropriately.
+ */
+void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
+{
+ edma->regs.cr = edma->membase + EDMA_CR;
+ edma->regs.es = edma->membase + EDMA_ES;
+ edma->regs.erql = edma->membase + EDMA_ERQ;
+ edma->regs.eeil = edma->membase + EDMA_EEI;
+
+ edma->regs.serq = edma->membase + ((edma->version == v1) ?
+ EDMA_SERQ : EDMA64_SERQ);
+ edma->regs.cerq = edma->membase + ((edma->version == v1) ?
+ EDMA_CERQ : EDMA64_CERQ);
+ edma->regs.seei = edma->membase + ((edma->version == v1) ?
+ EDMA_SEEI : EDMA64_SEEI);
+ edma->regs.ceei = edma->membase + ((edma->version == v1) ?
+ EDMA_CEEI : EDMA64_CEEI);
+ edma->regs.cint = edma->membase + ((edma->version == v1) ?
+ EDMA_CINT : EDMA64_CINT);
+ edma->regs.cerr = edma->membase + ((edma->version == v1) ?
+ EDMA_CERR : EDMA64_CERR);
+ edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
+ EDMA_SSRT : EDMA64_SSRT);
+ edma->regs.cdne = edma->membase + ((edma->version == v1) ?
+ EDMA_CDNE : EDMA64_CDNE);
+ edma->regs.intl = edma->membase + ((edma->version == v1) ?
+ EDMA_INTR : EDMA64_INTL);
+ edma->regs.errl = edma->membase + ((edma->version == v1) ?
+ EDMA_ERR : EDMA64_ERRL);
+
+ if (edma->version == v2) {
+ edma->regs.erqh = edma->membase + EDMA64_ERQH;
+ edma->regs.eeih = edma->membase + EDMA64_EEIH;
+ edma->regs.errh = edma->membase + EDMA64_ERRH;
+ edma->regs.inth = edma->membase + EDMA64_INTH;
+ }
+
+ edma->regs.tcd = edma->membase + EDMA_TCD;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
+
+MODULE_LICENSE("GPL v2");
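
As the comment above fsl_edma_setup_regs() says, "version" and "membase" must be set before the call. A hypothetical probe fragment (the function name is illustrative, not from the patch) showing that ordering for the ColdFire ("v2") case:

#include "fsl-edma-common.h"

static int mcf_edma_setup_fragment(struct fsl_edma_engine *edma,
				   void __iomem *membase)
{
	edma->version = v2;	/* 64-channel ColdFire mcf5441x */
	edma->big_endian = true;
	edma->membase = membase;

	/* Fills edma->regs with the register offsets for this version. */
	fsl_edma_setup_regs(edma);
	return 0;
}
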
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
new file mode 100644
index 000000000000..8917e8865959
--- /dev/null
+++ b/drivers/dma/fsl-edma-common.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2018 Angelo Dureghello <angelo@sysam.it>
+ */
+#ifndef _FSL_EDMA_COMMON_H_
+#define _FSL_EDMA_COMMON_H_
+
+#include "virt-dma.h"
+
+#define EDMA_CR_EDBG BIT(1)
+#define EDMA_CR_ERCA BIT(2)
+#define EDMA_CR_ERGA BIT(3)
+#define EDMA_CR_HOE BIT(4)
+#define EDMA_CR_HALT BIT(5)
+#define EDMA_CR_CLM BIT(6)
+#define EDMA_CR_EMLM BIT(7)
+#define EDMA_CR_ECX BIT(16)
+#define EDMA_CR_CX BIT(17)
+
+#define EDMA_SEEI_SEEI(x) ((x) & GENMASK(4, 0))
+#define EDMA_CEEI_CEEI(x) ((x) & GENMASK(4, 0))
+#define EDMA_CINT_CINT(x) ((x) & GENMASK(4, 0))
+#define EDMA_CERR_CERR(x) ((x) & GENMASK(4, 0))
+
+#define EDMA_TCD_ATTR_DSIZE(x) (((x) & GENMASK(2, 0)))
+#define EDMA_TCD_ATTR_DMOD(x) (((x) & GENMASK(4, 0)) << 3)
+#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
+#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
+#define EDMA_TCD_ATTR_DSIZE_8BIT 0
+#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
+#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
+#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
+#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0))
+#define EDMA_TCD_ATTR_SSIZE_8BIT 0
+#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
+#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
+#define EDMA_TCD_ATTR_SSIZE_64BIT (EDMA_TCD_ATTR_DSIZE_64BIT << 8)
+#define EDMA_TCD_ATTR_SSIZE_32BYTE (EDMA_TCD_ATTR_DSIZE_32BYTE << 8)
+
+#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
+#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
+
+#define EDMA_TCD_CSR_START BIT(0)
+#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
+#define EDMA_TCD_CSR_INT_HALF BIT(2)
+#define EDMA_TCD_CSR_D_REQ BIT(3)
+#define EDMA_TCD_CSR_E_SG BIT(4)
+#define EDMA_TCD_CSR_E_LINK BIT(5)
+#define EDMA_TCD_CSR_ACTIVE BIT(6)
+#define EDMA_TCD_CSR_DONE BIT(7)
+
+#define EDMAMUX_CHCFG_DIS 0x0
+#define EDMAMUX_CHCFG_ENBL 0x80
+#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
+
+#define DMAMUX_NR 2
+
+#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+enum fsl_edma_pm_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
+
+struct fsl_edma_hw_tcd {
+ __le32 saddr;
+ __le16 soff;
+ __le16 attr;
+ __le32 nbytes;
+ __le32 slast;
+ __le32 daddr;
+ __le16 doff;
+ __le16 citer;
+ __le32 dlast_sga;
+ __le16 csr;
+ __le16 biter;
+};
+
+/*
+ * These are iomem pointers, for both the 32-channel ("v1") and
+ * 64-channel ("v2") register layouts.
+ */
+struct edma_regs {
+ void __iomem *cr;
+ void __iomem *es;
+ void __iomem *erqh;
+ void __iomem *erql; /* aka erq on v1 */
+ void __iomem *eeih;
+ void __iomem *eeil; /* aka eei on v1 */
+ void __iomem *seei;
+ void __iomem *ceei;
+ void __iomem *serq;
+ void __iomem *cerq;
+ void __iomem *cint;
+ void __iomem *cerr;
+ void __iomem *ssrt;
+ void __iomem *cdne;
+ void __iomem *inth;
+ void __iomem *intl;
+ void __iomem *errh;
+ void __iomem *errl;
+ struct fsl_edma_hw_tcd __iomem *tcd;
+};
+
+struct fsl_edma_sw_tcd {
+ dma_addr_t ptcd;
+ struct fsl_edma_hw_tcd *vtcd;
+};
+
+struct fsl_edma_chan {
+ struct virt_dma_chan vchan;
+ enum dma_status status;
+ enum fsl_edma_pm_state pm_state;
+ bool idle;
+ u32 slave_id;
+ struct fsl_edma_engine *edma;
+ struct fsl_edma_desc *edesc;
+ struct dma_slave_config cfg;
+ u32 attr;
+ struct dma_pool *tcd_pool;
+};
+
+struct fsl_edma_desc {
+ struct virt_dma_desc vdesc;
+ struct fsl_edma_chan *echan;
+ bool iscyclic;
+ enum dma_transfer_direction dirn;
+ unsigned int n_tcds;
+ struct fsl_edma_sw_tcd tcd[];
+};
+
+enum edma_version {
+ v1, /* 32-channel: Vybrid, mpc577x, etc. */
+ v2, /* 64-channel: ColdFire */
+};
+
+struct fsl_edma_engine {
+ struct dma_device dma_dev;
+ void __iomem *membase;
+ void __iomem *muxbase[DMAMUX_NR];
+ struct clk *muxclk[DMAMUX_NR];
+ struct mutex fsl_edma_mutex;
+ u32 n_chans;
+ int txirq;
+ int errirq;
+ bool big_endian;
+ enum edma_version version;
+ struct edma_regs regs;
+ struct fsl_edma_chan chans[];
+};
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The eDMA controller's endianness is independent of the CPU core's.
+ * For the big-endian IP module, the byte offsets of 8-bit and 16-bit
+ * registers must also be swapped relative to the little-endian layout.
+ */
+static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ if (edma->big_endian)
+ return ioread32be(addr);
+ else
+ return ioread32(addr);
+}
+
+static inline void edma_writeb(struct fsl_edma_engine *edma,
+ u8 val, void __iomem *addr)
+{
+ /* swap the reg offset for these in big-endian mode */
+ if (edma->big_endian)
+ iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
+ else
+ iowrite8(val, addr);
+}
+
+static inline void edma_writew(struct fsl_edma_engine *edma,
+ u16 val, void __iomem *addr)
+{
+ /* swap the reg offset for these in big-endian mode */
+ if (edma->big_endian)
+ iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
+ else
+ iowrite16(val, addr);
+}
+
+static inline void edma_writel(struct fsl_edma_engine *edma,
+ u32 val, void __iomem *addr)
+{
+ if (edma->big_endian)
+ iowrite32be(val, addr);
+ else
+ iowrite32(val, addr);
+}
+
+static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct fsl_edma_chan, vchan.chan);
+}
+
+static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct fsl_edma_desc, vdesc);
+}
+
+void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
+void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
+ unsigned int slot, bool enable);
+void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
+int fsl_edma_terminate_all(struct dma_chan *chan);
+int fsl_edma_pause(struct dma_chan *chan);
+int fsl_edma_resume(struct dma_chan *chan);
+int fsl_edma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg);
+enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate);
+struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags);
+struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
+void fsl_edma_issue_pending(struct dma_chan *chan);
+int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
+void fsl_edma_free_chan_resources(struct dma_chan *chan);
+void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
+void fsl_edma_setup_regs(struct fsl_edma_engine *edma);
+
+#endif /* _FSL_EDMA_COMMON_H_ */
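The address XOR in edma_writeb()/edma_writew() above is the whole trick: within a 32-bit word, a sub-word register's byte offset mirrors when the word is viewed in the opposite byte order, so offset N becomes N ^ 3 for 8-bit registers and N ^ 2 for 16-bit ones. A userspace sketch that makes the mirroring visible (illustrative only; it just models one 32-bit word as a byte array):

#include <stdio.h>
#include <stdint.h>

static void writeb_swapped(uint8_t *base, unsigned long off, uint8_t val)
{
	base[off ^ 0x3] = val;	/* same trick as edma_writeb() */
}

int main(void)
{
	uint8_t word[4] = { 0 };

	/* "register" at byte offset 1 of the word, big-endian layout */
	writeb_swapped(word, 1, 0xAB);

	/* lands at byte 2 of the little-endian view of the same word */
	printf("bytes: %02x %02x %02x %02x\n",
	       word[0], word[1], word[2], word[3]);
	return 0;
}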
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index c7568869284e..34d70112fcc9 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -13,671 +13,31 @@
* option) any later version.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
-#include "virt-dma.h"
-
-#define EDMA_CR 0x00
-#define EDMA_ES 0x04
-#define EDMA_ERQ 0x0C
-#define EDMA_EEI 0x14
-#define EDMA_SERQ 0x1B
-#define EDMA_CERQ 0x1A
-#define EDMA_SEEI 0x19
-#define EDMA_CEEI 0x18
-#define EDMA_CINT 0x1F
-#define EDMA_CERR 0x1E
-#define EDMA_SSRT 0x1D
-#define EDMA_CDNE 0x1C
-#define EDMA_INTR 0x24
-#define EDMA_ERR 0x2C
-
-#define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x))
-#define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x))
-#define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x))
-#define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x))
-#define EDMA_TCD_SLAST(x) (0x100C + 32 * (x))
-#define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x))
-#define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x))
-#define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x))
-#define EDMA_TCD_CITER(x) (0x1016 + 32 * (x))
-#define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x))
-#define EDMA_TCD_CSR(x) (0x101C + 32 * (x))
-#define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x))
-#define EDMA_TCD_BITER(x) (0x101E + 32 * (x))
-
-#define EDMA_CR_EDBG BIT(1)
-#define EDMA_CR_ERCA BIT(2)
-#define EDMA_CR_ERGA BIT(3)
-#define EDMA_CR_HOE BIT(4)
-#define EDMA_CR_HALT BIT(5)
-#define EDMA_CR_CLM BIT(6)
-#define EDMA_CR_EMLM BIT(7)
-#define EDMA_CR_ECX BIT(16)
-#define EDMA_CR_CX BIT(17)
-
-#define EDMA_SEEI_SEEI(x) ((x) & 0x1F)
-#define EDMA_CEEI_CEEI(x) ((x) & 0x1F)
-#define EDMA_CINT_CINT(x) ((x) & 0x1F)
-#define EDMA_CERR_CERR(x) ((x) & 0x1F)
-
-#define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007))
-#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
-#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
-#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
-#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
-#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
-#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
-#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
-#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
-#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
-#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
-#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
-#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
-#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
-
-#define EDMA_TCD_SOFF_SOFF(x) (x)
-#define EDMA_TCD_NBYTES_NBYTES(x) (x)
-#define EDMA_TCD_SLAST_SLAST(x) (x)
-#define EDMA_TCD_DADDR_DADDR(x) (x)
-#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
-#define EDMA_TCD_DOFF_DOFF(x) (x)
-#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
-#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
-
-#define EDMA_TCD_CSR_START BIT(0)
-#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
-#define EDMA_TCD_CSR_INT_HALF BIT(2)
-#define EDMA_TCD_CSR_D_REQ BIT(3)
-#define EDMA_TCD_CSR_E_SG BIT(4)
-#define EDMA_TCD_CSR_E_LINK BIT(5)
-#define EDMA_TCD_CSR_ACTIVE BIT(6)
-#define EDMA_TCD_CSR_DONE BIT(7)
-
-#define EDMAMUX_CHCFG_DIS 0x0
-#define EDMAMUX_CHCFG_ENBL 0x80
-#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
-
-#define DMAMUX_NR 2
-
-#define FSL_EDMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
-enum fsl_edma_pm_state {
- RUNNING = 0,
- SUSPENDED,
-};
-
-struct fsl_edma_hw_tcd {
- __le32 saddr;
- __le16 soff;
- __le16 attr;
- __le32 nbytes;
- __le32 slast;
- __le32 daddr;
- __le16 doff;
- __le16 citer;
- __le32 dlast_sga;
- __le16 csr;
- __le16 biter;
-};
-
-struct fsl_edma_sw_tcd {
- dma_addr_t ptcd;
- struct fsl_edma_hw_tcd *vtcd;
-};
-
-struct fsl_edma_slave_config {
- enum dma_transfer_direction dir;
- enum dma_slave_buswidth addr_width;
- u32 dev_addr;
- u32 burst;
- u32 attr;
-};
-
-struct fsl_edma_chan {
- struct virt_dma_chan vchan;
- enum dma_status status;
- enum fsl_edma_pm_state pm_state;
- bool idle;
- u32 slave_id;
- struct fsl_edma_engine *edma;
- struct fsl_edma_desc *edesc;
- struct fsl_edma_slave_config fsc;
- struct dma_pool *tcd_pool;
-};
-
-struct fsl_edma_desc {
- struct virt_dma_desc vdesc;
- struct fsl_edma_chan *echan;
- bool iscyclic;
- unsigned int n_tcds;
- struct fsl_edma_sw_tcd tcd[];
-};
-
-struct fsl_edma_engine {
- struct dma_device dma_dev;
- void __iomem *membase;
- void __iomem *muxbase[DMAMUX_NR];
- struct clk *muxclk[DMAMUX_NR];
- struct mutex fsl_edma_mutex;
- u32 n_chans;
- int txirq;
- int errirq;
- bool big_endian;
- struct fsl_edma_chan chans[];
-};
-
-/*
- * R/W functions for big- or little-endian registers:
- * The eDMA controller's endian is independent of the CPU core's endian.
- * For the big-endian IP module, the offset for 8-bit or 16-bit registers
- * should also be swapped opposite to that in little-endian IP.
- */
-
-static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
-{
- if (edma->big_endian)
- return ioread32be(addr);
- else
- return ioread32(addr);
-}
-
-static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
-{
- /* swap the reg offset for these in big-endian mode */
- if (edma->big_endian)
- iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
- else
- iowrite8(val, addr);
-}
-
-static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
-{
- /* swap the reg offset for these in big-endian mode */
- if (edma->big_endian)
- iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
- else
- iowrite16(val, addr);
-}
-
-static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
-{
- if (edma->big_endian)
- iowrite32be(val, addr);
- else
- iowrite32(val, addr);
-}
-
-static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct fsl_edma_chan, vchan.chan);
-}
-
-static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
-{
- return container_of(vd, struct fsl_edma_desc, vdesc);
-}
-
-static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
-{
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
-
- edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
- edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
-}
-
-static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
-{
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
-
- edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
- edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
-}
-
-static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
- unsigned int slot, bool enable)
-{
- u32 ch = fsl_chan->vchan.chan.chan_id;
- void __iomem *muxaddr;
- unsigned chans_per_mux, ch_off;
-
- chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
- ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
- muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
- slot = EDMAMUX_CHCFG_SOURCE(slot);
-
- if (enable)
- iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
- else
- iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
-}
-
-static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
-{
- switch (addr_width) {
- case 1:
- return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
- case 2:
- return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
- case 4:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- case 8:
- return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
- default:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- }
-}
-
-static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
-{
- struct fsl_edma_desc *fsl_desc;
- int i;
-
- fsl_desc = to_fsl_edma_desc(vdesc);
- for (i = 0; i < fsl_desc->n_tcds; i++)
- dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
- fsl_desc->tcd[i].ptcd);
- kfree(fsl_desc);
-}
-
-static int fsl_edma_terminate_all(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
- LIST_HEAD(head);
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- fsl_edma_disable_request(fsl_chan);
- fsl_chan->edesc = NULL;
- fsl_chan->idle = true;
- vchan_get_all_descriptors(&fsl_chan->vchan, &head);
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- return 0;
-}
-
-static int fsl_edma_pause(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- if (fsl_chan->edesc) {
- fsl_edma_disable_request(fsl_chan);
- fsl_chan->status = DMA_PAUSED;
- fsl_chan->idle = true;
- }
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- return 0;
-}
-
-static int fsl_edma_resume(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- if (fsl_chan->edesc) {
- fsl_edma_enable_request(fsl_chan);
- fsl_chan->status = DMA_IN_PROGRESS;
- fsl_chan->idle = false;
- }
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- return 0;
-}
-
-static int fsl_edma_slave_config(struct dma_chan *chan,
- struct dma_slave_config *cfg)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-
- fsl_chan->fsc.dir = cfg->direction;
- if (cfg->direction == DMA_DEV_TO_MEM) {
- fsl_chan->fsc.dev_addr = cfg->src_addr;
- fsl_chan->fsc.addr_width = cfg->src_addr_width;
- fsl_chan->fsc.burst = cfg->src_maxburst;
- fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
- } else if (cfg->direction == DMA_MEM_TO_DEV) {
- fsl_chan->fsc.dev_addr = cfg->dst_addr;
- fsl_chan->fsc.addr_width = cfg->dst_addr_width;
- fsl_chan->fsc.burst = cfg->dst_maxburst;
- fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
- } else {
- return -EINVAL;
- }
- return 0;
-}
-
-static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
- struct virt_dma_desc *vdesc, bool in_progress)
-{
- struct fsl_edma_desc *edesc = fsl_chan->edesc;
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
- enum dma_transfer_direction dir = fsl_chan->fsc.dir;
- dma_addr_t cur_addr, dma_addr;
- size_t len, size;
- int i;
-
- /* calculate the total size in this desc */
- for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
- len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
-
- if (!in_progress)
- return len;
-
- if (dir == DMA_MEM_TO_DEV)
- cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
- else
- cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
-
- /* figure out the finished and calculate the residue */
- for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
- if (dir == DMA_MEM_TO_DEV)
- dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
- else
- dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
-
- len -= size;
- if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
- len += dma_addr + size - cur_addr;
- break;
- }
- }
-
- return len;
-}
-
-static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- struct virt_dma_desc *vdesc;
- enum dma_status status;
- unsigned long flags;
-
- status = dma_cookie_status(chan, cookie, txstate);
- if (status == DMA_COMPLETE)
- return status;
-
- if (!txstate)
- return fsl_chan->status;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
- if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
- txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
- else if (vdesc)
- txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
- else
- txstate->residue = 0;
-
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-
- return fsl_chan->status;
-}
-
-static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
- struct fsl_edma_hw_tcd *tcd)
-{
- struct fsl_edma_engine *edma = fsl_chan->edma;
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
-
- /*
- * TCD parameters are stored in struct fsl_edma_hw_tcd in little
- * endian format. However, we need to load the TCD registers in
- * big- or little-endian obeying the eDMA engine model endian.
- */
- edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
- edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
- edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));
-
- edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
- edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));
-
- edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
- edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));
-
- edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
- edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
- edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));
-
- edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));
-
- edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
-}
-
-static inline
-void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
- u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
- u16 biter, u16 doff, u32 dlast_sga, bool major_int,
- bool disable_req, bool enable_sg)
-{
- u16 csr = 0;
-
- /*
- * eDMA hardware SGs require the TCDs to be stored in little
- * endian format irrespective of the register endian model.
- * So we put the value in little endian in memory, waiting
- * for fsl_edma_set_tcd_regs doing the swap.
- */
- tcd->saddr = cpu_to_le32(src);
- tcd->daddr = cpu_to_le32(dst);
-
- tcd->attr = cpu_to_le16(attr);
-
- tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
-
- tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
- tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
-
- tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
- tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
-
- tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
-
- tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
- if (major_int)
- csr |= EDMA_TCD_CSR_INT_MAJOR;
-
- if (disable_req)
- csr |= EDMA_TCD_CSR_D_REQ;
-
- if (enable_sg)
- csr |= EDMA_TCD_CSR_E_SG;
-
- tcd->csr = cpu_to_le16(csr);
-}
-
-static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
- int sg_len)
-{
- struct fsl_edma_desc *fsl_desc;
- int i;
-
- fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
- GFP_NOWAIT);
- if (!fsl_desc)
- return NULL;
-
- fsl_desc->echan = fsl_chan;
- fsl_desc->n_tcds = sg_len;
- for (i = 0; i < sg_len; i++) {
- fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
- GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
- if (!fsl_desc->tcd[i].vtcd)
- goto err;
- }
- return fsl_desc;
-
-err:
- while (--i >= 0)
- dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
- fsl_desc->tcd[i].ptcd);
- kfree(fsl_desc);
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- struct fsl_edma_desc *fsl_desc;
- dma_addr_t dma_buf_next;
- int sg_len, i;
- u32 src_addr, dst_addr, last_sg, nbytes;
- u16 soff, doff, iter;
-
- if (!is_slave_direction(fsl_chan->fsc.dir))
- return NULL;
-
- sg_len = buf_len / period_len;
- fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
- if (!fsl_desc)
- return NULL;
- fsl_desc->iscyclic = true;
-
- dma_buf_next = dma_addr;
- nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
- iter = period_len / nbytes;
-
- for (i = 0; i < sg_len; i++) {
- if (dma_buf_next >= dma_addr + buf_len)
- dma_buf_next = dma_addr;
-
- /* get next sg's physical address */
- last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
- if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
- src_addr = dma_buf_next;
- dst_addr = fsl_chan->fsc.dev_addr;
- soff = fsl_chan->fsc.addr_width;
- doff = 0;
- } else {
- src_addr = fsl_chan->fsc.dev_addr;
- dst_addr = dma_buf_next;
- soff = 0;
- doff = fsl_chan->fsc.addr_width;
- }
-
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
- fsl_chan->fsc.attr, soff, nbytes, 0, iter,
- iter, doff, last_sg, true, false, true);
- dma_buf_next += period_len;
- }
-
- return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
-}
-
-static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- struct fsl_edma_desc *fsl_desc;
- struct scatterlist *sg;
- u32 src_addr, dst_addr, last_sg, nbytes;
- u16 soff, doff, iter;
- int i;
-
- if (!is_slave_direction(fsl_chan->fsc.dir))
- return NULL;
-
- fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
- if (!fsl_desc)
- return NULL;
- fsl_desc->iscyclic = false;
-
- nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
- for_each_sg(sgl, sg, sg_len, i) {
- /* get next sg's physical address */
- last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
- if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
- src_addr = sg_dma_address(sg);
- dst_addr = fsl_chan->fsc.dev_addr;
- soff = fsl_chan->fsc.addr_width;
- doff = 0;
- } else {
- src_addr = fsl_chan->fsc.dev_addr;
- dst_addr = sg_dma_address(sg);
- soff = 0;
- doff = fsl_chan->fsc.addr_width;
- }
-
- iter = sg_dma_len(sg) / nbytes;
- if (i < sg_len - 1) {
- last_sg = fsl_desc->tcd[(i + 1)].ptcd;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
- dst_addr, fsl_chan->fsc.attr, soff,
- nbytes, 0, iter, iter, doff, last_sg,
- false, false, true);
- } else {
- last_sg = 0;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
- dst_addr, fsl_chan->fsc.attr, soff,
- nbytes, 0, iter, iter, doff, last_sg,
- true, true, false);
- }
- }
-
- return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
-}
-
-static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
-{
- struct virt_dma_desc *vdesc;
-
- vdesc = vchan_next_desc(&fsl_chan->vchan);
- if (!vdesc)
- return;
- fsl_chan->edesc = to_fsl_edma_desc(vdesc);
- fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
- fsl_edma_enable_request(fsl_chan);
- fsl_chan->status = DMA_IN_PROGRESS;
- fsl_chan->idle = false;
-}
+#include "fsl-edma-common.h"
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int intr, ch;
- void __iomem *base_addr;
+ struct edma_regs *regs = &fsl_edma->regs;
struct fsl_edma_chan *fsl_chan;
- base_addr = fsl_edma->membase;
-
- intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
+ intr = edma_readl(fsl_edma, regs->intl);
if (!intr)
return IRQ_NONE;
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (intr & (0x1 << ch)) {
- edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
- base_addr + EDMA_CINT);
+ edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
fsl_chan = &fsl_edma->chans[ch];
@@ -705,16 +65,16 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int err, ch;
+ struct edma_regs *regs = &fsl_edma->regs;
- err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
+ err = edma_readl(fsl_edma, regs->errl);
if (!err)
return IRQ_NONE;
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (err & (0x1 << ch)) {
fsl_edma_disable_request(&fsl_edma->chans[ch]);
- edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
- fsl_edma->membase + EDMA_CERR);
+ edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
fsl_edma->chans[ch].status = DMA_ERROR;
fsl_edma->chans[ch].idle = true;
}
@@ -730,25 +90,6 @@ static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
return fsl_edma_err_handler(irq, dev_id);
}
-static void fsl_edma_issue_pending(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-
- if (unlikely(fsl_chan->pm_state != RUNNING)) {
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- /* cannot submit due to suspend */
- return;
- }
-
- if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
- fsl_edma_xfer_desc(fsl_chan);
-
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-}
-
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
@@ -781,34 +122,6 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
-static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-
- fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
- sizeof(struct fsl_edma_hw_tcd),
- 32, 0);
- return 0;
-}
-
-static void fsl_edma_free_chan_resources(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
- LIST_HEAD(head);
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- fsl_edma_disable_request(fsl_chan);
- fsl_edma_chan_mux(fsl_chan, 0, false);
- fsl_chan->edesc = NULL;
- vchan_get_all_descriptors(&fsl_chan->vchan, &head);
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-
- vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- dma_pool_destroy(fsl_chan->tcd_pool);
- fsl_chan->tcd_pool = NULL;
-}
-
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
@@ -876,6 +189,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
struct fsl_edma_chan *fsl_chan;
+ struct edma_regs *regs;
struct resource *res;
int len, chans;
int ret, i;
@@ -891,6 +205,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (!fsl_edma)
return -ENOMEM;
+ fsl_edma->version = v1;
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
@@ -899,6 +214,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
+ fsl_edma_setup_regs(fsl_edma);
+ regs = &fsl_edma->regs;
+
for (i = 0; i < DMAMUX_NR; i++) {
char clkname[32];
@@ -939,11 +257,11 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
- edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
- edma_writel(fsl_edma, ~0, fsl_edma->membase + EDMA_INTR);
+ edma_writel(fsl_edma, ~0, regs->intl);
ret = fsl_edma_irq_init(pdev, fsl_edma);
if (ret)
return ret;
@@ -990,22 +308,11 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
/* enable round robin arbitration */
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
-static void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
-{
- struct fsl_edma_chan *chan, *_chan;
-
- list_for_each_entry_safe(chan, _chan,
- &dmadev->channels, vchan.chan.device_node) {
- list_del(&chan->vchan.chan.device_node);
- tasklet_kill(&chan->vchan.task);
- }
-}
-
static int fsl_edma_remove(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1048,18 +355,18 @@ static int fsl_edma_resume_early(struct device *dev)
{
struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
struct fsl_edma_chan *fsl_chan;
+ struct edma_regs *regs = &fsl_edma->regs;
int i;
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
fsl_chan->pm_state = RUNNING;
- edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA,
- fsl_edma->membase + EDMA_CR);
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 1117b5123a6f..9d360a3fbae3 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -987,7 +987,7 @@ static void dma_do_tasklet(unsigned long data)
chan_dbg(chan, "tasklet entry\n");
- spin_lock_bh(&chan->desc_lock);
+ spin_lock(&chan->desc_lock);
/* the hardware is now idle and ready for more */
chan->idle = true;
@@ -995,7 +995,7 @@ static void dma_do_tasklet(unsigned long data)
/* Run all cleanup for descriptors which have been completed */
fsldma_cleanup_descriptors(chan);
- spin_unlock_bh(&chan->desc_lock);
+ spin_unlock(&chan->desc_lock);
chan_dbg(chan, "tasklet exit\n");
}
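The fsldma change above, like the idma64 and mv_xor changes further down, drops the _bh/_irqsave spinlock variants inside callbacks that already run in softirq or hard-irq context, where the extra masking is redundant. For the tasklet case the shape is as follows; this is a sketch with illustrative names, not the driver's code:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct foo_chan {
	spinlock_t lock;
};

static void foo_cleanup_descriptors(struct foo_chan *chan)
{
	/* descriptor cleanup would happen here */
}

/* tasklets run in softirq context, so BHs are already disabled */
static void foo_tasklet(unsigned long data)
{
	struct foo_chan *chan = (struct foo_chan *)data;

	spin_lock(&chan->lock);		/* plain lock: no _bh needed */
	foo_cleanup_descriptors(chan);
	spin_unlock(&chan->lock);
}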
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 202ffa9f7611..e06f20272fd7 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -348,10 +348,6 @@ static int hsu_dma_slave_config(struct dma_chan *chan,
{
struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
- /* Check if chan will be configured for slave transfers */
- if (!is_slave_direction(config->direction))
- return -EINVAL;
-
memcpy(&hsuc->config, config, sizeof(hsuc->config));
return 0;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 1fbf9cb9b742..0baf9797cc09 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -142,9 +142,8 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
{
struct idma64_chan *idma64c = &idma64->chan[c];
struct idma64_desc *desc;
- unsigned long flags;
- spin_lock_irqsave(&idma64c->vchan.lock, flags);
+ spin_lock(&idma64c->vchan.lock);
desc = idma64c->desc;
if (desc) {
if (status_err & (1 << c)) {
@@ -161,7 +160,7 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
if (idma64c->desc == NULL || desc->status == DMA_ERROR)
idma64_stop_transfer(idma64c);
}
- spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+ spin_unlock(&idma64c->vchan.lock);
}
static irqreturn_t idma64_irq(int irq, void *dev)
@@ -408,10 +407,6 @@ static int idma64_slave_config(struct dma_chan *chan,
{
struct idma64_chan *idma64c = to_idma64_chan(chan);
- /* Check if chan will be configured for slave transfers */
- if (!is_slave_direction(config->direction))
- return -EINVAL;
-
memcpy(&idma64c->config, config, sizeof(idma64c->config));
convert_burst(&idma64c->config.src_maxburst);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 75b6ff0415ee..c2fff3f6c9ca 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -162,6 +162,7 @@ struct imxdma_channel {
bool enabled_2d;
int slot_2d;
unsigned int irq;
+ struct dma_slave_config config;
};
enum imx_dma_type {
@@ -675,14 +676,15 @@ static int imxdma_terminate_all(struct dma_chan *chan)
return 0;
}
-static int imxdma_config(struct dma_chan *chan,
- struct dma_slave_config *dmaengine_cfg)
+static int imxdma_config_write(struct dma_chan *chan,
+ struct dma_slave_config *dmaengine_cfg,
+ enum dma_transfer_direction direction)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
unsigned int mode = 0;
- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ if (direction == DMA_DEV_TO_MEM) {
imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -723,6 +725,16 @@ static int imxdma_config(struct dma_chan *chan,
return 0;
}
+static int imxdma_config(struct dma_chan *chan,
+ struct dma_slave_config *dmaengine_cfg)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+
+ memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));
+
+ return 0;
+}
+
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
@@ -905,6 +917,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
desc->desc.callback = NULL;
desc->desc.callback_param = NULL;
+ imxdma_config_write(chan, &imxdmac->config, direction);
+
return &desc->desc;
}
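This imx-dma change, like the k3dma and mmp_tdma changes further down, follows one pattern: device_config() only caches the struct dma_slave_config, and a new *_config_write() helper applies the cached copy inside the prep callbacks, where the transfer direction is passed explicitly instead of being read from the deprecated cfg->direction field. A cut-down standalone sketch of the pattern; the types and names are illustrative, not the drivers' own:

#include <stdio.h>
#include <string.h>

enum dir { DEV_TO_MEM, MEM_TO_DEV };

struct slave_config {
	unsigned int src_addr, dst_addr;
};

struct chan {
	struct slave_config cfg;	/* cached at config time */
	unsigned int dev_addr;		/* resolved at prep time */
};

/* device_config(): just remember the parameters */
static int config(struct chan *c, const struct slave_config *cfg)
{
	memcpy(&c->cfg, cfg, sizeof(*cfg));
	return 0;
}

/* prep-time helper: apply the cached config for the direction given here */
static void config_write(struct chan *c, enum dir d)
{
	c->dev_addr = (d == DEV_TO_MEM) ? c->cfg.src_addr : c->cfg.dst_addr;
}

int main(void)
{
	struct chan c = { { 0, 0 }, 0 };
	struct slave_config cfg = { .src_addr = 0x1000, .dst_addr = 0x2000 };

	config(&c, &cfg);
	config_write(&c, MEM_TO_DEV);	/* direction known only at prep */
	printf("dev_addr = %#x\n", c.dev_addr);
	return 0;
}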
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4fa4c06c9edb..2d810dfcdc48 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -129,7 +129,7 @@ static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
-static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
+static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
@@ -575,7 +575,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
* ioat_enumerate_channels - find and initialize the device's channels
* @ioat_dma: the ioat dma device to be enumerated
*/
-static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
struct ioatdma_chan *ioat_chan;
struct device *dev = &ioat_dma->pdev->dev;
@@ -594,7 +594,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */
if (xfercap_log == 0)
- return 0;
+ return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) {
@@ -611,7 +611,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
}
}
dma->chancnt = i;
- return i;
}
/**
@@ -1205,8 +1204,15 @@ static void ioat_shutdown(struct pci_dev *pdev)
spin_lock_bh(&ioat_chan->prep_lock);
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- del_timer_sync(&ioat_chan->timer);
spin_unlock_bh(&ioat_chan->prep_lock);
+ /*
+ * Synchronization rule for del_timer_sync():
+ * - The caller must not hold locks which would prevent
+ * completion of the timer's handler.
+ * So prep_lock cannot be held before calling it.
+ */
+ del_timer_sync(&ioat_chan->timer);
+
/* this should quiesce then reset */
ioat_reset_hw(ioat_chan);
}
@@ -1252,7 +1258,6 @@ static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
- int err;
dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);
@@ -1267,12 +1272,6 @@ static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "AER uncorrect error status clear failed: %#x\n", err);
- }
-
return result;
}
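The comment added in the ioat shutdown hunk states the rule behind that fix: a synchronous wait for a handler must not happen while holding a lock the handler itself may take, or the wait can deadlock. The same shape in userspace terms, with pthread_join() standing in for del_timer_sync() and a mutex standing in for prep_lock (illustrative only; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *handler(void *arg)
{
	pthread_mutex_lock(&lock);	/* the "timer handler" takes the lock */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, handler, NULL);

	pthread_mutex_lock(&lock);
	/* state change under the lock would go here */
	pthread_mutex_unlock(&lock);	/* drop it first ... */

	pthread_join(t, NULL);		/* ... then wait, as the patch does */
	return 0;
}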
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 6bfa217ed6d0..fdec2b6cfbb0 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -87,10 +87,10 @@ struct k3_dma_chan {
struct virt_dma_chan vc;
struct k3_dma_phy *phy;
struct list_head node;
- enum dma_transfer_direction dir;
dma_addr_t dev_addr;
enum dma_status status;
bool cyclic;
+ struct dma_slave_config slave_config;
};
struct k3_dma_phy {
@@ -118,6 +118,10 @@ struct k3_dma_dev {
#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
+static int k3_dma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *cfg);
+
static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
return container_of(chan, struct k3_dma_chan, vc.chan);
@@ -501,14 +505,8 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
copy = min_t(size_t, len, DMA_MAX_SIZE);
k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
- if (c->dir == DMA_MEM_TO_DEV) {
- src += copy;
- } else if (c->dir == DMA_DEV_TO_MEM) {
- dst += copy;
- } else {
- src += copy;
- dst += copy;
- }
+ src += copy;
+ dst += copy;
len -= copy;
} while (len);
@@ -542,6 +540,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
if (!ds)
return NULL;
num = 0;
+ k3_dma_config_write(chan, dir, &c->slave_config);
for_each_sg(sgl, sg, sglen, i) {
addr = sg_dma_address(sg);
@@ -602,6 +601,7 @@ k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
avail = buf_len;
total = avail;
num = 0;
+ k3_dma_config_write(chan, dir, &c->slave_config);
if (period_len < modulo)
modulo = period_len;
@@ -642,18 +642,26 @@ static int k3_dma_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
struct k3_dma_chan *c = to_k3_chan(chan);
+
+ memcpy(&c->slave_config, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+static int k3_dma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *cfg)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
u32 maxburst = 0, val = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
- if (cfg == NULL)
- return -EINVAL;
- c->dir = cfg->direction;
- if (c->dir == DMA_DEV_TO_MEM) {
+ if (dir == DMA_DEV_TO_MEM) {
c->ccfg = CX_CFG_DSTINCR;
c->dev_addr = cfg->src_addr;
maxburst = cfg->src_maxburst;
width = cfg->src_addr_width;
- } else if (c->dir == DMA_MEM_TO_DEV) {
+ } else if (dir == DMA_MEM_TO_DEV) {
c->ccfg = CX_CFG_SRCINCR;
c->dev_addr = cfg->dst_addr;
maxburst = cfg->dst_maxburst;
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
new file mode 100644
index 000000000000..5de1b07eddff
--- /dev/null
+++ b/drivers/dma/mcf-edma.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
+// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/dma-mcf-edma.h>
+
+#include "fsl-edma-common.h"
+
+#define EDMA_CHANNELS 64
+#define EDMA_MASK_CH(x) ((x) & GENMASK(5, 0))
+
+static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *mcf_edma = dev_id;
+ struct edma_regs *regs = &mcf_edma->regs;
+ unsigned int ch;
+ struct fsl_edma_chan *mcf_chan;
+ u64 intmap;
+
+ intmap = ioread32(regs->inth);
+ intmap <<= 32;
+ intmap |= ioread32(regs->intl);
+ if (!intmap)
+ return IRQ_NONE;
+
+ for (ch = 0; ch < mcf_edma->n_chans; ch++) {
+ if (intmap & BIT(ch)) {
+ iowrite8(EDMA_MASK_CH(ch), regs->cint);
+
+ mcf_chan = &mcf_edma->chans[ch];
+
+ spin_lock(&mcf_chan->vchan.lock);
+ if (!mcf_chan->edesc->iscyclic) {
+ list_del(&mcf_chan->edesc->vdesc.node);
+ vchan_cookie_complete(&mcf_chan->edesc->vdesc);
+ mcf_chan->edesc = NULL;
+ mcf_chan->status = DMA_COMPLETE;
+ mcf_chan->idle = true;
+ } else {
+ vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
+ }
+
+ if (!mcf_chan->edesc)
+ fsl_edma_xfer_desc(mcf_chan);
+
+ spin_unlock(&mcf_chan->vchan.lock);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *mcf_edma = dev_id;
+ struct edma_regs *regs = &mcf_edma->regs;
+ unsigned int err, ch;
+
+ err = ioread32(regs->errl);
+ if (!err)
+ return IRQ_NONE;
+
+ for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
+ if (err & BIT(ch)) {
+ fsl_edma_disable_request(&mcf_edma->chans[ch]);
+ iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
+ mcf_edma->chans[ch].status = DMA_ERROR;
+ mcf_edma->chans[ch].idle = true;
+ }
+ }
+
+ err = ioread32(regs->errh);
+ if (!err)
+ return IRQ_NONE;
+
+ for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
+ if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
+ fsl_edma_disable_request(&mcf_edma->chans[ch]);
+ iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
+ mcf_edma->chans[ch].status = DMA_ERROR;
+ mcf_edma->chans[ch].idle = true;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mcf_edma_irq_init(struct platform_device *pdev,
+ struct fsl_edma_engine *mcf_edma)
+{
+ int ret = 0, i;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-00-15");
+ if (!res)
+ return -1;
+
+ for (ret = 0, i = res->start; i <= res->end; ++i)
+ ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-16-55");
+ if (!res)
+ return -1;
+
+ for (ret = 0, i = res->start; i <= res->end; ++i)
+ ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+
+ ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
+ if (ret != -ENXIO) {
+ ret = request_irq(ret, mcf_edma_tx_handler,
+ 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+ }
+
+ ret = platform_get_irq_byname(pdev, "edma-err");
+ if (ret != -ENXIO) {
+ ret = request_irq(ret, mcf_edma_err_handler,
+ 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mcf_edma_irq_free(struct platform_device *pdev,
+ struct fsl_edma_engine *mcf_edma)
+{
+ int irq;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-00-15");
+ if (res) {
+ for (irq = res->start; irq <= res->end; irq++)
+ free_irq(irq, mcf_edma);
+ }
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-16-55");
+ if (res) {
+ for (irq = res->start; irq <= res->end; irq++)
+ free_irq(irq, mcf_edma);
+ }
+
+ irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
+ if (irq != -ENXIO)
+ free_irq(irq, mcf_edma);
+
+ irq = platform_get_irq_byname(pdev, "edma-err");
+ if (irq != -ENXIO)
+ free_irq(irq, mcf_edma);
+}
+
+static int mcf_edma_probe(struct platform_device *pdev)
+{
+ struct mcf_edma_platform_data *pdata;
+ struct fsl_edma_engine *mcf_edma;
+ struct fsl_edma_chan *mcf_chan;
+ struct edma_regs *regs;
+ struct resource *res;
+ int ret, i, len, chans;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -EINVAL;
+ }
+
+ chans = pdata->dma_channels;
+ len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
+ mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!mcf_edma)
+ return -ENOMEM;
+
+ mcf_edma->n_chans = chans;
+
+ /* Set up version for ColdFire edma */
+ mcf_edma->version = v2;
+ mcf_edma->big_endian = 1;
+
+ if (!mcf_edma->n_chans) {
+ dev_info(&pdev->dev, "setting default channel number to 64");
+ mcf_edma->n_chans = 64;
+ }
+
+ mutex_init(&mcf_edma->fsl_edma_mutex);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcf_edma->membase))
+ return PTR_ERR(mcf_edma->membase);
+
+ fsl_edma_setup_regs(mcf_edma);
+ regs = &mcf_edma->regs;
+
+ INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
+ for (i = 0; i < mcf_edma->n_chans; i++) {
+ struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
+
+ mcf_chan->edma = mcf_edma;
+ mcf_chan->slave_id = i;
+ mcf_chan->idle = true;
+ mcf_chan->vchan.desc_free = fsl_edma_free_desc;
+ vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
+ iowrite32(0x0, &regs->tcd[i].csr);
+ }
+
+ iowrite32(~0, regs->inth);
+ iowrite32(~0, regs->intl);
+
+ ret = mcf_edma_irq_init(pdev, mcf_edma);
+ if (ret)
+ return ret;
+
+ dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);
+
+ mcf_edma->dma_dev.dev = &pdev->dev;
+ mcf_edma->dma_dev.device_alloc_chan_resources =
+ fsl_edma_alloc_chan_resources;
+ mcf_edma->dma_dev.device_free_chan_resources =
+ fsl_edma_free_chan_resources;
+ mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
+ mcf_edma->dma_dev.device_prep_dma_cyclic =
+ fsl_edma_prep_dma_cyclic;
+ mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
+ mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
+ mcf_edma->dma_dev.device_pause = fsl_edma_pause;
+ mcf_edma->dma_dev.device_resume = fsl_edma_resume;
+ mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
+ mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
+
+ mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
+ mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+ mcf_edma->dma_dev.directions =
+ BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
+ mcf_edma->dma_dev.filter.map = pdata->slave_map;
+ mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;
+
+ platform_set_drvdata(pdev, mcf_edma);
+
+ ret = dma_async_device_register(&mcf_edma->dma_dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't register Freescale eDMA engine. (%d)\n", ret);
+ return ret;
+ }
+
+ /* Enable round robin arbitration */
+ iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+
+ return 0;
+}
+
+static int mcf_edma_remove(struct platform_device *pdev)
+{
+ struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);
+
+ mcf_edma_irq_free(pdev, mcf_edma);
+ fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
+ dma_async_device_unregister(&mcf_edma->dma_dev);
+
+ return 0;
+}
+
+static struct platform_driver mcf_edma_driver = {
+ .driver = {
+ .name = "mcf-edma",
+ },
+ .probe = mcf_edma_probe,
+ .remove = mcf_edma_remove,
+};
+
+bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &mcf_edma_driver.driver) {
+ struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
+
+ return (mcf_chan->slave_id == (uintptr_t)param);
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(mcf_edma_filter_fn);
+
+static int __init mcf_edma_init(void)
+{
+ return platform_driver_register(&mcf_edma_driver);
+}
+subsys_initcall(mcf_edma_init);
+
+static void __exit mcf_edma_exit(void)
+{
+ platform_driver_unregister(&mcf_edma_driver);
+}
+module_exit(mcf_edma_exit);
+
+MODULE_ALIAS("platform:mcf-edma");
+MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
+MODULE_LICENSE("GPL v2");
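mcf_edma_filter_fn() is exported so board code or a peripheral driver can claim a specific eDMA channel by slave id. A sketch of consumer-side use follows; the channel number is hypothetical, the surrounding function is not part of this patch, and it assumes the platform-data header exposes the filter prototype as the patch implies:

#include <linux/types.h>
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-mcf-edma.h>

static struct dma_chan *claim_edma_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* request the channel whose slave_id is 17 (hypothetical) */
	return dma_request_channel(mask, mcf_edma_filter_fn,
				   (void *)(uintptr_t)17);
}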
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 13c68b6434ce..0c56faa03e9a 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -116,6 +116,7 @@ struct mmp_tdma_chan {
u32 burst_sz;
enum dma_slave_buswidth buswidth;
enum dma_status status;
+ struct dma_slave_config slave_config;
int idx;
enum mmp_tdma_type type;
@@ -139,6 +140,10 @@ struct mmp_tdma_device {
#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
+static int mmp_tdma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *dmaengine_cfg);
+
static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
{
writel(phys, tdmac->reg_base + TDNDPR);
@@ -442,6 +447,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (!desc)
goto err_out;
+ mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+
while (buf < buf_len) {
desc = &tdmac->desc_arr[i];
@@ -495,7 +502,18 @@ static int mmp_tdma_config(struct dma_chan *chan,
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
+
+ return 0;
+}
+
+static int mmp_tdma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *dmaengine_cfg)
+{
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+ if (dir == DMA_DEV_TO_MEM) {
tdmac->dev_addr = dmaengine_cfg->src_addr;
tdmac->burst_sz = dmaengine_cfg->src_maxburst;
tdmac->buswidth = dmaengine_cfg->src_addr_width;
@@ -504,7 +522,7 @@ static int mmp_tdma_config(struct dma_chan *chan,
tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
tdmac->buswidth = dmaengine_cfg->dst_addr_width;
}
- tdmac->dir = dmaengine_cfg->direction;
+ tdmac->dir = dir;
return mmp_tdma_config_chan(chan);
}
@@ -530,9 +548,6 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
static int mmp_tdma_remove(struct platform_device *pdev)
{
- struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
-
- dma_async_device_unregister(&tdev->device);
return 0;
}
@@ -696,7 +711,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
platform_set_drvdata(pdev, tdev);
- ret = dma_async_device_register(&tdev->device);
+ ret = dmaenginem_async_device_register(&tdev->device);
if (ret) {
dev_err(tdev->device.dev, "unable to register\n");
return ret;
@@ -708,7 +723,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (ret) {
dev_err(tdev->device.dev,
"failed to register controller\n");
- dma_async_device_unregister(&tdev->device);
+ return ret;
}
}
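The switch to dmaenginem_async_device_register() here (and in mxs-dma below) moves unregistration under devres: the core unregisters the DMA device automatically when the driver unbinds, which is why the error path and mmp_tdma_remove() lose their explicit dma_async_device_unregister() calls. The resulting probe shape, as a sketch with an illustrative driver rather than mmp_tdma itself:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int foo_dma_probe(struct platform_device *pdev)
{
	struct dma_device *dd;
	int ret;

	dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);
	/* ... capabilities, channels and callbacks filled in here ... */

	ret = dmaenginem_async_device_register(dd);
	if (ret)
		return ret;

	/* any later failure can simply return: devres unwinds it */
	return 0;
}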
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 969534c1a6c6..7f595355fb79 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -348,9 +348,9 @@ static void mv_xor_tasklet(unsigned long data)
{
struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
- spin_lock_bh(&chan->lock);
+ spin_lock(&chan->lock);
mv_chan_slot_cleanup(chan);
- spin_unlock_bh(&chan->lock);
+ spin_unlock(&chan->lock);
}
static struct mv_xor_desc_slot *
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index ae5182ff0128..35193b31a9e0 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -847,7 +847,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
- ret = dma_async_device_register(&mxs_dma->dma_device);
+ ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
if (ret) {
dev_err(mxs_dma->dma_device.dev, "unable to register\n");
return ret;
@@ -857,7 +857,6 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
if (ret) {
dev_err(mxs_dma->dma_device.dev,
"failed to register controller\n");
- dma_async_device_unregister(&mxs_dma->dma_device);
}
dev_info(mxs_dma->dma_device.dev, "initialized\n");
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 8c7b2e8703da..a67b292190f4 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/bitmap.h>
@@ -1095,8 +1092,8 @@ static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
if (!dchan)
return NULL;
- dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__,
- dma_spec->np->name);
+ dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n", __func__,
+ dma_spec->np);
chan = nbpf_to_chan(dchan);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 7812a6338acd..90bbcef99ef8 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -21,6 +21,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/slab.h>
#include "virt-dma.h"
@@ -161,10 +162,12 @@ struct owl_dma_lli {
* struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
* @vd: virtual DMA descriptor
* @lli_list: link list of lli nodes
+ * @cyclic: flag to indicate cyclic transfers
*/
struct owl_dma_txd {
struct virt_dma_desc vd;
struct list_head lli_list;
+ bool cyclic;
};
/**
@@ -186,11 +189,15 @@ struct owl_dma_pchan {
* @vc: wrapped virtual channel
* @pchan: the physical channel utilized by this channel
* @txd: active transaction on this channel
+ * @cfg: slave configuration for this channel
+ * @drq: physical DMA request ID for this channel
*/
struct owl_dma_vchan {
struct virt_dma_chan vc;
struct owl_dma_pchan *pchan;
struct owl_dma_txd *txd;
+ struct dma_slave_config cfg;
+ u8 drq;
};
/**
@@ -200,6 +207,7 @@ struct owl_dma_vchan {
* @clk: clock for the DMA controller
* @lock: a lock to use when changing DMA controller global registers
* @lli_pool: a pool for the LLI descriptors
+ * @irq: interrupt ID for the DMA controller
* @nr_pchans: the number of physical channels
* @pchans: array of data for the physical channels
* @nr_vchans: the number of virtual channels
@@ -336,9 +344,11 @@ static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
struct owl_dma_lli *prev,
- struct owl_dma_lli *next)
+ struct owl_dma_lli *next,
+ bool is_cyclic)
{
- list_add_tail(&next->node, &txd->lli_list);
+ if (!is_cyclic)
+ list_add_tail(&next->node, &txd->lli_list);
if (prev) {
prev->hw.next_lli = next->phys;
@@ -351,7 +361,9 @@ static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
struct owl_dma_lli *lli,
dma_addr_t src, dma_addr_t dst,
- u32 len, enum dma_transfer_direction dir)
+ u32 len, enum dma_transfer_direction dir,
+ struct dma_slave_config *sconfig,
+ bool is_cyclic)
{
struct owl_dma_lli_hw *hw = &lli->hw;
u32 mode;
@@ -365,6 +377,32 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
OWL_DMA_MODE_DAM_INC;
break;
+ case DMA_MEM_TO_DEV:
+ mode |= OWL_DMA_MODE_TS(vchan->drq)
+ | OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
+ | OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;
+
+ /*
+ * Hardware only supports 32bit and 8bit buswidth. Since the
+ * default is 32bit, select 8bit only when requested.
+ */
+ if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+ mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+ break;
+ case DMA_DEV_TO_MEM:
+ mode |= OWL_DMA_MODE_TS(vchan->drq)
+ | OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
+ | OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;
+
+ /*
+ * Hardware only supports 32bit and 8bit buswidth. Since the
+ * default is 32bit, select 8bit only when requested.
+ */
+ if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+ mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+ break;
default:
return -EINVAL;
}
@@ -381,7 +419,10 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
OWL_DMA_LLC_SAV_LOAD_NEXT |
OWL_DMA_LLC_DAV_LOAD_NEXT);
- hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
+ if (is_cyclic)
+ hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
+ else
+ hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
return 0;
}
@@ -443,6 +484,16 @@ static void owl_dma_terminate_pchan(struct owl_dma *od,
spin_unlock_irqrestore(&od->lock, flags);
}
+static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
+{
+ pchan_writel(pchan, 1, OWL_DMAX_PAUSE);
+}
+
+static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
+{
+ pchan_writel(pchan, 0, OWL_DMAX_PAUSE);
+}
+
static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
@@ -464,7 +515,10 @@ static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
lli = list_first_entry(&txd->lli_list,
struct owl_dma_lli, node);
- int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
+ if (txd->cyclic)
+ int_ctl = OWL_DMA_INTCTL_BLOCK;
+ else
+ int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
@@ -627,6 +681,54 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static int owl_dma_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+
+ /* Reject definitely invalid configurations */
+ if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));
+
+ return 0;
+}
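
A consumer reaches owl_dma_config() through the generic dmaengine wrapper. A minimal sketch of the client side, assuming a channel already requested and an illustrative peripheral FIFO address (ret, chan and fifo_phys are not from this patch):

	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_phys,	/* assumed FIFO bus address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};

	/* Routed to owl_dma_config() via od->dma.device_config */
	ret = dmaengine_slave_config(chan, &cfg);
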
+
+static int owl_dma_pause(struct dma_chan *chan)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ unsigned long flags;
+
+ /* Mirror owl_dma_resume(): nothing to pause on an inactive channel */
+ if (!vchan->pchan && !vchan->txd)
+ return 0;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ owl_dma_pause_pchan(vchan->pchan);
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ return 0;
+}
+
+static int owl_dma_resume(struct dma_chan *chan)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ unsigned long flags;
+
+ if (!vchan->pchan && !vchan->txd)
+ return 0;
+
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ owl_dma_resume_pchan(vchan->pchan);
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ return 0;
+}
+
static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
struct owl_dma_pchan *pchan;
@@ -754,13 +856,14 @@ static struct dma_async_tx_descriptor
bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
- bytes, DMA_MEM_TO_MEM);
+ bytes, DMA_MEM_TO_MEM,
+ &vchan->cfg, txd->cyclic);
if (ret) {
dev_warn(chan2dev(chan), "failed to config lli\n");
goto err_txd_free;
}
- prev = owl_dma_add_lli(txd, prev, lli);
+ prev = owl_dma_add_lli(txd, prev, lli, false);
}
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
@@ -770,6 +873,133 @@ err_txd_free:
return NULL;
}
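
The memcpy loop above emits one LLI per hardware frame of at most OWL_DMA_FRAME_MAX_LENGTH bytes. A sketch of the resulting descriptor count, with the limit treated as an opaque driver constant:

	/* Number of LLIs owl_dma_prep_memcpy() will allocate for 'len' bytes */
	size_t nr_lli = DIV_ROUND_UP(len, OWL_DMA_FRAME_MAX_LENGTH);
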
+static struct dma_async_tx_descriptor
+ *owl_dma_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct owl_dma *od = to_owl_dma(chan->device);
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct owl_dma_txd *txd;
+ struct owl_dma_lli *lli, *prev = NULL;
+ struct scatterlist *sg;
+ dma_addr_t addr, src = 0, dst = 0;
+ size_t len;
+ int ret, i;
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ INIT_LIST_HEAD(&txd->lli_list);
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if (len > OWL_DMA_FRAME_MAX_LENGTH) {
+ dev_err(od->dma.dev,
+ "frame length exceeds max supported length");
+ goto err_txd_free;
+ }
+
+ lli = owl_dma_alloc_lli(od);
+ if (!lli) {
+ dev_err(chan2dev(chan), "failed to allocate lli\n");
+ goto err_txd_free;
+ }
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = sconfig->dst_addr;
+ } else {
+ src = sconfig->src_addr;
+ dst = addr;
+ }
+
+ ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
+ txd->cyclic);
+ if (ret) {
+ dev_warn(chan2dev(chan), "failed to config lli");
+ goto err_txd_free;
+ }
+
+ prev = owl_dma_add_lli(txd, prev, lli, false);
+ }
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+ owl_dma_free_txd(od, txd);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor
+ *owl_prep_dma_cyclic(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct owl_dma *od = to_owl_dma(chan->device);
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct owl_dma_txd *txd;
+ struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
+ dma_addr_t src = 0, dst = 0;
+ unsigned int periods = buf_len / period_len;
+ int ret, i;
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ INIT_LIST_HEAD(&txd->lli_list);
+ txd->cyclic = true;
+
+ for (i = 0; i < periods; i++) {
+ lli = owl_dma_alloc_lli(od);
+ if (!lli) {
+ dev_warn(chan2dev(chan), "failed to allocate lli");
+ goto err_txd_free;
+ }
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = buf_addr + (period_len * i);
+ dst = sconfig->dst_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = sconfig->src_addr;
+ dst = buf_addr + (period_len * i);
+ }
+
+ ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
+ dir, sconfig, txd->cyclic);
+ if (ret) {
+ dev_warn(chan2dev(chan), "failed to config lli");
+ goto err_txd_free;
+ }
+
+ if (!first)
+ first = lli;
+
+ prev = owl_dma_add_lli(txd, prev, lli, false);
+ }
+
+ /* close the cyclic list */
+ owl_dma_add_lli(txd, prev, first, true);
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+ owl_dma_free_txd(od, txd);
+
+ return NULL;
+}
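
Cyclic descriptors like the one built above back ring buffers, audio being the typical user; each period completion raises a block interrupt rather than a super-block one. A minimal consumer-side sketch, with the period count and callback name purely illustrative:

	/* 4-period ring; callback fires once per period via OWL_DMA_INTCTL_BLOCK */
	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * period_len,
					 period_len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (desc) {
		desc->callback = period_done;	/* assumed completion hook */
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}
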
+
static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
struct owl_dma_vchan *vchan = to_owl_vchan(chan);
@@ -790,6 +1020,27 @@ static inline void owl_dma_free(struct owl_dma *od)
}
}
+static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct owl_dma *od = ofdma->of_dma_data;
+ struct owl_dma_vchan *vchan;
+ struct dma_chan *chan;
+ u8 drq = dma_spec->args[0];
+
+ if (drq > od->nr_vchans)
+ return NULL;
+
+ chan = dma_get_any_slave_channel(&od->dma);
+ if (!chan)
+ return NULL;
+
+ vchan = to_owl_vchan(chan);
+ vchan->drq = drq;
+
+ return chan;
+}
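
The xlate callback consumes a single-cell specifier: the cell value becomes vchan->drq, the request line later programmed via OWL_DMA_MODE_TS(). Sketch of the client side, with the cell value and name illustrative only:

	/*
	 * DT (illustrative):
	 *   dmas = <&dma 2>;
	 *   dma-names = "tx";
	 * The '2' arrives here as dma_spec->args[0].
	 */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);
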
+
static int owl_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -833,12 +1084,19 @@ static int owl_dma_probe(struct platform_device *pdev)
spin_lock_init(&od->lock);
dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
+ dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
od->dma.dev = &pdev->dev;
od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
od->dma.device_tx_status = owl_dma_tx_status;
od->dma.device_issue_pending = owl_dma_issue_pending;
od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
+ od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
+ od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
+ od->dma.device_config = owl_dma_config;
+ od->dma.device_pause = owl_dma_pause;
+ od->dma.device_resume = owl_dma_resume;
od->dma.device_terminate_all = owl_dma_terminate_all;
od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -910,8 +1168,18 @@ static int owl_dma_probe(struct platform_device *pdev)
goto err_pool_free;
}
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ owl_dma_of_xlate, od);
+ if (ret) {
+ dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+ goto err_dma_unregister;
+ }
+
return 0;
+err_dma_unregister:
+ dma_async_device_unregister(&od->dma);
err_pool_free:
clk_disable_unprepare(od->clk);
dma_pool_destroy(od->lli_pool);
@@ -923,6 +1191,7 @@ static int owl_dma_remove(struct platform_device *pdev)
{
struct owl_dma *od = platform_get_drvdata(pdev);
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&od->dma);
/* Mask all interrupts for this execution environment */
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 4cf0d4d0cecf..25610286979f 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4360,7 +4360,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
}
static DRIVER_ATTR_RW(enable);
-static ssize_t poly_store(struct device_driver *dev, char *buf)
+static ssize_t poly_show(struct device_driver *dev, char *buf)
{
ssize_t size = 0;
u32 reg;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index b31c28b67ad3..825725057e00 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1285,7 +1285,6 @@ static int pxad_remove(struct platform_device *op)
pxad_cleanup_debugfs(pdev);
pxad_free_channels(&pdev->slave);
- dma_async_device_unregister(&pdev->slave);
return 0;
}
@@ -1396,7 +1395,7 @@ static int pxad_init_dmadev(struct platform_device *op,
init_waitqueue_head(&c->wq_state);
}
- return dma_async_device_register(&pdev->slave);
+ return dmaenginem_async_device_register(&pdev->slave);
}
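
dmaenginem_async_device_register() is the device-managed counterpart of dma_async_device_register(): the unregister runs automatically when the device detaches, which is why the explicit call disappears from pxad_remove() above. The resulting probe-side pattern, in sketch form:

	/* Managed variant: devres issues the matching unregister on detach */
	ret = dmaenginem_async_device_register(&pdev->slave);
	if (ret)
		return ret;
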
static int pxad_probe(struct platform_device *op)
@@ -1433,7 +1432,7 @@ static int pxad_probe(struct platform_device *op)
"#dma-requests set to default 32 as missing in OF: %d",
ret);
nb_requestors = 32;
- };
+ }
} else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
nb_requestors = pdata->nb_requestors;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 48ee35e2bce6..74fa2b1a6a86 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -198,6 +198,7 @@ struct rcar_dmac {
struct dma_device engine;
struct device *dev;
void __iomem *iomem;
+ struct device_dma_parameters parms;
unsigned int n_channels;
struct rcar_dmac_chan *channels;
@@ -1792,6 +1793,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
+ dmac->dev->dma_parms = &dmac->parms;
+ dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
ret = rcar_dmac_parse_of(&pdev->dev, dmac);
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
index a1b0ef45d6a2..7459f9a13b5b 100644
--- a/drivers/dma/sh/shdma-arm.h
+++ b/drivers/dma/sh/shdma-arm.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Renesas SuperH DMA Engine support
*
* Copyright (C) 2013 Renesas Electronics, Inc.
- *
- * This is free software; you can redistribute it and/or modify it under the
- * terms of version 2 the GNU General Public License as published by the Free
- * Software Foundation.
*/
#ifndef SHDMA_ARM_H
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 6b5626e299b2..c51de498b5b4 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Dmaengine driver base library for DMA controllers, found on SH-based SoCs
*
@@ -7,10 +8,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index f999f9b0d314..be89dd894328 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SHDMA Device Tree glue
*
* Copyright (C) 2013 Renesas Electronics Inc.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c
index 96ea3828c3eb..ddc9a3578353 100644
--- a/drivers/dma/sh/shdma-r8a73a4.c
+++ b/drivers/dma/sh/shdma-r8a73a4.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
*
* Copyright (C) 2013 Renesas Electronics, Inc.
- *
- * This is free software; you can redistribute it and/or modify it under the
- * terms of version 2 the GNU General Public License as published by the Free
- * Software Foundation.
*/
#include <linux/sh_dma.h>
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 2c0a969adc9f..bfb69909bd19 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Renesas SuperH DMA Engine support
*
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
*
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#ifndef __DMA_SHDMA_H
#define __DMA_SHDMA_H
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 04a74e0a95b7..7971ea275387 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas SuperH DMA Engine support
*
@@ -8,11 +9,6 @@
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* - DMA of SuperH does not have Hardware DMA chain mode.
* - MAX DMA size is 16MB.
*
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index 69b9564dc9d9..30cc3553cb8b 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas SUDMAC support
*
@@ -8,10 +9,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 1bb1a8e09025..7f7184c3cf95 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB DMA Controller Driver
*
@@ -6,10 +7,6 @@
* based on rcar-dmac.c
* Copyright (C) 2014 Renesas Electronics Inc.
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 55df0d41355b..38d4e4f07c66 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -68,6 +68,7 @@
/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN BIT(0)
+#define SPRD_DMA_LINKLIST_EN BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET 24
#define SPRD_DMA_DONOT_WAIT_BDONE 1
@@ -103,7 +104,7 @@
#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET 21
#define SPRD_DMA_FIX_EN_OFFSET 20
-#define SPRD_DMA_LLIST_END_OFFSET 19
+#define SPRD_DMA_LLIST_END BIT(19)
#define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0)
/* SPRD_DMA_CHN_BLK_LEN register definition */
@@ -164,6 +165,7 @@ struct sprd_dma_desc {
struct sprd_dma_chn {
struct virt_dma_chan vc;
void __iomem *chn_base;
+ struct sprd_dma_linklist linklist;
struct dma_slave_config slave_cfg;
u32 chn_num;
u32 dev_id;
@@ -582,7 +584,8 @@ static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
}
static int sprd_dma_fill_desc(struct dma_chan *chan,
- struct sprd_dma_desc *sdesc,
+ struct sprd_dma_chn_hw *hw,
+ unsigned int sglen, int sg_index,
dma_addr_t src, dma_addr_t dst, u32 len,
enum dma_transfer_direction dir,
unsigned long flags,
@@ -590,7 +593,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
{
struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
- struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
u32 int_mode = flags & SPRD_DMA_INT_MASK;
int src_datawidth, dst_datawidth, src_step, dst_step;
@@ -670,12 +672,52 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
hw->trsf_step = temp;
+ /* link-list configuration */
+ if (schan->linklist.phy_addr) {
+ if (sg_index == sglen - 1)
+ hw->frg_len |= SPRD_DMA_LLIST_END;
+
+ hw->cfg |= SPRD_DMA_LINKLIST_EN;
+
+ /* link-list index */
+ temp = (sg_index + 1) % sglen;
+ /* Next link-list configuration's physical address offset */
+ temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;
+ /*
+ * Point the link-list pointer at the next link-list
+ * configuration's physical address.
+ */
+ hw->llist_ptr = schan->linklist.phy_addr + temp;
+ } else {
+ hw->llist_ptr = 0;
+ }
+
hw->frg_step = 0;
hw->src_blk_step = 0;
hw->des_blk_step = 0;
return 0;
}
+static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
+ unsigned int sglen, int sg_index,
+ dma_addr_t src, dma_addr_t dst, u32 len,
+ enum dma_transfer_direction dir,
+ unsigned long flags,
+ struct dma_slave_config *slave_cfg)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct sprd_dma_chn_hw *hw;
+
+ if (!schan->linklist.virt_addr)
+ return -EINVAL;
+
+ hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
+ sg_index * sizeof(*hw));
+
+ return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
+ dir, flags, slave_cfg);
+}
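
Each scatterlist entry gets a hardware configuration in the pre-allocated link-list memory, and llist_ptr chains them into a ring. A sketch of the pointer arithmetic used above:

	/* Bus address of the configuration following entry i (ring of n) */
	static dma_addr_t sprd_next_cfg(dma_addr_t phy, unsigned int i,
					unsigned int n)
	{
		return phy + ((i + 1) % n) * sizeof(struct sprd_dma_chn_hw)
			   + SPRD_DMA_CHN_SRC_ADDR;
	}

For n = 3 the entries point 0 -> 1 -> 2 -> 0, and the final entry additionally sets SPRD_DMA_LLIST_END in frg_len.
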
+
static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
@@ -744,10 +786,20 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 len = 0;
int ret, i;
- /* TODO: now we only support one sg for each DMA configuration. */
- if (!is_slave_direction(dir) || sglen > 1)
+ if (!is_slave_direction(dir))
return NULL;
+ if (context) {
+ struct sprd_dma_linklist *ll_cfg =
+ (struct sprd_dma_linklist *)context;
+
+ schan->linklist.phy_addr = ll_cfg->phy_addr;
+ schan->linklist.virt_addr = ll_cfg->virt_addr;
+ } else {
+ schan->linklist.phy_addr = 0;
+ schan->linklist.virt_addr = 0;
+ }
+
sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
if (!sdesc)
return NULL;
@@ -762,10 +814,25 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
src = slave_cfg->src_addr;
dst = sg_dma_address(sg);
}
+
+ /*
+ * Link-list mode needs at least two link-list
+ * configurations, so with a single sg there is no
+ * link-list configuration to fill.
+ */
+ if (sglen < 2)
+ break;
+
+ ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
+ dir, flags, slave_cfg);
+ if (ret) {
+ kfree(sdesc);
+ return NULL;
+ }
}
- ret = sprd_dma_fill_desc(chan, sdesc, src, dst, len, dir, flags,
- slave_cfg);
+ ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
+ dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index bfb79bd0c6de..07c20aa2e955 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -833,7 +833,7 @@ static int st_fdma_probe(struct platform_device *pdev)
fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- ret = dma_async_device_register(&fdev->dma_device);
+ ret = dmaenginem_async_device_register(&fdev->dma_device);
if (ret) {
dev_err(&pdev->dev,
"Failed to register DMA device (%d)\n", ret);
@@ -844,15 +844,13 @@ static int st_fdma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to register controller (%d)\n", ret);
- goto err_dma_dev;
+ goto err_rproc;
}
dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
return 0;
-err_dma_dev:
- dma_async_device_unregister(&fdev->dma_device);
err_rproc:
st_fdma_free(fdev);
st_slim_rproc_put(fdev->slim_rproc);
@@ -867,7 +865,6 @@ static int st_fdma_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, fdev->irq, fdev);
st_slim_rproc_put(fdev->slim_rproc);
of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&fdev->dma_device);
return 0;
}
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index f4edfc56f34e..5e328bd10c27 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2839,7 +2839,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
d40_ops_init(base, &base->dma_slave);
- err = dma_async_device_register(&base->dma_slave);
+ err = dmaenginem_async_device_register(&base->dma_slave);
if (err) {
d40_err(base->dev, "Failed to register slave channels\n");
@@ -2854,12 +2854,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
d40_ops_init(base, &base->dma_memcpy);
- err = dma_async_device_register(&base->dma_memcpy);
+ err = dmaenginem_async_device_register(&base->dma_memcpy);
if (err) {
d40_err(base->dev,
"Failed to register memcpy only channels\n");
- goto unregister_slave;
+ goto exit;
}
d40_chan_init(base, &base->dma_both, base->phy_chans,
@@ -2871,18 +2871,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
d40_ops_init(base, &base->dma_both);
- err = dma_async_device_register(&base->dma_both);
+ err = dmaenginem_async_device_register(&base->dma_both);
if (err) {
d40_err(base->dev,
"Failed to register logical and physical capable channels\n");
- goto unregister_memcpy;
+ goto exit;
}
return 0;
- unregister_memcpy:
- dma_async_device_unregister(&base->dma_memcpy);
- unregister_slave:
- dma_async_device_unregister(&base->dma_slave);
exit:
return err;
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 379e8d534e61..4903a408fc14 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -308,20 +308,12 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
- switch (threshold) {
- case STM32_DMA_FIFO_THRESHOLD_FULL:
- if (buf_len >= STM32_DMA_MAX_BURST)
- return true;
- else
- return false;
- case STM32_DMA_FIFO_THRESHOLD_HALFFULL:
- if (buf_len >= STM32_DMA_MAX_BURST / 2)
- return true;
- else
- return false;
- default:
- return false;
- }
+ /*
+ * The buffer or period length has to be a multiple of the FIFO
+ * section size, otherwise bytes may remain stuck in the FIFO at
+ * the end of a buffer or period.
+ */
+ return ((buf_len % ((threshold + 1) * 4)) == 0);
}
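
With the usual threshold encoding (HALFFULL = 1, FULL = 3; an assumption taken from the driver's defines, which are not shown in this hunk), (threshold + 1) * 4 is the FIFO section size in bytes, so the new check behaves like:

	/* Assuming STM32_DMA_FIFO_THRESHOLD_FULL == 3: section = 16 bytes */
	stm32_dma_is_burst_possible(64, STM32_DMA_FIFO_THRESHOLD_FULL); /* true */
	stm32_dma_is_burst_possible(24, STM32_DMA_FIFO_THRESHOLD_FULL); /* false: 24 % 16 != 0 */
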
static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 06dd1725375e..390e4cae0e1a 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1656,7 +1656,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return ret;
}
- ret = dma_async_device_register(dd);
+ ret = dmaenginem_async_device_register(dd);
if (ret)
return ret;
@@ -1674,8 +1674,6 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return 0;
err_unregister:
- dma_async_device_unregister(dd);
-
return ret;
}
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 395c698edb4d..fc0f9c8766a8 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
- td_desc->desc_list_len, DMA_MEM_TO_DEV);
+ td_desc->desc_list_len, DMA_TO_DEVICE);
return &td_desc->txd;
}
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index ab7c5a937ab0..c89d82aa2776 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -69,25 +69,6 @@ static const struct altr_sdram_prv_data a10_data = {
.ue_set_mask = A10_DIAGINT_TDERRA_MASK,
};
-static const struct altr_sdram_prv_data s10_data = {
- .ecc_ctrl_offset = S10_ECCCTRL1_OFST,
- .ecc_ctl_en_mask = A10_ECCCTRL1_ECC_EN,
- .ecc_stat_offset = S10_INTSTAT_OFST,
- .ecc_stat_ce_mask = A10_INTSTAT_SBEERR,
- .ecc_stat_ue_mask = A10_INTSTAT_DBEERR,
- .ecc_saddr_offset = S10_SERRADDR_OFST,
- .ecc_daddr_offset = S10_DERRADDR_OFST,
- .ecc_irq_en_offset = S10_ERRINTEN_OFST,
- .ecc_irq_en_mask = A10_ECC_IRQ_EN_MASK,
- .ecc_irq_clr_offset = S10_INTSTAT_OFST,
- .ecc_irq_clr_mask = (A10_INTSTAT_SBEERR | A10_INTSTAT_DBEERR),
- .ecc_cnt_rst_offset = S10_ECCCTRL1_OFST,
- .ecc_cnt_rst_mask = A10_ECC_CNT_RESET_MASK,
- .ce_ue_trgr_offset = S10_DIAGINTTEST_OFST,
- .ce_set_mask = A10_DIAGINT_TSERRA_MASK,
- .ue_set_mask = A10_DIAGINT_TDERRA_MASK,
-};
-
/*********************** EDAC Memory Controller Functions ****************/
/* The SDRAM controller uses the EDAC Memory Controller framework. */
@@ -239,7 +220,7 @@ static unsigned long get_total_mem(void)
static const struct of_device_id altr_sdram_ctrl_of_match[] = {
{ .compatible = "altr,sdram-edac", .data = &c5_data},
{ .compatible = "altr,sdram-edac-a10", .data = &a10_data},
- { .compatible = "altr,sdram-edac-s10", .data = &s10_data},
+ { .compatible = "altr,sdram-edac-s10", .data = &a10_data},
{},
};
MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
@@ -293,6 +274,7 @@ release:
return ret;
}
+static int socfpga_is_a10(void);
static int altr_sdram_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
@@ -416,7 +398,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
goto err;
/* Only the Arria10 has separate IRQs */
- if (irq2 > 0) {
+ if (socfpga_is_a10()) {
/* Arria10 specific initialization */
res = a10_init(mc_vbase);
if (res < 0)
@@ -502,8 +484,9 @@ static int s10_protected_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct arm_smccc_res result;
+ unsigned long offset = (unsigned long)context;
- arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, reg, val, 0, 0,
+ arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, offset + reg, val, 0, 0,
0, 0, 0, &result);
return (int)result.a0;
@@ -523,8 +506,9 @@ static int s10_protected_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct arm_smccc_res result;
+ unsigned long offset = (unsigned long)context;
- arm_smccc_smc(INTEL_SIP_SMC_REG_READ, reg, 0, 0, 0,
+ arm_smccc_smc(INTEL_SIP_SMC_REG_READ, offset + reg, 0, 0, 0,
0, 0, 0, &result);
*val = (unsigned int)result.a1;
@@ -532,246 +516,18 @@ static int s10_protected_reg_read(void *context, unsigned int reg,
return (int)result.a0;
}
-static bool s10_sdram_writeable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S10_ECCCTRL1_OFST:
- case S10_ERRINTEN_OFST:
- case S10_INTMODE_OFST:
- case S10_INTSTAT_OFST:
- case S10_DIAGINTTEST_OFST:
- case S10_SYSMGR_ECC_INTMASK_VAL_OFST:
- case S10_SYSMGR_ECC_INTMASK_SET_OFST:
- case S10_SYSMGR_ECC_INTMASK_CLR_OFST:
- return true;
- }
- return false;
-}
-
-static bool s10_sdram_readable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S10_ECCCTRL1_OFST:
- case S10_ERRINTEN_OFST:
- case S10_INTMODE_OFST:
- case S10_INTSTAT_OFST:
- case S10_DERRADDR_OFST:
- case S10_SERRADDR_OFST:
- case S10_DIAGINTTEST_OFST:
- case S10_SYSMGR_ECC_INTMASK_VAL_OFST:
- case S10_SYSMGR_ECC_INTMASK_SET_OFST:
- case S10_SYSMGR_ECC_INTMASK_CLR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_SERR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_DERR_OFST:
- return true;
- }
- return false;
-}
-
-static bool s10_sdram_volatile_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S10_ECCCTRL1_OFST:
- case S10_ERRINTEN_OFST:
- case S10_INTMODE_OFST:
- case S10_INTSTAT_OFST:
- case S10_DERRADDR_OFST:
- case S10_SERRADDR_OFST:
- case S10_DIAGINTTEST_OFST:
- case S10_SYSMGR_ECC_INTMASK_VAL_OFST:
- case S10_SYSMGR_ECC_INTMASK_SET_OFST:
- case S10_SYSMGR_ECC_INTMASK_CLR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_SERR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_DERR_OFST:
- return true;
- }
- return false;
-}
-
static const struct regmap_config s10_sdram_regmap_cfg = {
.name = "s10_ddr",
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0xffffffff,
- .writeable_reg = s10_sdram_writeable_reg,
- .readable_reg = s10_sdram_readable_reg,
- .volatile_reg = s10_sdram_volatile_reg,
+ .max_register = 0xffd12228,
.reg_read = s10_protected_reg_read,
.reg_write = s10_protected_reg_write,
.use_single_read = true,
.use_single_write = true,
};
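
Every access through this regmap becomes a secure-monitor call: the reg_read/reg_write hooks add the register offset to the system-manager base carried in the regmap context. Sketch of the round trip, with the base address left symbolic:

	/* context was set to (void *)base at regmap_init() time */
	unsigned int val;

	regmap_read(regmap, S10_SYSMGR_ECC_INTSTAT_DERR_OFST, &val);
	/* -> arm_smccc_smc(INTEL_SIP_SMC_REG_READ, base + 0xA0, ...) */
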
-static int altr_s10_sdram_probe(struct platform_device *pdev)
-{
- const struct of_device_id *id;
- struct edac_mc_layer layers[2];
- struct mem_ctl_info *mci;
- struct altr_sdram_mc_data *drvdata;
- const struct altr_sdram_prv_data *priv;
- struct regmap *regmap;
- struct dimm_info *dimm;
- u32 read_reg;
- int irq, ret = 0;
- unsigned long mem_size;
-
- id = of_match_device(altr_sdram_ctrl_of_match, &pdev->dev);
- if (!id)
- return -ENODEV;
-
- /* Grab specific offsets and masks for Stratix10 */
- priv = of_match_node(altr_sdram_ctrl_of_match,
- pdev->dev.of_node)->data;
-
- regmap = devm_regmap_init(&pdev->dev, NULL, (void *)priv,
- &s10_sdram_regmap_cfg);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Validate the SDRAM controller has ECC enabled */
- if (regmap_read(regmap, priv->ecc_ctrl_offset, &read_reg) ||
- ((read_reg & priv->ecc_ctl_en_mask) != priv->ecc_ctl_en_mask)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "No ECC/ECC disabled [0x%08X]\n", read_reg);
- return -ENODEV;
- }
-
- /* Grab memory size from device tree. */
- mem_size = get_total_mem();
- if (!mem_size) {
- edac_printk(KERN_ERR, EDAC_MC, "Unable to calculate memory size\n");
- return -ENODEV;
- }
-
- /* Ensure the SDRAM Interrupt is disabled */
- if (regmap_update_bits(regmap, priv->ecc_irq_en_offset,
- priv->ecc_irq_en_mask, 0)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error disabling SDRAM ECC IRQ\n");
- return -ENODEV;
- }
-
- /* Toggle to clear the SDRAM Error count */
- if (regmap_update_bits(regmap, priv->ecc_cnt_rst_offset,
- priv->ecc_cnt_rst_mask,
- priv->ecc_cnt_rst_mask)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error clearing SDRAM ECC count\n");
- return -ENODEV;
- }
-
- if (regmap_update_bits(regmap, priv->ecc_cnt_rst_offset,
- priv->ecc_cnt_rst_mask, 0)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error clearing SDRAM ECC count\n");
- return -ENODEV;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- edac_printk(KERN_ERR, EDAC_MC,
- "No irq %d in DT\n", irq);
- return -ENODEV;
- }
-
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = 1;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = 1;
- layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
- sizeof(struct altr_sdram_mc_data));
- if (!mci)
- return -ENOMEM;
-
- mci->pdev = &pdev->dev;
- drvdata = mci->pvt_info;
- drvdata->mc_vbase = regmap;
- drvdata->data = priv;
- platform_set_drvdata(pdev, mci);
-
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Unable to get managed device resource\n");
- ret = -ENOMEM;
- goto free;
- }
-
- mci->mtype_cap = MEM_FLAG_DDR3;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = dev_name(&pdev->dev);
- mci->scrub_mode = SCRUB_SW_SRC;
- mci->dev_name = dev_name(&pdev->dev);
-
- dimm = *mci->dimms;
- dimm->nr_pages = ((mem_size - 1) >> PAGE_SHIFT) + 1;
- dimm->grain = 8;
- dimm->dtype = DEV_X8;
- dimm->mtype = MEM_DDR3;
- dimm->edac_mode = EDAC_SECDED;
-
- ret = edac_mc_add_mc(mci);
- if (ret < 0)
- goto err;
-
- ret = devm_request_irq(&pdev->dev, irq, altr_sdram_mc_err_handler,
- IRQF_SHARED, dev_name(&pdev->dev), mci);
- if (ret < 0) {
- edac_mc_printk(mci, KERN_ERR,
- "Unable to request irq %d\n", irq);
- ret = -ENODEV;
- goto err2;
- }
-
- if (regmap_write(regmap, S10_SYSMGR_ECC_INTMASK_CLR_OFST,
- S10_DDR0_IRQ_MASK)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error clearing SDRAM ECC count\n");
- ret = -ENODEV;
- goto err2;
- }
-
- if (regmap_update_bits(drvdata->mc_vbase, priv->ecc_irq_en_offset,
- priv->ecc_irq_en_mask, priv->ecc_irq_en_mask)) {
- edac_mc_printk(mci, KERN_ERR,
- "Error enabling SDRAM ECC IRQ\n");
- ret = -ENODEV;
- goto err2;
- }
-
- altr_sdr_mc_create_debugfs_nodes(mci);
-
- devres_close_group(&pdev->dev, NULL);
-
- return 0;
-
-err2:
- edac_mc_del_mc(&pdev->dev);
-err:
- devres_release_group(&pdev->dev, NULL);
-free:
- edac_mc_free(mci);
- edac_printk(KERN_ERR, EDAC_MC,
- "EDAC Probe Failed; Error %d\n", ret);
-
- return ret;
-}
-
-static int altr_s10_sdram_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = platform_get_drvdata(pdev);
-
- edac_mc_del_mc(&pdev->dev);
- edac_mc_free(mci);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
/************** </Stratix10 EDAC Memory Controller Functions> ***********/
/*
@@ -805,20 +561,6 @@ static struct platform_driver altr_sdram_edac_driver = {
module_platform_driver(altr_sdram_edac_driver);
-static struct platform_driver altr_s10_sdram_edac_driver = {
- .probe = altr_s10_sdram_probe,
- .remove = altr_s10_sdram_remove,
- .driver = {
- .name = "altr_s10_sdram_edac",
-#ifdef CONFIG_PM
- .pm = &altr_sdram_pm_ops,
-#endif
- .of_match_table = altr_sdram_ctrl_of_match,
- },
-};
-
-module_platform_driver(altr_s10_sdram_edac_driver);
-
/************************* EDAC Parent Probe *************************/
static const struct of_device_id altr_edac_device_of_match[];
@@ -972,6 +714,16 @@ static const struct file_operations altr_edac_a10_device_inject_fops = {
.llseek = generic_file_llseek,
};
+static ssize_t altr_edac_a10_device_trig2(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations altr_edac_a10_device_inject2_fops = {
+ .open = simple_open,
+ .write = altr_edac_a10_device_trig2,
+ .llseek = generic_file_llseek,
+};
+
static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
const struct edac_device_prv_data *priv)
{
@@ -1253,6 +1005,16 @@ static int __maybe_unused altr_init_memory_port(void __iomem *ioaddr, int port)
return ret;
}
+static int socfpga_is_a10(void)
+{
+ return of_machine_is_compatible("altr,socfpga-arria10");
+}
+
+static int socfpga_is_s10(void)
+{
+ return of_machine_is_compatible("altr,socfpga-stratix10");
+}
+
static __init int __maybe_unused
altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
u32 ecc_ctrl_en_mask, bool dual_port)
@@ -1267,8 +1029,32 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
/* Get the ECC Manager - parent of the device EDACs */
np_eccmgr = of_get_parent(np);
- ecc_mgr_map = syscon_regmap_lookup_by_phandle(np_eccmgr,
- "altr,sysmgr-syscon");
+
+ if (socfpga_is_a10()) {
+ ecc_mgr_map = syscon_regmap_lookup_by_phandle(np_eccmgr,
+ "altr,sysmgr-syscon");
+ } else {
+ struct device_node *sysmgr_np;
+ struct resource res;
+ uintptr_t base;
+
+ sysmgr_np = of_parse_phandle(np_eccmgr,
+ "altr,sysmgr-syscon", 0);
+ if (!sysmgr_np) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to find altr,sysmgr-syscon\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(sysmgr_np, 0, &res))
+ return -ENOMEM;
+
+ /* Need physical address for SMCC call */
+ base = res.start;
+
+ ecc_mgr_map = regmap_init(NULL, NULL, (void *)base,
+ &s10_sdram_regmap_cfg);
+ }
of_node_put(np_eccmgr);
if (IS_ERR(ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -1326,11 +1112,6 @@ out:
return ret;
}
-static int socfpga_is_a10(void)
-{
- return of_machine_is_compatible("altr,socfpga-arria10");
-}
-
static int validate_parent_available(struct device_node *np);
static const struct of_device_id altr_edac_a10_device_of_match[];
static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
@@ -1338,7 +1119,7 @@ static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
int irq;
struct device_node *child, *np;
- if (!socfpga_is_a10())
+ if (!socfpga_is_a10() && !socfpga_is_s10())
return -ENODEV;
np = of_find_compatible_node(NULL, NULL,
@@ -1584,7 +1365,7 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject_fops,
+ .inject_fops = &altr_edac_a10_device_inject2_fops,
};
static int __init socfpga_init_ethernet_ecc(void)
@@ -1662,7 +1443,7 @@ static const struct edac_device_prv_data a10_usbecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject_fops,
+ .inject_fops = &altr_edac_a10_device_inject2_fops,
};
static int __init socfpga_init_usb_ecc(void)
@@ -1860,7 +1641,7 @@ static int __init socfpga_init_sdmmc_ecc(void)
int rc = -ENODEV;
struct device_node *child;
- if (!socfpga_is_a10())
+ if (!socfpga_is_a10() && !socfpga_is_s10())
return -ENODEV;
child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
@@ -1944,6 +1725,74 @@ static ssize_t altr_edac_a10_device_trig(struct file *file,
writel(priv->ue_set_mask, set_addr);
else
writel(priv->ce_set_mask, set_addr);
+
+ /* Ensure the interrupt test bits are set */
+ wmb();
+ local_irq_restore(flags);
+
+ return count;
+}
+
+/*
+ * The Stratix10 EDAC error injection functions differ slightly from the
+ * Arria10 ones; a few Arria10 peripherals can also use this injection
+ * function. Inject the error into memory, then read it back to trigger
+ * the IRQ.
+ */
+static ssize_t altr_edac_a10_device_trig2(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dci = file->private_data;
+ struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+ const struct edac_device_prv_data *priv = drvdata->data;
+ void __iomem *set_addr = (drvdata->base + priv->set_err_ofst);
+ unsigned long flags;
+ u8 trig_type;
+
+ if (!user_buf || get_user(trig_type, user_buf))
+ return -EFAULT;
+
+ local_irq_save(flags);
+ if (trig_type == ALTR_UE_TRIGGER_CHAR) {
+ writel(priv->ue_set_mask, set_addr);
+ } else {
+ /* Setup write of 0 to first 4 bytes */
+ writel(0x0, drvdata->base + ECC_BLK_WDATA0_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
+ /* Setup write of 4 bytes */
+ writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
+ /* Setup Address to 0 */
+ writel(0x0, drvdata->base + ECC_BLK_ADDRESS_OFST);
+ /* Setup accctrl to write & data override */
+ writel(ECC_WRITE_DOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ /* Setup accctrl to read & ecc override */
+ writel(ECC_READ_EOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ /* Setup write for single bit change */
+ writel(0x1, drvdata->base + ECC_BLK_WDATA0_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
+ /* Copy Read ECC to Write ECC */
+ writel(readl(drvdata->base + ECC_BLK_RECC0_OFST),
+ drvdata->base + ECC_BLK_WECC0_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RECC1_OFST),
+ drvdata->base + ECC_BLK_WECC1_OFST);
+ /* Setup accctrl to write & ecc override & data override */
+ writel(ECC_WRITE_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ /* Setup accctrl to read & ecc overwrite & data overwrite */
+ writel(ECC_READ_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ }
+
/* Ensure the interrupt test bits are set */
wmb();
local_irq_restore(flags);
@@ -2147,6 +1996,35 @@ static const struct irq_domain_ops a10_eccmgr_ic_ops = {
.xlate = irq_domain_xlate_twocell,
};
+/************** Stratix 10 EDAC Double Bit Error Handler ************/
+#define to_a10edac(p, m) container_of(p, struct altr_arria10_edac, m)
+
+/*
+ * The double-bit error is handled through SError, which is fatal. This
+ * handler is registered as a panic notifier to print the ECC error info
+ * as part of the panic output.
+ */
+static int s10_edac_dberr_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct altr_arria10_edac *edac = to_a10edac(this, panic_notifier);
+ int err_addr, dberror;
+
+ regmap_read(edac->ecc_mgr_map, S10_SYSMGR_ECC_INTSTAT_DERR_OFST,
+ &dberror);
+ regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST, dberror);
+ if (dberror & S10_DDR0_IRQ_MASK) {
+ regmap_read(edac->ecc_mgr_map, A10_DERRADDR_OFST, &err_addr);
+ regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST,
+ err_addr);
+ edac_printk(KERN_ERR, EDAC_MC,
+ "EDAC: [Uncorrectable errors @ 0x%08X]\n\n",
+ err_addr);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/****************** Arria 10 EDAC Probe Function *********************/
static int altr_edac_a10_probe(struct platform_device *pdev)
{
struct altr_arria10_edac *edac;
@@ -2160,8 +2038,34 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, edac);
INIT_LIST_HEAD(&edac->a10_ecc_devices);
- edac->ecc_mgr_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ if (socfpga_is_a10()) {
+ edac->ecc_mgr_map =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"altr,sysmgr-syscon");
+ } else {
+ struct device_node *sysmgr_np;
+ struct resource res;
+ uintptr_t base;
+
+ sysmgr_np = of_parse_phandle(pdev->dev.of_node,
+ "altr,sysmgr-syscon", 0);
+ if (!sysmgr_np) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to find altr,sysmgr-syscon\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(sysmgr_np, 0, &res))
+ return -ENOMEM;
+
+ /* Need physical address for SMCC call */
+ base = res.start;
+
+ edac->ecc_mgr_map = devm_regmap_init(&pdev->dev, NULL,
+ (void *)base,
+ &s10_sdram_regmap_cfg);
+ }
+
if (IS_ERR(edac->ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Unable to get syscon altr,sysmgr-syscon\n");
@@ -2188,14 +2092,38 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
altr_edac_a10_irq_handler,
edac);
- edac->db_irq = platform_get_irq(pdev, 1);
- if (edac->db_irq < 0) {
- dev_err(&pdev->dev, "No DBERR IRQ resource\n");
- return edac->db_irq;
+ if (socfpga_is_a10()) {
+ edac->db_irq = platform_get_irq(pdev, 1);
+ if (edac->db_irq < 0) {
+ dev_err(&pdev->dev, "No DBERR IRQ resource\n");
+ return edac->db_irq;
+ }
+ irq_set_chained_handler_and_data(edac->db_irq,
+ altr_edac_a10_irq_handler,
+ edac);
+ } else {
+ int dberror, err_addr;
+
+ edac->panic_notifier.notifier_call = s10_edac_dberr_handler;
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &edac->panic_notifier);
+
+ /* Print a message if an uncorrectable error occurred previously. */
+ regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST,
+ &dberror);
+ if (dberror) {
+ regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST,
+ &err_addr);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Previous Boot UE detected[0x%X] @ 0x%X\n",
+ dberror, err_addr);
+ /* Reset the sticky registers */
+ regmap_write(edac->ecc_mgr_map,
+ S10_SYSMGR_UE_VAL_OFST, 0);
+ regmap_write(edac->ecc_mgr_map,
+ S10_SYSMGR_UE_ADDR_OFST, 0);
+ }
}
- irq_set_chained_handler_and_data(edac->db_irq,
- altr_edac_a10_irq_handler,
- edac);
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
@@ -2212,7 +2140,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
altr_edac_a10_device_add(edac, child);
- else if (of_device_is_compatible(child, "altr,sdram-edac-a10"))
+ else if ((of_device_is_compatible(child, "altr,sdram-edac-a10")) ||
+ (of_device_is_compatible(child, "altr,sdram-edac-s10")))
of_platform_populate(pdev->dev.of_node,
altr_sdram_ctrl_of_match,
NULL, &pdev->dev);
@@ -2223,6 +2152,7 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
static const struct of_device_id altr_edac_a10_of_match[] = {
{ .compatible = "altr,socfpga-a10-ecc-manager" },
+ { .compatible = "altr,socfpga-s10-ecc-manager" },
{},
};
MODULE_DEVICE_TABLE(of, altr_edac_a10_of_match);
@@ -2236,171 +2166,6 @@ static struct platform_driver altr_edac_a10_driver = {
};
module_platform_driver(altr_edac_a10_driver);
-/************** Stratix 10 EDAC Device Controller Functions> ************/
-
-#define to_s10edac(p, m) container_of(p, struct altr_stratix10_edac, m)
-
-/*
- * The double bit error is handled through SError which is fatal. This is
- * called as a panic notifier to printout ECC error info as part of the panic.
- */
-static int s10_edac_dberr_handler(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct altr_stratix10_edac *edac = to_s10edac(this, panic_notifier);
- int err_addr, dberror;
-
- s10_protected_reg_read(edac, S10_SYSMGR_ECC_INTSTAT_DERR_OFST,
- &dberror);
- /* Remember the UE Errors for a reboot */
- s10_protected_reg_write(edac, S10_SYSMGR_UE_VAL_OFST, dberror);
- if (dberror & S10_DDR0_IRQ_MASK) {
- s10_protected_reg_read(edac, S10_DERRADDR_OFST, &err_addr);
- /* Remember the UE Error address */
- s10_protected_reg_write(edac, S10_SYSMGR_UE_ADDR_OFST,
- err_addr);
- edac_printk(KERN_ERR, EDAC_MC,
- "EDAC: [Uncorrectable errors @ 0x%08X]\n\n",
- err_addr);
- }
-
- return NOTIFY_DONE;
-}
-
-static void altr_edac_s10_irq_handler(struct irq_desc *desc)
-{
- struct altr_stratix10_edac *edac = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int irq = irq_desc_get_irq(desc);
- int bit, sm_offset, irq_status;
-
- sm_offset = S10_SYSMGR_ECC_INTSTAT_SERR_OFST;
-
- chained_irq_enter(chip, desc);
-
- s10_protected_reg_read(NULL, sm_offset, &irq_status);
-
- for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
- irq = irq_linear_revmap(edac->domain, bit);
- if (irq)
- generic_handle_irq(irq);
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void s10_eccmgr_irq_mask(struct irq_data *d)
-{
- struct altr_stratix10_edac *edac = irq_data_get_irq_chip_data(d);
-
- s10_protected_reg_write(edac, S10_SYSMGR_ECC_INTMASK_SET_OFST,
- BIT(d->hwirq));
-}
-
-static void s10_eccmgr_irq_unmask(struct irq_data *d)
-{
- struct altr_stratix10_edac *edac = irq_data_get_irq_chip_data(d);
-
- s10_protected_reg_write(edac, S10_SYSMGR_ECC_INTMASK_CLR_OFST,
- BIT(d->hwirq));
-}
-
-static int s10_eccmgr_irqdomain_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct altr_stratix10_edac *edac = d->host_data;
-
- irq_set_chip_and_handler(irq, &edac->irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, edac);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops s10_eccmgr_ic_ops = {
- .map = s10_eccmgr_irqdomain_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static int altr_edac_s10_probe(struct platform_device *pdev)
-{
- struct altr_stratix10_edac *edac;
- struct device_node *child;
- int dberror, err_addr;
-
- edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL);
- if (!edac)
- return -ENOMEM;
-
- edac->dev = &pdev->dev;
- platform_set_drvdata(pdev, edac);
- INIT_LIST_HEAD(&edac->s10_ecc_devices);
-
- edac->irq_chip.name = pdev->dev.of_node->name;
- edac->irq_chip.irq_mask = s10_eccmgr_irq_mask;
- edac->irq_chip.irq_unmask = s10_eccmgr_irq_unmask;
- edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64,
- &s10_eccmgr_ic_ops, edac);
- if (!edac->domain) {
- dev_err(&pdev->dev, "Error adding IRQ domain\n");
- return -ENOMEM;
- }
-
- edac->sb_irq = platform_get_irq(pdev, 0);
- if (edac->sb_irq < 0) {
- dev_err(&pdev->dev, "No SBERR IRQ resource\n");
- return edac->sb_irq;
- }
-
- irq_set_chained_handler_and_data(edac->sb_irq,
- altr_edac_s10_irq_handler,
- edac);
-
- edac->panic_notifier.notifier_call = s10_edac_dberr_handler;
- atomic_notifier_chain_register(&panic_notifier_list,
- &edac->panic_notifier);
-
- /* Printout a message if uncorrectable error previously. */
- s10_protected_reg_read(edac, S10_SYSMGR_UE_VAL_OFST, &dberror);
- if (dberror) {
- s10_protected_reg_read(edac, S10_SYSMGR_UE_ADDR_OFST,
- &err_addr);
- edac_printk(KERN_ERR, EDAC_DEVICE,
- "Previous Boot UE detected[0x%X] @ 0x%X\n",
- dberror, err_addr);
- /* Reset the sticky registers */
- s10_protected_reg_write(edac, S10_SYSMGR_UE_VAL_OFST, 0);
- s10_protected_reg_write(edac, S10_SYSMGR_UE_ADDR_OFST, 0);
- }
-
- for_each_child_of_node(pdev->dev.of_node, child) {
- if (!of_device_is_available(child))
- continue;
-
- if (of_device_is_compatible(child, "altr,sdram-edac-s10"))
- of_platform_populate(pdev->dev.of_node,
- altr_sdram_ctrl_of_match,
- NULL, &pdev->dev);
- }
-
- return 0;
-}
-
-static const struct of_device_id altr_edac_s10_of_match[] = {
- { .compatible = "altr,socfpga-s10-ecc-manager" },
- {},
-};
-MODULE_DEVICE_TABLE(of, altr_edac_s10_of_match);
-
-static struct platform_driver altr_edac_s10_driver = {
- .probe = altr_edac_s10_probe,
- .driver = {
- .name = "socfpga_s10_ecc_manager",
- .of_match_table = altr_edac_s10_of_match,
- },
-};
-module_platform_driver(altr_edac_s10_driver);
-
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thor Thayer");
MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 81f0554e09de..4213cb0bb2a7 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -156,34 +156,6 @@
#define A10_INTMASK_CLR_OFST 0x10
#define A10_DDR0_IRQ_MASK BIT(17)
-/************* Stratix10 Defines **************/
-
-/* SDRAM Controller EccCtrl Register */
-#define S10_ECCCTRL1_OFST 0xF8011100
-
-/* SDRAM Controller DRAM IRQ Register */
-#define S10_ERRINTEN_OFST 0xF8011110
-
-/* SDRAM Interrupt Mode Register */
-#define S10_INTMODE_OFST 0xF801111C
-
-/* SDRAM Controller Error Status Register */
-#define S10_INTSTAT_OFST 0xF8011120
-
-/* SDRAM Controller ECC Error Address Register */
-#define S10_DERRADDR_OFST 0xF801112C
-#define S10_SERRADDR_OFST 0xF8011130
-
-/* SDRAM Controller ECC Diagnostic Register */
-#define S10_DIAGINTTEST_OFST 0xF8011124
-
-/* SDRAM Single Bit Error Count Compare Set Register */
-#define S10_SERRCNTREG_OFST 0xF801113C
-
-/* Sticky registers for Uncorrected Errors */
-#define S10_SYSMGR_UE_VAL_OFST 0xFFD12220
-#define S10_SYSMGR_UE_ADDR_OFST 0xFFD12224
-
struct altr_sdram_prv_data {
int ecc_ctrl_offset;
int ecc_ctl_en_mask;
@@ -319,15 +291,40 @@ struct altr_sdram_mc_data {
/************* Stratix10 Defines **************/
/* Stratix10 ECC Manager Defines */
-#define S10_SYSMGR_ECC_INTMASK_VAL_OFST 0xFFD12090
-#define S10_SYSMGR_ECC_INTMASK_SET_OFST 0xFFD12094
-#define S10_SYSMGR_ECC_INTMASK_CLR_OFST 0xFFD12098
+#define S10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
+#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
-#define S10_SYSMGR_ECC_INTSTAT_SERR_OFST 0xFFD1209C
-#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xFFD120A0
+/* Sticky registers for Uncorrected Errors */
+#define S10_SYSMGR_UE_VAL_OFST 0x120
+#define S10_SYSMGR_UE_ADDR_OFST 0x124
#define S10_DDR0_IRQ_MASK BIT(16)
+/* Define ECC Block Offsets for peripherals */
+#define ECC_BLK_ADDRESS_OFST 0x40
+#define ECC_BLK_RDATA0_OFST 0x44
+#define ECC_BLK_RDATA1_OFST 0x48
+#define ECC_BLK_RDATA2_OFST 0x4C
+#define ECC_BLK_RDATA3_OFST 0x50
+#define ECC_BLK_WDATA0_OFST 0x54
+#define ECC_BLK_WDATA1_OFST 0x58
+#define ECC_BLK_WDATA2_OFST 0x5C
+#define ECC_BLK_WDATA3_OFST 0x60
+#define ECC_BLK_RECC0_OFST 0x64
+#define ECC_BLK_RECC1_OFST 0x68
+#define ECC_BLK_WECC0_OFST 0x6C
+#define ECC_BLK_WECC1_OFST 0x70
+#define ECC_BLK_DBYTECTRL_OFST 0x74
+#define ECC_BLK_ACCCTRL_OFST 0x78
+#define ECC_BLK_STARTACC_OFST 0x7C
+
+#define ECC_XACT_KICK 0x10000
+#define ECC_WORD_WRITE 0xF
+#define ECC_WRITE_DOVR 0x101
+#define ECC_WRITE_EDOVR 0x103
+#define ECC_READ_EOVR 0x2
+#define ECC_READ_EDOVR 0x3
+
struct altr_edac_device_dev;
struct edac_device_prv_data {
@@ -370,6 +367,7 @@ struct altr_arria10_edac {
struct irq_domain *domain;
struct irq_chip irq_chip;
struct list_head a10_ecc_devices;
+ struct notifier_block panic_notifier;
};
/*
@@ -437,13 +435,4 @@ struct altr_arria10_edac {
#define INTEL_SIP_SMC_REG_WRITE \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
-struct altr_stratix10_edac {
- struct device *dev;
- int sb_irq;
- struct irq_domain *domain;
- struct irq_chip irq_chip;
- struct list_head s10_ecc_devices;
- struct notifier_block panic_notifier;
-};
-
#endif /* #ifndef _ALTERA_EDAC_H */
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 18aeabb1d5ee..6ea98575a402 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -211,7 +211,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->fam == 0x17) {
+ if (pvt->fam == 0x17 || pvt->fam == 0x18) {
__f17h_set_scrubval(pvt, scrubval);
} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
@@ -264,6 +264,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
break;
case 0x17:
+ case 0x18:
amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
if (scrubval & BIT(0)) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
@@ -1044,6 +1045,7 @@ static void determine_memory_type(struct amd64_pvt *pvt)
goto ddr3;
case 0x17:
+ case 0x18:
if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
pvt->dram_type = MEM_LRDDR4;
else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
@@ -2200,6 +2202,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_base_addr_to_cs_size,
}
},
+ [F17_M10H_CPUS] = {
+ .ctl_name = "F17h_M10h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
};
/*
@@ -3188,8 +3199,18 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
break;
case 0x17:
+ if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
+ fam_type = &family_types[F17_M10H_CPUS];
+ pvt->ops = &family_types[F17_M10H_CPUS].ops;
+ break;
+ }
+ /* fall through */
+ case 0x18:
fam_type = &family_types[F17_CPUS];
pvt->ops = &family_types[F17_CPUS].ops;
+
+ if (pvt->fam == 0x18)
+ family_types[F17_CPUS].ctl_name = "F18h";
break;
default:
@@ -3428,6 +3449,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 1d4b74e9a037..4242f8e39c18 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -115,6 +115,8 @@
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
#define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
/*
* Function 1 - Address Map
@@ -281,6 +283,7 @@ enum amd_families {
F16_CPUS,
F16_M30H_CPUS,
F17_CPUS,
+ F17_M10H_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 2c98e020df05..3c0881ac9880 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -593,8 +593,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
/******************** CPU err device********************************/
static u32 cpc925_cpu_mask_disabled(void)
{
- struct device_node *cpus;
- struct device_node *cpunode = NULL;
+ struct device_node *cpunode;
static u32 mask = 0;
/* use cached value if available */
@@ -603,20 +602,8 @@ static u32 cpc925_cpu_mask_disabled(void)
mask = APIMASK_ADI0 | APIMASK_ADI1;
- cpus = of_find_node_by_path("/cpus");
- if (cpus == NULL) {
- cpc925_printk(KERN_DEBUG, "No /cpus node !\n");
- return 0;
- }
-
- while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) {
+ for_each_of_cpu_node(cpunode) {
const u32 *reg = of_get_property(cpunode, "reg", NULL);
-
- if (strcmp(cpunode->type, "cpu")) {
- cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name);
- continue;
- }
-
if (reg == NULL || *reg > 2) {
cpc925_printk(KERN_ERR, "Bad reg value at %pOF\n", cpunode);
continue;
@@ -633,9 +620,6 @@ static u32 cpc925_cpu_mask_disabled(void)
"Assuming PI id is equal to CPU MPIC id!\n");
}
- of_node_put(cpunode);
- of_node_put(cpus);
-
return mask;
}
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 473aeec4b1da..49396bf6ad88 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -81,6 +81,18 @@ static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
(*num_dimm)++;
}
+static int get_dimm_smbios_index(u16 handle)
+{
+ struct mem_ctl_info *mci = ghes_pvt->mci;
+ int i;
+
+ for (i = 0; i < mci->tot_dimms; i++) {
+ if (mci->dimms[i]->smbios_handle == handle)
+ return i;
+ }
+ return -1;
+}
+
static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
{
struct ghes_edac_dimm_fill *dimm_fill = arg;
@@ -177,6 +189,8 @@ static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
entry->total_width, entry->data_width);
}
+ dimm->smbios_handle = entry->handle;
+
dimm_fill->count++;
}
}
@@ -327,12 +341,21 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
const char *bank = NULL, *device = NULL;
+ int index = -1;
+
dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
if (bank != NULL && device != NULL)
p += sprintf(p, "DIMM location:%s %s ", bank, device);
else
p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
mem_err->mem_dev_handle);
+
+ index = get_dimm_smbios_index(mem_err->mem_dev_handle);
+ if (index >= 0) {
+ e->top_layer = index;
+ e->enable_per_layer_report = true;
+ }
+
}
if (p > e->location)
*(p - 1) = '\0';
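get_dimm_smbios_index() added above is a simple linear scan that maps the SMBIOS module handle carried in the CPER record to that DIMM's index in the EDAC table, which is what lets the error land on a specific top-layer DIMM. A stand-alone sketch with mock types:

#include <stdio.h>
#include <stdint.h>

struct dimm {
	uint16_t smbios_handle;
};

static int dimm_index_for_handle(const struct dimm *dimms, int n,
				 uint16_t handle)
{
	int i;

	for (i = 0; i < n; i++)
		if (dimms[i].smbios_handle == handle)
			return i;
	return -1;	/* handle not known to this controller */
}

int main(void)
{
	struct dimm dimms[] = { { 0x1100 }, { 0x1101 }, { 0x1102 } };

	printf("%d\n", dimm_index_for_handle(dimms, 3, 0x1101)); /* 1 */
	return 0;
}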
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index d92d56cee101..299b441647cd 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -399,7 +399,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
if (nr_pages == 0)
continue;
- edac_dbg(0, "csrow %d, channel %d%s, size = %ld Mb\n", i, j,
+ edac_dbg(0, "csrow %d, channel %d%s, size = %ld MiB\n", i, j,
stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages));
dimm->nr_pages = nr_pages;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8e120bf60624..9ef448fef12f 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -597,7 +597,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
/* DDR3 has 8 I/O banks */
size = (rows * cols * banks * ranks) >> (20 - 3);
- edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "\tdimm %d %d MiB offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
j, size,
RANKOFFSET(dimm_dod[j]),
banks, ranks, rows, cols);
@@ -1711,6 +1711,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
u32 errnum = find_first_bit(&error, 32);
if (uncorrected_error) {
+ core_err_cnt = 1;
if (ripv)
tp_event = HW_EVENT_ERR_FATAL;
else
@@ -1815,14 +1816,12 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
struct mce *mce = (struct mce *)data;
struct i7core_dev *i7_dev;
struct mem_ctl_info *mci;
- struct i7core_pvt *pvt;
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev)
return NOTIFY_DONE;
mci = i7_dev->mci;
- pvt = mci->pvt_info;
/*
* Just let mcelog handle it if the error is
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 2ab4d61ee47e..c605089d899f 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1059,7 +1059,8 @@ static int __init mce_amd_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (c->x86_vendor != X86_VENDOR_AMD)
+ if (c->x86_vendor != X86_VENDOR_AMD &&
+ c->x86_vendor != X86_VENDOR_HYGON)
return -ENODEV;
fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
@@ -1113,6 +1114,7 @@ static int __init mce_amd_init(void)
break;
case 0x17:
+ case 0x18:
xec_mask = 0x3f;
if (!boot_cpu_has(X86_FEATURE_SMCA)) {
printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 07726fb00321..9353c3fc7c05 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -326,6 +326,7 @@ struct sbridge_info {
const struct interleave_pkg *interleave_pkg;
u8 max_sad;
u8 (*get_node_id)(struct sbridge_pvt *pvt);
+ u8 (*get_ha)(u8 bank);
enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
enum dev_type (*get_width)(struct sbridge_pvt *pvt, u32 mtr);
struct pci_dev *pci_vtd;
@@ -1002,6 +1003,39 @@ static u8 knl_get_node_id(struct sbridge_pvt *pvt)
return GET_BITFIELD(reg, 0, 2);
}
+/*
+ * Use the reporting bank number to determine which memory
+ * controller (also known as "ha" for "home agent") detected
+ * the error. Sandy Bridge has only one memory controller per
+ * socket, so the answer is always zero.
+ */
+static u8 sbridge_get_ha(u8 bank)
+{
+ return 0;
+}
+
+/*
+ * On Ivy Bridge, Haswell and Broadwell the error may be in a
+ * home agent bank (7, 8), or one of the per-channel memory
+ * controller banks (9 .. 16).
+ */
+static u8 ibridge_get_ha(u8 bank)
+{
+ switch (bank) {
+ case 7 ... 8:
+ return bank - 7;
+ case 9 ... 16:
+ return (bank - 9) / 4;
+ default:
+ return 0xff;
+ }
+}
+
+/* Not used, but included for safety/symmetry */
+static u8 knl_get_ha(u8 bank)
+{
+ return 0xff;
+}
static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
@@ -1622,7 +1656,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
- edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
size, npages,
banks, ranks, rows, cols);
@@ -2207,6 +2241,60 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
return 0;
}
+static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
+ const struct mce *m, u8 *socket,
+ u8 *ha, long *channel_mask,
+ char *msg)
+{
+ u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
+ struct mem_ctl_info *new_mci;
+ struct sbridge_pvt *pvt;
+ struct pci_dev *pci_ha;
+ bool tad0;
+
+ if (channel >= NUM_CHANNELS) {
+ sprintf(msg, "Invalid channel 0x%x", channel);
+ return -EINVAL;
+ }
+
+ pvt = mci->pvt_info;
+ if (!pvt->info.get_ha) {
+ sprintf(msg, "No get_ha()");
+ return -EINVAL;
+ }
+ *ha = pvt->info.get_ha(m->bank);
+ if (*ha != 0 && *ha != 1) {
+ sprintf(msg, "Impossible bank %d", m->bank);
+ return -EINVAL;
+ }
+
+ *socket = m->socketid;
+ new_mci = get_mci_for_node_id(*socket, *ha);
+ if (!new_mci) {
+ strcpy(msg, "mci socket got corrupted!");
+ return -EINVAL;
+ }
+
+ pvt = new_mci->pvt_info;
+ pci_ha = pvt->pci_ha;
+ pci_read_config_dword(pci_ha, tad_dram_rule[0], &reg);
+ tad0 = m->addr <= TAD_LIMIT(reg);
+
+ *channel_mask = 1 << channel;
+ if (pvt->mirror_mode == FULL_MIRRORING ||
+ (pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
+ *channel_mask |= 1 << ((channel + 2) % 4);
+ pvt->is_cur_addr_mirrored = true;
+ } else {
+ pvt->is_cur_addr_mirrored = false;
+ }
+
+ if (pvt->is_lockstep)
+ *channel_mask |= 1 << ((channel + 1) % 4);
+
+ return 0;
+}
+
/****************************************************************************
Device initialization routines: put/get, init/exit
****************************************************************************/
@@ -2877,10 +2965,16 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
u32 errcode = GET_BITFIELD(m->status, 0, 15);
u32 channel = GET_BITFIELD(m->status, 0, 3);
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+ /*
+ * Bits 5-0 of MCi_MISC give the least significant bit of the address
+ * that is valid. A value of 6 means a cache-line-aligned address; a
+ * value of 12 means a page-aligned address reported by the patrol
+ * scrubber.
+ */
+ u32 lsb = GET_BITFIELD(m->misc, 0, 5);
long channel_mask, first_channel;
- u8 rank, socket, ha;
+ u8 rank = 0xff, socket, ha;
int rc, dimm;
- char *area_type = NULL;
+ char *area_type = "DRAM";
if (pvt->info.type != SANDY_BRIDGE)
recoverable = true;
@@ -2888,6 +2982,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
recoverable = GET_BITFIELD(m->status, 56, 56);
if (uncorrected_error) {
+ core_err_cnt = 1;
if (ripv) {
type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
@@ -2911,35 +3006,27 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
* cccc = channel
* If the mask doesn't match, report an error to the parsing logic
*/
- if (! ((errcode & 0xef80) == 0x80)) {
- optype = "Can't parse: it is not a mem";
- } else {
- switch (optypenum) {
- case 0:
- optype = "generic undef request error";
- break;
- case 1:
- optype = "memory read error";
- break;
- case 2:
- optype = "memory write error";
- break;
- case 3:
- optype = "addr/cmd error";
- break;
- case 4:
- optype = "memory scrubbing error";
- break;
- default:
- optype = "reserved";
- break;
- }
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request error";
+ break;
+ case 1:
+ optype = "memory read error";
+ break;
+ case 2:
+ optype = "memory write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "memory scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
}
- /* Only decode errors with an valid address (ADDRV) */
- if (!GET_BITFIELD(m->status, 58, 58))
- return;
-
if (pvt->info.type == KNIGHTS_LANDING) {
if (channel == 14) {
edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
@@ -2972,9 +3059,13 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
optype, msg);
}
return;
- } else {
+ } else if (lsb < 12) {
rc = get_memory_error_data(mci, m->addr, &socket, &ha,
- &channel_mask, &rank, &area_type, msg);
+ &channel_mask, &rank,
+ &area_type, msg);
+ } else {
+ rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
+ &channel_mask, msg);
}
if (rc < 0)
@@ -2989,14 +3080,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
- if (rank < 4)
+ if (rank == 0xff)
+ dimm = -1;
+ else if (rank < 4)
dimm = 0;
else if (rank < 8)
dimm = 1;
else
dimm = 2;
-
/*
* FIXME: On some memory configurations (mirror, lockstep), the
* Memory Controller can't point the error to a single DIMM. The
@@ -3045,17 +3137,11 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
{
struct mce *mce = (struct mce *)data;
struct mem_ctl_info *mci;
- struct sbridge_pvt *pvt;
char *type;
if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
return NOTIFY_DONE;
- mci = get_mci_for_node_id(mce->socketid, IMC0);
- if (!mci)
- return NOTIFY_DONE;
- pvt = mci->pvt_info;
-
/*
* Just let mcelog handle it if the error is
* outside the memory controller. A memory error
@@ -3065,6 +3151,22 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
if ((mce->status & 0xefff) >> 7 != 1)
return NOTIFY_DONE;
+ /* Check ADDRV bit in STATUS */
+ if (!GET_BITFIELD(mce->status, 58, 58))
+ return NOTIFY_DONE;
+
+ /* Check MISCV bit in STATUS */
+ if (!GET_BITFIELD(mce->status, 59, 59))
+ return NOTIFY_DONE;
+
+ /* Check address type in MISC (physical address only) */
+ if (GET_BITFIELD(mce->misc, 6, 8) != 2)
+ return NOTIFY_DONE;
+
+ mci = get_mci_for_node_id(mce->socketid, IMC0);
+ if (!mci)
+ return NOTIFY_DONE;
+
if (mce->mcgstatus & MCG_STATUS_MCIP)
type = "Exception";
else
@@ -3173,6 +3275,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = get_memory_type;
pvt->info.get_node_id = get_node_id;
+ pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3197,6 +3300,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = sbridge_dram_rule;
pvt->info.get_memory_type = get_memory_type;
pvt->info.get_node_id = get_node_id;
+ pvt->info.get_ha = sbridge_get_ha;
pvt->info.rir_limit = rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3221,6 +3325,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = haswell_get_memory_type;
pvt->info.get_node_id = haswell_get_node_id;
+ pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = haswell_rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3245,6 +3350,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = haswell_get_memory_type;
pvt->info.get_node_id = haswell_get_node_id;
+ pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = haswell_rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3269,6 +3375,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = knl_dram_rule;
pvt->info.get_memory_type = knl_get_memory_type;
pvt->info.get_node_id = knl_get_node_id;
+ pvt->info.get_ha = knl_get_ha;
pvt->info.rir_limit = NULL;
pvt->info.sad_limit = knl_sad_limit;
pvt->info.interleave_mode = knl_interleave_mode;
@@ -3320,17 +3427,14 @@ fail0:
return rc;
}
-#define ICPU(model, table) \
- { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
-
static const struct x86_cpu_id sbridge_cpuids[] = {
- ICPU(INTEL_FAM6_SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
- ICPU(INTEL_FAM6_IVYBRIDGE_X, pci_dev_descr_ibridge_table),
- ICPU(INTEL_FAM6_HASWELL_X, pci_dev_descr_haswell_table),
- ICPU(INTEL_FAM6_BROADWELL_X, pci_dev_descr_broadwell_table),
- ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
- ICPU(INTEL_FAM6_XEON_PHI_KNL, pci_dev_descr_knl_table),
- ICPU(INTEL_FAM6_XEON_PHI_KNM, pci_dev_descr_knl_table),
+ INTEL_CPU_FAM6(SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
+ INTEL_CPU_FAM6(IVYBRIDGE_X, pci_dev_descr_ibridge_table),
+ INTEL_CPU_FAM6(HASWELL_X, pci_dev_descr_haswell_table),
+ INTEL_CPU_FAM6(BROADWELL_X, pci_dev_descr_broadwell_table),
+ INTEL_CPU_FAM6(BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
+ INTEL_CPU_FAM6(XEON_PHI_KNL, pci_dev_descr_knl_table),
+ INTEL_CPU_FAM6(XEON_PHI_KNM, pci_dev_descr_knl_table),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
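For reference, the bank-to-home-agent arithmetic that ibridge_get_ha() introduces above (banks 7-8 are the two HA banks; banks 9-12 map to HA0's channels and 13-16 to HA1's) can be tabulated with a stand-alone check using the same arithmetic and no kernel types:

#include <stdio.h>

static unsigned char get_ha(unsigned char bank)
{
	if (bank == 7 || bank == 8)
		return bank - 7;
	if (bank >= 9 && bank <= 16)
		return (bank - 9) / 4;
	return 0xff;	/* not a memory-controller bank */
}

int main(void)
{
	unsigned char bank;

	for (bank = 6; bank <= 17; bank++)
		printf("bank %2u -> ha %u\n", bank, get_ha(bank));
	return 0;
}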
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index fae095162c01..dd209e0dd9ab 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -364,7 +364,7 @@ static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
- edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
imc->mc, chan, dimmno, size, npages,
banks, 1 << ranks, rows, cols);
@@ -424,7 +424,7 @@ unknown_size:
dimm->mtype = MEM_NVDIMM;
dimm->edac_mode = EDAC_SECDED; /* likely better than this */
- edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu Mb (%u pages)\n",
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu MiB (%u pages)\n",
imc->mc, chan, dimmno, size >> 20, dimm->nr_pages);
snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
@@ -668,7 +668,7 @@ sad_found:
break;
case 2:
lchan = (addr >> shift) % 2;
- lchan = (lchan << 1) | ~lchan;
+ lchan = (lchan << 1) | !lchan;
break;
case 3:
lchan = ((addr >> shift) % 2) << 1;
@@ -959,6 +959,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
recoverable = GET_BITFIELD(m->status, 56, 56);
if (uncorrected_error) {
+ core_err_cnt = 1;
if (ripv) {
type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
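The one-character skx_edac interleave fix above ((lchan << 1) | ~lchan becoming (lchan << 1) | !lchan) matters because, for lchan in {0, 1}, bitwise NOT yields 0xffffffff or 0xfffffffe rather than the intended complement bit, while logical NOT yields exactly 1 or 0. A stand-alone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int lchan;

	for (lchan = 0; lchan <= 1; lchan++) {
		unsigned int wrong = (lchan << 1) | ~lchan;	/* old code */
		unsigned int right = (lchan << 1) | !lchan;	/* fixed */

		printf("lchan=%u wrong=%#010x right=%#x\n",
		       lchan, wrong, right);
	}
	return 0;
}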
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index c009d94f40c5..34be60fe6892 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1884,7 +1884,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
default:
dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n",
l2c->pdev->device);
- return IRQ_NONE;
+ goto err_free;
}
while (CIRC_CNT(l2c->ring_head, l2c->ring_tail,
@@ -1906,7 +1906,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
l2c->ring_tail++;
}
- return IRQ_HANDLED;
+ ret = IRQ_HANDLED;
err_free:
kfree(other);
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 5e1dd2772278..5ef215297101 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Extcon charger detection driver for Intel Cherrytrail Whiskey Cove PMIC
* Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
*
* Based on various non upstream patches to support the CHT Whiskey Cove PMIC:
* Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/extcon-provider.h>
@@ -32,10 +24,10 @@
#define CHT_WC_CHGRCTRL0_EMRGCHREN BIT(1)
#define CHT_WC_CHGRCTRL0_EXTCHRDIS BIT(2)
#define CHT_WC_CHGRCTRL0_SWCONTROL BIT(3)
-#define CHT_WC_CHGRCTRL0_TTLCK_MASK BIT(4)
-#define CHT_WC_CHGRCTRL0_CCSM_OFF_MASK BIT(5)
-#define CHT_WC_CHGRCTRL0_DBPOFF_MASK BIT(6)
-#define CHT_WC_CHGRCTRL0_WDT_NOKICK BIT(7)
+#define CHT_WC_CHGRCTRL0_TTLCK BIT(4)
+#define CHT_WC_CHGRCTRL0_CCSM_OFF BIT(5)
+#define CHT_WC_CHGRCTRL0_DBPOFF BIT(6)
+#define CHT_WC_CHGRCTRL0_CHR_WDT_NOKICK BIT(7)
#define CHT_WC_CHGRCTRL1 0x5e17
@@ -52,7 +44,7 @@
#define CHT_WC_USBSRC_TYPE_ACA 4
#define CHT_WC_USBSRC_TYPE_SE1 5
#define CHT_WC_USBSRC_TYPE_MHL 6
-#define CHT_WC_USBSRC_TYPE_FLOAT_DP_DN 7
+#define CHT_WC_USBSRC_TYPE_FLOATING 7
#define CHT_WC_USBSRC_TYPE_OTHER 8
#define CHT_WC_USBSRC_TYPE_DCP_EXTPHY 9
@@ -61,9 +53,12 @@
#define CHT_WC_PWRSRC_STS 0x6e1e
#define CHT_WC_PWRSRC_VBUS BIT(0)
#define CHT_WC_PWRSRC_DC BIT(1)
-#define CHT_WC_PWRSRC_BAT BIT(2)
-#define CHT_WC_PWRSRC_ID_GND BIT(3)
-#define CHT_WC_PWRSRC_ID_FLOAT BIT(4)
+#define CHT_WC_PWRSRC_BATT BIT(2)
+#define CHT_WC_PWRSRC_USBID_MASK GENMASK(4, 3)
+#define CHT_WC_PWRSRC_USBID_SHIFT 3
+#define CHT_WC_PWRSRC_RID_ACA 0
+#define CHT_WC_PWRSRC_RID_GND 1
+#define CHT_WC_PWRSRC_RID_FLOAT 2
#define CHT_WC_VBUS_GPIO_CTLO 0x6e2d
#define CHT_WC_VBUS_GPIO_CTLO_OUTPUT BIT(0)
@@ -104,16 +99,20 @@ struct cht_wc_extcon_data {
static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
{
- if (pwrsrc_sts & CHT_WC_PWRSRC_ID_GND)
+ switch ((pwrsrc_sts & CHT_WC_PWRSRC_USBID_MASK) >> CHT_WC_PWRSRC_USBID_SHIFT) {
+ case CHT_WC_PWRSRC_RID_GND:
return USB_ID_GND;
- if (pwrsrc_sts & CHT_WC_PWRSRC_ID_FLOAT)
+ case CHT_WC_PWRSRC_RID_FLOAT:
return USB_ID_FLOAT;
-
- /*
- * Once we have iio support for the gpadc we should read the USBID
- * gpadc channel here and determine ACA role based on that.
- */
- return USB_ID_FLOAT;
+ case CHT_WC_PWRSRC_RID_ACA:
+ default:
+ /*
+ * Once we have IIO support for the GPADC we should read
+ * the USBID GPADC channel here and determine ACA role
+ * based on that.
+ */
+ return USB_ID_FLOAT;
+ }
}
static int cht_wc_extcon_get_charger(struct cht_wc_extcon_data *ext,
@@ -156,9 +155,9 @@ static int cht_wc_extcon_get_charger(struct cht_wc_extcon_data *ext,
dev_warn(ext->dev,
"Unhandled charger type %d, defaulting to SDP\n",
ret);
- /* Fall through, treat as SDP */
+ return EXTCON_CHG_USB_SDP;
case CHT_WC_USBSRC_TYPE_SDP:
- case CHT_WC_USBSRC_TYPE_FLOAT_DP_DN:
+ case CHT_WC_USBSRC_TYPE_FLOATING:
case CHT_WC_USBSRC_TYPE_OTHER:
return EXTCON_CHG_USB_SDP;
case CHT_WC_USBSRC_TYPE_CDP:
@@ -279,7 +278,7 @@ static int cht_wc_extcon_sw_control(struct cht_wc_extcon_data *ext, bool enable)
{
int ret, mask, val;
- mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF_MASK;
+ mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF;
val = enable ? mask : 0;
ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL0, mask, val);
if (ret)
@@ -292,6 +291,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct cht_wc_extcon_data *ext;
+ unsigned long mask = ~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_USBID_MASK);
int irq, ret;
irq = platform_get_irq(pdev, 0);
@@ -352,9 +352,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
}
/* Unmask irqs */
- ret = regmap_write(ext->regmap, CHT_WC_PWRSRC_IRQ_MASK,
- (int)~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_ID_GND |
- CHT_WC_PWRSRC_ID_FLOAT));
+ ret = regmap_write(ext->regmap, CHT_WC_PWRSRC_IRQ_MASK, mask);
if (ret) {
dev_err(ext->dev, "Error writing irq-mask: %d\n", ret);
goto disable_sw_control;
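The USBID rework above reads the two PWRSRC ID bits as a single two-bit field (mask plus shift) and switches on its RID value instead of testing two independent flag bits, which also gives the ACA case its own slot. A stand-alone sketch mirroring the new defines:

#include <stdio.h>

#define USBID_MASK	0x18u	/* GENMASK(4, 3) */
#define USBID_SHIFT	3
#define RID_ACA		0
#define RID_GND		1
#define RID_FLOAT	2

static const char *decode_id(unsigned int pwrsrc_sts)
{
	switch ((pwrsrc_sts & USBID_MASK) >> USBID_SHIFT) {
	case RID_GND:
		return "USB_ID_GND";
	case RID_FLOAT:
		return "USB_ID_FLOAT";
	case RID_ACA:
	default:
		return "USB_ID_FLOAT (ACA not decoded yet)";
	}
}

int main(void)
{
	printf("%s\n", decode_id(RID_GND << USBID_SHIFT));
	printf("%s\n", decode_id(RID_FLOAT << USBID_SHIFT));
	return 0;
}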
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index fd24debe58a3..80c9abcc3f97 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel INT3496 ACPI device extcon driver
*
@@ -7,15 +8,6 @@
*
* Copyright (c) 2014, Intel Corporation.
* Author: David Cohen <david.a.cohen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/acpi.h>
@@ -192,4 +184,4 @@ module_platform_driver(int3496_driver);
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Intel INT3496 ACPI device extcon driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index b871836da8a4..22d2feb1f8bc 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -1,20 +1,10 @@
-/*
- * extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC
- *
- * Copyright (C) 2013,2014 Samsung Electronics
- * Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC
+//
+// Copyright (C) 2013,2014 Samsung Electronics
+// Chanwoo Choi <cw00.choi@samsung.com>
+// Krzysztof Kozlowski <krzk@kernel.org>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 227651ff9666..a79537ebb671 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1,19 +1,9 @@
-/*
- * extcon-max77693.c - MAX77693 extcon driver to support MAX77693 MUIC
- *
- * Copyright (C) 2012 Samsung Electrnoics
- * Chanwoo Choi <cw00.choi@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// extcon-max77693.c - MAX77693 extcon driver to support MAX77693 MUIC
+//
+// Copyright (C) 2012 Samsung Electronics
+// Chanwoo Choi <cw00.choi@samsung.com>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index c9fcd6cd41cb..b98cbd0362f5 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -1,15 +1,10 @@
-/*
- * extcon-max77843.c - Maxim MAX77843 extcon driver to support
- * MUIC(Micro USB Interface Controller)
- *
- * Copyright (C) 2015 Samsung Electronics
- * Author: Jaewon Kim <jaewon02.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// extcon-max77843.c - Maxim MAX77843 extcon driver to support
+// MUIC (Micro USB Interface Controller)
+//
+// Copyright (C) 2015 Samsung Electronics
+// Author: Jaewon Kim <jaewon02.kim@samsung.com>
#include <linux/extcon-provider.h>
#include <linux/i2c.h>
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 9f30f4929b72..bdabb2479e0d 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -1,19 +1,9 @@
-/*
- * extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
- *
- * Copyright (C) 2012 Samsung Electronics
- * Donggeun Kim <dg77.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
+//
+// Copyright (C) 2012 Samsung Electronics
+// Donggeun Kim <dg77.kim@samsung.com>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index b9d27c8fe57e..5ab0498be652 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -628,7 +628,7 @@ int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned long flags;
int index, ret = 0;
- *prop_val = (union extcon_property_value)(0);
+ *prop_val = (union extcon_property_value){0};
if (!edev)
return -EINVAL;
@@ -1123,7 +1123,6 @@ int extcon_dev_register(struct extcon_dev *edev)
(unsigned long)atomic_inc_return(&edev_no));
if (edev->max_supported) {
- char buf[10];
char *str;
struct extcon_cable *cable;
@@ -1137,9 +1136,7 @@ int extcon_dev_register(struct extcon_dev *edev)
for (index = 0; index < edev->max_supported; index++) {
cable = &edev->cables[index];
- snprintf(buf, 10, "cable.%d", index);
- str = kzalloc(strlen(buf) + 1,
- GFP_KERNEL);
+ str = kasprintf(GFP_KERNEL, "cable.%d", index);
if (!str) {
for (index--; index >= 0; index--) {
cable = &edev->cables[index];
@@ -1149,7 +1146,6 @@ int extcon_dev_register(struct extcon_dev *edev)
goto err_alloc_cables;
}
- strcpy(str, buf);
cable->edev = edev;
cable->cable_index = index;
@@ -1172,7 +1168,6 @@ int extcon_dev_register(struct extcon_dev *edev)
}
if (edev->max_supported && edev->mutually_exclusive) {
- char buf[80];
char *name;
/* Count the size of mutually_exclusive array */
@@ -1197,9 +1192,8 @@ int extcon_dev_register(struct extcon_dev *edev)
}
for (index = 0; edev->mutually_exclusive[index]; index++) {
- sprintf(buf, "0x%x", edev->mutually_exclusive[index]);
- name = kzalloc(strlen(buf) + 1,
- GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "0x%x",
+ edev->mutually_exclusive[index]);
if (!name) {
for (index--; index >= 0; index--) {
kfree(edev->d_attrs_muex[index].attr.
@@ -1210,7 +1204,6 @@ int extcon_dev_register(struct extcon_dev *edev)
ret = -ENOMEM;
goto err_muex;
}
- strcpy(name, buf);
sysfs_attr_init(&edev->d_attrs_muex[index].attr);
edev->d_attrs_muex[index].attr.name = name;
edev->d_attrs_muex[index].attr.mode = 0000;
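The extcon.c cleanup above folds each snprintf-into-a-stack-buffer, kzalloc, strcpy sequence into a single kasprintf() call, which allocates a buffer sized exactly for the formatted string. The user-space analogue is asprintf(3):

#define _GNU_SOURCE	/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *str;

	if (asprintf(&str, "cable.%d", 7) < 0)	/* one alloc, right-sized */
		return 1;
	puts(str);	/* cable.7 */
	free(str);
	return 0;
}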
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index a456a000048b..91a0404affe2 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -10,37 +10,31 @@ if GOOGLE_FIRMWARE
config GOOGLE_SMI
tristate "SMI interface for Google platforms"
- depends on X86 && ACPI && DMI && EFI
- select EFI_VARS
+ depends on X86 && ACPI && DMI
help
Say Y here if you want to enable SMI callbacks for Google
platforms. This provides an interface for writing to and
- clearing the EFI event log and reading and writing NVRAM
+ clearing the event log. If EFI_VARS is also enabled, this
+ driver provides an interface for reading and writing NVRAM
variables.
config GOOGLE_COREBOOT_TABLE
- tristate
- depends on GOOGLE_COREBOOT_TABLE_ACPI || GOOGLE_COREBOOT_TABLE_OF
-
-config GOOGLE_COREBOOT_TABLE_ACPI
- tristate "Coreboot Table Access - ACPI"
- depends on ACPI
- select GOOGLE_COREBOOT_TABLE
+ tristate "Coreboot Table Access"
+ depends on ACPI || OF
help
This option enables the coreboot_table module, which provides other
- firmware modules to access to the coreboot table. The coreboot table
- pointer is accessed through the ACPI "GOOGCB00" object.
+ firmware modules access to the coreboot table. The coreboot table
+ pointer is accessed through the ACPI "GOOGCB00" object or the
+ device tree node /firmware/coreboot.
If unsure say N.
+config GOOGLE_COREBOOT_TABLE_ACPI
+ tristate
+ select GOOGLE_COREBOOT_TABLE
+
config GOOGLE_COREBOOT_TABLE_OF
- tristate "Coreboot Table Access - Device Tree"
- depends on OF
+ tristate
select GOOGLE_COREBOOT_TABLE
- help
- This option enable the coreboot_table module, which provide other
- firmware modules to access coreboot table. The coreboot table pointer
- is accessed through the device tree node /firmware/coreboot.
- If unsure say N.
config GOOGLE_MEMCONSOLE
tristate
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile
index d0b3fba96194..d17caded5d88 100644
--- a/drivers/firmware/google/Makefile
+++ b/drivers/firmware/google/Makefile
@@ -2,8 +2,6 @@
obj-$(CONFIG_GOOGLE_SMI) += gsmi.o
obj-$(CONFIG_GOOGLE_COREBOOT_TABLE) += coreboot_table.o
-obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_ACPI) += coreboot_table-acpi.o
-obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_OF) += coreboot_table-of.o
obj-$(CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT) += framebuffer-coreboot.o
obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o
obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT) += memconsole-coreboot.o
diff --git a/drivers/firmware/google/coreboot_table-acpi.c b/drivers/firmware/google/coreboot_table-acpi.c
deleted file mode 100644
index 77197fe3d42f..000000000000
--- a/drivers/firmware/google/coreboot_table-acpi.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * coreboot_table-acpi.c
- *
- * Using ACPI to locate Coreboot table and provide coreboot table access.
- *
- * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/acpi.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "coreboot_table.h"
-
-static int coreboot_table_acpi_probe(struct platform_device *pdev)
-{
- phys_addr_t phyaddr;
- resource_size_t len;
- struct coreboot_table_header __iomem *header = NULL;
- struct resource *res;
- void __iomem *ptr = NULL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- len = resource_size(res);
- if (!res->start || !len)
- return -EINVAL;
-
- phyaddr = res->start;
- header = ioremap_cache(phyaddr, sizeof(*header));
- if (header == NULL)
- return -ENOMEM;
-
- ptr = ioremap_cache(phyaddr,
- header->header_bytes + header->table_bytes);
- iounmap(header);
- if (!ptr)
- return -ENOMEM;
-
- return coreboot_table_init(&pdev->dev, ptr);
-}
-
-static int coreboot_table_acpi_remove(struct platform_device *pdev)
-{
- return coreboot_table_exit();
-}
-
-static const struct acpi_device_id cros_coreboot_acpi_match[] = {
- { "GOOGCB00", 0 },
- { "BOOT0000", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
-
-static struct platform_driver coreboot_table_acpi_driver = {
- .probe = coreboot_table_acpi_probe,
- .remove = coreboot_table_acpi_remove,
- .driver = {
- .name = "coreboot_table_acpi",
- .acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
- },
-};
-
-static int __init coreboot_table_acpi_init(void)
-{
- return platform_driver_register(&coreboot_table_acpi_driver);
-}
-
-module_init(coreboot_table_acpi_init);
-
-MODULE_AUTHOR("Google, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table-of.c b/drivers/firmware/google/coreboot_table-of.c
deleted file mode 100644
index f15bf404c579..000000000000
--- a/drivers/firmware/google/coreboot_table-of.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * coreboot_table-of.c
- *
- * Coreboot table access through open firmware.
- *
- * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include "coreboot_table.h"
-
-static int coreboot_table_of_probe(struct platform_device *pdev)
-{
- struct device_node *fw_dn = pdev->dev.of_node;
- void __iomem *ptr;
-
- ptr = of_iomap(fw_dn, 0);
- of_node_put(fw_dn);
- if (!ptr)
- return -ENOMEM;
-
- return coreboot_table_init(&pdev->dev, ptr);
-}
-
-static int coreboot_table_of_remove(struct platform_device *pdev)
-{
- return coreboot_table_exit();
-}
-
-static const struct of_device_id coreboot_of_match[] = {
- { .compatible = "coreboot" },
- {},
-};
-
-static struct platform_driver coreboot_table_of_driver = {
- .probe = coreboot_table_of_probe,
- .remove = coreboot_table_of_remove,
- .driver = {
- .name = "coreboot_table_of",
- .of_match_table = coreboot_of_match,
- },
-};
-
-static int __init platform_coreboot_table_of_init(void)
-{
- struct platform_device *pdev;
- struct device_node *of_node;
-
- /* Limit device creation to the presence of /firmware/coreboot node */
- of_node = of_find_node_by_path("/firmware/coreboot");
- if (!of_node)
- return -ENODEV;
-
- if (!of_match_node(coreboot_of_match, of_node))
- return -ENODEV;
-
- pdev = of_platform_device_create(of_node, "coreboot_table_of", NULL);
- if (!pdev)
- return -ENODEV;
-
- return platform_driver_register(&coreboot_table_of_driver);
-}
-
-module_init(platform_coreboot_table_of_init);
-
-MODULE_AUTHOR("Google, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 19db5709ae28..078d3bbe632f 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -16,12 +16,15 @@
* GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "coreboot_table.h"
@@ -29,8 +32,6 @@
#define CB_DEV(d) container_of(d, struct coreboot_device, dev)
#define CB_DRV(d) container_of(d, struct coreboot_driver, drv)
-static struct coreboot_table_header __iomem *ptr_header;
-
static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
{
struct coreboot_device *device = CB_DEV(dev);
@@ -70,12 +71,6 @@ static struct bus_type coreboot_bus_type = {
.remove = coreboot_bus_remove,
};
-static int __init coreboot_bus_init(void)
-{
- return bus_register(&coreboot_bus_type);
-}
-module_init(coreboot_bus_init);
-
static void coreboot_device_release(struct device *dev)
{
struct coreboot_device *device = CB_DEV(dev);
@@ -97,62 +92,117 @@ void coreboot_driver_unregister(struct coreboot_driver *driver)
}
EXPORT_SYMBOL(coreboot_driver_unregister);
-int coreboot_table_init(struct device *dev, void __iomem *ptr)
+static int coreboot_table_populate(struct device *dev, void *ptr)
{
int i, ret;
void *ptr_entry;
struct coreboot_device *device;
- struct coreboot_table_entry entry;
- struct coreboot_table_header header;
-
- ptr_header = ptr;
- memcpy_fromio(&header, ptr_header, sizeof(header));
-
- if (strncmp(header.signature, "LBIO", sizeof(header.signature))) {
- pr_warn("coreboot_table: coreboot table missing or corrupt!\n");
- return -ENODEV;
- }
+ struct coreboot_table_entry *entry;
+ struct coreboot_table_header *header = ptr;
- ptr_entry = (void *)ptr_header + header.header_bytes;
- for (i = 0; i < header.table_entries; i++) {
- memcpy_fromio(&entry, ptr_entry, sizeof(entry));
+ ptr_entry = ptr + header->header_bytes;
+ for (i = 0; i < header->table_entries; i++) {
+ entry = ptr_entry;
- device = kzalloc(sizeof(struct device) + entry.size, GFP_KERNEL);
- if (!device) {
- ret = -ENOMEM;
- break;
- }
+ device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
dev_set_name(&device->dev, "coreboot%d", i);
device->dev.parent = dev;
device->dev.bus = &coreboot_bus_type;
device->dev.release = coreboot_device_release;
- memcpy_fromio(&device->entry, ptr_entry, entry.size);
+ memcpy(&device->entry, ptr_entry, entry->size);
ret = device_register(&device->dev);
if (ret) {
put_device(&device->dev);
- break;
+ return ret;
}
- ptr_entry += entry.size;
+ ptr_entry += entry->size;
}
- return ret;
+ return 0;
}
-EXPORT_SYMBOL(coreboot_table_init);
-int coreboot_table_exit(void)
+static int coreboot_table_probe(struct platform_device *pdev)
{
- if (ptr_header) {
- bus_unregister(&coreboot_bus_type);
- iounmap(ptr_header);
- ptr_header = NULL;
+ resource_size_t len;
+ struct coreboot_table_header *header;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void *ptr;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ len = resource_size(res);
+ if (!res->start || !len)
+ return -EINVAL;
+
+ /* Check just the header first to make sure things are sane */
+ header = memremap(res->start, sizeof(*header), MEMREMAP_WB);
+ if (!header)
+ return -ENOMEM;
+
+ len = header->header_bytes + header->table_bytes;
+ ret = strncmp(header->signature, "LBIO", sizeof(header->signature));
+ memunmap(header);
+ if (ret) {
+ dev_warn(dev, "coreboot table missing or corrupt!\n");
+ return -ENODEV;
}
+ ptr = memremap(res->start, len, MEMREMAP_WB);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = bus_register(&coreboot_bus_type);
+ if (!ret) {
+ ret = coreboot_table_populate(dev, ptr);
+ if (ret)
+ bus_unregister(&coreboot_bus_type);
+ }
+ memunmap(ptr);
+
+ return ret;
+}
+
+static int coreboot_table_remove(struct platform_device *pdev)
+{
+ bus_unregister(&coreboot_bus_type);
return 0;
}
-EXPORT_SYMBOL(coreboot_table_exit);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_coreboot_acpi_match[] = {
+ { "GOOGCB00", 0 },
+ { "BOOT0000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id coreboot_of_match[] = {
+ { .compatible = "coreboot" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, coreboot_of_match);
+#endif
+
+static struct platform_driver coreboot_table_driver = {
+ .probe = coreboot_table_probe,
+ .remove = coreboot_table_remove,
+ .driver = {
+ .name = "coreboot_table",
+ .acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
+ .of_match_table = of_match_ptr(coreboot_of_match),
+ },
+};
+module_platform_driver(coreboot_table_driver);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 8ad95a94481b..71a9de6b15fa 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -91,10 +91,4 @@ int coreboot_driver_register(struct coreboot_driver *driver);
/* Unregister a driver that uses the data from a coreboot table. */
void coreboot_driver_unregister(struct coreboot_driver *driver);
-/* Initialize coreboot table module given a pointer to iomem */
-int coreboot_table_init(struct device *dev, void __iomem *ptr);
-
-/* Cleanup coreboot table module */
-int coreboot_table_exit(void);
-
#endif /* __COREBOOT_TABLE_H */
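coreboot_table_probe() above maps the table in two steps: first just the fixed-size header to validate the "LBIO" signature, then, once the signature checks out, the full header_bytes + table_bytes span. A stand-alone sketch of that sizing logic over plain memory (simplified header layout; the real struct coreboot_table_header carries additional checksum fields):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct cb_header {		/* simplified layout */
	char signature[4];	/* "LBIO" */
	uint32_t header_bytes;
	uint32_t table_bytes;
	uint32_t table_entries;
};

static long table_len(const void *base)
{
	const struct cb_header *h = base;

	if (strncmp(h->signature, "LBIO", sizeof(h->signature)))
		return -1;	/* missing or corrupt */
	return (long)h->header_bytes + h->table_bytes;
}

int main(void)
{
	struct cb_header h = { "LBIO", sizeof(h), 128, 4 };

	printf("map %ld bytes\n", table_len(&h));	/* header + entries */
	return 0;
}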
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index c8f169bf2e27..82ce1e6d261e 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -29,6 +29,7 @@
#include <linux/efi.h>
#include <linux/module.h>
#include <linux/ucs2_string.h>
+#include <linux/suspend.h>
#define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */
/* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
@@ -70,6 +71,8 @@
#define GSMI_CMD_SET_NVRAM_VAR 0x03
#define GSMI_CMD_SET_EVENT_LOG 0x08
#define GSMI_CMD_CLEAR_EVENT_LOG 0x09
+#define GSMI_CMD_LOG_S0IX_SUSPEND 0x0a
+#define GSMI_CMD_LOG_S0IX_RESUME 0x0b
#define GSMI_CMD_CLEAR_CONFIG 0x20
#define GSMI_CMD_HANDSHAKE_TYPE 0xC1
@@ -84,7 +87,7 @@ struct gsmi_buf {
u32 address; /* physical address of buffer */
};
-struct gsmi_device {
+static struct gsmi_device {
struct platform_device *pdev; /* platform device */
struct gsmi_buf *name_buf; /* variable name buffer */
struct gsmi_buf *data_buf; /* generic data buffer */
@@ -122,7 +125,6 @@ struct gsmi_log_entry_type_1 {
u32 instance;
} __packed;
-
/*
* Some platforms don't have explicit SMI handshake
* and need to wait for SMI to complete.
@@ -133,6 +135,15 @@ module_param(spincount, uint, 0600);
MODULE_PARM_DESC(spincount,
"The number of loop iterations to use when using the spin handshake.");
+/*
+ * Platforms might not support S0ix logging in their GSMI handlers. In order to
+ * avoid any side-effects of generating an SMI for S0ix logging, use the S0ix
+ * related GSMI commands only for those platforms that explicitly enable this
+ * option.
+ */
+static bool s0ix_logging_enable;
+module_param(s0ix_logging_enable, bool, 0600);
+
static struct gsmi_buf *gsmi_buf_alloc(void)
{
struct gsmi_buf *smibuf;
@@ -289,6 +300,10 @@ static int gsmi_exec(u8 func, u8 sub)
return rc;
}
+#ifdef CONFIG_EFI_VARS
+
+static struct efivars efivars;
+
static efi_status_t gsmi_get_variable(efi_char16_t *name,
efi_guid_t *vendor, u32 *attr,
unsigned long *data_size,
@@ -466,6 +481,8 @@ static const struct efivar_operations efivar_ops = {
.get_next_variable = gsmi_get_next_variable,
};
+#endif /* CONFIG_EFI_VARS */
+
static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
@@ -480,11 +497,10 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
if (count < sizeof(u32))
return -EINVAL;
param.type = *(u32 *)buf;
- count -= sizeof(u32);
buf += sizeof(u32);
/* The remaining buffer is the data payload */
- if (count > gsmi_dev.data_buf->length)
+ if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
return -EINVAL;
param.data_len = count - sizeof(u32);
@@ -504,7 +520,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
- return rc;
+ return (rc == 0) ? count : rc;
}
@@ -716,6 +732,12 @@ static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
},
},
+ {
+ .ident = "Coreboot Firmware",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ },
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
@@ -762,7 +784,6 @@ static __init int gsmi_system_valid(void)
}
static struct kobject *gsmi_kobj;
-static struct efivars efivars;
static const struct platform_device_info gsmi_dev_info = {
.name = "gsmi",
@@ -771,6 +792,78 @@ static const struct platform_device_info gsmi_dev_info = {
.dma_mask = DMA_BIT_MASK(32),
};
+#ifdef CONFIG_PM
+static void gsmi_log_s0ix_info(u8 cmd)
+{
+ unsigned long flags;
+
+ /*
+ * If platform has not enabled S0ix logging, then no action is
+ * necessary.
+ */
+ if (!s0ix_logging_enable)
+ return;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+
+ gsmi_exec(GSMI_CALLBACK, cmd);
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+}
+
+static int gsmi_log_s0ix_suspend(struct device *dev)
+{
+ /*
+ * If system is not suspending via firmware using the standard ACPI Sx
+ * types, then make a GSMI call to log the suspend info.
+ */
+ if (!pm_suspend_via_firmware())
+ gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_SUSPEND);
+
+ /*
+ * Always return success, since we do not want suspend
+ * to fail just because of logging failure.
+ */
+ return 0;
+}
+
+static int gsmi_log_s0ix_resume(struct device *dev)
+{
+ /*
+ * If system did not resume via firmware, then make a GSMI call to log
+ * the resume info and wake source.
+ */
+ if (!pm_resume_via_firmware())
+ gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_RESUME);
+
+ /*
+ * Always return success, since we do not want resume
+ * to fail just because of logging failure.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops gsmi_pm_ops = {
+ .suspend_noirq = gsmi_log_s0ix_suspend,
+ .resume_noirq = gsmi_log_s0ix_resume,
+};
+
+static int gsmi_platform_driver_probe(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver gsmi_driver_info = {
+ .driver = {
+ .name = "gsmi",
+ .pm = &gsmi_pm_ops,
+ },
+ .probe = gsmi_platform_driver_probe,
+};
+#endif
+
static __init int gsmi_init(void)
{
unsigned long flags;
@@ -782,6 +875,14 @@ static __init int gsmi_init(void)
gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
+#ifdef CONFIG_PM
+ ret = platform_driver_register(&gsmi_driver_info);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "gsmi: unable to register platform driver\n");
+ return ret;
+ }
+#endif
+
/* register device */
gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
if (IS_ERR(gsmi_dev.pdev)) {
@@ -886,11 +987,14 @@ static __init int gsmi_init(void)
goto out_remove_bin_file;
}
+#ifdef CONFIG_EFI_VARS
ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
if (ret) {
printk(KERN_INFO "gsmi: Failed to register efivars\n");
- goto out_remove_sysfs_files;
+ sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+ goto out_remove_bin_file;
}
+#endif
register_reboot_notifier(&gsmi_reboot_notifier);
register_die_notifier(&gsmi_die_notifier);
@@ -901,8 +1005,6 @@ static __init int gsmi_init(void)
return 0;
-out_remove_sysfs_files:
- sysfs_remove_files(gsmi_kobj, gsmi_attrs);
out_remove_bin_file:
sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
out_err:
@@ -922,7 +1024,9 @@ static void __exit gsmi_exit(void)
unregister_die_notifier(&gsmi_die_notifier);
atomic_notifier_chain_unregister(&panic_notifier_list,
&gsmi_panic_notifier);
+#ifdef CONFIG_EFI_VARS
efivars_unregister(&efivars);
+#endif
sysfs_remove_files(gsmi_kobj, gsmi_attrs);
sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
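eventlog_write() above treats the first u32 of the user buffer as the event type and the rest as the payload; the fix keeps count intact so the payload-size check compares count - sizeof(u32) against the data buffer, and a successful call returns the full byte count. A stand-alone sketch of that parsing (little-endian assumed for the printed type):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static long parse_event(const char *buf, size_t count, size_t max_payload)
{
	uint32_t type;

	if (count < sizeof(type))
		return -1;
	memcpy(&type, buf, sizeof(type));	/* leading u32 = event type */
	if (count - sizeof(type) > max_payload)
		return -1;	/* payload larger than the data buffer */
	printf("type=%u payload=%zu bytes\n",
	       (unsigned int)type, count - sizeof(type));
	return (long)count;	/* success: report everything consumed */
}

int main(void)
{
	char buf[8] = { 1, 0, 0, 0, 'd', 'a', 't', 'a' };

	return parse_event(buf, sizeof(buf), 4096) < 0;
}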
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1aa67bb5d8c0..c0c0b4e4e281 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -198,7 +198,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->name = name;
- /* We want to export the raw partion with name ${name}_raw */
+ /* We want to export the raw partition with name ${name}_raw */
sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
if (!sec->raw_name) {
err = -ENOMEM;
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
index f395dec27113..390aa13391e4 100644
--- a/drivers/firmware/scpi_pm_domain.c
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -121,7 +121,7 @@ static int scpi_pm_domain_probe(struct platform_device *pdev)
scpi_pd->domain = i;
scpi_pd->ops = scpi_ops;
- sprintf(scpi_pd->name, "%s.%d", np->name, i);
+ sprintf(scpi_pd->name, "%pOFn.%d", np, i);
scpi_pd->genpd.name = scpi_pd->name;
scpi_pd->genpd.power_off = scpi_pd_power_off;
scpi_pd->genpd.power_on = scpi_pd_power_on;
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 7fa793672a7a..610a1558e0ed 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -453,8 +453,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
ALTERA_CVP_MGR_NAME, pci_name(pdev));
- mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
- &altera_cvp_ops, conf);
+ mgr = devm_fpga_mgr_create(&pdev->dev, conf->mgr_name,
+ &altera_cvp_ops, conf);
if (!mgr) {
ret = -ENOMEM;
goto err_unmap;
@@ -463,10 +463,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, mgr);
ret = fpga_mgr_register(mgr);
- if (ret) {
- fpga_mgr_free(mgr);
+ if (ret)
goto err_unmap;
- }
ret = driver_create_file(&altera_cvp_driver.driver,
&driver_attr_chkcfg);
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index 23660ccd634b..a78e49c63c64 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -121,18 +121,16 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
/* Get f2s bridge configuration saved in handoff register */
regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask);
- br = fpga_bridge_create(dev, F2S_BRIDGE_NAME,
- &altera_fpga2sdram_br_ops, priv);
+ br = devm_fpga_bridge_create(dev, F2S_BRIDGE_NAME,
+ &altera_fpga2sdram_br_ops, priv);
if (!br)
return -ENOMEM;
platform_set_drvdata(pdev, br);
ret = fpga_bridge_register(br);
- if (ret) {
- fpga_bridge_free(br);
+ if (ret)
return ret;
- }
dev_info(dev, "driver initialized with handoff %08x\n", priv->mask);
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index ffd586c48ecf..dd58c4aea92e 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -213,7 +213,6 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
struct fpga_bridge *br;
struct resource *res;
u32 status, revision;
- int ret;
if (!np)
return -ENODEV;
@@ -245,20 +244,14 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
priv->base_addr = base_addr;
- br = fpga_bridge_create(dev, FREEZE_BRIDGE_NAME,
- &altera_freeze_br_br_ops, priv);
+ br = devm_fpga_bridge_create(dev, FREEZE_BRIDGE_NAME,
+ &altera_freeze_br_br_ops, priv);
if (!br)
return -ENOMEM;
platform_set_drvdata(pdev, br);
- ret = fpga_bridge_register(br);
- if (ret) {
- fpga_bridge_free(br);
- return ret;
- }
-
- return 0;
+ return fpga_bridge_register(br);
}
static int altera_freeze_br_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index a974d3f60321..77b95f251821 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -180,7 +180,8 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
}
}
- br = fpga_bridge_create(dev, priv->name, &altera_hps2fpga_br_ops, priv);
+ br = devm_fpga_bridge_create(dev, priv->name,
+ &altera_hps2fpga_br_ops, priv);
if (!br) {
ret = -ENOMEM;
goto err;
@@ -190,12 +191,10 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
ret = fpga_bridge_register(br);
if (ret)
- goto err_free;
+ goto err;
return 0;
-err_free:
- fpga_bridge_free(br);
err:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index 65e0b6a2c031..a7a3bf0b5202 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -177,7 +177,6 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
{
struct alt_pr_priv *priv;
struct fpga_manager *mgr;
- int ret;
u32 val;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -192,17 +191,13 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
(val & ALT_PR_CSR_STATUS_MSK) >> ALT_PR_CSR_STATUS_SFT,
(int)(val & ALT_PR_CSR_PR_START));
- mgr = fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv);
if (!mgr)
return -ENOMEM;
dev_set_drvdata(dev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
EXPORT_SYMBOL_GPL(alt_pr_register);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 24b25c626036..33aafda50af5 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -239,7 +239,6 @@ static int altera_ps_probe(struct spi_device *spi)
struct altera_ps_conf *conf;
const struct of_device_id *of_id;
struct fpga_manager *mgr;
- int ret;
conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
if (!conf)
@@ -275,18 +274,14 @@ static int altera_ps_probe(struct spi_device *spi)
snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s",
dev_driver_string(&spi->dev), dev_name(&spi->dev));
- mgr = fpga_mgr_create(&spi->dev, conf->mgr_name,
- &altera_ps_ops, conf);
+ mgr = devm_fpga_mgr_create(&spi->dev, conf->mgr_name,
+ &altera_ps_ops, conf);
if (!mgr)
return -ENOMEM;
spi_set_drvdata(spi, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int altera_ps_remove(struct spi_device *spi)
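The fpga-mgr and fpga-bridge conversions in this series swap the create/free pairs for devm_*_create() variants, whose allocation is released automatically when the device detaches, so probe error paths and remove callbacks lose their manual free calls. A toy single-slot devres analogue (illustration only, not the kernel's devres machinery):

#include <stdio.h>
#include <stdlib.h>

struct mgr {
	const char *name;
};

static void (*release_cb)(void *);
static void *release_arg;

/* Managed create: the buffer is owned by the "device", not the caller. */
static struct mgr *devm_mgr_create(const char *name)
{
	struct mgr *m = malloc(sizeof(*m));

	if (!m)
		return NULL;
	m->name = name;
	release_cb = free;	/* queued to run on device detach */
	release_arg = m;
	return m;
}

static void device_detach(void)
{
	if (release_cb)
		release_cb(release_arg);	/* devres runs the release */
}

int main(void)
{
	struct mgr *m = devm_mgr_create("demo mgr");

	if (!m)
		return 1;
	printf("probe ok: %s\n", m->name);	/* no free on any exit path */
	device_detach();
	return 0;
}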
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 0e81d33af856..025aba3ea76c 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -70,7 +70,7 @@ static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
incr ? '+' : '-', npages << PAGE_SHIFT,
current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
- ret ? "- execeeded" : "");
+ ret ? "- exceeded" : "");
up_write(&current->mm->mmap_sem);
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 7cc041def8b3..3ff9f3a687ce 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -61,7 +61,6 @@ static int fme_br_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fme_br_priv *priv;
struct fpga_bridge *br;
- int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -69,18 +68,14 @@ static int fme_br_probe(struct platform_device *pdev)
priv->pdata = dev_get_platdata(dev);
- br = fpga_bridge_create(dev, "DFL FPGA FME Bridge",
- &fme_bridge_ops, priv);
+ br = devm_fpga_bridge_create(dev, "DFL FPGA FME Bridge",
+ &fme_bridge_ops, priv);
if (!br)
return -ENOMEM;
platform_set_drvdata(pdev, br);
- ret = fpga_bridge_register(br);
- if (ret)
- fpga_bridge_free(br);
-
- return ret;
+ return fpga_bridge_register(br);
}
static int fme_br_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
index b5ef405b6d88..76f37709dd1a 100644
--- a/drivers/fpga/dfl-fme-mgr.c
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -201,7 +201,7 @@ static int fme_mgr_write(struct fpga_manager *mgr,
}
if (count < 4) {
- dev_err(dev, "Invaild PR bitstream size\n");
+ dev_err(dev, "Invalid PR bitstream size\n");
return -EINVAL;
}
@@ -287,7 +287,6 @@ static int fme_mgr_probe(struct platform_device *pdev)
struct fme_mgr_priv *priv;
struct fpga_manager *mgr;
struct resource *res;
- int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -309,19 +308,15 @@ static int fme_mgr_probe(struct platform_device *pdev)
fme_mgr_get_compat_id(priv->ioaddr, compat_id);
- mgr = fpga_mgr_create(dev, "DFL FME FPGA Manager",
- &fme_mgr_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "DFL FME FPGA Manager",
+ &fme_mgr_ops, priv);
if (!mgr)
return -ENOMEM;
mgr->compat_id = compat_id;
platform_set_drvdata(pdev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int fme_mgr_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 51a5ac2293a7..ec134ec93f08 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -39,7 +39,7 @@ static int fme_region_probe(struct platform_device *pdev)
if (IS_ERR(mgr))
return -EPROBE_DEFER;
- region = fpga_region_create(dev, mgr, fme_region_get_bridges);
+ region = devm_fpga_region_create(dev, mgr, fme_region_get_bridges);
if (!region) {
ret = -ENOMEM;
goto eprobe_mgr_put;
@@ -51,14 +51,12 @@ static int fme_region_probe(struct platform_device *pdev)
ret = fpga_region_register(region);
if (ret)
- goto region_free;
+ goto eprobe_mgr_put;
dev_dbg(dev, "DFL FME FPGA Region probed\n");
return 0;
-region_free:
- fpga_region_free(region);
eprobe_mgr_put:
fpga_mgr_put(mgr);
return ret;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index a9b521bccb06..2c09e502e721 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -899,7 +899,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
if (!cdev)
return ERR_PTR(-ENOMEM);
- cdev->region = fpga_region_create(info->dev, NULL, NULL);
+ cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
if (!cdev->region) {
ret = -ENOMEM;
goto free_cdev_exit;
@@ -911,7 +911,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
ret = fpga_region_register(cdev->region);
if (ret)
- goto free_region_exit;
+ goto free_cdev_exit;
/* create and init build info for enumeration */
binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
@@ -942,8 +942,6 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
unregister_region_exit:
fpga_region_unregister(cdev->region);
-free_region_exit:
- fpga_region_free(cdev->region);
free_cdev_exit:
devm_kfree(info->dev, cdev);
return ERR_PTR(ret);
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index c983dac97501..80bd8f1b2aa6 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -324,6 +324,9 @@ ATTRIBUTE_GROUPS(fpga_bridge);
* @br_ops: pointer to structure of fpga bridge ops
* @priv: FPGA bridge private data
*
+ * The caller of this function is responsible for freeing the bridge with
+ * fpga_bridge_free(). Using devm_fpga_bridge_create() instead is recommended.
+ *
* Return: struct fpga_bridge or NULL
*/
struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
@@ -378,8 +381,8 @@ error_kfree:
EXPORT_SYMBOL_GPL(fpga_bridge_create);
/**
- * fpga_bridge_free - free a fpga bridge and its id
- * @bridge: FPGA bridge struct created by fpga_bridge_create
+ * fpga_bridge_free - free an FPGA bridge created by fpga_bridge_create()
+ * @bridge: FPGA bridge struct
*/
void fpga_bridge_free(struct fpga_bridge *bridge)
{
@@ -388,9 +391,56 @@ void fpga_bridge_free(struct fpga_bridge *bridge)
}
EXPORT_SYMBOL_GPL(fpga_bridge_free);
+static void devm_fpga_bridge_release(struct device *dev, void *res)
+{
+ struct fpga_bridge *bridge = *(struct fpga_bridge **)res;
+
+ fpga_bridge_free(bridge);
+}
+
/**
- * fpga_bridge_register - register a fpga bridge
- * @bridge: FPGA bridge struct created by fpga_bridge_create
+ * devm_fpga_bridge_create - create and init a managed struct fpga_bridge
+ * @dev: FPGA bridge device from pdev
+ * @name: FPGA bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: FPGA bridge private data
+ *
+ * This function is intended for use in an FPGA bridge driver's probe function.
+ * After the bridge driver creates the struct with devm_fpga_bridge_create(), it
+ * should register the bridge with fpga_bridge_register(). The bridge driver's
+ * remove function should call fpga_bridge_unregister(). The bridge struct
+ * allocated with this function will be freed automatically on driver detach.
+ * This includes the case of a probe function returning an error before calling
+ * fpga_bridge_register(); the struct will still get cleaned up.
+ *
+ * Return: struct fpga_bridge or NULL
+ */
+struct fpga_bridge
+*devm_fpga_bridge_create(struct device *dev, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv)
+{
+ struct fpga_bridge **ptr, *bridge;
+
+ ptr = devres_alloc(devm_fpga_bridge_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ bridge = fpga_bridge_create(dev, name, br_ops, priv);
+ if (!bridge) {
+ devres_free(ptr);
+ } else {
+ *ptr = bridge;
+ devres_add(dev, ptr);
+ }
+
+ return bridge;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_bridge_create);
+
+/**
+ * fpga_bridge_register - register an FPGA bridge
+ *
+ * @bridge: FPGA bridge struct
*
* Return: 0 for success, error code otherwise.
*/
@@ -412,8 +462,11 @@ int fpga_bridge_register(struct fpga_bridge *bridge)
EXPORT_SYMBOL_GPL(fpga_bridge_register);
/**
- * fpga_bridge_unregister - unregister and free a fpga bridge
- * @bridge: FPGA bridge struct created by fpga_bridge_create
+ * fpga_bridge_unregister - unregister an FPGA bridge
+ *
+ * @bridge: FPGA bridge struct
+ *
+ * This function is intended for use in an FPGA bridge driver's remove function.
*/
void fpga_bridge_unregister(struct fpga_bridge *bridge)
{
@@ -430,9 +483,6 @@ EXPORT_SYMBOL_GPL(fpga_bridge_unregister);
static void fpga_bridge_dev_release(struct device *dev)
{
- struct fpga_bridge *bridge = to_fpga_bridge(dev);
-
- fpga_bridge_free(bridge);
}
static int __init fpga_bridge_dev_init(void)
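The devm_fpga_bridge_create() wrapper above follows the standard devres idiom: allocate a small devres node that holds only a pointer, create the real object, and register a release callback that frees it on driver detach. The same shape is repeated below for managers and regions. A generic sketch of the idiom, using a hypothetical struct foo with foo_create()/foo_free():

static void devm_foo_release(struct device *dev, void *res)
{
	foo_free(*(struct foo **)res);
}

struct foo *devm_foo_create(struct device *dev)
{
	struct foo **ptr, *f;

	/* The devres node stores only a pointer to the real object. */
	ptr = devres_alloc(devm_foo_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	f = foo_create(dev);
	if (!f) {
		devres_free(ptr);	/* creation failed: drop the node */
	} else {
		*ptr = f;
		devres_add(dev, ptr);	/* freed via the release on detach */
	}

	return f;
}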
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index a41b07e37884..c3866816456a 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -558,6 +558,9 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
*
+ * The caller of this function is responsible for freeing the struct with
+ * fpga_mgr_free(). Using devm_fpga_mgr_create() instead is recommended.
+ *
* Return: pointer to struct fpga_manager or NULL
*/
struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
@@ -618,8 +621,8 @@ error_kfree:
EXPORT_SYMBOL_GPL(fpga_mgr_create);
/**
- * fpga_mgr_free - deallocate a FPGA manager
- * @mgr: fpga manager struct created by fpga_mgr_create
+ * fpga_mgr_free - free an FPGA manager created with fpga_mgr_create()
+ * @mgr: fpga manager struct
*/
void fpga_mgr_free(struct fpga_manager *mgr)
{
@@ -628,9 +631,55 @@ void fpga_mgr_free(struct fpga_manager *mgr)
}
EXPORT_SYMBOL_GPL(fpga_mgr_free);
+static void devm_fpga_mgr_release(struct device *dev, void *res)
+{
+ struct fpga_manager *mgr = *(struct fpga_manager **)res;
+
+ fpga_mgr_free(mgr);
+}
+
+/**
+ * devm_fpga_mgr_create - create and initialize a managed FPGA manager struct
+ * @dev: fpga manager device from pdev
+ * @name: fpga manager name
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * This function is intended for use in an FPGA manager driver's probe function.
+ * After the manager driver creates the manager struct with
+ * devm_fpga_mgr_create(), it should register it with fpga_mgr_register(). The
+ * manager driver's remove function should call fpga_mgr_unregister(). The
+ * manager struct allocated with this function will be freed automatically on
+ * driver detach. This includes the case of a probe function returning an error
+ * before calling fpga_mgr_register(); the struct will still get cleaned up.
+ *
+ * Return: pointer to struct fpga_manager or NULL
+ */
+struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
+ const struct fpga_manager_ops *mops,
+ void *priv)
+{
+ struct fpga_manager **ptr, *mgr;
+
+ ptr = devres_alloc(devm_fpga_mgr_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ mgr = fpga_mgr_create(dev, name, mops, priv);
+ if (!mgr) {
+ devres_free(ptr);
+ } else {
+ *ptr = mgr;
+ devres_add(dev, ptr);
+ }
+
+ return mgr;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);
+
/**
* fpga_mgr_register - register a FPGA manager
- * @mgr: fpga manager struct created by fpga_mgr_create
+ * @mgr: fpga manager struct
*
* Return: 0 on success, negative error code otherwise.
*/
@@ -661,8 +710,10 @@ error_device:
EXPORT_SYMBOL_GPL(fpga_mgr_register);
/**
- * fpga_mgr_unregister - unregister and free a FPGA manager
- * @mgr: fpga manager struct
+ * fpga_mgr_unregister - unregister an FPGA manager
+ * @mgr: fpga manager struct
+ *
+ * This function is intended for use in an FPGA manager driver's remove function.
*/
void fpga_mgr_unregister(struct fpga_manager *mgr)
{
@@ -681,9 +732,6 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unregister);
static void fpga_mgr_dev_release(struct device *dev)
{
- struct fpga_manager *mgr = to_fpga_manager(dev);
-
- fpga_mgr_free(mgr);
}
static int __init fpga_mgr_class_init(void)
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 0d65220d5ec5..bde5a9d460c5 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -185,6 +185,10 @@ ATTRIBUTE_GROUPS(fpga_region);
* @mgr: manager that programs this region
* @get_bridges: optional function to get bridges to a list
*
+ * The caller of this function is responsible for freeing the resulting region
+ * struct with fpga_region_free(). Using devm_fpga_region_create() instead is
+ * recommended.
+ *
* Return: struct fpga_region or NULL
*/
struct fpga_region
@@ -230,8 +234,8 @@ err_free:
EXPORT_SYMBOL_GPL(fpga_region_create);
/**
- * fpga_region_free - free a struct fpga_region
- * @region: FPGA region created by fpga_region_create
+ * fpga_region_free - free an FPGA region created by fpga_region_create()
+ * @region: FPGA region
*/
void fpga_region_free(struct fpga_region *region)
{
@@ -240,21 +244,69 @@ void fpga_region_free(struct fpga_region *region)
}
EXPORT_SYMBOL_GPL(fpga_region_free);
+static void devm_fpga_region_release(struct device *dev, void *res)
+{
+ struct fpga_region *region = *(struct fpga_region **)res;
+
+ fpga_region_free(region);
+}
+
+/**
+ * devm_fpga_region_create - create and initialize a managed FPGA region struct
+ * @dev: device parent
+ * @mgr: manager that programs this region
+ * @get_bridges: optional function to get bridges to a list
+ *
+ * This function is intended for use in an FPGA region driver's probe function.
+ * After the region driver creates the region struct with
+ * devm_fpga_region_create(), it should register it with fpga_region_register().
+ * The region driver's remove function should call fpga_region_unregister().
+ * The region struct allocated with this function will be freed automatically on
+ * driver detach. This includes the case of a probe function returning an error
+ * before calling fpga_region_register(); the struct will still get cleaned up.
+ *
+ * Return: struct fpga_region or NULL
+ */
+struct fpga_region
+*devm_fpga_region_create(struct device *dev,
+ struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *))
+{
+ struct fpga_region **ptr, *region;
+
+ ptr = devres_alloc(devm_fpga_region_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ region = fpga_region_create(dev, mgr, get_bridges);
+ if (!region) {
+ devres_free(ptr);
+ } else {
+ *ptr = region;
+ devres_add(dev, ptr);
+ }
+
+ return region;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_region_create);
+
/**
* fpga_region_register - register a FPGA region
- * @region: FPGA region created by fpga_region_create
+ * @region: FPGA region
+ *
* Return: 0 or -errno
*/
int fpga_region_register(struct fpga_region *region)
{
return device_add(&region->dev);
-
}
EXPORT_SYMBOL_GPL(fpga_region_register);
/**
- * fpga_region_unregister - unregister and free a FPGA region
+ * fpga_region_unregister - unregister an FPGA region
* @region: FPGA region
+ *
+ * This function is intended for use in an FPGA region driver's remove function.
*/
void fpga_region_unregister(struct fpga_region *region)
{
@@ -264,9 +316,6 @@ EXPORT_SYMBOL_GPL(fpga_region_unregister);
static void fpga_region_dev_release(struct device *dev)
{
- struct fpga_region *region = to_fpga_region(dev);
-
- fpga_region_free(region);
}
/**
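With the region struct devres-managed, the only resource a region probe still unwinds by hand is the manager reference, which is why the error paths in dfl-fme-region.c and of-fpga-region.c collapse to a single fpga_mgr_put() label. A sketch of the simplified shape, with hypothetical names:

static int example_region_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fpga_manager *mgr;
	struct fpga_region *region;
	int ret;

	mgr = fpga_mgr_get(dev);	/* takes a reference on the manager */
	if (IS_ERR(mgr))
		return -EPROBE_DEFER;

	region = devm_fpga_region_create(dev, mgr, NULL);
	if (!region) {
		ret = -ENOMEM;
		goto err_put;
	}

	ret = fpga_region_register(region);
	if (ret)
		goto err_put;		/* no fpga_region_free() needed */

	/* Manager reference is kept until remove, where fpga_mgr_put() runs. */
	return 0;

err_put:
	fpga_mgr_put(mgr);		/* the one manual unwind step left */
	return ret;
}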
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 5981c7ee7a7d..6154661b8f76 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -175,18 +175,14 @@ static int ice40_fpga_probe(struct spi_device *spi)
return ret;
}
- mgr = fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager",
- &ice40_fpga_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager",
+ &ice40_fpga_ops, priv);
if (!mgr)
return -ENOMEM;
spi_set_drvdata(spi, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int ice40_fpga_remove(struct spi_device *spi)
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index a582e0000c97..4d8a87641587 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -356,25 +356,20 @@ static int machxo2_spi_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct fpga_manager *mgr;
- int ret;
if (spi->max_speed_hz > MACHXO2_MAX_SPEED) {
dev_err(dev, "Speed is too high\n");
return -EINVAL;
}
- mgr = fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager",
- &machxo2_ops, spi);
+ mgr = devm_fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager",
+ &machxo2_ops, spi);
if (!mgr)
return -ENOMEM;
spi_set_drvdata(spi, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int machxo2_spi_remove(struct spi_device *spi)
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 052a1342ab7e..122286fd255a 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -410,7 +410,7 @@ static int of_fpga_region_probe(struct platform_device *pdev)
if (IS_ERR(mgr))
return -EPROBE_DEFER;
- region = fpga_region_create(dev, mgr, of_fpga_region_get_bridges);
+ region = devm_fpga_region_create(dev, mgr, of_fpga_region_get_bridges);
if (!region) {
ret = -ENOMEM;
goto eprobe_mgr_put;
@@ -418,7 +418,7 @@ static int of_fpga_region_probe(struct platform_device *pdev)
ret = fpga_region_register(region);
if (ret)
- goto eprobe_free;
+ goto eprobe_mgr_put;
of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
dev_set_drvdata(dev, region);
@@ -427,8 +427,6 @@ static int of_fpga_region_probe(struct platform_device *pdev)
return 0;
-eprobe_free:
- fpga_region_free(region);
eprobe_mgr_put:
fpga_mgr_put(mgr);
return ret;
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index be30c48eb6e4..573d88bdf730 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -508,8 +508,8 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
return -EBUSY;
}
- mgr = fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager",
- &socfpga_a10_fpga_mgr_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager",
+ &socfpga_a10_fpga_mgr_ops, priv);
if (!mgr)
return -ENOMEM;
@@ -517,7 +517,6 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
ret = fpga_mgr_register(mgr);
if (ret) {
- fpga_mgr_free(mgr);
clk_disable_unprepare(priv->clk);
return ret;
}
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 959d71f26896..4a8a2fcd4e6c 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -571,18 +571,14 @@ static int socfpga_fpga_probe(struct platform_device *pdev)
if (ret)
return ret;
- mgr = fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
- &socfpga_fpga_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
+ &socfpga_fpga_ops, priv);
if (!mgr)
return -ENOMEM;
platform_set_drvdata(pdev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int socfpga_fpga_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 08efd1895b1b..dc22a5842609 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -118,7 +118,6 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
struct ts73xx_fpga_priv *priv;
struct fpga_manager *mgr;
struct resource *res;
- int ret;
priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -133,18 +132,14 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
return PTR_ERR(priv->io_base);
}
- mgr = fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
- &ts73xx_fpga_ops, priv);
+ mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
+ &ts73xx_fpga_ops, priv);
if (!mgr)
return -ENOMEM;
platform_set_drvdata(pdev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int ts73xx_fpga_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index 07ba1539e82c..641036135207 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -121,8 +121,8 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
clk_disable(priv->clk);
- br = fpga_bridge_create(&pdev->dev, "Xilinx PR Decoupler",
- &xlnx_pr_decoupler_br_ops, priv);
+ br = devm_fpga_bridge_create(&pdev->dev, "Xilinx PR Decoupler",
+ &xlnx_pr_decoupler_br_ops, priv);
if (!br) {
err = -ENOMEM;
goto err_clk;
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 8d1945966533..469486be20c4 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -144,7 +144,6 @@ static int xilinx_spi_probe(struct spi_device *spi)
{
struct xilinx_spi_conf *conf;
struct fpga_manager *mgr;
- int ret;
conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
if (!conf)
@@ -167,18 +166,15 @@ static int xilinx_spi_probe(struct spi_device *spi)
return PTR_ERR(conf->done);
}
- mgr = fpga_mgr_create(&spi->dev, "Xilinx Slave Serial FPGA Manager",
- &xilinx_spi_ops, conf);
+ mgr = devm_fpga_mgr_create(&spi->dev,
+ "Xilinx Slave Serial FPGA Manager",
+ &xilinx_spi_ops, conf);
if (!mgr)
return -ENOMEM;
spi_set_drvdata(spi, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int xilinx_spi_remove(struct spi_device *spi)
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 3110e00121ca..bb82efeebb9d 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -614,8 +614,8 @@ static int zynq_fpga_probe(struct platform_device *pdev)
clk_disable(priv->clk);
- mgr = fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager",
- &zynq_fpga_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager",
+ &zynq_fpga_ops, priv);
if (!mgr)
return -ENOMEM;
@@ -624,7 +624,6 @@ static int zynq_fpga_probe(struct platform_device *pdev)
err = fpga_mgr_register(mgr);
if (err) {
dev_err(dev, "unable to register FPGA manager\n");
- fpga_mgr_free(mgr);
clk_unprepare(priv->clk);
return err;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 2b1a7b455aa8..55b72fbe1631 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1194,7 +1194,7 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
{
/* Never allow fallback if the device has properties */
- if (adev->data.properties || adev->driver_gpios)
+ if (acpi_dev_has_props(adev) || adev->driver_gpios)
return false;
return con_id == NULL;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 2d45d1dd9554..643f5edd68fe 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1446,8 +1446,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
}
/* The CEC module handles HDMI hotplug detection */
- cec_np = of_find_compatible_node(np->parent, NULL,
- "mediatek,mt8173-cec");
+ cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
if (!cec_np) {
dev_err(dev, "Failed to find CEC node\n");
return -EINVAL;
@@ -1457,8 +1456,10 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
if (!cec_pdev) {
dev_err(hdmi->dev, "Waiting for CEC device %pOF\n",
cec_np);
+ of_node_put(cec_np);
return -EPROBE_DEFER;
}
+ of_node_put(cec_np);
hdmi->cec_dev = &cec_pdev->dev;
/*
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index da1363a0c54d..93d70f4a2154 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -633,8 +633,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
struct device_node *child, *node;
int ret;
- node = of_find_compatible_node(dev->of_node, NULL,
- "qcom,gpu-pwrlevels");
+ node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
dev_err(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
@@ -655,6 +654,8 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
dev_pm_opp_add(dev, val, 0);
}
+ of_node_put(node);
+
return 0;
}
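Both hunks above replace of_find_compatible_node(np->parent, ...), which walks the whole tree onward from the given node and can match outside the intended parent, with of_get_compatible_child(), which only searches direct children and returns a node reference that must be dropped with of_node_put(). A sketch of the pattern, with a hypothetical compatible string:

static int example_find_child(struct device_node *parent)
{
	struct device_node *child;

	child = of_get_compatible_child(parent, "vendor,example-child");
	if (!child)
		return -ENXIO;		/* no matching direct child */

	/* ... look up resources under "child" ... */

	of_node_put(child);		/* drop the ref on every exit path */
	return 0;
}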
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 61e1953ff921..18c846477ba2 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -182,6 +182,19 @@ config HID_BETOP_FF
Currently the following devices are known to be supported:
- BETOP 2185 PC & BFM MODE
+config HID_BIGBEN_FF
+ tristate "BigBen Interactive Kids' gamepad support"
+ depends on USB_HID
+ depends on NEW_LEDS
+ depends on LEDS_CLASS
+ select INPUT_FF_MEMLESS
+ default !EXPERT
+ help
+ Support for the "Kid-friendly Wired Controller" PS3OFMINIPAD
+ gamepad made by BigBen Interactive, originally sold as a PS3
+ accessory. This driver fixes input mapping and adds support for
+ force feedback effects and LEDs on the device.
+
config HID_CHERRY
tristate "Cherry Cymotion keyboard"
depends on HID
@@ -351,7 +364,7 @@ config HOLTEK_FF
config HID_GOOGLE_HAMMER
tristate "Google Hammer Keyboard"
- depends on USB_HID && LEDS_CLASS
+ depends on USB_HID && LEDS_CLASS && MFD_CROS_EC
---help---
Say Y here if you have a Google Hammer device.
@@ -596,6 +609,7 @@ config HID_MICROSOFT
tristate "Microsoft non-fully HID-compliant devices"
depends on HID
default !EXPERT
+ select INPUT_FF_MEMLESS
---help---
Support for Microsoft devices that are not fully compliant with HID standard.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index bd7ac53b75c5..896a51ce7ce0 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_HID_ASUS) += hid-asus.o
obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o
+obj-$(CONFIG_HID_BIGBEN_FF) += hid-bigbenff.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CMEDIA) += hid-cmedia.o
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
new file mode 100644
index 000000000000..3f6abd190df4
--- /dev/null
+++ b/drivers/hid/hid-bigbenff.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * LED & force feedback support for BigBen Interactive
+ *
+ * 0x146b:0x0902 "Bigben Interactive Bigben Game Pad"
+ * "Kid-friendly Wired Controller" PS3OFMINIPAD SONY
+ * sold for use with the PS3
+ *
+ * Copyright (c) 2018 Hanno Zulla <kontakt@hanno.de>
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/leds.h>
+#include <linux/hid.h>
+
+#include "hid-ids.h"
+
+
+/*
+ * The original descriptor for 0x146b:0x0902
+ *
+ * 0x05, 0x01, // Usage Page (Generic Desktop Ctrls)
+ * 0x09, 0x05, // Usage (Game Pad)
+ * 0xA1, 0x01, // Collection (Application)
+ * 0x15, 0x00, // Logical Minimum (0)
+ * 0x25, 0x01, // Logical Maximum (1)
+ * 0x35, 0x00, // Physical Minimum (0)
+ * 0x45, 0x01, // Physical Maximum (1)
+ * 0x75, 0x01, // Report Size (1)
+ * 0x95, 0x0D, // Report Count (13)
+ * 0x05, 0x09, // Usage Page (Button)
+ * 0x19, 0x01, // Usage Minimum (0x01)
+ * 0x29, 0x0D, // Usage Maximum (0x0D)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x95, 0x03, // Report Count (3)
+ * 0x81, 0x01, // Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x05, 0x01, // Usage Page (Generic Desktop Ctrls)
+ * 0x25, 0x07, // Logical Maximum (7)
+ * 0x46, 0x3B, 0x01, // Physical Maximum (315)
+ * 0x75, 0x04, // Report Size (4)
+ * 0x95, 0x01, // Report Count (1)
+ * 0x65, 0x14, // Unit (System: English Rotation, Length: Centimeter)
+ * 0x09, 0x39, // Usage (Hat switch)
+ * 0x81, 0x42, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,Null State)
+ * 0x65, 0x00, // Unit (None)
+ * 0x95, 0x01, // Report Count (1)
+ * 0x81, 0x01, // Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x26, 0xFF, 0x00, // Logical Maximum (255)
+ * 0x46, 0xFF, 0x00, // Physical Maximum (255)
+ * 0x09, 0x30, // Usage (X)
+ * 0x09, 0x31, // Usage (Y)
+ * 0x09, 0x32, // Usage (Z)
+ * 0x09, 0x35, // Usage (Rz)
+ * 0x75, 0x08, // Report Size (8)
+ * 0x95, 0x04, // Report Count (4)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x06, 0x00, 0xFF, // Usage Page (Vendor Defined 0xFF00)
+ * 0x09, 0x20, // Usage (0x20)
+ * 0x09, 0x21, // Usage (0x21)
+ * 0x09, 0x22, // Usage (0x22)
+ * 0x09, 0x23, // Usage (0x23)
+ * 0x09, 0x24, // Usage (0x24)
+ * 0x09, 0x25, // Usage (0x25)
+ * 0x09, 0x26, // Usage (0x26)
+ * 0x09, 0x27, // Usage (0x27)
+ * 0x09, 0x28, // Usage (0x28)
+ * 0x09, 0x29, // Usage (0x29)
+ * 0x09, 0x2A, // Usage (0x2A)
+ * 0x09, 0x2B, // Usage (0x2B)
+ * 0x95, 0x0C, // Report Count (12)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x0A, 0x21, 0x26, // Usage (0x2621)
+ * 0x95, 0x08, // Report Count (8)
+ * 0xB1, 0x02, // Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile)
+ * 0x0A, 0x21, 0x26, // Usage (0x2621)
+ * 0x91, 0x02, // Output (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile)
+ * 0x26, 0xFF, 0x03, // Logical Maximum (1023)
+ * 0x46, 0xFF, 0x03, // Physical Maximum (1023)
+ * 0x09, 0x2C, // Usage (0x2C)
+ * 0x09, 0x2D, // Usage (0x2D)
+ * 0x09, 0x2E, // Usage (0x2E)
+ * 0x09, 0x2F, // Usage (0x2F)
+ * 0x75, 0x10, // Report Size (16)
+ * 0x95, 0x04, // Report Count (4)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0xC0, // End Collection
+ */
+
+#define PID0902_RDESC_ORIG_SIZE 137
+
+/*
+ * The fixed descriptor for 0x146b:0x0902
+ *
+ * - map buttons according to gamepad.rst
+ * - assign right stick from Z/Rz to Rx/Ry
+ * - map previously unused analog trigger data to Z/RZ
+ * - simplify feature and output descriptor
+ */
+static __u8 pid0902_rdesc_fixed[] = {
+ 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
+ 0x09, 0x05, /* Usage (Game Pad) */
+ 0xA1, 0x01, /* Collection (Application) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x25, 0x01, /* Logical Maximum (1) */
+ 0x35, 0x00, /* Physical Minimum (0) */
+ 0x45, 0x01, /* Physical Maximum (1) */
+ 0x75, 0x01, /* Report Size (1) */
+ 0x95, 0x0D, /* Report Count (13) */
+ 0x05, 0x09, /* Usage Page (Button) */
+ 0x09, 0x05, /* Usage (BTN_WEST) */
+ 0x09, 0x01, /* Usage (BTN_SOUTH) */
+ 0x09, 0x02, /* Usage (BTN_EAST) */
+ 0x09, 0x04, /* Usage (BTN_NORTH) */
+ 0x09, 0x07, /* Usage (BTN_TL) */
+ 0x09, 0x08, /* Usage (BTN_TR) */
+ 0x09, 0x09, /* Usage (BTN_TL2) */
+ 0x09, 0x0A, /* Usage (BTN_TR2) */
+ 0x09, 0x0B, /* Usage (BTN_SELECT) */
+ 0x09, 0x0C, /* Usage (BTN_START) */
+ 0x09, 0x0E, /* Usage (BTN_THUMBL) */
+ 0x09, 0x0F, /* Usage (BTN_THUMBR) */
+ 0x09, 0x0D, /* Usage (BTN_MODE) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x75, 0x01, /* Report Size (1) */
+ 0x95, 0x03, /* Report Count (3) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
+ 0x25, 0x07, /* Logical Maximum (7) */
+ 0x46, 0x3B, 0x01, /* Physical Maximum (315) */
+ 0x75, 0x04, /* Report Size (4) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x65, 0x14, /* Unit (System: English Rotation, Length: Centimeter) */
+ 0x09, 0x39, /* Usage (Hat switch) */
+ 0x81, 0x42, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,Null State) */
+ 0x65, 0x00, /* Unit (None) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
+ 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
+ 0x09, 0x30, /* Usage (X) */
+ 0x09, 0x31, /* Usage (Y) */
+ 0x09, 0x33, /* Usage (Rx) */
+ 0x09, 0x34, /* Usage (Ry) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x04, /* Report Count (4) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x95, 0x0A, /* Report Count (10) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
+ 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
+ 0x09, 0x32, /* Usage (Z) */
+ 0x09, 0x35, /* Usage (Rz) */
+ 0x95, 0x02, /* Report Count (2) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined 0xFF00) */
+ 0xB1, 0x02, /* Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) */
+ 0x0A, 0x21, 0x26, /* Usage (0x2621) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x91, 0x02, /* Output (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) */
+ 0x0A, 0x21, 0x26, /* Usage (0x2621) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0xC0, /* End Collection */
+};
+
+#define NUM_LEDS 4
+
+struct bigben_device {
+ struct hid_device *hid;
+ struct hid_report *report;
+ u8 led_state; /* LED1 = 1 .. LED4 = 8 */
+ u8 right_motor_on; /* right motor off/on 0/1 */
+ u8 left_motor_force; /* left motor force 0-255 */
+ struct led_classdev *leds[NUM_LEDS];
+ bool work_led;
+ bool work_ff;
+ struct work_struct worker;
+};
+
+
+static void bigben_worker(struct work_struct *work)
+{
+ struct bigben_device *bigben = container_of(work,
+ struct bigben_device, worker);
+ struct hid_field *report_field = bigben->report->field[0];
+
+ if (bigben->work_led) {
+ bigben->work_led = false;
+ report_field->value[0] = 0x01; /* 1 = led message */
+ report_field->value[1] = 0x08; /* reserved value, always 8 */
+ report_field->value[2] = bigben->led_state;
+ report_field->value[3] = 0x00; /* padding */
+ report_field->value[4] = 0x00; /* padding */
+ report_field->value[5] = 0x00; /* padding */
+ report_field->value[6] = 0x00; /* padding */
+ report_field->value[7] = 0x00; /* padding */
+ hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
+ }
+
+ if (bigben->work_ff) {
+ bigben->work_ff = false;
+ report_field->value[0] = 0x02; /* 2 = rumble effect message */
+ report_field->value[1] = 0x08; /* reserved value, always 8 */
+ report_field->value[2] = bigben->right_motor_on;
+ report_field->value[3] = bigben->left_motor_force;
+ report_field->value[4] = 0xff; /* duration 0-254 (255 = nonstop) */
+ report_field->value[5] = 0x00; /* padding */
+ report_field->value[6] = 0x00; /* padding */
+ report_field->value[7] = 0x00; /* padding */
+ hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
+ }
+}
+
+static int hid_bigben_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct bigben_device *bigben = data;
+ u8 right_motor_on;
+ u8 left_motor_force;
+
+ if (effect->type != FF_RUMBLE)
+ return 0;
+
+ right_motor_on = effect->u.rumble.weak_magnitude ? 1 : 0;
+ left_motor_force = effect->u.rumble.strong_magnitude / 256;
+
+ if (right_motor_on != bigben->right_motor_on ||
+ left_motor_force != bigben->left_motor_force) {
+ bigben->right_motor_on = right_motor_on;
+ bigben->left_motor_force = left_motor_force;
+ bigben->work_ff = true;
+ schedule_work(&bigben->worker);
+ }
+
+ return 0;
+}
+
+static void bigben_set_led(struct led_classdev *led,
+ enum led_brightness value)
+{
+ struct device *dev = led->dev->parent;
+ struct hid_device *hid = to_hid_device(dev);
+ struct bigben_device *bigben = hid_get_drvdata(hid);
+ int n;
+ bool work;
+
+ if (!bigben) {
+ hid_err(hid, "no device data\n");
+ return;
+ }
+
+ for (n = 0; n < NUM_LEDS; n++) {
+ if (led == bigben->leds[n]) {
+ if (value == LED_OFF) {
+ work = (bigben->led_state & BIT(n));
+ bigben->led_state &= ~BIT(n);
+ } else {
+ work = !(bigben->led_state & BIT(n));
+ bigben->led_state |= BIT(n);
+ }
+
+ if (work) {
+ bigben->work_led = true;
+ schedule_work(&bigben->worker);
+ }
+ return;
+ }
+ }
+}
+
+static enum led_brightness bigben_get_led(struct led_classdev *led)
+{
+ struct device *dev = led->dev->parent;
+ struct hid_device *hid = to_hid_device(dev);
+ struct bigben_device *bigben = hid_get_drvdata(hid);
+ int n;
+
+ if (!bigben) {
+ hid_err(hid, "no device data\n");
+ return LED_OFF;
+ }
+
+ for (n = 0; n < NUM_LEDS; n++) {
+ if (led == bigben->leds[n])
+ return (bigben->led_state & BIT(n)) ? LED_ON : LED_OFF;
+ }
+
+ return LED_OFF;
+}
+
+static void bigben_remove(struct hid_device *hid)
+{
+ struct bigben_device *bigben = hid_get_drvdata(hid);
+
+ cancel_work_sync(&bigben->worker);
+ hid_hw_close(hid);
+ hid_hw_stop(hid);
+}
+
+static int bigben_probe(struct hid_device *hid,
+ const struct hid_device_id *id)
+{
+ struct bigben_device *bigben;
+ struct hid_input *hidinput;
+ struct list_head *report_list;
+ struct led_classdev *led;
+ char *name;
+ size_t name_sz;
+ int n, error;
+
+ bigben = devm_kzalloc(&hid->dev, sizeof(*bigben), GFP_KERNEL);
+ if (!bigben)
+ return -ENOMEM;
+ hid_set_drvdata(hid, bigben);
+ bigben->hid = hid;
+
+ error = hid_parse(hid);
+ if (error) {
+ hid_err(hid, "parse failed\n");
+ return error;
+ }
+
+ error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (error) {
+ hid_err(hid, "hw start failed\n");
+ return error;
+ }
+
+ report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ bigben->report = list_entry(report_list->next,
+ struct hid_report, list);
+
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ set_bit(FF_RUMBLE, hidinput->input->ffbit);
+
+ INIT_WORK(&bigben->worker, bigben_worker);
+
+ error = input_ff_create_memless(hidinput->input, bigben,
+ hid_bigben_play_effect);
+ if (error)
+ return error;
+
+ name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1;
+
+ for (n = 0; n < NUM_LEDS; n++) {
+ led = devm_kzalloc(
+ &hid->dev,
+ sizeof(struct led_classdev) + name_sz,
+ GFP_KERNEL
+ );
+ if (!led)
+ return -ENOMEM;
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz,
+ "%s:red:bigben%d",
+ dev_name(&hid->dev), n + 1
+ );
+ led->name = name;
+ led->brightness = (n == 0) ? LED_ON : LED_OFF;
+ led->max_brightness = 1;
+ led->brightness_get = bigben_get_led;
+ led->brightness_set = bigben_set_led;
+ bigben->leds[n] = led;
+ error = devm_led_classdev_register(&hid->dev, led);
+ if (error)
+ return error;
+ }
+
+ /* initial state: LED1 is on, no rumble effect */
+ bigben->led_state = BIT(0);
+ bigben->right_motor_on = 0;
+ bigben->left_motor_force = 0;
+ bigben->work_led = true;
+ bigben->work_ff = true;
+ schedule_work(&bigben->worker);
+
+ hid_info(hid, "LED and force feedback support for BigBen gamepad\n");
+
+ return 0;
+}
+
+static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize == PID0902_RDESC_ORIG_SIZE) {
+ rdesc = pid0902_rdesc_fixed;
+ *rsize = sizeof(pid0902_rdesc_fixed);
+ } else
+ hid_warn(hid, "unexpected rdesc, please submit for review\n");
+ return rdesc;
+}
+
+static const struct hid_device_id bigben_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_BIGBEN, USB_DEVICE_ID_BIGBEN_PS3OFMINIPAD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, bigben_devices);
+
+static struct hid_driver bigben_driver = {
+ .name = "bigben",
+ .id_table = bigben_devices,
+ .probe = bigben_probe,
+ .report_fixup = bigben_report_fixup,
+ .remove = bigben_remove,
+};
+module_hid_driver(bigben_driver);
+
+MODULE_LICENSE("GPL");
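For reference, the memless force-feedback device registered by bigben_probe() is driven through the standard evdev FF interface; the strong magnitude is scaled down to the left motor's 0-255 force and the weak magnitude selects the on/off right motor, as in hid_bigben_play_effect(). A hedged userspace sketch, assuming the gamepad shows up as /dev/input/event0:

/* Userspace sketch: upload and play one rumble effect (device path assumed). */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect fx;
	struct input_event play;
	int fd = open("/dev/input/event0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;

	memset(&fx, 0, sizeof(fx));
	fx.type = FF_RUMBLE;
	fx.id = -1;				/* let the kernel pick an id */
	fx.u.rumble.strong_magnitude = 0xc000;	/* left motor, scaled /256 */
	fx.u.rumble.weak_magnitude = 0x8000;	/* right motor, on/off only */
	fx.replay.length = 1000;		/* ms */
	if (ioctl(fd, EVIOCSFF, &fx) < 0)
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = fx.id;
	play.value = 1;				/* start playback */
	if (write(fd, &play, sizeof(play)) < 0)
		return 1;

	sleep(2);
	close(fd);
	return 0;
}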
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 44564f61e9cc..5bec9244c45b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -406,7 +406,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
parser->global.report_size = item_udata(item);
- if (parser->global.report_size > 128) {
+ if (parser->global.report_size > 256) {
hid_err(parser->device, "invalid report_size %d\n",
parser->global.report_size);
return -1;
diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
index ad2e87de7dc5..3f0916b64c60 100644
--- a/drivers/hid/hid-cougar.c
+++ b/drivers/hid/hid-cougar.c
@@ -7,6 +7,7 @@
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/printk.h>
#include "hid-ids.h"
@@ -15,11 +16,9 @@ MODULE_DESCRIPTION("Cougar 500k Gaming Keyboard");
MODULE_LICENSE("GPL");
MODULE_INFO(key_mappings, "G1-G6 are mapped to F13-F18");
-static int cougar_g6_is_space = 1;
-module_param_named(g6_is_space, cougar_g6_is_space, int, 0600);
+static bool g6_is_space = true;
MODULE_PARM_DESC(g6_is_space,
- "If set, G6 programmable key sends SPACE instead of F18 (0=off, 1=on) (default=1)");
-
+ "If true, G6 programmable key sends SPACE instead of F18 (default=true)");
#define COUGAR_VENDOR_USAGE 0xff00ff00
@@ -82,20 +81,23 @@ struct cougar {
static LIST_HEAD(cougar_udev_list);
static DEFINE_MUTEX(cougar_udev_list_lock);
-static void cougar_fix_g6_mapping(struct hid_device *hdev)
+/**
+ * cougar_fix_g6_mapping - configure the mapping for key G6/Spacebar
+ */
+static void cougar_fix_g6_mapping(void)
{
int i;
for (i = 0; cougar_mapping[i][0]; i++) {
if (cougar_mapping[i][0] == COUGAR_KEY_G6) {
cougar_mapping[i][1] =
- cougar_g6_is_space ? KEY_SPACE : KEY_F18;
- hid_info(hdev, "G6 mapped to %s\n",
- cougar_g6_is_space ? "space" : "F18");
+ g6_is_space ? KEY_SPACE : KEY_F18;
+ pr_info("cougar: G6 mapped to %s\n",
+ g6_is_space ? "space" : "F18");
return;
}
}
- hid_warn(hdev, "no mapping defined for G6/spacebar");
+ pr_warn("cougar: no mappings defined for G6/spacebar");
}
/*
@@ -154,7 +156,8 @@ static void cougar_remove_shared_data(void *resource)
* Bind the device group's shared data to this cougar struct.
* If no shared data exists for this group, create and initialize it.
*/
-static int cougar_bind_shared_data(struct hid_device *hdev, struct cougar *cougar)
+static int cougar_bind_shared_data(struct hid_device *hdev,
+ struct cougar *cougar)
{
struct cougar_shared *shared;
int error = 0;
@@ -228,7 +231,6 @@ static int cougar_probe(struct hid_device *hdev,
* to it.
*/
if (hdev->collection->usage == HID_GD_KEYBOARD) {
- cougar_fix_g6_mapping(hdev);
list_for_each_entry_safe(hidinput, next, &hdev->inputs, list) {
if (hidinput->registered && hidinput->input != NULL) {
cougar->shared->input = hidinput->input;
@@ -237,6 +239,8 @@ static int cougar_probe(struct hid_device *hdev,
}
}
} else if (hdev->collection->usage == COUGAR_VENDOR_USAGE) {
+ /* Preinit the mapping table */
+ cougar_fix_g6_mapping();
error = hid_hw_open(hdev);
if (error)
goto fail_stop_and_cleanup;
@@ -257,26 +261,32 @@ static int cougar_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *data, int size)
{
struct cougar *cougar;
+ struct cougar_shared *shared;
unsigned char code, action;
int i;
cougar = hid_get_drvdata(hdev);
- if (!cougar->special_intf || !cougar->shared ||
- !cougar->shared->input || !cougar->shared->enabled)
+ shared = cougar->shared;
+ if (!cougar->special_intf || !shared)
return 0;
+ if (!shared->enabled || !shared->input)
+ return -EPERM;
+
code = data[COUGAR_FIELD_CODE];
action = data[COUGAR_FIELD_ACTION];
for (i = 0; cougar_mapping[i][0]; i++) {
if (code == cougar_mapping[i][0]) {
- input_event(cougar->shared->input, EV_KEY,
+ input_event(shared->input, EV_KEY,
cougar_mapping[i][1], action);
- input_sync(cougar->shared->input);
- return 0;
+ input_sync(shared->input);
+ return -EPERM;
}
}
- hid_warn(hdev, "unmapped special key code %x: ignoring\n", code);
- return 0;
+ /* Avoid warnings on the same unmapped key twice */
+ if (action != 0)
+ hid_warn(hdev, "unmapped special key code %0x: ignoring\n", code);
+ return -EPERM;
}
static void cougar_remove(struct hid_device *hdev)
@@ -293,6 +303,26 @@ static void cougar_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+static int cougar_param_set_g6_is_space(const char *val,
+ const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+
+ cougar_fix_g6_mapping();
+
+ return 0;
+}
+
+static const struct kernel_param_ops cougar_g6_is_space_ops = {
+ .set = cougar_param_set_g6_is_space,
+ .get = param_get_bool,
+};
+module_param_cb(g6_is_space, &cougar_g6_is_space_ops, &g6_is_space, 0644);
+
static struct hid_device_id cougar_id_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD) },
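The rework above replaces a plain writable int parameter, whose changes were silently ignored until the next probe, with module_param_cb() and a set hook that re-runs cougar_fix_g6_mapping() immediately, so writing to /sys/module/hid_cougar/parameters/g6_is_space takes effect on the fly. The generic shape of such a live-updating bool parameter, with hypothetical names:

/* Sketch of a live-updating bool module parameter (names hypothetical). */
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool feature_on = true;

static int feature_param_set(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_bool(val, kp);	/* updates feature_on */

	if (ret)
		return ret;

	/* React to the new value immediately, not just on next probe. */
	pr_info("feature_on is now %d\n", feature_on);
	return 0;
}

static const struct kernel_param_ops feature_ops = {
	.set = feature_param_set,
	.get = param_get_bool,
};
module_param_cb(feature_on, &feature_ops, &feature_on, 0644);
MODULE_LICENSE("GPL");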
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 07e26c3567eb..0bfd6d1b44c1 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -497,7 +497,7 @@ static int elan_probe(struct hid_device *hdev, const struct hid_device_id *id)
return 0;
if (!drvdata->input) {
- hid_err(hdev, "Input device is not registred\n");
+ hid_err(hdev, "Input device is not registered\n");
ret = -ENAVAIL;
goto err;
}
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 6bf4da7ad63a..ee5e0bdcf078 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -13,16 +13,268 @@
* any later version.
*/
+#include <linux/acpi.h>
#include <linux/hid.h>
#include <linux/leds.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <asm/unaligned.h>
#include "hid-ids.h"
-#define MAX_BRIGHTNESS 100
+/*
+ * C(hrome)B(ase)A(ttached)S(witch) - switch exported by Chrome EC and reporting
+ * state of the "Whiskers" base - attached or detached. Whiskers USB device also
+ * reports position of the keyboard - folded or not. Combining base state and
+ * position allows us to generate proper "Tablet mode" events.
+ */
+struct cbas_ec {
+ struct device *dev; /* The platform device (EC) */
+ struct input_dev *input;
+ bool base_present;
+ struct notifier_block notifier;
+};
-/* HID usage for keyboard backlight (Alphanumeric display brightness) */
-#define HID_AD_BRIGHTNESS 0x00140046
+static struct cbas_ec cbas_ec;
+static DEFINE_SPINLOCK(cbas_ec_lock);
+static DEFINE_MUTEX(cbas_ec_reglock);
+
+static bool cbas_parse_base_state(const void *data)
+{
+ u32 switches = get_unaligned_le32(data);
+
+ return !!(switches & BIT(EC_MKBP_BASE_ATTACHED));
+}
+
+static int cbas_ec_query_base(struct cros_ec_device *ec_dev, bool get_state,
+ bool *state)
+{
+ struct ec_params_mkbp_info *params;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(sizeof(u32), sizeof(*params)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_MKBP_INFO;
+ msg->version = 1;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(u32);
+ params = (struct ec_params_mkbp_info *)msg->data;
+ params->info_type = get_state ?
+ EC_MKBP_INFO_CURRENT : EC_MKBP_INFO_SUPPORTED;
+ params->event_type = EC_MKBP_EVENT_SWITCH;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret >= 0) {
+ if (ret != sizeof(u32)) {
+ dev_warn(ec_dev->dev, "wrong result size: %d != %zu\n",
+ ret, sizeof(u32));
+ ret = -EPROTO;
+ } else {
+ *state = cbas_parse_base_state(msg->data);
+ ret = 0;
+ }
+ }
+
+ kfree(msg);
+
+ return ret;
+}
+
+static int cbas_ec_notify(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_device *ec = _notify;
+ unsigned long flags;
+ bool base_present;
+
+ if (ec->event_data.event_type == EC_MKBP_EVENT_SWITCH) {
+ base_present = cbas_parse_base_state(
+ &ec->event_data.data.switches);
+ dev_dbg(cbas_ec.dev,
+ "%s: base: %d\n", __func__, base_present);
+
+ if (device_may_wakeup(cbas_ec.dev) ||
+ !queued_during_suspend) {
+
+ pm_wakeup_event(cbas_ec.dev, 0);
+
+ spin_lock_irqsave(&cbas_ec_lock, flags);
+
+ /*
+ * While the input layer dedupes the events, we do not want
+ * to disrupt the state reported by the base by overriding
+ * it with the state reported by the lid. Only report
+ * changes, as we assume that on attach the base is not
+ * folded.
+ */
+ if (base_present != cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input,
+ SW_TABLET_MODE,
+ !base_present);
+ input_sync(cbas_ec.input);
+ cbas_ec.base_present = base_present;
+ }
+
+ spin_unlock_irqrestore(&cbas_ec_lock, flags);
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static __maybe_unused int cbas_ec_resume(struct device *dev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(dev->parent);
+ bool base_present;
+ int error;
+
+ error = cbas_ec_query_base(ec, true, &base_present);
+ if (error) {
+ dev_warn(dev, "failed to fetch base state on resume: %d\n",
+ error);
+ } else {
+ spin_lock_irq(&cbas_ec_lock);
+
+ cbas_ec.base_present = base_present;
+
+ /*
+ * Only report if base is disconnected. If base is connected,
+ * it will resend its state on resume, and we'll update it
+ * in hammer_event().
+ */
+ if (!cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input, SW_TABLET_MODE, 1);
+ input_sync(cbas_ec.input);
+ }
+
+ spin_unlock_irq(&cbas_ec_lock);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cbas_ec_pm_ops, NULL, cbas_ec_resume);
+
+static void cbas_ec_set_input(struct input_dev *input)
+{
+ /* Take the lock so hammer_event() does not race with us here */
+ spin_lock_irq(&cbas_ec_lock);
+ cbas_ec.input = input;
+ spin_unlock_irq(&cbas_ec_lock);
+}
+
+static int __cbas_ec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct input_dev *input;
+ bool base_supported;
+ int error;
+
+ error = cbas_ec_query_base(ec, false, &base_supported);
+ if (error)
+ return error;
+
+ if (!base_supported)
+ return -ENXIO;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Whiskers Tablet Mode Switch";
+ input->id.bustype = BUS_HOST;
+
+ input_set_capability(input, EV_SW, SW_TABLET_MODE);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev, "cannot register input device: %d\n",
+ error);
+ return error;
+ }
+
+ /* Seed the state */
+ error = cbas_ec_query_base(ec, true, &cbas_ec.base_present);
+ if (error) {
+ dev_err(&pdev->dev, "cannot query base state: %d\n", error);
+ return error;
+ }
+
+ input_report_switch(input, SW_TABLET_MODE, !cbas_ec.base_present);
+
+ cbas_ec_set_input(input);
+
+ cbas_ec.dev = &pdev->dev;
+ cbas_ec.notifier.notifier_call = cbas_ec_notify;
+ error = blocking_notifier_chain_register(&ec->event_notifier,
+ &cbas_ec.notifier);
+ if (error) {
+ dev_err(&pdev->dev, "cannot register notifier: %d\n", error);
+ cbas_ec_set_input(NULL);
+ return error;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+ return 0;
+}
+
+static int cbas_ec_probe(struct platform_device *pdev)
+{
+ int retval;
+
+ mutex_lock(&cbas_ec_reglock);
+
+ if (cbas_ec.input) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ retval = __cbas_ec_probe(pdev);
+
+out:
+ mutex_unlock(&cbas_ec_reglock);
+ return retval;
+}
+
+static int cbas_ec_remove(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+
+ mutex_lock(&cbas_ec_reglock);
+
+ blocking_notifier_chain_unregister(&ec->event_notifier,
+ &cbas_ec.notifier);
+ cbas_ec_set_input(NULL);
+
+ mutex_unlock(&cbas_ec_reglock);
+ return 0;
+}
+
+static const struct acpi_device_id cbas_ec_acpi_ids[] = {
+ { "GOOG000B", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids);
+
+static struct platform_driver cbas_ec_driver = {
+ .probe = cbas_ec_probe,
+ .remove = cbas_ec_remove,
+ .driver = {
+ .name = "cbas_ec",
+ .acpi_match_table = ACPI_PTR(cbas_ec_acpi_ids),
+ .pm = &cbas_ec_pm_ops,
+ },
+};
+
+#define MAX_BRIGHTNESS 100
struct hammer_kbd_leds {
struct led_classdev cdev;
@@ -90,33 +342,130 @@ static int hammer_register_leds(struct hid_device *hdev)
return devm_led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
}
-static int hammer_input_configured(struct hid_device *hdev,
- struct hid_input *hi)
+#define HID_UP_GOOGLEVENDOR 0xffd10000
+#define HID_VD_KBD_FOLDED 0x00000019
+#define WHISKERS_KBD_FOLDED (HID_UP_GOOGLEVENDOR | HID_VD_KBD_FOLDED)
+
+/* HID usage for keyboard backlight (Alphanumeric display brightness) */
+#define HID_AD_BRIGHTNESS 0x00140046
+
+static int hammer_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
{
- struct list_head *report_list =
- &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
+ if (hdev->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
+ usage->hid == WHISKERS_KBD_FOLDED) {
+ /*
+ * We do not want this usage mapped: it would get mixed in
+ * with the "base attached" signal and is instead delivered
+ * over a separate input device for tablet switch mode.
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int hammer_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ unsigned long flags;
+
+ if (hid->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
+ usage->hid == WHISKERS_KBD_FOLDED) {
+ spin_lock_irqsave(&cbas_ec_lock, flags);
+
+ hid_dbg(hid, "%s: base: %d, folded: %d\n", __func__,
+ cbas_ec.base_present, value);
+
+ /*
+ * We should not get an event if the base is detached, but in
+ * case we happen to service HID and EC notifications out of
+ * order, let's still check the "base present" flag.
+ */
+ if (cbas_ec.input && cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input,
+ SW_TABLET_MODE, value);
+ input_sync(cbas_ec.input);
+ }
+
+ spin_unlock_irqrestore(&cbas_ec_lock, flags);
+ return 1; /* We handled this event */
+ }
+
+ return 0;
+}
+
+static bool hammer_is_keyboard_interface(struct hid_device *hdev)
+{
+ struct hid_report_enum *re = &hdev->report_enum[HID_INPUT_REPORT];
struct hid_report *report;
- if (list_empty(report_list))
- return 0;
+ list_for_each_entry(report, &re->report_list, list)
+ if (report->application == HID_GD_KEYBOARD)
+ return true;
- report = list_first_entry(report_list, struct hid_report, list);
+ return false;
+}
+
+static bool hammer_has_backlight_control(struct hid_device *hdev)
+{
+ struct hid_report_enum *re = &hdev->report_enum[HID_OUTPUT_REPORT];
+ struct hid_report *report;
+ int i, j;
- if (report->maxfield == 1 &&
- report->field[0]->application == HID_GD_KEYBOARD &&
- report->field[0]->maxusage == 1 &&
- report->field[0]->usage[0].hid == HID_AD_BRIGHTNESS) {
- int err = hammer_register_leds(hdev);
+ list_for_each_entry(report, &re->report_list, list) {
+ if (report->application != HID_GD_KEYBOARD)
+ continue;
- if (err)
+ for (i = 0; i < report->maxfield; i++) {
+ struct hid_field *field = report->field[i];
+
+ for (j = 0; j < field->maxusage; j++)
+ if (field->usage[j].hid == HID_AD_BRIGHTNESS)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int hammer_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int error;
+
+ /*
+ * We always want to poll for, and handle, tablet mode events from
+ * Whiskers, even when nobody has opened the input device. This also
+ * prevents the hid core from dropping early tablet mode events from
+ * the device.
+ */
+ if (hdev->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
+ hammer_is_keyboard_interface(hdev))
+ hdev->quirks |= HID_QUIRK_ALWAYS_POLL;
+
+ error = hid_parse(hdev);
+ if (error)
+ return error;
+
+ error = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (error)
+ return error;
+
+ if (hammer_has_backlight_control(hdev)) {
+ error = hammer_register_leds(hdev);
+ if (error)
hid_warn(hdev,
"Failed to register keyboard backlight: %d\n",
- err);
+ error);
}
return 0;
}
+
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
@@ -133,8 +482,34 @@ MODULE_DEVICE_TABLE(hid, hammer_devices);
static struct hid_driver hammer_driver = {
.name = "hammer",
.id_table = hammer_devices,
- .input_configured = hammer_input_configured,
+ .probe = hammer_probe,
+ .input_mapping = hammer_input_mapping,
+ .event = hammer_event,
};
-module_hid_driver(hammer_driver);
+
+static int __init hammer_init(void)
+{
+ int error;
+
+ error = platform_driver_register(&cbas_ec_driver);
+ if (error)
+ return error;
+
+ error = hid_register_driver(&hammer_driver);
+ if (error) {
+ platform_driver_unregister(&cbas_ec_driver);
+ return error;
+ }
+
+ return 0;
+}
+module_init(hammer_init);
+
+static void __exit hammer_exit(void)
+{
+ hid_unregister_driver(&hammer_driver);
+ platform_driver_unregister(&cbas_ec_driver);
+}
+module_exit(hammer_exit);
MODULE_LICENSE("GPL");
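The reported SW_TABLET_MODE state thus combines two sources: the EC's base-attached switch (handled in cbas_ec_notify() and cbas_ec_resume()) and the keyboard's folded usage (handled in hammer_event()). The effective logic reduces to the following sketch, with a hypothetical helper name:

/* Sketch of the combined SW_TABLET_MODE decision (helper name hypothetical). */
static bool whiskers_tablet_mode(bool base_present, bool kbd_folded)
{
	if (!base_present)
		return true;		/* detached base: always tablet mode */
	return kbd_folded;		/* attached: tablet only when folded */
}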
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index bc49909aba8e..f63489c882bb 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -92,6 +92,7 @@
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
+#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f
#define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214
@@ -229,6 +230,9 @@
#define USB_VENDOR_ID_BETOP_2185V2PC 0x8380
#define USB_VENDOR_ID_BETOP_2185V2BFM 0x20bc
+#define USB_VENDOR_ID_BIGBEN 0x146b
+#define USB_DEVICE_ID_BIGBEN_PS3OFMINIPAD 0x0902
+
#define USB_VENDOR_ID_BTC 0x046e
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
@@ -342,6 +346,7 @@
#define USB_DEVICE_ID_DMI_ENC 0x5fab
#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_DEVICE_ID_REDRAGON_SEYMUR2 0x0006
#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
@@ -799,6 +804,7 @@
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
+#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a481eaf39e88..567c3bf64515 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -758,6 +758,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
case HID_UP_DIGITIZER:
+ if ((field->application & 0xff) == 0x01) /* Digitizer */
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
+ else if ((field->application & 0xff) == 0x02) /* Pen */
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
+
switch (usage->hid & 0xff) {
case 0x00: /* Undefined */
goto ignore;
@@ -1516,6 +1521,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
struct hid_input *hidinput = kzalloc(sizeof(*hidinput), GFP_KERNEL);
struct input_dev *input_dev = input_allocate_device();
const char *suffix = NULL;
+ size_t suffix_len, name_len;
if (!hidinput || !input_dev)
goto fail;
@@ -1559,10 +1565,15 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
}
if (suffix) {
- hidinput->name = kasprintf(GFP_KERNEL, "%s %s",
- hid->name, suffix);
- if (!hidinput->name)
- goto fail;
+ name_len = strlen(hid->name);
+ suffix_len = strlen(suffix);
+ if ((name_len < suffix_len) ||
+ strcmp(hid->name + name_len - suffix_len, suffix)) {
+ hidinput->name = kasprintf(GFP_KERNEL, "%s %s",
+ hid->name, suffix);
+ if (!hidinput->name)
+ goto fail;
+ }
}
input_set_drvdata(input_dev, hid);
@@ -1827,3 +1838,48 @@ void hidinput_disconnect(struct hid_device *hid)
}
EXPORT_SYMBOL_GPL(hidinput_disconnect);
+/**
+ * hid_scroll_counter_handle_scroll() - Send high- and low-resolution scroll
+ * events given a high-resolution wheel
+ * movement.
+ * @counter: a hid_scroll_counter struct describing the wheel.
+ * @hi_res_value: the movement of the wheel, in the mouse's high-resolution
+ * units.
+ *
+ * Given a high-resolution movement, this function converts the movement into
+ * microns and emits high-resolution scroll events for the input device. It also
+ * uses the multiplier from &struct hid_scroll_counter to emit low-resolution
+ * scroll events when appropriate for backwards-compatibility with userspace
+ * input libraries.
+ */
+void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
+ int hi_res_value)
+{
+ int low_res_scroll_amount;
+ /* Some wheels will rest 7/8ths of a notch from the previous notch
+ * after slow movement, so we want the threshold for low-res events to
+ * be in the middle of the notches (e.g. after 4/8ths) as opposed to on
+ * the notches themselves (8/8ths).
+ */
+ int threshold = counter->resolution_multiplier / 2;
+
+ input_report_rel(counter->dev, REL_WHEEL_HI_RES,
+ hi_res_value * counter->microns_per_hi_res_unit);
+
+ counter->remainder += hi_res_value;
+ if (abs(counter->remainder) >= threshold) {
+ /* Add (or subtract) 1 because we want to trigger when the wheel
+ * is half-way to the next notch (i.e. scroll 1 notch after a
+ * 1/2 notch movement, 2 notches after a 1 1/2 notch movement,
+ * etc.).
+ */
+ low_res_scroll_amount =
+ counter->remainder / counter->resolution_multiplier
+ + (hi_res_value > 0 ? 1 : -1);
+ input_report_rel(counter->dev, REL_WHEEL,
+ low_res_scroll_amount);
+ counter->remainder -=
+ low_res_scroll_amount * counter->resolution_multiplier;
+ }
+}
+EXPORT_SYMBOL_GPL(hid_scroll_counter_handle_scroll);
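To see the accumulator arithmetic of hid_scroll_counter_handle_scroll() at work, here is a self-contained model; the state lives in a plain static rather than the counter struct, and the values are illustrative. With a multiplier of 8 the threshold is 4, so two +3 ticks cross the half-notch point and emit exactly one low-res notch, leaving a remainder of -2:

#include <stdio.h>
#include <stdlib.h>

static int rem;	/* models counter->remainder */

/* returns the number of low-res notches emitted for one hi-res event */
static int handle(int hi_res_value, int multiplier)
{
	int threshold = multiplier / 2;
	int notches = 0;

	rem += hi_res_value;
	if (abs(rem) >= threshold) {
		notches = rem / multiplier + (hi_res_value > 0 ? 1 : -1);
		rem -= notches * multiplier;
	}
	return notches;
}

int main(void)
{
	int first = handle(3, 8);
	int second = handle(3, 8);

	printf("%d %d (rem=%d)\n", first, second, rem);	/* 0 1 (rem=-2) */
	return 0;
}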
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 19cc980eebce..f01280898b24 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -64,6 +64,14 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
#define HIDPP_QUIRK_UNIFYING BIT(25)
+#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
+#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
+#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
+
+/* Convenience constant to check for any high-res support. */
+#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
+ HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
+ HIDPP_QUIRK_HI_RES_SCROLL_X2121)
#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
@@ -149,6 +157,7 @@ struct hidpp_device {
unsigned long capabilities;
struct hidpp_battery battery;
+ struct hid_scroll_counter vertical_wheel_counter;
};
/* HID++ 1.0 error codes */
@@ -400,32 +409,53 @@ static void hidpp_prefix_name(char **name, int name_length)
#define HIDPP_SET_LONG_REGISTER 0x82
#define HIDPP_GET_LONG_REGISTER 0x83
-#define HIDPP_REG_GENERAL 0x00
-
-static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
+/**
+ * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register.
+ * @hidpp_dev: the device to set the register on.
+ * @register_address: the address of the register to modify.
+ * @byte: the byte of the register to modify. Should be less than 3.
+ * @bit: the bit within the byte to set.
+ * Return: 0 if successful, otherwise a negative error code.
+ */
+static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
+ u8 register_address, u8 byte, u8 bit)
{
struct hidpp_report response;
int ret;
u8 params[3] = { 0 };
ret = hidpp_send_rap_command_sync(hidpp_dev,
- REPORT_ID_HIDPP_SHORT,
- HIDPP_GET_REGISTER,
- HIDPP_REG_GENERAL,
- NULL, 0, &response);
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_GET_REGISTER,
+ register_address,
+ NULL, 0, &response);
if (ret)
return ret;
memcpy(params, response.rap.params, 3);
- /* Set the battery bit */
- params[0] |= BIT(4);
+ params[byte] |= BIT(bit);
return hidpp_send_rap_command_sync(hidpp_dev,
- REPORT_ID_HIDPP_SHORT,
- HIDPP_SET_REGISTER,
- HIDPP_REG_GENERAL,
- params, 3, &response);
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_SET_REGISTER,
+ register_address,
+ params, 3, &response);
+}
+
+#define HIDPP_REG_GENERAL 0x00
+
+static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
+{
+ return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4);
+}
+
+#define HIDPP_REG_FEATURES 0x01
+
+/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
+static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
+{
+ return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
}
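The refactor above folds both enable paths into one read-modify-write primitive: GET_REGISTER fetches the 3-byte register, one bit is OR-ed in, and SET_REGISTER writes it back. A host-side model of that sequence, with a mock register file standing in for the two HID++ round trips:

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[0x100][3];	/* mock device registers, 3 bytes each */

/* models hidpp10_set_register_bit(): read, set one bit, write back */
static void set_register_bit(uint8_t address, uint8_t byte, uint8_t bit)
{
	uint8_t params[3];

	for (int i = 0; i < 3; i++)	/* HIDPP_GET_REGISTER */
		params[i] = regs[address][i];
	params[byte] |= 1u << bit;
	for (int i = 0; i < 3; i++)	/* HIDPP_SET_REGISTER */
		regs[address][i] = params[i];
}

int main(void)
{
	set_register_bit(0x00, 0, 4);	/* battery reporting (REG_GENERAL) */
	set_register_bit(0x01, 0, 6);	/* scrolling acceleration (REG_FEATURES) */
	printf("general=%#04x features=%#04x\n", regs[0x00][0], regs[0x01][0]);
	/* prints: general=0x10 features=0x40 */
	return 0;
}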
#define HIDPP_REG_BATTERY_STATUS 0x07
@@ -1137,6 +1167,100 @@ static int hidpp_battery_get_property(struct power_supply *psy,
}
/* -------------------------------------------------------------------------- */
+/* 0x2120: Hi-resolution scrolling */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_HI_RESOLUTION_SCROLLING 0x2120
+
+#define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10
+
+static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp,
+ bool enabled, u8 *multiplier)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ u8 params[1];
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
+ &feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+
+ params[0] = enabled ? BIT(0) : 0;
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE,
+ params, sizeof(params), &response);
+ if (ret)
+ return ret;
+ *multiplier = response.fap.params[1];
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* 0x2121: HiRes Wheel */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_HIRES_WHEEL 0x2121
+
+#define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY 0x00
+#define CMD_HIRES_WHEEL_SET_WHEEL_MODE 0x20
+
+static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp,
+ u8 *multiplier)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (ret)
+ goto return_default;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY,
+ NULL, 0, &response);
+ if (ret)
+ goto return_default;
+
+ *multiplier = response.fap.params[0];
+ return 0;
+return_default:
+ hid_warn(hidpp->hid_dev,
+ "Couldn't get wheel multiplier (error %d), assuming %d.\n",
+ ret, *multiplier);
+ return ret;
+}
+
+static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert,
+ bool high_resolution, bool use_hidpp)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ u8 params[1];
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (ret)
+ return ret;
+
+ params[0] = (invert ? BIT(2) : 0) |
+ (high_resolution ? BIT(1) : 0) |
+ (use_hidpp ? BIT(0) : 0);
+
+ return hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HIRES_WHEEL_SET_WHEEL_MODE,
+ params, sizeof(params), &response);
+}
+
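The single parameter byte of CMD_HIRES_WHEEL_SET_WHEEL_MODE packs three flags; hi_res_scroll_enable() later calls this with invert off, high resolution on, and HID++ diversion off, i.e. 0x02. A quick check of the packing:

#include <assert.h>
#include <stdbool.h>

/* same bit layout as the params[0] assignment in hidpp_hrw_set_wheel_mode() */
static unsigned char wheel_mode_byte(bool invert, bool high_resolution,
				     bool use_hidpp)
{
	return (invert ? 1u << 2 : 0) |
	       (high_resolution ? 1u << 1 : 0) |
	       (use_hidpp ? 1u << 0 : 0);
}

int main(void)
{
	assert(wheel_mode_byte(false, true, false) == 0x02);
	assert(wheel_mode_byte(true, true, true) == 0x07);
	return 0;
}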
+/* -------------------------------------------------------------------------- */
/* 0x4301: Solar Keyboard */
/* -------------------------------------------------------------------------- */
@@ -2399,7 +2523,8 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
input_report_rel(mydata->input, REL_Y, v);
v = hid_snto32(data[6], 8);
- input_report_rel(mydata->input, REL_WHEEL, v);
+ hid_scroll_counter_handle_scroll(
+ &hidpp->vertical_wheel_counter, v);
input_sync(mydata->input);
}
@@ -2528,6 +2653,72 @@ static int g920_get_config(struct hidpp_device *hidpp)
}
/* -------------------------------------------------------------------------- */
+/* High-resolution scroll wheels */
+/* -------------------------------------------------------------------------- */
+
+/**
+ * struct hi_res_scroll_info - Stores info on a device's high-res scroll wheel.
+ * @product_id: the HID product ID of the device being described.
+ * @microns_per_hi_res_unit: the distance moved by the user's finger for each
+ * high-resolution unit reported by the device, in
+ * 256ths of a millimetre.
+ */
+struct hi_res_scroll_info {
+ __u32 product_id;
+ int microns_per_hi_res_unit;
+};
+
+static struct hi_res_scroll_info hi_res_scroll_devices[] = {
+ { /* Anywhere MX */
+ .product_id = 0x1017, .microns_per_hi_res_unit = 445 },
+ { /* Performance MX */
+ .product_id = 0x101a, .microns_per_hi_res_unit = 406 },
+ { /* M560 */
+ .product_id = 0x402d, .microns_per_hi_res_unit = 435 },
+ { /* MX Master 2S */
+ .product_id = 0x4069, .microns_per_hi_res_unit = 406 },
+};
+
+static int hi_res_scroll_look_up_microns(__u32 product_id)
+{
+ int i;
+ int num_devices = sizeof(hi_res_scroll_devices)
+ / sizeof(hi_res_scroll_devices[0]);
+ for (i = 0; i < num_devices; i++) {
+ if (hi_res_scroll_devices[i].product_id == product_id)
+ return hi_res_scroll_devices[i].microns_per_hi_res_unit;
+ }
+ /* We don't have a value for this device, so use a sensible default. */
+ return 406;
+}
+
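The open-coded sizeof division in hi_res_scroll_look_up_microns() is the same computation as the kernel's ARRAY_SIZE() macro. A user-space version of the lookup, default included:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct entry { unsigned int product_id; int microns; };

static const struct entry table[] = {
	{ 0x1017, 445 },	/* Anywhere MX */
	{ 0x101a, 406 },	/* Performance MX */
	{ 0x402d, 435 },	/* M560 */
	{ 0x4069, 406 },	/* MX Master 2S */
};

static int look_up_microns(unsigned int product_id)
{
	for (size_t i = 0; i < ARRAY_SIZE(table); i++)
		if (table[i].product_id == product_id)
			return table[i].microns;
	return 406;	/* sensible default, as in the driver */
}

int main(void)
{
	printf("%d\n", look_up_microns(0x402d));	/* 435 */
	return 0;
}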
+static int hi_res_scroll_enable(struct hidpp_device *hidpp)
+{
+ int ret;
+ u8 multiplier = 8;
+
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
+ ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
+ hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
+ } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
+ ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
+ &multiplier);
+ } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */
+ ret = hidpp10_enable_scrolling_acceleration(hidpp);
+
+ if (ret)
+ return ret;
+
+ hidpp->vertical_wheel_counter.resolution_multiplier = multiplier;
+ hidpp->vertical_wheel_counter.microns_per_hi_res_unit =
+ hi_res_scroll_look_up_microns(hidpp->hid_dev->product);
+ hid_info(hidpp->hid_dev, "multiplier = %d, microns = %d\n",
+ multiplier,
+ hidpp->vertical_wheel_counter.microns_per_hi_res_unit);
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
@@ -2572,6 +2763,11 @@ static void hidpp_populate_input(struct hidpp_device *hidpp,
wtp_populate_input(hidpp, input, origin_is_hid_core);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
m560_populate_input(hidpp, input, origin_is_hid_core);
+
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) {
+ input_set_capability(input, EV_REL, REL_WHEEL_HI_RES);
+ hidpp->vertical_wheel_counter.dev = input;
+ }
}
static int hidpp_input_configured(struct hid_device *hdev,
@@ -2690,6 +2886,27 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
return 0;
}
+static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ /* This function will only be called for scroll events, due to the
+ * restriction imposed in hidpp_usages.
+ */
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ struct hid_scroll_counter *counter = &hidpp->vertical_wheel_counter;
+ /* A scroll event may occur before the multiplier has been retrieved or
+ * the input device set, or high-res scroll enabling may fail. In such
+ * cases we must return early (falling back to default behaviour) to
+ * avoid a crash in hid_scroll_counter_handle_scroll.
+ */
+ if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
+ || counter->dev == NULL || counter->resolution_multiplier == 0)
+ return 0;
+
+ hid_scroll_counter_handle_scroll(counter, value);
+ return 1;
+}
+
static int hidpp_initialize_battery(struct hidpp_device *hidpp)
{
static atomic_t battery_no = ATOMIC_INIT(0);
@@ -2901,6 +3118,9 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
+ hi_res_scroll_enable(hidpp);
+
if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
/* if the input nodes are already created, we can stop now */
return;
@@ -3086,35 +3306,63 @@ static void hidpp_remove(struct hid_device *hdev)
mutex_destroy(&hidpp->send_mutex);
}
+#define LDJ_DEVICE(product) \
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
+ USB_VENDOR_ID_LOGITECH, (product))
+
static const struct hid_device_id hidpp_devices[] = {
{ /* wireless touchpad */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4011),
+ LDJ_DEVICE(0x4011),
.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS },
{ /* wireless touchpad T650 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4101),
+ LDJ_DEVICE(0x4101),
.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
{ /* wireless touchpad T651 */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
+ { /* Mouse Logitech Anywhere MX */
+ LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ { /* Mouse Logitech Cube */
+ LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
+ { /* Mouse Logitech M335 */
+ LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech M515 */
+ LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
{ /* Mouse logitech M560 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x402d),
- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
+ LDJ_DEVICE(0x402d),
+ .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560
+ | HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
+ { /* Mouse Logitech M705 (firmware RQM17) */
+ LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ { /* Mouse Logitech M705 (firmware RQM67) */
+ LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech M720 */
+ LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Anywhere 2 */
+ LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Anywhere 2S */
+ LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master */
+ LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master 2S */
+ LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech Performance MX */
+ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
{ /* Keyboard logitech K400 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4024),
+ LDJ_DEVICE(0x4024),
.driver_data = HIDPP_QUIRK_CLASS_K400 },
{ /* Solar Keyboard Logitech K750 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4002),
+ LDJ_DEVICE(0x4002),
.driver_data = HIDPP_QUIRK_CLASS_K750 },
- { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
+ { LDJ_DEVICE(HID_ANY_ID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
@@ -3123,12 +3371,19 @@ static const struct hid_device_id hidpp_devices[] = {
MODULE_DEVICE_TABLE(hid, hidpp_devices);
+static const struct hid_usage_id hidpp_usages[] = {
+ { HID_GD_WHEEL, EV_REL, REL_WHEEL },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
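hidpp_usages restricts the new .event callback to REL_WHEEL events mapped from HID_GD_WHEEL; the closing row of HID_ANY_ID - 1 values is the table terminator, since HID_ANY_ID itself is the match-anything wildcard. A sketch of that sentinel convention, with the constants inlined from the uapi headers:

#include <stdio.h>
#include <stdint.h>

#define HID_ANY_ID (~0u)

struct hid_usage_id { uint32_t usage_hid, usage_type, usage_code; };

static int table_length(const struct hid_usage_id *t)
{
	int n = 0;

	while (t[n].usage_hid != HID_ANY_ID - 1)	/* sentinel row */
		n++;
	return n;
}

int main(void)
{
	static const struct hid_usage_id usages[] = {
		{ 0x00010038, 0x02, 0x08 },	/* HID_GD_WHEEL, EV_REL, REL_WHEEL */
		{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1 },
	};

	printf("%d\n", table_length(usages));	/* 1 */
	return 0;
}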
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
.probe = hidpp_probe,
.remove = hidpp_remove,
.raw_event = hidpp_raw_event,
+ .usage_table = hidpp_usages,
+ .event = hidpp_event,
.input_configured = hidpp_input_configured,
.input_mapping = hidpp_input_mapping,
.input_mapped = hidpp_input_mapped,
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index b454c4386157..1d5ea678d268 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -54,6 +54,8 @@ module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
#define TRACKPAD_REPORT_ID 0x28
+#define TRACKPAD2_USB_REPORT_ID 0x02
+#define TRACKPAD2_BT_REPORT_ID 0x31
#define MOUSE_REPORT_ID 0x29
#define DOUBLE_REPORT_ID 0xf7
/* These definitions are not precise, but they're close enough. (Bits
@@ -91,6 +93,17 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define TRACKPAD_RES_Y \
((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
+#define TRACKPAD2_DIMENSION_X (float)16000
+#define TRACKPAD2_MIN_X -3678
+#define TRACKPAD2_MAX_X 3934
+#define TRACKPAD2_RES_X \
+ ((TRACKPAD2_MAX_X - TRACKPAD2_MIN_X) / (TRACKPAD2_DIMENSION_X / 100))
+#define TRACKPAD2_DIMENSION_Y (float)11490
+#define TRACKPAD2_MIN_Y -2478
+#define TRACKPAD2_MAX_Y 2587
+#define TRACKPAD2_RES_Y \
+ ((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100))
+
/**
* struct magicmouse_sc - Tracks Magic Mouse-specific data.
* @input: Input device through which we report events.
@@ -183,6 +196,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
{
struct input_dev *input = msc->input;
int id, x, y, size, orientation, touch_major, touch_minor, state, down;
+ int pressure = 0;
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
@@ -194,6 +208,17 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
touch_minor = tdata[4];
state = tdata[7] & TOUCH_STATE_MASK;
down = state != TOUCH_STATE_NONE;
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ id = tdata[8] & 0xf;
+ x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
+ y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19);
+ size = tdata[6];
+ orientation = (tdata[8] >> 5) - 4;
+ touch_major = tdata[4];
+ touch_minor = tdata[5];
+ pressure = tdata[7];
+ state = tdata[3] & 0xC0;
+ down = state == 0x80;
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
id = (tdata[7] << 2 | tdata[6] >> 6) & 0xf;
x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
@@ -215,7 +240,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
/* If requested, emulate a scroll wheel by detecting small
* vertical touch motions.
*/
- if (emulate_scroll_wheel) {
+ if (emulate_scroll_wheel && (input->id.product !=
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)) {
unsigned long now = jiffies;
int step_x = msc->touches[id].scroll_x - x;
int step_y = msc->touches[id].scroll_y - y;
@@ -269,10 +295,14 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+
if (report_undeciphered) {
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
- else /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ else if (input->id.product !=
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
input_event(input, EV_MSC, MSC_RAW, tdata[8]);
}
}
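The Trackpad 2 coordinate decoding above relies on the classic shift-left/arithmetic-shift-right trick: the 13-bit X field is positioned at the top of a 32-bit word and shifted back down, which sign-extends it. A user-space check of the same expression (the report bytes are made up for illustration; right-shifting a negative int is implementation-defined in ISO C but arithmetic on the compilers the kernel supports):

#include <stdio.h>
#include <stdint.h>

/* X spans tdata[0] and the low bits of tdata[1]; shifting the field up
 * to bit 31 and arithmetic-shifting back down sign-extends it. */
static int unpack_x(const uint8_t *tdata)
{
	return (int)((uint32_t)tdata[1] << 27 | (uint32_t)tdata[0] << 19) >> 19;
}

int main(void)
{
	uint8_t sample[2] = { 0xfe, 0x1f };	/* hypothetical report bytes */

	printf("%d\n", unpack_x(sample));	/* -2: 0x1ffe sign-extended from 13 bits */
	return 0;
}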
@@ -287,6 +317,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
switch (data[0]) {
case TRACKPAD_REPORT_ID:
+ case TRACKPAD2_BT_REPORT_ID:
/* Expect four bytes of prefix, and N*9 bytes of touch data. */
if (size < 4 || ((size - 4) % 9) != 0)
return 0;
@@ -308,6 +339,22 @@ static int magicmouse_raw_event(struct hid_device *hdev,
* ts = data[1] >> 6 | data[2] << 2 | data[3] << 10;
*/
break;
+ case TRACKPAD2_USB_REPORT_ID:
+ /* Expect twelve bytes of prefix and N*9 bytes of touch data. */
+ if (size < 12 || ((size - 12) % 9) != 0)
+ return 0;
+ npoints = (size - 12) / 9;
+ if (npoints > 15) {
+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD2_USB_REPORT_ID\n",
+ size);
+ return 0;
+ }
+ msc->ntouches = 0;
+ for (ii = 0; ii < npoints; ii++)
+ magicmouse_emit_touch(msc, ii, data + ii * 9 + 12);
+
+ clicks = data[1];
+ break;
case MOUSE_REPORT_ID:
/* Expect six bytes of prefix, and N*8 bytes of touch data. */
if (size < 6 || ((size - 6) % 8) != 0)
@@ -352,6 +399,9 @@ static int magicmouse_raw_event(struct hid_device *hdev,
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ input_mt_sync_frame(input);
+ input_report_key(input, BTN_MOUSE, clicks & 1);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_report_key(input, BTN_MOUSE, clicks & 1);
input_mt_report_pointer_emulation(input, true);
@@ -364,6 +414,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
{
int error;
+ int mt_flags = 0;
__set_bit(EV_KEY, input->evbit);
@@ -380,6 +431,22 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(REL_WHEEL, input->relbit);
__set_bit(REL_HWHEEL, input->relbit);
}
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ /* Set the device name to ensure the same driver settings
+ * get loaded, whether connected through Bluetooth or USB.
+ */
+ input->name = "Apple Inc. Magic Trackpad 2";
+
+ __clear_bit(EV_MSC, input->evbit);
+ __clear_bit(BTN_0, input->keybit);
+ __clear_bit(BTN_RIGHT, input->keybit);
+ __clear_bit(BTN_MIDDLE, input->keybit);
+ __set_bit(BTN_MOUSE, input->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+
+ mt_flags = INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK;
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
/* input->keybit is initialized with incorrect button info
* for Magic Trackpad. There really is only one physical
@@ -402,14 +469,13 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_ABS, input->evbit);
- error = input_mt_init_slots(input, 16, 0);
+ error = input_mt_init_slots(input, 16, mt_flags);
if (error)
return error;
input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
4, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
4, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
/* Note: Touch Y position from the device is inverted relative
* to how pointer motion is reported (and relative to how USB
@@ -418,6 +484,7 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
* inverse of the reported Y.
*/
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y,
@@ -427,7 +494,25 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
MOUSE_RES_X);
input_abs_set_res(input, ABS_MT_POSITION_Y,
MOUSE_RES_Y);
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 253, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 253, 0, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -3, 4, 0, 0);
+ input_set_abs_params(input, ABS_X, TRACKPAD2_MIN_X,
+ TRACKPAD2_MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_Y, TRACKPAD2_MIN_Y,
+ TRACKPAD2_MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ TRACKPAD2_MIN_X, TRACKPAD2_MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ TRACKPAD2_MIN_Y, TRACKPAD2_MAX_Y, 0, 0);
+
+ input_abs_set_res(input, ABS_X, TRACKPAD2_RES_X);
+ input_abs_set_res(input, ABS_Y, TRACKPAD2_RES_Y);
+ input_abs_set_res(input, ABS_MT_POSITION_X, TRACKPAD2_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, TRACKPAD2_RES_Y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
TRACKPAD_MAX_X, 4, 0);
input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
@@ -447,7 +532,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
input_set_events_per_packet(input, 60);
- if (report_undeciphered) {
+ if (report_undeciphered &&
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
__set_bit(EV_MSC, input->evbit);
__set_bit(MSC_RAW, input->mscbit);
}
@@ -465,7 +551,8 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
msc->input = hi->input;
/* Magic Trackpad does not give relative data after switching to MT */
- if (hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD &&
+ if ((hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD ||
+ hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
field->flags & HID_MAIN_ITEM_RELATIVE)
return -1;
@@ -494,11 +581,20 @@ static int magicmouse_input_configured(struct hid_device *hdev,
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- const u8 feature[] = { 0xd7, 0x01 };
+ const u8 *feature;
+ const u8 feature_mt[] = { 0xD7, 0x01 };
+ const u8 feature_mt_trackpad2_usb[] = { 0x02, 0x01 };
+ const u8 feature_mt_trackpad2_bt[] = { 0xF1, 0x02, 0x01 };
u8 *buf;
struct magicmouse_sc *msc;
struct hid_report *report;
int ret;
+ int feature_size;
+
+ if (id->vendor == USB_VENDOR_ID_APPLE &&
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ hdev->type != HID_TYPE_USBMOUSE)
+ return 0;
msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
if (msc == NULL) {
@@ -532,7 +628,14 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
MOUSE_REPORT_ID, 0);
- else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (id->vendor == BT_VENDOR_ID_APPLE)
+ report = hid_register_report(hdev, HID_INPUT_REPORT,
+ TRACKPAD2_BT_REPORT_ID, 0);
+ else /* USB_VENDOR_ID_APPLE */
+ report = hid_register_report(hdev, HID_INPUT_REPORT,
+ TRACKPAD2_USB_REPORT_ID, 0);
+ } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD_REPORT_ID, 0);
report = hid_register_report(hdev, HID_INPUT_REPORT,
@@ -546,7 +649,20 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
- buf = kmemdup(feature, sizeof(feature), GFP_KERNEL);
+ if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (id->vendor == BT_VENDOR_ID_APPLE) {
+ feature_size = sizeof(feature_mt_trackpad2_bt);
+ feature = feature_mt_trackpad2_bt;
+ } else { /* USB_VENDOR_ID_APPLE */
+ feature_size = sizeof(feature_mt_trackpad2_usb);
+ feature = feature_mt_trackpad2_usb;
+ }
+ } else {
+ feature_size = sizeof(feature_mt);
+ feature = feature_mt;
+ }
+
+ buf = kmemdup(feature, feature_size, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_stop_hw;
@@ -560,10 +676,10 @@ static int magicmouse_probe(struct hid_device *hdev,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
kfree(buf);
- if (ret != -EIO && ret != sizeof(feature)) {
+ if (ret != -EIO && ret != feature_size) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
@@ -579,6 +695,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
{ }
};
MODULE_DEVICE_TABLE(hid, magic_mice);
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 72d983626afd..330cb073cb66 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -29,11 +29,41 @@
#define MS_NOGET BIT(4)
#define MS_DUPLICATE_USAGES BIT(5)
#define MS_SURFACE_DIAL BIT(6)
+#define MS_QUIRK_FF BIT(7)
+
+struct ms_data {
+ unsigned long quirks;
+ struct hid_device *hdev;
+ struct work_struct ff_worker;
+ __u8 strong;
+ __u8 weak;
+ void *output_report_dmabuf;
+};
+
+#define XB1S_FF_REPORT 3
+#define ENABLE_WEAK BIT(0)
+#define ENABLE_STRONG BIT(1)
+
+enum {
+ MAGNITUDE_STRONG = 2,
+ MAGNITUDE_WEAK,
+ MAGNITUDE_NUM
+};
+
+struct xb1s_ff_report {
+ __u8 report_id;
+ __u8 enable;
+ __u8 magnitude[MAGNITUDE_NUM];
+ __u8 duration_10ms;
+ __u8 start_delay_10ms;
+ __u8 loop_count;
+} __packed;
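MAGNITUDE_STRONG starting at 2 means magnitude[0] and magnitude[1] are never written (presumably the trigger actuators, which this patch leaves at zero via the memset() in ms_ff_worker()), and the whole __packed report is 9 bytes. A layout check under those assumptions:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

enum { MAGNITUDE_STRONG = 2, MAGNITUDE_WEAK, MAGNITUDE_NUM };

struct xb1s_ff_report_model {	/* user-space mirror of the __packed struct */
	uint8_t report_id;
	uint8_t enable;
	uint8_t magnitude[MAGNITUDE_NUM];
	uint8_t duration_10ms;
	uint8_t start_delay_10ms;
	uint8_t loop_count;
};

int main(void)
{
	/* all members are single bytes, so no padding can appear */
	assert(sizeof(struct xb1s_ff_report_model) == 9);
	assert(offsetof(struct xb1s_ff_report_model, magnitude)
	       + MAGNITUDE_STRONG == 4);
	return 0;
}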
static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
/*
* Microsoft Wireless Desktop Receiver (Model 1028) has
@@ -159,7 +189,8 @@ static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
if (quirks & MS_ERGONOMY) {
int ret = ms_ergonomy_kb_quirk(hi, usage, bit, max);
@@ -185,7 +216,8 @@ static int ms_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
if (quirks & MS_DUPLICATE_USAGES)
clear_bit(usage->code, *bit);
@@ -196,7 +228,8 @@ static int ms_input_mapped(struct hid_device *hdev, struct hid_input *hi,
static int ms_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
struct input_dev *input;
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
@@ -251,12 +284,97 @@ static int ms_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
+static void ms_ff_worker(struct work_struct *work)
+{
+ struct ms_data *ms = container_of(work, struct ms_data, ff_worker);
+ struct hid_device *hdev = ms->hdev;
+ struct xb1s_ff_report *r = ms->output_report_dmabuf;
+ int ret;
+
+ memset(r, 0, sizeof(*r));
+
+ r->report_id = XB1S_FF_REPORT;
+ r->enable = ENABLE_WEAK | ENABLE_STRONG;
+ /*
+ * Specifying the maximum duration and maximum loop count should
+ * cover the maximum duration of a single effect, which is 65536
+ * ms.
+ */
+ r->duration_10ms = U8_MAX;
+ r->loop_count = U8_MAX;
+ r->magnitude[MAGNITUDE_STRONG] = ms->strong; /* left actuator */
+ r->magnitude[MAGNITUDE_WEAK] = ms->weak; /* right actuator */
+
+ ret = hid_hw_output_report(hdev, (__u8 *)r, sizeof(*r));
+ if (ret)
+ hid_warn(hdev, "failed to send FF report\n");
+}
+
+static int ms_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct ms_data *ms = hid_get_drvdata(hid);
+
+ if (effect->type != FF_RUMBLE)
+ return 0;
+
+ /*
+ * Magnitude is 0..100 so scale the 16-bit input here
+ */
+ ms->strong = ((u32) effect->u.rumble.strong_magnitude * 100) / U16_MAX;
+ ms->weak = ((u32) effect->u.rumble.weak_magnitude * 100) / U16_MAX;
+
+ schedule_work(&ms->ff_worker);
+ return 0;
+}
+
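ms_play_effect() rescales the 16-bit memless-FF magnitudes into the 0..100 range the report expects. The truncating integer division is worth a quick check:

#include <assert.h>
#include <stdint.h>

static uint8_t scale_magnitude(uint16_t m)
{
	/* same arithmetic as ms_play_effect(): widen first so the
	 * multiplication by 100 cannot overflow a 16-bit value */
	return (uint8_t)(((uint32_t)m * 100) / UINT16_MAX);
}

int main(void)
{
	assert(scale_magnitude(0) == 0);
	assert(scale_magnitude(UINT16_MAX) == 100);
	assert(scale_magnitude(UINT16_MAX / 2) == 49);	/* truncates, not rounds */
	return 0;
}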
+static int ms_init_ff(struct hid_device *hdev)
+{
+ struct hid_input *hidinput = list_entry(hdev->inputs.next,
+ struct hid_input, list);
+ struct input_dev *input_dev = hidinput->input;
+ struct ms_data *ms = hid_get_drvdata(hdev);
+
+ if (!(ms->quirks & MS_QUIRK_FF))
+ return 0;
+
+ ms->hdev = hdev;
+ INIT_WORK(&ms->ff_worker, ms_ff_worker);
+
+ ms->output_report_dmabuf = devm_kzalloc(&hdev->dev,
+ sizeof(struct xb1s_ff_report),
+ GFP_KERNEL);
+ if (ms->output_report_dmabuf == NULL)
+ return -ENOMEM;
+
+ input_set_capability(input_dev, EV_FF, FF_RUMBLE);
+ return input_ff_create_memless(input_dev, NULL, ms_play_effect);
+}
+
+static void ms_remove_ff(struct hid_device *hdev)
+{
+ struct ms_data *ms = hid_get_drvdata(hdev);
+
+ if (!(ms->quirks & MS_QUIRK_FF))
+ return;
+
+ cancel_work_sync(&ms->ff_worker);
+}
+
static int ms_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
unsigned long quirks = id->driver_data;
+ struct ms_data *ms;
int ret;
- hid_set_drvdata(hdev, (void *)quirks);
+ ms = devm_kzalloc(&hdev->dev, sizeof(*ms), GFP_KERNEL);
+ if (ms == NULL)
+ return -ENOMEM;
+
+ ms->quirks = quirks;
+
+ hid_set_drvdata(hdev, ms);
if (quirks & MS_NOGET)
hdev->quirks |= HID_QUIRK_NOGET;
@@ -277,11 +395,21 @@ static int ms_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
+ ret = ms_init_ff(hdev);
+ if (ret)
+ hid_err(hdev, "could not initialize ff, continuing anyway\n");
+
return 0;
err_free:
return ret;
}
+static void ms_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ ms_remove_ff(hdev);
+}
+
static const struct hid_device_id ms_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV),
.driver_data = MS_HIDINPUT },
@@ -318,6 +446,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_PRESENTER },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, 0x091B),
.driver_data = MS_SURFACE_DIAL },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER),
+ .driver_data = MS_QUIRK_FF },
{ }
};
MODULE_DEVICE_TABLE(hid, ms_devices);
@@ -330,6 +460,7 @@ static struct hid_driver ms_driver = {
.input_mapped = ms_input_mapped,
.event = ms_event,
.probe = ms_probe,
+ .remove = ms_remove,
};
module_hid_driver(ms_driver);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index da954f3f4da7..f7c6de2b6730 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1319,6 +1319,13 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return mt_touch_input_mapping(hdev, hi, field, usage, bit, max,
application);
+ /*
+ * Some egalax touchscreens have "application == DG_TOUCHSCREEN"
+ * for the stylus. Overwrite the hid_input application in that case.
+ */
+ if (field->physical == HID_DG_STYLUS)
+ hi->application = HID_DG_STYLUS;
+
/* let hid-core decide for the others */
return 0;
}
@@ -1507,14 +1514,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
struct mt_device *td = hid_get_drvdata(hdev);
char *name;
const char *suffix = NULL;
- unsigned int application = 0;
struct mt_report_data *rdata;
struct mt_application *mt_application = NULL;
struct hid_report *report;
int ret;
list_for_each_entry(report, &hi->reports, hidinput_list) {
- application = report->application;
rdata = mt_find_report_data(td, report);
if (!rdata) {
hid_err(hdev, "failed to allocate data for report\n");
@@ -1529,46 +1534,33 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (ret)
return ret;
}
-
- /*
- * some egalax touchscreens have "application == DG_TOUCHSCREEN"
- * for the stylus. Check this first, and then rely on
- * the application field.
- */
- if (report->field[0]->physical == HID_DG_STYLUS) {
- suffix = "Pen";
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
- }
}
- if (!suffix) {
- switch (application) {
- case HID_GD_KEYBOARD:
- case HID_GD_KEYPAD:
- case HID_GD_MOUSE:
- case HID_DG_TOUCHPAD:
- case HID_GD_SYSTEM_CONTROL:
- case HID_CP_CONSUMER_CONTROL:
- case HID_GD_WIRELESS_RADIO_CTLS:
- case HID_GD_SYSTEM_MULTIAXIS:
- /* already handled by hid core */
- break;
- case HID_DG_TOUCHSCREEN:
- /* we do not set suffix = "Touchscreen" */
- hi->input->name = hdev->name;
- break;
- case HID_DG_STYLUS:
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
- break;
- case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
- suffix = "Custom Media Keys";
- break;
- default:
- suffix = "UNKNOWN";
- break;
- }
+ switch (hi->application) {
+ case HID_GD_KEYBOARD:
+ case HID_GD_KEYPAD:
+ case HID_GD_MOUSE:
+ case HID_DG_TOUCHPAD:
+ case HID_GD_SYSTEM_CONTROL:
+ case HID_CP_CONSUMER_CONTROL:
+ case HID_GD_WIRELESS_RADIO_CTLS:
+ case HID_GD_SYSTEM_MULTIAXIS:
+ /* already handled by hid core */
+ break;
+ case HID_DG_TOUCHSCREEN:
+ /* we do not set suffix = "Touchscreen" */
+ hi->input->name = hdev->name;
+ break;
+ case HID_DG_STYLUS:
+ /* force BTN_STYLUS to allow tablet matching in udev */
+ __set_bit(BTN_STYLUS, hi->input->keybit);
+ break;
+ case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
+ suffix = "Custom Media Keys";
+ break;
+ default:
+ suffix = "UNKNOWN";
+ break;
}
if (suffix) {
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 249d49b6b16c..52c3b01917e7 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -70,6 +70,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
index 832d8f9aaba2..099e1ce2f234 100644
--- a/drivers/hid/i2c-hid/Makefile
+++ b/drivers/hid/i2c-hid/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_I2C_HID) += i2c-hid.o
+
+i2c-hid-objs = i2c-hid-core.o
+i2c-hid-$(CONFIG_DMI) += i2c-hid-dmi-quirks.o
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 4e3592e7a3f7..4aab96cf0818 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -43,6 +43,7 @@
#include <linux/platform_data/i2c-hid.h>
#include "../hid-ids.h"
+#include "i2c-hid.h"
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
@@ -668,6 +669,7 @@ static int i2c_hid_parse(struct hid_device *hid)
char *rdesc;
int ret;
int tries = 3;
+ char *use_override;
i2c_hid_dbg(ihid, "entering %s\n", __func__);
@@ -686,26 +688,37 @@ static int i2c_hid_parse(struct hid_device *hid)
if (ret)
return ret;
- rdesc = kzalloc(rsize, GFP_KERNEL);
+ use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
+ &rsize);
- if (!rdesc) {
- dbg_hid("couldn't allocate rdesc memory\n");
- return -ENOMEM;
- }
-
- i2c_hid_dbg(ihid, "asking HID report descriptor\n");
-
- ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);
- if (ret) {
- hid_err(hid, "reading report descriptor failed\n");
- kfree(rdesc);
- return -EIO;
+ if (use_override) {
+ rdesc = use_override;
+ i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
+ } else {
+ rdesc = kzalloc(rsize, GFP_KERNEL);
+
+ if (!rdesc) {
+ dbg_hid("couldn't allocate rdesc memory\n");
+ return -ENOMEM;
+ }
+
+ i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+
+ ret = i2c_hid_command(client, &hid_report_descr_cmd,
+ rdesc, rsize);
+ if (ret) {
+ hid_err(hid, "reading report descriptor failed\n");
+ kfree(rdesc);
+ return -EIO;
+ }
}
i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
ret = hid_parse_report(hid, rdesc, rsize);
- kfree(rdesc);
+ if (!use_override)
+ kfree(rdesc);
+
if (ret) {
dbg_hid("parsing report descriptor failed\n");
return ret;
@@ -832,12 +845,19 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
int ret;
/* i2c hid fetch using a fixed descriptor size (30 bytes) */
- i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
- ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer,
- sizeof(struct i2c_hid_desc));
- if (ret) {
- dev_err(&client->dev, "hid_descr_cmd failed\n");
- return -ENODEV;
+ if (i2c_hid_get_dmi_i2c_hid_desc_override(client->name)) {
+ i2c_hid_dbg(ihid, "Using a HID descriptor override\n");
+ ihid->hdesc =
+ *i2c_hid_get_dmi_i2c_hid_desc_override(client->name);
+ } else {
+ i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
+ ret = i2c_hid_command(client, &hid_descr_cmd,
+ ihid->hdesc_buffer,
+ sizeof(struct i2c_hid_desc));
+ if (ret) {
+ dev_err(&client->dev, "hid_descr_cmd failed\n");
+ return -ENODEV;
+ }
}
/* Validate the length of HID descriptor, the 4 first bytes:
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
new file mode 100644
index 000000000000..1d645c9ab417
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Quirks for I2C-HID devices that do not supply proper descriptors
+ *
+ * Copyright (c) 2018 Julian Sax <jsbc@gmx.de>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+
+#include "i2c-hid.h"
+
+
+struct i2c_hid_desc_override {
+ union {
+ struct i2c_hid_desc *i2c_hid_desc;
+ uint8_t *i2c_hid_desc_buffer;
+ };
+ uint8_t *hid_report_desc;
+ unsigned int hid_report_desc_size;
+ uint8_t *i2c_name;
+};
+
+
+/*
+ * descriptors for the SIPODEV SP1064 touchpad
+ *
+ * This device does not supply any descriptors, so on Windows a filter
+ * driver operates between the i2c-hid layer and the device and injects
+ * these descriptors when the device is prompted. The descriptors were
+ * extracted by listening to the i2c-hid traffic that occurs between the
+ * Windows filter driver and the Windows i2c-hid driver.
+ */
+
+static const struct i2c_hid_desc_override sipodev_desc = {
+ .i2c_hid_desc_buffer = (uint8_t [])
+ {0x1e, 0x00, /* Length of descriptor */
+ 0x00, 0x01, /* Version of descriptor */
+ 0xdb, 0x01, /* Length of report descriptor */
+ 0x21, 0x00, /* Location of report descriptor */
+ 0x24, 0x00, /* Location of input report */
+ 0x1b, 0x00, /* Max input report length */
+ 0x25, 0x00, /* Location of output report */
+ 0x11, 0x00, /* Max output report length */
+ 0x22, 0x00, /* Location of command register */
+ 0x23, 0x00, /* Location of data register */
+ 0x11, 0x09, /* Vendor ID */
+ 0x88, 0x52, /* Product ID */
+ 0x06, 0x00, /* Version ID */
+ 0x00, 0x00, 0x00, 0x00 /* Reserved */
+ },
+
+ .hid_report_desc = (uint8_t [])
+ {0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x02, /* Usage (Mouse), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0xA1, 0x00, /* Collection (Physical), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x02, /* Usage Maximum (02h), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x15, 0x81, /* Logical Minimum (-127), */
+ 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0xC0, /* End Collection, */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x05, /* Usage (Touchpad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x04, /* Report ID (4), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x55, 0x0E, /* Unit Exponent (14), */
+ 0x65, 0x11, /* Unit (Centimeter), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x55, 0x0C, /* Unit Exponent (12), */
+ 0x66, 0x01, 0x10, /* Unit (Seconds), */
+ 0x47, 0xFF, 0xFF, 0x00, 0x00,/* Physical Maximum (65535), */
+ 0x27, 0xFF, 0xFF, 0x00, 0x00,/* Logical Maximum (65535), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x09, 0x56, /* Usage (Scan Time), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x54, /* Usage (Contact Count), */
+ 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x09, 0x01, /* Usage (01h), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x07, /* Report Count (7), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x09, 0x55, /* Usage (Contact Count Maximum), */
+ 0x09, 0x59, /* Usage (59h), */
+ 0x75, 0x04, /* Report Size (4), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x25, 0x0F, /* Logical Maximum (15), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x85, 0x07, /* Report ID (7), */
+ 0x09, 0x60, /* Usage (60h), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x95, 0x07, /* Report Count (7), */
+ 0xB1, 0x03, /* Feature (Constant, Variable), */
+ 0x85, 0x06, /* Report ID (6), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0xC5, /* Usage (C5h), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x96, 0x00, 0x01, /* Report Count (256), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x01, /* Usage (01h), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x0D, /* Report ID (13), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x02, /* Usage Maximum (02h), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x0E, /* Usage (Configuration), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x03, /* Report ID (3), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x09, 0x52, /* Usage (Device Mode), */
+ 0x25, 0x0A, /* Logical Maximum (10), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x00, /* Collection (Physical), */
+ 0x85, 0x05, /* Report ID (5), */
+ 0x09, 0x57, /* Usage (57h), */
+ 0x09, 0x58, /* Usage (58h), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0xB1, 0x03, /* Feature (Constant, Variable),*/
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+ },
+ .hid_report_desc_size = 475,
+ .i2c_name = "SYNA3602:00"
+};
+
+
+static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ {
+ .ident = "Teclast F6 Pro",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F6 Pro"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Teclast F7",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F7"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Trekstor Primebook C13",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Trekstor Primebook C11",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Direkt-Tek DTLAPY116-2",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY116-2"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Mediacom Flexbook Edge 11",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ }
+};
+
+
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{
+ struct i2c_hid_desc_override *override;
+ const struct dmi_system_id *system_id;
+
+ system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
+ if (!system_id)
+ return NULL;
+
+ override = system_id->driver_data;
+ if (strcmp(override->i2c_name, i2c_name))
+ return NULL;
+
+ return override->i2c_hid_desc;
+}
+
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size)
+{
+ struct i2c_hid_desc_override *override;
+ const struct dmi_system_id *system_id;
+
+ system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
+ if (!system_id)
+ return NULL;
+
+ override = system_id->driver_data;
+ if (strcmp(override->i2c_name, i2c_name))
+ return NULL;
+
+ *size = override->hid_report_desc_size;
+ return override->hid_report_desc;
+}
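Both exported helpers apply the same two-stage gate: dmi_first_match() decides whether this machine is in the override table at all, and the strcmp() against i2c_name ensures the override is only used for the one ACPI device it describes. A compressed user-space model of that flow (names and data hypothetical):

#include <stdio.h>
#include <string.h>

struct override { const char *i2c_name; const char *report_desc; };

/* stands in for dmi_first_match() having matched a table entry */
static const struct override *matched = &(struct override){
	.i2c_name = "SYNA3602:00",
	.report_desc = "...475-byte descriptor...",
};

static const char *report_desc_override(const char *i2c_name)
{
	if (!matched)
		return NULL;	/* machine not in the DMI table */
	if (strcmp(matched->i2c_name, i2c_name))
		return NULL;	/* some other i2c device on this machine */
	return matched->report_desc;
}

int main(void)
{
	printf("%s\n", report_desc_override("SYNA3602:00")
		       ? "use override" : "probe the device");
	return 0;
}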
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
new file mode 100644
index 000000000000..a8c19aef5824
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef I2C_HID_H
+#define I2C_HID_H
+
+
+#ifdef CONFIG_DMI
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size);
+#else
+static inline struct i2c_hid_desc
+ *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{ return NULL; }
+static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size)
+{ return NULL; }
+#endif
+
+#endif
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index bfbca7ec54ce..742191bb24c6 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -280,14 +280,14 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
* if tx send list is empty - return 0;
* may happen, as RX_COMPLETE handler doesn't check list emptiness.
*/
- if (list_empty(&dev->wr_processing_list_head.link)) {
+ if (list_empty(&dev->wr_processing_list)) {
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
out_ipc_locked = 0;
return 0;
}
- ipc_link = list_entry(dev->wr_processing_list_head.link.next,
- struct wr_msg_ctl_info, link);
+ ipc_link = list_first_entry(&dev->wr_processing_list,
+ struct wr_msg_ctl_info, link);
/* first 4 bytes of the data is the doorbell value (IPC header) */
length = ipc_link->length - sizeof(uint32_t);
doorbell_val = *(uint32_t *)ipc_link->inline_data;
@@ -338,7 +338,7 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
ipc_send_compl = ipc_link->ipc_send_compl;
ipc_send_compl_prm = ipc_link->ipc_send_compl_prm;
list_del_init(&ipc_link->link);
- list_add_tail(&ipc_link->link, &dev->wr_free_list_head.link);
+ list_add(&ipc_link->link, &dev->wr_free_list);
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
/*
@@ -372,18 +372,18 @@ static int write_ipc_to_queue(struct ishtp_device *dev,
unsigned char *msg, int length)
{
struct wr_msg_ctl_info *ipc_link;
- unsigned long flags;
+ unsigned long flags;
if (length > IPC_FULL_MSG_SIZE)
return -EMSGSIZE;
spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
- if (list_empty(&dev->wr_free_list_head.link)) {
+ if (list_empty(&dev->wr_free_list)) {
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
return -ENOMEM;
}
- ipc_link = list_entry(dev->wr_free_list_head.link.next,
- struct wr_msg_ctl_info, link);
+ ipc_link = list_first_entry(&dev->wr_free_list,
+ struct wr_msg_ctl_info, link);
list_del_init(&ipc_link->link);
ipc_link->ipc_send_compl = ipc_send_compl;
@@ -391,7 +391,7 @@ static int write_ipc_to_queue(struct ishtp_device *dev,
ipc_link->length = length;
memcpy(ipc_link->inline_data, msg, length);
- list_add_tail(&ipc_link->link, &dev->wr_processing_list_head.link);
+ list_add_tail(&ipc_link->link, &dev->wr_processing_list);
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
write_ipc_from_queue(dev);
@@ -487,17 +487,13 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
{
uint32_t reset_id;
unsigned long flags;
- struct wr_msg_ctl_info *processing, *next;
/* Read reset ID */
reset_id = ish_reg_read(dev, IPC_REG_ISH2HOST_MSG) & 0xFFFF;
/* Clear IPC output queue */
spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
- list_for_each_entry_safe(processing, next,
- &dev->wr_processing_list_head.link, link) {
- list_move_tail(&processing->link, &dev->wr_free_list_head.link);
- }
+ list_splice_init(&dev->wr_processing_list, &dev->wr_free_list);
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
/* ISHTP notification in IPC_RESET */
@@ -921,9 +917,9 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
spin_lock_init(&dev->out_ipc_spinlock);
/* Init IPC processing and free lists */
- INIT_LIST_HEAD(&dev->wr_processing_list_head.link);
- INIT_LIST_HEAD(&dev->wr_free_list_head.link);
- for (i = 0; i < IPC_TX_FIFO_SIZE; ++i) {
+ INIT_LIST_HEAD(&dev->wr_processing_list);
+ INIT_LIST_HEAD(&dev->wr_free_list);
+ for (i = 0; i < IPC_TX_FIFO_SIZE; i++) {
struct wr_msg_ctl_info *tx_buf;
tx_buf = devm_kzalloc(&pdev->dev,
@@ -939,7 +935,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
i);
break;
}
- list_add_tail(&tx_buf->link, &dev->wr_free_list_head.link);
+ list_add_tail(&tx_buf->link, &dev->wr_free_list);
}
dev->ops = &ish_hw_ops;
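
The wr_processing_list_head/wr_free_list_head wrappers replaced above were list nodes pretending to be list heads; converting them to plain struct list_head lets the standard helpers do the work. The same idiom in isolation (demo_* names are illustrative):

    #include <linux/list.h>

    struct demo_msg {
            struct list_head link;
            int length;
    };

    static LIST_HEAD(demo_processing);
    static LIST_HEAD(demo_free);

    /* Pop the oldest queued message, or NULL when the queue is empty. */
    static struct demo_msg *demo_pop(void)
    {
            struct demo_msg *m;

            if (list_empty(&demo_processing))
                    return NULL;
            m = list_first_entry(&demo_processing, struct demo_msg, link);
            list_del_init(&m->link);
            return m;
    }

    /* On reset: return every in-flight message to the free list in one splice. */
    static void demo_reset(void)
    {
            list_splice_init(&demo_processing, &demo_free);
    }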
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 256b3016116c..8793cc49f855 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -115,18 +115,19 @@ static const struct pci_device_id ish_invalid_pci_ids[] = {
*/
static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ishtp_device *dev;
+ int ret;
struct ish_hw *hw;
- int ret;
+ struct ishtp_device *ishtp;
+ struct device *dev = &pdev->dev;
/* Check for invalid platforms for ISH support */
if (pci_dev_present(ish_invalid_pci_ids))
return -ENODEV;
/* enable pci dev */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
- dev_err(&pdev->dev, "ISH: Failed to enable PCI device\n");
+ dev_err(dev, "ISH: Failed to enable PCI device\n");
return ret;
}
@@ -134,65 +135,44 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* pci request regions for ISH driver */
- ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ ret = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
if (ret) {
- dev_err(&pdev->dev, "ISH: Failed to get PCI regions\n");
- goto disable_device;
+ dev_err(dev, "ISH: Failed to get PCI regions\n");
+ return ret;
}
/* allocates and initializes the ISH dev structure */
- dev = ish_dev_init(pdev);
- if (!dev) {
+ ishtp = ish_dev_init(pdev);
+ if (!ishtp) {
ret = -ENOMEM;
- goto release_regions;
+ return ret;
}
- hw = to_ish_hw(dev);
- dev->print_log = ish_event_tracer;
+ hw = to_ish_hw(ishtp);
+ ishtp->print_log = ish_event_tracer;
/* mapping IO device memory */
- hw->mem_addr = pci_iomap(pdev, 0, 0);
- if (!hw->mem_addr) {
- dev_err(&pdev->dev, "ISH: mapping I/O range failure\n");
- ret = -ENOMEM;
- goto free_device;
- }
-
- dev->pdev = pdev;
-
+ hw->mem_addr = pcim_iomap_table(pdev)[0];
+ ishtp->pdev = pdev;
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
/* request and enable interrupt */
- ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED,
- KBUILD_MODNAME, dev);
+ ret = devm_request_irq(dev, pdev->irq, ish_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, ishtp);
if (ret) {
- dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n",
- pdev->irq);
- goto free_device;
+ dev_err(dev, "ISH: request IRQ %d failed\n", pdev->irq);
+ return ret;
}
- dev_set_drvdata(dev->devc, dev);
+ dev_set_drvdata(ishtp->devc, ishtp);
- init_waitqueue_head(&dev->suspend_wait);
- init_waitqueue_head(&dev->resume_wait);
+ init_waitqueue_head(&ishtp->suspend_wait);
+ init_waitqueue_head(&ishtp->resume_wait);
- ret = ish_init(dev);
+ ret = ish_init(ishtp);
if (ret)
- goto free_irq;
+ return ret;
return 0;
-
-free_irq:
- free_irq(pdev->irq, dev);
-free_device:
- pci_iounmap(pdev, hw->mem_addr);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_clear_master(pdev);
- pci_disable_device(pdev);
- dev_err(&pdev->dev, "ISH: PCI driver initialization failed.\n");
-
- return ret;
}
/**
@@ -204,16 +184,9 @@ disable_device:
static void ish_remove(struct pci_dev *pdev)
{
struct ishtp_device *ishtp_dev = pci_get_drvdata(pdev);
- struct ish_hw *hw = to_ish_hw(ishtp_dev);
ishtp_bus_remove_all_clients(ishtp_dev, false);
ish_device_disable(ishtp_dev);
-
- free_irq(pdev->irq, ishtp_dev);
- pci_iounmap(pdev, hw->mem_addr);
- pci_release_regions(pdev);
- pci_clear_master(pdev);
- pci_disable_device(pdev);
}
static struct device __maybe_unused *ish_resume_device;
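
The probe rewrite above works because managed (pcim_*/devm_*) resources are released automatically when probe fails or the device is unbound, which is what lets every error path collapse to a plain return and lets remove() drop its free_irq()/iounmap()/release cleanup. A condensed sketch of the pattern under those assumptions (demo_* names are placeholders):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static irqreturn_t demo_irq(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            void __iomem *regs;
            int ret;

            ret = pcim_enable_device(pdev);         /* auto-disabled on detach */
            if (ret)
                    return ret;

            ret = pcim_iomap_regions(pdev, BIT(0), "demo"); /* BAR 0 only */
            if (ret)
                    return ret;
            regs = pcim_iomap_table(pdev)[0];

            /* Released automatically: no free_irq() needed in remove() */
            return devm_request_irq(&pdev->dev, pdev->irq, demo_irq,
                                    IRQF_SHARED, "demo", regs);
    }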
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 2d28cffc1404..e64243bc9c96 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -320,23 +320,14 @@ do_get_report:
*/
static void ish_cl_event_cb(struct ishtp_cl_device *device)
{
- struct ishtp_cl *hid_ishtp_cl = device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(device);
struct ishtp_cl_rb *rb_in_proc;
size_t r_length;
- unsigned long flags;
if (!hid_ishtp_cl)
return;
- spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags);
- while (!list_empty(&hid_ishtp_cl->in_process_list.list)) {
- rb_in_proc = list_entry(
- hid_ishtp_cl->in_process_list.list.next,
- struct ishtp_cl_rb, list);
- list_del_init(&rb_in_proc->list);
- spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock,
- flags);
-
+ while ((rb_in_proc = ishtp_cl_rx_get_rb(hid_ishtp_cl)) != NULL) {
if (!rb_in_proc->buffer.data)
return;
@@ -346,9 +337,7 @@ static void ish_cl_event_cb(struct ishtp_cl_device *device)
process_recv(hid_ishtp_cl, rb_in_proc->buffer.data, r_length);
ishtp_cl_io_rb_recycle(rb_in_proc);
- spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags);
}
- spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock, flags);
}
/**
@@ -637,8 +626,8 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
{
struct ishtp_device *dev;
- unsigned long flags;
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_fw_client *fw_client;
int i;
int rv;
@@ -660,16 +649,14 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
hid_ishtp_cl->rx_ring_size = HID_CL_RX_RING_SIZE;
hid_ishtp_cl->tx_ring_size = HID_CL_TX_RING_SIZE;
- spin_lock_irqsave(&dev->fw_clients_lock, flags);
- i = ishtp_fw_cl_by_uuid(dev, &hid_ishtp_guid);
- if (i < 0) {
- spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid);
+ if (!fw_client) {
dev_err(&client_data->cl_device->dev,
"ish client uuid not found\n");
- return i;
+ return -ENOENT;
}
- hid_ishtp_cl->fw_client_id = dev->fw_clients[i].client_id;
- spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+
+ hid_ishtp_cl->fw_client_id = fw_client->client_id;
hid_ishtp_cl->state = ISHTP_CL_CONNECTING;
rv = ishtp_cl_connect(hid_ishtp_cl);
@@ -765,7 +752,7 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
if (!hid_ishtp_cl)
return;
- cl_device->driver_data = hid_ishtp_cl;
+ ishtp_set_drvdata(cl_device, hid_ishtp_cl);
hid_ishtp_cl->client_data = client_data;
client_data->hid_ishtp_cl = hid_ishtp_cl;
@@ -814,7 +801,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
if (!hid_ishtp_cl)
return -ENOMEM;
- cl_device->driver_data = hid_ishtp_cl;
+ ishtp_set_drvdata(cl_device, hid_ishtp_cl);
hid_ishtp_cl->client_data = client_data;
client_data->hid_ishtp_cl = hid_ishtp_cl;
client_data->cl_device = cl_device;
@@ -844,7 +831,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
{
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
@@ -874,7 +861,7 @@ static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
{
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
@@ -898,7 +885,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
static int hid_ishtp_cl_suspend(struct device *device)
{
struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
@@ -919,7 +906,7 @@ static int hid_ishtp_cl_suspend(struct device *device)
static int hid_ishtp_cl_resume(struct device *device)
{
struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 2623a567ffba..728dc6d4561a 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -149,6 +149,31 @@ int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *uuid)
EXPORT_SYMBOL(ishtp_fw_cl_by_uuid);
/**
+ * ishtp_fw_cl_get_client() - return the fw client matching a UUID
+ * @dev: the ishtp device structure
+ * @uuid: uuid of the client to search for
+ *
+ * Search the firmware clients by UUID and return the related client
+ * information.
+ *
+ * Return: pointer to the client information on success, NULL on failure.
+ */
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const uuid_le *uuid)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ i = ishtp_fw_cl_by_uuid(dev, uuid);
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ if (i < 0 || dev->fw_clients[i].props.fixed_address)
+ return NULL;
+
+ return &dev->fw_clients[i];
+}
+EXPORT_SYMBOL(ishtp_fw_cl_get_client);
+
+/**
* ishtp_fw_cl_by_id() - return index to fw_clients for client_id
* @dev: the ishtp device structure
* @client_id: fw client id to search
@@ -564,6 +589,33 @@ void ishtp_put_device(struct ishtp_cl_device *cl_device)
EXPORT_SYMBOL(ishtp_put_device);
/**
+ * ishtp_set_drvdata() - set client driver data
+ * @cl_device: client device instance
+ * @data: driver data need to be set
+ *
+ * Set client driver data to cl_device->driver_data.
+ */
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data)
+{
+ cl_device->driver_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_drvdata);
+
+/**
+ * ishtp_get_drvdata() - get client driver data
+ * @cl_device: client device instance
+ *
+ * Get client driver data from cl_device->driver_data.
+ *
+ * Return: pointer of driver data
+ */
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->driver_data;
+}
+EXPORT_SYMBOL(ishtp_get_drvdata);
+
+/**
* ishtp_bus_new_client() - Create a new client
* @dev: ISHTP device instance
*
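
ishtp_set_drvdata()/ishtp_get_drvdata() mirror dev_set_drvdata()/dev_get_drvdata() so client drivers stop reaching into cl_device->driver_data directly. A hedged usage sketch (the probe/remove bodies are illustrative; ishtp_cl_allocate()/ishtp_cl_free() as declared in client.h):

    static int demo_cl_probe(struct ishtp_cl_device *cl_device)
    {
            struct ishtp_cl *cl = ishtp_cl_allocate(cl_device->ishtp_dev);

            if (!cl)
                    return -ENOMEM;
            ishtp_set_drvdata(cl_device, cl);       /* stash per-device state */
            return 0;
    }

    static int demo_cl_remove(struct ishtp_cl_device *cl_device)
    {
            struct ishtp_cl *cl = ishtp_get_drvdata(cl_device);

            ishtp_cl_free(cl);
            return 0;
    }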
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
index a1ffae7f26ad..b8a5bcc82536 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.h
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -101,6 +101,9 @@ void ishtp_reset_compl_handler(struct ishtp_device *dev);
void ishtp_put_device(struct ishtp_cl_device *);
void ishtp_get_device(struct ishtp_cl_device *);
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
+
int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
struct module *owner);
#define ishtp_cl_driver_register(driver) \
@@ -110,5 +113,7 @@ void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
int ishtp_register_event_cb(struct ishtp_cl_device *device,
void (*read_cb)(struct ishtp_cl_device *));
int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *cuuid);
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const uuid_le *uuid);
#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
index b9b917d2d50d..248651c35497 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -69,6 +69,8 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
int j;
unsigned long flags;
+ cl->tx_ring_free_size = 0;
+
/* Allocate pool to free Tx bufs */
for (j = 0; j < cl->tx_ring_size; ++j) {
struct ishtp_cl_tx_ring *tx_buf;
@@ -85,6 +87,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
}
return 0;
@@ -144,6 +147,7 @@ void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
tx_buf = list_entry(cl->tx_free_list.list.next,
struct ishtp_cl_tx_ring, list);
list_del(&tx_buf->list);
+ --cl->tx_ring_free_size;
kfree(tx_buf->send_buf.data);
kfree(tx_buf);
}
@@ -255,3 +259,48 @@ int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
return rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
+
+/**
+ * ishtp_cl_tx_empty() - test whether the client device tx buffer is empty
+ * @cl: Pointer to client device instance
+ *
+ * Look at the client device tx buffer list and check whether it is empty.
+ *
+ * Return: true if the client tx buffer list is empty, otherwise false
+ */
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
+{
+ int tx_list_empty;
+ unsigned long tx_flags;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ tx_list_empty = list_empty(&cl->tx_list.list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ return !!tx_list_empty;
+}
+EXPORT_SYMBOL(ishtp_cl_tx_empty);
+
+/**
+ * ishtp_cl_rx_get_rb() - get an rb from the client device rx buffer list
+ * @cl: Pointer to client device instance
+ *
+ * Check the client device in-process buffer list and take an rb from it.
+ *
+ * Return: rb pointer if the buffer list isn't empty, else NULL
+ */
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
+{
+ unsigned long rx_flags;
+ struct ishtp_cl_rb *rb;
+
+ spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
+ rb = list_first_entry_or_null(&cl->in_process_list.list,
+ struct ishtp_cl_rb, list);
+ if (rb)
+ list_del_init(&rb->list);
+ spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);
+
+ return rb;
+}
+EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
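
With the locking folded into ishtp_cl_rx_get_rb(), an event callback reduces to the drain loop already visible in ishtp-hid-client.c above; the same shape in isolation (process_buffer() is a hypothetical consumer):

    static void demo_event_cb(struct ishtp_cl *cl)
    {
            struct ishtp_cl_rb *rb;

            /* Each call atomically detaches one buffer, or returns NULL. */
            while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
                    process_buffer(rb->buffer.data, rb->buffer_size);
                    ishtp_cl_io_rb_recycle(rb);
            }
    }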
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 007443ef5fca..faeccdb1475b 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -22,6 +22,25 @@
#include "hbm.h"
#include "client.h"
+int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
+{
+ unsigned long tx_free_flags;
+ int size;
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+
+ return size;
+}
+EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
+
+int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
+{
+ return cl->tx_ring_free_size;
+}
+EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
+
/**
* ishtp_read_list_flush() - Flush read queue
* @cl: ishtp client instance
@@ -90,6 +109,7 @@ static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
+ cl->tx_ring_free_size = cl->tx_ring_size;
/* dma */
cl->last_tx_path = CL_TX_PATH_IPC;
@@ -577,6 +597,8 @@ int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
* max ISHTP message size per client
*/
list_del_init(&cl_msg->list);
+ --cl->tx_ring_free_size;
+
spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
memcpy(cl_msg->send_buf.data, buf, length);
cl_msg->send_buf.size = length;
@@ -685,6 +707,7 @@ static void ipc_tx_callback(void *prm)
ishtp_write_message(dev, &ishtp_hdr, pmsg);
spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
tx_free_flags);
} else {
@@ -778,6 +801,7 @@ static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
++cl->send_msg_cnt_dma;
}
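
tx_ring_free_size is decremented when ishtp_cl_send() takes a buffer off the free list and incremented when the IPC or DMA completion returns it, so ishtp_cl_get_tx_free_buffer_size() gives callers a cheap flow-control probe. A sketch of a sender using it (demo_send() is illustrative, not from this series):

    /* Back off when the tx ring has no room for this message. */
    static int demo_send(struct ishtp_cl *cl, uint8_t *msg, size_t len)
    {
            if (ishtp_cl_get_tx_free_buffer_size(cl) < (int)len)
                    return -EAGAIN; /* retry after a completion frees a ring */

            return ishtp_cl_send(cl, msg, len);
    }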
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
index 79eade547f5d..042f4c4853b1 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.h
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -84,6 +84,7 @@ struct ishtp_cl {
/* Client Tx buffers list */
unsigned int tx_ring_size;
struct ishtp_cl_tx_ring tx_list, tx_free_list;
+ int tx_ring_free_size;
spinlock_t tx_list_spinlock;
spinlock_t tx_free_list_spinlock;
size_t tx_offs; /* Offset in buffer at head of 'tx_list' */
@@ -137,6 +138,8 @@ int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
+int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl);
+int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl);
/* DMA I/F functions */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
@@ -178,5 +181,7 @@ int ishtp_cl_flush_queues(struct ishtp_cl *cl);
/* exported functions from ISHTP client buffer management scope */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
#endif /* _ISHTP_CLIENT_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 6a6d927b78b0..e7c6bfefaf9e 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -207,7 +207,7 @@ struct ishtp_device {
struct work_struct bh_hbm_work;
/* IPC write queue */
- struct wr_msg_ctl_info wr_processing_list_head, wr_free_list_head;
+ struct list_head wr_processing_list, wr_free_list;
/* For both processing list and free list */
spinlock_t wr_processing_spinlock;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index e0a06be5ef5c..5dd3a8245f0f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -3335,6 +3335,7 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
void wacom_setup_device_quirks(struct wacom *wacom)
{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct wacom_features *features = &wacom->wacom_wac.features;
/* The pen and pad share the same interface on most devices */
@@ -3464,6 +3465,24 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type == REMOTE)
features->device_type |= WACOM_DEVICETYPE_WL_MONITOR;
+
+ /* HID descriptor for DTK-2451 / DTH-2452 claims to report lots
+ * of things it shouldn't. Let's fix up the damage...
+ */
+ if (wacom->hdev->product == 0x382 || wacom->hdev->product == 0x37d) {
+ features->quirks &= ~WACOM_QUIRK_TOOLSERIAL;
+ __clear_bit(BTN_TOOL_BRUSH, wacom_wac->pen_input->keybit);
+ __clear_bit(BTN_TOOL_PENCIL, wacom_wac->pen_input->keybit);
+ __clear_bit(BTN_TOOL_AIRBRUSH, wacom_wac->pen_input->keybit);
+ __clear_bit(ABS_Z, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_DISTANCE, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_TILT_X, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_TILT_Y, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_WHEEL, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_MISC, wacom_wac->pen_input->absbit);
+ __clear_bit(MSC_SERIAL, wacom_wac->pen_input->mscbit);
+ __clear_bit(EV_MSC, wacom_wac->pen_input->evbit);
+ }
}
int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
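
The __clear_bit() calls above edit the input capability bitmaps that were populated from the bogus HID descriptor, before the input device is registered, so userspace never sees the phantom tools and axes. The pattern in miniature, against a hypothetical device:

    #include <linux/input.h>

    /* Drop capabilities a quirky descriptor wrongly advertised. */
    static void demo_fixup_caps(struct input_dev *dev)
    {
            __clear_bit(BTN_TOOL_AIRBRUSH, dev->keybit);    /* no airbrush tool */
            __clear_bit(ABS_WHEEL, dev->absbit);            /* no wheel axis */
            __clear_bit(EV_MSC, dev->evbit);                /* no misc events */
    }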
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 741857d80da1..de8193f3b838 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -79,85 +79,96 @@ void vmbus_setevent(struct vmbus_channel *channel)
}
EXPORT_SYMBOL_GPL(vmbus_setevent);
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
- u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
- void (*onchannelcallback)(void *context), void *context)
+/* vmbus_free_ring - drop mapping of ring buffer */
+void vmbus_free_ring(struct vmbus_channel *channel)
+{
+ hv_ringbuffer_cleanup(&channel->outbound);
+ hv_ringbuffer_cleanup(&channel->inbound);
+
+ if (channel->ringbuffer_page) {
+ __free_pages(channel->ringbuffer_page,
+ get_order(channel->ringbuffer_pagecount
+ << PAGE_SHIFT));
+ channel->ringbuffer_page = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(vmbus_free_ring);
+
+/* vmbus_alloc_ring - allocate and map pages for ring buffer */
+int vmbus_alloc_ring(struct vmbus_channel *newchannel,
+ u32 send_size, u32 recv_size)
+{
+ struct page *page;
+ int order;
+
+ if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
+ return -EINVAL;
+
+ /* Allocate the ring buffer */
+ order = get_order(send_size + recv_size);
+ page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
+ GFP_KERNEL|__GFP_ZERO, order);
+
+ if (!page)
+ page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
+
+ if (!page)
+ return -ENOMEM;
+
+ newchannel->ringbuffer_page = page;
+ newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
+ newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
+
+static int __vmbus_open(struct vmbus_channel *newchannel,
+ void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
{
struct vmbus_channel_open_channel *open_msg;
struct vmbus_channel_msginfo *open_info = NULL;
+ struct page *page = newchannel->ringbuffer_page;
+ u32 send_pages, recv_pages;
unsigned long flags;
- int ret, err = 0;
- struct page *page;
+ int err;
- if (send_ringbuffer_size % PAGE_SIZE ||
- recv_ringbuffer_size % PAGE_SIZE)
+ if (userdatalen > MAX_USER_DEFINED_BYTES)
return -EINVAL;
+ send_pages = newchannel->ringbuffer_send_offset;
+ recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
spin_lock_irqsave(&newchannel->lock, flags);
- if (newchannel->state == CHANNEL_OPEN_STATE) {
- newchannel->state = CHANNEL_OPENING_STATE;
- } else {
+ if (newchannel->state != CHANNEL_OPEN_STATE) {
spin_unlock_irqrestore(&newchannel->lock, flags);
return -EINVAL;
}
spin_unlock_irqrestore(&newchannel->lock, flags);
+ newchannel->state = CHANNEL_OPENING_STATE;
newchannel->onchannel_callback = onchannelcallback;
newchannel->channel_callback_context = context;
- /* Allocate the ring buffer */
- page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
- GFP_KERNEL|__GFP_ZERO,
- get_order(send_ringbuffer_size +
- recv_ringbuffer_size));
-
- if (!page)
- page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
- get_order(send_ringbuffer_size +
- recv_ringbuffer_size));
-
- if (!page) {
- err = -ENOMEM;
- goto error_set_chnstate;
- }
-
- newchannel->ringbuffer_pages = page_address(page);
- newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
- recv_ringbuffer_size) >> PAGE_SHIFT;
-
- ret = hv_ringbuffer_init(&newchannel->outbound, page,
- send_ringbuffer_size >> PAGE_SHIFT);
-
- if (ret != 0) {
- err = ret;
- goto error_free_pages;
- }
-
- ret = hv_ringbuffer_init(&newchannel->inbound,
- &page[send_ringbuffer_size >> PAGE_SHIFT],
- recv_ringbuffer_size >> PAGE_SHIFT);
- if (ret != 0) {
- err = ret;
- goto error_free_pages;
- }
+ err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+ if (err)
+ goto error_clean_ring;
+ err = hv_ringbuffer_init(&newchannel->inbound,
+ &page[send_pages], recv_pages);
+ if (err)
+ goto error_clean_ring;
/* Establish the gpadl for the ring buffer */
newchannel->ringbuffer_gpadlhandle = 0;
- ret = vmbus_establish_gpadl(newchannel,
- page_address(page),
- send_ringbuffer_size +
- recv_ringbuffer_size,
+ err = vmbus_establish_gpadl(newchannel,
+ page_address(newchannel->ringbuffer_page),
+ (send_pages + recv_pages) << PAGE_SHIFT,
&newchannel->ringbuffer_gpadlhandle);
-
- if (ret != 0) {
- err = ret;
- goto error_free_pages;
- }
+ if (err)
+ goto error_clean_ring;
/* Create and init the channel open message */
open_info = kmalloc(sizeof(*open_info) +
@@ -176,15 +187,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
open_msg->openid = newchannel->offermsg.child_relid;
open_msg->child_relid = newchannel->offermsg.child_relid;
open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
- open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
- PAGE_SHIFT;
+ open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
open_msg->target_vp = newchannel->target_vp;
- if (userdatalen > MAX_USER_DEFINED_BYTES) {
- err = -EINVAL;
- goto error_free_gpadl;
- }
-
if (userdatalen)
memcpy(open_msg->userdata, userdata, userdatalen);
@@ -195,18 +200,16 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
if (newchannel->rescind) {
err = -ENODEV;
- goto error_free_gpadl;
+ goto error_free_info;
}
- ret = vmbus_post_msg(open_msg,
+ err = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);
- trace_vmbus_open(open_msg, ret);
+ trace_vmbus_open(open_msg, err);
- if (ret != 0) {
- err = ret;
+ if (err != 0)
goto error_clean_msglist;
- }
wait_for_completion(&open_info->waitevent);
@@ -216,12 +219,12 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
if (newchannel->rescind) {
err = -ENODEV;
- goto error_free_gpadl;
+ goto error_free_info;
}
if (open_info->response.open_result.status) {
err = -EAGAIN;
- goto error_free_gpadl;
+ goto error_free_info;
}
newchannel->state = CHANNEL_OPENED_STATE;
@@ -232,19 +235,50 @@ error_clean_msglist:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
+error_free_info:
+ kfree(open_info);
error_free_gpadl:
vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
- kfree(open_info);
-error_free_pages:
+ newchannel->ringbuffer_gpadlhandle = 0;
+error_clean_ring:
hv_ringbuffer_cleanup(&newchannel->outbound);
hv_ringbuffer_cleanup(&newchannel->inbound);
- __free_pages(page,
- get_order(send_ringbuffer_size + recv_ringbuffer_size));
-error_set_chnstate:
newchannel->state = CHANNEL_OPEN_STATE;
return err;
}
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+ u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+ void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ int err;
+
+ err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+ recv_ringbuffer_size);
+ if (err)
+ return err;
+
+ err = __vmbus_open(newchannel, userdata, userdatalen,
+ onchannelcallback, context);
+ if (err)
+ vmbus_free_ring(newchannel);
+
+ return err;
+}
EXPORT_SYMBOL_GPL(vmbus_open);
/* Used for Hyper-V Socket: a guest client's connect() to the host */
@@ -612,10 +646,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* in Hyper-V Manager), the driver's remove() invokes vmbus_close():
* here we should skip most of the below cleanup work.
*/
- if (channel->state != CHANNEL_OPENED_STATE) {
- ret = -EINVAL;
- goto out;
- }
+ if (channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
channel->state = CHANNEL_OPEN_STATE;
@@ -637,11 +669,10 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* If we failed to post the close msg,
* it is perhaps better to leak memory.
*/
- goto out;
}
/* Tear down the gpadl for the channel's ring buffer */
- if (channel->ringbuffer_gpadlhandle) {
+ else if (channel->ringbuffer_gpadlhandle) {
ret = vmbus_teardown_gpadl(channel,
channel->ringbuffer_gpadlhandle);
if (ret) {
@@ -650,74 +681,78 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* If we failed to teardown gpadl,
* it is perhaps better to leak memory.
*/
- goto out;
}
- }
-
- /* Cleanup the ring buffers for this channel */
- hv_ringbuffer_cleanup(&channel->outbound);
- hv_ringbuffer_cleanup(&channel->inbound);
- free_pages((unsigned long)channel->ringbuffer_pages,
- get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+ channel->ringbuffer_gpadlhandle = 0;
+ }
-out:
return ret;
}
-/*
- * vmbus_close - Close the specified channel
- */
-void vmbus_close(struct vmbus_channel *channel)
+/* vmbus_disconnect_ring - close the channel's sub-channels, then the primary */
+int vmbus_disconnect_ring(struct vmbus_channel *channel)
{
- struct list_head *cur, *tmp;
- struct vmbus_channel *cur_channel;
+ struct vmbus_channel *cur_channel, *tmp;
+ unsigned long flags;
+ LIST_HEAD(list);
+ int ret;
- if (channel->primary_channel != NULL) {
- /*
- * We will only close sub-channels when
- * the primary is closed.
- */
- return;
- }
- /*
- * Close all the sub-channels first and then close the
- * primary channel.
- */
- list_for_each_safe(cur, tmp, &channel->sc_list) {
- cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
- if (cur_channel->rescind) {
+ if (channel->primary_channel != NULL)
+ return -EINVAL;
+
+ /* Snapshot the list of subchannels */
+ spin_lock_irqsave(&channel->lock, flags);
+ list_splice_init(&channel->sc_list, &list);
+ channel->num_sc = 0;
+ spin_unlock_irqrestore(&channel->lock, flags);
+
+ list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+ if (cur_channel->rescind)
wait_for_completion(&cur_channel->rescind_event);
- mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_close_internal(cur_channel);
- hv_process_channel_removal(
- cur_channel->offermsg.child_relid);
- } else {
- mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_close_internal(cur_channel);
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+ if (vmbus_close_internal(cur_channel) == 0) {
+ vmbus_free_ring(cur_channel);
+
+ if (cur_channel->rescind)
+ hv_process_channel_removal(cur_channel);
}
mutex_unlock(&vmbus_connection.channel_mutex);
}
+
/*
* Now close the primary.
*/
mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_close_internal(channel);
+ ret = vmbus_close_internal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
+
+/*
+ * vmbus_close - Close the specified channel
+ */
+void vmbus_close(struct vmbus_channel *channel)
+{
+ if (vmbus_disconnect_ring(channel) == 0)
+ vmbus_free_ring(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);
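
Splitting allocation (vmbus_alloc_ring()) from connection (vmbus_connect_ring()) lets a driver keep its ring-buffer pages across a reconnect instead of freeing and reallocating them; vmbus_open()/vmbus_close() remain as the combined convenience wrappers. A hedged sketch of the reuse flow (demo_reconnect() is illustrative):

    /* Reconnect a channel without dropping its ring-buffer pages. */
    static int demo_reconnect(struct vmbus_channel *chan,
                              void (*cb)(void *), void *ctx)
    {
            int ret;

            ret = vmbus_disconnect_ring(chan);      /* close, keep the pages */
            if (ret)
                    return ret;

            return vmbus_connect_ring(chan, cb, ctx); /* reopen on same pages */
    }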
/**
* vmbus_sendpacket() - Send the specified buffer on the given channel
- * @channel: Pointer to vmbus_channel structure.
- * @buffer: Pointer to the buffer you want to receive the data into.
- * @bufferlen: Maximum size of what the the buffer will hold
+ * @channel: Pointer to vmbus_channel structure
+ * @buffer: Pointer to the buffer you want to send the data from.
+ * @bufferlen: Maximum size of what the buffer holds.
* @requestid: Identifier of the request
- * @type: Type of packet that is being send e.g. negotiate, time
- * packet etc.
+ * @type: Type of packet that is being sent e.g. negotiate, time
+ * packet etc.
+ * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
*
- * Sends data in @buffer directly to hyper-v via the vmbus
- * This will send the data unparsed to hyper-v.
+ * Sends data in @buffer directly to Hyper-V via the vmbus.
+ * This will send the data unparsed to Hyper-V.
*
* Mainly used by Hyper-V drivers.
*/
@@ -850,12 +885,13 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
/**
- * vmbus_recvpacket() - Retrieve the user packet on the specified channel
- * @channel: Pointer to vmbus_channel structure.
+ * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
+ * @channel: Pointer to vmbus_channel structure
* @buffer: Pointer to the buffer you want to receive the data into.
- * @bufferlen: Maximum size of what the the buffer will hold
- * @buffer_actual_len: The actual size of the data after it was received
+ * @bufferlen: Maximum size of what the buffer can hold.
+ * @buffer_actual_len: The actual size of the data after it was received.
* @requestid: Identifier of the request
+ * @raw: true means keep the vmpacket_descriptor header in the received data.
*
* Receives directly from the hyper-v vmbus and puts the data it received
* into Buffer. This will receive the data unparsed from hyper-v.
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 0f0e091c117c..6277597d3d58 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -198,24 +198,19 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
}
/**
- * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
+ * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
* @icmsghdrp: Pointer to msg header structure
- * @icmsg_negotiate: Pointer to negotiate message structure
* @buf: Raw buffer channel data
+ * @fw_version: The framework versions we can support.
+ * @fw_vercnt: The size of @fw_version.
+ * @srv_version: The service versions we can support.
+ * @srv_vercnt: The size of @srv_version.
+ * @nego_fw_version: The selected framework version.
+ * @nego_srv_version: The selected service version.
*
- * @icmsghdrp is of type &struct icmsg_hdr.
- * Set up and fill in default negotiate response message.
- *
- * The fw_version and fw_vercnt specifies the framework version that
- * we can support.
- *
- * The srv_version and srv_vercnt specifies the service
- * versions we can support.
- *
- * Versions are given in decreasing order.
- *
- * nego_fw_version and nego_srv_version store the selected protocol versions.
+ * Note: Versions are given in decreasing order.
*
+ * Set up and fill in default negotiate response message.
* Mainly used by Hyper-V drivers.
*/
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
@@ -385,21 +380,14 @@ static void vmbus_release_relid(u32 relid)
trace_vmbus_release_relid(&msg, ret);
}
-void hv_process_channel_removal(u32 relid)
+void hv_process_channel_removal(struct vmbus_channel *channel)
{
+ struct vmbus_channel *primary_channel;
unsigned long flags;
- struct vmbus_channel *primary_channel, *channel;
BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
-
- /*
- * Make sure channel is valid as we may have raced.
- */
- channel = relid2channel(relid);
- if (!channel)
- return;
-
BUG_ON(!channel->rescind);
+
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -429,7 +417,7 @@ void hv_process_channel_removal(u32 relid)
cpumask_clear_cpu(channel->target_cpu,
&primary_channel->alloced_cpus_in_node);
- vmbus_release_relid(relid);
+ vmbus_release_relid(channel->offermsg.child_relid);
free_channel(channel);
}
@@ -606,16 +594,18 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
bool perf_chn = vmbus_devs[dev_type].perf_device;
struct vmbus_channel *primary = channel->primary_channel;
int next_node;
- struct cpumask available_mask;
+ cpumask_var_t available_mask;
struct cpumask *alloced_mask;
if ((vmbus_proto_version == VERSION_WS2008) ||
- (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
+ (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
+ !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
/*
* Prior to win8, all channel interrupts are
* delivered on cpu 0.
* Also if the channel is not a performance critical
* channel, bind it to cpu 0.
+ * In case alloc_cpumask_var() fails, bind it to cpu 0.
*/
channel->numa_node = 0;
channel->target_cpu = 0;
@@ -653,7 +643,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
cpumask_clear(alloced_mask);
}
- cpumask_xor(&available_mask, alloced_mask,
+ cpumask_xor(available_mask, alloced_mask,
cpumask_of_node(primary->numa_node));
cur_cpu = -1;
@@ -671,10 +661,10 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
}
while (true) {
- cur_cpu = cpumask_next(cur_cpu, &available_mask);
+ cur_cpu = cpumask_next(cur_cpu, available_mask);
if (cur_cpu >= nr_cpu_ids) {
cur_cpu = -1;
- cpumask_copy(&available_mask,
+ cpumask_copy(available_mask,
cpumask_of_node(primary->numa_node));
continue;
}
@@ -704,6 +694,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
channel->target_cpu = cur_cpu;
channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
+
+ free_cpumask_var(available_mask);
}
static void vmbus_wait_for_unload(void)
@@ -943,7 +935,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* The channel is currently not open;
* it is safe for us to cleanup the channel.
*/
- hv_process_channel_removal(rescind->child_relid);
+ hv_process_channel_removal(channel);
} else {
complete(&channel->rescind_event);
}
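
Moving init_vp_index() from a struct cpumask on the stack to cpumask_var_t matters on large-NR_CPUS builds, where the mask can run to kilobytes; with CONFIG_CPUMASK_OFFSTACK the variable is a pointer that must be allocated and freed, and the allocation-failure path above deliberately falls back to CPU 0. The idiom in isolation:

    #include <linux/cpumask.h>

    static int demo_pick_cpu(void)
    {
            cpumask_var_t mask;
            int cpu;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return 0;                       /* fall back to CPU 0 */

            cpumask_copy(mask, cpu_online_mask);
            cpu = cpumask_first(mask);              /* any policy goes here */

            free_cpumask_var(mask);                 /* no-op when on-stack */
            return cpu;
    }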
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 748a1c4172a6..332d7c34be5c 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -189,6 +189,17 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
int hv_synic_alloc(void)
{
int cpu;
+ struct hv_per_cpu_context *hv_cpu;
+
+ /*
+ * First, zero all per-cpu memory areas so hv_synic_free() can
+ * detect what memory has been allocated and cleanup properly
+ * after any failures.
+ */
+ for_each_present_cpu(cpu) {
+ hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
+ memset(hv_cpu, 0, sizeof(*hv_cpu));
+ }
hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
GFP_KERNEL);
@@ -198,10 +209,8 @@ int hv_synic_alloc(void)
}
for_each_present_cpu(cpu) {
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
- memset(hv_cpu, 0, sizeof(*hv_cpu));
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b1b788082793..41631512ae97 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -689,7 +689,7 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
__online_page_increment_counters(pg);
__online_page_free(pg);
- WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
+ lockdep_assert_held(&dm_device.ha_lock);
dm_device.num_pages_onlined++;
}
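
lockdep_assert_held() is the stronger assertion here: spin_is_locked() only reports that somebody holds the lock (and is constant-false on !CONFIG_SMP), whereas lockdep checks that the current context holds it and compiles away entirely without CONFIG_LOCKDEP. In miniature:

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static unsigned long demo_count;

    /* Caller must hold demo_lock; lockdep enforces this on debug builds. */
    static void demo_count_inc(void)
    {
            lockdep_assert_held(&demo_lock);
            demo_count++;
    }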
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 5eed1e7da15c..a7513a8a8e37 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -353,7 +353,6 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
- default:
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
MAX_ADAPTER_ID_SIZE,
UTF16_LITTLE_ENDIAN,
@@ -406,7 +405,7 @@ kvp_send_key(struct work_struct *dummy)
process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
break;
case KVP_OP_GET_IP_INFO:
- process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
+ /* We only need to pass on message->kvp_hdr.operation. */
break;
case KVP_OP_SET:
switch (in_msg->body.kvp_set.data.value_type) {
@@ -421,7 +420,7 @@ kvp_send_key(struct work_struct *dummy)
UTF16_LITTLE_ENDIAN,
message->body.kvp_set.data.value,
HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1) + 1;
- break;
+ break;
case REG_U32:
/*
@@ -446,6 +445,9 @@ kvp_send_key(struct work_struct *dummy)
break;
}
+
+ break;
+
case KVP_OP_GET:
message->body.kvp_set.data.key_size =
utf16s_to_utf8s(
@@ -454,7 +456,7 @@ kvp_send_key(struct work_struct *dummy)
UTF16_LITTLE_ENDIAN,
message->body.kvp_set.data.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
- break;
+ break;
case KVP_OP_DELETE:
message->body.kvp_delete.key_size =
@@ -464,12 +466,12 @@ kvp_send_key(struct work_struct *dummy)
UTF16_LITTLE_ENDIAN,
message->body.kvp_delete.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
- break;
+ break;
case KVP_OP_ENUMERATE:
message->body.kvp_enum_data.index =
in_msg->body.kvp_enum_data.index;
- break;
+ break;
}
kvp_transaction.state = HVUTIL_USERSPACE_REQ;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 3e90eb91db45..64d0c85d5161 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -241,6 +241,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
vunmap(ring_info->ring_buffer);
+ ring_info->ring_buffer = NULL;
}
/* Write to the ring buffer. */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index c71cc857b649..283d184280af 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -498,6 +498,54 @@ static ssize_t device_show(struct device *dev,
}
static DEVICE_ATTR_RO(device);
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ char *driver_override, *old, *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = hv_dev->driver_override;
+ if (strlen(driver_override)) {
+ hv_dev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ hv_dev->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
+ device_unlock(dev);
+
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_id.attr,
@@ -528,6 +576,7 @@ static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_channel_vp_mapping.attr,
&dev_attr_vendor.attr,
&dev_attr_device.attr,
+ &dev_attr_driver_override.attr,
NULL,
};
ATTRIBUTE_GROUPS(vmbus_dev);
@@ -563,17 +612,26 @@ static inline bool is_null_guid(const uuid_le *guid)
return true;
}
-/*
- * Return a matching hv_vmbus_device_id pointer.
- * If there is no match, return NULL.
- */
-static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
- const uuid_le *guid)
+static const struct hv_vmbus_device_id *
+hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const uuid_le *guid)
+{
+ if (id == NULL)
+ return NULL; /* empty device table */
+
+ for (; !is_null_guid(&id->guid); id++)
+ if (!uuid_le_cmp(id->guid, *guid))
+ return id;
+
+ return NULL;
+}
+
+static const struct hv_vmbus_device_id *
+hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid)
{
const struct hv_vmbus_device_id *id = NULL;
struct vmbus_dynid *dynid;
- /* Look at the dynamic ids first, before the static ones */
spin_lock(&drv->dynids.lock);
list_for_each_entry(dynid, &drv->dynids.list, node) {
if (!uuid_le_cmp(dynid->id.guid, *guid)) {
@@ -583,18 +641,37 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
}
spin_unlock(&drv->dynids.lock);
- if (id)
- return id;
+ return id;
+}
- id = drv->id_table;
- if (id == NULL)
- return NULL; /* empty device table */
+static const struct hv_vmbus_device_id vmbus_device_null = {
+ .guid = NULL_UUID_LE,
+};
- for (; !is_null_guid(&id->guid); id++)
- if (!uuid_le_cmp(id->guid, *guid))
- return id;
+/*
+ * Return a matching hv_vmbus_device_id pointer.
+ * If there is no match, return NULL.
+ */
+static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
+ struct hv_device *dev)
+{
+ const uuid_le *guid = &dev->dev_type;
+ const struct hv_vmbus_device_id *id;
- return NULL;
+ /* When driver_override is set, only bind to the matching driver */
+ if (dev->driver_override && strcmp(dev->driver_override, drv->name))
+ return NULL;
+
+ /* Look at the dynamic ids first, before the static ones */
+ id = hv_vmbus_dynid_match(drv, guid);
+ if (!id)
+ id = hv_vmbus_dev_match(drv->id_table, guid);
+
+ /* driver_override will always match, send a dummy id */
+ if (!id && dev->driver_override)
+ id = &vmbus_device_null;
+
+ return id;
}
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
@@ -643,7 +720,7 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
if (retval)
return retval;
- if (hv_vmbus_get_id(drv, &guid))
+ if (hv_vmbus_dynid_match(drv, &guid))
return -EEXIST;
retval = vmbus_add_dynid(drv, &guid);
@@ -708,7 +785,7 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
if (is_hvsock_channel(hv_dev->channel))
return drv->hvsock;
- if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
+ if (hv_vmbus_get_id(drv, hv_dev))
return 1;
return 0;
@@ -725,7 +802,7 @@ static int vmbus_probe(struct device *child_device)
struct hv_device *dev = device_to_hv_device(child_device);
const struct hv_vmbus_device_id *dev_id;
- dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
+ dev_id = hv_vmbus_get_id(drv, dev);
if (drv->probe) {
ret = drv->probe(dev, dev_id);
if (ret != 0)
@@ -787,10 +864,9 @@ static void vmbus_device_release(struct device *device)
struct vmbus_channel *channel = hv_dev->channel;
mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(channel->offermsg.child_relid);
+ hv_process_channel_removal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
kfree(hv_dev);
-
}
/* The one and only one */
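
The driver_override attribute mirrors the PCI and platform-bus equivalents: writing a driver name pins the device to that driver, writing an empty string clears the override, and hv_vmbus_get_id() hands probe the all-zero vmbus_device_null entry when the override matches but no id table does. From userspace the usual sequence is to write the driver name to /sys/bus/vmbus/devices/<device-uuid>/driver_override and then cycle the device through the old driver's unbind and the new driver's bind files, e.g. to hand a NIC over to a generic UIO driver.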
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index ff94e58845b7..170fbb66bda2 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -406,6 +406,7 @@ static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
{
+ int rc;
u32 control, mode;
struct etr_buf *etr_buf = data;
@@ -418,6 +419,10 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
return -EBUSY;
}
+ rc = coresight_claim_device_unlocked(drvdata->base);
+ if (rc)
+ return rc;
+
control |= BIT(CATU_CONTROL_ENABLE);
if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
@@ -459,6 +464,7 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
int rc = 0;
catu_write_control(drvdata, 0);
+ coresight_disclaim_device_unlocked(drvdata->base);
if (catu_wait_for_ready(drvdata)) {
dev_info(drvdata->dev, "Timeout while waiting for READY\n");
rc = -EAGAIN;
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index f6d0571ab9dd..299667b887fc 100644
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -34,48 +34,87 @@ struct replicator_state {
struct coresight_device *csdev;
};
+/*
+ * replicator_reset() - Reset the replicator configuration to sane values.
+ */
+static void replicator_reset(struct replicator_state *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ if (!coresight_claim_device_unlocked(drvdata->base)) {
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
+ coresight_disclaim_device_unlocked(drvdata->base);
+ }
+
+ CS_LOCK(drvdata->base);
+}
+
static int replicator_enable(struct coresight_device *csdev, int inport,
int outport)
{
+ int rc = 0;
+ u32 reg;
struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
+ switch (outport) {
+ case 0:
+ reg = REPLICATOR_IDFILTER0;
+ break;
+ case 1:
+ reg = REPLICATOR_IDFILTER1;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
CS_UNLOCK(drvdata->base);
- /*
- * Ensure that the other port is disabled
- * 0x00 - passing through the replicator unimpeded
- * 0xff - disable (or impede) the flow of ATB data
- */
- if (outport == 0) {
- writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER0);
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
- } else {
- writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER1);
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
+ if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
+ (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+ rc = coresight_claim_device_unlocked(drvdata->base);
+
+ /* Ensure that the outport is enabled. */
+ if (!rc) {
+ writel_relaxed(0x00, drvdata->base + reg);
+ dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
}
CS_LOCK(drvdata->base);
- dev_info(drvdata->dev, "REPLICATOR enabled\n");
- return 0;
+ return rc;
}
static void replicator_disable(struct coresight_device *csdev, int inport,
int outport)
{
+ u32 reg;
struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
+ switch (outport) {
+ case 0:
+ reg = REPLICATOR_IDFILTER0;
+ break;
+ case 1:
+ reg = REPLICATOR_IDFILTER1;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
CS_UNLOCK(drvdata->base);
/* disable the flow of ATB data through port */
- if (outport == 0)
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
- else
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
+ writel_relaxed(0xff, drvdata->base + reg);
+ if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
+ (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+ coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
- dev_info(drvdata->dev, "REPLICATOR disabled\n");
+ dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
}
static const struct coresight_ops_link replicator_link_ops = {
@@ -156,7 +195,11 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
desc.groups = replicator_groups;
drvdata->csdev = coresight_register(&desc);
- return PTR_ERR_OR_ZERO(drvdata->csdev);
+ if (!IS_ERR(drvdata->csdev)) {
+ replicator_reset(drvdata);
+ return 0;
+ }
+ return PTR_ERR(drvdata->csdev);
}
#ifdef CONFIG_PM
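
coresight_claim_device_unlocked()/coresight_disclaim_device_unlocked() implement the CoreSight claim-tag handshake (the CLAIMSET/CLAIMCLR registers) so a self-hosted driver and an external debugger don't program the same component concurrently; the _unlocked variants assume the caller already did CS_UNLOCK(). A sketch of the enable side under those assumptions:

    static int demo_enable_hw(void __iomem *base)
    {
            int rc;

            CS_UNLOCK(base);

            rc = coresight_claim_device_unlocked(base);
            if (rc)
                    goto out;       /* an external agent already owns it */

            /* ... program the component's control registers here ... */
    out:
            CS_LOCK(base);
            return rc;
    }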
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 306119eaf16a..824be0c5f592 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -5,7 +5,6 @@
* Description: CoreSight Embedded Trace Buffer driver
*/
-#include <asm/local.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -28,6 +27,7 @@
#include "coresight-priv.h"
+#include "coresight-etm-perf.h"
#define ETB_RAM_DEPTH_REG 0x004
#define ETB_STATUS_REG 0x00c
@@ -71,8 +71,8 @@
* @miscdev: specifics to handle "/dev/xyz.etb" entry.
* @spinlock: only one at a time pls.
* @reading: synchronise user space access to etb buffer.
- * @mode: this ETB is being used.
* @buf: area of memory where ETB buffer content gets sent.
+ * @mode: this ETB is being used.
* @buffer_depth: size of @buf.
* @trigger_cntr: amount of words to store after a trigger.
*/
@@ -84,12 +84,15 @@ struct etb_drvdata {
struct miscdevice miscdev;
spinlock_t spinlock;
local_t reading;
- local_t mode;
u8 *buf;
+ u32 mode;
u32 buffer_depth;
u32 trigger_cntr;
};
+static int etb_set_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle);
+
static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
u32 depth = 0;
@@ -103,7 +106,7 @@ static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
return depth;
}
-static void etb_enable_hw(struct etb_drvdata *drvdata)
+static void __etb_enable_hw(struct etb_drvdata *drvdata)
{
int i;
u32 depth;
@@ -131,32 +134,92 @@ static void etb_enable_hw(struct etb_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int etb_enable(struct coresight_device *csdev, u32 mode)
+static int etb_enable_hw(struct etb_drvdata *drvdata)
+{
+ __etb_enable_hw(drvdata);
+ return 0;
+}
+
+static int etb_enable_sysfs(struct coresight_device *csdev)
{
- u32 val;
+ int ret = 0;
unsigned long flags;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- val = local_cmpxchg(&drvdata->mode,
- CS_MODE_DISABLED, mode);
- /*
- * When accessing from Perf, a HW buffer can be handled
- * by a single trace entity. In sysFS mode many tracers
- * can be logging to the same HW buffer.
- */
- if (val == CS_MODE_PERF)
- return -EBUSY;
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* Don't mess with perf sessions. */
+ if (drvdata->mode == CS_MODE_PERF) {
+ ret = -EBUSY;
+ goto out;
+ }
/* Nothing to do, the tracer is already enabled. */
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
goto out;
- spin_lock_irqsave(&drvdata->spinlock, flags);
- etb_enable_hw(drvdata);
+ ret = etb_enable_hw(drvdata);
+ if (!ret)
+ drvdata->mode = CS_MODE_SYSFS;
+
+out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return ret;
+}
+
+static int etb_enable_perf(struct coresight_device *csdev, void *data)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* No need to continue if the component is already in use. */
+ if (drvdata->mode != CS_MODE_DISABLED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * We don't have internal state to clean up if we fail to set up
+ * the perf buffer. So we can perform this step before we turn the
+ * ETB on and leave without cleaning up.
+ */
+ ret = etb_set_buffer(csdev, (struct perf_output_handle *)data);
+ if (ret)
+ goto out;
+
+ ret = etb_enable_hw(drvdata);
+ if (!ret)
+ drvdata->mode = CS_MODE_PERF;
out:
- dev_info(drvdata->dev, "ETB enabled\n");
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return ret;
+}
+
+static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
+{
+ int ret;
+ struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ switch (mode) {
+ case CS_MODE_SYSFS:
+ ret = etb_enable_sysfs(csdev);
+ break;
+ case CS_MODE_PERF:
+ ret = etb_enable_perf(csdev, data);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ dev_dbg(drvdata->dev, "ETB enabled\n");
return 0;
}
@@ -256,13 +319,16 @@ static void etb_disable(struct coresight_device *csdev)
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
- etb_disable_hw(drvdata);
- etb_dump_hw(drvdata);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ /* Disable the ETB only if it needs to */
+ if (drvdata->mode != CS_MODE_DISABLED) {
+ etb_disable_hw(drvdata);
+ etb_dump_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "ETB disabled\n");
+ dev_dbg(drvdata->dev, "ETB disabled\n");
}
static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
@@ -294,12 +360,14 @@ static void etb_free_buffer(void *config)
}
static int etb_set_buffer(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config)
+ struct perf_output_handle *handle)
{
int ret = 0;
unsigned long head;
- struct cs_buffers *buf = sink_config;
+ struct cs_buffers *buf = etm_perf_sink_config(handle);
+
+ if (!buf)
+ return -EINVAL;
/* wrap head around to the amount of space we have */
head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
@@ -315,37 +383,7 @@ static int etb_set_buffer(struct coresight_device *csdev,
return ret;
}
-static unsigned long etb_reset_buffer(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config)
-{
- unsigned long size = 0;
- struct cs_buffers *buf = sink_config;
-
- if (buf) {
- /*
- * In snapshot mode ->data_size holds the new address of the
- * ring buffer's head. The size itself is the whole address
- * range since we want the latest information.
- */
- if (buf->snapshot)
- handle->head = local_xchg(&buf->data_size,
- buf->nr_pages << PAGE_SHIFT);
-
- /*
- * Tell the tracer PMU how much we got in this run and if
- * something went wrong along the way. Nobody else can use
- * this cs_buffers instance until we are done. As such
- * resetting parameters here and squaring off with the ring
- * buffer API in the tracer PMU is fine.
- */
- size = local_xchg(&buf->data_size, 0);
- }
-
- return size;
-}
-
-static void etb_update_buffer(struct coresight_device *csdev,
+static unsigned long etb_update_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
@@ -354,13 +392,13 @@ static void etb_update_buffer(struct coresight_device *csdev,
u8 *buf_ptr;
const u32 *barrier;
u32 read_ptr, write_ptr, capacity;
- u32 status, read_data, to_read;
- unsigned long offset;
+ u32 status, read_data;
+ unsigned long offset, to_read;
struct cs_buffers *buf = sink_config;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (!buf)
- return;
+ return 0;
capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
@@ -465,18 +503,17 @@ static void etb_update_buffer(struct coresight_device *csdev,
writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
/*
- * In snapshot mode all we have to do is communicate to
- * perf_aux_output_end() the address of the current head. In full
- * trace mode the same function expects a size to move rb->aux_head
- * forward.
+ * In snapshot mode we have to update the handle->head to point
+ * to the new location.
*/
- if (buf->snapshot)
- local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
- else
- local_add(to_read, &buf->data_size);
-
+ if (buf->snapshot) {
+ handle->head = (cur * PAGE_SIZE) + offset;
+ to_read = buf->nr_pages << PAGE_SHIFT;
+ }
etb_enable_hw(drvdata);
CS_LOCK(drvdata->base);
+
+ return to_read;
}
static const struct coresight_ops_sink etb_sink_ops = {
@@ -484,8 +521,6 @@ static const struct coresight_ops_sink etb_sink_ops = {
.disable = etb_disable,
.alloc_buffer = etb_alloc_buffer,
.free_buffer = etb_free_buffer,
- .set_buffer = etb_set_buffer,
- .reset_buffer = etb_reset_buffer,
.update_buffer = etb_update_buffer,
};
@@ -498,14 +533,14 @@ static void etb_dump(struct etb_drvdata *drvdata)
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ if (drvdata->mode == CS_MODE_SYSFS) {
etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
etb_enable_hw(drvdata);
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "ETB dumped\n");
+ dev_dbg(drvdata->dev, "ETB dumped\n");
}
static int etb_open(struct inode *inode, struct file *file)
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 677695635211..abe8249b893b 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
+#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -22,20 +23,6 @@
static struct pmu etm_pmu;
static bool etm_perf_up;
-/**
- * struct etm_event_data - Coresight specifics associated to an event
- * @work: Handle to free allocated memory outside IRQ context.
- * @mask: Hold the CPU(s) this event was set for.
- * @snk_config: The sink configuration.
- * @path: An array of path, each slot for one CPU.
- */
-struct etm_event_data {
- struct work_struct work;
- cpumask_t mask;
- void *snk_config;
- struct list_head **path;
-};
-
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
@@ -61,6 +48,18 @@ static const struct attribute_group *etm_pmu_attr_groups[] = {
NULL,
};
+static inline struct list_head **
+etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
+{
+ return per_cpu_ptr(data->path, cpu);
+}
+
+static inline struct list_head *
+etm_event_cpu_path(struct etm_event_data *data, int cpu)
+{
+ return *etm_event_cpu_path_ptr(data, cpu);
+}
+
static void etm_event_read(struct perf_event *event) {}
static int etm_addr_filters_alloc(struct perf_event *event)
@@ -114,29 +113,30 @@ static void free_event_data(struct work_struct *work)
event_data = container_of(work, struct etm_event_data, work);
mask = &event_data->mask;
- /*
- * First deal with the sink configuration. See comment in
- * etm_setup_aux() about why we take the first available path.
- */
- if (event_data->snk_config) {
+
+ /* Free the sink buffers, if there are any */
+ if (event_data->snk_config && !WARN_ON(cpumask_empty(mask))) {
cpu = cpumask_first(mask);
- sink = coresight_get_sink(event_data->path[cpu]);
+ sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
if (sink_ops(sink)->free_buffer)
sink_ops(sink)->free_buffer(event_data->snk_config);
}
for_each_cpu(cpu, mask) {
- if (!(IS_ERR_OR_NULL(event_data->path[cpu])))
- coresight_release_path(event_data->path[cpu]);
+ struct list_head **ppath;
+
+ ppath = etm_event_cpu_path_ptr(event_data, cpu);
+ if (!(IS_ERR_OR_NULL(*ppath)))
+ coresight_release_path(*ppath);
+ *ppath = NULL;
}
- kfree(event_data->path);
+ free_percpu(event_data->path);
kfree(event_data);
}
static void *alloc_event_data(int cpu)
{
- int size;
cpumask_t *mask;
struct etm_event_data *event_data;
@@ -145,16 +145,12 @@ static void *alloc_event_data(int cpu)
if (!event_data)
return NULL;
- /* Make sure nothing disappears under us */
- get_online_cpus();
- size = num_online_cpus();
mask = &event_data->mask;
if (cpu != -1)
cpumask_set_cpu(cpu, mask);
else
- cpumask_copy(mask, cpu_online_mask);
- put_online_cpus();
+ cpumask_copy(mask, cpu_present_mask);
/*
* Each CPU has a single path between source and destination. As such
@@ -164,8 +160,8 @@ static void *alloc_event_data(int cpu)
* unused memory when dealing with single CPU trace scenarios is small
* compared to the cost of searching through an optimized array.
*/
- event_data->path = kcalloc(size,
- sizeof(struct list_head *), GFP_KERNEL);
+ event_data->path = alloc_percpu(struct list_head *);
+
if (!event_data->path) {
kfree(event_data);
return NULL;
@@ -206,34 +202,53 @@ static void *etm_setup_aux(int event_cpu, void **pages,
* on the cmd line. As such the "enable_sink" flag in sysFS is reset.
*/
sink = coresight_get_enabled_sink(true);
- if (!sink)
+ if (!sink || !sink_ops(sink)->alloc_buffer)
goto err;
mask = &event_data->mask;
- /* Setup the path for each CPU in a trace session */
+ /*
+ * Set up the path for each CPU in the trace session. We try to build
+ * a trace path for every CPU in the mask. If we don't find an ETM
+ * for a CPU, or fail to build a path for it, we clear that CPU from
+ * the mask and continue with the rest. If we are later asked to
+ * trace on one of those CPUs, we can detect it and fail the session.
+ */
for_each_cpu(cpu, mask) {
+ struct list_head *path;
struct coresight_device *csdev;
csdev = per_cpu(csdev_src, cpu);
- if (!csdev)
- goto err;
+ /*
+ * If there is no ETM associated with this CPU, clear it from
+ * the mask and continue with the rest. If we are ever asked to
+ * trace on this CPU, we handle it accordingly.
+ */
+ if (!csdev) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
/*
* Building a path doesn't enable it, it simply builds a
* list of devices from source to sink that can be
* referenced later when the path is actually needed.
*/
- event_data->path[cpu] = coresight_build_path(csdev, sink);
- if (IS_ERR(event_data->path[cpu]))
- goto err;
+ path = coresight_build_path(csdev, sink);
+ if (IS_ERR(path)) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
+
+ *etm_event_cpu_path_ptr(event_data, cpu) = path;
}
- if (!sink_ops(sink)->alloc_buffer)
+ /* If we don't have any CPUs ready for tracing, abort */
+ cpu = cpumask_first(mask);
+ if (cpu >= nr_cpu_ids)
goto err;
- cpu = cpumask_first(mask);
- /* Get the AUX specific data from the sink buffer */
+ /* Allocate the sink buffer for this session */
event_data->snk_config =
sink_ops(sink)->alloc_buffer(sink, cpu, pages,
nr_pages, overwrite);
@@ -255,6 +270,7 @@ static void etm_event_start(struct perf_event *event, int flags)
struct etm_event_data *event_data;
struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+ struct list_head *path;
if (!csdev)
goto fail;
@@ -267,18 +283,14 @@ static void etm_event_start(struct perf_event *event, int flags)
if (!event_data)
goto fail;
+ path = etm_event_cpu_path(event_data, cpu);
/* We need a sink, no need to continue without one */
- sink = coresight_get_sink(event_data->path[cpu]);
- if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
- goto fail_end_stop;
-
- /* Configure the sink */
- if (sink_ops(sink)->set_buffer(sink, handle,
- event_data->snk_config))
+ sink = coresight_get_sink(path);
+ if (WARN_ON_ONCE(!sink))
goto fail_end_stop;
/* Nothing will happen without a path */
- if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
+ if (coresight_enable_path(path, CS_MODE_PERF, handle))
goto fail_end_stop;
/* Tell the perf core the event is alive */
@@ -286,11 +298,13 @@ static void etm_event_start(struct perf_event *event, int flags)
/* Finally enable the tracer */
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
- goto fail_end_stop;
+ goto fail_disable_path;
out:
return;
+fail_disable_path:
+ coresight_disable_path(path);
fail_end_stop:
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
perf_aux_output_end(handle, 0);
@@ -306,6 +320,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
struct etm_event_data *event_data = perf_get_aux(handle);
+ struct list_head *path;
if (event->hw.state == PERF_HES_STOPPED)
return;
@@ -313,7 +328,11 @@ static void etm_event_stop(struct perf_event *event, int mode)
if (!csdev)
return;
- sink = coresight_get_sink(event_data->path[cpu]);
+ path = etm_event_cpu_path(event_data, cpu);
+ if (!path)
+ return;
+
+ sink = coresight_get_sink(path);
if (!sink)
return;
@@ -331,20 +350,13 @@ static void etm_event_stop(struct perf_event *event, int mode)
if (!sink_ops(sink)->update_buffer)
return;
- sink_ops(sink)->update_buffer(sink, handle,
+ size = sink_ops(sink)->update_buffer(sink, handle,
event_data->snk_config);
-
- if (!sink_ops(sink)->reset_buffer)
- return;
-
- size = sink_ops(sink)->reset_buffer(sink, handle,
- event_data->snk_config);
-
perf_aux_output_end(handle, size);
}
/* Disabling the path makes its elements available to other sessions */
- coresight_disable_path(event_data->path[cpu]);
+ coresight_disable_path(path);
}
static int etm_event_add(struct perf_event *event, int mode)
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index 4197df4faf5e..da7d9336a15c 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -7,6 +7,7 @@
#ifndef _CORESIGHT_ETM_PERF_H
#define _CORESIGHT_ETM_PERF_H
+#include <linux/percpu-defs.h>
#include "coresight-priv.h"
struct coresight_device;
@@ -42,14 +43,39 @@ struct etm_filters {
bool ssstatus;
};
+/**
+ * struct etm_event_data - Coresight specifics associated to an event
+ * @work: Handle to free allocated memory outside IRQ context.
+ * @mask: Hold the CPU(s) this event was set for.
+ * @snk_config: The sink configuration.
+ * @path: An array of paths, one per CPU.
+ */
+struct etm_event_data {
+ struct work_struct work;
+ cpumask_t mask;
+ void *snk_config;
+ struct list_head * __percpu *path;
+};
#ifdef CONFIG_CORESIGHT
int etm_perf_symlink(struct coresight_device *csdev, bool link);
+static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
+{
+ struct etm_event_data *data = perf_get_aux(handle);
+ if (data)
+ return data->snk_config;
+ return NULL;
+}
#else
static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
{ return -EINVAL; }
+static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
+{
+ return NULL;
+}
+
#endif /* CONFIG_CORESIGHT */
#endif
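/*
 * Editorial sketch (not part of the patch): a sink driver can now
 * recover its buffer from the perf handle alone, which is what the
 * reworked etb_set_buffer()/tmc_set_etf_buffer() rely on:
 *
 *	struct cs_buffers *buf = etm_perf_sink_config(handle);
 *
 *	if (!buf)
 *		return -EINVAL;
 */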
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 7c74263c333d..fd5c4cca7db5 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -355,11 +355,10 @@ static int etm_parse_event_config(struct etm_drvdata *drvdata,
return 0;
}
-static void etm_enable_hw(void *info)
+static int etm_enable_hw(struct etm_drvdata *drvdata)
{
- int i;
+ int i, rc;
u32 etmcr;
- struct etm_drvdata *drvdata = info;
struct etm_config *config = &drvdata->config;
CS_UNLOCK(drvdata->base);
@@ -370,6 +369,9 @@ static void etm_enable_hw(void *info)
etm_set_pwrup(drvdata);
/* Make sure all registers are accessible */
etm_os_unlock(drvdata);
+ rc = coresight_claim_device_unlocked(drvdata->base);
+ if (rc)
+ goto done;
etm_set_prog(drvdata);
@@ -418,9 +420,29 @@ static void etm_enable_hw(void *info)
etm_writel(drvdata, 0x0, ETMVMIDCVR);
etm_clr_prog(drvdata);
+
+done:
+ if (rc)
+ etm_set_pwrdwn(drvdata);
CS_LOCK(drvdata->base);
- dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
+ dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n",
+ drvdata->cpu, rc);
+ return rc;
+}
+
+struct etm_enable_arg {
+ struct etm_drvdata *drvdata;
+ int rc;
+};
+
+static void etm_enable_hw_smp_call(void *info)
+{
+ struct etm_enable_arg *arg = info;
+
+ if (WARN_ON(!arg))
+ return;
+ arg->rc = etm_enable_hw(arg->drvdata);
}
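/*
 * Editorial note: smp_call_function_single() callbacks return void,
 * so the result of etm_enable_hw() has to travel back through the
 * info pointer. The calling pattern used below (and in etm4x) is:
 *
 *	struct etm_enable_arg arg = { .drvdata = drvdata };
 *
 *	ret = smp_call_function_single(cpu, etm_enable_hw_smp_call,
 *				       &arg, 1);
 *	if (!ret)
 *		ret = arg.rc;
 */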
static int etm_cpu_id(struct coresight_device *csdev)
@@ -475,14 +497,13 @@ static int etm_enable_perf(struct coresight_device *csdev,
/* Configure the tracer based on the session's specifics */
etm_parse_event_config(drvdata, event);
/* And enable it */
- etm_enable_hw(drvdata);
-
- return 0;
+ return etm_enable_hw(drvdata);
}
static int etm_enable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etm_enable_arg arg = { 0 };
int ret;
spin_lock(&drvdata->spinlock);
@@ -492,20 +513,21 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
* hw configuration will take place on the local CPU during bring up.
*/
if (cpu_online(drvdata->cpu)) {
+ arg.drvdata = drvdata;
ret = smp_call_function_single(drvdata->cpu,
- etm_enable_hw, drvdata, 1);
- if (ret)
- goto err;
+ etm_enable_hw_smp_call, &arg, 1);
+ if (!ret)
+ ret = arg.rc;
+ if (!ret)
+ drvdata->sticky_enable = true;
+ } else {
+ ret = -ENODEV;
}
- drvdata->sticky_enable = true;
spin_unlock(&drvdata->spinlock);
- dev_info(drvdata->dev, "ETM tracing enabled\n");
- return 0;
-
-err:
- spin_unlock(&drvdata->spinlock);
+ if (!ret)
+ dev_dbg(drvdata->dev, "ETM tracing enabled\n");
return ret;
}
@@ -555,6 +577,8 @@ static void etm_disable_hw(void *info)
for (i = 0; i < drvdata->nr_cntr; i++)
config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
+ coresight_disclaim_device_unlocked(drvdata->base);
+
etm_set_pwrdwn(drvdata);
CS_LOCK(drvdata->base);
@@ -604,7 +628,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
- dev_info(drvdata->dev, "ETM tracing disabled\n");
+ dev_dbg(drvdata->dev, "ETM tracing disabled\n");
}
static void etm_disable(struct coresight_device *csdev,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 1d94ebec027b..53e2fb6e86f6 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <asm/sections.h>
#include <asm/local.h>
+#include <asm/virt.h>
#include "coresight-etm4x.h"
#include "coresight-etm-perf.h"
@@ -77,16 +78,24 @@ static int etm4_trace_id(struct coresight_device *csdev)
return drvdata->trcid;
}
-static void etm4_enable_hw(void *info)
+struct etm4_enable_arg {
+ struct etmv4_drvdata *drvdata;
+ int rc;
+};
+
+static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
- int i;
- struct etmv4_drvdata *drvdata = info;
+ int i, rc;
struct etmv4_config *config = &drvdata->config;
CS_UNLOCK(drvdata->base);
etm4_os_unlock(drvdata);
+ rc = coresight_claim_device_unlocked(drvdata->base);
+ if (rc)
+ goto done;
+
/* Disable the trace unit before programming trace registers */
writel_relaxed(0, drvdata->base + TRCPRGCTLR);
@@ -174,9 +183,21 @@ static void etm4_enable_hw(void *info)
dev_err(drvdata->dev,
"timeout while waiting for Idle Trace Status\n");
+done:
CS_LOCK(drvdata->base);
- dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
+ dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n",
+ drvdata->cpu, rc);
+ return rc;
+}
+
+static void etm4_enable_hw_smp_call(void *info)
+{
+ struct etm4_enable_arg *arg = info;
+
+ if (WARN_ON(!arg))
+ return;
+ arg->rc = etm4_enable_hw(arg->drvdata);
}
static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
@@ -242,7 +263,7 @@ static int etm4_enable_perf(struct coresight_device *csdev,
if (ret)
goto out;
/* And enable it */
- etm4_enable_hw(drvdata);
+ ret = etm4_enable_hw(drvdata);
out:
return ret;
@@ -251,6 +272,7 @@ out:
static int etm4_enable_sysfs(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etm4_enable_arg arg = { 0 };
int ret;
spin_lock(&drvdata->spinlock);
@@ -259,19 +281,17 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
* Executing etm4_enable_hw on the cpu whose ETM is being enabled
* ensures that register writes occur when cpu is powered.
*/
+ arg.drvdata = drvdata;
ret = smp_call_function_single(drvdata->cpu,
- etm4_enable_hw, drvdata, 1);
- if (ret)
- goto err;
-
- drvdata->sticky_enable = true;
+ etm4_enable_hw_smp_call, &arg, 1);
+ if (!ret)
+ ret = arg.rc;
+ if (!ret)
+ drvdata->sticky_enable = true;
spin_unlock(&drvdata->spinlock);
- dev_info(drvdata->dev, "ETM tracing enabled\n");
- return 0;
-
-err:
- spin_unlock(&drvdata->spinlock);
+ if (!ret)
+ dev_dbg(drvdata->dev, "ETM tracing enabled\n");
return ret;
}
@@ -328,6 +348,8 @@ static void etm4_disable_hw(void *info)
isb();
writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+ coresight_disclaim_device_unlocked(drvdata->base);
+
CS_LOCK(drvdata->base);
dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
@@ -380,7 +402,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
- dev_info(drvdata->dev, "ETM tracing disabled\n");
+ dev_dbg(drvdata->dev, "ETM tracing disabled\n");
}
static void etm4_disable(struct coresight_device *csdev,
@@ -605,7 +627,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
config->vinst_ctrl |= BIT(0);
}
-static u64 etm4_get_access_type(struct etmv4_config *config)
+static u64 etm4_get_ns_access_type(struct etmv4_config *config)
{
u64 access_type = 0;
@@ -616,17 +638,26 @@ static u64 etm4_get_access_type(struct etmv4_config *config)
* Bit[13] Exception level 1 - OS
* Bit[14] Exception level 2 - Hypervisor
* Bit[15] Never implemented
- *
- * Always stay away from hypervisor mode.
*/
- access_type = ETM_EXLEVEL_NS_HYP;
-
- if (config->mode & ETM_MODE_EXCL_KERN)
- access_type |= ETM_EXLEVEL_NS_OS;
+ if (!is_kernel_in_hyp_mode()) {
+ /* Stay away from hypervisor mode for non-VHE */
+ access_type = ETM_EXLEVEL_NS_HYP;
+ if (config->mode & ETM_MODE_EXCL_KERN)
+ access_type |= ETM_EXLEVEL_NS_OS;
+ } else if (config->mode & ETM_MODE_EXCL_KERN) {
+ access_type = ETM_EXLEVEL_NS_HYP;
+ }
if (config->mode & ETM_MODE_EXCL_USER)
access_type |= ETM_EXLEVEL_NS_APP;
+ return access_type;
+}
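/*
 * Editorial example (values per the TRCVICTLR layout quoted above):
 * with ETM_MODE_EXCL_KERN set on a non-VHE kernel the function
 * returns ETM_EXLEVEL_NS_HYP | ETM_EXLEVEL_NS_OS (exclude EL2 and
 * EL1), while on a VHE kernel, where the kernel itself runs at EL2,
 * it returns only ETM_EXLEVEL_NS_HYP.
 */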
+
+static u64 etm4_get_access_type(struct etmv4_config *config)
+{
+ u64 access_type = etm4_get_ns_access_type(config);
+
/*
* EXLEVEL_S, bits[11:8], don't trace anything happening
* in secure state.
@@ -880,20 +911,10 @@ void etm4_config_trace_mode(struct etmv4_config *config)
addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
/* clear default config */
- addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS);
+ addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
+ ETM_EXLEVEL_NS_HYP);
- /*
- * EXLEVEL_NS, bits[15:12]
- * The Exception levels are:
- * Bit[12] Exception level 0 - Application
- * Bit[13] Exception level 1 - OS
- * Bit[14] Exception level 2 - Hypervisor
- * Bit[15] Never implemented
- */
- if (mode & ETM_MODE_EXCL_KERN)
- addr_acc |= ETM_EXLEVEL_NS_OS;
- else
- addr_acc |= ETM_EXLEVEL_NS_APP;
+ addr_acc |= etm4_get_ns_access_type(config);
config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 448145a36675..927925151509 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -25,6 +25,7 @@
#define FUNNEL_HOLDTIME_MASK 0xf00
#define FUNNEL_HOLDTIME_SHFT 0x8
#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
+#define FUNNEL_ENSx_MASK 0xff
/**
* struct funnel_drvdata - specifics associated to a funnel component
@@ -42,31 +43,42 @@ struct funnel_drvdata {
unsigned long priority;
};
-static void funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
+static int funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
{
u32 functl;
+ int rc = 0;
CS_UNLOCK(drvdata->base);
functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
+ /* Claim the device only when we enable the first slave */
+ if (!(functl & FUNNEL_ENSx_MASK)) {
+ rc = coresight_claim_device_unlocked(drvdata->base);
+ if (rc)
+ goto done;
+ }
+
functl &= ~FUNNEL_HOLDTIME_MASK;
functl |= FUNNEL_HOLDTIME;
functl |= (1 << port);
writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
writel_relaxed(drvdata->priority, drvdata->base + FUNNEL_PRICTL);
-
+done:
CS_LOCK(drvdata->base);
+ return rc;
}
static int funnel_enable(struct coresight_device *csdev, int inport,
int outport)
{
+ int rc;
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- funnel_enable_hw(drvdata, inport);
+ rc = funnel_enable_hw(drvdata, inport);
- dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
- return 0;
+ if (!rc)
+ dev_dbg(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
+ return rc;
}
static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
@@ -79,6 +91,10 @@ static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
functl &= ~(1 << inport);
writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
+ /* Disclaim the device if none of the slaves are now active */
+ if (!(functl & FUNNEL_ENSx_MASK))
+ coresight_disclaim_device_unlocked(drvdata->base);
+
CS_LOCK(drvdata->base);
}
@@ -89,7 +105,7 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
funnel_disable_hw(drvdata, inport);
- dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
+ dev_dbg(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
}
static const struct coresight_ops_link funnel_link_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 1a6cf3589866..579f34943bf1 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -25,6 +25,13 @@
#define CORESIGHT_DEVID 0xfc8
#define CORESIGHT_DEVTYPE 0xfcc
+
+/*
+ * Coresight device CLAIM protocol.
+ * See PSCI - ARM DEN 0022D, Section: 6.8.1 Debug and Trace save and restore.
+ */
+#define CORESIGHT_CLAIM_SELF_HOSTED BIT(1)
+
#define TIMEOUT_US 100
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
@@ -137,7 +144,7 @@ static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
}
void coresight_disable_path(struct list_head *path);
-int coresight_enable_path(struct list_head *path, u32 mode);
+int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *coresight_get_enabled_sink(bool reset);
struct list_head *coresight_build_path(struct coresight_device *csdev,
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 8d2eaaab6c2f..feac98315471 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -35,7 +35,7 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- dev_info(drvdata->dev, "REPLICATOR enabled\n");
+ dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
return 0;
}
@@ -44,7 +44,7 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- dev_info(drvdata->dev, "REPLICATOR disabled\n");
+ dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
}
static const struct coresight_ops_link replicator_link_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index c46c70aec1d5..35d6f9709274 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -211,7 +211,7 @@ static int stm_enable(struct coresight_device *csdev,
stm_enable_hw(drvdata);
spin_unlock(&drvdata->spinlock);
- dev_info(drvdata->dev, "STM tracing enabled\n");
+ dev_dbg(drvdata->dev, "STM tracing enabled\n");
return 0;
}
@@ -274,7 +274,7 @@ static void stm_disable(struct coresight_device *csdev,
pm_runtime_put(drvdata->dev);
local_set(&drvdata->mode, CS_MODE_DISABLED);
- dev_info(drvdata->dev, "STM tracing disabled\n");
+ dev_dbg(drvdata->dev, "STM tracing disabled\n");
}
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 0549249f4b39..53fc83b72a49 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -10,8 +10,12 @@
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
+#include "coresight-etm-perf.h"
-static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
+static int tmc_set_etf_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle);
+
+static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
@@ -30,33 +34,41 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
+{
+ int rc = coresight_claim_device(drvdata->base);
+
+ if (rc)
+ return rc;
+
+ __tmc_etb_enable_hw(drvdata);
+ return 0;
+}
+
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
char *bufp;
u32 read_data, lost;
- int i;
/* Check if the buffer wrapped around. */
lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
bufp = drvdata->buf;
drvdata->len = 0;
while (1) {
- for (i = 0; i < drvdata->memwidth; i++) {
- read_data = readl_relaxed(drvdata->base + TMC_RRD);
- if (read_data == 0xFFFFFFFF)
- goto done;
- memcpy(bufp, &read_data, 4);
- bufp += 4;
- drvdata->len += 4;
- }
+ read_data = readl_relaxed(drvdata->base + TMC_RRD);
+ if (read_data == 0xFFFFFFFF)
+ break;
+ memcpy(bufp, &read_data, 4);
+ bufp += 4;
+ drvdata->len += 4;
}
-done:
+
if (lost)
coresight_insert_barrier_packet(drvdata->buf);
return;
}
-static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
+static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
@@ -72,7 +84,13 @@ static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
+static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
+{
+ coresight_disclaim_device(drvdata->base);
+ __tmc_etb_disable_hw(drvdata);
+}
+
+static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
@@ -88,13 +106,24 @@ static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
+{
+ int rc = coresight_claim_device(drvdata->base);
+
+ if (rc)
+ return rc;
+
+ __tmc_etf_enable_hw(drvdata);
+ return 0;
+}
+
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
tmc_disable_hw(drvdata);
-
+ coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
}
@@ -170,8 +199,12 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
drvdata->buf = buf;
}
- drvdata->mode = CS_MODE_SYSFS;
- tmc_etb_enable_hw(drvdata);
+ ret = tmc_etb_enable_hw(drvdata);
+ if (!ret)
+ drvdata->mode = CS_MODE_SYSFS;
+ else
+ /* Free up the buffer if we failed to enable */
+ used = false;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -182,37 +215,40 @@ out:
return ret;
}
-static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
+static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct perf_output_handle *handle = data;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->reading) {
+ do {
ret = -EINVAL;
- goto out;
- }
-
- /*
- * In Perf mode there can be only one writer per sink. There
- * is also no need to continue if the ETB/ETR is already operated
- * from sysFS.
- */
- if (drvdata->mode != CS_MODE_DISABLED) {
- ret = -EINVAL;
- goto out;
- }
+ if (drvdata->reading)
+ break;
+ /*
+ * In Perf mode there can be only one writer per sink. There
+ * is also no need to continue if the ETB/ETF is already
+ * operated from sysFS.
+ */
+ if (drvdata->mode != CS_MODE_DISABLED)
+ break;
- drvdata->mode = CS_MODE_PERF;
- tmc_etb_enable_hw(drvdata);
-out:
+ ret = tmc_set_etf_buffer(csdev, handle);
+ if (ret)
+ break;
+ ret = tmc_etb_enable_hw(drvdata);
+ if (!ret)
+ drvdata->mode = CS_MODE_PERF;
+ } while (0);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
-static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etf_sink(struct coresight_device *csdev,
+ u32 mode, void *data)
{
int ret;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -222,7 +258,7 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
ret = tmc_enable_etf_sink_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = tmc_enable_etf_sink_perf(csdev);
+ ret = tmc_enable_etf_sink_perf(csdev, data);
break;
/* We shouldn't be here */
default:
@@ -233,7 +269,7 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
if (ret)
return ret;
- dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
+ dev_dbg(drvdata->dev, "TMC-ETB/ETF enabled\n");
return 0;
}
@@ -256,12 +292,13 @@ static void tmc_disable_etf_sink(struct coresight_device *csdev)
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
+ dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
}
static int tmc_enable_etf_link(struct coresight_device *csdev,
int inport, int outport)
{
+ int ret;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -271,12 +308,14 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
return -EBUSY;
}
- tmc_etf_enable_hw(drvdata);
- drvdata->mode = CS_MODE_SYSFS;
+ ret = tmc_etf_enable_hw(drvdata);
+ if (!ret)
+ drvdata->mode = CS_MODE_SYSFS;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "TMC-ETF enabled\n");
- return 0;
+ if (!ret)
+ dev_dbg(drvdata->dev, "TMC-ETF enabled\n");
+ return ret;
}
static void tmc_disable_etf_link(struct coresight_device *csdev,
@@ -295,7 +334,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "TMC-ETF disabled\n");
+ dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
}
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
@@ -328,12 +367,14 @@ static void tmc_free_etf_buffer(void *config)
}
static int tmc_set_etf_buffer(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config)
+ struct perf_output_handle *handle)
{
int ret = 0;
unsigned long head;
- struct cs_buffers *buf = sink_config;
+ struct cs_buffers *buf = etm_perf_sink_config(handle);
+
+ if (!buf)
+ return -EINVAL;
/* wrap head around to the amount of space we have */
head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
@@ -349,36 +390,7 @@ static int tmc_set_etf_buffer(struct coresight_device *csdev,
return ret;
}
-static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config)
-{
- long size = 0;
- struct cs_buffers *buf = sink_config;
-
- if (buf) {
- /*
- * In snapshot mode ->data_size holds the new address of the
- * ring buffer's head. The size itself is the whole address
- * range since we want the latest information.
- */
- if (buf->snapshot)
- handle->head = local_xchg(&buf->data_size,
- buf->nr_pages << PAGE_SHIFT);
- /*
- * Tell the tracer PMU how much we got in this run and if
- * something went wrong along the way. Nobody else can use
- * this cs_buffers instance until we are done. As such
- * resetting parameters here and squaring off with the ring
- * buffer API in the tracer PMU is fine.
- */
- size = local_xchg(&buf->data_size, 0);
- }
-
- return size;
-}
-
-static void tmc_update_etf_buffer(struct coresight_device *csdev,
+static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
@@ -387,17 +399,17 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
const u32 *barrier;
u32 *buf_ptr;
u64 read_ptr, write_ptr;
- u32 status, to_read;
- unsigned long offset;
+ u32 status;
+ unsigned long offset, to_read;
struct cs_buffers *buf = sink_config;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (!buf)
- return;
+ return 0;
/* This shouldn't happen */
if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
- return;
+ return 0;
CS_UNLOCK(drvdata->base);
@@ -438,10 +450,10 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
case TMC_MEM_INTF_WIDTH_32BITS:
case TMC_MEM_INTF_WIDTH_64BITS:
case TMC_MEM_INTF_WIDTH_128BITS:
- mask = GENMASK(31, 5);
+ mask = GENMASK(31, 4);
break;
case TMC_MEM_INTF_WIDTH_256BITS:
- mask = GENMASK(31, 6);
+ mask = GENMASK(31, 5);
break;
}
@@ -486,18 +498,14 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
}
}
- /*
- * In snapshot mode all we have to do is communicate to
- * perf_aux_output_end() the address of the current head. In full
- * trace mode the same function expects a size to move rb->aux_head
- * forward.
- */
- if (buf->snapshot)
- local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
- else
- local_add(to_read, &buf->data_size);
-
+ /* In snapshot mode we have to update the head */
+ if (buf->snapshot) {
+ handle->head = (cur * PAGE_SIZE) + offset;
+ to_read = buf->nr_pages << PAGE_SHIFT;
+ }
CS_LOCK(drvdata->base);
+
+ return to_read;
}
static const struct coresight_ops_sink tmc_etf_sink_ops = {
@@ -505,8 +513,6 @@ static const struct coresight_ops_sink tmc_etf_sink_ops = {
.disable = tmc_disable_etf_sink,
.alloc_buffer = tmc_alloc_etf_buffer,
.free_buffer = tmc_free_etf_buffer,
- .set_buffer = tmc_set_etf_buffer,
- .reset_buffer = tmc_reset_etf_buffer,
.update_buffer = tmc_update_etf_buffer,
};
@@ -563,7 +569,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
/* Disable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS)
- tmc_etb_disable_hw(drvdata);
+ __tmc_etb_disable_hw(drvdata);
drvdata->reading = true;
out:
@@ -603,7 +609,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
* can't be NULL.
*/
memset(drvdata->buf, 0, drvdata->size);
- tmc_etb_enable_hw(drvdata);
+ __tmc_etb_enable_hw(drvdata);
} else {
/*
* The ETB/ETF is not tracing and the buffer was just read.
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 2eda5de304c2..f684283890d3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
+#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
@@ -21,6 +22,28 @@ struct etr_flat_buf {
};
/*
+ * etr_perf_buffer - Perf buffer used for ETR
+ * @etr_buf - Actual buffer used by the ETR
+ * @snapshot - Perf session mode (snapshot or full trace).
+ * @head - handle->head at the beginning of the session.
+ * @nr_pages - Number of pages in the ring buffer.
+ * @pages - Array of pages in the ring buffer.
+ */
+struct etr_perf_buffer {
+ struct etr_buf *etr_buf;
+ bool snapshot;
+ unsigned long head;
+ int nr_pages;
+ void **pages;
+};
+
+/* Convert the perf index to an offset within the ETR buffer */
+#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+
+/* Lower limit for ETR hardware buffer */
+#define TMC_ETR_PERF_MIN_BUF_SIZE SZ_1M
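/*
 * Editorial example: for a 16-page (64KiB with 4KiB pages) perf ring
 * buffer, PERF_IDX2OFF() folds the ever-increasing handle->head back
 * into the buffer: PERF_IDX2OFF(0x14000, buf) = 0x14000 % 0x10000 =
 * 0x4000.
 */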
+
+/*
* The TMC ETR SG has a page size of 4K. The SG table contains pointers
* to 4KB buffers. However, the OS may use a PAGE_SIZE different from
* 4K (i.e, 16KB or 64KB). This implies that a single OS page could
@@ -536,7 +559,7 @@ tmc_init_etr_sg_table(struct device *dev, int node,
sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
if (IS_ERR(sg_table)) {
kfree(etr_table);
- return ERR_PTR(PTR_ERR(sg_table));
+ return ERR_CAST(sg_table);
}
etr_table->sg_table = sg_table;
@@ -728,12 +751,14 @@ tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
return NULL;
}
-static inline void tmc_etr_enable_catu(struct tmc_drvdata *drvdata)
+static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf)
{
struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
if (catu && helper_ops(catu)->enable)
- helper_ops(catu)->enable(catu, drvdata->etr_buf);
+ return helper_ops(catu)->enable(catu, etr_buf);
+ return 0;
}
static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
@@ -895,17 +920,11 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
}
-static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
+static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl, sts;
struct etr_buf *etr_buf = drvdata->etr_buf;
- /*
- * If this ETR is connected to a CATU, enable it before we turn
- * this on
- */
- tmc_etr_enable_catu(drvdata);
-
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
@@ -924,11 +943,8 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
axictl |= TMC_AXICTL_ARCACHE_OS;
}
- if (etr_buf->mode == ETR_MODE_ETR_SG) {
- if (WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
- return;
+ if (etr_buf->mode == ETR_MODE_ETR_SG)
axictl |= TMC_AXICTL_SCT_GAT_MODE;
- }
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
tmc_write_dba(drvdata, etr_buf->hwaddr);
@@ -954,19 +970,54 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf)
+{
+ int rc;
+
+ /* Callers should provide an appropriate buffer for use */
+ if (WARN_ON(!etr_buf))
+ return -EINVAL;
+
+ if ((etr_buf->mode == ETR_MODE_ETR_SG) &&
+ WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
+ return -EINVAL;
+
+ if (WARN_ON(drvdata->etr_buf))
+ return -EBUSY;
+
+ /*
+ * If this ETR is connected to a CATU, enable it before we turn
+ * this on.
+ */
+ rc = tmc_etr_enable_catu(drvdata, etr_buf);
+ if (rc)
+ return rc;
+ rc = coresight_claim_device(drvdata->base);
+ if (!rc) {
+ drvdata->etr_buf = etr_buf;
+ __tmc_etr_enable_hw(drvdata);
+ }
+
+ return rc;
+}
+
/*
* Return the available trace data in the buffer (starts at etr_buf->offset,
* limited by etr_buf->len) from @pos, with a maximum limit of @len,
* also updating the @bufpp on where to find it. Since the trace data
* starts at anywhere in the buffer, depending on the RRP, we adjust the
* @len returned to handle buffer wrapping around.
+ *
+ * We are protected here by drvdata->reading != 0, which ensures the
+ * sysfs_buf stays alive.
*/
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
loff_t pos, size_t len, char **bufpp)
{
s64 offset;
ssize_t actual = len;
- struct etr_buf *etr_buf = drvdata->etr_buf;
+ struct etr_buf *etr_buf = drvdata->sysfs_buf;
if (pos + actual > etr_buf->len)
actual = etr_buf->len - pos;
@@ -996,10 +1047,17 @@ tmc_etr_free_sysfs_buf(struct etr_buf *buf)
static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
{
- tmc_sync_etr_buf(drvdata);
+ struct etr_buf *etr_buf = drvdata->etr_buf;
+
+ if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
+ tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
+ drvdata->sysfs_buf = NULL;
+ } else {
+ tmc_sync_etr_buf(drvdata);
+ }
}
-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
@@ -1015,8 +1073,16 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
+}
+
+static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+{
+ __tmc_etr_disable_hw(drvdata);
/* Disable CATU device if this ETR is connected to one */
tmc_etr_disable_catu(drvdata);
+ coresight_disclaim_device(drvdata->base);
+ /* Reset the ETR buf used by hardware */
+ drvdata->etr_buf = NULL;
}
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
@@ -1024,7 +1090,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct etr_buf *new_buf = NULL, *free_buf = NULL;
+ struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;
/*
* If we are enabling the ETR from disabled state, we need to make
@@ -1035,7 +1101,8 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
* with the lock released.
*/
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) {
+ sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
+ if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Allocate memory with the locks released */
@@ -1064,14 +1131,15 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
* If we don't have a buffer or it doesn't match the requested size,
* use the buffer allocated above. Otherwise reuse the existing buffer.
*/
- if (!drvdata->etr_buf ||
- (new_buf && drvdata->etr_buf->size != new_buf->size)) {
- free_buf = drvdata->etr_buf;
- drvdata->etr_buf = new_buf;
+ sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
+ if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
+ free_buf = sysfs_buf;
+ drvdata->sysfs_buf = new_buf;
}
- drvdata->mode = CS_MODE_SYSFS;
- tmc_etr_enable_hw(drvdata);
+ ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
+ if (!ret)
+ drvdata->mode = CS_MODE_SYSFS;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1080,24 +1148,244 @@ out:
tmc_etr_free_sysfs_buf(free_buf);
if (!ret)
- dev_info(drvdata->dev, "TMC-ETR enabled\n");
+ dev_dbg(drvdata->dev, "TMC-ETR enabled\n");
return ret;
}
-static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
+/*
+ * tmc_etr_setup_perf_buf: Allocate ETR buffer for use by perf.
+ * The size of the hardware buffer is dependent on the size configured
+ * via sysfs and the perf ring buffer size. We prefer to allocate the
+ * largest possible size, scaling down the size by half until it
+ * reaches a minimum limit (1M), beyond which we give up.
+ */
+static struct etr_perf_buffer *
+tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages,
+ void **pages, bool snapshot)
{
- /* We don't support perf mode yet ! */
- return -EINVAL;
+ struct etr_buf *etr_buf;
+ struct etr_perf_buffer *etr_perf;
+ unsigned long size;
+
+ etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
+ if (!etr_perf)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Try to match the perf ring buffer size if it is larger
+ * than the size requested via sysfs.
+ */
+ if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
+ etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
+ 0, node, NULL);
+ if (!IS_ERR(etr_buf))
+ goto done;
+ }
+
+ /*
+ * Otherwise, fall back to the size configured via sysfs for this
+ * ETR and scale it down until we hit the minimum limit.
+ */
+ size = drvdata->size;
+ do {
+ etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL);
+ if (!IS_ERR(etr_buf))
+ goto done;
+ size /= 2;
+ } while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);
+
+ kfree(etr_perf);
+ return ERR_PTR(-ENOMEM);
+
+done:
+ etr_perf->etr_buf = etr_buf;
+ return etr_perf;
}
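/*
 * Editorial example: with a 2MB perf ring buffer and an 8MB sysfs
 * size, the 2MB allocation is skipped (the ring buffer is not larger
 * than the sysfs size) and the loop tries 8MB, 4MB, 2MB, then 1MB
 * before giving up.
 */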
-static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
+
+static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
+ int cpu, void **pages, int nr_pages,
+ bool snapshot)
+{
+ struct etr_perf_buffer *etr_perf;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (cpu == -1)
+ cpu = smp_processor_id();
+
+ etr_perf = tmc_etr_setup_perf_buf(drvdata, cpu_to_node(cpu),
+ nr_pages, pages, snapshot);
+ if (IS_ERR(etr_perf)) {
+ dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
+ return NULL;
+ }
+
+ etr_perf->snapshot = snapshot;
+ etr_perf->nr_pages = nr_pages;
+ etr_perf->pages = pages;
+
+ return etr_perf;
+}
+
+static void tmc_free_etr_buffer(void *config)
+{
+ struct etr_perf_buffer *etr_perf = config;
+
+ if (etr_perf->etr_buf)
+ tmc_free_etr_buf(etr_perf->etr_buf);
+ kfree(etr_perf);
+}
+
+/*
+ * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
+ * buffer to the perf ring buffer.
+ */
+static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
+{
+ long bytes, to_copy;
+ long pg_idx, pg_offset, src_offset;
+ unsigned long head = etr_perf->head;
+ char **dst_pages, *src_buf;
+ struct etr_buf *etr_buf = etr_perf->etr_buf;
+
+ pg_idx = head >> PAGE_SHIFT;
+ pg_offset = head & (PAGE_SIZE - 1);
+ dst_pages = (char **)etr_perf->pages;
+ src_offset = etr_buf->offset;
+ to_copy = etr_buf->len;
+
+ while (to_copy > 0) {
+ /*
+ * In one iteration we can copy the minimum of:
+ * 1) what is available in the source buffer,
+ * 2) what is available in the source buffer before it
+ * wraps around, and
+ * 3) what is available in the destination page.
+ */
+ bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
+ &src_buf);
+ if (WARN_ON_ONCE(bytes <= 0))
+ break;
+ bytes = min(bytes, (long)(PAGE_SIZE - pg_offset));
+
+ memcpy(dst_pages[pg_idx] + pg_offset, src_buf, bytes);
+
+ to_copy -= bytes;
+
+ /* Move destination pointers */
+ pg_offset += bytes;
+ if (pg_offset == PAGE_SIZE) {
+ pg_offset = 0;
+ if (++pg_idx == etr_perf->nr_pages)
+ pg_idx = 0;
+ }
+
+ /* Move source pointers */
+ src_offset += bytes;
+ if (src_offset >= etr_buf->size)
+ src_offset -= etr_buf->size;
+ }
+}
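/*
 * Editorial example of the copy bounds above: with 4KiB pages, if
 * pg_offset is 3KiB into the current destination page and the source
 * has 8KiB left before it wraps, tmc_etr_buf_get_data() returns at
 * most 8KiB and the min() against (PAGE_SIZE - pg_offset) trims the
 * copy to 1KiB for this iteration.
 */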
+
+/*
+ * tmc_update_etr_buffer : Update the perf ring buffer with the
+ * available trace data. We use software double buffering at the moment.
+ *
+ * TODO: Add support for reusing the perf ring buffer.
+ */
+static unsigned long
+tmc_update_etr_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *config)
+{
+ bool lost = false;
+ unsigned long flags, size = 0;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etr_perf_buffer *etr_perf = config;
+ struct etr_buf *etr_buf = etr_perf->etr_buf;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (WARN_ON(drvdata->perf_data != etr_perf)) {
+ lost = true;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ goto out;
+ }
+
+ CS_UNLOCK(drvdata->base);
+
+ tmc_flush_and_stop(drvdata);
+ tmc_sync_etr_buf(drvdata);
+
+ CS_LOCK(drvdata->base);
+ /* Reset the perf-specific data */
+ drvdata->perf_data = NULL;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ size = etr_buf->len;
+ tmc_etr_sync_perf_buffer(etr_perf);
+
+ /*
+ * Update handle->head in snapshot mode. Also update the size to the
+ * hardware buffer size if there was an overflow.
+ */
+ if (etr_perf->snapshot) {
+ handle->head += size;
+ if (etr_buf->full)
+ size = etr_buf->size;
+ }
+
+ lost |= etr_buf->full;
+out:
+ if (lost)
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ return size;
+}
+
+static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct perf_output_handle *handle = data;
+ struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ /*
+ * There can be only one writer per sink in perf mode. If the sink
+ * is already open in SYSFS mode, we can't use it.
+ */
+ if (drvdata->mode != CS_MODE_DISABLED || WARN_ON(drvdata->perf_data)) {
+ rc = -EBUSY;
+ goto unlock_out;
+ }
+
+ if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+
+ etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
+ drvdata->perf_data = etr_perf;
+ rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
+ if (!rc)
+ drvdata->mode = CS_MODE_PERF;
+
+unlock_out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return rc;
+}
+
+static int tmc_enable_etr_sink(struct coresight_device *csdev,
+ u32 mode, void *data)
{
switch (mode) {
case CS_MODE_SYSFS:
return tmc_enable_etr_sink_sysfs(csdev);
case CS_MODE_PERF:
- return tmc_enable_etr_sink_perf(csdev);
+ return tmc_enable_etr_sink_perf(csdev, data);
}
/* We shouldn't be here */
@@ -1123,12 +1411,15 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "TMC-ETR disabled\n");
+ dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
}
static const struct coresight_ops_sink tmc_etr_sink_ops = {
.enable = tmc_enable_etr_sink,
.disable = tmc_disable_etr_sink,
+ .alloc_buffer = tmc_alloc_etr_buffer,
+ .update_buffer = tmc_update_etr_buffer,
+ .free_buffer = tmc_free_etr_buffer,
};
const struct coresight_ops tmc_etr_cs_ops = {
@@ -1150,21 +1441,19 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
goto out;
}
- /* Don't interfere if operated from Perf */
- if (drvdata->mode == CS_MODE_PERF) {
- ret = -EINVAL;
- goto out;
- }
-
- /* If drvdata::etr_buf is NULL the trace data has been read already */
- if (drvdata->etr_buf == NULL) {
+ /*
+ * We can safely allow reads even if the ETR is operating in PERF mode,
+ * since the sysfs session is captured in mode-specific data.
+ * If drvdata::sysfs_buf is NULL the trace data has been read already.
+ */
+ if (!drvdata->sysfs_buf) {
ret = -EINVAL;
goto out;
}
- /* Disable the TMC if need be */
+ /* Disable the TMC if we are trying to read from a running session. */
if (drvdata->mode == CS_MODE_SYSFS)
- tmc_etr_disable_hw(drvdata);
+ __tmc_etr_disable_hw(drvdata);
drvdata->reading = true;
out:
@@ -1176,7 +1465,7 @@ out:
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
unsigned long flags;
- struct etr_buf *etr_buf = NULL;
+ struct etr_buf *sysfs_buf = NULL;
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
@@ -1191,22 +1480,22 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
* buffer. Since the tracer is still enabled drvdata::buf can't
* be NULL.
*/
- tmc_etr_enable_hw(drvdata);
+ __tmc_etr_enable_hw(drvdata);
} else {
/*
* The ETR is not tracing and the buffer was just read.
* As such prepare to free the trace buffer.
*/
- etr_buf = drvdata->etr_buf;
- drvdata->etr_buf = NULL;
+ sysfs_buf = drvdata->sysfs_buf;
+ drvdata->sysfs_buf = NULL;
}
drvdata->reading = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free allocated memory outside of the spinlock */
- if (etr_buf)
- tmc_free_etr_buf(etr_buf);
+ if (sysfs_buf)
+ tmc_etr_free_sysfs_buf(sysfs_buf);
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 1b817ec1192c..ea249f0bcd73 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -81,7 +81,7 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
}
if (!ret)
- dev_info(drvdata->dev, "TMC read start\n");
+ dev_dbg(drvdata->dev, "TMC read start\n");
return ret;
}
@@ -103,7 +103,7 @@ static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
}
if (!ret)
- dev_info(drvdata->dev, "TMC read end\n");
+ dev_dbg(drvdata->dev, "TMC read end\n");
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 7027bd60c4cc..487c53701e9c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -170,6 +170,8 @@ struct etr_buf {
* @trigger_cntr: amount of words to store after a trigger.
* @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the
* device configuration register (DEVID)
+ * @sysfs_buf: SYSFS buffer for ETR.
+ * @perf_data: PERF buffer for ETR.
*/
struct tmc_drvdata {
void __iomem *base;
@@ -189,6 +191,8 @@ struct tmc_drvdata {
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
u32 etr_caps;
+ struct etr_buf *sysfs_buf;
+ void *perf_data;
};
struct etr_buf_operations {
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 459ef930d98c..b2f72a1fa402 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -68,13 +68,13 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int tpiu_enable(struct coresight_device *csdev, u32 mode)
+static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
tpiu_enable_hw(drvdata);
- dev_info(drvdata->dev, "TPIU enabled\n");
+ dev_dbg(drvdata->dev, "TPIU enabled\n");
return 0;
}
@@ -100,7 +100,7 @@ static void tpiu_disable(struct coresight_device *csdev)
tpiu_disable_hw(drvdata);
- dev_info(drvdata->dev, "TPIU disabled\n");
+ dev_dbg(drvdata->dev, "TPIU disabled\n");
}
static const struct coresight_ops_sink tpiu_sink_ops = {
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 3e07fd335f8c..2b0df1a0a8df 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -128,16 +128,105 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
return -ENODEV;
}
-static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
+static inline u32 coresight_read_claim_tags(void __iomem *base)
+{
+ return readl_relaxed(base + CORESIGHT_CLAIMCLR);
+}
+
+static inline bool coresight_is_claimed_self_hosted(void __iomem *base)
+{
+ return coresight_read_claim_tags(base) == CORESIGHT_CLAIM_SELF_HOSTED;
+}
+
+static inline bool coresight_is_claimed_any(void __iomem *base)
+{
+ return coresight_read_claim_tags(base) != 0;
+}
+
+static inline void coresight_set_claim_tags(void __iomem *base)
+{
+ writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMSET);
+ isb();
+}
+
+static inline void coresight_clear_claim_tags(void __iomem *base)
+{
+ writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMCLR);
+ isb();
+}
+
+/*
+ * coresight_claim_device_unlocked : Claim the device for self-hosted usage
+ * to prevent an external tool from touching this device. As per PSCI
+ * standards, section "Preserving the execution context" => "Debug and Trace
+ * Save and Restore", DBGCLAIM[1] is reserved for self-hosted debug/trace and
+ * DBGCLAIM[0] is reserved for external tools.
+ *
+ * Called with CS_UNLOCKed for the component.
+ * Returns : 0 on success
+ */
+int coresight_claim_device_unlocked(void __iomem *base)
+{
+ if (coresight_is_claimed_any(base))
+ return -EBUSY;
+
+ coresight_set_claim_tags(base);
+ if (coresight_is_claimed_self_hosted(base))
+ return 0;
+ /* There was a race setting the tags, clean up and fail */
+ coresight_clear_claim_tags(base);
+ return -EBUSY;
+}
+
+int coresight_claim_device(void __iomem *base)
+{
+ int rc;
+
+ CS_UNLOCK(base);
+ rc = coresight_claim_device_unlocked(base);
+ CS_LOCK(base);
+
+ return rc;
+}
+
+/*
+ * coresight_disclaim_device_unlocked : Clear the claim tags for the device.
+ * Called with CS_UNLOCKed for the component.
+ */
+void coresight_disclaim_device_unlocked(void __iomem *base)
+{
+ if (coresight_is_claimed_self_hosted(base))
+ coresight_clear_claim_tags(base);
+ else
+ /*
+ * The external agent may not have honoured our claim
+ * and may have manipulated it. Or something else has
+ * seriously gone wrong in our driver.
+ */
+ WARN_ON_ONCE(1);
+}
+
+void coresight_disclaim_device(void __iomem *base)
+{
+ CS_UNLOCK(base);
+ coresight_disclaim_device_unlocked(base);
+ CS_LOCK(base);
+}
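
The claim/disclaim pair above is meant to bracket every window in which the driver programs a component. A minimal usage sketch, assuming a hypothetical sink driver and register sequence (not part of this patch):

static int example_sink_enable_hw(struct example_drvdata *drvdata)
{
	int rc;

	CS_UNLOCK(drvdata->base);
	/* Tag the component as self-hosted before touching it */
	rc = coresight_claim_device_unlocked(drvdata->base);
	if (rc) {
		/* An external debugger already owns the component */
		CS_LOCK(drvdata->base);
		return rc;
	}

	/* ... program the control registers here ... */

	CS_LOCK(drvdata->base);
	return 0;
}

The matching disable path would quiesce the hardware and then call coresight_disclaim_device() to drop the self-hosted tag.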
+
+static int coresight_enable_sink(struct coresight_device *csdev,
+ u32 mode, void *data)
{
int ret;
- if (!csdev->enable) {
- if (sink_ops(csdev)->enable) {
- ret = sink_ops(csdev)->enable(csdev, mode);
- if (ret)
- return ret;
- }
+ /*
+ * We need to make sure the "new" session is compatible with the
+ * existing "mode" of operation.
+ */
+ if (sink_ops(csdev)->enable) {
+ ret = sink_ops(csdev)->enable(csdev, mode, data);
+ if (ret)
+ return ret;
csdev->enable = true;
}
@@ -184,8 +273,10 @@ static int coresight_enable_link(struct coresight_device *csdev,
if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
if (link_ops(csdev)->enable) {
ret = link_ops(csdev)->enable(csdev, inport, outport);
- if (ret)
+ if (ret) {
+ atomic_dec(&csdev->refcnt[refport]);
return ret;
+ }
}
}
@@ -274,13 +365,21 @@ static bool coresight_disable_source(struct coresight_device *csdev)
return !csdev->enable;
}
-void coresight_disable_path(struct list_head *path)
+/*
+ * coresight_disable_path_from : Disable components in the given path beyond
+ * @nd in the list. If @nd is NULL, all the components, except the
+ * SOURCE, are disabled.
+ */
+static void coresight_disable_path_from(struct list_head *path,
+ struct coresight_node *nd)
{
u32 type;
- struct coresight_node *nd;
struct coresight_device *csdev, *parent, *child;
- list_for_each_entry(nd, path, link) {
+ if (!nd)
+ nd = list_first_entry(path, struct coresight_node, link);
+
+ list_for_each_entry_continue(nd, path, link) {
csdev = nd->csdev;
type = csdev->type;
@@ -300,7 +399,12 @@ void coresight_disable_path(struct list_head *path)
coresight_disable_sink(csdev);
break;
case CORESIGHT_DEV_TYPE_SOURCE:
- /* sources are disabled from either sysFS or Perf */
+ /*
+ * We skip the first node in the path assuming that it
+ * is the source. So we don't expect a source device in
+ * the middle of a path.
+ */
+ WARN_ON(1);
break;
case CORESIGHT_DEV_TYPE_LINK:
parent = list_prev_entry(nd, link)->csdev;
@@ -313,7 +417,12 @@ void coresight_disable_path(struct list_head *path)
}
}
-int coresight_enable_path(struct list_head *path, u32 mode)
+void coresight_disable_path(struct list_head *path)
+{
+ coresight_disable_path_from(path, NULL);
+}
+
+int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data)
{
int ret = 0;
@@ -338,9 +447,15 @@ int coresight_enable_path(struct list_head *path, u32 mode)
switch (type) {
case CORESIGHT_DEV_TYPE_SINK:
- ret = coresight_enable_sink(csdev, mode);
+ ret = coresight_enable_sink(csdev, mode, sink_data);
+ /*
+ * Sink is the first component turned on. If we
+ * failed to enable the sink, there are no components
+ * that need disabling. Disabling the path here
+ * would mean we could disrupt an existing session.
+ */
if (ret)
- goto err;
+ goto out;
break;
case CORESIGHT_DEV_TYPE_SOURCE:
/* sources are enabled from either sysFS or Perf */
@@ -360,7 +475,7 @@ int coresight_enable_path(struct list_head *path, u32 mode)
out:
return ret;
err:
- coresight_disable_path(path);
+ coresight_disable_path_from(path, nd);
goto out;
}
@@ -635,7 +750,7 @@ int coresight_enable(struct coresight_device *csdev)
goto out;
}
- ret = coresight_enable_path(path, CS_MODE_SYSFS);
+ ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
if (ret)
goto err_path;
@@ -995,18 +1110,16 @@ postcore_initcall(coresight_init);
struct coresight_device *coresight_register(struct coresight_desc *desc)
{
- int i;
int ret;
int link_subtype;
int nr_refcnts = 1;
atomic_t *refcnts = NULL;
struct coresight_device *csdev;
- struct coresight_connection *conns = NULL;
csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
if (!csdev) {
ret = -ENOMEM;
- goto err_kzalloc_csdev;
+ goto err_out;
}
if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
@@ -1022,7 +1135,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
if (!refcnts) {
ret = -ENOMEM;
- goto err_kzalloc_refcnts;
+ goto err_free_csdev;
}
csdev->refcnt = refcnts;
@@ -1030,22 +1143,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->nr_inport = desc->pdata->nr_inport;
csdev->nr_outport = desc->pdata->nr_outport;
- /* Initialise connections if there is at least one outport */
- if (csdev->nr_outport) {
- conns = kcalloc(csdev->nr_outport, sizeof(*conns), GFP_KERNEL);
- if (!conns) {
- ret = -ENOMEM;
- goto err_kzalloc_conns;
- }
-
- for (i = 0; i < csdev->nr_outport; i++) {
- conns[i].outport = desc->pdata->outports[i];
- conns[i].child_name = desc->pdata->child_names[i];
- conns[i].child_port = desc->pdata->child_ports[i];
- }
- }
-
- csdev->conns = conns;
+ csdev->conns = desc->pdata->conns;
csdev->type = desc->type;
csdev->subtype = desc->subtype;
@@ -1062,7 +1160,11 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
ret = device_register(&csdev->dev);
if (ret) {
put_device(&csdev->dev);
- goto err_kzalloc_csdev;
+ /*
+ * All resources are freed explicitly via
+ * coresight_device_release(), triggered from put_device().
+ */
+ goto err_out;
}
mutex_lock(&coresight_mutex);
@@ -1074,11 +1176,9 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
return csdev;
-err_kzalloc_conns:
- kfree(refcnts);
-err_kzalloc_refcnts:
+err_free_csdev:
kfree(csdev);
-err_kzalloc_csdev:
+err_out:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_register);
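
With the connection array now owned by the platform data, a probe routine only points desc.pdata at the parsed data. A hedged sketch (example_cs_ops and the sink subtype are invented for illustration):

static int example_probe(struct device *dev)
{
	struct coresight_desc desc = { 0 };

	desc.pdata = of_get_coresight_platform_data(dev, dev->of_node);
	if (IS_ERR(desc.pdata))
		return PTR_ERR(desc.pdata);	/* may be -EPROBE_DEFER */

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
	desc.ops = &example_cs_ops;
	desc.dev = dev;

	return PTR_ERR_OR_ZERO(coresight_register(&desc));
}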
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 6880bee195c8..89092f83567e 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -45,8 +45,13 @@ of_coresight_get_endpoint_device(struct device_node *endpoint)
endpoint, of_dev_node_match);
}
-static void of_coresight_get_ports(const struct device_node *node,
- int *nr_inport, int *nr_outport)
+static inline bool of_coresight_legacy_ep_is_input(struct device_node *ep)
+{
+ return of_property_read_bool(ep, "slave-mode");
+}
+
+static void of_coresight_get_ports_legacy(const struct device_node *node,
+ int *nr_inport, int *nr_outport)
{
struct device_node *ep = NULL;
int in = 0, out = 0;
@@ -56,7 +61,7 @@ static void of_coresight_get_ports(const struct device_node *node,
if (!ep)
break;
- if (of_property_read_bool(ep, "slave-mode"))
+ if (of_coresight_legacy_ep_is_input(ep))
in++;
else
out++;
@@ -67,32 +72,77 @@ static void of_coresight_get_ports(const struct device_node *node,
*nr_outport = out;
}
+static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
+{
+ struct device_node *parent = of_graph_get_port_parent(ep);
+
+ /*
+ * Skip one-level up to the real device node, if we
+ * are using the new bindings.
+ */
+ if (!of_node_cmp(parent->name, "in-ports") ||
+ !of_node_cmp(parent->name, "out-ports"))
+ parent = of_get_next_parent(parent);
+
+ return parent;
+}
+
+static inline struct device_node *
+of_coresight_get_input_ports_node(const struct device_node *node)
+{
+ return of_get_child_by_name(node, "in-ports");
+}
+
+static inline struct device_node *
+of_coresight_get_output_ports_node(const struct device_node *node)
+{
+ return of_get_child_by_name(node, "out-ports");
+}
+
+static inline int
+of_coresight_count_ports(struct device_node *port_parent)
+{
+ int i = 0;
+ struct device_node *ep = NULL;
+
+ while ((ep = of_graph_get_next_endpoint(port_parent, ep)))
+ i++;
+ return i;
+}
+
+static void of_coresight_get_ports(const struct device_node *node,
+ int *nr_inport, int *nr_outport)
+{
+ struct device_node *input_ports = NULL, *output_ports = NULL;
+
+ input_ports = of_coresight_get_input_ports_node(node);
+ output_ports = of_coresight_get_output_ports_node(node);
+
+ if (input_ports || output_ports) {
+ if (input_ports) {
+ *nr_inport = of_coresight_count_ports(input_ports);
+ of_node_put(input_ports);
+ }
+ if (output_ports) {
+ *nr_outport = of_coresight_count_ports(output_ports);
+ of_node_put(output_ports);
+ }
+ } else {
+ /* Fall back to legacy DT bindings parsing */
+ of_coresight_get_ports_legacy(node, nr_inport, nr_outport);
+ }
+}
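
For reference, a hedged sketch of the two binding flavours this helper distinguishes (the device node and address are invented):

/*
 * New bindings: endpoints are grouped under dedicated child nodes,
 * so no "slave-mode" marker is needed:
 *
 *	etf@20010000 {
 *		in-ports  { port { endpoint { ... }; }; };
 *		out-ports { port { endpoint { ... }; }; };
 *	};
 *
 * Legacy bindings: all ports sit directly under the device node and
 * input endpoints carry the "slave-mode" property.
 */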
+
static int of_coresight_alloc_memory(struct device *dev,
struct coresight_platform_data *pdata)
{
- /* List of output port on this component */
- pdata->outports = devm_kcalloc(dev,
- pdata->nr_outport,
- sizeof(*pdata->outports),
- GFP_KERNEL);
- if (!pdata->outports)
- return -ENOMEM;
-
- /* Children connected to this component via @outports */
- pdata->child_names = devm_kcalloc(dev,
- pdata->nr_outport,
- sizeof(*pdata->child_names),
- GFP_KERNEL);
- if (!pdata->child_names)
- return -ENOMEM;
-
- /* Port number on the child this component is connected to */
- pdata->child_ports = devm_kcalloc(dev,
- pdata->nr_outport,
- sizeof(*pdata->child_ports),
- GFP_KERNEL);
- if (!pdata->child_ports)
- return -ENOMEM;
+ if (pdata->nr_outport) {
+ pdata->conns = devm_kzalloc(dev, pdata->nr_outport *
+ sizeof(*pdata->conns),
+ GFP_KERNEL);
+ if (!pdata->conns)
+ return -ENOMEM;
+ }
return 0;
}
@@ -114,17 +164,78 @@ int of_coresight_get_cpu(const struct device_node *node)
}
EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
+/*
+ * of_coresight_parse_endpoint : Parse the given output endpoint @ep
+ * and fill the connection information in @conn
+ *
+ * Parses the local port, remote device name and the remote port.
+ *
+ * Returns :
+ * 1 - If the parsing is successful and a connection record
+ * was created for an output connection.
+ * 0 - If the parsing completed without any fatal errors.
+ * -Errno - Fatal error, abort the scanning.
+ */
+static int of_coresight_parse_endpoint(struct device *dev,
+ struct device_node *ep,
+ struct coresight_connection *conn)
+{
+ int ret = 0;
+ struct of_endpoint endpoint, rendpoint;
+ struct device_node *rparent = NULL;
+ struct device_node *rep = NULL;
+ struct device *rdev = NULL;
+
+ do {
+ /* Parse the local port details */
+ if (of_graph_parse_endpoint(ep, &endpoint))
+ break;
+ /*
+ * Get a handle on the remote endpoint and the device it is
+ * attached to.
+ */
+ rep = of_graph_get_remote_endpoint(ep);
+ if (!rep)
+ break;
+ rparent = of_coresight_get_port_parent(rep);
+ if (!rparent)
+ break;
+ if (of_graph_parse_endpoint(rep, &rendpoint))
+ break;
+
+ /* If the remote device is not available, defer probing */
+ rdev = of_coresight_get_endpoint_device(rparent);
+ if (!rdev) {
+ ret = -EPROBE_DEFER;
+ break;
+ }
+
+ conn->outport = endpoint.port;
+ conn->child_name = devm_kstrdup(dev,
+ dev_name(rdev),
+ GFP_KERNEL);
+ conn->child_port = rendpoint.port;
+ /* Connection record updated */
+ ret = 1;
+ } while (0);
+
+ of_node_put(rparent);
+ of_node_put(rep);
+ put_device(rdev);
+
+ return ret;
+}
+
struct coresight_platform_data *
of_get_coresight_platform_data(struct device *dev,
const struct device_node *node)
{
- int i = 0, ret = 0;
+ int ret = 0;
struct coresight_platform_data *pdata;
- struct of_endpoint endpoint, rendpoint;
- struct device *rdev;
+ struct coresight_connection *conn;
struct device_node *ep = NULL;
- struct device_node *rparent = NULL;
- struct device_node *rport = NULL;
+ const struct device_node *parent = NULL;
+ bool legacy_binding = false;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -132,63 +243,54 @@ of_get_coresight_platform_data(struct device *dev,
/* Use device name as sysfs handle */
pdata->name = dev_name(dev);
+ pdata->cpu = of_coresight_get_cpu(node);
/* Get the number of input and output port for this component */
of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
- if (pdata->nr_outport) {
- ret = of_coresight_alloc_memory(dev, pdata);
- if (ret)
- return ERR_PTR(ret);
-
- /* Iterate through each port to discover topology */
- do {
- /* Get a handle on a port */
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- break;
-
- /*
- * No need to deal with input ports, processing for as
- * processing for output ports will deal with them.
- */
- if (of_find_property(ep, "slave-mode", NULL))
- continue;
-
- /* Get a handle on the local endpoint */
- ret = of_graph_parse_endpoint(ep, &endpoint);
-
- if (ret)
- continue;
-
- /* The local out port number */
- pdata->outports[i] = endpoint.port;
-
- /*
- * Get a handle on the remote port and parent
- * attached to it.
- */
- rparent = of_graph_get_remote_port_parent(ep);
- rport = of_graph_get_remote_port(ep);
-
- if (!rparent || !rport)
- continue;
+ /* If there are no output connections, we are done */
+ if (!pdata->nr_outport)
+ return pdata;
- if (of_graph_parse_endpoint(rport, &rendpoint))
- continue;
+ ret = of_coresight_alloc_memory(dev, pdata);
+ if (ret)
+ return ERR_PTR(ret);
- rdev = of_coresight_get_endpoint_device(rparent);
- if (!rdev)
- return ERR_PTR(-EPROBE_DEFER);
-
- pdata->child_names[i] = dev_name(rdev);
- pdata->child_ports[i] = rendpoint.id;
-
- i++;
- } while (ep);
+ parent = of_coresight_get_output_ports_node(node);
+ /*
+ * If the DT uses the obsolete bindings, the ports are listed
+ * directly under the device node and we need to filter out
+ * the input ports.
+ */
+ if (!parent) {
+ legacy_binding = true;
+ parent = node;
+ dev_warn_once(dev, "Uses obsolete Coresight DT bindings\n");
}
- pdata->cpu = of_coresight_get_cpu(node);
+ conn = pdata->conns;
+
+ /* Iterate through each output port to discover topology */
+ while ((ep = of_graph_get_next_endpoint(parent, ep))) {
+ /*
+ * The legacy binding mixes input/output ports under the
+ * same parent. So, skip the input ports if we are dealing
+ * with the legacy binding, as they are processed together
+ * with their connected output ports.
+ */
+ if (legacy_binding && of_coresight_legacy_ep_is_input(ep))
+ continue;
+
+ ret = of_coresight_parse_endpoint(dev, ep, conn);
+ switch (ret) {
+ case 1:
+ conn++; /* Fall through */
+ case 0:
+ break;
+ default:
+ return ERR_PTR(ret);
+ }
+ }
return pdata;
}
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index 723e2d90083d..752dd66742bf 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -11,6 +11,35 @@ config STM
if STM
+config STM_PROTO_BASIC
+ tristate "Basic STM framing protocol driver"
+ default CONFIG_STM
+ help
+ This is a simple framing protocol for sending data over STM
+ devices. This was the protocol that the STM framework used
+ exclusively until the MIPI SyS-T support was added. Use this
+ driver for compatibility with your existing STM setup.
+
+ The receiving side only needs to be able to decode the MIPI
+ STP protocol in order to extract the data.
+
+ If you want to be able to use the basic protocol or want the
+ backwards compatibility for your existing setup, say Y.
+
+config STM_PROTO_SYS_T
+ tristate "MIPI SyS-T STM framing protocol driver"
+ default CONFIG_STM
+ help
+ This is an implementation of MIPI SyS-T protocol to be used
+ over the STP transport. In addition to the data payload, it
+ also carries additional metadata for time correlation, better
+ means of trace source identification, etc.
+
+ The receiving side must be able to decode this protocol in
+ addition to the MIPI STP, in order to extract the data.
+
+ If you don't know what this is, say N.
+
config STM_DUMMY
tristate "Dummy STM driver"
help
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index effc19e5190f..1692fcd29277 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -3,6 +3,12 @@ obj-$(CONFIG_STM) += stm_core.o
stm_core-y := core.o policy.o
+obj-$(CONFIG_STM_PROTO_BASIC) += stm_p_basic.o
+obj-$(CONFIG_STM_PROTO_SYS_T) += stm_p_sys-t.o
+
+stm_p_basic-y := p_basic.o
+stm_p_sys-t-y := p_sys-t.o
+
obj-$(CONFIG_STM_DUMMY) += dummy_stm.o
obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 10bcb5d73f90..93ce3aa740a9 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -293,15 +293,15 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
if (width > stm->data->sw_nchannels)
return -EINVAL;
- if (policy_node) {
- stp_policy_node_get_ranges(policy_node,
- &midx, &mend, &cidx, &cend);
- } else {
- midx = stm->data->sw_start;
- cidx = 0;
- mend = stm->data->sw_end;
- cend = stm->data->sw_nchannels - 1;
- }
+ /* We no longer accept policy_node==NULL here */
+ if (WARN_ON_ONCE(!policy_node))
+ return -EINVAL;
+
+ /*
+ * Also, the caller holds a reference to policy_node, so it won't
+ * disappear on us.
+ */
+ stp_policy_node_get_ranges(policy_node, &midx, &mend, &cidx, &cend);
spin_lock(&stm->mc_lock);
spin_lock(&output->lock);
@@ -316,11 +316,26 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
output->master = midx;
output->channel = cidx;
output->nr_chans = width;
+ if (stm->pdrv->output_open) {
+ void *priv = stp_policy_node_priv(policy_node);
+
+ if (WARN_ON_ONCE(!priv))
+ goto unlock;
+
+ /* configfs subsys mutex is held by the caller */
+ ret = stm->pdrv->output_open(priv, output);
+ if (ret)
+ goto unlock;
+ }
+
stm_output_claim(stm, output);
dev_dbg(&stm->dev, "assigned %u:%u (+%u)\n", midx, cidx, width);
ret = 0;
unlock:
+ if (ret)
+ output->nr_chans = 0;
+
spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
@@ -333,6 +348,8 @@ static void stm_output_free(struct stm_device *stm, struct stm_output *output)
spin_lock(&output->lock);
if (output->nr_chans)
stm_output_disclaim(stm, output);
+ if (stm->pdrv && stm->pdrv->output_close)
+ stm->pdrv->output_close(output);
spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
}
@@ -349,6 +366,127 @@ static int major_match(struct device *dev, const void *data)
return MAJOR(dev->devt) == major;
}
+/*
+ * Framing protocol management
+ * Modules can implement STM protocol drivers and (un-)register them
+ * with the STM class framework.
+ */
+static struct list_head stm_pdrv_head;
+static struct mutex stm_pdrv_mutex;
+
+struct stm_pdrv_entry {
+ struct list_head entry;
+ const struct stm_protocol_driver *pdrv;
+ const struct config_item_type *node_type;
+};
+
+static const struct stm_pdrv_entry *
+__stm_lookup_protocol(const char *name)
+{
+ struct stm_pdrv_entry *pe;
+
+ /*
+ * If no name is given (NULL or ""), fall back to "p_basic".
+ */
+ if (!name || !*name)
+ name = "p_basic";
+
+ list_for_each_entry(pe, &stm_pdrv_head, entry) {
+ if (!strcmp(name, pe->pdrv->name))
+ return pe;
+ }
+
+ return NULL;
+}
+
+int stm_register_protocol(const struct stm_protocol_driver *pdrv)
+{
+ struct stm_pdrv_entry *pe = NULL;
+ int ret = -ENOMEM;
+
+ mutex_lock(&stm_pdrv_mutex);
+
+ if (__stm_lookup_protocol(pdrv->name)) {
+ ret = -EEXIST;
+ goto unlock;
+ }
+
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ goto unlock;
+
+ if (pdrv->policy_attr) {
+ pe->node_type = get_policy_node_type(pdrv->policy_attr);
+ if (!pe->node_type)
+ goto unlock;
+ }
+
+ list_add_tail(&pe->entry, &stm_pdrv_head);
+ pe->pdrv = pdrv;
+
+ ret = 0;
+unlock:
+ mutex_unlock(&stm_pdrv_mutex);
+
+ if (ret)
+ kfree(pe);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stm_register_protocol);
+
+void stm_unregister_protocol(const struct stm_protocol_driver *pdrv)
+{
+ struct stm_pdrv_entry *pe, *iter;
+
+ mutex_lock(&stm_pdrv_mutex);
+
+ list_for_each_entry_safe(pe, iter, &stm_pdrv_head, entry) {
+ if (pe->pdrv == pdrv) {
+ list_del(&pe->entry);
+
+ if (pe->node_type) {
+ kfree(pe->node_type->ct_attrs);
+ kfree(pe->node_type);
+ }
+ kfree(pe);
+ break;
+ }
+ }
+
+ mutex_unlock(&stm_pdrv_mutex);
+}
+EXPORT_SYMBOL_GPL(stm_unregister_protocol);
+
+static bool stm_get_protocol(const struct stm_protocol_driver *pdrv)
+{
+ return try_module_get(pdrv->owner);
+}
+
+void stm_put_protocol(const struct stm_protocol_driver *pdrv)
+{
+ module_put(pdrv->owner);
+}
+
+int stm_lookup_protocol(const char *name,
+ const struct stm_protocol_driver **pdrv,
+ const struct config_item_type **node_type)
+{
+ const struct stm_pdrv_entry *pe;
+
+ mutex_lock(&stm_pdrv_mutex);
+
+ pe = __stm_lookup_protocol(name);
+ if (pe && pe->pdrv && stm_get_protocol(pe->pdrv)) {
+ *pdrv = pe->pdrv;
+ *node_type = pe->node_type;
+ }
+
+ mutex_unlock(&stm_pdrv_mutex);
+
+ return pe ? 0 : -ENOENT;
+}
+
static int stm_char_open(struct inode *inode, struct file *file)
{
struct stm_file *stmf;
@@ -405,42 +543,81 @@ static int stm_char_release(struct inode *inode, struct file *file)
return 0;
}
-static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
+static int
+stm_assign_first_policy(struct stm_device *stm, struct stm_output *output,
+ char **ids, unsigned int width)
{
- struct stm_device *stm = stmf->stm;
- int ret;
+ struct stp_policy_node *pn;
+ int err, n;
- stmf->policy_node = stp_policy_node_lookup(stm, id);
+ /*
+ * On success, stp_policy_node_lookup() will return holding the
+ * configfs subsystem mutex, which is then released in
+ * stp_policy_node_put(). This allows the pdrv->output_open() in
+ * stm_output_assign() to serialize against the attribute accessors.
+ */
+ for (n = 0, pn = NULL; ids[n] && !pn; n++)
+ pn = stp_policy_node_lookup(stm, ids[n]);
- ret = stm_output_assign(stm, width, stmf->policy_node, &stmf->output);
+ if (!pn)
+ return -EINVAL;
- if (stmf->policy_node)
- stp_policy_node_put(stmf->policy_node);
+ err = stm_output_assign(stm, width, pn, output);
- return ret;
+ stp_policy_node_put(pn);
+
+ return err;
}
-static ssize_t notrace stm_write(struct stm_data *data, unsigned int master,
- unsigned int channel, const char *buf, size_t count)
+/**
+ * stm_data_write() - send the given payload as data packets
+ * @data: stm driver's data
+ * @m: STP master
+ * @c: STP channel
+ * @ts_first: timestamp the first packet
+ * @buf: data payload buffer
+ * @count: data payload size
+ */
+ssize_t notrace stm_data_write(struct stm_data *data, unsigned int m,
+ unsigned int c, bool ts_first, const void *buf,
+ size_t count)
{
- unsigned int flags = STP_PACKET_TIMESTAMPED;
- const unsigned char *p = buf, nil = 0;
- size_t pos;
+ unsigned int flags = ts_first ? STP_PACKET_TIMESTAMPED : 0;
ssize_t sz;
+ size_t pos;
- for (pos = 0, p = buf; count > pos; pos += sz, p += sz) {
+ for (pos = 0, sz = 0; pos < count; pos += sz) {
sz = min_t(unsigned int, count - pos, 8);
- sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
- sz, p);
- flags = 0;
-
- if (sz < 0)
+ sz = data->packet(data, m, c, STP_PACKET_DATA, flags, sz,
+ &((u8 *)buf)[pos]);
+ if (sz <= 0)
break;
+
+ if (ts_first) {
+ flags = 0;
+ ts_first = false;
+ }
}
- data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
+ return sz < 0 ? sz : pos;
+}
+EXPORT_SYMBOL_GPL(stm_data_write);
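
To make the framing rule concrete, a hedged walk-through of the loop above:

/*
 * stm_data_write(data, m, c, true, buf, 20) emits three STP DATA
 * packets of 8, 8 and 4 bytes; only the first one carries
 * STP_PACKET_TIMESTAMPED (because ts_first == true). The return
 * value is 20, unless data->packet() fails mid-stream, in which
 * case its error code is returned even if earlier packets made it
 * out.
 */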
+
+static ssize_t notrace
+stm_write(struct stm_device *stm, struct stm_output *output,
+ unsigned int chan, const char *buf, size_t count)
+{
+ int err;
+
+ /* stm->pdrv is serialized against policy_mutex */
+ if (!stm->pdrv)
+ return -ENODEV;
+
+ err = stm->pdrv->write(stm->data, output, chan, buf, count);
+ if (err < 0)
+ return err;
- return pos;
+ return err;
}
static ssize_t stm_char_write(struct file *file, const char __user *buf,
@@ -455,16 +632,21 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
count = PAGE_SIZE - 1;
/*
- * if no m/c have been assigned to this writer up to this
- * point, use "default" policy entry
+ * If no m/c have been assigned to this writer up to this
+ * point, try to use the task name and "default" policy entries.
*/
if (!stmf->output.nr_chans) {
- err = stm_file_assign(stmf, "default", 1);
+ char comm[sizeof(current->comm)];
+ char *ids[] = { comm, "default", NULL };
+
+ get_task_comm(comm, current);
+
+ err = stm_assign_first_policy(stmf->stm, &stmf->output, ids, 1);
/*
* EBUSY means that somebody else just assigned this
* output, which is just fine for write()
*/
- if (err && err != -EBUSY)
+ if (err)
return err;
}
@@ -480,8 +662,7 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
pm_runtime_get_sync(&stm->dev);
- count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
- kbuf, count);
+ count = stm_write(stm, &stmf->output, 0, kbuf, count);
pm_runtime_mark_last_busy(&stm->dev);
pm_runtime_put_autosuspend(&stm->dev);
@@ -550,6 +731,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
{
struct stm_device *stm = stmf->stm;
struct stp_policy_id *id;
+ char *ids[] = { NULL, NULL };
int ret = -EINVAL;
u32 size;
@@ -582,7 +764,9 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
id->width > PAGE_SIZE / stm->data->sw_mmiosz)
goto err_free;
- ret = stm_file_assign(stmf, id->id, id->width);
+ ids[0] = id->id;
+ ret = stm_assign_first_policy(stmf->stm, &stmf->output, ids,
+ id->width);
if (ret)
goto err_free;
@@ -818,8 +1002,8 @@ EXPORT_SYMBOL_GPL(stm_unregister_device);
static int stm_source_link_add(struct stm_source_device *src,
struct stm_device *stm)
{
- char *id;
- int err;
+ char *ids[] = { NULL, "default", NULL };
+ int err = -ENOMEM;
mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
@@ -833,19 +1017,13 @@ static int stm_source_link_add(struct stm_source_device *src,
spin_unlock(&stm->link_lock);
mutex_unlock(&stm->link_mutex);
- id = kstrdup(src->data->name, GFP_KERNEL);
- if (id) {
- src->policy_node =
- stp_policy_node_lookup(stm, id);
-
- kfree(id);
- }
-
- err = stm_output_assign(stm, src->data->nr_chans,
- src->policy_node, &src->output);
+ ids[0] = kstrdup(src->data->name, GFP_KERNEL);
+ if (!ids[0])
+ goto fail_detach;
- if (src->policy_node)
- stp_policy_node_put(src->policy_node);
+ err = stm_assign_first_policy(stm, &src->output, ids,
+ src->data->nr_chans);
+ kfree(ids[0]);
if (err)
goto fail_detach;
@@ -1134,9 +1312,7 @@ int notrace stm_source_write(struct stm_source_data *data,
stm = srcu_dereference(src->link, &stm_source_srcu);
if (stm)
- count = stm_write(stm->data, src->output.master,
- src->output.channel + chan,
- buf, count);
+ count = stm_write(stm, &src->output, chan, buf, count);
else
count = -ENODEV;
@@ -1163,7 +1339,15 @@ static int __init stm_core_init(void)
goto err_src;
init_srcu_struct(&stm_source_srcu);
+ INIT_LIST_HEAD(&stm_pdrv_head);
+ mutex_init(&stm_pdrv_mutex);
+ /*
+ * So as to not confuse existing users with a requirement
+ * to load yet another module, do it here.
+ */
+ if (IS_ENABLED(CONFIG_STM_PROTO_BASIC))
+ (void)request_module_nowait("stm_p_basic");
stm_core_up++;
return 0;
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index 7db42395e131..3e7df1c0477f 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -76,7 +76,7 @@ static int stm_heartbeat_init(void)
goto fail_unregister;
stm_heartbeat[i].data.nr_chans = 1;
- stm_heartbeat[i].data.link = stm_heartbeat_link;
+ stm_heartbeat[i].data.link = stm_heartbeat_link;
stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
diff --git a/drivers/hwtracing/stm/p_basic.c b/drivers/hwtracing/stm/p_basic.c
new file mode 100644
index 000000000000..8980a6a5fd6c
--- /dev/null
+++ b/drivers/hwtracing/stm/p_basic.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic framing protocol for STM devices.
+ * Copyright (c) 2018, Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/stm.h>
+#include "stm.h"
+
+static ssize_t basic_write(struct stm_data *data, struct stm_output *output,
+ unsigned int chan, const char *buf, size_t count)
+{
+ unsigned int c = output->channel + chan;
+ unsigned int m = output->master;
+ const unsigned char nil = 0;
+ ssize_t sz;
+
+ sz = stm_data_write(data, m, c, true, buf, count);
+ if (sz > 0)
+ data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
+
+ return sz;
+}
+
+static const struct stm_protocol_driver basic_pdrv = {
+ .owner = THIS_MODULE,
+ .name = "p_basic",
+ .write = basic_write,
+};
+
+static int basic_stm_init(void)
+{
+ return stm_register_protocol(&basic_pdrv);
+}
+
+static void basic_stm_exit(void)
+{
+ stm_unregister_protocol(&basic_pdrv);
+}
+
+module_init(basic_stm_init);
+module_exit(basic_stm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Basic STM framing protocol driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/stm/p_sys-t.c b/drivers/hwtracing/stm/p_sys-t.c
new file mode 100644
index 000000000000..b178a5495b67
--- /dev/null
+++ b/drivers/hwtracing/stm/p_sys-t.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPI SyS-T framing protocol for STM devices.
+ * Copyright (c) 2018, Intel Corporation.
+ */
+
+#include <linux/configfs.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/stm.h>
+#include "stm.h"
+
+enum sys_t_message_type {
+ MIPI_SYST_TYPE_BUILD = 0,
+ MIPI_SYST_TYPE_SHORT32,
+ MIPI_SYST_TYPE_STRING,
+ MIPI_SYST_TYPE_CATALOG,
+ MIPI_SYST_TYPE_RAW = 6,
+ MIPI_SYST_TYPE_SHORT64,
+ MIPI_SYST_TYPE_CLOCK,
+};
+
+enum sys_t_message_severity {
+ MIPI_SYST_SEVERITY_MAX = 0,
+ MIPI_SYST_SEVERITY_FATAL,
+ MIPI_SYST_SEVERITY_ERROR,
+ MIPI_SYST_SEVERITY_WARNING,
+ MIPI_SYST_SEVERITY_INFO,
+ MIPI_SYST_SEVERITY_USER1,
+ MIPI_SYST_SEVERITY_USER2,
+ MIPI_SYST_SEVERITY_DEBUG,
+};
+
+enum sys_t_message_build_subtype {
+ MIPI_SYST_BUILD_ID_COMPACT32 = 0,
+ MIPI_SYST_BUILD_ID_COMPACT64,
+ MIPI_SYST_BUILD_ID_LONG,
+};
+
+enum sys_t_message_clock_subtype {
+ MIPI_SYST_CLOCK_TRANSPORT_SYNC = 1,
+};
+
+enum sys_t_message_string_subtype {
+ MIPI_SYST_STRING_GENERIC = 1,
+ MIPI_SYST_STRING_FUNCTIONENTER,
+ MIPI_SYST_STRING_FUNCTIONEXIT,
+ MIPI_SYST_STRING_INVALIDPARAM = 5,
+ MIPI_SYST_STRING_ASSERT = 7,
+ MIPI_SYST_STRING_PRINTF_32 = 11,
+ MIPI_SYST_STRING_PRINTF_64 = 12,
+};
+
+#define MIPI_SYST_TYPE(t) ((u32)(MIPI_SYST_TYPE_ ## t))
+#define MIPI_SYST_SEVERITY(s) ((u32)(MIPI_SYST_SEVERITY_ ## s) << 4)
+#define MIPI_SYST_OPT_LOC BIT(8)
+#define MIPI_SYST_OPT_LEN BIT(9)
+#define MIPI_SYST_OPT_CHK BIT(10)
+#define MIPI_SYST_OPT_TS BIT(11)
+#define MIPI_SYST_UNIT(u) ((u32)(u) << 12)
+#define MIPI_SYST_ORIGIN(o) ((u32)(o) << 16)
+#define MIPI_SYST_OPT_GUID BIT(23)
+#define MIPI_SYST_SUBTYPE(s) ((u32)(MIPI_SYST_ ## s) << 24)
+#define MIPI_SYST_UNITLARGE(u) (MIPI_SYST_UNIT(u & 0xf) | \
+ MIPI_SYST_ORIGIN(u >> 4))
+#define MIPI_SYST_TYPES(t, s) (MIPI_SYST_TYPE(t) | \
+ MIPI_SYST_SUBTYPE(t ## _ ## s))
+
+#define DATA_HEADER (MIPI_SYST_TYPES(STRING, GENERIC) | \
+ MIPI_SYST_SEVERITY(INFO) | \
+ MIPI_SYST_OPT_GUID)
+
+#define CLOCK_SYNC_HEADER (MIPI_SYST_TYPES(CLOCK, TRANSPORT_SYNC) | \
+ MIPI_SYST_SEVERITY(MAX))
+
+struct sys_t_policy_node {
+ uuid_t uuid;
+ bool do_len;
+ unsigned long ts_interval;
+ unsigned long clocksync_interval;
+};
+
+struct sys_t_output {
+ struct sys_t_policy_node node;
+ unsigned long ts_jiffies;
+ unsigned long clocksync_jiffies;
+};
+
+static void sys_t_policy_node_init(void *priv)
+{
+ struct sys_t_policy_node *pn = priv;
+
+ generate_random_uuid(pn->uuid.b);
+}
+
+static int sys_t_output_open(void *priv, struct stm_output *output)
+{
+ struct sys_t_policy_node *pn = priv;
+ struct sys_t_output *opriv;
+
+ opriv = kzalloc(sizeof(*opriv), GFP_ATOMIC);
+ if (!opriv)
+ return -ENOMEM;
+
+ memcpy(&opriv->node, pn, sizeof(opriv->node));
+ output->pdrv_private = opriv;
+
+ return 0;
+}
+
+static void sys_t_output_close(struct stm_output *output)
+{
+ kfree(output->pdrv_private);
+}
+
+static ssize_t sys_t_policy_uuid_show(struct config_item *item,
+ char *page)
+{
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+
+ return sprintf(page, "%pU\n", &pn->uuid);
+}
+
+static ssize_t
+sys_t_policy_uuid_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+ int ret;
+
+ mutex_lock(mutexp);
+ ret = uuid_parse(page, &pn->uuid);
+ mutex_unlock(mutexp);
+
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, uuid);
+
+static ssize_t sys_t_policy_do_len_show(struct config_item *item,
+ char *page)
+{
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+
+ return sprintf(page, "%d\n", pn->do_len);
+}
+
+static ssize_t
+sys_t_policy_do_len_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+ int ret;
+
+ mutex_lock(mutexp);
+ ret = kstrtobool(page, &pn->do_len);
+ mutex_unlock(mutexp);
+
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, do_len);
+
+static ssize_t sys_t_policy_ts_interval_show(struct config_item *item,
+ char *page)
+{
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+
+ return sprintf(page, "%u\n", jiffies_to_msecs(pn->ts_interval));
+}
+
+static ssize_t
+sys_t_policy_ts_interval_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+ unsigned int ms;
+ int ret;
+
+ mutex_lock(mutexp);
+ ret = kstrtouint(page, 10, &ms);
+ mutex_unlock(mutexp);
+
+ if (!ret) {
+ pn->ts_interval = msecs_to_jiffies(ms);
+ return count;
+ }
+
+ return ret;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, ts_interval);
+
+static ssize_t sys_t_policy_clocksync_interval_show(struct config_item *item,
+ char *page)
+{
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+
+ return sprintf(page, "%u\n", jiffies_to_msecs(pn->clocksync_interval));
+}
+
+static ssize_t
+sys_t_policy_clocksync_interval_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+ unsigned int ms;
+ int ret;
+
+ mutex_lock(mutexp);
+ ret = kstrtouint(page, 10, &ms);
+ mutex_unlock(mutexp);
+
+ if (!ret) {
+ pn->clocksync_interval = msecs_to_jiffies(ms);
+ return count;
+ }
+
+ return ret;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, clocksync_interval);
+
+static struct configfs_attribute *sys_t_policy_attrs[] = {
+ &sys_t_policy_attr_uuid,
+ &sys_t_policy_attr_do_len,
+ &sys_t_policy_attr_ts_interval,
+ &sys_t_policy_attr_clocksync_interval,
+ NULL,
+};
+
+static inline bool sys_t_need_ts(struct sys_t_output *op)
+{
+	/* A timestamp is due once at least ts_interval has elapsed */
+	if (op->node.ts_interval &&
+	    time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) {
+ op->ts_jiffies = jiffies;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool sys_t_need_clock_sync(struct sys_t_output *op)
+{
+	/* A new CLOCKSYNC is due once clocksync_interval has elapsed */
+	if (op->node.clocksync_interval &&
+	    time_after(jiffies,
+		       op->clocksync_jiffies + op->node.clocksync_interval)) {
+ op->clocksync_jiffies = jiffies;
+
+ return true;
+ }
+
+ return false;
+}
+
+static ssize_t
+sys_t_clock_sync(struct stm_data *data, unsigned int m, unsigned int c)
+{
+ u32 header = CLOCK_SYNC_HEADER;
+ const unsigned char nil = 0;
+ u64 payload[2]; /* Clock value and frequency */
+ ssize_t sz;
+
+ sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED,
+ 4, (u8 *)&header);
+ if (sz <= 0)
+ return sz;
+
+ payload[0] = ktime_get_real_ns();
+ payload[1] = NSEC_PER_SEC;
+ sz = stm_data_write(data, m, c, false, &payload, sizeof(payload));
+ if (sz <= 0)
+ return sz;
+
+ data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
+
+ return sizeof(header) + sizeof(payload);
+}
+
+static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
+ unsigned int chan, const char *buf, size_t count)
+{
+ struct sys_t_output *op = output->pdrv_private;
+ unsigned int c = output->channel + chan;
+ unsigned int m = output->master;
+ const unsigned char nil = 0;
+ u32 header = DATA_HEADER;
+ ssize_t sz;
+
+ /* We require an existing policy node to proceed */
+ if (!op)
+ return -EINVAL;
+
+ if (sys_t_need_clock_sync(op)) {
+ sz = sys_t_clock_sync(data, m, c);
+ if (sz <= 0)
+ return sz;
+ }
+
+ if (op->node.do_len)
+ header |= MIPI_SYST_OPT_LEN;
+ if (sys_t_need_ts(op))
+ header |= MIPI_SYST_OPT_TS;
+
+ /*
+ * STP framing rules for SyS-T frames:
+ * * the first packet of the SyS-T frame is timestamped;
+ * * the last packet is a FLAG.
+ */
+ /* Message layout: HEADER / GUID / [LENGTH /][TIMESTAMP /] DATA */
+ /* HEADER */
+ sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED,
+ 4, (u8 *)&header);
+ if (sz <= 0)
+ return sz;
+
+ /* GUID */
+ sz = stm_data_write(data, m, c, false, op->node.uuid.b, UUID_SIZE);
+ if (sz <= 0)
+ return sz;
+
+ /* [LENGTH] */
+ if (op->node.do_len) {
+ u16 length = count;
+
+ sz = data->packet(data, m, c, STP_PACKET_DATA, 0, 2,
+ (u8 *)&length);
+ if (sz <= 0)
+ return sz;
+ }
+
+ /* [TIMESTAMP] */
+ if (header & MIPI_SYST_OPT_TS) {
+ u64 ts = ktime_get_real_ns();
+
+ sz = stm_data_write(data, m, c, false, &ts, sizeof(ts));
+ if (sz <= 0)
+ return sz;
+ }
+
+ /* DATA */
+ sz = stm_data_write(data, m, c, false, buf, count);
+ if (sz > 0)
+ data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
+
+ return sz;
+}
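
A hedged walk-through of the framing above: with do_len set and a timestamp due, a 4-byte write becomes

/*
 *	DATA, timestamped:  4-byte header (DATA_HEADER | OPT_LEN | OPT_TS)
 *	DATA x 2:           16-byte GUID from the policy node
 *	DATA:               2-byte payload length
 *	DATA:               8-byte ktime_get_real_ns() timestamp
 *	DATA:               4-byte payload
 *	FLAG:               closes the SyS-T frame
 */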
+
+static const struct stm_protocol_driver sys_t_pdrv = {
+ .owner = THIS_MODULE,
+ .name = "p_sys-t",
+ .priv_sz = sizeof(struct sys_t_policy_node),
+ .write = sys_t_write,
+ .policy_attr = sys_t_policy_attrs,
+ .policy_node_init = sys_t_policy_node_init,
+ .output_open = sys_t_output_open,
+ .output_close = sys_t_output_close,
+};
+
+static int sys_t_stm_init(void)
+{
+ return stm_register_protocol(&sys_t_pdrv);
+}
+
+static void sys_t_stm_exit(void)
+{
+ stm_unregister_protocol(&sys_t_pdrv);
+}
+
+module_init(sys_t_stm_init);
+module_exit(sys_t_stm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MIPI SyS-T STM framing protocol driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
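
A hedged usage sketch, with illustrative names: creating a policy directory such as "dummy_stm.0:p_sys-t.my" binds the stm device to this protocol, and every policy node under it then exposes the uuid, do_len, ts_interval and clocksync_interval attributes defined above; writing, say, 1000 to clocksync_interval makes sys_t_write() inject a CLOCK_TRANSPORT_SYNC frame at most once per second on that output.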
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 3fd07e275b34..0910ec807187 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -33,8 +33,18 @@ struct stp_policy_node {
unsigned int last_master;
unsigned int first_channel;
unsigned int last_channel;
+ /* this is the one that's exposed to the attributes */
+ unsigned char priv[0];
};
+void *stp_policy_node_priv(struct stp_policy_node *pn)
+{
+ if (!pn)
+ return NULL;
+
+ return pn->priv;
+}
+
static struct configfs_subsystem stp_policy_subsys;
void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
@@ -68,6 +78,14 @@ to_stp_policy_node(struct config_item *item)
NULL;
}
+void *to_pdrv_policy_node(struct config_item *item)
+{
+ struct stp_policy_node *node = to_stp_policy_node(item);
+
+ return stp_policy_node_priv(node);
+}
+EXPORT_SYMBOL_GPL(to_pdrv_policy_node);
+
static ssize_t
stp_policy_node_masters_show(struct config_item *item, char *page)
{
@@ -163,7 +181,9 @@ unlock:
static void stp_policy_node_release(struct config_item *item)
{
- kfree(to_stp_policy_node(item));
+ struct stp_policy_node *node = to_stp_policy_node(item);
+
+ kfree(node);
}
static struct configfs_item_operations stp_policy_node_item_ops = {
@@ -182,10 +202,34 @@ static struct configfs_attribute *stp_policy_node_attrs[] = {
static const struct config_item_type stp_policy_type;
static const struct config_item_type stp_policy_node_type;
+const struct config_item_type *
+get_policy_node_type(struct configfs_attribute **attrs)
+{
+ struct config_item_type *type;
+ struct configfs_attribute **merged;
+
+ type = kmemdup(&stp_policy_node_type, sizeof(stp_policy_node_type),
+ GFP_KERNEL);
+ if (!type)
+ return NULL;
+
+ merged = memcat_p(stp_policy_node_attrs, attrs);
+ if (!merged) {
+ kfree(type);
+ return NULL;
+ }
+
+ type->ct_attrs = merged;
+
+ return type;
+}
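
The merge relies on memcat_p(), which concatenates two NULL-terminated pointer arrays into a newly allocated, NULL-terminated one; a hedged illustration with made-up attributes:

/*
 *	static struct configfs_attribute *base[]  = { &attr_a, NULL };
 *	static struct configfs_attribute *extra[] = { &attr_b, NULL };
 *
 *	merged = memcat_p(base, extra);   // => { &attr_a, &attr_b, NULL }
 */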
+
static struct config_group *
stp_policy_node_make(struct config_group *group, const char *name)
{
+ const struct config_item_type *type = &stp_policy_node_type;
struct stp_policy_node *policy_node, *parent_node;
+ const struct stm_protocol_driver *pdrv;
struct stp_policy *policy;
if (group->cg_item.ci_type == &stp_policy_type) {
@@ -199,12 +243,20 @@ stp_policy_node_make(struct config_group *group, const char *name)
if (!policy->stm)
return ERR_PTR(-ENODEV);
- policy_node = kzalloc(sizeof(struct stp_policy_node), GFP_KERNEL);
+ pdrv = policy->stm->pdrv;
+ policy_node =
+ kzalloc(offsetof(struct stp_policy_node, priv[pdrv->priv_sz]),
+ GFP_KERNEL);
if (!policy_node)
return ERR_PTR(-ENOMEM);
- config_group_init_type_name(&policy_node->group, name,
- &stp_policy_node_type);
+ if (pdrv->policy_node_init)
+ pdrv->policy_node_init((void *)policy_node->priv);
+
+ if (policy->stm->pdrv_node_type)
+ type = policy->stm->pdrv_node_type;
+
+ config_group_init_type_name(&policy_node->group, name, type);
policy_node->policy = policy;
@@ -254,8 +306,25 @@ static ssize_t stp_policy_device_show(struct config_item *item,
CONFIGFS_ATTR_RO(stp_policy_, device);
+static ssize_t stp_policy_protocol_show(struct config_item *item,
+ char *page)
+{
+ struct stp_policy *policy = to_stp_policy(item);
+ ssize_t count;
+
+ count = sprintf(page, "%s\n",
+ (policy && policy->stm) ?
+ policy->stm->pdrv->name :
+ "<none>");
+
+ return count;
+}
+
+CONFIGFS_ATTR_RO(stp_policy_, protocol);
+
static struct configfs_attribute *stp_policy_attrs[] = {
&stp_policy_attr_device,
+ &stp_policy_attr_protocol,
NULL,
};
@@ -276,6 +345,7 @@ void stp_policy_unbind(struct stp_policy *policy)
stm->policy = NULL;
policy->stm = NULL;
+ stm_put_protocol(stm->pdrv);
stm_put_device(stm);
}
@@ -311,11 +381,14 @@ static const struct config_item_type stp_policy_type = {
};
static struct config_group *
-stp_policies_make(struct config_group *group, const char *name)
+stp_policy_make(struct config_group *group, const char *name)
{
+ const struct config_item_type *pdrv_node_type;
+ const struct stm_protocol_driver *pdrv;
+ char *devname, *proto, *p;
struct config_group *ret;
struct stm_device *stm;
- char *devname, *p;
+ int err;
devname = kasprintf(GFP_KERNEL, "%s", name);
if (!devname)
@@ -326,6 +399,7 @@ stp_policies_make(struct config_group *group, const char *name)
* <device_name> is the name of an existing stm device; may
* contain dots;
* <policy_name> is an arbitrary string; may not contain dots
+ * <device_name>:<protocol_name>.<policy_name>
*/
p = strrchr(devname, '.');
if (!p) {
@@ -335,11 +409,28 @@ stp_policies_make(struct config_group *group, const char *name)
*p = '\0';
+ /*
+ * look for ":<protocol_name>":
+ * + no protocol suffix: fall back to whatever is available;
+ * + unknown protocol: fail the whole thing
+ */
+ proto = strrchr(devname, ':');
+ if (proto)
+ *proto++ = '\0';
+
stm = stm_find_device(devname);
+ if (!stm) {
+ kfree(devname);
+ return ERR_PTR(-ENODEV);
+ }
+
+ err = stm_lookup_protocol(proto, &pdrv, &pdrv_node_type);
kfree(devname);
- if (!stm)
+ if (err) {
+ stm_put_device(stm);
return ERR_PTR(-ENODEV);
+ }
mutex_lock(&stm->policy_mutex);
if (stm->policy) {
@@ -349,31 +440,37 @@ stp_policies_make(struct config_group *group, const char *name)
stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL);
if (!stm->policy) {
- ret = ERR_PTR(-ENOMEM);
- goto unlock_policy;
+ mutex_unlock(&stm->policy_mutex);
+ stm_put_protocol(pdrv);
+ stm_put_device(stm);
+ return ERR_PTR(-ENOMEM);
}
config_group_init_type_name(&stm->policy->group, name,
&stp_policy_type);
- stm->policy->stm = stm;
+ stm->pdrv = pdrv;
+ stm->pdrv_node_type = pdrv_node_type;
+ stm->policy->stm = stm;
ret = &stm->policy->group;
unlock_policy:
mutex_unlock(&stm->policy_mutex);
- if (IS_ERR(ret))
+ if (IS_ERR(ret)) {
+ stm_put_protocol(stm->pdrv);
stm_put_device(stm);
+ }
return ret;
}
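
A hedged example of the naming scheme parsed above (names invented): "dummy_stm.0:p_sys-t.my-policy" selects the stm device "dummy_stm.0" and the "p_sys-t" protocol, and names the policy "my-policy"; "dummy_stm.0.my-policy" has no protocol suffix, so stm_lookup_protocol() falls back to "p_basic".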
-static struct configfs_group_operations stp_policies_group_ops = {
- .make_group = stp_policies_make,
+static struct configfs_group_operations stp_policy_root_group_ops = {
+ .make_group = stp_policy_make,
};
-static const struct config_item_type stp_policies_type = {
- .ct_group_ops = &stp_policies_group_ops,
+static const struct config_item_type stp_policy_root_type = {
+ .ct_group_ops = &stp_policy_root_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -381,7 +478,7 @@ static struct configfs_subsystem stp_policy_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "stp-policy",
- .ci_type = &stp_policies_type,
+ .ci_type = &stp_policy_root_type,
},
},
};
@@ -392,7 +489,7 @@ static struct configfs_subsystem stp_policy_subsys = {
static struct stp_policy_node *
__stp_policy_node_lookup(struct stp_policy *policy, char *s)
{
- struct stp_policy_node *policy_node, *ret;
+ struct stp_policy_node *policy_node, *ret = NULL;
struct list_head *head = &policy->group.cg_children;
struct config_item *item;
char *start, *end = s;
@@ -400,10 +497,6 @@ __stp_policy_node_lookup(struct stp_policy *policy, char *s)
if (list_empty(head))
return NULL;
- /* return the first entry if everything else fails */
- item = list_entry(head->next, struct config_item, ci_entry);
- ret = to_stp_policy_node(item);
-
next:
for (;;) {
start = strsep(&end, "/");
@@ -449,25 +542,25 @@ stp_policy_node_lookup(struct stm_device *stm, char *s)
if (policy_node)
config_item_get(&policy_node->group.cg_item);
- mutex_unlock(&stp_policy_subsys.su_mutex);
+ else
+ mutex_unlock(&stp_policy_subsys.su_mutex);
return policy_node;
}
void stp_policy_node_put(struct stp_policy_node *policy_node)
{
+ lockdep_assert_held(&stp_policy_subsys.su_mutex);
+
+ mutex_unlock(&stp_policy_subsys.su_mutex);
config_item_put(&policy_node->group.cg_item);
}
int __init stp_configfs_init(void)
{
- int err;
-
config_group_init(&stp_policy_subsys.su_group);
mutex_init(&stp_policy_subsys.su_mutex);
- err = configfs_register_subsystem(&stp_policy_subsys);
-
- return err;
+ return configfs_register_subsystem(&stp_policy_subsys);
}
void __exit stp_configfs_exit(void)
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h
index 923571adc6f4..3569439d53bb 100644
--- a/drivers/hwtracing/stm/stm.h
+++ b/drivers/hwtracing/stm/stm.h
@@ -10,20 +10,17 @@
#ifndef _STM_STM_H_
#define _STM_STM_H_
+#include <linux/configfs.h>
+
struct stp_policy;
struct stp_policy_node;
+struct stm_protocol_driver;
-struct stp_policy_node *
-stp_policy_node_lookup(struct stm_device *stm, char *s);
-void stp_policy_node_put(struct stp_policy_node *policy_node);
-void stp_policy_unbind(struct stp_policy *policy);
-
-void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
- unsigned int *mstart, unsigned int *mend,
- unsigned int *cstart, unsigned int *cend);
int stp_configfs_init(void);
void stp_configfs_exit(void);
+void *stp_policy_node_priv(struct stp_policy_node *pn);
+
struct stp_master {
unsigned int nr_free;
unsigned long chan_map[0];
@@ -40,6 +37,9 @@ struct stm_device {
struct mutex link_mutex;
spinlock_t link_lock;
struct list_head link_list;
+ /* framing protocol in use */
+ const struct stm_protocol_driver *pdrv;
+ const struct config_item_type *pdrv_node_type;
/* master allocation */
spinlock_t mc_lock;
struct stp_master *masters[0];
@@ -48,16 +48,28 @@ struct stm_device {
#define to_stm_device(_d) \
container_of((_d), struct stm_device, dev)
+struct stp_policy_node *
+stp_policy_node_lookup(struct stm_device *stm, char *s);
+void stp_policy_node_put(struct stp_policy_node *policy_node);
+void stp_policy_unbind(struct stp_policy *policy);
+
+void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
+ unsigned int *mstart, unsigned int *mend,
+ unsigned int *cstart, unsigned int *cend);
+
+const struct config_item_type *
+get_policy_node_type(struct configfs_attribute **attrs);
+
struct stm_output {
spinlock_t lock;
unsigned int master;
unsigned int channel;
unsigned int nr_chans;
+ void *pdrv_private;
};
struct stm_file {
struct stm_device *stm;
- struct stp_policy_node *policy_node;
struct stm_output output;
};
@@ -71,11 +83,35 @@ struct stm_source_device {
struct stm_device __rcu *link;
struct list_head link_entry;
/* one output per stm_source device */
- struct stp_policy_node *policy_node;
struct stm_output output;
};
#define to_stm_source_device(_d) \
container_of((_d), struct stm_source_device, dev)
+void *to_pdrv_policy_node(struct config_item *item);
+
+struct stm_protocol_driver {
+ struct module *owner;
+ const char *name;
+ ssize_t (*write)(struct stm_data *data,
+ struct stm_output *output, unsigned int chan,
+ const char *buf, size_t count);
+ void (*policy_node_init)(void *arg);
+ int (*output_open)(void *priv, struct stm_output *output);
+ void (*output_close)(struct stm_output *output);
+ ssize_t priv_sz;
+ struct configfs_attribute **policy_attr;
+};
+
+int stm_register_protocol(const struct stm_protocol_driver *pdrv);
+void stm_unregister_protocol(const struct stm_protocol_driver *pdrv);
+int stm_lookup_protocol(const char *name,
+ const struct stm_protocol_driver **pdrv,
+ const struct config_item_type **type);
+void stm_put_protocol(const struct stm_protocol_driver *pdrv);
+ssize_t stm_data_write(struct stm_data *data, unsigned int m,
+ unsigned int c, bool ts_first, const void *buf,
+ size_t count);
+
#endif /* _STM_STM_H_ */
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 80df5a377d30..cafb1dcadc48 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -693,16 +693,12 @@ static int __maybe_unused tiadc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
- struct ti_tscadc_dev *tscadc_dev;
unsigned int idle;
- tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
- if (!device_may_wakeup(tscadc_dev->dev)) {
- idle = tiadc_readl(adc_dev, REG_CTRL);
- idle &= ~(CNTRLREG_TSCSSENB);
- tiadc_writel(adc_dev, REG_CTRL, (idle |
- CNTRLREG_POWERDOWN));
- }
+ idle = tiadc_readl(adc_dev, REG_CTRL);
+ idle &= ~(CNTRLREG_TSCSSENB);
+ tiadc_writel(adc_dev, REG_CTRL, (idle |
+ CNTRLREG_POWERDOWN));
return 0;
}
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index abb6660c099c..0a3ec7c726ec 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -26,6 +26,7 @@ config INFINIBAND_USER_MAD
config INFINIBAND_USER_ACCESS
tristate "InfiniBand userspace access (verbs and CM)"
select ANON_INODES
+ depends on MMU
---help---
Userspace InfiniBand access support. This enables the
kernel side of userspace verbs and the userspace
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 46b855a42884..0dce94e3c495 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -45,6 +45,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
+#include <rdma/ib_sa.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
@@ -61,6 +62,7 @@ struct addr_req {
struct rdma_dev_addr *addr, void *context);
unsigned long timeout;
struct delayed_work work;
+ bool resolve_by_gid_attr; /* Consider gid attr in resolve phase */
int status;
u32 seq;
};
@@ -219,60 +221,75 @@ int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
}
EXPORT_SYMBOL(rdma_addr_size_kss);
-void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
- const struct net_device *dev,
- const unsigned char *dst_dev_addr)
+/**
+ * rdma_copy_src_l2_addr - Copy netdevice source addresses
+ * @dev_addr: Destination address pointer where to copy the addresses
+ * @dev: Netdevice whose source addresses to copy
+ *
+ * rdma_copy_src_l2_addr() copies source addresses from the specified netdevice.
+ * This includes unicast address, broadcast address, device type and
+ * interface index.
+ */
+void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev)
{
dev_addr->dev_type = dev->type;
memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
- if (dst_dev_addr)
- memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
dev_addr->bound_dev_if = dev->ifindex;
}
-EXPORT_SYMBOL(rdma_copy_addr);
+EXPORT_SYMBOL(rdma_copy_src_l2_addr);
-int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr)
+static struct net_device *
+rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
{
- struct net_device *dev;
+ struct net_device *dev = NULL;
+ int ret = -EADDRNOTAVAIL;
- if (dev_addr->bound_dev_if) {
- dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
- if (!dev)
- return -ENODEV;
- rdma_copy_addr(dev_addr, dev, NULL);
- dev_put(dev);
- return 0;
- }
-
- switch (addr->sa_family) {
+ switch (src_in->sa_family) {
case AF_INET:
- dev = ip_dev_find(dev_addr->net,
- ((const struct sockaddr_in *)addr)->sin_addr.s_addr);
-
- if (!dev)
- return -EADDRNOTAVAIL;
-
- rdma_copy_addr(dev_addr, dev, NULL);
- dev_put(dev);
+ dev = __ip_dev_find(net,
+ ((const struct sockaddr_in *)src_in)->sin_addr.s_addr,
+ false);
+ if (dev)
+ ret = 0;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- rcu_read_lock();
- for_each_netdev_rcu(dev_addr->net, dev) {
- if (ipv6_chk_addr(dev_addr->net,
- &((const struct sockaddr_in6 *)addr)->sin6_addr,
+ for_each_netdev_rcu(net, dev) {
+ if (ipv6_chk_addr(net,
+ &((const struct sockaddr_in6 *)src_in)->sin6_addr,
dev, 1)) {
- rdma_copy_addr(dev_addr, dev, NULL);
+ ret = 0;
break;
}
}
- rcu_read_unlock();
break;
#endif
}
- return 0;
+ return ret ? ERR_PTR(ret) : dev;
+}
+
+int rdma_translate_ip(const struct sockaddr *addr,
+ struct rdma_dev_addr *dev_addr)
+{
+ struct net_device *dev;
+
+ if (dev_addr->bound_dev_if) {
+ dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ if (!dev)
+ return -ENODEV;
+ rdma_copy_src_l2_addr(dev_addr, dev);
+ dev_put(dev);
+ return 0;
+ }
+
+ rcu_read_lock();
+ dev = rdma_find_ndev_for_src_ip_rcu(dev_addr->net, addr);
+ if (!IS_ERR(dev))
+ rdma_copy_src_l2_addr(dev_addr, dev);
+ rcu_read_unlock();
+ return PTR_ERR_OR_ZERO(dev);
}
EXPORT_SYMBOL(rdma_translate_ip);
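
A hedged caller sketch (the address and function name are illustrative); dev_addr->net is assumed to have been set up by the caller:

static int example_fill_src(struct rdma_dev_addr *dev_addr)
{
	struct sockaddr_in src = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};

	/* With bound_dev_if unset, the address itself picks the netdev */
	return rdma_translate_ip((struct sockaddr *)&src, dev_addr);
}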
@@ -295,15 +312,12 @@ static void queue_req(struct addr_req *req)
spin_unlock_bh(&lock);
}
-static int ib_nl_fetch_ha(const struct dst_entry *dst,
- struct rdma_dev_addr *dev_addr,
+static int ib_nl_fetch_ha(struct rdma_dev_addr *dev_addr,
const void *daddr, u32 seq, u16 family)
{
- if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
+ if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
return -EADDRNOTAVAIL;
- /* We fill in what we can, the response will fill the rest */
- rdma_copy_addr(dev_addr, dst->dev, NULL);
return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
}
@@ -322,7 +336,7 @@ static int dst_fetch_ha(const struct dst_entry *dst,
neigh_event_send(n, NULL);
ret = -ENODATA;
} else {
- rdma_copy_addr(dev_addr, dst->dev, n->ha);
+ memcpy(dev_addr->dst_dev_addr, n->ha, MAX_ADDR_LEN);
}
neigh_release(n);
@@ -356,18 +370,22 @@ static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
(const void *)&dst_in6->sin6_addr;
sa_family_t family = dst_in->sa_family;
- /* Gateway + ARPHRD_INFINIBAND -> IB router */
- if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND)
- return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family);
+ /* If we have a gateway in IB mode then it must be an IB network */
+ if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
+ return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
else
return dst_fetch_ha(dst, dev_addr, daddr);
}
-static int addr4_resolve(struct sockaddr_in *src_in,
- const struct sockaddr_in *dst_in,
+static int addr4_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
struct rdma_dev_addr *addr,
struct rtable **prt)
{
+ struct sockaddr_in *src_in = (struct sockaddr_in *)src_sock;
+ const struct sockaddr_in *dst_in =
+ (const struct sockaddr_in *)dst_sock;
+
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
struct rtable *rt;
@@ -383,16 +401,8 @@ static int addr4_resolve(struct sockaddr_in *src_in,
if (ret)
return ret;
- src_in->sin_family = AF_INET;
src_in->sin_addr.s_addr = fl4.saddr;
- /* If there's a gateway and type of device not ARPHRD_INFINIBAND, we're
- * definitely in RoCE v2 (as RoCE v1 isn't routable) set the network
- * type accordingly.
- */
- if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND)
- addr->network = RDMA_NETWORK_IPV4;
-
addr->hoplimit = ip4_dst_hoplimit(&rt->dst);
*prt = rt;
@@ -400,14 +410,16 @@ static int addr4_resolve(struct sockaddr_in *src_in,
}
#if IS_ENABLED(CONFIG_IPV6)
-static int addr6_resolve(struct sockaddr_in6 *src_in,
- const struct sockaddr_in6 *dst_in,
+static int addr6_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
struct rdma_dev_addr *addr,
struct dst_entry **pdst)
{
+ struct sockaddr_in6 *src_in = (struct sockaddr_in6 *)src_sock;
+ const struct sockaddr_in6 *dst_in =
+ (const struct sockaddr_in6 *)dst_sock;
struct flowi6 fl6;
struct dst_entry *dst;
- struct rt6_info *rt;
int ret;
memset(&fl6, 0, sizeof fl6);
@@ -419,19 +431,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
if (ret < 0)
return ret;
- rt = (struct rt6_info *)dst;
- if (ipv6_addr_any(&src_in->sin6_addr)) {
- src_in->sin6_family = AF_INET6;
+ if (ipv6_addr_any(&src_in->sin6_addr))
src_in->sin6_addr = fl6.saddr;
- }
-
- /* If there's a gateway and type of device not ARPHRD_INFINIBAND, we're
- * definitely in RoCE v2 (as RoCE v1 isn't routable) set the network
- * type accordingly.
- */
- if (rt->rt6i_flags & RTF_GATEWAY &&
- ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND)
- addr->network = RDMA_NETWORK_IPV6;
addr->hoplimit = ip6_dst_hoplimit(dst);
@@ -439,8 +440,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
return 0;
}
#else
-static int addr6_resolve(struct sockaddr_in6 *src_in,
- const struct sockaddr_in6 *dst_in,
+static int addr6_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
struct rdma_dev_addr *addr,
struct dst_entry **pdst)
{
@@ -451,36 +452,110 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
static int addr_resolve_neigh(const struct dst_entry *dst,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
+ unsigned int ndev_flags,
u32 seq)
{
- if (dst->dev->flags & IFF_LOOPBACK) {
- int ret;
+ int ret = 0;
- ret = rdma_translate_ip(dst_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, addr->src_dev_addr,
- MAX_ADDR_LEN);
+ if (ndev_flags & IFF_LOOPBACK) {
+ memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
+ } else {
+ if (!(ndev_flags & IFF_NOARP)) {
+ /* If the device doesn't do ARP internally, fetch the HA */
+ ret = fetch_ha(dst, addr, dst_in, seq);
+ }
+ }
+ return ret;
+}
- return ret;
+static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct sockaddr *dst_in,
+ const struct dst_entry *dst,
+ const struct net_device *ndev)
+{
+ int ret = 0;
+
+ if (dst->dev->flags & IFF_LOOPBACK)
+ ret = rdma_translate_ip(dst_in, dev_addr);
+ else
+ rdma_copy_src_l2_addr(dev_addr, dst->dev);
+
+ /*
+ * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set
+ * the network type accordingly.
+ */
+ if (has_gateway(dst, dst_in->sa_family) &&
+ ndev->type != ARPHRD_INFINIBAND)
+ dev_addr->network = dst_in->sa_family == AF_INET ?
+ RDMA_NETWORK_IPV4 :
+ RDMA_NETWORK_IPV6;
+ else
+ dev_addr->network = RDMA_NETWORK_IB;
+
+ return ret;
+}
+
+static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
+ unsigned int *ndev_flags,
+ const struct sockaddr *dst_in,
+ const struct dst_entry *dst)
+{
+ struct net_device *ndev = READ_ONCE(dst->dev);
+
+ *ndev_flags = ndev->flags;
+ /* A physical device must be the RDMA device to use */
+ if (ndev->flags & IFF_LOOPBACK) {
+ /*
+ * RDMA (IB/RoCE, iWARP) doesn't run on the lo interface or a
+ * loopback IP address. So if the route resolves to the loopback
+ * interface, translate that to a real ndev based on the
+ * non-loopback IP address.
+ */
+ ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
+ if (IS_ERR(ndev))
+ return -ENODEV;
}
- /* If the device doesn't do ARP internally */
- if (!(dst->dev->flags & IFF_NOARP))
- return fetch_ha(dst, addr, dst_in, seq);
+ return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
+}
+
+static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
+{
+ struct net_device *ndev;
- rdma_copy_addr(addr, dst->dev, NULL);
+ ndev = rdma_read_gid_attr_ndev_rcu(addr->sgid_attr);
+ if (IS_ERR(ndev))
+ return PTR_ERR(ndev);
+ /*
+ * Since we are holding the RCU read lock, reading net and ifindex
+ * is safe without any additional reference, because
+ * change_net_namespace() in net/core/dev.c does an RCU sync
+ * after it changes the state to IFF_DOWN and before
+ * updating the netdev fields {net, ifindex}.
+ */
+ addr->net = dev_net(ndev);
+ addr->bound_dev_if = ndev->ifindex;
return 0;
}
+static void rdma_addr_set_net_defaults(struct rdma_dev_addr *addr)
+{
+ addr->net = &init_net;
+ addr->bound_dev_if = 0;
+}
+
static int addr_resolve(struct sockaddr *src_in,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
bool resolve_neigh,
+ bool resolve_by_gid_attr,
u32 seq)
{
- struct net_device *ndev;
- struct dst_entry *dst;
+ struct dst_entry *dst = NULL;
+ unsigned int ndev_flags = 0;
+ struct rtable *rt = NULL;
int ret;
if (!addr->net) {
@@ -488,58 +563,55 @@ static int addr_resolve(struct sockaddr *src_in,
return -EINVAL;
}
- if (src_in->sa_family == AF_INET) {
- struct rtable *rt = NULL;
- const struct sockaddr_in *dst_in4 =
- (const struct sockaddr_in *)dst_in;
-
- ret = addr4_resolve((struct sockaddr_in *)src_in,
- dst_in4, addr, &rt);
- if (ret)
- return ret;
-
- if (resolve_neigh)
- ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
-
- if (addr->bound_dev_if) {
- ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
- } else {
- ndev = rt->dst.dev;
- dev_hold(ndev);
+ rcu_read_lock();
+ if (resolve_by_gid_attr) {
+ if (!addr->sgid_attr) {
+ rcu_read_unlock();
+ pr_warn_ratelimited("%s: missing gid_attr\n", __func__);
+ return -EINVAL;
}
-
- ip_rt_put(rt);
- } else {
- const struct sockaddr_in6 *dst_in6 =
- (const struct sockaddr_in6 *)dst_in;
-
- ret = addr6_resolve((struct sockaddr_in6 *)src_in,
- dst_in6, addr,
- &dst);
- if (ret)
+ /*
+ * If the request is for a specific gid attribute of the
+ * rdma_dev_addr, derive net from the netdevice of the
+ * GID attribute.
+ */
+ ret = set_addr_netns_by_gid_rcu(addr);
+ if (ret) {
+ rcu_read_unlock();
return ret;
-
- if (resolve_neigh)
- ret = addr_resolve_neigh(dst, dst_in, addr, seq);
-
- if (addr->bound_dev_if) {
- ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
- } else {
- ndev = dst->dev;
- dev_hold(ndev);
}
-
- dst_release(dst);
}
-
- if (ndev) {
- if (ndev->flags & IFF_LOOPBACK)
- ret = rdma_translate_ip(dst_in, addr);
- else
- addr->bound_dev_if = ndev->ifindex;
- dev_put(ndev);
+ if (src_in->sa_family == AF_INET) {
+ ret = addr4_resolve(src_in, dst_in, addr, &rt);
+ dst = &rt->dst;
+ } else {
+ ret = addr6_resolve(src_in, dst_in, addr, &dst);
}
+ if (ret) {
+ rcu_read_unlock();
+ goto done;
+ }
+ ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
+ rcu_read_unlock();
+
+ /*
+ * Resolve the neighbor's destination address if requested,
+ * and only if source address translation didn't fail.
+ */
+ if (!ret && resolve_neigh)
+ ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);
+ if (src_in->sa_family == AF_INET)
+ ip_rt_put(rt);
+ else
+ dst_release(dst);
+done:
+ /*
+ * Reset the addr net to its original state, but only if it was
+ * derived from the GID attribute in this context.
+ */
+ if (resolve_by_gid_attr)
+ rdma_addr_set_net_defaults(addr);
return ret;
}
@@ -554,7 +626,8 @@ static void process_one_req(struct work_struct *_work)
src_in = (struct sockaddr *)&req->src_addr;
dst_in = (struct sockaddr *)&req->dst_addr;
req->status = addr_resolve(src_in, dst_in, req->addr,
- true, req->seq);
+ true, req->resolve_by_gid_attr,
+ req->seq);
if (req->status && time_after_eq(jiffies, req->timeout)) {
req->status = -ETIMEDOUT;
} else if (req->status == -ENODATA) {
@@ -586,10 +659,10 @@ static void process_one_req(struct work_struct *_work)
}
int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr, int timeout_ms,
+ struct rdma_dev_addr *addr, unsigned long timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
- void *context)
+ bool resolve_by_gid_attr, void *context)
{
struct sockaddr *src_in, *dst_in;
struct addr_req *req;
@@ -617,10 +690,12 @@ int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
req->addr = addr;
req->callback = callback;
req->context = context;
+ req->resolve_by_gid_attr = resolve_by_gid_attr;
INIT_DELAYED_WORK(&req->work, process_one_req);
req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
- req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
+ req->status = addr_resolve(src_in, dst_in, addr, true,
+ req->resolve_by_gid_attr, req->seq);
switch (req->status) {
case 0:
req->timeout = jiffies;
@@ -641,25 +716,53 @@ err:
}
EXPORT_SYMBOL(rdma_resolve_ip);
-int rdma_resolve_ip_route(struct sockaddr *src_addr,
- const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr)
+int roce_resolve_route_from_path(struct sa_path_rec *rec,
+ const struct ib_gid_attr *attr)
{
- struct sockaddr_storage ssrc_addr = {};
- struct sockaddr *src_in = (struct sockaddr *)&ssrc_addr;
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid, dgid;
+ struct rdma_dev_addr dev_addr = {};
+ int ret;
- if (src_addr) {
- if (src_addr->sa_family != dst_addr->sa_family)
- return -EINVAL;
+ if (rec->roce.route_resolved)
+ return 0;
- memcpy(src_in, src_addr, rdma_addr_size(src_addr));
- } else {
- src_in->sa_family = dst_addr->sa_family;
- }
+ rdma_gid2ip(&sgid._sockaddr, &rec->sgid);
+ rdma_gid2ip(&dgid._sockaddr, &rec->dgid);
+
+ if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
+ return -EINVAL;
+
+ if (!attr || !attr->ndev)
+ return -EINVAL;
+
+ dev_addr.net = &init_net;
+ dev_addr.sgid_attr = attr;
+
+ ret = addr_resolve(&sgid._sockaddr, &dgid._sockaddr,
+ &dev_addr, false, true, 0);
+ if (ret)
+ return ret;
+
+ if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
+ dev_addr.network == RDMA_NETWORK_IPV6) &&
+ rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
+ return -EINVAL;
- return addr_resolve(src_in, dst_addr, addr, false, 0);
+ rec->roce.route_resolved = true;
+ return 0;
}
+/**
+ * rdma_addr_cancel - Cancel a pending resolve IP request
+ * @addr: Pointer to the address structure previously supplied to
+ * rdma_resolve_ip().
+ *
+ * rdma_addr_cancel() is a synchronous function which cancels any pending
+ * request, if one exists.
+ */
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
struct addr_req *req, *temp_req;
@@ -687,11 +790,6 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
* guarantees no work is running and none will be started.
*/
cancel_delayed_work_sync(&found->work);
-
- if (found->callback)
- found->callback(-ECANCELED, (struct sockaddr *)&found->src_addr,
- found->addr, found->context);
-
kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);
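A sketch of the updated asynchronous usage, assuming the hypothetical names my_done() and my_ctx; note that after this patch a cancelled request no longer invokes the callback with -ECANCELED:

static void my_done(int status, struct sockaddr *src,
		    struct rdma_dev_addr *addr, void *context)
{
	if (status)
		pr_debug("resolve failed: %d\n", status);
}

static void example_cancel(struct sockaddr *src, const struct sockaddr *dst,
			   struct rdma_dev_addr *addr, void *my_ctx)
{
	/* timeout in ms; resolve_by_gid_attr = false */
	if (!rdma_resolve_ip(src, dst, addr, 1000, my_done, false, my_ctx))
		rdma_addr_cancel(addr);	/* synchronous, no callback fires */
}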
@@ -710,7 +808,7 @@ static void resolve_cb(int status, struct sockaddr *src_addr,
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *dmac, const struct net_device *ndev,
+ u8 *dmac, const struct ib_gid_attr *sgid_attr,
int *hoplimit)
{
struct rdma_dev_addr dev_addr;
@@ -726,12 +824,12 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
rdma_gid2ip(&dgid_addr._sockaddr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
- dev_addr.bound_dev_if = ndev->ifindex;
dev_addr.net = &init_net;
+ dev_addr.sgid_attr = sgid_attr;
init_completion(&ctx.comp);
ret = rdma_resolve_ip(&sgid_addr._sockaddr, &dgid_addr._sockaddr,
- &dev_addr, 1000, resolve_cb, &ctx);
+ &dev_addr, 1000, resolve_cb, true, &ctx);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3208ad6ad540..5b2fce4a7091 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -212,9 +212,8 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
u8 port_num = entry->attr.port_num;
struct ib_gid_table *table = rdma_gid_table(device, port_num);
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- device->name, port_num, entry->attr.index,
- entry->attr.gid.raw);
+ dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
+ port_num, entry->attr.index, entry->attr.gid.raw);
if (rdma_cap_roce_gid_table(device, port_num) &&
entry->state != GID_TABLE_ENTRY_INVALID)
@@ -289,9 +288,9 @@ static void store_gid_entry(struct ib_gid_table *table,
{
entry->state = GID_TABLE_ENTRY_VALID;
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- entry->attr.device->name, entry->attr.port_num,
- entry->attr.index, entry->attr.gid.raw);
+ dev_dbg(&entry->attr.device->dev, "%s port=%d index=%d gid %pI6\n",
+ __func__, entry->attr.port_num, entry->attr.index,
+ entry->attr.gid.raw);
lockdep_assert_held(&table->lock);
write_lock_irq(&table->rwlock);
@@ -320,17 +319,16 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
int ret;
if (!attr->ndev) {
- pr_err("%s NULL netdev device=%s port=%d index=%d\n",
- __func__, attr->device->name, attr->port_num,
- attr->index);
+ dev_err(&attr->device->dev, "%s NULL netdev port=%d index=%d\n",
+ __func__, attr->port_num, attr->index);
return -EINVAL;
}
if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
ret = attr->device->add_gid(attr, &entry->context);
if (ret) {
- pr_err("%s GID add failed device=%s port=%d index=%d\n",
- __func__, attr->device->name, attr->port_num,
- attr->index);
+ dev_err(&attr->device->dev,
+ "%s GID add failed port=%d index=%d\n",
+ __func__, attr->port_num, attr->index);
return ret;
}
}
@@ -353,9 +351,8 @@ static void del_gid(struct ib_device *ib_dev, u8 port,
lockdep_assert_held(&table->lock);
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- ib_dev->name, port, ix,
- table->data_vec[ix]->attr.gid.raw);
+ dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
+ ix, table->data_vec[ix]->attr.gid.raw);
write_lock_irq(&table->rwlock);
entry = table->data_vec[ix];
@@ -782,9 +779,9 @@ static void release_gid_table(struct ib_device *device, u8 port,
if (is_gid_entry_free(table->data_vec[i]))
continue;
if (kref_read(&table->data_vec[i]->kref) > 1) {
- pr_err("GID entry ref leak for %s (index %d) ref=%d\n",
- device->name, i,
- kref_read(&table->data_vec[i]->kref));
+ dev_err(&device->dev,
+ "GID entry ref leak for index %d ref=%d\n", i,
+ kref_read(&table->data_vec[i]->kref));
leak = true;
}
}
@@ -1252,6 +1249,39 @@ void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
}
EXPORT_SYMBOL(rdma_hold_gid_attr);
+/**
+ * rdma_read_gid_attr_ndev_rcu - Read the netdevice of a GID attribute,
+ * which must be in UP state.
+ *
+ * @attr: Pointer to the GID attribute
+ *
+ * Returns a pointer to the netdevice if a netdevice was attached to the
+ * GID and the netdevice is in UP state. The caller must hold the RCU read
+ * lock, as this API reads the netdev flags, which can change while the
+ * netdevice migrates to a different net namespace. Returns ERR_PTR with
+ * an error code otherwise.
+ */
+struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(attr, struct ib_gid_table_entry, attr);
+ struct ib_device *device = entry->attr.device;
+ struct net_device *ndev = ERR_PTR(-ENODEV);
+ u8 port_num = entry->attr.port_num;
+ struct ib_gid_table *table;
+ unsigned long flags;
+ bool valid;
+
+ table = rdma_gid_table(device, port_num);
+
+ read_lock_irqsave(&table->rwlock, flags);
+ valid = is_gid_entry_valid(table->data_vec[attr->index]);
+ if (valid && attr->ndev && (READ_ONCE(attr->ndev->flags) & IFF_UP))
+ ndev = attr->ndev;
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return ndev;
+}
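A minimal sketch of the calling convention the kernel-doc above requires; the returned ndev is only valid inside the RCU read-side critical section:

static int example_gid_ndev_ifindex(const struct ib_gid_attr *attr)
{
	struct net_device *ndev;
	int ifindex = -1;

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(attr);
	if (!IS_ERR(ndev))
		ifindex = ndev->ifindex;	/* safe only under RCU */
	rcu_read_unlock();
	return ifindex;
}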
+
static int config_non_roce_gid_cache(struct ib_device *device,
u8 port, int gid_tbl_len)
{
@@ -1270,8 +1300,9 @@ static int config_non_roce_gid_cache(struct ib_device *device,
continue;
ret = device->query_gid(device, port, i, &gid_attr.gid);
if (ret) {
- pr_warn("query_gid failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ dev_warn(&device->dev,
+ "query_gid failed (%d) for index %d\n", ret,
+ i);
goto err;
}
gid_attr.index = i;
@@ -1300,8 +1331,7 @@ static void ib_cache_update(struct ib_device *device,
ret = ib_query_port(device, port, tprops);
if (ret) {
- pr_warn("ib_query_port failed (%d) for %s\n",
- ret, device->name);
+ dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
goto err;
}
@@ -1323,8 +1353,9 @@ static void ib_cache_update(struct ib_device *device,
for (i = 0; i < pkey_cache->table_len; ++i) {
ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
if (ret) {
- pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ dev_warn(&device->dev,
+ "ib_query_pkey failed (%d) for index %d\n",
+ ret, i);
goto err;
}
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 6e39c27dca8e..edb2cb758be7 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3292,8 +3292,11 @@ static int cm_lap_handler(struct cm_work *work)
if (ret)
goto unlock;
- cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av,
- cm_id_priv);
+ ret = cm_init_av_by_path(param->alternate_path, NULL,
+ &cm_id_priv->alt_av, cm_id_priv);
+ if (ret)
+ goto unlock;
+
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
@@ -4367,7 +4370,7 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev->going_down = 0;
cm_dev->device = device_create(&cm_class, &ib_device->dev,
MKDEV(0, 0), NULL,
- "%s", ib_device->name);
+ "%s", dev_name(&ib_device->dev));
if (IS_ERR(cm_dev->device)) {
kfree(cm_dev);
return;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a36c94930c31..15d5bb7bf6bb 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -639,13 +639,21 @@ static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}
-static int cma_acquire_dev(struct rdma_id_private *id_priv,
- const struct rdma_id_private *listen_id_priv)
+/**
+ * cma_acquire_dev_by_src_ip - Acquire cma device, port and GID attribute
+ * based on the source IP address.
+ * @id_priv: cm_id which should be bound to a cma device
+ *
+ * cma_acquire_dev_by_src_ip() binds the cm_id to a cma device, port and GID
+ * attribute based on the source IP address. It returns 0 on success, or an
+ * error code otherwise. It is applicable to both active and passive side
+ * cm_ids.
+ */
+static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
const struct ib_gid_attr *sgid_attr;
- struct cma_device *cma_dev;
union ib_gid gid, iboe_gid, *gidp;
+ struct cma_device *cma_dev;
enum ib_gid_type gid_type;
int ret = -ENODEV;
u8 port;
@@ -654,41 +662,125 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
id_priv->id.ps == RDMA_PS_IPOIB)
return -EINVAL;
- mutex_lock(&lock);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&iboe_gid);
memcpy(&gid, dev_addr->src_dev_addr +
- rdma_addr_gid_offset(dev_addr), sizeof gid);
-
- if (listen_id_priv) {
- cma_dev = listen_id_priv->cma_dev;
- port = listen_id_priv->id.port_num;
- gidp = rdma_protocol_roce(cma_dev->device, port) ?
- &iboe_gid : &gid;
- gid_type = listen_id_priv->gid_type;
- sgid_attr = cma_validate_port(cma_dev->device, port,
- gid_type, gidp, id_priv);
- if (!IS_ERR(sgid_attr)) {
- id_priv->id.port_num = port;
- cma_bind_sgid_attr(id_priv, sgid_attr);
- ret = 0;
- goto out;
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ mutex_lock(&lock);
+ list_for_each_entry(cma_dev, &dev_list, list) {
+ for (port = rdma_start_port(cma_dev->device);
+ port <= rdma_end_port(cma_dev->device); port++) {
+ gidp = rdma_protocol_roce(cma_dev->device, port) ?
+ &iboe_gid : &gid;
+ gid_type = cma_dev->default_gid_type[port - 1];
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, gidp, id_priv);
+ if (!IS_ERR(sgid_attr)) {
+ id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ cma_attach_to_dev(id_priv, cma_dev);
+ ret = 0;
+ goto out;
+ }
}
}
+out:
+ mutex_unlock(&lock);
+ return ret;
+}
+
+/**
+ * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
+ * @id_priv: cm_id to bind to a cma device
+ * @listen_id_priv: listener cm_id to match against
+ * @req: Pointer to the req structure containing incoming
+ * request information
+ *
+ * cma_ib_acquire_dev() acquires a cma device, port and SGID attribute when
+ * the rdma device matches for the listen_id and the incoming request. It
+ * also verifies that a GID table entry is present for the source address.
+ * Returns 0 on success, or an error code otherwise.
+ */
+static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
+ const struct rdma_id_private *listen_id_priv,
+ struct cma_req_info *req)
+{
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr;
+ enum ib_gid_type gid_type;
+ union ib_gid gid;
+
+ if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
+ if (rdma_protocol_roce(req->device, req->port))
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &gid);
+ else
+ memcpy(&gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
+ sgid_attr = cma_validate_port(req->device, req->port,
+ gid_type, &gid, id_priv);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
+
+ id_priv->id.port_num = req->port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ /* Need to acquire the lock to protect against readers
+ * of cma_dev->id_list, such as cma_netdev_callback() and
+ * cma_process_remove().
+ */
+ mutex_lock(&lock);
+ cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
+ mutex_unlock(&lock);
+ return 0;
+}
+
+static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
+ const struct rdma_id_private *listen_id_priv)
+{
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr;
+ struct cma_device *cma_dev;
+ enum ib_gid_type gid_type;
+ int ret = -ENODEV;
+ union ib_gid gid;
+ u8 port;
+
+ if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
+ memcpy(&gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ mutex_lock(&lock);
+
+ cma_dev = listen_id_priv->cma_dev;
+ port = listen_id_priv->id.port_num;
+ gid_type = listen_id_priv->gid_type;
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, &gid, id_priv);
+ if (!IS_ERR(sgid_attr)) {
+ id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ ret = 0;
+ goto out;
+ }
list_for_each_entry(cma_dev, &dev_list, list) {
for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
- if (listen_id_priv &&
- listen_id_priv->cma_dev == cma_dev &&
+ if (listen_id_priv->cma_dev == cma_dev &&
listen_id_priv->id.port_num == port)
continue;
- gidp = rdma_protocol_roce(cma_dev->device, port) ?
- &iboe_gid : &gid;
gid_type = cma_dev->default_gid_type[port - 1];
sgid_attr = cma_validate_port(cma_dev->device, port,
- gid_type, gidp, id_priv);
+ gid_type, &gid, id_priv);
if (!IS_ERR(sgid_attr)) {
id_priv->id.port_num = port;
cma_bind_sgid_attr(id_priv, sgid_attr);
@@ -785,10 +877,7 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
if (!id_priv)
return ERR_PTR(-ENOMEM);
- if (caller)
- id_priv->res.kern_name = caller;
- else
- rdma_restrack_set_task(&id_priv->res, current);
+ rdma_restrack_set_task(&id_priv->res, caller);
id_priv->res.type = RDMA_RESTRACK_CM_ID;
id_priv->state = RDMA_CM_IDLE;
id_priv->id.context = context;
@@ -1462,18 +1551,35 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id)
return rdma_protocol_roce(device, port_num);
}
+static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
+{
+ const struct sockaddr *daddr =
+ (const struct sockaddr *)&req->listen_addr_storage;
+ const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
+
+ /* Returns true if the req is for an IPv6 link-local address */
+ return (daddr->sa_family == AF_INET6 &&
+ (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
+}
+
static bool cma_match_net_dev(const struct rdma_cm_id *id,
const struct net_device *net_dev,
- u8 port_num)
+ const struct cma_req_info *req)
{
const struct rdma_addr *addr = &id->route.addr;
if (!net_dev)
/* This request is an AF_IB request */
- return (!id->port_num || id->port_num == port_num) &&
+ return (!id->port_num || id->port_num == req->port) &&
(addr->src_addr.ss_family == AF_IB);
/*
+ * If the request is not for an IPv6 link-local address, allow
+ * matching the request to any netdevice of a single- or multi-port
+ * rdma device.
+ */
+ if (!cma_is_req_ipv6_ll(req))
+ return true;
+ /*
* Net namespaces must match, and if the listener is listening
* on a specific netdevice then that netdevice must match as well.
*/
@@ -1500,13 +1606,14 @@ static struct rdma_id_private *cma_find_listener(
hlist_for_each_entry(id_priv, &bind_list->owners, node) {
if (cma_match_private_data(id_priv, ib_event->private_data)) {
if (id_priv->id.device == cm_id->device &&
- cma_match_net_dev(&id_priv->id, net_dev, req->port))
+ cma_match_net_dev(&id_priv->id, net_dev, req))
return id_priv;
list_for_each_entry(id_priv_dev,
&id_priv->listen_list,
listen_list) {
if (id_priv_dev->id.device == cm_id->device &&
- cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
+ cma_match_net_dev(&id_priv_dev->id,
+ net_dev, req))
return id_priv_dev;
}
}
@@ -1518,18 +1625,18 @@ static struct rdma_id_private *cma_find_listener(
static struct rdma_id_private *
cma_ib_id_from_event(struct ib_cm_id *cm_id,
const struct ib_cm_event *ib_event,
+ struct cma_req_info *req,
struct net_device **net_dev)
{
- struct cma_req_info req;
struct rdma_bind_list *bind_list;
struct rdma_id_private *id_priv;
int err;
- err = cma_save_req_info(ib_event, &req);
+ err = cma_save_req_info(ib_event, req);
if (err)
return ERR_PTR(err);
- *net_dev = cma_get_net_dev(ib_event, &req);
+ *net_dev = cma_get_net_dev(ib_event, req);
if (IS_ERR(*net_dev)) {
if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
/* Assuming the protocol is AF_IB */
@@ -1567,17 +1674,17 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
if (!validate_net_dev(*net_dev,
- (struct sockaddr *)&req.listen_addr_storage,
- (struct sockaddr *)&req.src_addr_storage)) {
+ (struct sockaddr *)&req->listen_addr_storage,
+ (struct sockaddr *)&req->src_addr_storage)) {
id_priv = ERR_PTR(-EHOSTUNREACH);
goto err;
}
}
bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
- rdma_ps_from_service_id(req.service_id),
- cma_port_from_service_id(req.service_id));
- id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
+ rdma_ps_from_service_id(req->service_id),
+ cma_port_from_service_id(req->service_id));
+ id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
rcu_read_unlock();
if (IS_ERR(id_priv) && *net_dev) {
@@ -1710,8 +1817,8 @@ void rdma_destroy_id(struct rdma_cm_id *id)
mutex_lock(&id_priv->handler_mutex);
mutex_unlock(&id_priv->handler_mutex);
+ rdma_restrack_del(&id_priv->res);
if (id_priv->cma_dev) {
- rdma_restrack_del(&id_priv->res);
if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -1902,7 +2009,7 @@ cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
if (net_dev) {
- rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
+ rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
} else {
if (!cma_protocol_roce(listen_id) &&
cma_any_addr(cma_src_addr(id_priv))) {
@@ -1952,7 +2059,7 @@ cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
goto err;
if (net_dev) {
- rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
+ rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
} else {
if (!cma_any_addr(cma_src_addr(id_priv))) {
ret = cma_translate_addr(cma_src_addr(id_priv),
@@ -1999,11 +2106,12 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
{
struct rdma_id_private *listen_id, *conn_id = NULL;
struct rdma_cm_event event = {};
+ struct cma_req_info req = {};
struct net_device *net_dev;
u8 offset;
int ret;
- listen_id = cma_ib_id_from_event(cm_id, ib_event, &net_dev);
+ listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
if (IS_ERR(listen_id))
return PTR_ERR(listen_id);
@@ -2036,7 +2144,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
}
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
- ret = cma_acquire_dev(conn_id, listen_id);
+ ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
if (ret)
goto err2;
@@ -2232,7 +2340,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
goto out;
}
- ret = cma_acquire_dev(conn_id, listen_id);
+ ret = cma_iw_acquire_dev(conn_id, listen_id);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -2354,8 +2462,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
ret = rdma_listen(id, id_priv->backlog);
if (ret)
- pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
- ret, cma_dev->device->name);
+ dev_warn(&cma_dev->device->dev,
+ "RDMA CMA: cma_listen_on_dev, error %d\n", ret);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -2402,8 +2510,8 @@ static void cma_query_handler(int status, struct sa_path_rec *path_rec,
queue_work(cma_wq, &work->work);
}
-static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
- struct cma_work *work)
+static int cma_query_ib_route(struct rdma_id_private *id_priv,
+ unsigned long timeout_ms, struct cma_work *work)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct sa_path_rec path_rec;
@@ -2521,7 +2629,8 @@ static void cma_init_resolve_addr_work(struct cma_work *work,
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
}
-static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
+static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
+ unsigned long timeout_ms)
{
struct rdma_route *route = &id_priv->id.route;
struct cma_work *work;
@@ -2643,7 +2752,7 @@ err:
}
EXPORT_SYMBOL(rdma_set_ib_path);
-static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
+static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
{
struct cma_work *work;
@@ -2744,7 +2853,7 @@ err1:
return ret;
}
-int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
+int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv;
int ret;
@@ -2759,7 +2868,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
else if (rdma_protocol_roce(id->device, id->port_num))
ret = cma_resolve_iboe_route(id_priv);
else if (rdma_protocol_iwarp(id->device, id->port_num))
- ret = cma_resolve_iw_route(id_priv, timeout_ms);
+ ret = cma_resolve_iw_route(id_priv);
else
ret = -ENOSYS;
@@ -2862,7 +2971,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
if (!status && !id_priv->cma_dev) {
- status = cma_acquire_dev(id_priv, NULL);
+ status = cma_acquire_dev_by_src_ip(id_priv);
if (status)
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
status);
@@ -2882,13 +2991,11 @@ static void addr_handler(int status, struct sockaddr *src_addr,
if (id_priv->id.event_handler(&id_priv->id, &event)) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
rdma_destroy_id(&id_priv->id);
return;
}
out:
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
@@ -2966,7 +3073,7 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- const struct sockaddr *dst_addr, int timeout_ms)
+ const struct sockaddr *dst_addr, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv;
int ret;
@@ -2985,16 +3092,16 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
return -EINVAL;
memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
- atomic_inc(&id_priv->refcount);
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
} else {
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
- ret = rdma_resolve_ip(cma_src_addr(id_priv),
- dst_addr, &id->route.addr.dev_addr,
- timeout_ms, addr_handler, id_priv);
+ ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
+ &id->route.addr.dev_addr,
+ timeout_ms, addr_handler,
+ false, id_priv);
}
}
if (ret)
@@ -3003,7 +3110,6 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
return 0;
err:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
- cma_deref_id(id_priv);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
@@ -3414,7 +3520,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err1;
- ret = cma_acquire_dev(id_priv, NULL);
+ ret = cma_acquire_dev_by_src_ip(id_priv);
if (ret)
goto err1;
}
@@ -3439,10 +3545,9 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
return 0;
err2:
- if (id_priv->cma_dev) {
- rdma_restrack_del(&id_priv->res);
+ rdma_restrack_del(&id_priv->res);
+ if (id_priv->cma_dev)
cma_release_dev(id_priv);
- }
err1:
cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
return ret;
@@ -3839,10 +3944,7 @@ int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
id_priv = container_of(id, struct rdma_id_private, id);
- if (caller)
- id_priv->res.kern_name = caller;
- else
- rdma_restrack_set_task(&id_priv->res, current);
+ rdma_restrack_set_task(&id_priv->res, caller);
if (!cma_comp(id_priv, RDMA_CM_CONNECT))
return -EINVAL;
@@ -4087,9 +4189,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
(!ib_sa_sendonly_fullmem_support(&sa_client,
id_priv->id.device,
id_priv->id.port_num))) {
- pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
- "RDMA CM: SM doesn't support Send Only Full Member option\n",
- id_priv->id.device->name, id_priv->id.port_num);
+ dev_warn(
+ &id_priv->id.device->dev,
+ "RDMA CM: port %u Unable to multicast join: SM doesn't support Send Only Full Member option\n",
+ id_priv->id.port_num);
return -EOPNOTSUPP;
}
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index eee38b40be99..8c2dfb3e294e 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -65,7 +65,7 @@ static struct cma_dev_port_group *to_dev_port_group(struct config_item *item)
static bool filter_by_name(struct ib_device *ib_dev, void *cookie)
{
- return !strcmp(ib_dev->name, cookie);
+ return !strcmp(dev_name(&ib_dev->dev), cookie);
}
static int cma_configfs_params_get(struct config_item *item,
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 77c7005c396c..bb9007a0cca7 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -44,7 +44,7 @@
#include "mad_priv.h"
/* Total number of ports combined across all struct ib_devices's */
-#define RDMA_MAX_PORTS 1024
+#define RDMA_MAX_PORTS 8192
struct pkey_index_qp_list {
struct list_head pkey_index_list;
@@ -87,6 +87,7 @@ int ib_device_register_sysfs(struct ib_device *device,
int (*port_callback)(struct ib_device *,
u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);
+int ib_device_rename(struct ib_device *ibdev, const char *name);
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
struct net_device *idev, void *cookie);
@@ -338,7 +339,14 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr,
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *dmac, const struct net_device *ndev,
+ u8 *dmac, const struct ib_gid_attr *sgid_attr,
int *hoplimit);
+void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev);
+struct sa_path_rec;
+int roce_resolve_route_from_path(struct sa_path_rec *rec,
+ const struct ib_gid_attr *attr);
+
+struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index af5ad6a56ae4..b1e5365ddafa 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -112,12 +112,12 @@ static void ib_cq_poll_work(struct work_struct *work)
IB_POLL_BATCH);
if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
- queue_work(ib_comp_wq, &cq->work);
+ queue_work(cq->comp_wq, &cq->work);
}
static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
- queue_work(ib_comp_wq, &cq->work);
+ queue_work(cq->comp_wq, &cq->work);
}
/**
@@ -161,7 +161,7 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
goto out_destroy_cq;
cq->res.type = RDMA_RESTRACK_CQ;
- cq->res.kern_name = caller;
+ rdma_restrack_set_task(&cq->res, caller);
rdma_restrack_add(&cq->res);
switch (cq->poll_ctx) {
@@ -175,9 +175,12 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
break;
case IB_POLL_WORKQUEUE:
+ case IB_POLL_UNBOUND_WORKQUEUE:
cq->comp_handler = ib_cq_completion_workqueue;
INIT_WORK(&cq->work, ib_cq_poll_work);
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
+ ib_comp_wq : ib_comp_unbound_wq;
break;
default:
ret = -EINVAL;
@@ -213,6 +216,7 @@ void ib_free_cq(struct ib_cq *cq)
irq_poll_disable(&cq->iop);
break;
case IB_POLL_WORKQUEUE:
+ case IB_POLL_UNBOUND_WORKQUEUE:
cancel_work_sync(&cq->work);
break;
default:
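A sketch of how a consumer might opt into the new unbound completion workqueue; the CQ size and comp_vector below are illustrative:

cq = ib_alloc_cq(device, priv, 128, 0, IB_POLL_UNBOUND_WORKQUEUE);
if (IS_ERR(cq))
	return PTR_ERR(cq);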
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index db3b6271f09d..87eb4f2cdd7d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -61,6 +61,7 @@ struct ib_client_data {
};
struct workqueue_struct *ib_comp_wq;
+struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
@@ -122,8 +123,9 @@ static int ib_device_check_mandatory(struct ib_device *device)
for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
- pr_warn("Device %s is missing mandatory function %s\n",
- device->name, mandatory_table[i].name);
+ dev_warn(&device->dev,
+ "Device is missing mandatory function %s\n",
+ mandatory_table[i].name);
return -EINVAL;
}
}
@@ -163,16 +165,40 @@ static struct ib_device *__ib_device_get_by_name(const char *name)
struct ib_device *device;
list_for_each_entry(device, &device_list, core_list)
- if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
+ if (!strcmp(name, dev_name(&device->dev)))
return device;
return NULL;
}
-static int alloc_name(char *name)
+int ib_device_rename(struct ib_device *ibdev, const char *name)
+{
+ struct ib_device *device;
+ int ret = 0;
+
+ if (!strcmp(name, dev_name(&ibdev->dev)))
+ return ret;
+
+ mutex_lock(&device_mutex);
+ list_for_each_entry(device, &device_list, core_list) {
+ if (!strcmp(name, dev_name(&device->dev))) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+
+ ret = device_rename(&ibdev->dev, name);
+ if (ret)
+ goto out;
+ strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
+out:
+ mutex_unlock(&device_mutex);
+ return ret;
+}
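A sketch of the rename semantics introduced here, with a hypothetical device name; the rename is refused while another device already holds that name:

ret = ib_device_rename(ibdev, "rocep8s0f0");
if (ret == -EEXIST)
	pr_debug("device name already in use\n");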
+
+static int alloc_name(struct ib_device *ibdev, const char *name)
{
unsigned long *inuse;
- char buf[IB_DEVICE_NAME_MAX];
struct ib_device *device;
int i;
@@ -181,24 +207,21 @@ static int alloc_name(char *name)
return -ENOMEM;
list_for_each_entry(device, &device_list, core_list) {
- if (!sscanf(device->name, name, &i))
+ char buf[IB_DEVICE_NAME_MAX];
+
+ if (sscanf(dev_name(&device->dev), name, &i) != 1)
continue;
if (i < 0 || i >= PAGE_SIZE * 8)
continue;
snprintf(buf, sizeof buf, name, i);
- if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
+ if (!strcmp(buf, dev_name(&device->dev)))
set_bit(i, inuse);
}
i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
free_page((unsigned long) inuse);
- snprintf(buf, sizeof buf, name, i);
- if (__ib_device_get_by_name(buf))
- return -ENFILE;
-
- strlcpy(name, buf, IB_DEVICE_NAME_MAX);
- return 0;
+ return dev_set_name(&ibdev->dev, name, i);
}
static void ib_device_release(struct device *device)
@@ -221,9 +244,7 @@ static void ib_device_release(struct device *device)
static int ib_device_uevent(struct device *device,
struct kobj_uevent_env *env)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
-
- if (add_uevent_var(env, "NAME=%s", dev->name))
+ if (add_uevent_var(env, "NAME=%s", dev_name(device)))
return -ENOMEM;
/*
@@ -269,7 +290,7 @@ struct ib_device *ib_alloc_device(size_t size)
INIT_LIST_HEAD(&device->event_handler_list);
spin_lock_init(&device->event_handler_lock);
- spin_lock_init(&device->client_data_lock);
+ rwlock_init(&device->client_data_lock);
INIT_LIST_HEAD(&device->client_data_list);
INIT_LIST_HEAD(&device->port_list);
@@ -285,6 +306,7 @@ EXPORT_SYMBOL(ib_alloc_device);
*/
void ib_dealloc_device(struct ib_device *device)
{
+ WARN_ON(!list_empty(&device->client_data_list));
WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
device->reg_state != IB_DEV_UNINITIALIZED);
rdma_restrack_clean(&device->res);
@@ -295,9 +317,8 @@ EXPORT_SYMBOL(ib_dealloc_device);
static int add_client_context(struct ib_device *device, struct ib_client *client)
{
struct ib_client_data *context;
- unsigned long flags;
- context = kmalloc(sizeof *context, GFP_KERNEL);
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
@@ -306,9 +327,9 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
context->going_down = false;
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
+ write_lock_irq(&device->client_data_lock);
list_add(&context->list, &device->client_data_list);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
up_write(&lists_rwsem);
return 0;
@@ -444,22 +465,8 @@ static u32 __dev_new_index(void)
}
}
-/**
- * ib_register_device - Register an IB device with IB core
- * @device:Device to register
- *
- * Low-level drivers use ib_register_device() to register their
- * devices with the IB core. All registered clients will receive a
- * callback for each device that is added. @device must be allocated
- * with ib_alloc_device().
- */
-int ib_register_device(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *))
+static void setup_dma_device(struct ib_device *device)
{
- int ret;
- struct ib_client *client;
- struct ib_udata uhw = {.outlen = 0, .inlen = 0};
struct device *parent = device->dev.parent;
WARN_ON_ONCE(device->dma_device);
@@ -491,56 +498,113 @@ int ib_register_device(struct ib_device *device,
WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
+}
- mutex_lock(&device_mutex);
+static void cleanup_device(struct ib_device *device)
+{
+ ib_cache_cleanup_one(device);
+ ib_cache_release_one(device);
+ kfree(device->port_pkey_list);
+ kfree(device->port_immutable);
+}
- if (strchr(device->name, '%')) {
- ret = alloc_name(device->name);
- if (ret)
- goto out;
- }
+static int setup_device(struct ib_device *device)
+{
+ struct ib_udata uhw = {.outlen = 0, .inlen = 0};
+ int ret;
- if (ib_device_check_mandatory(device)) {
- ret = -EINVAL;
- goto out;
- }
+ ret = ib_device_check_mandatory(device);
+ if (ret)
+ return ret;
ret = read_port_immutable(device);
if (ret) {
- pr_warn("Couldn't create per port immutable data %s\n",
- device->name);
- goto out;
+ dev_warn(&device->dev,
+ "Couldn't create per port immutable data\n");
+ return ret;
}
- ret = setup_port_pkey_list(device);
+ memset(&device->attrs, 0, sizeof(device->attrs));
+ ret = device->query_device(device, &device->attrs, &uhw);
if (ret) {
- pr_warn("Couldn't create per port_pkey_list\n");
- goto out;
+ dev_warn(&device->dev,
+ "Couldn't query the device attributes\n");
+ goto port_cleanup;
}
- ret = ib_cache_setup_one(device);
+ ret = setup_port_pkey_list(device);
if (ret) {
- pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
+ dev_warn(&device->dev, "Couldn't create per port_pkey_list\n");
goto port_cleanup;
}
- ret = ib_device_register_rdmacg(device);
+ ret = ib_cache_setup_one(device);
if (ret) {
- pr_warn("Couldn't register device with rdma cgroup\n");
- goto cache_cleanup;
+ dev_warn(&device->dev,
+ "Couldn't set up InfiniBand P_Key/GID cache\n");
+ goto pkey_cleanup;
+ }
+ return 0;
+
+pkey_cleanup:
+ kfree(device->port_pkey_list);
+port_cleanup:
+ kfree(device->port_immutable);
+ return ret;
+}
+
+/**
+ * ib_register_device - Register an IB device with IB core
+ * @device: Device to register
+ * @name: unique name for the device; may contain a '%' format
+ * template, in which case the core picks an instance number
+ * @port_callback: optional callback invoked for each port during
+ * sysfs registration
+ *
+ * Low-level drivers use ib_register_device() to register their
+ * devices with the IB core. All registered clients will receive a
+ * callback for each device that is added. @device must be allocated
+ * with ib_alloc_device().
+ */
+int ib_register_device(struct ib_device *device, const char *name,
+ int (*port_callback)(struct ib_device *, u8,
+ struct kobject *))
+{
+ int ret;
+ struct ib_client *client;
+
+ setup_dma_device(device);
+
+ mutex_lock(&device_mutex);
+
+ if (strchr(name, '%')) {
+ ret = alloc_name(device, name);
+ if (ret)
+ goto out;
+ } else {
+ ret = dev_set_name(&device->dev, name);
+ if (ret)
+ goto out;
+ }
+ if (__ib_device_get_by_name(dev_name(&device->dev))) {
+ ret = -ENFILE;
+ goto out;
}
+ strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
- memset(&device->attrs, 0, sizeof(device->attrs));
- ret = device->query_device(device, &device->attrs, &uhw);
+ ret = setup_device(device);
+ if (ret)
+ goto out;
+
+ device->index = __dev_new_index();
+
+ ret = ib_device_register_rdmacg(device);
if (ret) {
- pr_warn("Couldn't query the device attributes\n");
- goto cg_cleanup;
+ dev_warn(&device->dev,
+ "Couldn't register device with rdma cgroup\n");
+ goto dev_cleanup;
}
ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
- pr_warn("Couldn't register device %s with driver model\n",
- device->name);
+ dev_warn(&device->dev,
+ "Couldn't register device with driver model\n");
goto cg_cleanup;
}
@@ -550,7 +614,6 @@ int ib_register_device(struct ib_device *device,
if (!add_client_context(device, client) && client->add)
client->add(device);
- device->index = __dev_new_index();
down_write(&lists_rwsem);
list_add_tail(&device->core_list, &device_list);
up_write(&lists_rwsem);
@@ -559,11 +622,8 @@ int ib_register_device(struct ib_device *device,
cg_cleanup:
ib_device_unregister_rdmacg(device);
-cache_cleanup:
- ib_cache_cleanup_one(device);
- ib_cache_release_one(device);
-port_cleanup:
- kfree(device->port_immutable);
+dev_cleanup:
+ cleanup_device(device);
out:
mutex_unlock(&device_mutex);
return ret;
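A sketch of the new registration call, assuming a hypothetical driver prefix; passing a '%d' template lets the core pick the instance number via alloc_name():

ret = ib_register_device(device, "foo_%d", NULL);
if (ret)
	goto err_dealloc;	/* err_dealloc is a hypothetical label */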
@@ -585,21 +645,20 @@ void ib_unregister_device(struct ib_device *device)
down_write(&lists_rwsem);
list_del(&device->core_list);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ write_lock_irq(&device->client_data_lock);
+ list_for_each_entry(context, &device->client_data_list, list)
context->going_down = true;
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
downgrade_write(&lists_rwsem);
- list_for_each_entry_safe(context, tmp, &device->client_data_list,
- list) {
+ list_for_each_entry(context, &device->client_data_list, list) {
if (context->client->remove)
context->client->remove(device, context->data);
}
up_read(&lists_rwsem);
- ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
+ ib_device_unregister_rdmacg(device);
mutex_unlock(&device_mutex);
@@ -609,10 +668,13 @@ void ib_unregister_device(struct ib_device *device)
kfree(device->port_pkey_list);
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ write_lock_irqsave(&device->client_data_lock, flags);
+ list_for_each_entry_safe(context, tmp, &device->client_data_list,
+ list) {
+ list_del(&context->list);
kfree(context);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ }
+ write_unlock_irqrestore(&device->client_data_lock, flags);
up_write(&lists_rwsem);
device->reg_state = IB_DEV_UNREGISTERED;
@@ -662,9 +724,8 @@ EXPORT_SYMBOL(ib_register_client);
*/
void ib_unregister_client(struct ib_client *client)
{
- struct ib_client_data *context, *tmp;
+ struct ib_client_data *context;
struct ib_device *device;
- unsigned long flags;
mutex_lock(&device_mutex);
@@ -676,14 +737,14 @@ void ib_unregister_client(struct ib_client *client)
struct ib_client_data *found_context = NULL;
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ write_lock_irq(&device->client_data_lock);
+ list_for_each_entry(context, &device->client_data_list, list)
if (context->client == client) {
context->going_down = true;
found_context = context;
break;
}
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
up_write(&lists_rwsem);
if (client->remove)
@@ -691,17 +752,18 @@ void ib_unregister_client(struct ib_client *client)
found_context->data : NULL);
if (!found_context) {
- pr_warn("No client context found for %s/%s\n",
- device->name, client->name);
+ dev_warn(&device->dev,
+ "No client context found for %s\n",
+ client->name);
continue;
}
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
+ write_lock_irq(&device->client_data_lock);
list_del(&found_context->list);
- kfree(found_context);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
up_write(&lists_rwsem);
+ kfree(found_context);
}
mutex_unlock(&device_mutex);
@@ -722,13 +784,13 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
void *ret = NULL;
unsigned long flags;
- spin_lock_irqsave(&device->client_data_lock, flags);
+ read_lock_irqsave(&device->client_data_lock, flags);
list_for_each_entry(context, &device->client_data_list, list)
if (context->client == client) {
ret = context->data;
break;
}
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ read_unlock_irqrestore(&device->client_data_lock, flags);
return ret;
}
@@ -749,18 +811,18 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
struct ib_client_data *context;
unsigned long flags;
- spin_lock_irqsave(&device->client_data_lock, flags);
+ write_lock_irqsave(&device->client_data_lock, flags);
list_for_each_entry(context, &device->client_data_list, list)
if (context->client == client) {
context->data = data;
goto out;
}
- pr_warn("No client context found for %s/%s\n",
- device->name, client->name);
+ dev_warn(&device->dev, "No client context found for %s\n",
+ client->name);
out:
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
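A sketch of the client-data pattern these helpers serve, now under the rwlock introduced above; my_client and struct my_state are hypothetical:

static void my_add_one(struct ib_device *device)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return;
	ib_set_client_data(device, &my_client, st);
}

static void my_remove_one(struct ib_device *device, void *client_data)
{
	kfree(client_data);	/* pointer stored by my_add_one() */
}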
@@ -1166,10 +1228,19 @@ static int __init ib_core_init(void)
goto err;
}
+ ib_comp_unbound_wq =
+ alloc_workqueue("ib-comp-unb-wq",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
+ WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
+ if (!ib_comp_unbound_wq) {
+ ret = -ENOMEM;
+ goto err_comp;
+ }
+
ret = class_register(&ib_class);
if (ret) {
pr_warn("Couldn't create InfiniBand device class\n");
- goto err_comp;
+ goto err_comp_unbound;
}
ret = rdma_nl_init();
@@ -1218,6 +1289,8 @@ err_ibnl:
rdma_nl_exit();
err_sysfs:
class_unregister(&ib_class);
+err_comp_unbound:
+ destroy_workqueue(ib_comp_unbound_wq);
err_comp:
destroy_workqueue(ib_comp_wq);
err:
@@ -1236,6 +1309,7 @@ static void __exit ib_core_cleanup(void)
addr_cleanup();
rdma_nl_exit();
class_unregister(&ib_class);
+ destroy_workqueue(ib_comp_unbound_wq);
destroy_workqueue(ib_comp_wq);
/* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index a077500f7f32..83ba0068e8bb 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -213,7 +213,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
device = pd->device;
if (!device->alloc_fmr || !device->dealloc_fmr ||
!device->map_phys_fmr || !device->unmap_fmr) {
- pr_info(PFX "Device %s does not support FMRs\n", device->name);
+ dev_info(&device->dev, "Device does not support FMRs\n");
return ERR_PTR(-ENOSYS);
}
@@ -257,7 +257,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
atomic_set(&pool->flush_ser, 0);
init_waitqueue_head(&pool->force_wait);
- pool->worker = kthread_create_worker(0, "ib_fmr(%s)", device->name);
+ pool->worker =
+ kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
if (IS_ERR(pool->worker)) {
pr_warn(PFX "couldn't start cleanup kthread worker\n");
ret = PTR_ERR(pool->worker);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 5d676cff41f4..ba668d49c751 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -509,7 +509,7 @@ static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
cm_id->m_local_addr = cm_id->local_addr;
cm_id->m_remote_addr = cm_id->remote_addr;
- memcpy(pm_reg_msg.dev_name, cm_id->device->name,
+ memcpy(pm_reg_msg.dev_name, dev_name(&cm_id->device->dev),
sizeof(pm_reg_msg.dev_name));
memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
sizeof(pm_reg_msg.if_name));
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef459f2f2eeb..d7025cd5be28 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -220,33 +220,37 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
int ret2, qpn;
u8 mgmt_class, vclass;
+ if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
+ (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
+ return ERR_PTR(-EPROTONOSUPPORT);
+
/* Validate parameters */
qpn = get_spl_qp_index(qp_type);
if (qpn == -1) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid QP Type %d\n",
- qp_type);
+ dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
+ __func__, qp_type);
goto error1;
}
if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid RMPP Version %u\n",
- rmpp_version);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: invalid RMPP Version %u\n",
+ __func__, rmpp_version);
goto error1;
}
/* Validate MAD registration request if supplied */
if (mad_reg_req) {
if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid Class Version %u\n",
- mad_reg_req->mgmt_class_version);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: invalid Class Version %u\n",
+ __func__,
+ mad_reg_req->mgmt_class_version);
goto error1;
}
if (!recv_handler) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: no recv_handler\n");
+ dev_dbg_ratelimited(&device->dev,
+ "%s: no recv_handler\n", __func__);
goto error1;
}
if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
@@ -256,9 +260,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
*/
if (mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid Mgmt Class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
} else if (mad_reg_req->mgmt_class == 0) {
@@ -266,8 +270,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
* Class 0 is reserved in IBA and is used for
* aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
*/
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid Mgmt Class 0\n");
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid Mgmt Class 0\n",
+ __func__);
goto error1;
} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
/*
@@ -275,18 +280,19 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
* ensure supplied OUI is not zero
*/
if (!is_vendor_oui(mad_reg_req->oui)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: No OUI specified for class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: No OUI specified for class 0x%x\n",
+ __func__,
+ mad_reg_req->mgmt_class);
goto error1;
}
}
/* Make sure class supplied is consistent with RMPP */
if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
if (rmpp_version) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: RMPP version for non-RMPP class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
}
@@ -297,9 +303,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
(mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid SM QP type: class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
} else {
@@ -307,9 +313,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
(mad_reg_req->mgmt_class ==
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid GS QP type: class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
}
@@ -324,18 +330,18 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
/* Validate device and port */
port_priv = ib_get_mad_port(device, port_num);
if (!port_priv) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid port %d\n",
- port_num);
+ dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
+ __func__, port_num);
ret = ERR_PTR(-ENODEV);
goto error1;
}
- /* Verify the QP requested is supported. For example, Ethernet devices
- * will not have QP0 */
+ /* Verify the QP requested is supported. For example, Ethernet devices
+ * will not have QP0.
+ */
if (!port_priv->qp_info[qpn].qp) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: QP %d not supported\n", qpn);
+ dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
+ __func__, qpn);
ret = ERR_PTR(-EPROTONOSUPPORT);
goto error1;
}
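
The hunk above does two things: an up-front rdma_cap_ib_smi()/rdma_cap_ib_cm() check fails unsupported QP types fast with -EPROTONOSUPPORT, and the validation messages drop from dev_notice() to dev_dbg_ratelimited() so a misbehaving userspace cannot flood the kernel log. The early return travels through the function's pointer return value; below is a minimal, runnable user-space model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() idiom, where register_agent() and its argument are hypothetical stand-ins.

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Model of the kernel idiom: small negative errnos occupy the top
     * of the pointer range, so one return value carries both success
     * and failure. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Hypothetical stand-in for ib_register_mad_agent()'s early exit. */
    static void *register_agent(int cap_supported)
    {
        if (!cap_supported)
            return ERR_PTR(-EPROTONOSUPPORT);
        return "agent";
    }

    int main(void)
    {
        void *agent = register_agent(0);

        if (IS_ERR(agent))
            printf("registration failed: %ld\n", PTR_ERR(agent));
        return 0;
    }
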
@@ -2408,7 +2414,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
- int timeout_ms)
+ unsigned long timeout_ms)
{
mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
wait_for_response(mad_send_wr);
@@ -3183,7 +3189,7 @@ static int ib_mad_port_open(struct ib_device *device,
cq_size *= 2;
port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
- IB_POLL_WORKQUEUE);
+ IB_POLL_UNBOUND_WORKQUEUE);
if (IS_ERR(port_priv->cq)) {
dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
ret = PTR_ERR(port_priv->cq);
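
The last mad.c hunk switches the MAD CQ from IB_POLL_WORKQUEUE to IB_POLL_UNBOUND_WORKQUEUE: completion processing is no longer bound to the CPU that fielded the interrupt, so the scheduler can place the potentially long-running MAD work anywhere. A minimal kernel-style sketch of the allocation pattern (not buildable outside a kernel tree; device, priv and cq_size are hypothetical):

    /* Completions are dispatched to an unbound workqueue: better for
     * long-running handlers, at some cost in cache locality. */
    cq = ib_alloc_cq(device, priv, 2 * cq_size, /* comp_vector */ 0,
                     IB_POLL_UNBOUND_WORKQUEUE);
    if (IS_ERR(cq))
        return PTR_ERR(cq);
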
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d84ae1671898..216509036aa8 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -221,6 +221,6 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
- int timeout_ms);
+ unsigned long timeout_ms);
#endif /* __IB_MAD_PRIV_H__ */
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 3ccaae18ad75..724f5a62e82f 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -47,9 +47,9 @@ static struct {
const struct rdma_nl_cbs *cb_table;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
-int rdma_nl_chk_listeners(unsigned int group)
+bool rdma_nl_chk_listeners(unsigned int group)
{
- return (netlink_has_listeners(nls, group)) ? 0 : -1;
+ return netlink_has_listeners(nls, group);
}
EXPORT_SYMBOL(rdma_nl_chk_listeners);
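
rdma_nl_chk_listeners() used to return 0 for "has listeners" and -1 otherwise, forcing an inverted test at every call site; returning bool lets the calls read naturally. A sketch of the resulting caller pattern, mirroring the sa_query.c hunk further down:

    /* before: if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) ... */
    if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
        /* someone is listening, so try the netlink path first */
        if (!ib_nl_make_request(query, gfp_mask))
            return id;
    }
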
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 0385ab438320..573399e3ccc1 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -179,7 +179,8 @@ static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
return -EMSGSIZE;
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
+ dev_name(&device->dev)))
return -EMSGSIZE;
return 0;
@@ -645,6 +646,36 @@ err:
return err;
}
+static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ u32 index;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
+ char name[IB_DEVICE_NAME_MAX] = {};
+
+ nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
+ IB_DEVICE_NAME_MAX);
+ err = ib_device_rename(device, name);
+ }
+
+ put_device(&device->dev);
+ return err;
+}
+
static int _nldev_get_dumpit(struct ib_device *device,
struct sk_buff *skb,
struct netlink_callback *cb,
@@ -1077,6 +1108,10 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_get_doit,
.dump = nldev_get_dumpit,
},
+ [RDMA_NLDEV_CMD_SET] = {
+ .doit = nldev_set_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
[RDMA_NLDEV_CMD_PORT_GET] = {
.doit = nldev_port_get_doit,
.dump = nldev_port_get_dumpit,
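
The new RDMA_NLDEV_CMD_SET handler is the kernel half of device renaming: look the device up by index, copy the user-controlled RDMA_NLDEV_ATTR_DEV_NAME payload with nla_strlcpy(), and pass the result to ib_device_rename(). The bounded copy is the point; below is a runnable user-space model of the strlcpy() semantics it relies on (at most size - 1 bytes copied, always NUL-terminated, full source length returned so truncation is detectable).

    #include <stdio.h>
    #include <string.h>

    #define IB_DEVICE_NAME_MAX 64

    /* Model of nla_strlcpy()/strlcpy() semantics. */
    static size_t strlcpy_model(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len >= size ? size - 1 : len;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char name[IB_DEVICE_NAME_MAX] = {};

        if (strlcpy_model(name, "mlx5_renamed", sizeof(name)) >= sizeof(name))
            puts("attribute was truncated");
        printf("new name: %s\n", name);
        return 0;
    }
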
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index c4118bcd5103..752a55c6bdce 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -794,44 +794,6 @@ void uverbs_close_fd(struct file *f)
uverbs_uobject_put(uobj);
}
-static void ufile_disassociate_ucontext(struct ib_ucontext *ibcontext)
-{
- struct ib_device *ib_dev = ibcontext->device;
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
- if (!owning_process)
- return;
-
- owning_mm = get_task_mm(owning_process);
- if (!owning_mm) {
- pr_info("no mm, disassociate ucontext is pending task termination\n");
- while (1) {
- put_task_struct(owning_process);
- usleep_range(1000, 2000);
- owning_process = get_pid_task(ibcontext->tgid,
- PIDTYPE_PID);
- if (!owning_process ||
- owning_process->state == TASK_DEAD) {
- pr_info("disassociate ucontext done, task was terminated\n");
- /* in case task was dead need to release the
- * task struct.
- */
- if (owning_process)
- put_task_struct(owning_process);
- return;
- }
- }
- }
-
- down_write(&owning_mm->mmap_sem);
- ib_dev->disassociate_ucontext(ibcontext);
- up_write(&owning_mm->mmap_sem);
- mmput(owning_mm);
- put_task_struct(owning_process);
-}
-
/*
* Drop the ucontext off the ufile and completely disconnect it from the
* ib_device
@@ -840,20 +802,28 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
enum rdma_remove_reason reason)
{
struct ib_ucontext *ucontext = ufile->ucontext;
+ struct ib_device *ib_dev = ucontext->device;
int ret;
- if (reason == RDMA_REMOVE_DRIVER_REMOVE)
- ufile_disassociate_ucontext(ucontext);
+ /*
+ * If we are closing the FD then the user mmap VMAs must have
+ * already been destroyed as they hold on to the filep, otherwise
+ * they need to be zap'd.
+ */
+ if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
+ uverbs_user_mmap_disassociate(ufile);
+ if (ib_dev->disassociate_ucontext)
+ ib_dev->disassociate_ucontext(ucontext);
+ }
- put_pid(ucontext->tgid);
- ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
+ ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
RDMACG_RESOURCE_HCA_HANDLE);
/*
* FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
* the error return.
*/
- ret = ucontext->device->dealloc_ucontext(ucontext);
+ ret = ib_dev->dealloc_ucontext(ucontext);
WARN_ON(ret);
ufile->ucontext = NULL;
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index f962f2a593ba..4886d2bba7c7 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -160,5 +160,6 @@ void uverbs_disassociate_api(struct uverbs_api *uapi);
void uverbs_destroy_api(struct uverbs_api *uapi);
void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
unsigned int num_attrs);
+void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile);
#endif /* RDMA_CORE_H */
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 3b7fa0ccaa08..06d8657ce583 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -50,8 +50,7 @@ void rdma_restrack_clean(struct rdma_restrack_root *res)
dev = container_of(res, struct ib_device, res);
pr_err("restrack: %s", CUT_HERE);
- pr_err("restrack: BUG: RESTRACK detected leak of resources on %s\n",
- dev->name);
+ dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
hash_for_each(res->hash, bkt, e, node) {
if (rdma_is_kernel_res(e)) {
owner = e->kern_name;
@@ -156,6 +155,21 @@ static bool res_is_user(struct rdma_restrack_entry *res)
}
}
+void rdma_restrack_set_task(struct rdma_restrack_entry *res,
+ const char *caller)
+{
+ if (caller) {
+ res->kern_name = caller;
+ return;
+ }
+
+ if (res->task)
+ put_task_struct(res->task);
+ get_task_struct(current);
+ res->task = current;
+}
+EXPORT_SYMBOL(rdma_restrack_set_task);
+
void rdma_restrack_add(struct rdma_restrack_entry *res)
{
struct ib_device *dev = res_to_dev(res);
@@ -168,7 +182,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
if (res_is_user(res)) {
if (!res->task)
- rdma_restrack_set_task(res, current);
+ rdma_restrack_set_task(res, NULL);
res->kern_name = NULL;
} else {
set_kern_name(res);
@@ -209,7 +223,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
struct ib_device *dev;
if (!res->valid)
- return;
+ goto out;
dev = res_to_dev(res);
if (!dev)
@@ -222,8 +236,12 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
down_write(&dev->res.rwsem);
hash_del(&res->node);
res->valid = false;
- if (res->task)
- put_task_struct(res->task);
up_write(&dev->res.rwsem);
+
+out:
+ if (res->task) {
+ put_task_struct(res->task);
+ res->task = NULL;
+ }
}
EXPORT_SYMBOL(rdma_restrack_del);
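
rdma_restrack_set_task() grows a caller argument and becomes the single place that manages the entry's task reference: kernel consumers pass their name string, user consumers pass NULL and the entry pins current. Note also that rdma_restrack_del() now drops the task reference even for entries that never became valid. A kernel-style sketch of the new convention (qp is a hypothetical resource owner):

    /* kernel-owned resource: record the caller string, no task ref */
    rdma_restrack_set_task(&qp->res, KBUILD_MODNAME);

    /* user-owned resource: take a reference on the current task */
    rdma_restrack_set_task(&qp->res, NULL);
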
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 683e6d11a564..d22c4a2ebac6 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -12,6 +12,7 @@
*/
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>
@@ -280,7 +281,11 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
struct ib_device *dev = qp->pd->device;
int ret;
- ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+ if (is_pci_p2pdma_page(sg_page(sg)))
+ ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+ else
+ ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+
if (!ret)
return -ENOMEM;
sg_cnt = ret;
@@ -602,7 +607,9 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
break;
}
- ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ /* P2PDMA contexts do not need to be unmapped */
+ if (!is_pci_p2pdma_page(sg_page(sg)))
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
diff --git a/drivers/infiniband/core/sa.h b/drivers/infiniband/core/sa.h
index b1d4bbf4ce5c..cbaaaa92fff3 100644
--- a/drivers/infiniband/core/sa.h
+++ b/drivers/infiniband/core/sa.h
@@ -49,16 +49,14 @@ static inline void ib_sa_client_put(struct ib_sa_client *client)
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- u8 method,
+ struct ib_device *device, u8 port_num, u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
- void *context,
- struct ib_sa_query **sa_query);
+ void *context, struct ib_sa_query **sa_query);
int mcast_init(void);
void mcast_cleanup(void);
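
All of the SA query entry points, like ib_reset_mad_timeout() earlier, widen timeout_ms from int to unsigned long. That matches the msecs_to_jiffies() parameter type and removes a signed narrowing of user-supplied timeouts. A small runnable demonstration of the narrowing this avoids; the value is hypothetical:

    #include <stdio.h>

    int main(void)
    {
        unsigned long timeout_ms = 3000000000UL; /* roughly 35 days */
        int narrowed = (int)timeout_ms;          /* implementation-defined */

        printf("as unsigned long: %lu ms\n", timeout_ms);
        printf("through int:      %d ms\n", narrowed); /* typically negative */
        return 0;
    }
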
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7b794a14d6e8..be5ba5e15496 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -761,7 +761,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
/* Construct the family header first */
header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
- memcpy(header->device_name, query->port->agent->device->name,
+ memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
LS_DEVICE_NAME_MAX);
header->port_num = query->port->port_num;
@@ -835,7 +835,6 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
void *data;
- int ret = 0;
struct ib_sa_mad *mad;
int len;
@@ -862,13 +861,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
- if (!ret)
- ret = len;
- else
- ret = 0;
-
- return ret;
+ return rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
}
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
@@ -891,14 +884,12 @@ static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
ret = ib_nl_send_msg(query, gfp_mask);
- if (ret <= 0) {
+ if (ret) {
ret = -EIO;
/* Remove the request */
spin_lock_irqsave(&ib_nl_request_lock, flags);
list_del(&query->list);
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
- } else {
- ret = 0;
}
return ret;
@@ -1227,46 +1218,6 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-static int roce_resolve_route_from_path(struct sa_path_rec *rec,
- const struct ib_gid_attr *attr)
-{
- struct rdma_dev_addr dev_addr = {};
- union {
- struct sockaddr _sockaddr;
- struct sockaddr_in _sockaddr_in;
- struct sockaddr_in6 _sockaddr_in6;
- } sgid_addr, dgid_addr;
- int ret;
-
- if (rec->roce.route_resolved)
- return 0;
- if (!attr || !attr->ndev)
- return -EINVAL;
-
- dev_addr.bound_dev_if = attr->ndev->ifindex;
- /* TODO: Use net from the ib_gid_attr once it is added to it,
- * until than, limit itself to init_net.
- */
- dev_addr.net = &init_net;
-
- rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
-
- /* validate the route */
- ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
- &dgid_addr._sockaddr, &dev_addr);
- if (ret)
- return ret;
-
- if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
- dev_addr.network == RDMA_NETWORK_IPV6) &&
- rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
- return -EINVAL;
-
- rec->roce.route_resolved = true;
- return 0;
-}
-
static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
struct rdma_ah_attr *ah_attr,
@@ -1409,7 +1360,8 @@ static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
spin_unlock_irqrestore(&tid_lock, flags);
}
-static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
+static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
+ gfp_t gfp_mask)
{
bool preload = gfpflags_allow_blocking(gfp_mask);
unsigned long flags;
@@ -1433,7 +1385,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
(!(query->flags & IB_SA_QUERY_OPA))) {
- if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
+ if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
if (!ib_nl_make_request(query, gfp_mask))
return id;
}
@@ -1599,7 +1551,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct sa_path_rec *resp,
void *context),
@@ -1753,7 +1705,7 @@ int ib_sa_service_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num, u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_service_rec *resp,
void *context),
@@ -1850,7 +1802,7 @@ int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
@@ -1941,7 +1893,7 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_guidinfo_rec *resp,
void *context),
@@ -2108,7 +2060,7 @@ static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
}
static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
- int timeout_ms,
+ unsigned long timeout_ms,
void (*callback)(void *context),
void *context,
struct ib_sa_query **sa_query)
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 9b0bea8303e0..1143c0448666 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -685,9 +685,8 @@ static int ib_mad_agent_security_change(struct notifier_block *nb,
if (event != LSM_POLICY_CHANGE)
return NOTIFY_DONE;
- ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
- ag->device->name,
- ag->port_num);
+ ag->smp_allowed = !security_ib_endport_manage_subnet(
+ ag->security, dev_name(&ag->device->dev), ag->port_num);
return NOTIFY_OK;
}
@@ -708,7 +707,7 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
return 0;
ret = security_ib_endport_manage_subnet(agent->security,
- agent->device->name,
+ dev_name(&agent->device->dev),
agent->port_num);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7fd14ead7b37..6fcce2c206c6 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -512,7 +512,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
40 + offset / 8, sizeof(data));
if (ret < 0)
- return sprintf(buf, "N/A (no PMA)\n");
+ return ret;
switch (width) {
case 4:
@@ -1036,7 +1036,7 @@ static int add_port(struct ib_device *device, int port_num,
p->port_num = port_num;
ret = kobject_init_and_add(&p->kobj, &port_type,
- device->ports_parent,
+ device->ports_kobj,
"%d", port_num);
if (ret) {
kfree(p);
@@ -1057,10 +1057,12 @@ static int add_port(struct ib_device *device, int port_num,
goto err_put;
}
- p->pma_table = get_counter_table(device, port_num);
- ret = sysfs_create_group(&p->kobj, p->pma_table);
- if (ret)
- goto err_put_gid_attrs;
+ if (device->process_mad) {
+ p->pma_table = get_counter_table(device, port_num);
+ ret = sysfs_create_group(&p->kobj, p->pma_table);
+ if (ret)
+ goto err_put_gid_attrs;
+ }
p->gid_group.name = "gids";
p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
@@ -1118,9 +1120,9 @@ static int add_port(struct ib_device *device, int port_num,
}
/*
- * If port == 0, it means we have only one port and the parent
- * device, not this port device, should be the holder of the
- * hw_counters
+ * If port == 0, it means hw_counters are per device and not per
+ * port, so the holder should be the device. Therefore skip per-port
+ * counter initialization.
*/
if (device->alloc_hw_stats && port_num)
setup_hw_stats(device, p, port_num);
@@ -1173,7 +1175,8 @@ err_free_gid:
p->gid_group.attrs = NULL;
err_remove_pma:
- sysfs_remove_group(&p->kobj, p->pma_table);
+ if (p->pma_table)
+ sysfs_remove_group(&p->kobj, p->pma_table);
err_put_gid_attrs:
kobject_put(&p->gid_attr_group->kobj);
@@ -1183,7 +1186,7 @@ err_put:
return ret;
}
-static ssize_t show_node_type(struct device *device,
+static ssize_t node_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1198,8 +1201,9 @@ static ssize_t show_node_type(struct device *device,
default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
}
}
+static DEVICE_ATTR_RO(node_type);
-static ssize_t show_sys_image_guid(struct device *device,
+static ssize_t sys_image_guid_show(struct device *device,
struct device_attribute *dev_attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1210,8 +1214,9 @@ static ssize_t show_sys_image_guid(struct device *device,
be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[2]),
be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[3]));
}
+static DEVICE_ATTR_RO(sys_image_guid);
-static ssize_t show_node_guid(struct device *device,
+static ssize_t node_guid_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1222,8 +1227,9 @@ static ssize_t show_node_guid(struct device *device,
be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}
+static DEVICE_ATTR_RO(node_guid);
-static ssize_t show_node_desc(struct device *device,
+static ssize_t node_desc_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1231,9 +1237,9 @@ static ssize_t show_node_desc(struct device *device,
return sprintf(buf, "%.64s\n", dev->node_desc);
}
-static ssize_t set_node_desc(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t node_desc_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
struct ib_device_modify desc = {};
@@ -1249,8 +1255,9 @@ static ssize_t set_node_desc(struct device *device,
return count;
}
+static DEVICE_ATTR_RW(node_desc);
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+static ssize_t fw_ver_show(struct device *device, struct device_attribute *attr,
char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1259,19 +1266,19 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
strlcat(buf, "\n", IB_FW_VERSION_NAME_MAX);
return strlen(buf);
}
+static DEVICE_ATTR_RO(fw_ver);
+
+static struct attribute *ib_dev_attrs[] = {
+ &dev_attr_node_type.attr,
+ &dev_attr_node_guid.attr,
+ &dev_attr_sys_image_guid.attr,
+ &dev_attr_fw_ver.attr,
+ &dev_attr_node_desc.attr,
+ NULL,
+};
-static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
-static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
-static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
-static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-
-static struct device_attribute *ib_class_attributes[] = {
- &dev_attr_node_type,
- &dev_attr_sys_image_guid,
- &dev_attr_node_guid,
- &dev_attr_node_desc,
- &dev_attr_fw_ver,
+static const struct attribute_group dev_attr_group = {
+ .attrs = ib_dev_attrs,
};
static void free_port_list_attributes(struct ib_device *device)
@@ -1285,7 +1292,9 @@ static void free_port_list_attributes(struct ib_device *device)
kfree(port->hw_stats);
free_hsag(&port->kobj, port->hw_stats_ag);
}
- sysfs_remove_group(p, port->pma_table);
+
+ if (port->pma_table)
+ sysfs_remove_group(p, port->pma_table);
sysfs_remove_group(p, &port->pkey_group);
sysfs_remove_group(p, &port->gid_group);
sysfs_remove_group(&port->gid_attr_group->kobj,
@@ -1296,7 +1305,7 @@ static void free_port_list_attributes(struct ib_device *device)
kobject_put(p);
}
- kobject_put(device->ports_parent);
+ kobject_put(device->ports_kobj);
}
int ib_device_register_sysfs(struct ib_device *device,
@@ -1307,23 +1316,15 @@ int ib_device_register_sysfs(struct ib_device *device,
int ret;
int i;
- ret = dev_set_name(class_dev, "%s", device->name);
- if (ret)
- return ret;
+ device->groups[0] = &dev_attr_group;
+ class_dev->groups = device->groups;
ret = device_add(class_dev);
if (ret)
goto err;
- for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
- ret = device_create_file(class_dev, ib_class_attributes[i]);
- if (ret)
- goto err_unregister;
- }
-
- device->ports_parent = kobject_create_and_add("ports",
- &class_dev->kobj);
- if (!device->ports_parent) {
+ device->ports_kobj = kobject_create_and_add("ports", &class_dev->kobj);
+ if (!device->ports_kobj) {
ret = -ENOMEM;
goto err_put;
}
@@ -1347,20 +1348,15 @@ int ib_device_register_sysfs(struct ib_device *device,
err_put:
free_port_list_attributes(device);
-
-err_unregister:
device_del(class_dev);
-
err:
return ret;
}
void ib_device_unregister_sysfs(struct ib_device *device)
{
- int i;
-
- /* Hold kobject until ib_dealloc_device() */
- kobject_get(&device->dev.kobj);
+ /* Hold device until ib_dealloc_device() */
+ get_device(&device->dev);
free_port_list_attributes(device);
@@ -1369,8 +1365,5 @@ void ib_device_unregister_sysfs(struct ib_device *device)
free_hsag(&device->dev.kobj, device->hw_stats_ag);
}
- for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
- device_remove_file(&device->dev, ib_class_attributes[i]);
-
device_unregister(&device->dev);
}
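
The sysfs rework replaces the device_create_file() loop with a static attribute group hung off class_dev->groups before device_add(), so the attributes appear and disappear atomically with the device instead of racing hotplug; the show/store helpers are renamed to the <name>_show/<name>_store convention that DEVICE_ATTR_RO()/DEVICE_ATTR_RW() expect. A condensed kernel-style sketch of the pattern with one attribute (the body is illustrative only):

    static ssize_t node_type_show(struct device *device,
                                  struct device_attribute *attr, char *buf)
    {
        return sprintf(buf, "example\n");
    }
    static DEVICE_ATTR_RO(node_type);  /* expands to dev_attr_node_type */

    static struct attribute *ib_dev_attrs[] = {
        &dev_attr_node_type.attr,
        NULL,
    };

    static const struct attribute_group dev_attr_group = {
        .attrs = ib_dev_attrs,
    };
    /* wired up via class_dev->groups before device_add() */
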
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index a41792dbae1f..c6144df47ea4 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -85,7 +85,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
struct page **page_list;
struct vm_area_struct **vma_list;
unsigned long lock_limit;
+ unsigned long new_pinned;
unsigned long cur_base;
+ struct mm_struct *mm;
unsigned long npages;
int ret;
int i;
@@ -107,25 +109,32 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (!can_do_mlock())
return ERR_PTR(-EPERM);
- umem = kzalloc(sizeof *umem, GFP_KERNEL);
- if (!umem)
- return ERR_PTR(-ENOMEM);
+ if (access & IB_ACCESS_ON_DEMAND) {
+ umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
+ if (!umem)
+ return ERR_PTR(-ENOMEM);
+ umem->is_odp = 1;
+ } else {
+ umem = kzalloc(sizeof(*umem), GFP_KERNEL);
+ if (!umem)
+ return ERR_PTR(-ENOMEM);
+ }
umem->context = context;
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
umem->writable = ib_access_writable(access);
+ umem->owning_mm = mm = current->mm;
+ mmgrab(mm);
if (access & IB_ACCESS_ON_DEMAND) {
- ret = ib_umem_odp_get(context, umem, access);
+ ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
if (ret)
goto umem_kfree;
return umem;
}
- umem->odp_data = NULL;
-
/* We assume the memory is from hugetlb until proved otherwise */
umem->hugetlb = 1;
@@ -144,25 +153,25 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->hugetlb = 0;
npages = ib_umem_num_pages(umem);
+ if (npages == 0 || npages > UINT_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm += npages;
- if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
- up_write(&current->mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ if (check_add_overflow(mm->pinned_vm, npages, &new_pinned) ||
+ (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))) {
+ up_write(&mm->mmap_sem);
ret = -ENOMEM;
- goto vma;
+ goto out;
}
- up_write(&current->mm->mmap_sem);
+ mm->pinned_vm = new_pinned;
+ up_write(&mm->mmap_sem);
cur_base = addr & PAGE_MASK;
- if (npages == 0 || npages > UINT_MAX) {
- ret = -EINVAL;
- goto vma;
- }
-
ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
if (ret)
goto vma;
@@ -172,14 +181,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
sg_list_start = umem->sg_head.sgl;
- down_read(&current->mm->mmap_sem);
while (npages) {
+ down_read(&mm->mmap_sem);
ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags, page_list, vma_list);
if (ret < 0) {
- up_read(&current->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
goto umem_release;
}
@@ -187,17 +196,20 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
cur_base += ret * PAGE_SIZE;
npages -= ret;
+ /* Continue to hold the mmap_sem as vma_list access
+ * needs to be protected.
+ */
for_each_sg(sg_list_start, sg, ret, i) {
if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
umem->hugetlb = 0;
sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
}
+ up_read(&mm->mmap_sem);
/* preparing for next loop */
sg_list_start = sg;
}
- up_read(&current->mm->mmap_sem);
umem->nmap = ib_dma_map_sg_attrs(context->device,
umem->sg_head.sgl,
@@ -216,29 +228,40 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem_release:
__ib_umem_release(context->device, umem, 0);
vma:
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm -= ib_umem_num_pages(umem);
- up_write(&current->mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&mm->mmap_sem);
out:
if (vma_list)
free_page((unsigned long) vma_list);
free_page((unsigned long) page_list);
umem_kfree:
- if (ret)
+ if (ret) {
+ mmdrop(umem->owning_mm);
kfree(umem);
+ }
return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
-static void ib_umem_account(struct work_struct *work)
+static void __ib_umem_release_tail(struct ib_umem *umem)
+{
+ mmdrop(umem->owning_mm);
+ if (umem->is_odp)
+ kfree(to_ib_umem_odp(umem));
+ else
+ kfree(umem);
+}
+
+static void ib_umem_release_defer(struct work_struct *work)
{
struct ib_umem *umem = container_of(work, struct ib_umem, work);
- down_write(&umem->mm->mmap_sem);
- umem->mm->pinned_vm -= umem->diff;
- up_write(&umem->mm->mmap_sem);
- mmput(umem->mm);
- kfree(umem);
+ down_write(&umem->owning_mm->mmap_sem);
+ umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&umem->owning_mm->mmap_sem);
+
+ __ib_umem_release_tail(umem);
}
/**
@@ -248,52 +271,36 @@ static void ib_umem_account(struct work_struct *work)
void ib_umem_release(struct ib_umem *umem)
{
struct ib_ucontext *context = umem->context;
- struct mm_struct *mm;
- struct task_struct *task;
- unsigned long diff;
- if (umem->odp_data) {
- ib_umem_odp_release(umem);
+ if (umem->is_odp) {
+ ib_umem_odp_release(to_ib_umem_odp(umem));
+ __ib_umem_release_tail(umem);
return;
}
__ib_umem_release(umem->context->device, umem, 1);
- task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
- if (!task)
- goto out;
- mm = get_task_mm(task);
- put_task_struct(task);
- if (!mm)
- goto out;
-
- diff = ib_umem_num_pages(umem);
-
/*
* We may be called with the mm's mmap_sem already held. This
* can happen when a userspace munmap() is the call that drops
* the last reference to our file and calls our release
* method. If there are memory regions to destroy, we'll end
* up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting to the system workqueue.
+ * we defer the vm_locked accounting to a workqueue.
*/
if (context->closing) {
- if (!down_write_trylock(&mm->mmap_sem)) {
- INIT_WORK(&umem->work, ib_umem_account);
- umem->mm = mm;
- umem->diff = diff;
-
+ if (!down_write_trylock(&umem->owning_mm->mmap_sem)) {
+ INIT_WORK(&umem->work, ib_umem_release_defer);
queue_work(ib_wq, &umem->work);
return;
}
- } else
- down_write(&mm->mmap_sem);
+ } else {
+ down_write(&umem->owning_mm->mmap_sem);
+ }
+ umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&umem->owning_mm->mmap_sem);
- mm->pinned_vm -= diff;
- up_write(&mm->mmap_sem);
- mmput(mm);
-out:
- kfree(umem);
+ __ib_umem_release_tail(umem);
}
EXPORT_SYMBOL(ib_umem_release);
@@ -303,7 +310,7 @@ int ib_umem_page_count(struct ib_umem *umem)
int n;
struct scatterlist *sg;
- if (umem->odp_data)
+ if (umem->is_odp)
return ib_umem_num_pages(umem);
n = 0;
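
Two themes run through the umem.c changes: accounting moves to the umem's owning_mm, grabbed once with mmgrab() at creation instead of re-derived from current at release, and the pinned_vm update uses check_add_overflow() so the addition and the RLIMIT_MEMLOCK test happen as one decision. A runnable user-space model of that overflow-checked accounting, using the GCC/Clang builtin that check_add_overflow() wraps; the numbers are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    static bool try_account(unsigned long *pinned, unsigned long npages,
                            unsigned long limit)
    {
        unsigned long new_pinned;

        /* reject both arithmetic wrap and limit overrun, as umem.c does */
        if (__builtin_add_overflow(*pinned, npages, &new_pinned) ||
            new_pinned > limit)
            return false;
        *pinned = new_pinned;
        return true;
    }

    int main(void)
    {
        unsigned long pinned = 0;

        printf("%d\n", try_account(&pinned, 1024, 16384)); /* 1: fits */
        printf("%d\n", try_account(&pinned, -1UL, 16384)); /* 0: wraps */
        return 0;
    }
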
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 6ec748eccff7..2b4c5e7dd5a1 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -58,7 +58,7 @@ static u64 node_start(struct umem_odp_node *n)
struct ib_umem_odp *umem_odp =
container_of(n, struct ib_umem_odp, interval_tree);
- return ib_umem_start(umem_odp->umem);
+ return ib_umem_start(&umem_odp->umem);
}
/* Note that the representation of the intervals in the interval tree
@@ -71,140 +71,86 @@ static u64 node_last(struct umem_odp_node *n)
struct ib_umem_odp *umem_odp =
container_of(n, struct ib_umem_odp, interval_tree);
- return ib_umem_end(umem_odp->umem) - 1;
+ return ib_umem_end(&umem_odp->umem) - 1;
}
INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
node_start, node_last, static, rbt_ib_umem)
-static void ib_umem_notifier_start_account(struct ib_umem *item)
+static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
{
- mutex_lock(&item->odp_data->umem_mutex);
-
- /* Only update private counters for this umem if it has them.
- * Otherwise skip it. All page faults will be delayed for this umem. */
- if (item->odp_data->mn_counters_active) {
- int notifiers_count = item->odp_data->notifiers_count++;
-
- if (notifiers_count == 0)
- /* Initialize the completion object for waiting on
- * notifiers. Since notifier_count is zero, no one
- * should be waiting right now. */
- reinit_completion(&item->odp_data->notifier_completion);
- }
- mutex_unlock(&item->odp_data->umem_mutex);
-}
-
-static void ib_umem_notifier_end_account(struct ib_umem *item)
-{
- mutex_lock(&item->odp_data->umem_mutex);
-
- /* Only update private counters for this umem if it has them.
- * Otherwise skip it. All page faults will be delayed for this umem. */
- if (item->odp_data->mn_counters_active) {
+ mutex_lock(&umem_odp->umem_mutex);
+ if (umem_odp->notifiers_count++ == 0)
/*
- * This sequence increase will notify the QP page fault that
- * the page that is going to be mapped in the spte could have
- * been freed.
+ * Initialize the completion object for waiting on
+ * notifiers. Since notifier_count is zero, no one should be
+ * waiting right now.
*/
- ++item->odp_data->notifiers_seq;
- if (--item->odp_data->notifiers_count == 0)
- complete_all(&item->odp_data->notifier_completion);
- }
- mutex_unlock(&item->odp_data->umem_mutex);
+ reinit_completion(&umem_odp->notifier_completion);
+ mutex_unlock(&umem_odp->umem_mutex);
}
-/* Account for a new mmu notifier in an ib_ucontext. */
-static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
+static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
{
- atomic_inc(&context->notifier_count);
+ mutex_lock(&umem_odp->umem_mutex);
+ /*
+ * This sequence increase will notify the QP page fault that the page
+ * that is going to be mapped in the spte could have been freed.
+ */
+ ++umem_odp->notifiers_seq;
+ if (--umem_odp->notifiers_count == 0)
+ complete_all(&umem_odp->notifier_completion);
+ mutex_unlock(&umem_odp->umem_mutex);
}
-/* Account for a terminating mmu notifier in an ib_ucontext.
- *
- * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
- * the function takes the semaphore itself. */
-static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
+static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
+ u64 start, u64 end, void *cookie)
{
- int zero_notifiers = atomic_dec_and_test(&context->notifier_count);
-
- if (zero_notifiers &&
- !list_empty(&context->no_private_counters)) {
- /* No currently running mmu notifiers. Now is the chance to
- * add private accounting to all previously added umems. */
- struct ib_umem_odp *odp_data, *next;
-
- /* Prevent concurrent mmu notifiers from working on the
- * no_private_counters list. */
- down_write(&context->umem_rwsem);
-
- /* Read the notifier_count again, with the umem_rwsem
- * semaphore taken for write. */
- if (!atomic_read(&context->notifier_count)) {
- list_for_each_entry_safe(odp_data, next,
- &context->no_private_counters,
- no_private_counters) {
- mutex_lock(&odp_data->umem_mutex);
- odp_data->mn_counters_active = true;
- list_del(&odp_data->no_private_counters);
- complete_all(&odp_data->notifier_completion);
- mutex_unlock(&odp_data->umem_mutex);
- }
- }
-
- up_write(&context->umem_rwsem);
- }
-}
+ struct ib_umem *umem = &umem_odp->umem;
-static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
- u64 end, void *cookie) {
/*
* Increase the number of notifiers running, to
* prevent any further fault handling on this MR.
*/
- ib_umem_notifier_start_account(item);
- item->odp_data->dying = 1;
+ ib_umem_notifier_start_account(umem_odp);
+ umem_odp->dying = 1;
/* Make sure that the fact the umem is dying is out before we release
* all pending page faults. */
smp_wmb();
- complete_all(&item->odp_data->notifier_completion);
- item->context->invalidate_range(item, ib_umem_start(item),
- ib_umem_end(item));
+ complete_all(&umem_odp->notifier_completion);
+ umem->context->invalidate_range(umem_odp, ib_umem_start(umem),
+ ib_umem_end(umem));
return 0;
}
static void ib_umem_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
- if (!context->invalidate_range)
- return;
-
- ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
- ULLONG_MAX,
- ib_umem_notifier_release_trampoline,
- true,
- NULL);
- up_read(&context->umem_rwsem);
+ struct ib_ucontext_per_mm *per_mm =
+ container_of(mn, struct ib_ucontext_per_mm, mn);
+
+ down_read(&per_mm->umem_rwsem);
+ if (per_mm->active)
+ rbt_ib_umem_for_each_in_range(
+ &per_mm->umem_tree, 0, ULLONG_MAX,
+ ib_umem_notifier_release_trampoline, true, NULL);
+ up_read(&per_mm->umem_rwsem);
}
-static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
+static int invalidate_page_trampoline(struct ib_umem_odp *item, u64 start,
u64 end, void *cookie)
{
ib_umem_notifier_start_account(item);
- item->context->invalidate_range(item, start, start + PAGE_SIZE);
+ item->umem.context->invalidate_range(item, start, start + PAGE_SIZE);
ib_umem_notifier_end_account(item);
return 0;
}
-static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
- u64 end, void *cookie)
+static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
+ u64 start, u64 end, void *cookie)
{
ib_umem_notifier_start_account(item);
- item->context->invalidate_range(item, start, end);
+ item->umem.context->invalidate_range(item, start, end);
return 0;
}
@@ -214,28 +160,30 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
unsigned long end,
bool blockable)
{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
- int ret;
-
- if (!context->invalidate_range)
- return 0;
+ struct ib_ucontext_per_mm *per_mm =
+ container_of(mn, struct ib_ucontext_per_mm, mn);
if (blockable)
- down_read(&context->umem_rwsem);
- else if (!down_read_trylock(&context->umem_rwsem))
+ down_read(&per_mm->umem_rwsem);
+ else if (!down_read_trylock(&per_mm->umem_rwsem))
return -EAGAIN;
- ib_ucontext_notifier_start_account(context);
- ret = rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
- end,
- invalidate_range_start_trampoline,
- blockable, NULL);
- up_read(&context->umem_rwsem);
+ if (!per_mm->active) {
+ up_read(&per_mm->umem_rwsem);
+ /*
+ * At this point active is permanently set and visible to this
+ * CPU without a lock; that fact is relied on to skip the unlock
+ * in range_end.
+ */
+ return 0;
+ }
- return ret;
+ return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
+ invalidate_range_start_trampoline,
+ blockable, NULL);
}
-static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
+static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
u64 end, void *cookie)
{
ib_umem_notifier_end_account(item);
@@ -247,22 +195,16 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+ struct ib_ucontext_per_mm *per_mm =
+ container_of(mn, struct ib_ucontext_per_mm, mn);
- if (!context->invalidate_range)
+ if (unlikely(!per_mm->active))
return;
- /*
- * TODO: we currently bail out if there is any sleepable work to be done
- * in ib_umem_notifier_invalidate_range_start so we shouldn't really block
- * here. But this is ugly and fragile.
- */
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start,
end,
invalidate_range_end_trampoline, true, NULL);
- up_read(&context->umem_rwsem);
- ib_ucontext_notifier_end_account(context);
+ up_read(&per_mm->umem_rwsem);
}
static const struct mmu_notifier_ops ib_umem_notifiers = {
@@ -271,31 +213,158 @@ static const struct mmu_notifier_ops ib_umem_notifiers = {
.invalidate_range_end = ib_umem_notifier_invalidate_range_end,
};
-struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
- unsigned long addr,
- size_t size)
+static void add_umem_to_per_mm(struct ib_umem_odp *umem_odp)
{
- struct ib_umem *umem;
+ struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
+ struct ib_umem *umem = &umem_odp->umem;
+
+ down_write(&per_mm->umem_rwsem);
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_insert(&umem_odp->interval_tree,
+ &per_mm->umem_tree);
+ up_write(&per_mm->umem_rwsem);
+}
+
+static void remove_umem_from_per_mm(struct ib_umem_odp *umem_odp)
+{
+ struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
+ struct ib_umem *umem = &umem_odp->umem;
+
+ down_write(&per_mm->umem_rwsem);
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_remove(&umem_odp->interval_tree,
+ &per_mm->umem_tree);
+ complete_all(&umem_odp->notifier_completion);
+
+ up_write(&per_mm->umem_rwsem);
+}
+
+static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
+ struct mm_struct *mm)
+{
+ struct ib_ucontext_per_mm *per_mm;
+ int ret;
+
+ per_mm = kzalloc(sizeof(*per_mm), GFP_KERNEL);
+ if (!per_mm)
+ return ERR_PTR(-ENOMEM);
+
+ per_mm->context = ctx;
+ per_mm->mm = mm;
+ per_mm->umem_tree = RB_ROOT_CACHED;
+ init_rwsem(&per_mm->umem_rwsem);
+ per_mm->active = ctx->invalidate_range;
+
+ rcu_read_lock();
+ per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ rcu_read_unlock();
+
+ WARN_ON(mm != current->mm);
+
+ per_mm->mn.ops = &ib_umem_notifiers;
+ ret = mmu_notifier_register(&per_mm->mn, per_mm->mm);
+ if (ret) {
+ dev_err(&ctx->device->dev,
+ "Failed to register mmu_notifier %d\n", ret);
+ goto out_pid;
+ }
+
+ list_add(&per_mm->ucontext_list, &ctx->per_mm_list);
+ return per_mm;
+
+out_pid:
+ put_pid(per_mm->tgid);
+ kfree(per_mm);
+ return ERR_PTR(ret);
+}
+
+static int get_per_mm(struct ib_umem_odp *umem_odp)
+{
+ struct ib_ucontext *ctx = umem_odp->umem.context;
+ struct ib_ucontext_per_mm *per_mm;
+
+ /*
+ * Generally speaking we expect only one or two per_mm in this list,
+ * so no reason to optimize this search today.
+ */
+ mutex_lock(&ctx->per_mm_list_lock);
+ list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
+ if (per_mm->mm == umem_odp->umem.owning_mm)
+ goto found;
+ }
+
+ per_mm = alloc_per_mm(ctx, umem_odp->umem.owning_mm);
+ if (IS_ERR(per_mm)) {
+ mutex_unlock(&ctx->per_mm_list_lock);
+ return PTR_ERR(per_mm);
+ }
+
+found:
+ umem_odp->per_mm = per_mm;
+ per_mm->odp_mrs_count++;
+ mutex_unlock(&ctx->per_mm_list_lock);
+
+ return 0;
+}
+
+static void free_per_mm(struct rcu_head *rcu)
+{
+ kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
+}
+
+void put_per_mm(struct ib_umem_odp *umem_odp)
+{
+ struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
+ struct ib_ucontext *ctx = umem_odp->umem.context;
+ bool need_free;
+
+ mutex_lock(&ctx->per_mm_list_lock);
+ umem_odp->per_mm = NULL;
+ per_mm->odp_mrs_count--;
+ need_free = per_mm->odp_mrs_count == 0;
+ if (need_free)
+ list_del(&per_mm->ucontext_list);
+ mutex_unlock(&ctx->per_mm_list_lock);
+
+ if (!need_free)
+ return;
+
+ /*
+ * NOTE! mmu_notifier_unregister() can happen between a start/end
+ * callback, resulting in a start/end, and thus an unbalanced
+ * lock. This doesn't really matter to us since we are about to kfree
+ * the memory that holds the lock, however LOCKDEP doesn't like this.
+ */
+ down_write(&per_mm->umem_rwsem);
+ per_mm->active = false;
+ up_write(&per_mm->umem_rwsem);
+
+ WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
+ mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
+ put_pid(per_mm->tgid);
+ mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
+}
+
+struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
+ unsigned long addr, size_t size)
+{
+ struct ib_ucontext *ctx = per_mm->context;
struct ib_umem_odp *odp_data;
+ struct ib_umem *umem;
int pages = size >> PAGE_SHIFT;
int ret;
- umem = kzalloc(sizeof(*umem), GFP_KERNEL);
- if (!umem)
+ odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
+ if (!odp_data)
return ERR_PTR(-ENOMEM);
-
- umem->context = context;
+ umem = &odp_data->umem;
+ umem->context = ctx;
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
umem->writable = 1;
-
- odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
- if (!odp_data) {
- ret = -ENOMEM;
- goto out_umem;
- }
- odp_data->umem = umem;
+ umem->is_odp = 1;
+ odp_data->per_mm = per_mm;
mutex_init(&odp_data->umem_mutex);
init_completion(&odp_data->notifier_completion);
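
This hunk is the heart of the umem_odp.c rework: notifier state moves from the ib_ucontext into a per-mm object, so every ODP umem backed by the same mm shares one mmu_notifier registration. get_per_mm() finds the entry by a linear scan of a deliberately short list, creates it on first use, and odp_mrs_count decides when put_per_mm() may tear the registration down. A runnable user-space model of that lookup-or-create-and-count pattern; all names are stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    struct per_mm {
        long key;             /* stands in for the mm pointer */
        int odp_mrs_count;    /* umems sharing this entry */
        struct per_mm *next;
    };

    static struct per_mm *head;

    static struct per_mm *get_per_mm(long key)
    {
        struct per_mm *p;

        for (p = head; p; p = p->next)
            if (p->key == key)
                goto found;

        p = calloc(1, sizeof(*p));
        if (!p)
            return NULL;
        p->key = key;
        p->next = head;
        head = p;
    found:
        p->odp_mrs_count++;
        return p;
    }

    int main(void)
    {
        struct per_mm *p = get_per_mm(42);

        p = get_per_mm(42); /* second umem on the same mm */
        printf("%d\n", p ? p->odp_mrs_count : -1); /* 2 */
        return 0;
    }
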
@@ -314,39 +383,34 @@ struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
goto out_page_list;
}
- down_write(&context->umem_rwsem);
- context->odp_mrs_count++;
- rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
- if (likely(!atomic_read(&context->notifier_count)))
- odp_data->mn_counters_active = true;
- else
- list_add(&odp_data->no_private_counters,
- &context->no_private_counters);
- up_write(&context->umem_rwsem);
-
- umem->odp_data = odp_data;
+ /*
+ * Caller must ensure that the umem_odp that the per_mm came from
+ * cannot be freed during the call to ib_alloc_odp_umem.
+ */
+ mutex_lock(&ctx->per_mm_list_lock);
+ per_mm->odp_mrs_count++;
+ mutex_unlock(&ctx->per_mm_list_lock);
+ add_umem_to_per_mm(odp_data);
- return umem;
+ return odp_data;
out_page_list:
vfree(odp_data->page_list);
out_odp_data:
kfree(odp_data);
-out_umem:
- kfree(umem);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
-int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
- int access)
+int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
+ struct ib_umem *umem = &umem_odp->umem;
+ /*
+ * NOTE: This must be called in a process context where umem->owning_mm
+ * == current->mm
+ */
+ struct mm_struct *mm = umem->owning_mm;
int ret_val;
- struct pid *our_pid;
- struct mm_struct *mm = get_task_mm(current);
-
- if (!mm)
- return -EINVAL;
if (access & IB_ACCESS_HUGETLB) {
struct vm_area_struct *vma;
@@ -366,111 +430,43 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
umem->hugetlb = 0;
}
- /* Prevent creating ODP MRs in child processes */
- rcu_read_lock();
- our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
- rcu_read_unlock();
- put_pid(our_pid);
- if (context->tgid != our_pid) {
- ret_val = -EINVAL;
- goto out_mm;
- }
-
- umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
- if (!umem->odp_data) {
- ret_val = -ENOMEM;
- goto out_mm;
- }
- umem->odp_data->umem = umem;
-
- mutex_init(&umem->odp_data->umem_mutex);
+ mutex_init(&umem_odp->umem_mutex);
- init_completion(&umem->odp_data->notifier_completion);
+ init_completion(&umem_odp->notifier_completion);
if (ib_umem_num_pages(umem)) {
- umem->odp_data->page_list =
- vzalloc(array_size(sizeof(*umem->odp_data->page_list),
+ umem_odp->page_list =
+ vzalloc(array_size(sizeof(*umem_odp->page_list),
ib_umem_num_pages(umem)));
- if (!umem->odp_data->page_list) {
- ret_val = -ENOMEM;
- goto out_odp_data;
- }
+ if (!umem_odp->page_list)
+ return -ENOMEM;
- umem->odp_data->dma_list =
- vzalloc(array_size(sizeof(*umem->odp_data->dma_list),
+ umem_odp->dma_list =
+ vzalloc(array_size(sizeof(*umem_odp->dma_list),
ib_umem_num_pages(umem)));
- if (!umem->odp_data->dma_list) {
+ if (!umem_odp->dma_list) {
ret_val = -ENOMEM;
goto out_page_list;
}
}
- /*
- * When using MMU notifiers, we will get a
- * notification before the "current" task (and MM) is
- * destroyed. We use the umem_rwsem semaphore to synchronize.
- */
- down_write(&context->umem_rwsem);
- context->odp_mrs_count++;
- if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
- rbt_ib_umem_insert(&umem->odp_data->interval_tree,
- &context->umem_tree);
- if (likely(!atomic_read(&context->notifier_count)) ||
- context->odp_mrs_count == 1)
- umem->odp_data->mn_counters_active = true;
- else
- list_add(&umem->odp_data->no_private_counters,
- &context->no_private_counters);
- downgrade_write(&context->umem_rwsem);
-
- if (context->odp_mrs_count == 1) {
- /*
- * Note that at this point, no MMU notifier is running
- * for this context!
- */
- atomic_set(&context->notifier_count, 0);
- INIT_HLIST_NODE(&context->mn.hlist);
- context->mn.ops = &ib_umem_notifiers;
- /*
- * Lock-dep detects a false positive for mmap_sem vs.
- * umem_rwsem, due to not grasping downgrade_write correctly.
- */
- lockdep_off();
- ret_val = mmu_notifier_register(&context->mn, mm);
- lockdep_on();
- if (ret_val) {
- pr_err("Failed to register mmu_notifier %d\n", ret_val);
- ret_val = -EBUSY;
- goto out_mutex;
- }
- }
-
- up_read(&context->umem_rwsem);
+ ret_val = get_per_mm(umem_odp);
+ if (ret_val)
+ goto out_dma_list;
+ add_umem_to_per_mm(umem_odp);
- /*
- * Note that doing an mmput can cause a notifier for the relevant mm.
- * If the notifier is called while we hold the umem_rwsem, this will
- * cause a deadlock. Therefore, we release the reference only after we
- * released the semaphore.
- */
- mmput(mm);
return 0;
-out_mutex:
- up_read(&context->umem_rwsem);
- vfree(umem->odp_data->dma_list);
+out_dma_list:
+ vfree(umem_odp->dma_list);
out_page_list:
- vfree(umem->odp_data->page_list);
-out_odp_data:
- kfree(umem->odp_data);
-out_mm:
- mmput(mm);
+ vfree(umem_odp->page_list);
return ret_val;
}
-void ib_umem_odp_release(struct ib_umem *umem)
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_ucontext *context = umem->context;
+ struct ib_umem *umem = &umem_odp->umem;
/*
* Ensure that no more pages are mapped in the umem.
@@ -478,61 +474,13 @@ void ib_umem_odp_release(struct ib_umem *umem)
* It is the driver's responsibility to ensure, before calling us,
* that the hardware will not attempt to access the MR any more.
*/
- ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
ib_umem_end(umem));
- down_write(&context->umem_rwsem);
- if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
- rbt_ib_umem_remove(&umem->odp_data->interval_tree,
- &context->umem_tree);
- context->odp_mrs_count--;
- if (!umem->odp_data->mn_counters_active) {
- list_del(&umem->odp_data->no_private_counters);
- complete_all(&umem->odp_data->notifier_completion);
- }
-
- /*
- * Downgrade the lock to a read lock. This ensures that the notifiers
- * (who lock the mutex for reading) will be able to finish, and we
- * will be able to enventually obtain the mmu notifiers SRCU. Note
- * that since we are doing it atomically, no other user could register
- * and unregister while we do the check.
- */
- downgrade_write(&context->umem_rwsem);
- if (!context->odp_mrs_count) {
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(context->tgid,
- PIDTYPE_PID);
- if (owning_process == NULL)
- /*
- * The process is already dead, notifier were removed
- * already.
- */
- goto out;
-
- owning_mm = get_task_mm(owning_process);
- if (owning_mm == NULL)
- /*
- * The process' mm is already dead, notifier were
- * removed already.
- */
- goto out_put_task;
- mmu_notifier_unregister(&context->mn, owning_mm);
-
- mmput(owning_mm);
-
-out_put_task:
- put_task_struct(owning_process);
- }
-out:
- up_read(&context->umem_rwsem);
-
- vfree(umem->odp_data->dma_list);
- vfree(umem->odp_data->page_list);
- kfree(umem->odp_data);
- kfree(umem);
+ remove_umem_from_per_mm(umem_odp);
+ put_per_mm(umem_odp);
+ vfree(umem_odp->dma_list);
+ vfree(umem_odp->page_list);
}
/*
@@ -544,7 +492,7 @@ out:
* @access_mask: access permissions needed for this page.
* @current_seq: sequence number for synchronization with invalidations.
* the sequence number is taken from
- * umem->odp_data->notifiers_seq.
+ * umem_odp->notifiers_seq.
*
* The function returns -EFAULT if the DMA mapping operation fails. It returns
* -EAGAIN if a concurrent invalidation prevents us from updating the page.
@@ -554,12 +502,13 @@ out:
* umem.
*/
static int ib_umem_odp_map_dma_single_page(
- struct ib_umem *umem,
+ struct ib_umem_odp *umem_odp,
int page_index,
struct page *page,
u64 access_mask,
unsigned long current_seq)
{
+ struct ib_umem *umem = &umem_odp->umem;
struct ib_device *dev = umem->context->device;
dma_addr_t dma_addr;
int stored_page = 0;
@@ -571,11 +520,11 @@ static int ib_umem_odp_map_dma_single_page(
* handle case of a racing notifier. This check also allows us to bail
* early if we have a notifier running in parallel with us.
*/
- if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
+ if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
ret = -EAGAIN;
goto out;
}
- if (!(umem->odp_data->dma_list[page_index])) {
+ if (!(umem_odp->dma_list[page_index])) {
dma_addr = ib_dma_map_page(dev,
page,
0, BIT(umem->page_shift),
@@ -584,15 +533,15 @@ static int ib_umem_odp_map_dma_single_page(
ret = -EFAULT;
goto out;
}
- umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
- umem->odp_data->page_list[page_index] = page;
+ umem_odp->dma_list[page_index] = dma_addr | access_mask;
+ umem_odp->page_list[page_index] = page;
umem->npages++;
stored_page = 1;
- } else if (umem->odp_data->page_list[page_index] == page) {
- umem->odp_data->dma_list[page_index] |= access_mask;
+ } else if (umem_odp->page_list[page_index] == page) {
+ umem_odp->dma_list[page_index] |= access_mask;
} else {
pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
- umem->odp_data->page_list[page_index], page);
+ umem_odp->page_list[page_index], page);
/* Better remove the mapping now, to prevent any further
* damage. */
remove_existing_mapping = 1;
@@ -605,7 +554,7 @@ out:
if (remove_existing_mapping && umem->context->invalidate_range) {
invalidate_page_trampoline(
- umem,
+ umem_odp,
ib_umem_start(umem) + (page_index >> umem->page_shift),
ib_umem_start(umem) + ((page_index + 1) >>
umem->page_shift),
@@ -621,7 +570,7 @@ out:
*
* Pins the range of pages passed in the argument, and maps them to
* DMA addresses. The DMA addresses of the mapped pages is updated in
- * umem->odp_data->dma_list.
+ * umem_odp->dma_list.
*
* Returns the number of pages mapped in success, negative error code
* for failure.
@@ -629,7 +578,7 @@ out:
* the function from completing its task.
* An -ENOENT error code indicates that userspace process is being terminated
* and mm was already destroyed.
- * @umem: the umem to map and pin
+ * @umem_odp: the umem to map and pin
* @user_virt: the address from which we need to map.
* @bcnt: the minimal number of bytes to pin and map. The mapping might be
* bigger due to alignment, and may also be smaller in case of an error
@@ -639,13 +588,15 @@ out:
* range.
* @current_seq: the MMU notifiers sequence value for synchronization with
* invalidations. The sequence number is read from
- * umem->odp_data->notifiers_seq before calling this function
+ * umem_odp->notifiers_seq before calling this function
*/
-int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
- u64 access_mask, unsigned long current_seq)
+int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
+ u64 bcnt, u64 access_mask,
+ unsigned long current_seq)
{
+ struct ib_umem *umem = &umem_odp->umem;
struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
+ struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
struct page **local_page_list = NULL;
u64 page_mask, off;
int j, k, ret = 0, start_idx, npages = 0, page_shift;
@@ -669,15 +620,14 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
user_virt = user_virt & page_mask;
bcnt += off; /* Charge for the first page offset as well. */
- owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
- if (owning_process == NULL) {
+ /*
+ * owning_process is allowed to be NULL; this means the mm somehow
+ * exists beyond the lifetime of the originating process. Presumably
+ * mmget_not_zero will fail in this case.
+ */
+ owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
+ if (WARN_ON(!mmget_not_zero(umem_odp->umem.owning_mm))) {
ret = -EINVAL;
- goto out_no_task;
- }
-
- owning_mm = get_task_mm(owning_process);
- if (owning_mm == NULL) {
- ret = -ENOENT;
goto out_put_task;
}
@@ -709,7 +659,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
break;
bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
- mutex_lock(&umem->odp_data->umem_mutex);
+ mutex_lock(&umem_odp->umem_mutex);
for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
if (user_virt & ~page_mask) {
p += PAGE_SIZE;
@@ -722,7 +672,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
}
ret = ib_umem_odp_map_dma_single_page(
- umem, k, local_page_list[j],
+ umem_odp, k, local_page_list[j],
access_mask, current_seq);
if (ret < 0)
break;
@@ -730,7 +680,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
p = page_to_phys(local_page_list[j]);
k++;
}
- mutex_unlock(&umem->odp_data->umem_mutex);
+ mutex_unlock(&umem_odp->umem_mutex);
if (ret < 0) {
/* Release left over pages when handling errors. */
@@ -749,16 +699,17 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
mmput(owning_mm);
out_put_task:
- put_task_struct(owning_process);
-out_no_task:
+ if (owning_process)
+ put_task_struct(owning_process);
free_page((unsigned long)local_page_list);
return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
-void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
+void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 bound)
{
+ struct ib_umem *umem = &umem_odp->umem;
int idx;
u64 addr;
struct ib_device *dev = umem->context->device;
@@ -770,12 +721,12 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
* faults from completion. We might be racing with other
* invalidations, so we must make sure we free each page only
* once. */
- mutex_lock(&umem->odp_data->umem_mutex);
+ mutex_lock(&umem_odp->umem_mutex);
for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
- if (umem->odp_data->page_list[idx]) {
- struct page *page = umem->odp_data->page_list[idx];
- dma_addr_t dma = umem->odp_data->dma_list[idx];
+ if (umem_odp->page_list[idx]) {
+ struct page *page = umem_odp->page_list[idx];
+ dma_addr_t dma = umem_odp->dma_list[idx];
dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
WARN_ON(!dma_addr);
@@ -798,12 +749,12 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
/* on demand pinning support */
if (!umem->context->invalidate_range)
put_page(page);
- umem->odp_data->page_list[idx] = NULL;
- umem->odp_data->dma_list[idx] = 0;
+ umem_odp->page_list[idx] = NULL;
+ umem_odp->dma_list[idx] = 0;
umem->npages--;
}
}
- mutex_unlock(&umem->odp_data->umem_mutex);
+ mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
@@ -830,7 +781,7 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
return -EAGAIN;
next = rbt_ib_umem_iter_next(node, start, last - 1);
umem = container_of(node, struct ib_umem_odp, interval_tree);
- ret_val = cb(umem->umem, start, last, cookie) || ret_val;
+ ret_val = cb(umem, start, last, cookie) || ret_val;
}
return ret_val;
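
For reference, the reworked ODP interface above is intended to be driven from a driver page-fault path: snapshot umem_odp->notifiers_seq, map the pages, then program the device page table only if no invalidation ran in between. A minimal sketch of that calling pattern, assuming a hypothetical update_device_pagetable() driver hook:

	/* Sketch only; update_device_pagetable() is a hypothetical driver hook. */
	static int odp_fault_range(struct ib_umem_odp *odp, u64 va, u64 bcnt,
				   u64 access_mask)
	{
		unsigned long current_seq;
		int npages;

		/* Snapshot the notifier sequence before pinning any pages. */
		current_seq = READ_ONCE(odp->notifiers_seq);
		smp_rmb();

		npages = ib_umem_odp_map_dma_pages(odp, va, bcnt, access_mask,
						   current_seq);
		if (npages < 0)
			return npages;

		mutex_lock(&odp->umem_mutex);
		if (odp->notifiers_seq == current_seq)
			/* No racing invalidation; safe to program the HW. */
			npages = update_device_pagetable(odp, va, npages);
		mutex_unlock(&odp->umem_mutex);
		return npages;
	}
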
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c34a6852d691..f55f48f6b272 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -138,7 +138,7 @@ static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
static dev_t dynamic_umad_dev;
static dev_t dynamic_issm_dev;
-static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
+static DEFINE_IDA(umad_ida);
static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);
@@ -1132,7 +1132,7 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
if (!port)
return -ENODEV;
- return sprintf(buf, "%s\n", port->ib_dev->name);
+ return sprintf(buf, "%s\n", dev_name(&port->ib_dev->dev));
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -1159,11 +1159,10 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
dev_t base_umad;
dev_t base_issm;
- devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (devnum >= IB_UMAD_MAX_PORTS)
+ devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
+ if (devnum < 0)
return -1;
port->dev_num = devnum;
- set_bit(devnum, dev_map);
if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
@@ -1227,7 +1226,7 @@ err_dev:
err_cdev:
cdev_del(&port->cdev);
- clear_bit(devnum, dev_map);
+ ida_free(&umad_ida, devnum);
return -1;
}
@@ -1261,7 +1260,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
}
mutex_unlock(&port->file_mutex);
- clear_bit(port->dev_num, dev_map);
+ ida_free(&umad_ida, port->dev_num);
}
static void ib_umad_add_one(struct ib_device *device)
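
The bitmap-to-IDA conversion above is a common kernel idiom: the IDA does its own locking and range checking, so the open-coded find_first_zero_bit()/set_bit()/clear_bit() triple collapses to an alloc/free pair. A standalone sketch of the same pattern:

	#include <linux/idr.h>

	static DEFINE_IDA(port_ida);

	static int port_devnum_get(void)
	{
		/* Free id in [0, IB_UMAD_MAX_PORTS - 1], or -ENOMEM/-ENOSPC. */
		return ida_alloc_max(&port_ida, IB_UMAD_MAX_PORTS - 1,
				     GFP_KERNEL);
	}

	static void port_devnum_put(int devnum)
	{
		ida_free(&port_ida, devnum);
	}
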
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 5df8e548cc14..c97935a0c7c6 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -100,13 +100,14 @@ struct ib_uverbs_device {
atomic_t refcount;
int num_comp_vectors;
struct completion comp;
- struct device *dev;
+ struct device dev;
+ /* First group for device attributes, NULL terminated array */
+ const struct attribute_group *groups[2];
struct ib_device __rcu *ib_dev;
int devnum;
struct cdev cdev;
struct rb_root xrcd_tree;
struct mutex xrcd_tree_mutex;
- struct kobject kobj;
struct srcu_struct disassociate_srcu;
struct mutex lists_mutex; /* protect lists */
struct list_head uverbs_file_list;
@@ -146,7 +147,6 @@ struct ib_uverbs_file {
struct ib_event_handler event_handler;
struct ib_uverbs_async_event_file *async_file;
struct list_head list;
- int is_closed;
/*
* To access the uobjects list hw_destroy_rwsem must be held for write
@@ -158,6 +158,9 @@ struct ib_uverbs_file {
spinlock_t uobjects_lock;
struct list_head uobjects;
+ struct mutex umap_lock;
+ struct list_head umaps;
+
u64 uverbs_cmd_mask;
u64 uverbs_ex_cmd_mask;
@@ -218,12 +221,6 @@ struct ib_ucq_object {
u32 async_events_reported;
};
-struct ib_uflow_resources;
-struct ib_uflow_object {
- struct ib_uobject uobject;
- struct ib_uflow_resources *resources;
-};
-
extern const struct file_operations uverbs_event_fops;
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
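
Embedding struct device in ib_uverbs_device (above) replaces the bare kobject so that the driver core's refcount controls the structure's lifetime. A minimal sketch of that ownership pattern, using a hypothetical struct foo_device:

	struct foo_device {
		struct device dev;	/* refcount owner; release frees foo */
	};

	static void foo_release(struct device *device)
	{
		struct foo_device *foo =
			container_of(device, struct foo_device, dev);

		kfree(foo);
	}

	static struct foo_device *foo_alloc(void)
	{
		struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

		if (!foo)
			return NULL;
		device_initialize(&foo->dev);	/* refcount starts at 1 */
		foo->dev.release = foo_release;
		return foo;	/* drop with put_device(&foo->dev) */
	}
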
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e012ca80f9d1..a93853770e3c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -117,18 +117,12 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
/* ufile is required when some objects are released */
ucontext->ufile = file;
- rcu_read_lock();
- ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- rcu_read_unlock();
- ucontext->closing = 0;
+ ucontext->closing = false;
ucontext->cleanup_retryable = false;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- ucontext->umem_tree = RB_ROOT_CACHED;
- init_rwsem(&ucontext->umem_rwsem);
- ucontext->odp_mrs_count = 0;
- INIT_LIST_HEAD(&ucontext->no_private_counters);
-
+ mutex_init(&ucontext->per_mm_list_lock);
+ INIT_LIST_HEAD(&ucontext->per_mm_list);
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
ucontext->invalidate_range = NULL;
@@ -172,7 +166,6 @@ err_fd:
put_unused_fd(resp.async_fd);
err_free:
- put_pid(ucontext->tgid);
ib_dev->dealloc_ucontext(ucontext);
err_alloc:
@@ -2769,16 +2762,7 @@ out_put:
return ret ? ret : in_len;
}
-struct ib_uflow_resources {
- size_t max;
- size_t num;
- size_t collection_num;
- size_t counters_num;
- struct ib_counters **counters;
- struct ib_flow_action **collection;
-};
-
-static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
+struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
struct ib_uflow_resources *resources;
@@ -2808,6 +2792,7 @@ err:
return NULL;
}
+EXPORT_SYMBOL(flow_resources_alloc);
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
@@ -2826,10 +2811,11 @@ void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
kfree(uflow_res->counters);
kfree(uflow_res);
}
+EXPORT_SYMBOL(ib_uverbs_flow_resources_free);
-static void flow_resources_add(struct ib_uflow_resources *uflow_res,
- enum ib_flow_spec_type type,
- void *ibobj)
+void flow_resources_add(struct ib_uflow_resources *uflow_res,
+ enum ib_flow_spec_type type,
+ void *ibobj)
{
WARN_ON(uflow_res->num >= uflow_res->max);
@@ -2850,6 +2836,7 @@ static void flow_resources_add(struct ib_uflow_resources *uflow_res,
uflow_res->num++;
}
+EXPORT_SYMBOL(flow_resources_add);
static int kern_spec_to_ib_spec_action(struct ib_uverbs_file *ufile,
struct ib_uverbs_flow_spec *kern_spec,
@@ -3484,7 +3471,6 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
struct ib_uverbs_create_flow cmd;
struct ib_uverbs_create_flow_resp resp;
struct ib_uobject *uobj;
- struct ib_uflow_object *uflow;
struct ib_flow *flow_id;
struct ib_uverbs_flow_attr *kern_flow_attr;
struct ib_flow_attr *flow_attr;
@@ -3623,13 +3609,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
err = PTR_ERR(flow_id);
goto err_free;
}
- atomic_inc(&qp->usecnt);
- flow_id->qp = qp;
- flow_id->device = qp->device;
- flow_id->uobject = uobj;
- uobj->object = flow_id;
- uflow = container_of(uobj, typeof(*uflow), uobject);
- uflow->resources = uflow_res;
+
+ ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
memset(&resp, 0, sizeof(resp));
resp.flow_handle = uobj->id;
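
Exporting flow_resources_alloc()/flow_resources_add() above lets driver code track every uobject a flow references. A hedged sketch of the intended sequence, built only from the signatures visible in this patch (the function and its arguments are illustrative):

	static int example_create_flow(struct ib_uobject *uobj, struct ib_qp *qp,
				       struct ib_flow *flow_id,
				       struct ib_flow_action *action,
				       size_t num_specs)
	{
		struct ib_uflow_resources *uflow_res;

		uflow_res = flow_resources_alloc(num_specs);
		if (!uflow_res)
			return -ENOMEM;

		/* One call per parsed spec that references another uobject. */
		flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE, action);

		/* Hand ownership of the tracked references to the flow uobject. */
		ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
		return 0;
	}

On an error path the references are dropped instead with ib_uverbs_flow_resources_free(uflow_res).
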
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 1a6b229e3db3..b0e493e8d860 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -57,6 +57,7 @@ struct bundle_priv {
struct ib_uverbs_attr *uattrs;
DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
+ DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
/*
* Must be last. bundle ends in a flex array which overlaps
@@ -143,6 +144,86 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
0, uattr->len - len);
}
+static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
+ const struct uverbs_api_attr *attr_uapi,
+ struct uverbs_objs_arr_attr *attr,
+ struct ib_uverbs_attr *uattr,
+ u32 attr_bkey)
+{
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ size_t array_len;
+ u32 *idr_vals;
+ int ret = 0;
+ size_t i;
+
+ if (uattr->attr_data.reserved)
+ return -EINVAL;
+
+ if (uattr->len % sizeof(u32))
+ return -EINVAL;
+
+ array_len = uattr->len / sizeof(u32);
+ if (array_len < spec->u2.objs_arr.min_len ||
+ array_len > spec->u2.objs_arr.max_len)
+ return -EINVAL;
+
+ attr->uobjects =
+ uverbs_alloc(&pbundle->bundle,
+ array_size(array_len, sizeof(*attr->uobjects)));
+ if (IS_ERR(attr->uobjects))
+ return PTR_ERR(attr->uobjects);
+
+ /*
+ * Since an idr is 4B and *uobjects is >= 4B, we can use attr->uobjects
+ * to store the idr array and avoid an additional memory allocation.
+ * The idr values are offset to the end of the uobjects array so that
+ * each idr can be read and then replaced in place with its pointer.
+ */
+ idr_vals = (u32 *)(attr->uobjects + array_len) - array_len;
+
+ if (uattr->len > sizeof(uattr->data)) {
+ ret = copy_from_user(idr_vals, u64_to_user_ptr(uattr->data),
+ uattr->len);
+ if (ret)
+ return -EFAULT;
+ } else {
+ memcpy(idr_vals, &uattr->data, uattr->len);
+ }
+
+ for (i = 0; i != array_len; i++) {
+ attr->uobjects[i] = uverbs_get_uobject_from_file(
+ spec->u2.objs_arr.obj_type, pbundle->bundle.ufile,
+ spec->u2.objs_arr.access, idr_vals[i]);
+ if (IS_ERR(attr->uobjects[i])) {
+ ret = PTR_ERR(attr->uobjects[i]);
+ break;
+ }
+ }
+
+ attr->len = i;
+ __set_bit(attr_bkey, pbundle->spec_finalize);
+ return ret;
+}
+
+static int uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
+ struct uverbs_objs_arr_attr *attr,
+ bool commit)
+{
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ int current_ret;
+ int ret = 0;
+ size_t i;
+
+ for (i = 0; i != attr->len; i++) {
+ current_ret = uverbs_finalize_object(
+ attr->uobjects[i], spec->u2.objs_arr.access, commit);
+ if (!ret)
+ ret = current_ret;
+ }
+
+ return ret;
+}
+
static int uverbs_process_attr(struct bundle_priv *pbundle,
const struct uverbs_api_attr *attr_uapi,
struct ib_uverbs_attr *uattr, u32 attr_bkey)
@@ -246,6 +327,11 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
}
break;
+
+ case UVERBS_ATTR_TYPE_IDRS_ARRAY:
+ return uverbs_process_idrs_array(pbundle, attr_uapi,
+ &e->objs_arr_attr, uattr,
+ attr_bkey);
default:
return -EOPNOTSUPP;
}
@@ -300,8 +386,7 @@ static int uverbs_set_attr(struct bundle_priv *pbundle,
return -EPROTONOSUPPORT;
return 0;
}
- attr = srcu_dereference(
- *slot, &pbundle->bundle.ufile->device->disassociate_srcu);
+ attr = rcu_dereference_protected(*slot, true);
/* Reject duplicate attributes from user-space */
if (test_bit(attr_bkey, pbundle->bundle.attr_present))
@@ -384,6 +469,7 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
unsigned int i;
int ret = 0;
+ /* fast path for simple uobjects */
i = -1;
while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
i + 1)) < key_bitmap_len) {
@@ -397,6 +483,30 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
ret = current_ret;
}
+ i = -1;
+ while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
+ i + 1)) < key_bitmap_len) {
+ struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
+ const struct uverbs_api_attr *attr_uapi;
+ void __rcu **slot;
+ int current_ret;
+
+ slot = uapi_get_attr_for_method(
+ pbundle,
+ pbundle->method_key | uapi_bkey_to_key_attr(i));
+ if (WARN_ON(!slot))
+ continue;
+
+ attr_uapi = rcu_dereference_protected(*slot, true);
+
+ if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
+ current_ret = uverbs_free_idrs_array(
+ attr_uapi, &attr->objs_arr_attr, commit);
+ if (!ret)
+ ret = current_ret;
+ }
+ }
+
for (memblock = pbundle->allocated_mem; memblock;) {
struct bundle_alloc_head *tmp = memblock;
@@ -429,7 +539,7 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
uapi_key_ioctl_method(hdr->method_id));
if (unlikely(!slot))
return -EPROTONOSUPPORT;
- method_elm = srcu_dereference(*slot, &ufile->device->disassociate_srcu);
+ method_elm = rcu_dereference_protected(*slot, true);
if (!method_elm->use_stack) {
pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL);
@@ -461,6 +571,7 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
memset(pbundle->bundle.attr_present, 0,
sizeof(pbundle->bundle.attr_present));
memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
+ memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
destroy_ret = bundle_destroy(pbundle, ret == 0);
@@ -611,3 +722,26 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
return 0;
}
EXPORT_SYMBOL(uverbs_copy_to);
+
+int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
+{
+ const struct uverbs_attr *attr;
+
+ attr = uverbs_attr_get(attrs_bundle, idx);
+ if (IS_ERR(attr)) {
+ if ((PTR_ERR(attr) != -ENOENT) || !def_val)
+ return PTR_ERR(attr);
+
+ *to = *def_val;
+ } else {
+ *to = attr->ptr_attr.data;
+ }
+
+ if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(_uverbs_get_const);
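
_uverbs_get_const() gives method handlers a bounds-checked read of a constant attribute, with an optional default when the attribute is absent. A minimal usage sketch (the attribute id and helper are illustrative):

	static int read_flags_attr(const struct uverbs_attr_bundle *attrs)
	{
		s64 def_val = 0;	/* used when the attr is absent */
		s64 flags;
		int ret;

		ret = _uverbs_get_const(&flags, attrs, EXAMPLE_ATTR_FLAGS,
					0, U32_MAX, &def_val);
		if (ret)	/* -EINVAL if outside [0, U32_MAX] */
			return ret;

		return example_apply_flags(flags);	/* hypothetical */
	}
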
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 50152c1b1004..6d373f5515b7 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -45,6 +45,7 @@
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@@ -72,7 +73,7 @@ enum {
static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;
-static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
+static DEFINE_IDA(uverbs_ida);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
@@ -169,20 +170,16 @@ int uverbs_dealloc_mw(struct ib_mw *mw)
return ret;
}
-static void ib_uverbs_release_dev(struct kobject *kobj)
+static void ib_uverbs_release_dev(struct device *device)
{
struct ib_uverbs_device *dev =
- container_of(kobj, struct ib_uverbs_device, kobj);
+ container_of(device, struct ib_uverbs_device, dev);
uverbs_destroy_api(dev->uapi);
cleanup_srcu_struct(&dev->disassociate_srcu);
kfree(dev);
}
-static struct kobj_type ib_uverbs_dev_ktype = {
- .release = ib_uverbs_release_dev,
-};
-
static void ib_uverbs_release_async_event_file(struct kref *ref)
{
struct ib_uverbs_async_event_file *file =
@@ -265,7 +262,7 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
- kobject_put(&file->device->kobj);
+ put_device(&file->device->dev);
kfree(file);
}
@@ -817,6 +814,226 @@ out:
}
/*
+ * Each time we map IO memory into user space, this keeps track of the mapping.
+ * When the device is hot-unplugged we 'zap' the mmaps in user space to point
+ * to the zero page and allow the hot unplug to proceed.
+ *
+ * This is necessary for cases like PCI physical hot unplug as the actual BAR
+ * memory may vanish after this and access to it from userspace could MCE.
+ *
+ * RDMA drivers supporting disassociation must have their user space designed
+ * to cope in some way with their IO pages going to the zero page.
+ */
+struct rdma_umap_priv {
+ struct vm_area_struct *vma;
+ struct list_head list;
+};
+
+static const struct vm_operations_struct rdma_umap_ops;
+
+static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+
+ priv->vma = vma;
+ vma->vm_private_data = priv;
+ vma->vm_ops = &rdma_umap_ops;
+
+ mutex_lock(&ufile->umap_lock);
+ list_add(&priv->list, &ufile->umaps);
+ mutex_unlock(&ufile->umap_lock);
+}
+
+/*
+ * The VMA has been dup'd; initialize the vm_private_data with a new
+ * tracking struct.
+ */
+static void rdma_umap_open(struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+ struct rdma_umap_priv *opriv = vma->vm_private_data;
+ struct rdma_umap_priv *priv;
+
+ if (!opriv)
+ return;
+
+ /* We are racing with disassociation */
+ if (!down_read_trylock(&ufile->hw_destroy_rwsem))
+ goto out_zap;
+ /*
+ * Disassociation has already completed; the VMA should already be zapped.
+ */
+ if (!ufile->ucontext)
+ goto out_unlock;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto out_unlock;
+ rdma_umap_priv_init(priv, vma);
+
+ up_read(&ufile->hw_destroy_rwsem);
+ return;
+
+out_unlock:
+ up_read(&ufile->hw_destroy_rwsem);
+out_zap:
+ /*
+ * We can't allow the VMA to be created with the actual IO pages; that
+ * would break our API contract, and it can't be stopped at this
+ * point, so zap it.
+ */
+ vma->vm_private_data = NULL;
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void rdma_umap_close(struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+ struct rdma_umap_priv *priv = vma->vm_private_data;
+
+ if (!priv)
+ return;
+
+ /*
+ * The vma holds a reference on the struct file that created it, which
+ * in turn means that the ib_uverbs_file is guaranteed to exist at
+ * this point.
+ */
+ mutex_lock(&ufile->umap_lock);
+ list_del(&priv->list);
+ mutex_unlock(&ufile->umap_lock);
+ kfree(priv);
+}
+
+static const struct vm_operations_struct rdma_umap_ops = {
+ .open = rdma_umap_open,
+ .close = rdma_umap_close,
+};
+
+static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma,
+ unsigned long size)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ struct rdma_umap_priv *priv;
+
+ if (vma->vm_end - vma->vm_start != size)
+ return ERR_PTR(-EINVAL);
+
+ /* Driver is using this wrong, must be called by ib_uverbs_mmap */
+ if (WARN_ON(!vma->vm_file ||
+ vma->vm_file->private_data != ufile))
+ return ERR_PTR(-EINVAL);
+ lockdep_assert_held(&ufile->device->disassociate_srcu);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+ return priv;
+}
+
+/*
+ * Map IO memory into a process. This is to be called by drivers as part of
+ * their mmap() functions if they wish to send something like PCI-E BAR memory
+ * to userspace.
+ */
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
+
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ vma->vm_page_prot = prot;
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_io);
+
+/*
+ * The page case is here for a slightly different reason: the driver expects
+ * to be able to free the page it is sharing to user space when it destroys
+ * its ucontext, which means we need to zap the user space references.
+ *
+ * We could handle this differently by providing an API to allocate a shared
+ * page and then only freeing the shared page when the last ufile is
+ * destroyed.
+ */
+int rdma_user_mmap_page(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma, struct page *page,
+ unsigned long size)
+{
+ struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
+
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
+ vma->vm_page_prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_page);
+
+void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
+{
+ struct rdma_umap_priv *priv, *next_priv;
+
+ lockdep_assert_held(&ufile->hw_destroy_rwsem);
+
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ /* Get an arbitrary mm pointer that hasn't been cleaned yet */
+ mutex_lock(&ufile->umap_lock);
+ if (!list_empty(&ufile->umaps)) {
+ mm = list_first_entry(&ufile->umaps,
+ struct rdma_umap_priv, list)
+ ->vma->vm_mm;
+ mmget(mm);
+ }
+ mutex_unlock(&ufile->umap_lock);
+ if (!mm)
+ return;
+
+ /*
+ * The umap_lock is nested under mmap_sem since it is used within
+ * the vma_ops callbacks, so we have to clean the list one mm
+ * at a time to get the lock ordering right. Typically there
+ * will only be one mm, so no big deal.
+ */
+ down_write(&mm->mmap_sem);
+ mutex_lock(&ufile->umap_lock);
+ list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
+ list) {
+ struct vm_area_struct *vma = priv->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+ list_del_init(&priv->list);
+
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+ vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
+ }
+ mutex_unlock(&ufile->umap_lock);
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
+
+/*
* ib_uverbs_open() does not need the BKL:
*
* - the ib_uverbs_device structures are properly reference counted and
@@ -839,6 +1056,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
if (!atomic_inc_not_zero(&dev->refcount))
return -ENXIO;
+ get_device(&dev->dev);
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
mutex_lock(&dev->lists_mutex);
ib_dev = srcu_dereference(dev->ib_dev,
@@ -876,9 +1094,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
spin_lock_init(&file->uobjects_lock);
INIT_LIST_HEAD(&file->uobjects);
init_rwsem(&file->hw_destroy_rwsem);
+ mutex_init(&file->umap_lock);
+ INIT_LIST_HEAD(&file->umaps);
filp->private_data = file;
- kobject_get(&dev->kobj);
list_add_tail(&file->list, &dev->uverbs_file_list);
mutex_unlock(&dev->lists_mutex);
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
@@ -899,6 +1118,7 @@ err:
if (atomic_dec_and_test(&dev->refcount))
ib_uverbs_comp_dev(dev);
+ put_device(&dev->dev);
return ret;
}
@@ -909,10 +1129,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);
mutex_lock(&file->device->lists_mutex);
- if (!file->is_closed) {
- list_del(&file->list);
- file->is_closed = 1;
- }
+ list_del_init(&file->list);
mutex_unlock(&file->device->lists_mutex);
if (file->async_file)
@@ -951,37 +1168,34 @@ static struct ib_client uverbs_client = {
.remove = ib_uverbs_remove_one
};
-static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
+static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
+ struct ib_uverbs_device *dev =
+ container_of(device, struct ib_uverbs_device, dev);
int ret = -ENODEV;
int srcu_key;
- struct ib_uverbs_device *dev = dev_get_drvdata(device);
struct ib_device *ib_dev;
- if (!dev)
- return -ENODEV;
-
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
if (ib_dev)
- ret = sprintf(buf, "%s\n", ib_dev->name);
+ ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
return ret;
}
-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+static DEVICE_ATTR_RO(ibdev);
-static ssize_t show_dev_abi_version(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t abi_version_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- struct ib_uverbs_device *dev = dev_get_drvdata(device);
+ struct ib_uverbs_device *dev =
+ container_of(device, struct ib_uverbs_device, dev);
int ret = -ENODEV;
int srcu_key;
struct ib_device *ib_dev;
- if (!dev)
- return -ENODEV;
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
if (ib_dev)
@@ -990,7 +1204,17 @@ static ssize_t show_dev_abi_version(struct device *device,
return ret;
}
-static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
+static DEVICE_ATTR_RO(abi_version);
+
+static struct attribute *ib_dev_attrs[] = {
+ &dev_attr_abi_version.attr,
+ &dev_attr_ibdev.attr,
+ NULL,
+};
+
+static const struct attribute_group dev_attr_group = {
+ .attrs = ib_dev_attrs,
+};
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_VERBS_ABI_VERSION));
@@ -1028,65 +1252,56 @@ static void ib_uverbs_add_one(struct ib_device *device)
return;
}
+ device_initialize(&uverbs_dev->dev);
+ uverbs_dev->dev.class = uverbs_class;
+ uverbs_dev->dev.parent = device->dev.parent;
+ uverbs_dev->dev.release = ib_uverbs_release_dev;
+ uverbs_dev->groups[0] = &dev_attr_group;
+ uverbs_dev->dev.groups = uverbs_dev->groups;
atomic_set(&uverbs_dev->refcount, 1);
init_completion(&uverbs_dev->comp);
uverbs_dev->xrcd_tree = RB_ROOT;
mutex_init(&uverbs_dev->xrcd_tree_mutex);
- kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
mutex_init(&uverbs_dev->lists_mutex);
INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
+ rcu_assign_pointer(uverbs_dev->ib_dev, device);
+ uverbs_dev->num_comp_vectors = device->num_comp_vectors;
- devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (devnum >= IB_UVERBS_MAX_DEVICES)
+ devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
+ GFP_KERNEL);
+ if (devnum < 0)
goto err;
uverbs_dev->devnum = devnum;
- set_bit(devnum, dev_map);
if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
else
base = IB_UVERBS_BASE_DEV + devnum;
- rcu_assign_pointer(uverbs_dev->ib_dev, device);
- uverbs_dev->num_comp_vectors = device->num_comp_vectors;
-
if (ib_uverbs_create_uapi(device, uverbs_dev))
goto err_uapi;
- cdev_init(&uverbs_dev->cdev, NULL);
+ uverbs_dev->dev.devt = base;
+ dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);
+
+ cdev_init(&uverbs_dev->cdev,
+ device->mmap ? &uverbs_mmap_fops : &uverbs_fops);
uverbs_dev->cdev.owner = THIS_MODULE;
- uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
- cdev_set_parent(&uverbs_dev->cdev, &uverbs_dev->kobj);
- kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
- if (cdev_add(&uverbs_dev->cdev, base, 1))
- goto err_cdev;
-
- uverbs_dev->dev = device_create(uverbs_class, device->dev.parent,
- uverbs_dev->cdev.dev, uverbs_dev,
- "uverbs%d", uverbs_dev->devnum);
- if (IS_ERR(uverbs_dev->dev))
- goto err_cdev;
-
- if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
- goto err_class;
- if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
- goto err_class;
- ib_set_client_data(device, &uverbs_client, uverbs_dev);
+ ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
+ if (ret)
+ goto err_uapi;
+ ib_set_client_data(device, &uverbs_client, uverbs_dev);
return;
-err_class:
- device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-err_cdev:
- cdev_del(&uverbs_dev->cdev);
err_uapi:
- clear_bit(devnum, dev_map);
+ ida_free(&uverbs_ida, devnum);
err:
if (atomic_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
- kobject_put(&uverbs_dev->kobj);
+ put_device(&uverbs_dev->dev);
return;
}
@@ -1107,8 +1322,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
while (!list_empty(&uverbs_dev->uverbs_file_list)) {
file = list_first_entry(&uverbs_dev->uverbs_file_list,
struct ib_uverbs_file, list);
- file->is_closed = 1;
- list_del(&file->list);
+ list_del_init(&file->list);
kref_get(&file->ref);
/* We must release the mutex before going ahead and calling
@@ -1156,10 +1370,8 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
if (!uverbs_dev)
return;
- dev_set_drvdata(uverbs_dev->dev, NULL);
- device_destroy(uverbs_class, uverbs_dev->cdev.dev);
- cdev_del(&uverbs_dev->cdev);
- clear_bit(uverbs_dev->devnum, dev_map);
+ cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
+ ida_free(&uverbs_ida, uverbs_dev->devnum);
if (device->disassociate_ucontext) {
/* We disassociate HW resources and immediately return.
@@ -1182,7 +1394,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
if (wait_clients)
wait_for_completion(&uverbs_dev->comp);
- kobject_put(&uverbs_dev->kobj);
+ put_device(&uverbs_dev->dev);
}
static char *uverbs_devnode(struct device *dev, umode_t *mode)
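
rdma_user_mmap_io() above is the driver-facing half of the zap machinery: a driver's mmap handler maps its BAR pages through it so the core can revoke them on disassociation. A minimal sketch of such a handler (the pfn lookup is a hypothetical driver detail):

	static int example_mmap(struct ib_ucontext *ucontext,
				struct vm_area_struct *vma)
	{
		/* example_db_pfn() is hypothetical: resolve vm_pgoff to a BAR pfn. */
		unsigned long pfn = example_db_pfn(ucontext, vma->vm_pgoff);

		/* Registers the VMA so disassociation can zap it later. */
		return rdma_user_mmap_io(ucontext, vma, pfn, PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot));
	}
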
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index d8cfafe23bd9..cb9486ad5c67 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -326,11 +326,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
if (IS_ERR(action))
return PTR_ERR(action);
- atomic_set(&action->usecnt, 0);
- action->device = ib_dev;
- action->type = IB_FLOW_ACTION_ESP;
- action->uobject = uobj;
- uobj->object = action;
+ uverbs_flow_action_fill_action(action, uobj, ib_dev,
+ IB_FLOW_ACTION_ESP);
return 0;
}
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index be854628a7c6..86f3fc5e04b4 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -73,6 +73,18 @@ static int uapi_merge_method(struct uverbs_api *uapi,
if (attr->attr.type == UVERBS_ATTR_TYPE_ENUM_IN)
method_elm->driver_method |= is_driver;
+ /*
+ * Like other uobject-based things we only support a single
+ * uobject being NEW'd or DESTROY'd
+ */
+ if (attr->attr.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
+ u8 access = attr->attr.u2.objs_arr.access;
+
+ if (WARN_ON(access == UVERBS_ACCESS_NEW ||
+ access == UVERBS_ACCESS_DESTROY))
+ return -EINVAL;
+ }
+
attr_slot =
uapi_add_elm(uapi, method_key | uapi_key_attr(attr->id),
sizeof(*attr_slot));
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 8ec7418e99f0..178899e3ce73 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -264,7 +264,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
}
pd->res.type = RDMA_RESTRACK_PD;
- pd->res.kern_name = caller;
+ rdma_restrack_set_task(&pd->res, caller);
rdma_restrack_add(&pd->res);
if (mr_access_flags) {
@@ -710,7 +710,7 @@ static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
ah_attr->roce.dmac,
- sgid_attr->ndev, &hop_limit);
+ sgid_attr, &hop_limit);
grh->hop_limit = hop_limit;
return ret;
@@ -1509,8 +1509,7 @@ static const struct {
};
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask,
- enum rdma_link_layer ll)
+ enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
enum ib_qp_attr_mask req_param, opt_param;
@@ -1629,14 +1628,16 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
if (rdma_ib_or_roce(qp->device, port)) {
if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
- pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
- __func__, qp->device->name);
+ dev_warn(&qp->device->dev,
+ "%s rq_psn overflow, masking to 24 bits\n",
+ __func__);
attr->rq_psn &= 0xffffff;
}
if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
- pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n",
- __func__, qp->device->name);
+ dev_warn(&qp->device->dev,
+ " %s sq_psn overflow, masking to 24 bits\n",
+ __func__);
attr->sq_psn &= 0xffffff;
}
}
@@ -1888,7 +1889,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
cq->cq_context = cq_context;
atomic_set(&cq->usecnt, 0);
cq->res.type = RDMA_RESTRACK_CQ;
- cq->res.kern_name = caller;
+ rdma_restrack_set_task(&cq->res, caller);
rdma_restrack_add(&cq->res);
}
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 96f76896488d..31baa8939a4f 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -40,7 +40,6 @@
#ifndef __BNXT_RE_H__
#define __BNXT_RE_H__
#define ROCE_DRV_MODULE_NAME "bnxt_re"
-#define ROCE_DRV_MODULE_VERSION "1.0.0"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
#define BNXT_RE_PAGE_SHIFT_4K (12)
@@ -120,6 +119,8 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
+#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
+#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 77416bc61e6e..604b71875f5f 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -68,6 +68,8 @@ static const char * const bnxt_re_stat_name[] = {
[BNXT_RE_TX_PKTS] = "tx_pkts",
[BNXT_RE_TX_BYTES] = "tx_bytes",
[BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors",
+ [BNXT_RE_RX_DROPS] = "rx_roce_drops",
+ [BNXT_RE_RX_DISCARDS] = "rx_roce_discards",
[BNXT_RE_TO_RETRANSMITS] = "to_retransmits",
[BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
[BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
@@ -106,7 +108,8 @@ static const char * const bnxt_re_stat_name[] = {
[BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err",
[BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err",
[BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err",
- [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err"
+ [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err",
+ [BNXT_RE_OUT_OF_SEQ_ERR] = "oos_drop_count"
};
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
@@ -128,6 +131,10 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
if (bnxt_re_stats) {
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
+ stats->value[BNXT_RE_RX_DROPS] =
+ le64_to_cpu(bnxt_re_stats->rx_drop_pkts);
+ stats->value[BNXT_RE_RX_DISCARDS] =
+ le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
stats->value[BNXT_RE_RX_PKTS] =
le64_to_cpu(bnxt_re_stats->rx_ucast_pkts);
stats->value[BNXT_RE_RX_BYTES] =
@@ -220,6 +227,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
rdev->stats.res_tx_pci_err;
stats->value[BNXT_RE_RES_RX_PCI_ERR] =
rdev->stats.res_rx_pci_err;
+ stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
+ rdev->stats.res_oos_drop_count;
}
return ARRAY_SIZE(bnxt_re_stat_name);
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index a01a922717d5..76399f477e5c 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -51,6 +51,8 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_RX_DROPS,
+ BNXT_RE_RX_DISCARDS,
BNXT_RE_TO_RETRANSMITS,
BNXT_RE_SEQ_ERR_NAKS_RCVD,
BNXT_RE_MAX_RETRY_EXCEEDED,
@@ -90,6 +92,7 @@ enum bnxt_re_hw_stats {
BNXT_RE_RES_SRQ_LOAD_ERR,
BNXT_RE_RES_TX_PCI_ERR,
BNXT_RE_RES_RX_PCI_ERR,
+ BNXT_RE_OUT_OF_SEQ_ERR,
BNXT_RE_NUM_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index bc2b9e038439..54fdd4cf5288 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1598,8 +1598,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
new_qp_state = qp_attr->qp_state;
if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
- ib_qp->qp_type, qp_attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ ib_qp->qp_type, qp_attr_mask)) {
dev_err(rdev_to_dev(rdev),
"Invalid attribute mask: %#x specified ",
qp_attr_mask);
@@ -2664,6 +2663,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
nq->budget++;
atomic_inc(&rdev->cq_count);
+ spin_lock_init(&cq->cq_lock);
if (context) {
struct bnxt_re_cq_resp resp;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 85cd1a3593d6..cf2282654210 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -67,7 +67,7 @@
#include "hw_counters.h"
static char version[] =
- BNXT_RE_DESC " v" ROCE_DRV_MODULE_VERSION "\n";
+ BNXT_RE_DESC "\n";
MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
@@ -535,6 +535,34 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
return en_dev;
}
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *bnxt_re_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group bnxt_re_dev_attr_group = {
+ .attrs = bnxt_re_attributes,
+};
+
static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
{
ib_unregister_device(&rdev->ibdev);
@@ -547,7 +575,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
/* ib device init */
ibdev->owner = THIS_MODULE;
ibdev->node_type = RDMA_NODE_IB_CA;
- strlcpy(ibdev->name, "bnxt_re%d", IB_DEVICE_NAME_MAX);
strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
strlen(BNXT_RE_DESC) + 5);
ibdev->phys_port_cnt = 1;
@@ -639,34 +666,11 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->get_hw_stats = bnxt_re_ib_get_hw_stats;
ibdev->alloc_hw_stats = bnxt_re_ib_alloc_hw_stats;
+ rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
ibdev->driver_id = RDMA_DRIVER_BNXT_RE;
- return ib_register_device(ibdev, NULL);
-}
-
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
-}
-
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
+ return ib_register_device(ibdev, "bnxt_re%d", NULL);
}
-static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
-static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);
-
-static struct device_attribute *bnxt_re_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type
-};
-
static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
dev_put(rdev->netdev);
@@ -864,10 +868,8 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
int i;
- if (rdev->nq[0].hwq.max_elements) {
- for (i = 1; i < rdev->num_msix; i++)
- bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
- }
+ for (i = 1; i < rdev->num_msix; i++)
+ bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
if (rdev->qplib_res.rcfw)
bnxt_qplib_cleanup_res(&rdev->qplib_res);
@@ -876,6 +878,7 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
int rc = 0, i;
+ int num_vec_enabled = 0;
bnxt_qplib_init_res(&rdev->qplib_res);
@@ -891,9 +894,13 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
"Failed to enable NQ with rc = 0x%x", rc);
goto fail;
}
+ num_vec_enabled++;
}
return 0;
fail:
+ for (i = num_vec_enabled - 1; i >= 0; i--)
+ bnxt_qplib_disable_nq(&rdev->nq[i]);
+
return rc;
}
@@ -925,6 +932,7 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
int rc = 0, i;
+ int num_vec_created = 0;
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
@@ -951,7 +959,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
if (rc) {
dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
i, rc);
- goto dealloc_dpi;
+ goto free_nq;
}
rc = bnxt_re_net_ring_alloc
(rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
@@ -964,14 +972,17 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
dev_err(rdev_to_dev(rdev),
"Failed to allocate NQ fw id with rc = 0x%x",
rc);
+ bnxt_qplib_free_nq(&rdev->nq[i]);
goto free_nq;
}
+ num_vec_created++;
}
return 0;
free_nq:
- for (i = 0; i < rdev->num_msix - 1; i++)
+ for (i = num_vec_created - 1; i >= 0; i--) {
+ bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
bnxt_qplib_free_nq(&rdev->nq[i]);
-dealloc_dpi:
+ }
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
&rdev->qplib_res.dpi_tbl,
&rdev->dpi_privileged);
@@ -989,12 +1000,17 @@ static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
struct ib_event ib_event;
ib_event.device = ibdev;
- if (qp)
+ if (qp) {
ib_event.element.qp = qp;
- else
+ ib_event.event = event;
+ if (qp->event_handler)
+ qp->event_handler(&ib_event, qp->qp_context);
+
+ } else {
ib_event.element.port_num = port_num;
- ib_event.event = event;
- ib_dispatch_event(&ib_event);
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+ }
}
#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN 0x02
@@ -1189,20 +1205,20 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
{
- int i, rc;
+ int rc;
if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
- for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++)
- device_remove_file(&rdev->ibdev.dev,
- bnxt_re_attributes[i]);
/* Cleanup ib dev */
bnxt_re_unregister_ib(rdev);
}
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
- cancel_delayed_work(&rdev->worker);
+ cancel_delayed_work_sync(&rdev->worker);
- bnxt_re_cleanup_res(rdev);
- bnxt_re_free_res(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
+ &rdev->flags))
+ bnxt_re_cleanup_res(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
+ bnxt_re_free_res(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
@@ -1241,7 +1257,7 @@ static void bnxt_re_worker(struct work_struct *work)
static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
- int i, j, rc;
+ int rc;
bool locked;
@@ -1331,12 +1347,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
pr_err("Failed to allocate resources: %#x\n", rc);
goto fail;
}
+ set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
rc = bnxt_re_init_res(rdev);
if (rc) {
pr_err("Failed to initialize resources: %#x\n", rc);
goto fail;
}
+ set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
+
if (!rdev->is_virtfn) {
rc = bnxt_re_setup_qos(rdev);
if (rc)
@@ -1358,20 +1377,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
}
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
dev_info(rdev_to_dev(rdev), "Device registered successfully");
- for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
- rc = device_create_file(&rdev->ibdev.dev,
- bnxt_re_attributes[i]);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to create IB sysfs: %#x", rc);
- /* Must clean up all created device files */
- for (j = 0; j < i; j++)
- device_remove_file(&rdev->ibdev.dev,
- bnxt_re_attributes[j]);
- bnxt_re_unregister_ib(rdev);
- goto fail;
- }
- }
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
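
The bnxt_re conversion above shows the post-series pattern for driver sysfs: a static attribute_group registered with the core before ib_register_device(), so the attributes appear atomically with the device and are removed with it, replacing the racy per-file device_create_file() loop. Condensed, with a hypothetical example_dev:

	static ssize_t hw_rev_show(struct device *device,
				   struct device_attribute *attr, char *buf)
	{
		struct example_dev *edev =
			container_of(device, struct example_dev, ibdev.dev);

		return scnprintf(buf, PAGE_SIZE, "0x%x\n", edev->hw_rev);
	}
	static DEVICE_ATTR_RO(hw_rev);

	static struct attribute *example_attrs[] = {
		&dev_attr_hw_rev.attr,
		NULL
	};

	static const struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};

	/* At registration time: */
	rdma_set_device_sysfs_group(ibdev, &example_attr_group);
	return ib_register_device(ibdev, "example%d", NULL);
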
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 6ad0d46ab879..b98b054148cd 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -36,6 +36,8 @@
* Description: Fast Path Operators
*/
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
@@ -71,8 +73,7 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
if (!qp->sq.flushed) {
dev_dbg(&scq->hwq.pdev->dev,
- "QPLIB: FP: Adding to SQ Flush list = %p",
- qp);
+ "FP: Adding to SQ Flush list = %p\n", qp);
bnxt_qplib_cancel_phantom_processing(qp);
list_add_tail(&qp->sq_flush, &scq->sqf_head);
qp->sq.flushed = true;
@@ -80,8 +81,7 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
if (!qp->srq) {
if (!qp->rq.flushed) {
dev_dbg(&rcq->hwq.pdev->dev,
- "QPLIB: FP: Adding to RQ Flush list = %p",
- qp);
+ "FP: Adding to RQ Flush list = %p\n", qp);
list_add_tail(&qp->rq_flush, &rcq->rqf_head);
qp->rq.flushed = true;
}
@@ -207,7 +207,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
if (!qp->sq_hdr_buf) {
rc = -ENOMEM;
dev_err(&res->pdev->dev,
- "QPLIB: Failed to create sq_hdr_buf");
+ "Failed to create sq_hdr_buf\n");
goto fail;
}
}
@@ -221,7 +221,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
if (!qp->rq_hdr_buf) {
rc = -ENOMEM;
dev_err(&res->pdev->dev,
- "QPLIB: Failed to create rq_hdr_buf");
+ "Failed to create rq_hdr_buf\n");
goto fail;
}
}
@@ -277,8 +277,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
num_cqne_processed++;
else
dev_warn(&nq->pdev->dev,
- "QPLIB: cqn - type 0x%x not handled",
- type);
+ "cqn - type 0x%x not handled\n", type);
spin_unlock_bh(&cq->compl_lock);
break;
}
@@ -298,7 +297,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
num_srqne_processed++;
else
dev_warn(&nq->pdev->dev,
- "QPLIB: SRQ event 0x%x not handled",
+ "SRQ event 0x%x not handled\n",
nqsrqe->event);
break;
}
@@ -306,8 +305,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
break;
default:
dev_warn(&nq->pdev->dev,
- "QPLIB: nqe with type = 0x%x not handled",
- type);
+ "nqe with type = 0x%x not handled\n", type);
break;
}
raw_cons++;
@@ -360,7 +358,8 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
}
/* Make sure the HW is stopped! */
- bnxt_qplib_nq_stop_irq(nq, true);
+ if (nq->requested)
+ bnxt_qplib_nq_stop_irq(nq, true);
if (nq->bar_reg_iomem)
iounmap(nq->bar_reg_iomem);
@@ -396,7 +395,7 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
rc = irq_set_affinity_hint(nq->vector, &nq->mask);
if (rc) {
dev_warn(&nq->pdev->dev,
- "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+ "set affinity failed; vector: %d nq_idx: %d\n",
nq->vector, nq_indx);
}
nq->requested = true;
@@ -443,7 +442,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
if (rc) {
dev_err(&nq->pdev->dev,
- "QPLIB: Failed to request irq for nq-idx %d", nq_idx);
+ "Failed to request irq for nq-idx %d\n", nq_idx);
goto fail;
}
@@ -662,8 +661,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
spin_lock(&srq_hwq->lock);
if (srq->start_idx == srq->last_idx) {
- dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
- srq->id);
+ dev_err(&srq_hwq->pdev->dev,
+ "FP: SRQ (0x%x) is full!\n", srq->id);
rc = -EINVAL;
spin_unlock(&srq_hwq->lock);
goto done;
@@ -1324,7 +1323,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
}
if (i == res->sgid_tbl.max)
- dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");
+ dev_warn(&res->pdev->dev, "SGID not found??\n");
qp->ah.hop_limit = sb->hop_limit;
qp->ah.traffic_class = sb->traffic_class;
@@ -1536,7 +1535,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
if (bnxt_qplib_queue_full(sq)) {
dev_err(&sq->hwq.pdev->dev,
- "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
+ "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
sq->q_full_delta);
rc = -ENOMEM;
@@ -1561,7 +1560,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
/* Copy the inline data */
if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
dev_warn(&sq->hwq.pdev->dev,
- "QPLIB: Inline data length > 96 detected");
+ "Inline data length > 96 detected\n");
data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
} else {
data_len = wqe->inline_len;
@@ -1776,7 +1775,7 @@ done:
queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
} else {
dev_err(&sq->hwq.pdev->dev,
- "QPLIB: FP: Failed to allocate SQ nq_work!");
+ "FP: Failed to allocate SQ nq_work!\n");
rc = -ENOMEM;
}
}
@@ -1815,13 +1814,12 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
sch_handler = true;
dev_dbg(&rq->hwq.pdev->dev,
- "%s Error QP. Scheduling for poll_cq\n",
- __func__);
+ "%s: Error QP. Scheduling for poll_cq\n", __func__);
goto queue_err;
}
if (bnxt_qplib_queue_full(rq)) {
dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
+ "FP: QP (0x%x) RQ is full!\n", qp->id);
rc = -EINVAL;
goto done;
}
@@ -1870,7 +1868,7 @@ queue_err:
queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
} else {
dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: Failed to allocate RQ nq_work!");
+ "FP: Failed to allocate RQ nq_work!\n");
rc = -ENOMEM;
}
}
@@ -1932,7 +1930,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
if (!cq->dpi) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: FP: CREATE_CQ failed due to NULL DPI");
+ "FP: CREATE_CQ failed due to NULL DPI\n");
return -EINVAL;
}
req.dpi = cpu_to_le32(cq->dpi->dpi);
@@ -1969,6 +1967,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
INIT_LIST_HEAD(&cq->sqf_head);
INIT_LIST_HEAD(&cq->rqf_head);
spin_lock_init(&cq->compl_lock);
+ spin_lock_init(&cq->flush_lock);
bnxt_qplib_arm_cq_enable(cq);
return 0;
@@ -2172,7 +2171,7 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
* comes back
*/
dev_dbg(&cq->hwq.pdev->dev,
- "FP:Got Phantom CQE");
+ "FP: Got Phantom CQE\n");
sq->condition = false;
sq->single = true;
rc = 0;
@@ -2189,7 +2188,7 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
peek_raw_cq_cons++;
}
dev_err(&cq->hwq.pdev->dev,
- "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
+ "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
rc = -EINVAL;
}
@@ -2213,7 +2212,7 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: Process Req qp is NULL");
+ "FP: Process Req qp is NULL\n");
return -EINVAL;
}
sq = &qp->sq;
@@ -2221,16 +2220,14 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
if (cqe_sq_cons > sq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process req reported ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
+ "FP: CQ Process req reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
cqe_sq_cons, sq->hwq.max_elements);
return -EINVAL;
}
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
/* Require to walk the sq's swq to fabricate CQEs for all previously
@@ -2262,9 +2259,7 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
hwcqe->status != CQ_REQ_STATUS_OK) {
cqe->status = hwcqe->status;
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Processed Req ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
+ "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
@@ -2330,12 +2325,12 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
+ dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
return -EINVAL;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
@@ -2356,9 +2351,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
return -EINVAL;
if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
wr_id_idx, srq->hwq.max_elements);
return -EINVAL;
}
@@ -2371,9 +2364,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
rq = &qp->rq;
if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
@@ -2409,12 +2400,12 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
+ dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
return -EINVAL;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
cqe = *pcqe;
@@ -2439,9 +2430,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
wr_id_idx, srq->hwq.max_elements);
return -EINVAL;
}
@@ -2454,9 +2443,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
rq = &qp->rq;
if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
@@ -2508,13 +2495,12 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cq Raw/QP1 qp is NULL");
+ dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
return -EINVAL;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
cqe = *pcqe;
@@ -2543,14 +2529,12 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
srq = qp->srq;
if (!srq) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: SRQ used but not defined??");
+ "FP: SRQ used but not defined??\n");
return -EINVAL;
}
if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Raw/QP1 ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
wr_id_idx, srq->hwq.max_elements);
return -EINVAL;
}
@@ -2563,9 +2547,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
rq = &qp->rq;
if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: ix 0x%x exceeded RQ max 0x%x",
+ "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
@@ -2600,14 +2582,14 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
/* Check the Status */
if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
dev_warn(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
+ "FP: CQ Process Terminal Error status = 0x%x\n",
hwcqe->status);
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process terminal qp is NULL");
+ "FP: CQ Process terminal qp is NULL\n");
return -EINVAL;
}
@@ -2623,16 +2605,14 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
if (cqe_cons > sq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process terminal reported ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
+ "FP: CQ Process terminal reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
cqe_cons, sq->hwq.max_elements);
goto do_rq;
}
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto sq_done;
}
@@ -2673,16 +2653,14 @@ do_rq:
goto done;
} else if (cqe_cons > rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Processed terminal ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
+ "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
cqe_cons, rq->hwq.max_elements);
goto done;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
rc = 0;
goto done;
}
@@ -2704,7 +2682,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
/* Check the Status */
if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
+ "FP: CQ Process Cutoff Error status = 0x%x\n",
hwcqe->status);
return -EINVAL;
}
@@ -2724,16 +2702,12 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
spin_lock_irqsave(&cq->flush_lock, flags);
list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
- dev_dbg(&cq->hwq.pdev->dev,
- "QPLIB: FP: Flushing SQ QP= %p",
- qp);
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
__flush_sq(&qp->sq, qp, &cqe, &budget);
}
list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
- dev_dbg(&cq->hwq.pdev->dev,
- "QPLIB: FP: Flushing RQ QP= %p",
- qp);
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
__flush_rq(&qp->rq, qp, &cqe, &budget);
}
spin_unlock_irqrestore(&cq->flush_lock, flags);
@@ -2801,7 +2775,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
goto exit;
default:
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cq unknown type 0x%lx",
+ "process_cq unknown type 0x%lx\n",
hw_cqe->cqe_type_toggle &
CQ_BASE_CQE_TYPE_MASK);
rc = -EINVAL;
@@ -2814,7 +2788,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
* next one
*/
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cqe error rc = 0x%x", rc);
+ "process_cqe error rc = 0x%x\n", rc);
}
raw_cons++;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 2852d350ada1..be4e33e9f962 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -35,6 +35,9 @@
*
* Description: RDMA Controller HW interface
*/
+
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
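
Aside: the dev_fmt() define added above is what lets every per-call "QPLIB: "
literal in this file be dropped. A minimal sketch of the mechanism, assuming
the dev_printk() wrappers expand roughly as they do in current kernels:

	#define dev_fmt(fmt) "QPLIB: " fmt

	/* The device printk wrappers fold dev_fmt() into the format
	 * string, approximately:
	 *
	 *	#define dev_err(dev, fmt, ...) \
	 *		_dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
	 *
	 * so a call site like this one ...
	 */
	static void example(struct bnxt_qplib_rcfw *rcfw)
	{
		dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n");
		/* ... logs "<device>: QPLIB: RCFW: CMDQ is full!"
		 * without repeating "QPLIB: " at every call site.
		 */
	}
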
@@ -96,14 +99,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW not initialized, reject opcode 0x%x",
- opcode);
+ "RCFW not initialized, reject opcode 0x%x\n", opcode);
return -EINVAL;
}
if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
- dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
+ dev_err(&rcfw->pdev->dev, "RCFW already initialized!\n");
return -EINVAL;
}
@@ -115,7 +117,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
*/
spin_lock_irqsave(&cmdq->lock, flags);
if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
+ dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n");
spin_unlock_irqrestore(&cmdq->lock, flags);
return -EAGAIN;
}
@@ -154,7 +156,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
if (!cmdqe) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW request failed with no cmdqe!");
+ "RCFW request failed with no cmdqe!\n");
goto done;
}
/* Copy a segment of the req cmd to the cmdq */
@@ -210,7 +212,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
/* send failed */
- dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n",
cookie, opcode);
return rc;
}
@@ -224,7 +226,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
rc = __wait_for_resp(rcfw, cookie);
if (rc) {
/* timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n",
cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
return rc;
@@ -232,7 +234,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
if (evnt->status) {
/* failed with status */
- dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
cookie, opcode, evnt->status);
rc = -EFAULT;
}
@@ -298,9 +300,9 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
qp_id = le32_to_cpu(err_event->xid);
qp = rcfw->qp_tbl[qp_id].qp_handle;
dev_dbg(&rcfw->pdev->dev,
- "QPLIB: Received QP error notification");
+ "Received QP error notification\n");
dev_dbg(&rcfw->pdev->dev,
- "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
+ "qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
if (!qp)
@@ -309,8 +311,17 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
rcfw->aeq_handler(rcfw, qp_event, qp);
break;
default:
- /* Command Response */
- spin_lock_irqsave(&cmdq->lock, flags);
+ /*
+ * Command Response
+	 * cmdq->lock needs to be acquired to synchronize
+	 * the command send and completion reaping. This function
+	 * is always called with creq->lock held. Using
+	 * the nested variant of spin_lock.
+	 */
+
+ spin_lock_irqsave_nested(&cmdq->lock, flags,
+ SINGLE_DEPTH_NESTING);
cookie = le16_to_cpu(qp_event->cookie);
mcookie = qp_event->cookie;
blocked = cookie & RCFW_CMD_IS_BLOCKING;
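
For context on the spin_lock_irqsave_nested() call introduced above: lockdep
groups spinlocks into classes, and taking a second lock it considers the same
class while the first is held looks like a self-deadlock. A minimal sketch,
with illustrative names rather than the driver's own:

	static spinlock_t outer_lock;	/* stands in for creq->lock */
	static spinlock_t inner_lock;	/* stands in for cmdq->lock */

	static void reap_with_nesting(void)
	{
		unsigned long oflags, iflags;

		spin_lock_irqsave(&outer_lock, oflags);
		/* SINGLE_DEPTH_NESTING tells lockdep this is one
		 * deliberate level of nesting, not a recursive
		 * acquisition of the same lock.
		 */
		spin_lock_irqsave_nested(&inner_lock, iflags,
					 SINGLE_DEPTH_NESTING);
		/* ... reap command completions under both locks ... */
		spin_unlock_irqrestore(&inner_lock, iflags);
		spin_unlock_irqrestore(&outer_lock, oflags);
	}
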
@@ -322,14 +333,16 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
crsqe->resp = NULL;
} else {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
- crsqe->resp ? "mismatch" : "collision",
- crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
+ if (crsqe->resp && crsqe->resp->cookie)
+ dev_err(&rcfw->pdev->dev,
+ "CMD %s cookie sent=%#x, recd=%#x\n",
+ crsqe->resp ? "mismatch" : "collision",
+ crsqe->resp ? crsqe->resp->cookie : 0,
+ mcookie);
}
if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
dev_warn(&rcfw->pdev->dev,
- "QPLIB: CMD bit %d was not requested", cbit);
+ "CMD bit %d was not requested\n", cbit);
cmdq->cons += crsqe->req_size;
crsqe->req_size = 0;
@@ -376,14 +389,14 @@ static void bnxt_qplib_service_creq(unsigned long data)
(rcfw, (struct creq_func_event *)creqe))
rcfw->creq_func_event_processed++;
else
- dev_warn
- (&rcfw->pdev->dev, "QPLIB:aeqe:%#x Not handled",
- type);
+ dev_warn(&rcfw->pdev->dev,
+ "aeqe:%#x Not handled\n", type);
break;
default:
- dev_warn(&rcfw->pdev->dev, "QPLIB: creqe with ");
- dev_warn(&rcfw->pdev->dev,
- "QPLIB: op_event = 0x%x not handled", type);
+ if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
+ dev_warn(&rcfw->pdev->dev,
+ "creqe with event 0x%x not handled\n",
+ type);
break;
}
raw_cons++;
@@ -551,7 +564,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
HWQ_TYPE_L2_CMPL)) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: HW channel CREQ allocation failed");
+ "HW channel CREQ allocation failed\n");
goto fail;
}
rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
@@ -560,7 +573,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
HWQ_TYPE_CTX)) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: HW channel CMDQ allocation failed");
+ "HW channel CMDQ allocation failed\n");
goto fail;
}
@@ -605,21 +618,18 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
bnxt_qplib_rcfw_stop_irq(rcfw, true);
- if (rcfw->cmdq_bar_reg_iomem)
- iounmap(rcfw->cmdq_bar_reg_iomem);
- rcfw->cmdq_bar_reg_iomem = NULL;
-
- if (rcfw->creq_bar_reg_iomem)
- iounmap(rcfw->creq_bar_reg_iomem);
- rcfw->creq_bar_reg_iomem = NULL;
+ iounmap(rcfw->cmdq_bar_reg_iomem);
+ iounmap(rcfw->creq_bar_reg_iomem);
indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
if (indx != rcfw->bmap_size)
dev_err(&rcfw->pdev->dev,
- "QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
+ "disabling RCFW with pending cmd-bit %lx\n", indx);
kfree(rcfw->cmdq_bitmap);
rcfw->bmap_size = 0;
+ rcfw->cmdq_bar_reg_iomem = NULL;
+ rcfw->creq_bar_reg_iomem = NULL;
rcfw->aeq_handler = NULL;
rcfw->vector = 0;
}
@@ -681,8 +691,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
RCFW_COMM_BASE_OFFSET,
RCFW_COMM_SIZE);
if (!rcfw->cmdq_bar_reg_iomem) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: CMDQ BAR region %d mapping failed",
+ dev_err(&rcfw->pdev->dev, "CMDQ BAR region %d mapping failed\n",
rcfw->cmdq_bar_reg);
return -ENOMEM;
}
@@ -697,14 +706,15 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
if (!res_base)
dev_err(&rcfw->pdev->dev,
- "QPLIB: CREQ BAR region %d resc start is 0!",
+ "CREQ BAR region %d resc start is 0!\n",
rcfw->creq_bar_reg);
rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
4);
if (!rcfw->creq_bar_reg_iomem) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: CREQ BAR region %d mapping failed",
+ dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
rcfw->creq_bar_reg);
+ iounmap(rcfw->cmdq_bar_reg_iomem);
+ rcfw->cmdq_bar_reg_iomem = NULL;
return -ENOMEM;
}
rcfw->creq_qp_event_processed = 0;
@@ -717,7 +727,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
if (rc) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
+ "Failed to request IRQ for CREQ rc = 0x%x\n", rc);
bnxt_qplib_disable_rcfw_channel(rcfw);
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 46416dfe8830..9a8687dc0a79 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -154,6 +154,8 @@ struct bnxt_qplib_qp_node {
void *qp_handle; /* ptr to qplib_qp */
};
+#define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF
+
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
@@ -190,6 +192,8 @@ struct bnxt_qplib_rcfw {
struct bnxt_qplib_crsq *crsqe_tbl;
int qp_tbl_size;
struct bnxt_qplib_qp_node *qp_tbl;
+ u64 oos_prev;
+ u32 init_oos_stats;
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 539a5d44e6db..59eeac55626f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -36,6 +36,8 @@
* Description: QPLib resource manager
*/
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -68,8 +70,7 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
pbl->pg_map_arr[i]);
else
dev_warn(&pdev->dev,
- "QPLIB: PBL free pg_arr[%d] empty?!",
- i);
+ "PBL free pg_arr[%d] empty?!\n", i);
pbl->pg_arr[i] = NULL;
}
}
@@ -537,7 +538,7 @@ static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
if (!pkey_tbl->tbl)
- dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
+ dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
else
kfree(pkey_tbl->tbl);
@@ -578,7 +579,7 @@ int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd *pd)
{
if (test_and_set_bit(pd->id, pdt->tbl)) {
- dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
+ dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
pd->id);
return -EINVAL;
}
@@ -639,11 +640,11 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi *dpi)
{
if (dpi->dpi >= dpit->max) {
- dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
+ dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
return -EINVAL;
}
if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
- dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
+ dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
dpi->dpi);
return -EINVAL;
}
@@ -673,22 +674,21 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
u32 dbr_len, bytes;
if (dpit->dbr_bar_reg_iomem) {
- dev_err(&res->pdev->dev,
- "QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
+ dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
+ dbr_bar_reg);
return -EALREADY;
}
bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
if (!bar_reg_base) {
- dev_err(&res->pdev->dev,
- "QPLIB: BAR region %d resc start failed", dbr_bar_reg);
+ dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
+ dbr_bar_reg);
return -ENOMEM;
}
dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
- dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
- dbr_len);
+ dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
return -ENOMEM;
}
@@ -696,8 +696,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
dbr_len);
if (!dpit->dbr_bar_reg_iomem) {
dev_err(&res->pdev->dev,
- "QPLIB: FP: DBR BAR region %d mapping failed",
- dbr_bar_reg);
+ "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
return -ENOMEM;
}
@@ -767,7 +766,7 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
&stats->dma_map, GFP_KERNEL);
if (!stats->dma) {
- dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
+ dev_err(&pdev->dev, "Stats DMA allocation failed\n");
return -ENOMEM;
}
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 4097f3fa25c5..5216b5f844cc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -36,6 +36,8 @@
* Description: Slow Path Operators
*/
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
@@ -89,7 +91,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
if (!sbuf) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: SP: QUERY_FUNC alloc side buffer failed");
+ "SP: QUERY_FUNC alloc side buffer failed\n");
return -ENOMEM;
}
@@ -135,8 +137,16 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_srq = le16_to_cpu(sb->max_srq);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
- /* Bono only reports 1 PKEY for now, but it can support > 1 */
attr->max_pkey = le32_to_cpu(sb->max_pkeys);
+ /*
+	 * Some versions of FW report more than 0xFFFF.
+	 * Restrict it for now to 0xFFFF to avoid
+	 * reporting a truncated value.
+ */
+ if (attr->max_pkey > 0xFFFF) {
+ /* ib_port_attr::pkey_tbl_len is u16 */
+ attr->max_pkey = 0xFFFF;
+ }
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
attr->l2_db_size = (sb->l2_db_space_size + 1) *
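
The 0xFFFF clamp added above matters because the value eventually lands in a
u16 (ib_port_attr::pkey_tbl_len), and a plain narrowing assignment would wrap
rather than saturate. A hypothetical illustration:

	static u16 clamp_pkeys(u32 fw_max_pkeys)	/* e.g. 0x10001 */
	{
		u16 truncated = fw_max_pkeys;	/* would wrap to 0x0001 */

		(void)truncated;
		/* clamp before the narrowing assignment instead */
		return min_t(u32, fw_max_pkeys, 0xFFFF);
	}
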
@@ -186,8 +196,7 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
(void *)&resp,
NULL, 0);
if (rc) {
- dev_err(&res->pdev->dev,
- "QPLIB: Failed to set function resources");
+ dev_err(&res->pdev->dev, "Failed to set function resources\n");
}
return rc;
}
@@ -199,7 +208,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
{
if (index >= sgid_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: Index %d exceeded SGID table max (%d)",
+ "Index %d exceeded SGID table max (%d)\n",
index, sgid_tbl->max);
return -EINVAL;
}
@@ -217,13 +226,12 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int index;
if (!sgid_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
+ dev_err(&res->pdev->dev, "SGID table not allocated\n");
return -EINVAL;
}
/* Do we need a sgid_lock here? */
if (!sgid_tbl->active) {
- dev_err(&res->pdev->dev,
- "QPLIB: SGID table has no active entries");
+ dev_err(&res->pdev->dev, "SGID table has no active entries\n");
return -ENOMEM;
}
for (index = 0; index < sgid_tbl->max; index++) {
@@ -231,7 +239,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
break;
}
if (index == sgid_tbl->max) {
- dev_warn(&res->pdev->dev, "GID not found in the SGID table");
+ dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
return 0;
}
/* Remove GID from the SGID table */
@@ -244,7 +252,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
if (sgid_tbl->hw_id[index] == 0xFFFF) {
dev_err(&res->pdev->dev,
- "QPLIB: GID entry contains an invalid HW id");
+ "GID entry contains an invalid HW id\n");
return -EINVAL;
}
req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
@@ -258,7 +266,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
sgid_tbl->vlan[index] = 0;
sgid_tbl->active--;
dev_dbg(&res->pdev->dev,
- "QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x",
+ "SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
index, sgid_tbl->hw_id[index], sgid_tbl->active);
sgid_tbl->hw_id[index] = (u16)-1;
@@ -277,20 +285,19 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int i, free_idx;
if (!sgid_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
+ dev_err(&res->pdev->dev, "SGID table not allocated\n");
return -EINVAL;
}
/* Do we need a sgid_lock here? */
if (sgid_tbl->active == sgid_tbl->max) {
- dev_err(&res->pdev->dev, "QPLIB: SGID table is full");
+ dev_err(&res->pdev->dev, "SGID table is full\n");
return -ENOMEM;
}
free_idx = sgid_tbl->max;
for (i = 0; i < sgid_tbl->max; i++) {
if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
dev_dbg(&res->pdev->dev,
- "QPLIB: SGID entry already exist in entry %d!",
- i);
+ "SGID entry already exist in entry %d!\n", i);
*index = i;
return -EALREADY;
} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
@@ -301,7 +308,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
if (free_idx == sgid_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: SGID table is FULL but count is not MAX??");
+ "SGID table is FULL but count is not MAX??\n");
return -ENOMEM;
}
if (update) {
@@ -348,7 +355,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
sgid_tbl->vlan[free_idx] = 1;
dev_dbg(&res->pdev->dev,
- "QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x",
+ "SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
*index = free_idx;
@@ -404,7 +411,7 @@ int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
}
if (index >= pkey_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: Index %d exceeded PKEY table max (%d)",
+ "Index %d exceeded PKEY table max (%d)\n",
index, pkey_tbl->max);
return -EINVAL;
}
@@ -419,14 +426,13 @@ int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
int i, rc = 0;
if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
+ dev_err(&res->pdev->dev, "PKEY table not allocated\n");
return -EINVAL;
}
/* Do we need a pkey_lock here? */
if (!pkey_tbl->active) {
- dev_err(&res->pdev->dev,
- "QPLIB: PKEY table has no active entries");
+ dev_err(&res->pdev->dev, "PKEY table has no active entries\n");
return -ENOMEM;
}
for (i = 0; i < pkey_tbl->max; i++) {
@@ -435,8 +441,7 @@ int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
}
if (i == pkey_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: PKEY 0x%04x not found in the pkey table",
- *pkey);
+ "PKEY 0x%04x not found in the pkey table\n", *pkey);
return -ENOMEM;
}
memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
@@ -453,13 +458,13 @@ int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
int i, free_idx, rc = 0;
if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
+ dev_err(&res->pdev->dev, "PKEY table not allocated\n");
return -EINVAL;
}
/* Do we need a pkey_lock here? */
if (pkey_tbl->active == pkey_tbl->max) {
- dev_err(&res->pdev->dev, "QPLIB: PKEY table is full");
+ dev_err(&res->pdev->dev, "PKEY table is full\n");
return -ENOMEM;
}
free_idx = pkey_tbl->max;
@@ -471,7 +476,7 @@ int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
}
if (free_idx == pkey_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: PKEY table is FULL but count is not MAX??");
+ "PKEY table is FULL but count is not MAX??\n");
return -ENOMEM;
}
/* Add PKEY to the pkey_tbl */
@@ -555,8 +560,7 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
int rc;
if (mrw->lkey == 0xFFFFFFFF) {
- dev_info(&res->pdev->dev,
- "QPLIB: SP: Free a reserved lkey MRW");
+ dev_info(&res->pdev->dev, "SP: Free a reserved lkey MRW\n");
return 0;
}
@@ -666,9 +670,8 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
pages++;
if (pages > MAX_PBL_LVL_1_PGS) {
- dev_err(&res->pdev->dev, "QPLIB: SP: Reg MR pages ");
dev_err(&res->pdev->dev,
- "requested (0x%x) exceeded max (0x%x)",
+ "SP: Reg MR pages requested (0x%x) exceeded max (0x%x)\n",
pages, MAX_PBL_LVL_1_PGS);
return -ENOMEM;
}
@@ -684,7 +687,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
HWQ_TYPE_CTX);
if (rc) {
dev_err(&res->pdev->dev,
- "SP: Reg MR memory allocation failed");
+ "SP: Reg MR memory allocation failed\n");
return -ENOMEM;
}
/* Write to the hwq */
@@ -795,7 +798,7 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
if (!sbuf) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: SP: QUERY_ROCE_STATS alloc side buffer failed");
+ "SP: QUERY_ROCE_STATS alloc side buffer failed\n");
return -ENOMEM;
}
@@ -845,6 +848,16 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
+ if (!rcfw->init_oos_stats) {
+ rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
+ rcfw->init_oos_stats = 1;
+ } else {
+ stats->res_oos_drop_count +=
+ (le64_to_cpu(sb->res_oos_drop_count) -
+ rcfw->oos_prev) & BNXT_QPLIB_OOS_COUNT_MASK;
+ rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
+ }
+
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
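
The oos_prev/init_oos_stats bookkeeping added above is a common pattern:
accumulate a free-running hardware counter by deltas, with a mask so a counter
that wraps still yields the correct unsigned difference. A condensed sketch
with illustrative names:

	#define COUNT_MASK 0xFFFFFFFFULL

	static u64 total, prev;
	static bool primed;

	static void accumulate(u64 hw_count)
	{
		if (!primed) {
			prev = hw_count;	/* first sample: just prime */
			primed = true;
			return;
		}
		/* masked subtraction handles a wrapped counter */
		total += (hw_count - prev) & COUNT_MASK;
		prev = hw_count;
	}
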
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 9d3e8b994945..8079d7f5a008 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -205,6 +205,16 @@ struct bnxt_qplib_roce_stats {
/* res_tx_pci_err is 64 b */
u64 res_rx_pci_err;
/* res_rx_pci_err is 64 b */
+ u64 res_oos_drop_count;
+ /* res_oos_drop_count */
+ u64 active_qp_count_p0;
+ /* port 0 active qps */
+ u64 active_qp_count_p1;
+ /* port 1 active qps */
+ u64 active_qp_count_p2;
+ /* port 2 active qps */
+ u64 active_qp_count_p3;
+ /* port 3 active qps */
};
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 3e5a4f760d0e..8a9ead419ac2 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2929,6 +2929,11 @@ struct creq_query_roce_stats_resp_sb {
__le64 res_srq_load_err;
__le64 res_tx_pci_err;
__le64 res_rx_pci_err;
+ __le64 res_oos_drop_count;
+ __le64 active_qp_count_p0;
+ __le64 active_qp_count_p1;
+ __le64 active_qp_count_p2;
+ __le64 active_qp_count_p3;
};
/* QP error notification event (16 bytes) */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 1b9ff21aa1d5..ebbec02cebe0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1127,17 +1127,18 @@ static int iwch_query_port(struct ib_device *ibdev,
return 0;
}
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
ibdev.dev);
pr_debug("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
ibdev.dev);
@@ -1148,9 +1149,10 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
ibdev.dev);
@@ -1158,6 +1160,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
iwch_dev->rdev.rnic_info.pdev->device);
}
+static DEVICE_ATTR_RO(board_id);
enum counters {
IPINRECEIVES,
@@ -1274,14 +1277,15 @@ static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
return stats->num_counters;
}
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *iwch_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *iwch_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
+static const struct attribute_group iwch_attr_group = {
+ .attrs = iwch_class_attributes,
};
static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -1316,10 +1320,8 @@ static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
int iwch_register_device(struct iwch_dev *dev)
{
int ret;
- int i;
pr_debug("%s iwch_dev %p\n", __func__, dev);
- strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
dev->ibdev.owner = THIS_MODULE;
@@ -1402,33 +1404,16 @@ int iwch_register_device(struct iwch_dev *dev)
sizeof(dev->ibdev.iwcm->ifname));
dev->ibdev.driver_id = RDMA_DRIVER_CXGB3;
- ret = ib_register_device(&dev->ibdev, NULL);
+ rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
+ ret = ib_register_device(&dev->ibdev, "cxgb3_%d", NULL);
if (ret)
- goto bail1;
-
- for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
- ret = device_create_file(&dev->ibdev.dev,
- iwch_class_attributes[i]);
- if (ret) {
- goto bail2;
- }
- }
- return 0;
-bail2:
- ib_unregister_device(&dev->ibdev);
-bail1:
- kfree(dev->ibdev.iwcm);
+ kfree(dev->ibdev.iwcm);
return ret;
}
void iwch_unregister_device(struct iwch_dev *dev)
{
- int i;
-
pr_debug("%s iwch_dev %p\n", __func__, dev);
- for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
- device_remove_file(&dev->ibdev.dev,
- iwch_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
kfree(dev->ibdev.iwcm);
return;
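
The cxgb3 conversion above follows the tree-wide pattern of replacing
per-attribute device_create_file() loops with a single sysfs attribute group
handed to the core. The skeleton, with a placeholder value:

	static ssize_t hw_rev_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);	/* placeholder */
	}
	static DEVICE_ATTR_RO(hw_rev);		/* generates dev_attr_hw_rev */

	static struct attribute *example_attrs[] = {
		&dev_attr_hw_rev.attr,
		NULL,
	};

	static const struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};

The group is then registered once (here via rdma_set_device_sysfs_group()
before ib_register_device()), which also removes the partial-failure unwind
that the device_create_file() loop needed.
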
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0f83cbec33f3..615413bd3e8d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -403,8 +403,7 @@ void _c4iw_free_ep(struct kref *kref)
ep->com.local_addr.ss_family);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
- if (ep->mpa_skb)
- kfree_skb(ep->mpa_skb);
+ kfree_skb(ep->mpa_skb);
}
if (!skb_queue_empty(&ep->com.ep_skb_list))
skb_queue_purge(&ep->com.ep_skb_list);
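
The guard removed above is redundant because kfree_skb() is NULL-safe: passing
NULL is a no-op. So, as a sketch:

	/* before: */
	if (skb)
		kfree_skb(skb);
	/* after - identical behavior, since kfree_skb(NULL) returns early: */
	kfree_skb(skb);
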
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 6d3042794094..1fd8798d91a7 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -161,7 +161,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->gts = rdev->lldi.gts_reg;
cq->rdev = rdev;
- cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
+ cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
if (user && !cq->bar2_pa) {
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 4eda6872e617..cbb3c0ddd990 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -373,8 +373,8 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
return 0;
}
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
@@ -382,9 +382,10 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n",
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
@@ -395,9 +396,10 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
@@ -405,6 +407,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
c4iw_dev->rdev.lldi.pdev->device);
}
+static DEVICE_ATTR_RO(board_id);
enum counters {
IP4INSEGS,
@@ -461,14 +464,15 @@ static int c4iw_get_mib(struct ib_device *ibdev,
return stats->num_counters;
}
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *c4iw_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *c4iw_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
+static const struct attribute_group c4iw_attr_group = {
+ .attrs = c4iw_class_attributes,
};
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -530,12 +534,10 @@ static int fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry *res)
void c4iw_register_device(struct work_struct *work)
{
int ret;
- int i;
struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
struct c4iw_dev *dev = ctx->dev;
pr_debug("c4iw_dev %p\n", dev);
- strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
dev->ibdev.owner = THIS_MODULE;
@@ -626,20 +628,13 @@ void c4iw_register_device(struct work_struct *work)
memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
sizeof(dev->ibdev.iwcm->ifname));
+ rdma_set_device_sysfs_group(&dev->ibdev, &c4iw_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_CXGB4;
- ret = ib_register_device(&dev->ibdev, NULL);
+ ret = ib_register_device(&dev->ibdev, "cxgb4_%d", NULL);
if (ret)
goto err_kfree_iwcm;
-
- for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
- ret = device_create_file(&dev->ibdev.dev,
- c4iw_class_attributes[i]);
- if (ret)
- goto err_unregister_device;
- }
return;
-err_unregister_device:
- ib_unregister_device(&dev->ibdev);
+
err_kfree_iwcm:
kfree(dev->ibdev.iwcm);
err_dealloc_ctx:
@@ -651,12 +646,7 @@ err_dealloc_ctx:
void c4iw_unregister_device(struct c4iw_dev *dev)
{
- int i;
-
pr_debug("c4iw_dev %p\n", dev);
- for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
- device_remove_file(&dev->ibdev.dev,
- c4iw_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
kfree(dev->ibdev.iwcm);
return;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 347fe18b1a41..13478f3b7057 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -99,7 +99,7 @@ static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
- pci_unmap_addr(sq, mapping));
+ dma_unmap_addr(sq, mapping));
}
static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
@@ -132,7 +132,7 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
if (!sq->queue)
return -ENOMEM;
sq->phys_addr = virt_to_phys(sq->queue);
- pci_unmap_addr_set(sq, mapping, sq->dma_addr);
+ dma_unmap_addr_set(sq, mapping, sq->dma_addr);
return 0;
}
@@ -279,12 +279,13 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
wq->db = rdev->lldi.db_reg;
- wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
+ wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
+ CXGB4_BAR2_QTYPE_EGRESS,
&wq->sq.bar2_qid,
user ? &wq->sq.bar2_pa : NULL);
if (need_rq)
wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
- T4_BAR2_QTYPE_EGRESS,
+ CXGB4_BAR2_QTYPE_EGRESS,
&wq->rq.bar2_qid,
user ? &wq->rq.bar2_pa : NULL);
@@ -2521,7 +2522,7 @@ static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
dma_free_coherent(&rdev->lldi.pdev->dev,
wq->memsize, wq->queue,
- pci_unmap_addr(wq, mapping));
+ dma_unmap_addr(wq, mapping));
c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
kfree(wq->sw_rq);
c4iw_put_qpid(rdev, wq->qid, uctx);
@@ -2570,9 +2571,9 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
goto err_free_rqtpool;
memset(wq->queue, 0, wq->memsize);
- pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+ dma_unmap_addr_set(wq, mapping, wq->dma_addr);
- wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, T4_BAR2_QTYPE_EGRESS,
+ wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
&wq->bar2_qid,
user ? &wq->bar2_pa : NULL);
@@ -2649,7 +2650,7 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
err_free_queue:
dma_free_coherent(&rdev->lldi.pdev->dev,
wq->memsize, wq->queue,
- pci_unmap_addr(wq, mapping));
+ dma_unmap_addr(wq, mapping));
err_free_rqtpool:
c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
err_free_pending_wrs:
@@ -2813,8 +2814,7 @@ err_free_queue:
free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
srq->wr_waitp);
err_free_skb:
- if (srq->destroy_skb)
- kfree_skb(srq->destroy_skb);
+ kfree_skb(srq->destroy_skb);
err_free_srq_idx:
c4iw_free_srq_idx(&rhp->rdev, srq->idx);
err_free_wr_wait:
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e42021fd6fd6..fff6d48d262f 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -397,7 +397,7 @@ struct t4_srq_pending_wr {
struct t4_srq {
union t4_recv_wr *queue;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
struct t4_swrqe *sw_rq;
void __iomem *bar2_va;
u64 bar2_pa;
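
The pci_unmap_addr*() helpers replaced throughout this patch were thin aliases
for the generic DMA API equivalents. A minimal sketch of the generic forms,
assuming a ring structure like the driver's; note DEFINE_DMA_UNMAP_ADDR() may
reserve no storage at all on architectures that do not need the address at
unmap time, which is why the accessor macros must be used:

	struct ring {
		void *queue;
		dma_addr_t dma_addr;
		DEFINE_DMA_UNMAP_ADDR(mapping);	/* may compile to nothing */
	};

	static void ring_save(struct ring *r)
	{
		dma_unmap_addr_set(r, mapping, r->dma_addr);
	}

	static dma_addr_t ring_mapping(struct ring *r)
	{
		return dma_unmap_addr(r, mapping);
	}
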
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index f451ba912f47..ff790390c91a 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -8,12 +8,42 @@
#
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
-hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
- eprom.o exp_rcv.o file_ops.o firmware.o \
- init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
- qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o \
- uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
- verbs_txreq.o vnic_main.o vnic_sdma.o
+hfi1-y := \
+ affinity.o \
+ chip.o \
+ device.o \
+ driver.o \
+ efivar.o \
+ eprom.o \
+ exp_rcv.o \
+ file_ops.o \
+ firmware.o \
+ init.o \
+ intr.o \
+ iowait.o \
+ mad.o \
+ mmu_rb.o \
+ msix.o \
+ pcie.o \
+ pio.o \
+ pio_copy.o \
+ platform.o \
+ qp.o \
+ qsfp.o \
+ rc.o \
+ ruc.o \
+ sdma.o \
+ sysfs.o \
+ trace.o \
+ uc.o \
+ ud.o \
+ user_exp_rcv.o \
+ user_pages.o \
+ user_sdma.o \
+ verbs.o \
+ verbs_txreq.o \
+ vnic_main.o \
+ vnic_sdma.o
ifdef CONFIG_DEBUG_FS
hfi1-y += debugfs.o
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index bedd5fba33b0..2baf38cc1e23 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -817,10 +817,10 @@ static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
set = &entry->def_intr;
cpumask_set_cpu(cpu, &set->mask);
cpumask_set_cpu(cpu, &set->used);
- for (i = 0; i < dd->num_msix_entries; i++) {
+ for (i = 0; i < dd->msix_info.max_requested; i++) {
struct hfi1_msix_entry *other_msix;
- other_msix = &dd->msix_entries[i];
+ other_msix = &dd->msix_info.msix_entries[i];
if (other_msix->type != IRQ_SDMA || other_msix == msix)
continue;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e1668bcc2d13..9b20479dc710 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -67,8 +67,6 @@
#include "debugfs.h"
#include "fault.h"
-#define NUM_IB_PORTS 1
-
uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
@@ -1100,9 +1098,9 @@ struct err_reg_info {
const char *desc;
};
-#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
-#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
-#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
+#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
+#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
+#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
/*
* Helpers for building HFI and DC error interrupt table entries. Different
@@ -8181,7 +8179,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
/**
* is_rcv_urgent_int() - User receive context urgent IRQ handler
* @dd: valid dd
- * @source: logical IRQ source (ofse from IS_RCVURGENT_START)
+ * @source: logical IRQ source (offset from IS_RCVURGENT_START)
*
* RX block receive urgent interrupt. Source is < 160.
*
@@ -8231,7 +8229,7 @@ static const struct is_table is_table[] = {
is_sdma_eng_err_name, is_sdma_eng_err_int },
{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
is_sendctxt_err_name, is_sendctxt_err_int },
-{ IS_SDMA_START, IS_SDMA_END,
+{ IS_SDMA_START, IS_SDMA_IDLE_END,
is_sdma_eng_name, is_sdma_eng_int },
{ IS_VARIOUS_START, IS_VARIOUS_END,
is_various_name, is_various_int },
@@ -8257,7 +8255,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
/* avoids a double compare by walking the table in-order */
for (entry = &is_table[0]; entry->is_name; entry++) {
- if (source < entry->end) {
+ if (source <= entry->end) {
trace_hfi1_interrupt(dd, entry, source);
entry->is_int(dd, source - entry->start);
return;
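
Both hunks above are the same inclusive-range fix: entry->end now names the
last valid source, so membership becomes source <= entry->end and the table
sizes become END + 1 - START. A worked example:

	/* an inclusive range of sources 3..5 has three members */
	enum { START = 3, END = 5 };
	static const int num_sources = END + 1 - START;	/* 3, not 2 */
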
@@ -8276,7 +8274,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
* context DATA IRQs are threaded and are not supported by this handler.
*
*/
-static irqreturn_t general_interrupt(int irq, void *data)
+irqreturn_t general_interrupt(int irq, void *data)
{
struct hfi1_devdata *dd = data;
u64 regs[CCE_NUM_INT_CSRS];
@@ -8309,7 +8307,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
return handled;
}
-static irqreturn_t sdma_interrupt(int irq, void *data)
+irqreturn_t sdma_interrupt(int irq, void *data)
{
struct sdma_engine *sde = data;
struct hfi1_devdata *dd = sde->dd;
@@ -8401,7 +8399,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
* invoked) is finished. The intent is to avoid extra interrupts while we
* are processing packets anyway.
*/
-static irqreturn_t receive_context_interrupt(int irq, void *data)
+irqreturn_t receive_context_interrupt(int irq, void *data)
{
struct hfi1_ctxtdata *rcd = data;
struct hfi1_devdata *dd = rcd->dd;
@@ -8441,7 +8439,7 @@ static irqreturn_t receive_context_interrupt(int irq, void *data)
* Receive packet thread handler. This expects to be invoked with the
* receive interrupt still blocked.
*/
-static irqreturn_t receive_context_thread(int irq, void *data)
+irqreturn_t receive_context_thread(int irq, void *data)
{
struct hfi1_ctxtdata *rcd = data;
int present;
@@ -9651,30 +9649,10 @@ void qsfp_event(struct work_struct *work)
}
}
-static void init_qsfp_int(struct hfi1_devdata *dd)
+void init_qsfp_int(struct hfi1_devdata *dd)
{
struct hfi1_pportdata *ppd = dd->pport;
- u64 qsfp_mask, cce_int_mask;
- const int qsfp1_int_smask = QSFP1_INT % 64;
- const int qsfp2_int_smask = QSFP2_INT % 64;
-
- /*
- * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
- * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
- * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
- * the index of the appropriate CSR in the CCEIntMask CSR array
- */
- cce_int_mask = read_csr(dd, CCE_INT_MASK +
- (8 * (QSFP1_INT / 64)));
- if (dd->hfi1_id) {
- cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
- write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
- cce_int_mask);
- } else {
- cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
- write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
- cce_int_mask);
- }
+ u64 qsfp_mask;
qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
/* Clear current status to avoid spurious interrupts */
@@ -9691,6 +9669,12 @@ static void init_qsfp_int(struct hfi1_devdata *dd)
write_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
qsfp_mask);
+
+ /* Enable the appropriate QSFP IRQ source */
+ if (!dd->hfi1_id)
+ set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
+ else
+ set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
}
/*
@@ -10577,12 +10561,29 @@ void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
}
}
-/*
- * Verify if BCT for data VLs is non-zero.
+/**
+ * data_vls_operational() - Verify if data VL BCT credits and MTU
+ * are both set.
+ * @ppd: pointer to hfi1_pportdata structure
+ *
+ * Return: true - OK, false - otherwise.
*/
static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
{
- return !!ppd->actual_vls_operational;
+ int i;
+ u64 reg;
+
+ if (!ppd->actual_vls_operational)
+ return false;
+
+ for (i = 0; i < ppd->vls_supported; i++) {
+ reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
+ if ((reg && !ppd->dd->vld[i].mtu) ||
+ (!reg && ppd->dd->vld[i].mtu))
+ return false;
+ }
+
+ return true;
}
/*
@@ -10695,7 +10696,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
if (!data_vls_operational(ppd)) {
dd_dev_err(dd,
- "%s: data VLs not operational\n", __func__);
+ "%s: Invalid data VL credits or mtu\n",
+ __func__);
ret = -EINVAL;
break;
}
@@ -11932,10 +11934,16 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
}
- if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
+ if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
+ set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
+ IS_RCVAVAIL_START + rcd->ctxt, true);
rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
- if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
+ }
+ if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
+ set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
+ IS_RCVAVAIL_START + rcd->ctxt, false);
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
+ }
if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
@@ -11965,6 +11973,13 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
+ if (op & HFI1_RCVCTRL_URGENT_ENB)
+ set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
+ IS_RCVURGENT_START + rcd->ctxt, true);
+ if (op & HFI1_RCVCTRL_URGENT_DIS)
+ set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
+ IS_RCVURGENT_START + rcd->ctxt, false);
+
hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
@@ -12963,63 +12978,71 @@ int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
return ret;
}
+/* ========================================================================= */
+
/**
- * get_int_mask - get 64 bit int mask
- * @dd - the devdata
- * @i - the csr (relative to CCE_INT_MASK)
+ * read_mod_write() - Calculate the IRQ register index and set/clear the bits
+ * @dd: valid devdata
+ * @src: IRQ source to determine register index from
+ * @bits: the bits to set or clear
+ * @set: true == set the bits, false == clear the bits
*
- * Returns the mask with the urgent interrupt mask
- * bit clear for kernel receive contexts.
*/
-static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
+static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
+ bool set)
{
- u64 mask = U64_MAX; /* default to no change */
-
- if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
- int j = (i - (IS_RCVURGENT_START / 64)) * 64;
- int k = !j ? IS_RCVURGENT_START % 64 : 0;
+ u64 reg;
+ u16 idx = src / BITS_PER_REGISTER;
- if (j)
- j -= IS_RCVURGENT_START % 64;
- /* j = 0..dd->first_dyn_alloc_ctxt - 1,k = 0..63 */
- for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
- /* convert to bit in mask and clear */
- mask &= ~BIT_ULL(k);
- }
- return mask;
+ spin_lock(&dd->irq_src_lock);
+ reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
+ if (set)
+ reg |= bits;
+ else
+ reg &= ~bits;
+ write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
+ spin_unlock(&dd->irq_src_lock);
}
-/* ========================================================================= */
-
-/*
- * Enable/disable chip from delivering interrupts.
+/**
+ * set_intr_bits() - Enable/disable a range (one or more) of IRQ sources
+ * @dd: valid devdata
+ * @first: first IRQ source to set/clear
+ * @last: last IRQ source (inclusive) to set/clear
+ * @set: true == set the bits, false == clear the bits
+ *
+ * If first == last, set the exact source.
*/
-void set_intr_state(struct hfi1_devdata *dd, u32 enable)
+int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
{
- int i;
+ u64 bits = 0;
+ u64 bit;
+ u16 src;
- /*
- * In HFI, the mask needs to be 1 to allow interrupts.
- */
- if (enable) {
- /* enable all interrupts but urgent on kernel contexts */
- for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
- u64 mask = get_int_mask(dd, i);
+ if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
+ return -EINVAL;
- write_csr(dd, CCE_INT_MASK + (8 * i), mask);
- }
+ if (last < first)
+ return -ERANGE;
- init_qsfp_int(dd);
- } else {
- for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
+ for (src = first; src <= last; src++) {
+ bit = src % BITS_PER_REGISTER;
+ /* wrapped to next register? */
+ if (!bit && bits) {
+ read_mod_write(dd, src - 1, bits, set);
+ bits = 0;
+ }
+ bits |= BIT_ULL(bit);
}
+ read_mod_write(dd, last, bits, set);
+
+ return 0;
}
/*
* Clear all interrupt sources on the chip.
*/
-static void clear_all_interrupts(struct hfi1_devdata *dd)
+void clear_all_interrupts(struct hfi1_devdata *dd)
{
int i;
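
Illustrative math behind the new read_mod_write()/set_intr_bits() pair: with
64 sources per CSR, source N lives at bit (N % 64) of register (N / 64), and a
[first, last] range is applied one register at a time, flushing accumulated
bits whenever the bit index wraps. A self-contained sketch (apply() stands in
for the locked CSR read-modify-write):

	#define BITS_PER_REG	64

	static void apply(u16 src, u64 bits, bool set)
	{
		/* read CSR (src / BITS_PER_REG), set or clear 'bits',
		 * write it back under the IRQ source lock
		 */
	}

	static void set_range(u16 first, u16 last, bool set)
	{
		u64 bits = 0;
		u16 src;

		for (src = first; src <= last; src++) {
			u64 bit = src % BITS_PER_REG;

			if (!bit && bits) {	/* crossed into next register */
				apply(src - 1, bits, set);
				bits = 0;
			}
			bits |= 1ULL << bit;
		}
		apply(last, bits, set);		/* flush the final register */
	}
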
@@ -13043,38 +13066,11 @@ static void clear_all_interrupts(struct hfi1_devdata *dd)
write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
}
-/**
- * hfi1_clean_up_interrupts() - Free all IRQ resources
- * @dd: valid device data data structure
- *
- * Free the MSIx and assoicated PCI resources, if they have been allocated.
- */
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
-{
- int i;
- struct hfi1_msix_entry *me = dd->msix_entries;
-
- /* remove irqs - must happen before disabling/turning off */
- for (i = 0; i < dd->num_msix_entries; i++, me++) {
- if (!me->arg) /* => no irq, no affinity */
- continue;
- hfi1_put_irq_affinity(dd, me);
- pci_free_irq(dd->pcidev, i, me->arg);
- }
-
- /* clean structures */
- kfree(dd->msix_entries);
- dd->msix_entries = NULL;
- dd->num_msix_entries = 0;
-
- pci_free_irq_vectors(dd->pcidev);
-}
-
/*
* Remap the interrupt source from the general handler to the given MSI-X
* interrupt.
*/
-static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
+void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
{
u64 reg;
int m, n;
@@ -13098,8 +13094,7 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
write_csr(dd, CCE_INT_MAP + (8 * m), reg);
}
-static void remap_sdma_interrupts(struct hfi1_devdata *dd,
- int engine, int msix_intr)
+void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
{
/*
* SDMA engine interrupt sources grouped by type, rather than
@@ -13108,204 +13103,16 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd,
* SDMAProgress
* SDMAIdle
*/
- remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
- remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
- remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
-}
-
-static int request_msix_irqs(struct hfi1_devdata *dd)
-{
- int first_general, last_general;
- int first_sdma, last_sdma;
- int first_rx, last_rx;
- int i, ret = 0;
-
- /* calculate the ranges we are going to use */
- first_general = 0;
- last_general = first_general + 1;
- first_sdma = last_general;
- last_sdma = first_sdma + dd->num_sdma;
- first_rx = last_sdma;
- last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
-
- /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
- dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
-
- /*
- * Sanity check - the code expects all SDMA chip source
- * interrupts to be in the same CSR, starting at bit 0. Verify
- * that this is true by checking the bit location of the start.
- */
- BUILD_BUG_ON(IS_SDMA_START % 64);
-
- for (i = 0; i < dd->num_msix_entries; i++) {
- struct hfi1_msix_entry *me = &dd->msix_entries[i];
- const char *err_info;
- irq_handler_t handler;
- irq_handler_t thread = NULL;
- void *arg = NULL;
- int idx;
- struct hfi1_ctxtdata *rcd = NULL;
- struct sdma_engine *sde = NULL;
- char name[MAX_NAME_SIZE];
-
- /* obtain the arguments to pci_request_irq */
- if (first_general <= i && i < last_general) {
- idx = i - first_general;
- handler = general_interrupt;
- arg = dd;
- snprintf(name, sizeof(name),
- DRIVER_NAME "_%d", dd->unit);
- err_info = "general";
- me->type = IRQ_GENERAL;
- } else if (first_sdma <= i && i < last_sdma) {
- idx = i - first_sdma;
- sde = &dd->per_sdma[idx];
- handler = sdma_interrupt;
- arg = sde;
- snprintf(name, sizeof(name),
- DRIVER_NAME "_%d sdma%d", dd->unit, idx);
- err_info = "sdma";
- remap_sdma_interrupts(dd, idx, i);
- me->type = IRQ_SDMA;
- } else if (first_rx <= i && i < last_rx) {
- idx = i - first_rx;
- rcd = hfi1_rcd_get_by_index_safe(dd, idx);
- if (rcd) {
- /*
- * Set the interrupt register and mask for this
- * context's interrupt.
- */
- rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
- rcd->imask = ((u64)1) <<
- ((IS_RCVAVAIL_START + idx) % 64);
- handler = receive_context_interrupt;
- thread = receive_context_thread;
- arg = rcd;
- snprintf(name, sizeof(name),
- DRIVER_NAME "_%d kctxt%d",
- dd->unit, idx);
- err_info = "receive context";
- remap_intr(dd, IS_RCVAVAIL_START + idx, i);
- me->type = IRQ_RCVCTXT;
- rcd->msix_intr = i;
- hfi1_rcd_put(rcd);
- }
- } else {
- /* not in our expected range - complain, then
- * ignore it
- */
- dd_dev_err(dd,
- "Unexpected extra MSI-X interrupt %d\n", i);
- continue;
- }
- /* no argument, no interrupt */
- if (!arg)
- continue;
- /* make sure the name is terminated */
- name[sizeof(name) - 1] = 0;
- me->irq = pci_irq_vector(dd->pcidev, i);
- ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
- name);
- if (ret) {
- dd_dev_err(dd,
- "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
- err_info, me->irq, idx, ret);
- return ret;
- }
- /*
- * assign arg after pci_request_irq call, so it will be
- * cleaned up
- */
- me->arg = arg;
-
- ret = hfi1_get_irq_affinity(dd, me);
- if (ret)
- dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
- }
-
- return ret;
-}
-
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
-{
- int i;
-
- for (i = 0; i < dd->vnic.num_ctxt; i++) {
- struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
- struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
-
- synchronize_irq(me->irq);
- }
-}
-
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_devdata *dd = rcd->dd;
- struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
-
- if (!me->arg) /* => no irq, no affinity */
- return;
-
- hfi1_put_irq_affinity(dd, me);
- pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
-
- me->arg = NULL;
-}
-
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_devdata *dd = rcd->dd;
- struct hfi1_msix_entry *me;
- int idx = rcd->ctxt;
- void *arg = rcd;
- int ret;
-
- rcd->msix_intr = dd->vnic.msix_idx++;
- me = &dd->msix_entries[rcd->msix_intr];
-
- /*
- * Set the interrupt register and mask for this
- * context's interrupt.
- */
- rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
- rcd->imask = ((u64)1) <<
- ((IS_RCVAVAIL_START + idx) % 64);
- me->type = IRQ_RCVCTXT;
- me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
- remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
-
- ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
- receive_context_interrupt,
- receive_context_thread, arg,
- DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
- if (ret) {
- dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
- me->irq, idx, ret);
- return;
- }
- /*
- * assign arg after pci_request_irq call, so it will be
- * cleaned up
- */
- me->arg = arg;
-
- ret = hfi1_get_irq_affinity(dd, me);
- if (ret) {
- dd_dev_err(dd,
- "unable to pin IRQ %d\n", ret);
- pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
- }
+ remap_intr(dd, IS_SDMA_START + engine, msix_intr);
+ remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
+ remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
}
/*
* Set the general handler to accept all interrupts, remap all
* chip interrupts back to MSI-X 0.
*/
-static void reset_interrupts(struct hfi1_devdata *dd)
+void reset_interrupts(struct hfi1_devdata *dd)
{
int i;
@@ -13318,54 +13125,33 @@ static void reset_interrupts(struct hfi1_devdata *dd)
write_csr(dd, CCE_INT_MAP + (8 * i), 0);
}
+/**
+ * set_up_interrupts() - Initialize the IRQ resources and state
+ * @dd: valid devdata
+ *
+ */
static int set_up_interrupts(struct hfi1_devdata *dd)
{
- u32 total;
- int ret, request;
-
- /*
- * Interrupt count:
- * 1 general, "slow path" interrupt (includes the SDMA engines
- * slow source, SDMACleanupDone)
- * N interrupts - one per used SDMA engine
- * M interrupt - one per kernel receive context
- * V interrupt - one for each VNIC context
- */
- total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
-
- /* ask for MSI-X interrupts */
- request = request_msix(dd, total);
- if (request < 0) {
- ret = request;
- goto fail;
- } else {
- dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
- GFP_KERNEL);
- if (!dd->msix_entries) {
- ret = -ENOMEM;
- goto fail;
- }
- /* using MSI-X */
- dd->num_msix_entries = total;
- dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
- }
+ int ret;
/* mask all interrupts */
- set_intr_state(dd, 0);
+ set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
+
/* clear all pending interrupts */
clear_all_interrupts(dd);
/* reset general handler mask, chip MSI-X mappings */
reset_interrupts(dd);
- ret = request_msix_irqs(dd);
+ /* ask for MSI-X interrupts */
+ ret = msix_initialize(dd);
if (ret)
- goto fail;
+ return ret;
- return 0;
+ ret = msix_request_irqs(dd);
+ if (ret)
+ msix_clean_up_interrupts(dd);
-fail:
- hfi1_clean_up_interrupts(dd);
return ret;
}
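[Editor's note: set_up_interrupts() now masks sources with set_intr_bits() instead of the removed set_intr_state(), but the helper's body is outside this hunk. Below is a minimal sketch of the ranged read-modify-write it implies, assuming BITS_PER_REGISTER (64) sources per CCE_INT_MASK CSR and the new dd->irq_src_lock; the function name and loop structure are illustrative, not the driver's actual implementation.]

static int set_intr_bits_sketch(struct hfi1_devdata *dd, u16 first, u16 last,
				bool set)
{
	u16 src = first;

	if (first > last || last > IS_LAST_SOURCE)
		return -EINVAL;

	spin_lock(&dd->irq_src_lock);
	while (src <= last) {
		u16 reg_idx = src / BITS_PER_REGISTER;
		u64 bits = 0;
		u64 reg;

		/* gather every requested source that lives in this CSR */
		for (; src <= last && src / BITS_PER_REGISTER == reg_idx; src++)
			bits |= BIT_ULL(src % BITS_PER_REGISTER);

		reg = read_csr(dd, CCE_INT_MASK + (8 * reg_idx));
		reg = set ? (reg | bits) : (reg & ~bits);
		write_csr(dd, CCE_INT_MASK + (8 * reg_idx), reg);
	}
	spin_unlock(&dd->irq_src_lock);

	return 0;
}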
@@ -14918,20 +14704,16 @@ err_exit:
}
/**
- * Allocate and initialize the device structure for the hfi.
+ * hfi1_init_dd() - Initialize most of the dd structure.
* @dev: the pci_dev for hfi1_ib device
* @ent: pci_device_id struct for this dev
*
- * Also allocates, initializes, and returns the devdata struct for this
- * device instance
- *
* This is global, and is called directly at init to set up the
* chip-specific function pointers for later use.
*/
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int hfi1_init_dd(struct hfi1_devdata *dd)
{
- struct hfi1_devdata *dd;
+ struct pci_dev *pdev = dd->pcidev;
struct hfi1_pportdata *ppd;
u64 reg;
int i, ret;
@@ -14942,13 +14724,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
"Functional simulator"
};
struct pci_dev *parent = pdev->bus->self;
- u32 sdma_engines;
+ u32 sdma_engines = chip_sdma_engines(dd);
- dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
- sizeof(struct hfi1_pportdata));
- if (IS_ERR(dd))
- goto bail;
- sdma_engines = chip_sdma_engines(dd);
ppd = dd->pport;
for (i = 0; i < dd->num_pports; i++, ppd++) {
int vl;
@@ -15127,6 +14904,12 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_cleanup;
+ /*
+ * This should probably occur in hfi1_pcie_init(), but historically
+ * occurs after the do_pcie_gen3_transition() code.
+ */
+ tune_pcie_caps(dd);
+
/* start setting dd values and adjusting CSRs */
init_early_variables(dd);
@@ -15239,14 +15022,13 @@ bail_free_cntrs:
free_cntrs(dd);
bail_clear_intr:
hfi1_comp_vectors_clean_up(dd);
- hfi1_clean_up_interrupts(dd);
+ msix_clean_up_interrupts(dd);
bail_cleanup:
hfi1_pcie_ddcleanup(dd);
bail_free:
hfi1_free_devdata(dd);
- dd = ERR_PTR(ret);
bail:
- return dd;
+ return ret;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 36b04d6300e5..6b9c8f12dff8 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -52,9 +52,7 @@
*/
/* sizes */
-#define CCE_NUM_MSIX_VECTORS 256
-#define CCE_NUM_INT_CSRS 12
-#define CCE_NUM_INT_MAP_CSRS 96
+#define BITS_PER_REGISTER (BITS_PER_BYTE * sizeof(u64))
#define NUM_INTERRUPT_SOURCES 768
#define RXE_NUM_CONTEXTS 160
#define RXE_PER_CONTEXT_SIZE 0x1000 /* 4k */
@@ -161,34 +159,49 @@
(CR_CREDIT_RETURN_DUE_TO_FORCE_MASK << \
CR_CREDIT_RETURN_DUE_TO_FORCE_SHIFT)
-/* interrupt source numbers */
-#define IS_GENERAL_ERR_START 0
-#define IS_SDMAENG_ERR_START 16
-#define IS_SENDCTXT_ERR_START 32
-#define IS_SDMA_START 192 /* includes SDmaProgress,SDmaIdle */
+/* Specific IRQ sources */
+#define CCE_ERR_INT 0
+#define RXE_ERR_INT 1
+#define MISC_ERR_INT 2
+#define PIO_ERR_INT 4
+#define SDMA_ERR_INT 5
+#define EGRESS_ERR_INT 6
+#define TXE_ERR_INT 7
+#define PBC_INT 240
+#define GPIO_ASSERT_INT 241
+#define QSFP1_INT 242
+#define QSFP2_INT 243
+#define TCRIT_INT 244
+
+/* interrupt source ranges */
+#define IS_FIRST_SOURCE CCE_ERR_INT
+#define IS_GENERAL_ERR_START 0
+#define IS_SDMAENG_ERR_START 16
+#define IS_SENDCTXT_ERR_START 32
+#define IS_SDMA_START 192
+#define IS_SDMA_PROGRESS_START 208
+#define IS_SDMA_IDLE_START 224
#define IS_VARIOUS_START 240
#define IS_DC_START 248
#define IS_RCVAVAIL_START 256
#define IS_RCVURGENT_START 416
#define IS_SENDCREDIT_START 576
#define IS_RESERVED_START 736
-#define IS_MAX_SOURCES 768
+#define IS_LAST_SOURCE 767
/* derived interrupt source values */
-#define IS_GENERAL_ERR_END IS_SDMAENG_ERR_START
-#define IS_SDMAENG_ERR_END IS_SENDCTXT_ERR_START
-#define IS_SENDCTXT_ERR_END IS_SDMA_START
-#define IS_SDMA_END IS_VARIOUS_START
-#define IS_VARIOUS_END IS_DC_START
-#define IS_DC_END IS_RCVAVAIL_START
-#define IS_RCVAVAIL_END IS_RCVURGENT_START
-#define IS_RCVURGENT_END IS_SENDCREDIT_START
-#define IS_SENDCREDIT_END IS_RESERVED_START
-#define IS_RESERVED_END IS_MAX_SOURCES
-
-/* absolute interrupt numbers for QSFP1Int and QSFP2Int */
-#define QSFP1_INT 242
-#define QSFP2_INT 243
+#define IS_GENERAL_ERR_END 7
+#define IS_SDMAENG_ERR_END 31
+#define IS_SENDCTXT_ERR_END 191
+#define IS_SDMA_END 207
+#define IS_SDMA_PROGRESS_END 223
+#define IS_SDMA_IDLE_END 239
+#define IS_VARIOUS_END 244
+#define IS_DC_END 255
+#define IS_RCVAVAIL_END 415
+#define IS_RCVURGENT_END 575
+#define IS_SENDCREDIT_END 735
+#define IS_RESERVED_END IS_LAST_SOURCE
/* DCC_CFG_PORT_CONFIG logical link states */
#define LSTATE_DOWN 0x1
@@ -1416,6 +1429,18 @@ void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd);
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd);
+irqreturn_t general_interrupt(int irq, void *data);
+irqreturn_t sdma_interrupt(int irq, void *data);
+irqreturn_t receive_context_interrupt(int irq, void *data);
+irqreturn_t receive_context_thread(int irq, void *data);
+
+int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set);
+void init_qsfp_int(struct hfi1_devdata *dd);
+void clear_all_interrupts(struct hfi1_devdata *dd);
+void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
+void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
+void reset_interrupts(struct hfi1_devdata *dd);
+
/*
* Interrupt source table.
*
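[Editor's note: the ranged defines above all feed the same decomposition: an interrupt source number selects a 64-bit CSR and a bit within it. For illustration only (these helpers are not part of the patch), the math is:]

/* Illustration only: source number -> (CSR index, bit mask). */
static inline u16 is_src_reg(int isrc)
{
	return isrc / BITS_PER_REGISTER;
}

static inline u64 is_src_mask(int isrc)
{
	return BIT_ULL(isrc % BITS_PER_REGISTER);
}

So a receive context's RcvAvail source IS_RCVAVAIL_START + ctxt lands in register (IS_RCVAVAIL_START + ctxt) / 64 with mask 1 << ((IS_RCVAVAIL_START + ctxt) % 64), which is exactly what msix_request_rcd_irq() later stores in rcd->ireg and rcd->imask.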
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index ee6dca5e2a2f..c6163a347e93 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -878,6 +878,10 @@
#define SEND_CTRL (TXE + 0x000000000000)
#define SEND_CTRL_CM_RESET_SMASK 0x4ull
#define SEND_CTRL_SEND_ENABLE_SMASK 0x1ull
+#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
+#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xFFull
+#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
+ << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
#define SEND_CTRL_VL_ARBITER_ENABLE_SMASK 0x2ull
#define SEND_CTXT_CHECK_ENABLE (TXE + 0x000000100080)
#define SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK 0x80ull
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1fc75647e47b..c22ebc774a6a 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -681,7 +681,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
HFI1_RCVCTRL_TAILUPD_DIS |
HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
- HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS |
+ HFI1_RCVCTRL_URGENT_DIS, uctxt);
/* Clear the context's J_KEY */
hfi1_clear_ctxt_jkey(dd, uctxt);
/*
@@ -1096,6 +1097,7 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);
rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
+ rcvctrl_ops |= HFI1_RCVCTRL_URGENT_ENB;
if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
/*
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index d9470317983f..1401b6ea4a28 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -80,6 +80,7 @@
#include "qsfp.h"
#include "platform.h"
#include "affinity.h"
+#include "msix.h"
/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ 3U
@@ -620,6 +621,8 @@ struct rvt_sge_state;
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
+#define HFI1_RCVCTRL_URGENT_ENB 0x40000
+#define HFI1_RCVCTRL_URGENT_DIS 0x80000
/* partition enforcement flags */
#define HFI1_PART_ENFORCE_IN 0x1
@@ -667,6 +670,14 @@ struct hfi1_msix_entry {
struct irq_affinity_notify notify;
};
+struct hfi1_msix_info {
+ /* lock to synchronize in_use_msix access */
+ spinlock_t msix_lock;
+ DECLARE_BITMAP(in_use_msix, CCE_NUM_MSIX_VECTORS);
+ struct hfi1_msix_entry *msix_entries;
+ u16 max_requested;
+};
+
/* per-SL CCA information */
struct cca_timer {
struct hrtimer hrtimer;
@@ -992,7 +1003,6 @@ struct hfi1_vnic_data {
struct idr vesw_idr;
u8 rmt_start;
u8 num_ctxt;
- u32 msix_idx;
};
struct hfi1_vnic_vport_info;
@@ -1205,11 +1215,6 @@ struct hfi1_devdata {
struct diag_client *diag_client;
- /* MSI-X information */
- struct hfi1_msix_entry *msix_entries;
- u32 num_msix_entries;
- u32 first_dyn_msix_idx;
-
/* general interrupt: mask of handled interrupts */
u64 gi_mask[CCE_NUM_INT_CSRS];
@@ -1223,6 +1228,9 @@ struct hfi1_devdata {
*/
struct timer_list synth_stats_timer;
+ /* MSI-X information */
+ struct hfi1_msix_info msix_info;
+
/*
* device counters
*/
@@ -1349,6 +1357,8 @@ struct hfi1_devdata {
/* vnic data */
struct hfi1_vnic_data vnic;
+ /* Lock to protect IRQ SRC register access */
+ spinlock_t irq_src_lock;
};
static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
@@ -1431,9 +1441,6 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
void set_all_slowpath(struct hfi1_devdata *dd);
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);
extern const struct pci_device_id hfi1_pci_tbl[];
void hfi1_make_ud_req_9B(struct rvt_qp *qp,
@@ -1887,10 +1894,8 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
#define HFI1_CTXT_WAITING_URG 4
/* free up any allocated data at closes */
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
- const struct pci_device_id *ent);
+int hfi1_init_dd(struct hfi1_devdata *dd);
void hfi1_free_devdata(struct hfi1_devdata *dd);
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
/* LED beaconing functions */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
@@ -1963,6 +1968,7 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
*/
extern const char ib_hfi1_version[];
+extern const struct attribute_group ib_hfi1_attr_group;
int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);
@@ -1974,16 +1980,15 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
/* Hook for sysfs read of QSFP */
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
+int hfi1_pcie_init(struct hfi1_devdata *dd);
void hfi1_pcie_cleanup(struct pci_dev *pdev);
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *dd);
-int request_msix(struct hfi1_devdata *dd, u32 msireq);
int restore_pci_variables(struct hfi1_devdata *dd);
int save_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
+void tune_pcie_caps(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
enum platform_config_table_type_encoding
@@ -2124,19 +2129,6 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
return base_sdma_integrity;
}
-/*
- * hfi1_early_err is used (only!) to print early errors before devdata is
- * allocated, or when dd->pcidev may not be valid, and at the tail end of
- * cleanup when devdata may have been freed, etc. hfi1_dev_porterr is
- * the same as dd_dev_err, but is used when the message really needs
- * the IB port# to be definitive as to what's happening..
- */
-#define hfi1_early_err(dev, fmt, ...) \
- dev_err(dev, fmt, ##__VA_ARGS__)
-
-#define hfi1_early_info(dev, fmt, ...) \
- dev_info(dev, fmt, ##__VA_ARGS__)
-
#define dd_dev_emerg(dd, fmt, ...) \
dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 758d273c32cf..09044905284f 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -83,6 +83,8 @@
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
+#define NUM_IB_PORTS 1
+
/*
* Number of user receive contexts we are configured to use (to allow for more
* pio buffers per ctxt, etc.) Zero means use one user context per CPU.
@@ -654,9 +656,8 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
if (loopback) {
- hfi1_early_err(&pdev->dev,
- "Faking data partition 0x8001 in idx %u\n",
- !default_pkey_idx);
+ dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
+ !default_pkey_idx);
ppd->pkeys[!default_pkey_idx] = 0x8001;
}
@@ -702,9 +703,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
return;
bail:
-
- hfi1_early_err(&pdev->dev,
- "Congestion Control Agent disabled for port %d\n", port);
+ dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}
/*
@@ -833,6 +832,23 @@ wq_error:
}
/**
+ * enable_general_intr() - Enable the IRQs that will be handled by the
+ * general interrupt handler.
+ * @dd: valid devdata
+ *
+ */
+static void enable_general_intr(struct hfi1_devdata *dd)
+{
+ set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
+ set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
+ set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
+ set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
+ set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
+ set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
+ set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
+}
+
+/**
* hfi1_init - do the actual initialization sequence on the chip
* @dd: the hfi1_ib device
* @reinit: re-initializing, so don't allocate new memory
@@ -916,6 +932,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
ret = lastfail;
}
+ /* enable IRQ */
hfi1_rcd_put(rcd);
}
@@ -954,7 +971,8 @@ done:
HFI1_STATUS_INITTED;
if (!ret) {
/* enable all interrupts from the chip */
- set_intr_state(dd, 1);
+ enable_general_intr(dd);
+ init_qsfp_int(dd);
/* chip is OK for user apps; mark it as initialized */
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
@@ -1051,9 +1069,9 @@ static void shutdown_device(struct hfi1_devdata *dd)
}
dd->flags &= ~HFI1_INITTED;
- /* mask and clean up interrupts, but not errors */
- set_intr_state(dd, 0);
- hfi1_clean_up_interrupts(dd);
+ /* mask and clean up interrupts */
+ set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
+ msix_clean_up_interrupts(dd);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -1246,15 +1264,19 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
kobject_put(&dd->kobj);
}
-/*
- * Allocate our primary per-unit data structure. Must be done via verbs
- * allocator, because the verbs cleanup process both does cleanup and
- * free of the data structure.
+/**
+ * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
+ * @pdev: Valid PCI device
+ * @extra: How many bytes to alloc past the default
+ *
+ * Must be done via verbs allocator, because the verbs cleanup process
+ * both does cleanup and free of the data structure.
* "extra" is for chip-specific data.
*
* Use the idr mechanism to get a unit number for this unit.
*/
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
+static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
+ size_t extra)
{
unsigned long flags;
struct hfi1_devdata *dd;
@@ -1287,8 +1309,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
idr_preload_end();
if (ret < 0) {
- hfi1_early_err(&pdev->dev,
- "Could not allocate unit ID: error %d\n", -ret);
+ dev_err(&pdev->dev,
+ "Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
@@ -1309,6 +1331,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
spin_lock_init(&dd->pio_map_lock);
mutex_init(&dd->dc8051_lock);
init_waitqueue_head(&dd->event_queue);
+ spin_lock_init(&dd->irq_src_lock);
dd->int_counter = alloc_percpu(u64);
if (!dd->int_counter) {
@@ -1481,9 +1504,6 @@ static int __init hfi1_mod_init(void)
idr_init(&hfi1_unit_table);
hfi1_dbg_init();
- ret = hfi1_wss_init();
- if (ret < 0)
- goto bail_wss;
ret = pci_register_driver(&hfi1_pci_driver);
if (ret < 0) {
pr_err("Unable to register driver: error %d\n", -ret);
@@ -1492,8 +1512,6 @@ static int __init hfi1_mod_init(void)
goto bail; /* all OK */
bail_dev:
- hfi1_wss_exit();
-bail_wss:
hfi1_dbg_exit();
idr_destroy(&hfi1_unit_table);
dev_cleanup();
@@ -1510,7 +1528,6 @@ static void __exit hfi1_mod_cleanup(void)
{
pci_unregister_driver(&hfi1_pci_driver);
node_affinity_destroy_all();
- hfi1_wss_exit();
hfi1_dbg_exit();
idr_destroy(&hfi1_unit_table);
@@ -1604,23 +1621,23 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
hfi1_free_devdata(dd);
}
-static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
+static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
{
if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(dev, "Receive header queue count too small\n");
+ dd_dev_err(dd, "Receive header queue count too small\n");
return -EINVAL;
}
if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(dev,
- "Receive header queue count cannot be greater than %u\n",
- HFI1_MAX_HDRQ_EGRBUF_CNT);
+ dd_dev_err(dd,
+ "Receive header queue count cannot be greater than %u\n",
+ HFI1_MAX_HDRQ_EGRBUF_CNT);
return -EINVAL;
}
if (thecnt % HDRQ_INCREMENT) {
- hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
- thecnt, HDRQ_INCREMENT);
+ dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
+ thecnt, HDRQ_INCREMENT);
return -EINVAL;
}
@@ -1639,22 +1656,29 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Validate dev ids */
if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
ent->device == PCI_DEVICE_ID_INTEL1)) {
- hfi1_early_err(&pdev->dev,
- "Failing on unknown Intel deviceid 0x%x\n",
- ent->device);
+ dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
+ ent->device);
ret = -ENODEV;
goto bail;
}
+ /* Allocate the dd so we can get to work */
+ dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
+ sizeof(struct hfi1_pportdata));
+ if (IS_ERR(dd)) {
+ ret = PTR_ERR(dd);
+ goto bail;
+ }
+
/* Validate some global module parameters */
- ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
+ ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
if (ret)
goto bail;
/* use the encoding function as a sanitization check */
if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
- hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
- hfi1_hdrq_entsize);
+ dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
+ hfi1_hdrq_entsize);
ret = -EINVAL;
goto bail;
}
@@ -1676,10 +1700,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
clamp_val(eager_buffer_size,
MIN_EAGER_BUFFER * 8,
MAX_EAGER_BUFFER_TOTAL);
- hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
- eager_buffer_size);
+ dd_dev_info(dd, "Eager buffer size %u\n",
+ eager_buffer_size);
} else {
- hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
+ dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
ret = -EINVAL;
goto bail;
}
@@ -1687,7 +1711,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* restrict value of hfi1_rcvarr_split */
hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
- ret = hfi1_pcie_init(pdev, ent);
+ ret = hfi1_pcie_init(dd);
if (ret)
goto bail;
@@ -1695,12 +1719,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* Do device-specific initialization, function table setup, dd
* allocation, etc.
*/
- dd = hfi1_init_dd(pdev, ent);
-
- if (IS_ERR(dd)) {
- ret = PTR_ERR(dd);
+ ret = hfi1_init_dd(dd);
+ if (ret)
goto clean_bail; /* error already printed */
- }
ret = create_workqueues(dd);
if (ret)
@@ -1731,7 +1752,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
if (initfail || ret) {
- hfi1_clean_up_interrupts(dd);
+ msix_clean_up_interrupts(dd);
stop_timers(dd);
flush_workqueue(ib_wq);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
new file mode 100644
index 000000000000..582f1ba136ff
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/iowait.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#include "iowait.h"
+#include "trace_iowait.h"
+
+void iowait_set_flag(struct iowait *wait, u32 flag)
+{
+ trace_hfi1_iowait_set(wait, flag);
+ set_bit(flag, &wait->flags);
+}
+
+bool iowait_flag_set(struct iowait *wait, u32 flag)
+{
+ return test_bit(flag, &wait->flags);
+}
+
+inline void iowait_clear_flag(struct iowait *wait, u32 flag)
+{
+ trace_hfi1_iowait_clear(wait, flag);
+ clear_bit(flag, &wait->flags);
+}
+
+/**
+ * iowait_init() - initialize wait structure
+ * @wait: wait struct to initialize
+ * @tx_limit: limit for overflow queuing
+ * @func: restart function for workqueue
+ * @tidfunc: restart function for the TID send engine workqueue
+ * @sleep: sleep function for no space
+ * @wakeup: wakeup function for no space
+ * @sdma_drained: callback when the sdma count drains to zero
+ *
+ * This function initializes the iowait
+ * structure embedded in the QP or PQ.
+ *
+ */
+void iowait_init(struct iowait *wait, u32 tx_limit,
+ void (*func)(struct work_struct *work),
+ void (*tidfunc)(struct work_struct *work),
+ int (*sleep)(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ uint seq,
+ bool pkts_sent),
+ void (*wakeup)(struct iowait *wait, int reason),
+ void (*sdma_drained)(struct iowait *wait))
+{
+ int i;
+
+ wait->count = 0;
+ INIT_LIST_HEAD(&wait->list);
+ init_waitqueue_head(&wait->wait_dma);
+ init_waitqueue_head(&wait->wait_pio);
+ atomic_set(&wait->sdma_busy, 0);
+ atomic_set(&wait->pio_busy, 0);
+ wait->tx_limit = tx_limit;
+ wait->sleep = sleep;
+ wait->wakeup = wakeup;
+ wait->sdma_drained = sdma_drained;
+ wait->flags = 0;
+ for (i = 0; i < IOWAIT_SES; i++) {
+ wait->wait[i].iow = wait;
+ INIT_LIST_HEAD(&wait->wait[i].tx_head);
+ if (i == IOWAIT_IB_SE)
+ INIT_WORK(&wait->wait[i].iowork, func);
+ else
+ INIT_WORK(&wait->wait[i].iowork, tidfunc);
+ }
+}
+
+/**
+ * iowait_cancel_work - cancel all work in iowait
+ * @w: the iowait struct
+ */
+void iowait_cancel_work(struct iowait *w)
+{
+ cancel_work_sync(&iowait_get_ib_work(w)->iowork);
+ cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+}
+
+/**
+ * iowait_set_work_flag - set work flag based on leg
+ * @w - the iowait work struct
+ */
+int iowait_set_work_flag(struct iowait_work *w)
+{
+ if (w == &w->iow->wait[IOWAIT_IB_SE]) {
+ iowait_set_flag(w->iow, IOWAIT_PENDING_IB);
+ return IOWAIT_IB_SE;
+ }
+ iowait_set_flag(w->iow, IOWAIT_PENDING_TID);
+ return IOWAIT_TID_SE;
+}
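[Editor's note: iowait_init() now takes two restart functions, one per send engine. The verbs QP code later in this series passes NULL for the TID leg; a hypothetical caller that drives both legs would wire it up as below. All my_* names are invented for the sketch.]

#include "iowait.h"

struct my_priv {
	struct iowait s_iowait;
};

static void my_ib_send_work(struct work_struct *work) { /* IB leg */ }
static void my_tid_send_work(struct work_struct *work) { /* TID leg */ }
static int my_sleep(struct sdma_engine *sde, struct iowait_work *wait,
		    struct sdma_txreq *tx, uint seq, bool pkts_sent)
{
	return 0;
}
static void my_wakeup(struct iowait *wait, int reason) { }
static void my_sdma_drained(struct iowait *wait) { }

static void my_priv_init(struct my_priv *priv)
{
	iowait_init(&priv->s_iowait, 1 /* tx_limit */,
		    my_ib_send_work,	/* IOWAIT_IB_SE work function */
		    my_tid_send_work,	/* IOWAIT_TID_SE work function */
		    my_sleep, my_wakeup, my_sdma_drained);
}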
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 3d9c32c7c340..23a58ac0d47c 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_IOWAIT_H
#define _HFI1_IOWAIT_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -49,6 +49,7 @@
#include <linux/list.h>
#include <linux/workqueue.h>
+#include <linux/wait.h>
#include <linux/sched.h>
#include "sdma_txreq.h"
@@ -59,16 +60,47 @@
*/
typedef void (*restart_t)(struct work_struct *work);
+#define IOWAIT_PENDING_IB 0x0
+#define IOWAIT_PENDING_TID 0x1
+
+/*
+ * A QP can have multiple Send Engines (SEs).
+ *
+ * The current use case is for supporting a TID RDMA
+ * packet build/xmit mechanism independent from verbs.
+ */
+#define IOWAIT_SES 2
+#define IOWAIT_IB_SE 0
+#define IOWAIT_TID_SE 1
+
struct sdma_txreq;
struct sdma_engine;
/**
- * struct iowait - linkage for delayed progress/waiting
+ * @iowork: the work struct
+ * @tx_head: list of prebuilt packets
+ * @iow: the parent iowait structure
+ *
+ * This structure is the work item (process) specific
+ * details associated with each of the two SEs of the
+ * QP.
+ *
+ * The workstruct and the queued TXs are unique to each
+ * SE.
+ */
+struct iowait;
+struct iowait_work {
+ struct work_struct iowork;
+ struct list_head tx_head;
+ struct iowait *iow;
+};
+
+/**
* @list: used to add/insert into QP/PQ wait lists
- * @lock: uses to record the list head lock
* @tx_head: overflow list of sdma_txreq's
* @sleep: no space callback
* @wakeup: space callback wakeup
* @sdma_drained: sdma count drained
+ * @lock: lock protected head of wait queue
* @iowork: workqueue overhead
* @wait_dma: wait for sdma_busy == 0
* @wait_pio: wait for pio_busy == 0
@@ -76,6 +108,8 @@ struct sdma_engine;
* @count: total number of descriptors in tx_head'ed list
* @tx_limit: limit for overflow queuing
* @tx_count: number of tx entry's in tx_head'ed list
+ * @flags: wait flags (one per QP)
+ * @wait: SE array
*
* This is to be embedded in user's state structure
* (QP or PQ).
@@ -98,13 +132,11 @@ struct sdma_engine;
 * Waiters explicitly know that, but the destroy
* code that unwaits QPs does not.
*/
-
struct iowait {
struct list_head list;
- struct list_head tx_head;
int (*sleep)(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
uint seq,
bool pkts_sent
@@ -112,7 +144,6 @@ struct iowait {
void (*wakeup)(struct iowait *wait, int reason);
void (*sdma_drained)(struct iowait *wait);
seqlock_t *lock;
- struct work_struct iowork;
wait_queue_head_t wait_dma;
wait_queue_head_t wait_pio;
atomic_t sdma_busy;
@@ -121,63 +152,37 @@ struct iowait {
u32 tx_limit;
u32 tx_count;
u8 starved_cnt;
+ unsigned long flags;
+ struct iowait_work wait[IOWAIT_SES];
};
#define SDMA_AVAIL_REASON 0
-/**
- * iowait_init() - initialize wait structure
- * @wait: wait struct to initialize
- * @tx_limit: limit for overflow queuing
- * @func: restart function for workqueue
- * @sleep: sleep function for no space
- * @resume: wakeup function for no space
- *
- * This function initializes the iowait
- * structure embedded in the QP or PQ.
- *
- */
+void iowait_set_flag(struct iowait *wait, u32 flag);
+bool iowait_flag_set(struct iowait *wait, u32 flag);
+void iowait_clear_flag(struct iowait *wait, u32 flag);
-static inline void iowait_init(
- struct iowait *wait,
- u32 tx_limit,
- void (*func)(struct work_struct *work),
- int (*sleep)(
- struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *tx,
- uint seq,
- bool pkts_sent),
- void (*wakeup)(struct iowait *wait, int reason),
- void (*sdma_drained)(struct iowait *wait))
-{
- wait->count = 0;
- wait->lock = NULL;
- INIT_LIST_HEAD(&wait->list);
- INIT_LIST_HEAD(&wait->tx_head);
- INIT_WORK(&wait->iowork, func);
- init_waitqueue_head(&wait->wait_dma);
- init_waitqueue_head(&wait->wait_pio);
- atomic_set(&wait->sdma_busy, 0);
- atomic_set(&wait->pio_busy, 0);
- wait->tx_limit = tx_limit;
- wait->sleep = sleep;
- wait->wakeup = wakeup;
- wait->sdma_drained = sdma_drained;
-}
+void iowait_init(struct iowait *wait, u32 tx_limit,
+ void (*func)(struct work_struct *work),
+ void (*tidfunc)(struct work_struct *work),
+ int (*sleep)(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ uint seq,
+ bool pkts_sent),
+ void (*wakeup)(struct iowait *wait, int reason),
+ void (*sdma_drained)(struct iowait *wait));
/**
- * iowait_schedule() - initialize wait structure
+ * iowait_schedule() - schedule the default send engine work
* @wait: wait struct to schedule
* @wq: workqueue for schedule
* @cpu: cpu
*/
-static inline void iowait_schedule(
- struct iowait *wait,
- struct workqueue_struct *wq,
- int cpu)
+static inline bool iowait_schedule(struct iowait *wait,
+ struct workqueue_struct *wq, int cpu)
{
- queue_work_on(cpu, wq, &wait->iowork);
+ return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_IB_SE].iowork);
}
/**
@@ -228,6 +233,8 @@ static inline void iowait_sdma_add(struct iowait *wait, int count)
*/
static inline int iowait_sdma_dec(struct iowait *wait)
{
+ if (!wait)
+ return 0;
return atomic_dec_and_test(&wait->sdma_busy);
}
@@ -267,11 +274,13 @@ static inline void iowait_pio_inc(struct iowait *wait)
}
/**
- * iowait_sdma_dec - note pio complete
+ * iowait_pio_dec - note pio complete
* @wait: iowait structure
*/
static inline int iowait_pio_dec(struct iowait *wait)
{
+ if (!wait)
+ return 0;
return atomic_dec_and_test(&wait->pio_busy);
}
@@ -293,9 +302,9 @@ static inline void iowait_drain_wakeup(struct iowait *wait)
/**
* iowait_get_txhead() - get packet off of iowait list
*
- * @wait wait struture
+ * @wait iowait_work structure
*/
-static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
+static inline struct sdma_txreq *iowait_get_txhead(struct iowait_work *wait)
{
struct sdma_txreq *tx = NULL;
@@ -309,6 +318,28 @@ static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
return tx;
}
+static inline u16 iowait_get_desc(struct iowait_work *w)
+{
+ u16 num_desc = 0;
+ struct sdma_txreq *tx = NULL;
+
+ if (!list_empty(&w->tx_head)) {
+ tx = list_first_entry(&w->tx_head, struct sdma_txreq,
+ list);
+ num_desc = tx->num_desc;
+ }
+ return num_desc;
+}
+
+static inline u32 iowait_get_all_desc(struct iowait *w)
+{
+ u32 num_desc = 0;
+
+ num_desc = iowait_get_desc(&w->wait[IOWAIT_IB_SE]);
+ num_desc += iowait_get_desc(&w->wait[IOWAIT_TID_SE]);
+ return num_desc;
+}
+
/**
* iowait_queue - Put the iowait on a wait queue
* @pkts_sent: have some packets been sent before queuing?
@@ -372,12 +403,57 @@ static inline void iowait_starve_find_max(struct iowait *w, u8 *max,
}
/**
- * iowait_packet_queued() - determine if a packet is already built
- * @wait: the wait structure
+ * iowait_packet_queued() - determine if a packet is queued
+ * @wait: the iowait_work structure
*/
-static inline bool iowait_packet_queued(struct iowait *wait)
+static inline bool iowait_packet_queued(struct iowait_work *wait)
{
return !list_empty(&wait->tx_head);
}
+/**
+ * inc_wait_count - increment wait counts
+ * @w: the log work struct
+ * @n: the count
+ */
+static inline void iowait_inc_wait_count(struct iowait_work *w, u16 n)
+{
+ if (!w)
+ return;
+ w->iow->tx_count++;
+ w->iow->count += n;
+}
+
+/**
+ * iowait_get_tid_work - return iowait_work for tid SE
+ * @w: the iowait struct
+ */
+static inline struct iowait_work *iowait_get_tid_work(struct iowait *w)
+{
+ return &w->wait[IOWAIT_TID_SE];
+}
+
+/**
+ * iowait_get_ib_work - return iowait_work for ib SE
+ * @w: the iowait struct
+ */
+static inline struct iowait_work *iowait_get_ib_work(struct iowait *w)
+{
+ return &w->wait[IOWAIT_IB_SE];
+}
+
+/**
+ * iowait_ioww_to_iow - return iowait given iowait_work
+ * @w: the iowait_work struct
+ */
+static inline struct iowait *iowait_ioww_to_iow(struct iowait_work *w)
+{
+ if (likely(w))
+ return w->iow;
+ return NULL;
+}
+
+void iowait_cancel_work(struct iowait *w);
+int iowait_set_work_flag(struct iowait_work *w);
+
#endif
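[Editor's note: with the accessors just added, checking whether a QP has anything outstanding now means looking at both legs. A small usage sketch, function name invented:]

/* Sketch: does either send engine of this iowait have queued packets? */
static inline bool example_iowait_has_pending(struct iowait *w)
{
	return iowait_packet_queued(iowait_get_ib_work(w)) ||
	       iowait_packet_queued(iowait_get_tid_work(w));
}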
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 0307405491e0..88a0cf930136 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -4836,7 +4836,7 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
int ret;
int pkey_idx;
int local_mad = 0;
- u32 resp_len = 0;
+ u32 resp_len = in_wc->byte_len - sizeof(*in_grh);
struct hfi1_ibport *ibp = to_iport(ibdev, port);
pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
new file mode 100644
index 000000000000..d920b165d696
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "hfi.h"
+#include "affinity.h"
+#include "sdma.h"
+
+/**
+ * msix_initialize() - Calculate, request and configure MSIx IRQs
+ * @dd: valid hfi1 devdata
+ *
+ */
+int msix_initialize(struct hfi1_devdata *dd)
+{
+ u32 total;
+ int ret;
+ struct hfi1_msix_entry *entries;
+
+ /*
+ * MSIx interrupt count:
+ * one for the general, "slow path" interrupt
+ * one per used SDMA engine
+ * one per kernel receive context
+ * one for each VNIC context
+ * ...any new IRQs should be added here.
+ */
+ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
+
+ if (total >= CCE_NUM_MSIX_VECTORS)
+ return -EINVAL;
+
+ ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
+ return ret;
+ }
+
+ entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
+ GFP_KERNEL);
+ if (!entries) {
+ pci_free_irq_vectors(dd->pcidev);
+ return -ENOMEM;
+ }
+
+ dd->msix_info.msix_entries = entries;
+ spin_lock_init(&dd->msix_info.msix_lock);
+ bitmap_zero(dd->msix_info.in_use_msix, total);
+ dd->msix_info.max_requested = total;
+ dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
+
+ return 0;
+}
+
+/**
+ * msix_request_irq() - Allocate a free MSIx IRQ
+ * @dd: valid devdata
+ * @arg: context information for the IRQ
+ * @handler: IRQ handler
+ * @thread: IRQ thread handler (could be NULL)
+ * @idx: zero-based index if multiple IRQs of this type are needed
+ * @type: affinity IRQ type
+ *
+ * Allocate an MSIx vector if available, and then create the appropriate
+ * metadata needed to keep track of the PCI IRQ request.
+ *
+ * Return:
+ * < 0 Error
+ * >= 0 MSIx vector
+ *
+ */
+static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
+ irq_handler_t handler, irq_handler_t thread,
+ u32 idx, enum irq_type type)
+{
+ unsigned long nr;
+ int irq;
+ int ret;
+ const char *err_info;
+ char name[MAX_NAME_SIZE];
+ struct hfi1_msix_entry *me;
+
+ /* Allocate an MSIx vector */
+ spin_lock(&dd->msix_info.msix_lock);
+ nr = find_first_zero_bit(dd->msix_info.in_use_msix,
+ dd->msix_info.max_requested);
+ if (nr < dd->msix_info.max_requested)
+ __set_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+
+ if (nr == dd->msix_info.max_requested)
+ return -ENOSPC;
+
+ /* Specific verification and determine the name */
+ switch (type) {
+ case IRQ_GENERAL:
+ /* general interrupt must be MSIx vector 0 */
+ if (nr) {
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+ dd_dev_err(dd, "Invalid index %lu for GENERAL IRQ\n",
+ nr);
+ return -EINVAL;
+ }
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
+ err_info = "general";
+ break;
+ case IRQ_SDMA:
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
+ dd->unit, idx);
+ err_info = "sdma";
+ break;
+ case IRQ_RCVCTXT:
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
+ dd->unit, idx);
+ err_info = "receive context";
+ break;
+ case IRQ_OTHER:
+ default:
+ return -EINVAL;
+ }
+ name[sizeof(name) - 1] = 0;
+
+ irq = pci_irq_vector(dd->pcidev, nr);
+ ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: request for IRQ %d failed, MSIx %d, err %d\n",
+ err_info, irq, idx, ret);
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+ return ret;
+ }
+
+ /*
+ * assign arg after pci_request_irq call, so it will be
+ * cleaned up
+ */
+ me = &dd->msix_info.msix_entries[nr];
+ me->irq = irq;
+ me->arg = arg;
+ me->type = type;
+
+ /* This is a request, so a failure is not fatal */
+ ret = hfi1_get_irq_affinity(dd, me);
+ if (ret)
+ dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
+
+ return nr;
+}
+
+/**
+ * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * @rcd: valid rcd context
+ *
+ */
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+{
+ int nr;
+
+ nr = msix_request_irq(rcd->dd, rcd, receive_context_interrupt,
+ receive_context_thread, rcd->ctxt, IRQ_RCVCTXT);
+ if (nr < 0)
+ return nr;
+
+ /*
+ * Set the interrupt register and mask for this
+ * context's interrupt.
+ */
+ rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;
+ rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);
+ rcd->msix_intr = nr;
+ remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);
+
+ return 0;
+}
+
+/**
+ * msix_request_sdma_irq() - Helper for getting SDMA IRQ resources
+ * @sde: valid sdma engine
+ *
+ */
+int msix_request_sdma_irq(struct sdma_engine *sde)
+{
+ int nr;
+
+ nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
+ sde->this_idx, IRQ_SDMA);
+ if (nr < 0)
+ return nr;
+ sde->msix_intr = nr;
+ remap_sdma_interrupts(sde->dd, sde->this_idx, nr);
+
+ return 0;
+}
+
+/**
+ * enable_sdma_srcs() - Helper to enable SDMA IRQ sources
+ * @dd: valid devdata structure
+ * @i: index of SDMA engine
+ */
+static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
+{
+ set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
+ set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
+ IS_SDMA_PROGRESS_START + i, true);
+ set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
+ set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
+ true);
+}
+
+/**
+ * msix_request_irqs() - Allocate all MSIx IRQs
+ * @dd: valid devdata structure
+ *
+ * Helper function to request the used MSIx IRQs.
+ *
+ */
+int msix_request_irqs(struct hfi1_devdata *dd)
+{
+ int i;
+ int ret;
+
+ ret = msix_request_irq(dd, dd, general_interrupt, NULL, 0, IRQ_GENERAL);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dd->num_sdma; i++) {
+ struct sdma_engine *sde = &dd->per_sdma[i];
+
+ ret = msix_request_sdma_irq(sde);
+ if (ret)
+ return ret;
+ enable_sdma_srcs(sde->dd, i);
+ }
+
+ for (i = 0; i < dd->n_krcv_queues; i++) {
+ struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);
+
+ if (rcd)
+ ret = msix_request_rcd_irq(rcd);
+ hfi1_rcd_put(rcd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * msix_free_irq() - Free the specified MSIx resources and IRQ
+ * @dd: valid devdata
+ * @msix_intr: MSIx vector to free.
+ *
+ */
+void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
+{
+ struct hfi1_msix_entry *me;
+
+ if (msix_intr >= dd->msix_info.max_requested)
+ return;
+
+ me = &dd->msix_info.msix_entries[msix_intr];
+
+ if (!me->arg) /* => no irq, no affinity */
+ return;
+
+ hfi1_put_irq_affinity(dd, me);
+ pci_free_irq(dd->pcidev, msix_intr, me->arg);
+
+ me->arg = NULL;
+
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(msix_intr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+}
+
+/**
+ * msix_clean_up_interrupts() - Free all MSIx IRQ resources
+ * @dd: valid device data structure
+ *
+ * Free the MSIx and associated PCI resources, if they have been allocated.
+ */
+void msix_clean_up_interrupts(struct hfi1_devdata *dd)
+{
+ int i;
+ struct hfi1_msix_entry *me = dd->msix_info.msix_entries;
+
+ /* remove irqs - must happen before disabling/turning off */
+ for (i = 0; i < dd->msix_info.max_requested; i++, me++)
+ msix_free_irq(dd, i);
+
+ /* clean structures */
+ kfree(dd->msix_info.msix_entries);
+ dd->msix_info.msix_entries = NULL;
+ dd->msix_info.max_requested = 0;
+
+ pci_free_irq_vectors(dd->pcidev);
+}
+
+/**
+ * msix_vnic_synchronize_irq() - Synchronize VNIC IRQs
+ * @dd: valid devdata
+ */
+void msix_vnic_synchronize_irq(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->vnic.num_ctxt; i++) {
+ struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
+ struct hfi1_msix_entry *me;
+
+ me = &dd->msix_info.msix_entries[rcd->msix_intr];
+
+ synchronize_irq(me->irq);
+ }
+}
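[Editor's note: the allocate/release pattern in msix_request_irq()/msix_free_irq() above (spinlock + find_first_zero_bit + __set_bit / __clear_bit) is the core of the new vector accounting. Condensed to its essentials over a standalone bitmap, the same pattern looks like this; POOL_SIZE and the pool_* names are invented:]

#include <linux/bitmap.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

#define POOL_SIZE 256

static DEFINE_SPINLOCK(pool_lock);
static DECLARE_BITMAP(pool, POOL_SIZE);

/* Returns an unused slot, or -ENOSPC when the pool is exhausted. */
static int pool_alloc(void)
{
	unsigned long nr;

	spin_lock(&pool_lock);
	nr = find_first_zero_bit(pool, POOL_SIZE);
	if (nr < POOL_SIZE)
		__set_bit(nr, pool);
	spin_unlock(&pool_lock);

	return nr < POOL_SIZE ? (int)nr : -ENOSPC;
}

static void pool_free(int nr)
{
	spin_lock(&pool_lock);
	__clear_bit(nr, pool);
	spin_unlock(&pool_lock);
}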
diff --git a/drivers/infiniband/hw/hfi1/msix.h b/drivers/infiniband/hw/hfi1/msix.h
new file mode 100644
index 000000000000..a514881632a4
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/msix.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _HFI1_MSIX_H
+#define _HFI1_MSIX_H
+
+#include "hfi.h"
+
+/* MSIx interface */
+int msix_initialize(struct hfi1_devdata *dd);
+int msix_request_irqs(struct hfi1_devdata *dd);
+void msix_clean_up_interrupts(struct hfi1_devdata *dd);
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
+int msix_request_sdma_irq(struct sdma_engine *sde);
+void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
+
+/* VNIC interface */
+void msix_vnic_synchronize_irq(struct hfi1_devdata *dd);
+
+#endif
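[Editor's note: the interface above implies a simple lifecycle, mirroring set_up_interrupts() and shutdown_device() in the hunks above. A condensed sketch follows; the wrapper name is invented, and the mask/clear/reset steps that set_up_interrupts() performs first are omitted.]

static int example_msix_bringup(struct hfi1_devdata *dd)
{
	int ret;

	ret = msix_initialize(dd);	/* pci_alloc_irq_vectors + entry table */
	if (ret)
		return ret;

	ret = msix_request_irqs(dd);	/* general + SDMA + kernel rcv ctxts */
	if (ret)
		msix_clean_up_interrupts(dd);	/* frees IRQs and PCI vectors */

	return ret;
}

Dynamic users such as VNIC request and release vectors individually with msix_request_rcd_irq() and msix_free_irq(dd, rcd->msix_intr).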
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 6c967dde58e7..c96d193bb236 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -61,19 +61,12 @@
*/
/*
- * Code to adjust PCIe capabilities.
- */
-static void tune_pcie_caps(struct hfi1_devdata *);
-
-/*
* Do all the common PCIe setup and initialization.
- * devdata is not yet allocated, and is not allocated until after this
- * routine returns success. Therefore dd_dev_err() can't be used for error
- * printing.
*/
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+int hfi1_pcie_init(struct hfi1_devdata *dd)
{
int ret;
+ struct pci_dev *pdev = dd->pcidev;
ret = pci_enable_device(pdev);
if (ret) {
@@ -89,15 +82,13 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
* about that, it appears. If the original BAR was retained
* in the kernel data structures, this may be OK.
*/
- hfi1_early_err(&pdev->dev, "pci enable failed: error %d\n",
- -ret);
- goto done;
+ dd_dev_err(dd, "pci enable failed: error %d\n", -ret);
+ return ret;
}
ret = pci_request_regions(pdev, DRIVER_NAME);
if (ret) {
- hfi1_early_err(&pdev->dev,
- "pci_request_regions fails: err %d\n", -ret);
+ dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);
goto bail;
}
@@ -110,8 +101,7 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- hfi1_early_err(&pdev->dev,
- "Unable to set DMA mask: %d\n", ret);
+ dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
goto bail;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -119,18 +109,16 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
if (ret) {
- hfi1_early_err(&pdev->dev,
- "Unable to set DMA consistent mask: %d\n", ret);
+ dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
goto bail;
}
pci_set_master(pdev);
(void)pci_enable_pcie_error_reporting(pdev);
- goto done;
+ return 0;
bail:
hfi1_pcie_cleanup(pdev);
-done:
return ret;
}
@@ -206,7 +194,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
dd_dev_err(dd, "WC mapping of send buffers failed\n");
goto nomem;
}
- dd_dev_info(dd, "WC piobase: %p\n for %x", dd->piobase, TXE_PIO_SIZE);
+ dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE);
dd->physaddr = addr; /* used for io_remap, etc. */
@@ -344,26 +332,6 @@ int pcie_speeds(struct hfi1_devdata *dd)
return 0;
}
-/*
- * Returns:
- * - actual number of interrupts allocated or
- * - error
- */
-int request_msix(struct hfi1_devdata *dd, u32 msireq)
-{
- int nvec;
-
- nvec = pci_alloc_irq_vectors(dd->pcidev, msireq, msireq, PCI_IRQ_MSIX);
- if (nvec < 0) {
- dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
- return nvec;
- }
-
- tune_pcie_caps(dd);
-
- return nvec;
-}
-
/* restore command and BARs after a reset has wiped them out */
int restore_pci_variables(struct hfi1_devdata *dd)
{
@@ -479,14 +447,19 @@ error:
* Check and optionally adjust them to maximize our throughput.
*/
static int hfi1_pcie_caps;
-module_param_named(pcie_caps, hfi1_pcie_caps, int, S_IRUGO);
+module_param_named(pcie_caps, hfi1_pcie_caps, int, 0444);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
uint aspm_mode = ASPM_MODE_DISABLED;
-module_param_named(aspm, aspm_mode, uint, S_IRUGO);
+module_param_named(aspm, aspm_mode, uint, 0444);
MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
-static void tune_pcie_caps(struct hfi1_devdata *dd)
+/**
+ * tune_pcie_caps() - Code to adjust PCIe capabilities.
+ * @dd: Valid device data structure
+ *
+ */
+void tune_pcie_caps(struct hfi1_devdata *dd)
{
struct pci_dev *parent;
u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
@@ -650,7 +623,6 @@ pci_resume(struct pci_dev *pdev)
struct hfi1_devdata *dd = pci_get_drvdata(pdev);
dd_dev_info(dd, "HFI1 resume function called\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
/*
* Running jobs will fail, since it's asynchronous
* unlike sysfs-requested reset. Better than
@@ -1029,6 +1001,7 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
const u8 (*ctle_tunings)[4];
uint static_ctle_mode;
int return_error = 0;
+ u32 target_width;
/* PCIe Gen3 is for the ASIC only */
if (dd->icode != ICODE_RTL_SILICON)
@@ -1068,6 +1041,9 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
return 0;
}
+ /* Previous Gen1/Gen2 bus width */
+ target_width = dd->lbus_width;
+
/*
* Do the Gen3 transition. Steps are those of the PCIe Gen3
* recipe.
@@ -1436,11 +1412,12 @@ retry:
dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
dd->lbus_info);
- if (dd->lbus_speed != target_speed) { /* not target */
+ if (dd->lbus_speed != target_speed ||
+ dd->lbus_width < target_width) { /* not target */
/* maybe retry */
do_retry = retry_count < pcie_retry;
- dd_dev_err(dd, "PCIe link speed did not switch to Gen%d%s\n",
- pcie_target, do_retry ? ", retrying" : "");
+ dd_dev_err(dd, "PCIe link speed or width did not match target%s\n",
+ do_retry ? ", retrying" : "");
retry_count++;
if (do_retry) {
msleep(100); /* allow time to settle */
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 752057647f09..9ab50d2308dc 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -71,14 +71,6 @@ void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
}
}
-/* defined in header release 48 and higher */
-#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
-#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
-#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
-#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
- << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
-#endif
-
/* global control of PIO send */
void pio_send_control(struct hfi1_devdata *dd, int op)
{
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 9b1e84a6b1cc..6f3bc4dab858 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(qp_table_size, "QP table size");
static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *stx,
unsigned int seq,
bool pkts_sent);
@@ -134,15 +134,13 @@ const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
};
-static void flush_tx_list(struct rvt_qp *qp)
+static void flush_list_head(struct list_head *l)
{
- struct hfi1_qp_priv *priv = qp->priv;
-
- while (!list_empty(&priv->s_iowait.tx_head)) {
+ while (!list_empty(l)) {
struct sdma_txreq *tx;
tx = list_first_entry(
- &priv->s_iowait.tx_head,
+ l,
struct sdma_txreq,
list);
list_del_init(&tx->list);
@@ -151,6 +149,14 @@ static void flush_tx_list(struct rvt_qp *qp)
}
}
+static void flush_tx_list(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head);
+ flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head);
+}
+
static void flush_iowait(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
@@ -282,33 +288,46 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
}
/**
- * hfi1_check_send_wqe - validate wqe
+ * hfi1_setup_wqe - set up the wqe
* @qp - The qp
* @wqe - The built wqe
+ * @call_send - Determine if the send should be posted or scheduled.
*
- * validate wqe. This is called
- * prior to inserting the wqe into
- * the ring but after the wqe has been
- * setup.
+ * Perform setup of the wqe. This is called
+ * prior to inserting the wqe into the ring but after
+ * the wqe has been setup by RDMAVT. This function
+ * allows the driver the opportunity to perform
+ * validation and additional setup of the wqe.
*
* Returns 0 on success, -EINVAL on failure
*
*/
-int hfi1_check_send_wqe(struct rvt_qp *qp,
- struct rvt_swqe *wqe)
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct rvt_ah *ah;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:
if (wqe->length > 0x80000000U)
return -EINVAL;
+ if (wqe->length > qp->pmtu)
+ *call_send = false;
break;
case IB_QPT_SMI:
- ah = ibah_to_rvtah(wqe->ud_wr.ah);
- if (wqe->length > (1 << ah->log_pmtu))
+ /*
+ * SM packets should exclusively use VL15 and their SL is
+ * ignored (IBTA v1.3, Section 3.5.8.2). Therefore, when ah
+ * is created, SL is 0 in most cases and as a result some
+ * fields (vl and pmtu) in ah may not be set correctly,
+ * depending on the SL2SC and SC2VL tables at the time.
+ */
+ ppd = ppd_from_ibp(ibp);
+ dd = dd_from_ppd(ppd);
+ if (wqe->length > dd->vld[15].mtu)
return -EINVAL;
break;
case IB_QPT_GSI:
@@ -321,7 +340,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
default:
break;
}
- return wqe->length <= piothreshold;
+ return 0;
}
/**
@@ -333,7 +352,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
* It is only used in the post send, which doesn't hold
* the s_lock.
*/
-void _hfi1_schedule_send(struct rvt_qp *qp)
+bool _hfi1_schedule_send(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibport *ibp =
@@ -341,10 +360,10 @@ void _hfi1_schedule_send(struct rvt_qp *qp)
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
- priv->s_sde ?
- priv->s_sde->cpu :
- cpumask_first(cpumask_of_node(dd->node)));
+ return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
+ priv->s_sde ?
+ priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(dd->node)));
}
static void qp_pio_drain(struct rvt_qp *qp)
@@ -372,12 +391,32 @@ static void qp_pio_drain(struct rvt_qp *qp)
*
* This schedules qp progress and caller should hold
* the s_lock.
+ * @return true if the first leg is scheduled;
+ * false if the first leg is not scheduled.
*/
-void hfi1_schedule_send(struct rvt_qp *qp)
+bool hfi1_schedule_send(struct rvt_qp *qp)
{
lockdep_assert_held(&qp->s_lock);
- if (hfi1_send_ok(qp))
+ if (hfi1_send_ok(qp)) {
_hfi1_schedule_send(qp);
+ return true;
+ }
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
+ IOWAIT_PENDING_IB);
+ return false;
+}
+
+static void hfi1_qp_schedule(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ bool ret;
+
+ if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
+ ret = hfi1_schedule_send(qp);
+ if (ret)
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ }
}
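
hfi1_schedule_send() and hfi1_qp_schedule() lean on the new iowait flag helpers. A hedged sketch of what those are assumed to look like (the actual definitions live in iowait.h, outside this hunk); the tracepoints are the ones added by trace_iowait.h later in this patch:

static inline void iowait_set_flag(struct iowait *wait, u32 flag)
{
	trace_hfi1_iowait_set(wait, flag);
	set_bit(flag, &wait->flags);
}

static inline bool iowait_flag_set(struct iowait *wait, u32 flag)
{
	return test_bit(flag, &wait->flags);
}

static inline void iowait_clear_flag(struct iowait *wait, u32 flag)
{
	trace_hfi1_iowait_clear(wait, flag);
	clear_bit(flag, &wait->flags);
}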
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
@@ -388,16 +427,22 @@ void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
if (qp->s_flags & flag) {
qp->s_flags &= ~flag;
trace_hfi1_qpwakeup(qp, flag);
- hfi1_schedule_send(qp);
+ hfi1_qp_schedule(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
/* Notify hfi1_destroy_qp() if it is waiting. */
rvt_put_qp(qp);
}
+void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
+{
+ if (iowait_set_work_flag(wait) == IOWAIT_IB_SE)
+ qp->s_flags &= ~RVT_S_BUSY;
+}
+
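
hfi1_qp_unbusy() only drops RVT_S_BUSY when the waiting work item is the IB send-engine leg. A hedged sketch of the iowait_set_work_flag() helper it assumes (defined in iowait.h, not shown here), which records which leg is pending and reports it:

static inline int iowait_set_work_flag(struct iowait_work *w)
{
	if (w == &w->iow->wait[IOWAIT_IB_SE]) {
		iowait_set_flag(w->iow, IOWAIT_PENDING_IB);
		return IOWAIT_IB_SE;
	}
	iowait_set_flag(w->iow, IOWAIT_PENDING_TID);
	return IOWAIT_TID_SE;
}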
static int iowait_sleep(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *stx,
uint seq,
bool pkts_sent)
@@ -438,7 +483,7 @@ static int iowait_sleep(
rvt_get_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_qp_unbusy(qp, wait);
spin_unlock_irqrestore(&qp->s_lock, flags);
ret = -EBUSY;
} else {
@@ -637,6 +682,7 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
&priv->s_iowait,
1,
_hfi1_do_send,
+ NULL,
iowait_sleep,
iowait_wakeup,
iowait_sdma_drained);
@@ -686,7 +732,7 @@ void stop_send_queue(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- cancel_work_sync(&priv->s_iowait.iowork);
+ iowait_cancel_work(&priv->s_iowait);
}
void quiesce_qp(struct rvt_qp *qp)
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 078cff7560b6..7adb6dff6813 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -58,18 +58,6 @@ extern unsigned int hfi1_qp_table_size;
extern const struct rvt_operation_params hfi1_post_parms[];
/*
- * Send if not busy or waiting for I/O and either
- * a RC response is pending or we can process send work requests.
- */
-static inline int hfi1_send_ok(struct rvt_qp *qp)
-{
- return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
- (verbs_txreq_queued(qp) ||
- (qp->s_flags & RVT_S_RESP_PENDING) ||
- !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
-}
-
-/*
* Driver specific s_flags starting at bit 31 down to HFI1_S_MIN_BIT_MASK
*
* HFI1_S_AHG_VALID - ahg header valid on chip
@@ -90,6 +78,20 @@ static inline int hfi1_send_ok(struct rvt_qp *qp)
#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
/*
+ * Send if not busy or waiting for I/O and either
+ * an RC response is pending or we can process send work requests.
+ */
+static inline int hfi1_send_ok(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ return !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
+ (verbs_txreq_queued(iowait_get_ib_work(&priv->s_iowait)) ||
+ (qp->s_flags & RVT_S_RESP_PENDING) ||
+ !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
+}
+
+/*
* free_ahg - clear ahg from QP
*/
static inline void clear_ahg(struct rvt_qp *qp)
@@ -129,8 +131,8 @@ struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5);
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
-void _hfi1_schedule_send(struct rvt_qp *qp);
-void hfi1_schedule_send(struct rvt_qp *qp);
+bool _hfi1_schedule_send(struct rvt_qp *qp);
+bool hfi1_schedule_send(struct rvt_qp *qp);
void hfi1_migrate_qp(struct rvt_qp *qp);
@@ -150,4 +152,5 @@ void quiesce_qp(struct rvt_qp *qp);
u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
int mtu_to_path_mtu(u32 mtu);
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl);
+void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait);
#endif /* _QP_H */
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 9bd63abb2dfe..188aa4f686a0 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -309,7 +309,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
clear_ahg(qp);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
/* will get called again */
goto done_free_tx;
@@ -378,9 +378,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
wqe->wr.ex.invalidate_rkey);
local_ops = 1;
}
- hfi1_send_complete(qp, wqe,
- err ? IB_WC_LOC_PROT_ERR
- : IB_WC_SUCCESS);
+ rvt_send_complete(qp, wqe,
+ err ? IB_WC_LOC_PROT_ERR
+ : IB_WC_SUCCESS);
if (local_ops)
atomic_dec(&qp->local_ops_pending);
goto done_free_tx;
@@ -1043,7 +1043,7 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
hfi1_migrate_qp(qp);
qp->s_retry = qp->s_retry_cnt;
} else if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
return;
} else { /* need to handle delayed completion */
@@ -1468,7 +1468,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
ibp->rvp.n_other_naks++;
class_b:
if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
break;
@@ -1644,7 +1644,8 @@ read_middle:
qp->s_rdma_read_len -= pmtu;
update_last_psn(qp, psn);
spin_unlock_irqrestore(&qp->s_lock, flags);
- hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, false, false);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, pmtu, false, false);
goto bail;
case OP(RDMA_READ_RESPONSE_ONLY):
@@ -1684,7 +1685,8 @@ read_last:
if (unlikely(tlen != qp->s_rdma_read_len))
goto ack_len_err;
aeth = be32_to_cpu(ohdr->u.aeth);
- hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, false, false);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, tlen, false, false);
WARN_ON(qp->s_rdma_read_sge.num_sge);
(void)do_rc_ack(qp, aeth, psn,
OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
@@ -1704,7 +1706,7 @@ ack_len_err:
status = IB_WC_LOC_LEN_ERR;
ack_err:
if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
ack_done:
@@ -2144,7 +2146,7 @@ send_middle:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto nack_inv;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -2200,7 +2202,7 @@ send_last:
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
- hfi1_copy_sge(&qp->r_sge, data, tlen, true, copy_last);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
rvt_put_ss(&qp->r_sge);
qp->r_msn++;
if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 5f56f3c1b4c4..7fb317c711df 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -156,333 +156,6 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
}
/**
- * ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from hfi1_do_send() to
- * forward a WQE addressed to the same HFI.
- * Note that although we are single threaded due to the send engine, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void ruc_loopback(struct rvt_qp *sqp)
-{
- struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- struct rvt_sge *sge;
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
- atomic64_t *maddr;
- enum ib_wc_status send_status;
- bool release;
- int ret;
- bool copy_last = false;
- int local_ops = 0;
-
- rcu_read_lock();
-
- /*
- * Note that we check the responder QP state after
- * checking the requester's state.
- */
- qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
- sqp->remote_qpn);
-
- spin_lock_irqsave(&sqp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT)) ||
- !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- goto unlock;
-
- sqp->s_flags |= RVT_S_BUSY;
-
-again:
- if (sqp->s_last == READ_ONCE(sqp->s_head))
- goto clr_busy;
- wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
-
- /* Return if it is not OK to start a new work request. */
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
- goto clr_busy;
- /* We are in the error state, flush the work request. */
- send_status = IB_WC_WR_FLUSH_ERR;
- goto flush_send;
- }
-
- /*
- * We can rely on the entry not changing without the s_lock
- * being held until we update s_last.
- * We increment s_cur to indicate s_last is in progress.
- */
- if (sqp->s_last == sqp->s_cur) {
- if (++sqp->s_cur >= sqp->s_size)
- sqp->s_cur = 0;
- }
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-
- if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
- qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->rvp.n_pkt_drops++;
- /*
- * For RC, the requester would timeout and retry so
- * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
- goto serr;
- }
-
- memset(&wc, 0, sizeof(wc));
- send_status = IB_WC_SUCCESS;
-
- release = true;
- sqp->s_sge.sge = wqe->sg_list[0];
- sqp->s_sge.sg_list = wqe->sg_list + 1;
- sqp->s_sge.num_sge = wqe->wr.num_sge;
- sqp->s_len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_REG_MR:
- goto send_comp;
-
- case IB_WR_LOCAL_INV:
- if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
- if (rvt_invalidate_rkey(sqp,
- wqe->wr.ex.invalidate_rkey))
- send_status = IB_WC_LOC_PROT_ERR;
- local_ops = 1;
- }
- goto send_comp;
-
- case IB_WR_SEND_WITH_INV:
- if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
- wc.wc_flags = IB_WC_WITH_INVALIDATE;
- wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
- }
- goto send;
-
- case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
- case IB_WR_SEND:
-send:
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- break;
-
- case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- /* skip copy_last set and qp_access_flags recheck */
- goto do_write;
- case IB_WR_RDMA_WRITE:
- copy_last = rvt_is_user_qp(qp);
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
-do_write:
- if (wqe->length == 0)
- break;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_WRITE)))
- goto acc_err;
- qp->r_sge.sg_list = NULL;
- qp->r_sge.num_sge = 1;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
- release = false;
- sqp->s_sge.sg_list = NULL;
- sqp->s_sge.num_sge = 1;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->wr.num_sge;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- wqe->atomic_wr.remote_addr,
- wqe->atomic_wr.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
- sdata = wqe->atomic_wr.compare_add;
- *(u64 *)sqp->s_sge.sge.vaddr =
- (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64)atomic64_add_return(sdata, maddr) - sdata :
- (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
- sdata, wqe->atomic_wr.swap);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- goto send_comp;
-
- default:
- send_status = IB_WC_LOC_QP_OP_ERR;
- goto serr;
- }
-
- sge = &sqp->s_sge.sge;
- while (sqp->s_len) {
- u32 len = sqp->s_len;
-
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (!release)
- rvt_put_mr(sge->mr);
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- sqp->s_len -= len;
- }
- if (release)
- rvt_put_ss(&qp->r_sge);
-
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto send_comp;
-
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
- spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->rvp.n_loop_pkts++;
-flush_send:
- sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- hfi1_send_complete(sqp, wqe, send_status);
- if (local_ops) {
- atomic_dec(&sqp->local_ops_pending);
- local_ops = 0;
- }
- goto again;
-
-rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- ibp->rvp.n_rnr_naks++;
- /*
- * Note: we don't need the s_lock held since the BUSY flag
- * makes this single threaded.
- */
- if (sqp->s_rnr_retry == 0) {
- send_status = IB_WC_RNR_RETRY_EXC_ERR;
- goto serr;
- }
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
- goto clr_busy;
- rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
- IB_AETH_CREDIT_SHIFT);
- goto clr_busy;
-
-op_err:
- send_status = IB_WC_REM_OP_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-acc_err:
- send_status = IB_WC_REM_ACCESS_ERR;
- wc.status = IB_WC_LOC_PROT_ERR;
-err:
- /* responder goes to error state */
- rvt_rc_error(qp, wc.status);
-
-serr:
- spin_lock_irqsave(&sqp->s_lock, flags);
- hfi1_send_complete(sqp, wqe, send_status);
- if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
- sqp->s_flags &= ~RVT_S_BUSY;
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = sqp->ibqp.device;
- ev.element.qp = &sqp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
- }
- goto done;
- }
-clr_busy:
- sqp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
- rcu_read_unlock();
-}
-
-/**
* hfi1_make_grh - construct a GRH header
* @ibp: a pointer to the IB port
* @hdr: a pointer to the GRH header being constructed
@@ -825,8 +498,8 @@ void hfi1_do_send_from_rvt(struct rvt_qp *qp)
void _hfi1_do_send(struct work_struct *work)
{
- struct iowait *wait = container_of(work, struct iowait, iowork);
- struct rvt_qp *qp = iowait_to_qp(wait);
+ struct iowait_work *w = container_of(work, struct iowait_work, iowork);
+ struct rvt_qp *qp = iowait_to_qp(w->iow);
hfi1_do_send(qp, true);
}
@@ -850,6 +523,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
ps.ppd = ppd_from_ibp(ps.ibp);
ps.in_thread = in_thread;
+ ps.wait = iowait_get_ib_work(&priv->s_iowait);
trace_hfi1_rc_do_send(qp, in_thread);
@@ -858,7 +532,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
~((1 << ps.ppd->lmc) - 1)) ==
ps.ppd->lid)) {
- ruc_loopback(qp);
+ rvt_ruc_loopback(qp);
return;
}
make_req = hfi1_make_rc_req;
@@ -868,7 +542,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
~((1 << ps.ppd->lmc) - 1)) ==
ps.ppd->lid)) {
- ruc_loopback(qp);
+ rvt_ruc_loopback(qp);
return;
}
make_req = hfi1_make_uc_req;
@@ -883,6 +557,8 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
/* Return if we are already busy processing a work request. */
if (!hfi1_send_ok(qp)) {
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
spin_unlock_irqrestore(&qp->s_lock, ps.flags);
return;
}
@@ -896,7 +572,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
ps.pkts_sent = false;
/* ensure a pre-built packet is handled */
- ps.s_txreq = get_waiting_verbs_txreq(qp);
+ ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
do {
/* Check for a constructed packet to be sent. */
if (ps.s_txreq) {
@@ -907,6 +583,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
*/
if (hfi1_verbs_send(qp, &ps))
return;
+
/* allow other tasks to run */
if (schedule_send_yield(qp, &ps))
return;
@@ -917,44 +594,3 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}
-
-/*
- * This should be called with s_lock held.
- */
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status)
-{
- u32 old_last, last;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- return;
-
- last = qp->s_last;
- old_last = last;
- trace_hfi1_qp_send_completion(qp, wqe, last);
- if (++last >= qp->s_size)
- last = 0;
- trace_hfi1_qp_send_completion(qp, wqe, last);
- qp->s_last = last;
- /* See post_send() */
- barrier();
- rvt_put_swqe(wqe);
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
- rvt_qp_swqe_complete(qp,
- wqe,
- ib_hfi1_wc_opcode[wqe->wr.opcode],
- status);
-
- if (qp->s_acked == old_last)
- qp->s_acked = last;
- if (qp->s_cur == old_last)
- qp->s_cur = last;
- if (qp->s_tail == old_last)
- qp->s_tail = last;
- if (qp->state == IB_QPS_SQD && last == qp->s_cur)
- qp->s_draining = 0;
-}
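
Both bodies removed from ruc.c survive in rdmavt: the loopback engine as rvt_ruc_loopback() (dispatched from hfi1_do_send(), see the hunks above) and the completion bookkeeping as rvt_send_complete(). A hedged reminder that the locking contract is unchanged, so converted call sites still hold s_lock, e.g.:

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_send_complete(qp, qp->s_wqe, wc_status);	/* was hfi1_send_complete() */
	spin_unlock_irqrestore(&qp->s_lock, flags);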
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 88e326d6cc49..891d2386d1ca 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -378,7 +378,7 @@ static inline void complete_tx(struct sdma_engine *sde,
__sdma_txclean(sde->dd, tx);
if (complete)
(*complete)(tx, res);
- if (wait && iowait_sdma_dec(wait))
+ if (iowait_sdma_dec(wait))
iowait_drain_wakeup(wait);
}
@@ -1758,7 +1758,6 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
struct iowait *wait, *nw;
struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
uint i, n = 0, seq, max_idx = 0;
- struct sdma_txreq *stx;
struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
u8 max_starved_cnt = 0;
@@ -1779,19 +1778,13 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
nw,
&sde->dmawait,
list) {
- u16 num_desc = 0;
+ u32 num_desc;
if (!wait->wakeup)
continue;
if (n == ARRAY_SIZE(waits))
break;
- if (!list_empty(&wait->tx_head)) {
- stx = list_first_entry(
- &wait->tx_head,
- struct sdma_txreq,
- list);
- num_desc = stx->num_desc;
- }
+ num_desc = iowait_get_all_desc(wait);
if (num_desc > avail)
break;
avail -= num_desc;
@@ -2346,7 +2339,7 @@ static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
*/
static int sdma_check_progress(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
bool pkts_sent)
{
@@ -2356,12 +2349,12 @@ static int sdma_check_progress(
if (tx->num_desc <= sde->desc_avail)
return -EAGAIN;
/* pulse the head_lock */
- if (wait && wait->sleep) {
+ if (wait && iowait_ioww_to_iow(wait)->sleep) {
unsigned seq;
seq = raw_seqcount_begin(
(const seqcount_t *)&sde->head_lock.seqcount);
- ret = wait->sleep(sde, wait, tx, seq, pkts_sent);
+ ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
if (ret == -EAGAIN)
sde->desc_avail = sdma_descq_freecnt(sde);
} else {
@@ -2373,7 +2366,7 @@ static int sdma_check_progress(
/**
* sdma_send_txreq() - submit a tx req to ring
* @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
+ * @wait: SE wait structure to use when full (may be NULL)
* @tx: sdma_txreq to submit
* @pkts_sent: has any packet been sent yet?
*
@@ -2386,7 +2379,7 @@ static int sdma_check_progress(
* -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
*/
int sdma_send_txreq(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
bool pkts_sent)
{
@@ -2397,7 +2390,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
/* user should have supplied entire packet */
if (unlikely(tx->tlen))
return -EINVAL;
- tx->wait = wait;
+ tx->wait = iowait_ioww_to_iow(wait);
spin_lock_irqsave(&sde->tail_lock, flags);
retry:
if (unlikely(!__sdma_running(sde)))
@@ -2406,14 +2399,14 @@ retry:
goto nodesc;
tail = submit_tx(sde, tx);
if (wait)
- iowait_sdma_inc(wait);
+ iowait_sdma_inc(iowait_ioww_to_iow(wait));
sdma_update_tail(sde, tail);
unlock:
spin_unlock_irqrestore(&sde->tail_lock, flags);
return ret;
unlock_noconn:
if (wait)
- iowait_sdma_inc(wait);
+ iowait_sdma_inc(iowait_ioww_to_iow(wait));
tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
tx->sn = sde->tail_sn++;
@@ -2422,10 +2415,7 @@ unlock_noconn:
spin_lock(&sde->flushlist_lock);
list_add_tail(&tx->list, &sde->flushlist);
spin_unlock(&sde->flushlist_lock);
- if (wait) {
- wait->tx_count++;
- wait->count += tx->num_desc;
- }
+ iowait_inc_wait_count(wait, tx->num_desc);
schedule_work(&sde->flush_worker);
ret = -ECOMM;
goto unlock;
@@ -2442,9 +2432,9 @@ nodesc:
/**
* sdma_send_txlist() - submit a list of tx req to ring
* @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
+ * @wait: SE wait structure to use when full (may be NULL)
* @tx_list: list of sdma_txreqs to submit
- * @count: pointer to a u32 which, after return will contain the total number of
+ * @count: pointer to a u16 which, after return, will contain the total number of
* sdma_txreqs removed from the tx_list. This will include sdma_txreqs
* whose SDMA descriptors are submitted to the ring and the sdma_txreqs
* which are added to the SDMA engine flush list if the SDMA engine state is
@@ -2467,8 +2457,8 @@ nodesc:
* -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
* -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
*/
-int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
- struct list_head *tx_list, u32 *count_out)
+int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
+ struct list_head *tx_list, u16 *count_out)
{
struct sdma_txreq *tx, *tx_next;
int ret = 0;
@@ -2479,7 +2469,7 @@ int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
spin_lock_irqsave(&sde->tail_lock, flags);
retry:
list_for_each_entry_safe(tx, tx_next, tx_list, list) {
- tx->wait = wait;
+ tx->wait = iowait_ioww_to_iow(wait);
if (unlikely(!__sdma_running(sde)))
goto unlock_noconn;
if (unlikely(tx->num_desc > sde->desc_avail))
@@ -2500,8 +2490,9 @@ retry:
update_tail:
total_count = submit_count + flush_count;
if (wait) {
- iowait_sdma_add(wait, total_count);
- iowait_starve_clear(submit_count > 0, wait);
+ iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
+ iowait_starve_clear(submit_count > 0,
+ iowait_ioww_to_iow(wait));
}
if (tail != INVALID_TAIL)
sdma_update_tail(sde, tail);
@@ -2511,7 +2502,7 @@ update_tail:
unlock_noconn:
spin_lock(&sde->flushlist_lock);
list_for_each_entry_safe(tx, tx_next, tx_list, list) {
- tx->wait = wait;
+ tx->wait = iowait_ioww_to_iow(wait);
list_del_init(&tx->list);
tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
@@ -2520,10 +2511,7 @@ unlock_noconn:
#endif
list_add_tail(&tx->list, &sde->flushlist);
flush_count++;
- if (wait) {
- wait->tx_count++;
- wait->count += tx->num_desc;
- }
+ iowait_inc_wait_count(wait, tx->num_desc);
}
spin_unlock(&sde->flushlist_lock);
schedule_work(&sde->flush_worker);
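
With the signature changes above, submitters hand sdma_send_txlist() a per-leg iowait_work and a u16 counter. A hedged usage sketch mirroring the user_sdma.c call site converted later in this patch (req/pq are that caller's request and packet queue):

	u16 count = 0;
	int ret = sdma_send_txlist(req->sde, iowait_get_ib_work(&pq->busy),
				   &req->txps, &count);
	/* count covers txreqs put on the ring and any parked on the flushlist */
	req->seqsubmitted += count;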
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 46c775f255d1..6dc63d7c5685 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -62,16 +62,6 @@
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)
-#define SDMA_TXREQ_S_OK 0
-#define SDMA_TXREQ_S_SENDERROR 1
-#define SDMA_TXREQ_S_ABORTED 2
-#define SDMA_TXREQ_S_SHUTDOWN 3
-
-/* flags bits */
-#define SDMA_TXREQ_F_URGENT 0x0001
-#define SDMA_TXREQ_F_AHG_COPY 0x0002
-#define SDMA_TXREQ_F_USE_AHG 0x0004
-
#define SDMA_MAP_NONE 0
#define SDMA_MAP_SINGLE 1
#define SDMA_MAP_PAGE 2
@@ -415,6 +405,7 @@ struct sdma_engine {
struct list_head flushlist;
struct cpumask cpu_mask;
struct kobject kobj;
+ u32 msix_intr;
};
int sdma_init(struct hfi1_devdata *dd, u8 port);
@@ -849,16 +840,16 @@ static inline int sdma_txadd_kvaddr(
dd, SDMA_MAP_SINGLE, tx, addr, len);
}
-struct iowait;
+struct iowait_work;
int sdma_send_txreq(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
bool pkts_sent);
int sdma_send_txlist(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct list_head *tx_list,
- u32 *count);
+ u16 *count_out);
int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 25e867393463..2be513d4c9da 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -494,17 +494,18 @@ static struct kobj_type hfi1_vl2mtu_ktype = {
* Start of per-unit (or driver, in some cases, but replicated
* per unit) functions (these get a device *)
*/
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
{
struct hfi1_ibdev *dev =
container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hfi(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
@@ -517,8 +518,9 @@ static ssize_t show_hfi(struct device *device, struct device_attribute *attr,
ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
return ret;
}
+static DEVICE_ATTR_RO(board_id);
-static ssize_t show_boardversion(struct device *device,
+static ssize_t boardversion_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -528,8 +530,9 @@ static ssize_t show_boardversion(struct device *device,
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
+static DEVICE_ATTR_RO(boardversion);
-static ssize_t show_nctxts(struct device *device,
+static ssize_t nctxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -546,8 +549,9 @@ static ssize_t show_nctxts(struct device *device,
min(dd->num_user_contexts,
(u32)dd->sc_sizes[SC_USER].count));
}
+static DEVICE_ATTR_RO(nctxts);
-static ssize_t show_nfreectxts(struct device *device,
+static ssize_t nfreectxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -557,8 +561,9 @@ static ssize_t show_nfreectxts(struct device *device,
/* Return the number of free user ports (contexts) available. */
return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}
+static DEVICE_ATTR_RO(nfreectxts);
-static ssize_t show_serial(struct device *device,
+static ssize_t serial_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -567,8 +572,9 @@ static ssize_t show_serial(struct device *device,
return scnprintf(buf, PAGE_SIZE, "%s", dd->serial);
}
+static DEVICE_ATTR_RO(serial);
-static ssize_t store_chip_reset(struct device *device,
+static ssize_t chip_reset_store(struct device *device,
struct device_attribute *attr, const char *buf,
size_t count)
{
@@ -586,6 +592,7 @@ static ssize_t store_chip_reset(struct device *device,
bail:
return ret < 0 ? ret : count;
}
+static DEVICE_ATTR_WO(chip_reset);
/*
* Convert the reported temperature from an integer (reported in
@@ -598,7 +605,7 @@ bail:
/*
* Dump tempsense values, in decimal, to ease shell scripts.
*/
-static ssize_t show_tempsense(struct device *device,
+static ssize_t tempsense_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -622,6 +629,7 @@ static ssize_t show_tempsense(struct device *device,
}
return ret;
}
+static DEVICE_ATTR_RO(tempsense);
/*
* end of per-unit (or driver, in some cases, but replicated
@@ -629,24 +637,20 @@ static ssize_t show_tempsense(struct device *device,
*/
/* start of per-unit file structures and support code */
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hfi, NULL);
-static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
-static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
-static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-
-static struct device_attribute *hfi1_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_board_id,
- &dev_attr_nctxts,
- &dev_attr_nfreectxts,
- &dev_attr_serial,
- &dev_attr_boardversion,
- &dev_attr_tempsense,
- &dev_attr_chip_reset,
+static struct attribute *hfi1_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_nctxts.attr,
+ &dev_attr_nfreectxts.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_boardversion.attr,
+ &dev_attr_tempsense.attr,
+ &dev_attr_chip_reset.attr,
+ NULL,
+};
+
+const struct attribute_group ib_hfi1_attr_group = {
+ .attrs = hfi1_attributes,
};
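
With the attributes gathered into ib_hfi1_attr_group, the per-file device_create_file() loop can collapse to a single hook made before ib_register_device(). A hedged sketch of the expected wiring (the call belongs in verbs.c, outside this hunk):

	rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev,
				    &ib_hfi1_attr_group);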
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
@@ -832,12 +836,6 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
struct device *class_dev = &dev->dev;
int i, j, ret;
- for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i) {
- ret = device_create_file(&dev->dev, hfi1_attributes[i]);
- if (ret)
- goto bail;
- }
-
for (i = 0; i < dd->num_sdma; i++) {
ret = kobject_init_and_add(&dd->per_sdma[i].kobj,
&sde_ktype, &class_dev->kobj,
@@ -855,9 +853,6 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
return 0;
bail:
- for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i)
- device_remove_file(&dev->dev, hfi1_attributes[i]);
-
for (i = 0; i < dd->num_sdma; i++)
kobject_del(&dd->per_sdma[i].kobj);
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 8540463ef3f7..84458f1325e1 100644
--- a/drivers/infiniband/hw/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -62,3 +62,4 @@ __print_symbolic(etype, \
#include "trace_rx.h"
#include "trace_tx.h"
#include "trace_mmu.h"
+#include "trace_iowait.h"
diff --git a/drivers/infiniband/hw/hfi1/trace_iowait.h b/drivers/infiniband/hw/hfi1/trace_iowait.h
new file mode 100644
index 000000000000..27f4334ece2b
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_iowait.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#if !defined(__HFI1_TRACE_IOWAIT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_IOWAIT_H
+
+#include <linux/tracepoint.h>
+#include "iowait.h"
+#include "verbs.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_iowait
+
+DECLARE_EVENT_CLASS(hfi1_iowait_template,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag),
+ TP_STRUCT__entry(/* entry */
+ __field(unsigned long, addr)
+ __field(unsigned long, flags)
+ __field(u32, flag)
+ __field(u32, qpn)
+ ),
+ TP_fast_assign(/* assign */
+ __entry->addr = (unsigned long)wait;
+ __entry->flags = wait->flags;
+ __entry->flag = (1 << flag);
+ __entry->qpn = iowait_to_qp(wait)->ibqp.qp_num;
+ ),
+ TP_printk(/* print */
+ "iowait 0x%lx qp %u flags 0x%lx flag 0x%x",
+ __entry->addr,
+ __entry->qpn,
+ __entry->flags,
+ __entry->flag
+ )
+ );
+
+DEFINE_EVENT(hfi1_iowait_template, hfi1_iowait_set,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag));
+
+DEFINE_EVENT(hfi1_iowait_template, hfi1_iowait_clear,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag));
+
+#endif /* __HFI1_TRACE_IOWAIT_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_iowait
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index e254dcec6f64..6aca0c5a7f97 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -88,7 +88,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
clear_ahg(qp);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done_free_tx;
}
@@ -140,7 +140,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp, wqe->wr.ex.invalidate_rkey);
local_ops = 1;
}
- hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
+ rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
: IB_WC_SUCCESS);
if (local_ops)
atomic_dec(&qp->local_ops_pending);
@@ -426,7 +426,7 @@ send_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto rewind;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, false, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
break;
case OP(SEND_LAST_WITH_IMMEDIATE):
@@ -449,7 +449,7 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
wc.opcode = IB_WC_RECV;
- hfi1_copy_sge(&qp->r_sge, data, tlen, false, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
@@ -523,7 +523,7 @@ rdma_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto drop;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -550,7 +550,7 @@ rdma_last_imm:
}
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
goto last_imm;
@@ -564,7 +564,7 @@ rdma_last:
tlen -= (hdrsize + extra_bytes);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
break;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 70d39fc450a1..4baa8f4d49de 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -210,8 +210,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
}
hfi1_make_grh(ibp, &grh, &grd, 0, 0);
- hfi1_copy_sge(&qp->r_sge, &grh,
- sizeof(grh), true, false);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else {
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
@@ -228,7 +228,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (len > sge->sge_length)
len = sge->sge_length;
WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
@@ -518,7 +518,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done_free_tx;
}
@@ -560,7 +560,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, tflags);
ps->flags = tflags;
- hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
goto done_free_tx;
}
}
@@ -1019,8 +1019,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
goto drop;
}
if (packet->grh) {
- hfi1_copy_sge(&qp->r_sge, packet->grh,
- sizeof(struct ib_grh), true, false);
+ rvt_copy_sge(qp, &qp->r_sge, packet->grh,
+ sizeof(struct ib_grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
struct ib_grh grh;
@@ -1030,14 +1030,14 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
* out when creating 16B, add back the GRH here.
*/
hfi1_make_ext_grh(packet, &grh, slid, dlid);
- hfi1_copy_sge(&qp->r_sge, &grh,
- sizeof(struct ib_grh), true, false);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(struct ib_grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else {
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
}
- hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
- true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
+ true, false);
rvt_put_ss(&qp->r_sge);
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 5c88706121c1..3f0aadccd9f6 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -76,8 +76,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
static unsigned initial_pkt_count = 8;
-static int user_sdma_send_pkts(struct user_sdma_request *req,
- unsigned maxpkts);
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
@@ -101,7 +100,7 @@ static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
static int defer_packet_queue(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *txreq,
uint seq,
bool pkts_sent);
@@ -124,13 +123,13 @@ static struct mmu_rb_ops sdma_rb_ops = {
static int defer_packet_queue(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *txreq,
uint seq,
bool pkts_sent)
{
struct hfi1_user_sdma_pkt_q *pq =
- container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+ container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
struct user_sdma_txreq *tx =
container_of(txreq, struct user_sdma_txreq, txreq);
@@ -187,13 +186,12 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
pq->ctxt = uctxt->ctxt;
pq->subctxt = fd->subctxt;
pq->n_max_reqs = hfi1_sdma_comp_ring_size;
- pq->state = SDMA_PKT_Q_INACTIVE;
atomic_set(&pq->n_reqs, 0);
init_waitqueue_head(&pq->wait);
atomic_set(&pq->n_locked, 0);
pq->mm = fd->mm;
- iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
+ iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
activate_packet_queue, NULL);
pq->reqidx = 0;
@@ -276,7 +274,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
/* Wait until all requests have been freed. */
wait_event_interruptible(
pq->wait,
- (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+ !atomic_read(&pq->n_reqs));
kfree(pq->reqs);
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
@@ -312,6 +310,13 @@ static u8 dlid_to_selector(u16 dlid)
return mapping[hash];
}
+/**
+ * hfi1_user_sdma_process_request() - Process and start a user sdma request
+ * @fd: valid file descriptor
+ * @iovec: array of io vectors to process
+ * @dim: overall iovec array size
+ * @count: number of io vector array entries processed
+ */
int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
struct iovec *iovec, unsigned long dim,
unsigned long *count)
@@ -328,7 +333,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
u8 opcode, sc, vl;
u16 pkey;
u32 slid;
- int req_queued = 0;
u16 dlid;
u32 selector;
@@ -392,7 +396,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
req->data_len = 0;
req->pq = pq;
req->cq = cq;
- req->status = -1;
req->ahg_idx = -1;
req->iov_idx = 0;
req->sent = 0;
@@ -400,12 +403,14 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
req->seqcomp = 0;
req->seqsubmitted = 0;
req->tids = NULL;
- req->done = 0;
req->has_error = 0;
INIT_LIST_HEAD(&req->txps);
memcpy(&req->info, &info, sizeof(info));
+ /* The request is initialized, count it */
+ atomic_inc(&pq->n_reqs);
+
if (req_opcode(info.ctrl) == EXPECTED) {
/* expected must have a TID info and at least one data vector */
if (req->data_iovs < 2) {
@@ -500,7 +505,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
ret = pin_vector_pages(req, &req->iovs[i]);
if (ret) {
req->data_iovs = i;
- req->status = ret;
goto free_req;
}
req->data_len += req->iovs[i].iov.iov_len;
@@ -561,23 +565,11 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
req->ahg_idx = sdma_ahg_alloc(req->sde);
set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
- atomic_inc(&pq->n_reqs);
- req_queued = 1;
+ pq->state = SDMA_PKT_Q_ACTIVE;
/* Send the first N packets in the request to buy us some time */
ret = user_sdma_send_pkts(req, pcount);
- if (unlikely(ret < 0 && ret != -EBUSY)) {
- req->status = ret;
+ if (unlikely(ret < 0 && ret != -EBUSY))
goto free_req;
- }
-
- /*
- * It is possible that the SDMA engine would have processed all the
- * submitted packets by the time we get here. Therefore, only set
- * packet queue state to ACTIVE if there are still uncompleted
- * requests.
- */
- if (atomic_read(&pq->n_reqs))
- xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
/*
* This is a somewhat blocking send implementation.
@@ -588,14 +580,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
while (req->seqsubmitted != req->info.npkts) {
ret = user_sdma_send_pkts(req, pcount);
if (ret < 0) {
- if (ret != -EBUSY) {
- req->status = ret;
- WRITE_ONCE(req->has_error, 1);
- if (READ_ONCE(req->seqcomp) ==
- req->seqsubmitted - 1)
- goto free_req;
- return ret;
- }
+ if (ret != -EBUSY)
+ goto free_req;
wait_event_interruptible_timeout(
pq->busy.wait_dma,
(pq->state == SDMA_PKT_Q_ACTIVE),
@@ -606,10 +592,19 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
*count += idx;
return 0;
free_req:
- user_sdma_free_request(req, true);
- if (req_queued)
+ /*
+ * If seqsubmitted == npkts, the completion routine
+ * controls the final state. If seqsubmitted < npkts, wait for any
+ * outstanding packets to finish before cleaning up.
+ */
+ if (req->seqsubmitted < req->info.npkts) {
+ if (req->seqsubmitted)
+ wait_event(pq->busy.wait_dma,
+ (req->seqcomp == req->seqsubmitted - 1));
+ user_sdma_free_request(req, true);
pq_update(pq);
- set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
+ set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
+ }
return ret;
}
@@ -760,9 +755,10 @@ static int user_sdma_txadd(struct user_sdma_request *req,
return ret;
}
-static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
{
- int ret = 0, count;
+ int ret = 0;
+ u16 count;
unsigned npkts = 0;
struct user_sdma_txreq *tx = NULL;
struct hfi1_user_sdma_pkt_q *pq = NULL;
@@ -864,8 +860,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
changes = set_txreq_header_ahg(req, tx,
datalen);
- if (changes < 0)
+ if (changes < 0) {
+ ret = changes;
goto free_tx;
+ }
}
} else {
ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
@@ -914,10 +912,11 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
npkts++;
}
dosend:
- ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
+ ret = sdma_send_txlist(req->sde,
+ iowait_get_ib_work(&pq->busy),
+ &req->txps, &count);
req->seqsubmitted += count;
if (req->seqsubmitted == req->info.npkts) {
- WRITE_ONCE(req->done, 1);
/*
* The txreq has already been submitted to the HW queue
* so we can free the AHG entry now. Corruption will not
@@ -1365,11 +1364,15 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
return idx;
}
-/*
- * SDMA tx request completion callback. Called when the SDMA progress
- * state machine gets notification that the SDMA descriptors for this
- * tx request have been processed by the DMA engine. Called in
- * interrupt context.
+/**
+ * user_sdma_txreq_cb() - SDMA tx request completion callback.
+ * @txreq: valid sdma tx request
+ * @status: success/failure of request
+ *
+ * Called when the SDMA progress state machine gets notification that
+ * the SDMA descriptors for this tx request have been processed by the
+ * DMA engine. Called in interrupt context.
+ * Only do work on completed sequences.
*/
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{
@@ -1378,7 +1381,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
struct user_sdma_request *req;
struct hfi1_user_sdma_pkt_q *pq;
struct hfi1_user_sdma_comp_q *cq;
- u16 idx;
+ enum hfi1_sdma_comp_state state = COMPLETE;
if (!tx->req)
return;
@@ -1391,39 +1394,25 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
SDMA_DBG(req, "SDMA completion with error %d",
status);
WRITE_ONCE(req->has_error, 1);
+ state = ERROR;
}
req->seqcomp = tx->seqnum;
kmem_cache_free(pq->txreq_cache, tx);
- tx = NULL;
-
- idx = req->info.comp_idx;
- if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
- if (req->seqcomp == req->info.npkts - 1) {
- req->status = 0;
- user_sdma_free_request(req, false);
- pq_update(pq);
- set_comp_state(pq, cq, idx, COMPLETE, 0);
- }
- } else {
- if (status != SDMA_TXREQ_S_OK)
- req->status = status;
- if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
- (READ_ONCE(req->done) ||
- READ_ONCE(req->has_error))) {
- user_sdma_free_request(req, false);
- pq_update(pq);
- set_comp_state(pq, cq, idx, ERROR, req->status);
- }
- }
+
+ /* if the sequence isn't complete yet, we are done */
+ if (req->seqcomp != req->info.npkts - 1)
+ return;
+
+ user_sdma_free_request(req, false);
+ set_comp_state(pq, cq, req->info.comp_idx, state, status);
+ pq_update(pq);
}
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{
- if (atomic_dec_and_test(&pq->n_reqs)) {
- xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
+ if (atomic_dec_and_test(&pq->n_reqs))
wake_up(&pq->wait);
- }
}
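
With SDMA_PKT_Q_INACTIVE gone, "queue is idle" reduces to n_reqs reaching zero, which pq_update() signals above. A restatement of the wait that hfi1_user_sdma_free_queues() now performs (taken from the hunk earlier in this file):

	wait_event_interruptible(pq->wait, !atomic_read(&pq->n_reqs));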
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
@@ -1448,6 +1437,8 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
if (!node)
continue;
+ req->iovs[i].node = NULL;
+
if (unpin)
hfi1_mmu_rb_remove(req->pq->handler,
&node->rb);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index d2bc77f75253..14dfd757dafd 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -105,9 +105,10 @@ static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
-#define SDMA_PKT_Q_INACTIVE BIT(0)
-#define SDMA_PKT_Q_ACTIVE BIT(1)
-#define SDMA_PKT_Q_DEFERRED BIT(2)
+enum pkt_q_sdma_state {
+ SDMA_PKT_Q_ACTIVE,
+ SDMA_PKT_Q_DEFERRED,
+};
/*
* Maximum retry attempts to submit a TX request
@@ -133,7 +134,7 @@ struct hfi1_user_sdma_pkt_q {
struct user_sdma_request *reqs;
unsigned long *req_in_use;
struct iowait busy;
- unsigned state;
+ enum pkt_q_sdma_state state;
wait_queue_head_t wait;
unsigned long unpinned;
struct mmu_rb_handler *handler;
@@ -203,14 +204,12 @@ struct user_sdma_request {
s8 ahg_idx;
/* Writeable fields shared with interrupt */
- u64 seqcomp ____cacheline_aligned_in_smp;
- u64 seqsubmitted;
- /* status of the last txreq completed */
- int status;
+ u16 seqcomp ____cacheline_aligned_in_smp;
+ u16 seqsubmitted;
/* Send side fields */
struct list_head txps ____cacheline_aligned_in_smp;
- u64 seqnum;
+ u16 seqnum;
/*
* KDETH.OFFSET (TID) field
* The offset can cover multiple packets, depending on the
@@ -228,7 +227,6 @@ struct user_sdma_request {
u16 tididx;
/* progress index moving along the iovs array */
u8 iov_idx;
- u8 done;
u8 has_error;
struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
@@ -248,7 +246,7 @@ struct user_sdma_txreq {
struct user_sdma_request *req;
u16 flags;
unsigned int busycount;
- u64 seqnum;
+ u16 seqnum;
};
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index a7c586a5589d..48e11e510358 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -129,8 +129,6 @@ unsigned short piothreshold = 256;
module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");
-#define COPY_CACHELESS 1
-#define COPY_ADAPTIVE 2
static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
@@ -151,159 +149,13 @@ static int pio_wait(struct rvt_qp *qp,
/* 16B trailing buffer */
static const u8 trail_buf[MAX_16B_PADDING];
-static uint wss_threshold;
+static uint wss_threshold = 80;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");
-/* memory working set size */
-struct hfi1_wss {
- unsigned long *entries;
- atomic_t total_count;
- atomic_t clean_counter;
- atomic_t clean_entry;
-
- int threshold;
- int num_entries;
- long pages_mask;
-};
-
-static struct hfi1_wss wss;
-
-int hfi1_wss_init(void)
-{
- long llc_size;
- long llc_bits;
- long table_size;
- long table_bits;
-
- /* check for a valid percent range - default to 80 if none or invalid */
- if (wss_threshold < 1 || wss_threshold > 100)
- wss_threshold = 80;
- /* reject a wildly large period */
- if (wss_clean_period > 1000000)
- wss_clean_period = 256;
- /* reject a zero period */
- if (wss_clean_period == 0)
- wss_clean_period = 1;
-
- /*
- * Calculate the table size - the next power of 2 larger than the
- * LLC size. LLC size is in KiB.
- */
- llc_size = wss_llc_size() * 1024;
- table_size = roundup_pow_of_two(llc_size);
-
- /* one bit per page in rounded up table */
- llc_bits = llc_size / PAGE_SIZE;
- table_bits = table_size / PAGE_SIZE;
- wss.pages_mask = table_bits - 1;
- wss.num_entries = table_bits / BITS_PER_LONG;
-
- wss.threshold = (llc_bits * wss_threshold) / 100;
- if (wss.threshold == 0)
- wss.threshold = 1;
-
- atomic_set(&wss.clean_counter, wss_clean_period);
-
- wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
- GFP_KERNEL);
- if (!wss.entries) {
- hfi1_wss_exit();
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void hfi1_wss_exit(void)
-{
- /* coded to handle partially initialized and repeat callers */
- kfree(wss.entries);
- wss.entries = NULL;
-}
-
-/*
- * Advance the clean counter. When the clean period has expired,
- * clean an entry.
- *
- * This is implemented in atomics to avoid locking. Because multiple
- * variables are involved, it can be racy which can lead to slightly
- * inaccurate information. Since this is only a heuristic, this is
- * OK. Any inaccuracies will clean themselves out as the counter
- * advances. That said, it is unlikely the entry clean operation will
- * race - the next possible racer will not start until the next clean
- * period.
- *
- * The clean counter is implemented as a decrement to zero. When zero
- * is reached an entry is cleaned.
- */
-static void wss_advance_clean_counter(void)
-{
- int entry;
- int weight;
- unsigned long bits;
-
- /* become the cleaner if we decrement the counter to zero */
- if (atomic_dec_and_test(&wss.clean_counter)) {
- /*
- * Set, not add, the clean period. This avoids an issue
- * where the counter could decrement below the clean period.
- * Doing a set can result in lost decrements, slowing the
- * clean advance. Since this is a heuristic, this possible
- * slowdown is OK.
- *
- * An alternative is to loop, advancing the counter by a
- * clean period until the result is > 0. However, this could
- * lead to several threads keeping another in the clean loop.
- * This could be mitigated by limiting the number of times
- * we stay in the loop.
- */
- atomic_set(&wss.clean_counter, wss_clean_period);
-
- /*
- * Uniquely grab the entry to clean and move to next.
- * The current entry is always the lower bits of
- * wss.clean_entry. The table size, wss.num_entries,
- * is always a power-of-2.
- */
- entry = (atomic_inc_return(&wss.clean_entry) - 1)
- & (wss.num_entries - 1);
-
- /* clear the entry and count the bits */
- bits = xchg(&wss.entries[entry], 0);
- weight = hweight64((u64)bits);
- /* only adjust the contended total count if needed */
- if (weight)
- atomic_sub(weight, &wss.total_count);
- }
-}
-
-/*
- * Insert the given address into the working set array.
- */
-static void wss_insert(void *address)
-{
- u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
- u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
- u32 nr = page & (BITS_PER_LONG - 1);
-
- if (!test_and_set_bit(nr, &wss.entries[entry]))
- atomic_inc(&wss.total_count);
-
- wss_advance_clean_counter();
-}
-
-/*
- * Is the working set larger than the threshold?
- */
-static inline bool wss_exceeds_threshold(void)
-{
- return atomic_read(&wss.total_count) >= wss.threshold;
-}
-
/*
* Translate ib_wr_opcode into ib_wc_opcode.
*/
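
The deleted working-set-size machinery is not lost; it moves into rdmavt, keyed off the same module parameters. A hedged sketch of the registration-time wiring this series is expected to add elsewhere in verbs.c (the dparms field names are assumed from the rdmavt side of the series, which is not part of this hunk):

	dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
	dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
	dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;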
@@ -438,79 +290,6 @@ static const u32 pio_opmask[BIT(3)] = {
*/
__be64 ib_hfi1_sys_image_guid;
-/**
- * hfi1_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- * @release: boolean to release MR
- * @copy_last: do a separate copy of the last 8 bytes
- */
-void hfi1_copy_sge(
- struct rvt_sge_state *ss,
- void *data, u32 length,
- bool release,
- bool copy_last)
-{
- struct rvt_sge *sge = &ss->sge;
- int i;
- bool in_last = false;
- bool cacheless_copy = false;
-
- if (sge_copy_mode == COPY_CACHELESS) {
- cacheless_copy = length >= PAGE_SIZE;
- } else if (sge_copy_mode == COPY_ADAPTIVE) {
- if (length >= PAGE_SIZE) {
- /*
- * NOTE: this *assumes*:
- * o The first vaddr is the dest.
- * o If multiple pages, then vaddr is sequential.
- */
- wss_insert(sge->vaddr);
- if (length >= (2 * PAGE_SIZE))
- wss_insert(sge->vaddr + PAGE_SIZE);
-
- cacheless_copy = wss_exceeds_threshold();
- } else {
- wss_advance_clean_counter();
- }
- }
- if (copy_last) {
- if (length > 8) {
- length -= 8;
- } else {
- copy_last = false;
- in_last = true;
- }
- }
-
-again:
- while (length) {
- u32 len = rvt_get_sge_length(sge, length);
-
- WARN_ON_ONCE(len == 0);
- if (unlikely(in_last)) {
- /* enforce byte transfer ordering */
- for (i = 0; i < len; i++)
- ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
- } else if (cacheless_copy) {
- cacheless_memcpy(sge->vaddr, data, len);
- } else {
- memcpy(sge->vaddr, data, len);
- }
- rvt_update_sge(ss, len, release);
- data += len;
- length -= len;
- }
-
- if (copy_last) {
- copy_last = false;
- in_last = true;
- length = 8;
- goto again;
- }
-}
-
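hfi1_copy_sge, removed here in favour of the rdmavt copy path, combines two ideas: an adaptive choice of copy engine (transfers of at least a page are sampled into the working set, and the cacheless, non-temporal copy is used once the set exceeds the LLC-derived threshold, since streaming writes that large would only evict useful cache lines), and a copy_last tail, which bulk-copies everything except the final 8 bytes and then writes those one byte at a time so the tail is observably stored last. A condensed sketch of just the tail handling (a hypothetical helper; a plain C loop illustrates the structure, not the exact ordering guarantees):

	#include <string.h>
	#include <stddef.h>

	static void copy_with_ordered_tail(void *dst, const void *src, size_t len)
	{
		size_t body = len > 8 ? len - 8 : 0;
		size_t i;

		memcpy(dst, src, body);		/* bulk of the payload */
		for (i = body; i < len; i++)	/* last 8 bytes, in order */
			((unsigned char *)dst)[i] =
				((const unsigned char *)src)[i];
	}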
/*
* Make sure the QP is ready and able to accept the given opcode.
*/
@@ -713,7 +492,7 @@ static void verbs_sdma_complete(
spin_lock(&qp->s_lock);
if (tx->wqe) {
- hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
struct hfi1_opa_header *hdr;
@@ -737,7 +516,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
write_seqlock(&dev->iowait_lock);
list_add_tail(&ps->s_txreq->txreq.list,
- &priv->s_iowait.tx_head);
+ &ps->wait->tx_head);
if (list_empty(&priv->s_iowait.list)) {
if (list_empty(&dev->memwait))
mod_timer(&dev->mem_timer, jiffies + 1);
@@ -748,7 +527,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
rvt_get_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_qp_unbusy(qp, ps->wait);
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -950,8 +729,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (unlikely(ret))
goto bail_build;
}
- ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq,
- ps->pkts_sent);
+ ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);
if (unlikely(ret < 0)) {
if (ret == -ECOMM)
goto bail_ecomm;
@@ -1001,7 +779,7 @@ static int pio_wait(struct rvt_qp *qp,
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
write_seqlock(&dev->iowait_lock);
list_add_tail(&ps->s_txreq->txreq.list,
- &priv->s_iowait.tx_head);
+ &ps->wait->tx_head);
if (list_empty(&priv->s_iowait.list)) {
struct hfi1_ibdev *dev = &dd->verbs_dev;
int was_empty;
@@ -1020,7 +798,7 @@ static int pio_wait(struct rvt_qp *qp,
hfi1_sc_wantpiobuf_intr(sc, 1);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_qp_unbusy(qp, ps->wait);
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1160,7 +938,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
pio_bail:
if (qp->s_wqe) {
spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(qp, qp->s_wqe, wc_status);
+ rvt_send_complete(qp, qp->s_wqe, wc_status);
spin_unlock_irqrestore(&qp->s_lock, flags);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1367,7 +1145,7 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
hfi1_cdbg(PIO, "%s() Failed. Completing with err",
__func__);
spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
return -EINVAL;
@@ -1943,7 +1721,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
- dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
+ dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe;
dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
hfi1_comp_vect_mappings_lookup;
@@ -1956,10 +1734,16 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
+ dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
+ dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
+ dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;
/* post send table */
dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
+ /* opcode translation table */
+ dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode;
+
ppd = dd->pport;
for (i = 0; i < dd->num_pports; i++, ppd++)
rvt_init_port(&dd->verbs_dev.rdi,
@@ -1967,6 +1751,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
i,
ppd->pkeys);
+ rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev,
+ &ib_hfi1_attr_group);
+
ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_HFI1);
if (ret)
goto err_verbs_txreq;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index a4d06502f06d..64c9054db5f3 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -166,11 +166,13 @@ struct hfi1_qp_priv {
* This structure is used to hold commonly lookedup and computed values during
* the send engine progress.
*/
+struct iowait_work;
struct hfi1_pkt_state {
struct hfi1_ibdev *dev;
struct hfi1_ibport *ibp;
struct hfi1_pportdata *ppd;
struct verbs_txreq *s_txreq;
+ struct iowait_work *wait;
unsigned long flags;
unsigned long timeout;
unsigned long timeout_int;
@@ -247,7 +249,7 @@ static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
return container_of(rdi, struct hfi1_ibdev, rdi);
}
-static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
+static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
{
struct hfi1_qp_priv *priv;
@@ -313,9 +315,6 @@ void hfi1_put_txreq(struct verbs_txreq *tx);
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-void hfi1_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
- bool release, bool copy_last);
-
void hfi1_cnp_rcv(struct hfi1_packet *packet);
void hfi1_uc_rcv(struct hfi1_packet *packet);
@@ -343,7 +342,8 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
-int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ bool *call_send);
extern const u32 rc_only_opcode;
extern const u32 uc_only_opcode;
@@ -363,9 +363,6 @@ void hfi1_do_send_from_rvt(struct rvt_qp *qp);
void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status);
-
void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn);
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
@@ -390,28 +387,6 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
-int hfi1_wss_init(void);
-void hfi1_wss_exit(void);
-
-/* platform specific: return the lowest level cache (llc) size, in KiB */
-static inline int wss_llc_size(void)
-{
- /* assume that the boot CPU value is universal for all CPUs */
- return boot_cpu_data.x86_cache_size;
-}
-
-/* platform specific: cacheless copy */
-static inline void cacheless_memcpy(void *dst, void *src, size_t n)
-{
- /*
- * Use the only available X64 cacheless copy. Add a __user cast
- * to quiet sparse. The src argument is already in the kernel so
- * there are no security issues. The extra fault recovery machinery
- * is not invoked.
- */
- __copy_user_nocache(dst, (void __user *)src, n, 0);
-}
-
static inline bool opa_bth_is_migration(struct ib_other_headers *ohdr)
{
return ohdr->bth[1] & cpu_to_be32(OPA_BTH_MIG_REQ);
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 1c19bbc764b2..2a77af26a231 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -102,22 +102,19 @@ static inline struct sdma_txreq *get_sdma_txreq(struct verbs_txreq *tx)
return &tx->txreq;
}
-static inline struct verbs_txreq *get_waiting_verbs_txreq(struct rvt_qp *qp)
+static inline struct verbs_txreq *get_waiting_verbs_txreq(struct iowait_work *w)
{
struct sdma_txreq *stx;
- struct hfi1_qp_priv *priv = qp->priv;
- stx = iowait_get_txhead(&priv->s_iowait);
+ stx = iowait_get_txhead(w);
if (stx)
return container_of(stx, struct verbs_txreq, txreq);
return NULL;
}
-static inline bool verbs_txreq_queued(struct rvt_qp *qp)
+static inline bool verbs_txreq_queued(struct iowait_work *w)
{
- struct hfi1_qp_priv *priv = qp->priv;
-
- return iowait_packet_queued(&priv->s_iowait);
+ return iowait_packet_queued(w);
}
void hfi1_put_txreq(struct verbs_txreq *tx);
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index c643d80c5a53..c9876d9e3cb9 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -120,7 +120,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
uctxt->seq_cnt = 1;
uctxt->is_vnic = true;
- hfi1_set_vnic_msix_info(uctxt);
+ msix_request_rcd_irq(uctxt);
hfi1_stats.sps_ctxts++;
dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
@@ -135,8 +135,6 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
flush_wc();
- hfi1_reset_vnic_msix_info(uctxt);
-
/*
* Disable receive context and interrupt available, reset all
* RcvCtxtCtrl bits to default values.
@@ -148,6 +146,10 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
+ /* msix_intr will always be > 0, only clean up if this is true */
+ if (uctxt->msix_intr)
+ msix_free_irq(dd, uctxt->msix_intr);
+
uctxt->event_flags = 0;
hfi1_clear_tids(uctxt);
@@ -626,7 +628,7 @@ static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
idr_remove(&dd->vnic.vesw_idr, vinfo->vesw_id);
/* ensure irqs see the change */
- hfi1_vnic_synchronize_irq(dd);
+ msix_vnic_synchronize_irq(dd);
/* remove unread skbs */
for (i = 0; i < vinfo->num_rx_q; i++) {
@@ -690,8 +692,6 @@ static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
rc = hfi1_vnic_txreq_init(dd);
if (rc)
goto txreq_fail;
-
- dd->vnic.msix_idx = dd->first_dyn_msix_idx;
}
for (i = dd->vnic.num_ctxt; i < vinfo->num_rx_q; i++) {
diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
index c3c96c5869ed..97bd940a056a 100644
--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -198,8 +198,8 @@ int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
goto free_desc;
tx->retry_count = 0;
- ret = sdma_send_txreq(sde, &vnic_sdma->wait, &tx->txreq,
- vnic_sdma->pkts_sent);
+ ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
+ &tx->txreq, vnic_sdma->pkts_sent);
/* When -ECOMM, sdma callback will be called with ABORT status */
if (unlikely(ret && unlikely(ret != -ECOMM)))
goto free_desc;
@@ -230,13 +230,13 @@ tx_err:
* become available.
*/
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *txreq,
uint seq,
bool pkts_sent)
{
struct hfi1_vnic_sdma *vnic_sdma =
- container_of(wait, struct hfi1_vnic_sdma, wait);
+ container_of(wait->iow, struct hfi1_vnic_sdma, wait);
struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
@@ -247,7 +247,7 @@ static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
write_seqlock(&dev->iowait_lock);
if (list_empty(&vnic_sdma->wait.list))
- iowait_queue(pkts_sent, wait, &sde->dmawait);
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
write_sequnlock(&dev->iowait_lock);
return -EBUSY;
}
@@ -285,7 +285,8 @@ void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
for (i = 0; i < vinfo->num_tx_q; i++) {
struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];
- iowait_init(&vnic_sdma->wait, 0, NULL, hfi1_vnic_sdma_sleep,
+ iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
+ hfi1_vnic_sdma_sleep,
hfi1_vnic_sdma_wakeup, NULL);
vnic_sdma->sde = &vinfo->dd->per_sdma[i];
vnic_sdma->dd = vinfo->dd;
@@ -295,10 +296,12 @@ void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
/* Add a free descriptor watermark for wakeups */
if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
+ struct iowait_work *work;
+
INIT_LIST_HEAD(&vnic_sdma->stx.list);
vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
- list_add_tail(&vnic_sdma->stx.list,
- &vnic_sdma->wait.tx_head);
+ work = iowait_get_ib_work(&vnic_sdma->wait);
+ list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
}
}
}
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index fddb5fdf92de..21c2100b2ea9 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_HNS
tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
+ depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
depends on ARM64 || (COMPILE_TEST && 64BIT)
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
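The added dependency is the standard Kconfig idiom for coupling tristates: "FOO || !FOO" is vacuously true when INFINIBAND_USER_ACCESS is y or n, but when it is m it caps INFINIBAND_HNS at m as well, so a built-in driver can never reference modular uverbs symbols. The general shape, sketched with hypothetical symbol names:

	# If FOO=m, BAR can be at most m; if FOO=y or n, no constraint.
	config BAR
		tristate "example driver"
		depends on FOO || !FOO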
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 0d96c5bb38cd..9990dc9eb96a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -49,6 +49,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ bool vlan_en = false;
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
@@ -58,8 +59,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
gid_attr = ah_attr->grh.sgid_attr;
- if (is_vlan_dev(gid_attr->ndev))
+ if (is_vlan_dev(gid_attr->ndev)) {
vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
+ vlan_en = true;
+ }
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) &
@@ -71,6 +74,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
HNS_ROCE_PORT_NUM_SHIFT));
ah->av.gid_index = grh->sgid_index;
ah->av.vlan = cpu_to_le16(vlan_tag);
+ ah->av.vlan_en = vlan_en;
dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
ah->av.vlan);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 9a24fd0ee3e7..d39bdfdb5de9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -88,8 +88,11 @@
#define BITMAP_RR 1
#define MR_TYPE_MR 0x00
+#define MR_TYPE_FRMR 0x01
#define MR_TYPE_DMA 0x03
+#define HNS_ROCE_FRMR_MAX_PA 512
+
#define PKEY_ID 0xffff
#define GUID_LEN 8
#define NODE_DESC_SIZE 64
@@ -193,6 +196,9 @@ enum {
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
+ HNS_ROCE_CAP_FLAG_MW = BIT(7),
+ HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
+ HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
};
enum hns_roce_mtt_type {
@@ -219,19 +225,11 @@ struct hns_roce_uar {
unsigned long logic_idx;
};
-struct hns_roce_vma_data {
- struct list_head list;
- struct vm_area_struct *vma;
- struct mutex *vma_list_mutex;
-};
-
struct hns_roce_ucontext {
struct ib_ucontext ibucontext;
struct hns_roce_uar uar;
struct list_head page_list;
struct mutex page_mutex;
- struct list_head vma_list;
- struct mutex vma_list_mutex;
};
struct hns_roce_pd {
@@ -293,6 +291,16 @@ struct hns_roce_mtt {
enum hns_roce_mtt_type mtt_type;
};
+struct hns_roce_mw {
+ struct ib_mw ibmw;
+ u32 pdn;
+ u32 rkey;
+ int enabled; /* MW's active status */
+ u32 pbl_hop_num;
+ u32 pbl_ba_pg_sz;
+ u32 pbl_buf_pg_sz;
+};
+
/* Only support 4K page size for mr register */
#define MR_SIZE_4K 0
@@ -304,6 +312,7 @@ struct hns_roce_mr {
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
u32 access;/* Access permission of MR */
+ u32 npages;
int enabled; /* MR's active status */
int type; /* MR's register type */
u64 *pbl_buf;/* MR's PBL space */
@@ -457,6 +466,7 @@ struct hns_roce_av {
u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[6];
__le16 vlan;
+ bool vlan_en;
};
struct hns_roce_ah {
@@ -656,6 +666,7 @@ struct hns_roce_eq_table {
};
struct hns_roce_caps {
+ u64 fw_ver;
u8 num_ports;
int gid_table_len[HNS_ROCE_MAX_PORTS];
int pkey_table_len[HNS_ROCE_MAX_PORTS];
@@ -665,7 +676,9 @@ struct hns_roce_caps {
u32 max_sq_sg; /* 2 */
u32 max_sq_inline; /* 32 */
u32 max_rq_sg; /* 2 */
+ u32 max_extend_sg;
int num_qps; /* 256k */
+ int reserved_qps;
u32 max_wqes; /* 16k */
u32 max_sq_desc_sz; /* 64 */
u32 max_rq_desc_sz; /* 64 */
@@ -738,6 +751,7 @@ struct hns_roce_work {
struct hns_roce_dev *hr_dev;
struct work_struct work;
u32 qpn;
+ u32 cqn;
int event_type;
int sub_type;
};
@@ -764,6 +778,8 @@ struct hns_roce_hw {
struct hns_roce_mr *mr, int flags, u32 pdn,
int mr_access_flags, u64 iova, u64 size,
void *mb_buf);
+ int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+ int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector);
@@ -863,6 +879,11 @@ static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
return container_of(ibmr, struct hns_roce_mr, ibmr);
}
+static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct hns_roce_mw, ibmw);
+}
+
static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct hns_roce_qp, ibqp);
@@ -968,12 +989,20 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata);
+struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
+struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
+ struct ib_udata *udata);
+int hns_roce_dealloc_mw(struct ib_mw *ibmw);
+
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 081aa91fc162..ca05810c92dc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -731,7 +731,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
cq_init_attr.comp_vector = 0;
cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
if (IS_ERR(cq)) {
- dev_err(dev, "Create cq for reseved loop qp failed!");
+ dev_err(dev, "Create cq for reserved loop qp failed!");
return -ENOMEM;
}
free_mr->mr_free_cq = to_hr_cq(cq);
@@ -744,7 +744,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
if (IS_ERR(pd)) {
- dev_err(dev, "Create pd for reseved loop qp failed!");
+ dev_err(dev, "Create pd for reserved loop qp failed!");
ret = -ENOMEM;
goto alloc_pd_failed;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0218c0f8c2a7..a4c62ae23a9a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -54,6 +54,59 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
+static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ struct hns_roce_wqe_frmr_seg *fseg,
+ const struct ib_reg_wr *wr)
+{
+ struct hns_roce_mr *mr = to_hr_mr(wr->mr);
+
+ /* use ib_access_flags */
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
+ wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
+ wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_RR_S,
+ wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_RW_S,
+ wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_LW_S,
+ wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
+
+ /* Data structure reuse may lead to confusion */
+ rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
+ rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
+
+ rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
+ rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
+ rc_sq_wqe->rkey = cpu_to_le32(wr->key);
+ rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
+
+ fseg->pbl_size = cpu_to_le32(mr->pbl_size);
+ roce_set_field(fseg->mode_buf_pg_sz,
+ V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
+ V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
+ mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+ roce_set_bit(fseg->mode_buf_pg_sz,
+ V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
+}
+
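set_frmr_seg() above backs the new IB_WR_REG_MR support: since a fast-register WQE has no use for msg_len and inv_key, they are reused to carry the low and high halves of the PBL base address (hence the reuse-may-confuse comment). From a consumer's point of view this is the standard kernel verbs fast-registration flow; a hedged sketch (helper name and access flags are illustrative):

	/* Map the SG list into the MR, then post a fast-register WQE. */
	static int fast_reg_mr(struct ib_qp *qp, struct ib_mr *mr,
			       struct scatterlist *sg, int nents)
	{
		struct ib_reg_wr wr = {};
		const struct ib_send_wr *bad_wr;
		int n;

		n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
		if (n < nents)
			return n < 0 ? n : -EINVAL;

		wr.wr.opcode	 = IB_WR_REG_MR;
		wr.wr.send_flags = IB_SEND_SIGNALED;
		wr.mr		 = mr;
		wr.key		 = mr->rkey;
		wr.access	 = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE;
		return ib_post_send(qp, &wr.wr, &bad_wr);
	}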
+static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
+ const struct ib_atomic_wr *wr)
+{
+ if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
+ aseg->cmp_data = cpu_to_le64(wr->compare_add);
+ } else {
+ aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
+ aseg->cmp_data = 0;
+ }
+}
+
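set_atomic_seg() packs the two 64-bit operands according to the opcode: for compare-and-swap, fetchadd_swap_data carries the swap value and cmp_data the compare value; for fetch-and-add, fetchadd_swap_data carries the addend and cmp_data is zeroed. On the caller side this corresponds to the standard ib_atomic_wr; a hedged sketch of posting an 8-byte compare-and-swap (parameter plumbing is assumed, not shown):

	static int post_cmp_swap(struct ib_qp *qp, u64 local_dma, u32 lkey,
				 u64 remote_va, u32 rkey, u64 expect, u64 newval)
	{
		struct ib_sge sge = {
			.addr	= local_dma,	/* 8-byte result buffer */
			.length	= 8,
			.lkey	= lkey,
		};
		struct ib_atomic_wr wr = {
			.wr = {
				.opcode	    = IB_WR_ATOMIC_CMP_AND_SWP,
				.sg_list    = &sge,
				.num_sge    = 1,
				.send_flags = IB_SEND_SIGNALED,
			},
			.remote_addr = remote_va,	/* must be 8-byte aligned */
			.compare_add = expect,		/* compared remotely */
			.swap	     = newval,		/* written on match */
			.rkey	     = rkey,
		};
		const struct ib_send_wr *bad_wr;

		return ib_post_send(qp, &wr.wr, &bad_wr);
	}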
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
unsigned int *sge_ind)
{
@@ -121,6 +174,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
}
if (wr->opcode == IB_WR_RDMA_READ) {
+ *bad_wr = wr;
dev_err(hr_dev->dev, "Not support inline data!\n");
return -EINVAL;
}
@@ -179,6 +233,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ struct hns_roce_wqe_frmr_seg *fseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
struct ib_qp_attr attr;
@@ -191,6 +246,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
int attr_mask;
u32 tmp_len;
int ret = 0;
+ u32 hr_op;
u8 *smac;
int nreq;
int i;
@@ -356,6 +412,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
V2_UD_SEND_WQE_BYTE_40_PORTN_S,
qp->port);
+ roce_set_bit(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
+ ah->av.vlan_en ? 1 : 0);
roce_set_field(ud_sq_wqe->byte_48,
V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
@@ -406,99 +465,100 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
switch (wr->opcode) {
case IB_WR_RDMA_READ:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_READ);
+ hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_RDMA_WRITE:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+ hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
+ hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_SEND:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND);
+ hr_op = HNS_ROCE_V2_WQE_OP_SEND;
break;
case IB_WR_SEND_WITH_INV:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
+ hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
break;
case IB_WR_SEND_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+ hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
break;
case IB_WR_LOCAL_INV:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+ hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
+ rc_sq_wqe->inv_key =
+ cpu_to_le32(wr->ex.invalidate_rkey);
+ break;
+ case IB_WR_REG_MR:
+ hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
+ fseg = wqe;
+ set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
break;
case IB_WR_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+ hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
+ rc_sq_wqe->rkey =
+ cpu_to_le32(atomic_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(atomic_wr(wr)->remote_addr);
break;
case IB_WR_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+ hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
+ rc_sq_wqe->rkey =
+ cpu_to_le32(atomic_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(atomic_wr(wr)->remote_addr);
break;
case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
+ hr_op =
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
break;
case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
+ hr_op =
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
break;
default:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_MASK);
+ hr_op = HNS_ROCE_V2_WQE_OP_MASK;
break;
}
- wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ struct hns_roce_v2_wqe_data_seg *dseg;
+
+ dseg = wqe;
+ set_data_seg_v2(dseg, wr->sg_list);
+ wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
+ set_atomic_seg(wqe, atomic_wr(wr));
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
+ wr->num_sge);
+ } else if (wr->opcode != IB_WR_REG_MR) {
+ ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
+ wqe, &sge_ind, bad_wr);
+ if (ret)
+ goto out;
+ }
- ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
- &sge_ind, bad_wr);
- if (ret)
- goto out;
ind++;
} else {
dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
@@ -935,7 +995,24 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
resp = (struct hns_roce_query_version *)desc.data;
hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
- hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);
+ hr_dev->vendor_id = hr_dev->pci_dev->vendor;
+
+ return 0;
+}
+
+static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_query_fw_info *resp;
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ resp = (struct hns_roce_query_fw_info *)desc.data;
+ hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
return 0;
}
@@ -1158,6 +1235,13 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
ret = hns_roce_cmq_query_hw_info(hr_dev);
if (ret) {
+ dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hns_roce_query_fw_ver(hr_dev);
+ if (ret) {
dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
ret);
return ret;
@@ -1185,14 +1269,16 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
- hr_dev->vendor_part_id = 0;
- hr_dev->sys_image_guid = 0;
+
+ hr_dev->vendor_part_id = hr_dev->pci_dev->device;
+ hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
+ caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
@@ -1222,6 +1308,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
+ caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
caps->qpc_ba_pg_sz = 0;
caps->qpc_buf_pg_sz = 0;
@@ -1255,6 +1342,11 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
HNS_ROCE_CAP_FLAG_RQ_INLINE |
HNS_ROCE_CAP_FLAG_RECORD_DB |
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
+
+ if (hr_dev->pci_dev->revision == 0x21)
+ caps->flags |= HNS_ROCE_CAP_FLAG_MW |
+ HNS_ROCE_CAP_FLAG_FRMR;
+
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
@@ -1262,6 +1354,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
+ if (hr_dev->pci_dev->revision == 0x21)
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC;
+
ret = hns_roce_v2_set_bt(hr_dev);
if (ret)
dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
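With HNS_ROCE_CAP_FLAG_ATOMIC set on revision-0x21 hardware, the device now reports IB_ATOMIC_HCA from query_device (see the hns_roce_main.c hunk below) and the MPT writer enables ATOMIC_EN from IB_ACCESS_REMOTE_ATOMIC. For remote atomics to land, the target memory must be registered with that access flag; for example from user space (a sketch, with buffer and PD setup assumed):

	#include <infiniband/verbs.h>

	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
				       IBV_ACCESS_LOCAL_WRITE |
				       IBV_ACCESS_REMOTE_WRITE |
				       IBV_ACCESS_REMOTE_ATOMIC);
	if (!mr)
		return -errno;	/* ibv_reg_mr sets errno on failure */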
@@ -1690,10 +1785,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
(mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
+ mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
(mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
@@ -1817,6 +1913,88 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
return 0;
}
+static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
+ mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, mr->pd);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
+ V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(mr->pbl_ba >> 3));
+
+ roce_set_field(mpt_entry->byte_64_buf_pa1,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+ mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+
+ return 0;
+}
+
+static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, mw->pdn);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_S,
+ mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
+ 0 : mw->pbl_hop_num);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
+ mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
+ mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
+
+ roce_set_field(mpt_entry->byte_64_buf_pa1,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+ mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+
+ mpt_entry->lkey = cpu_to_le32(mw->rkey);
+
+ return 0;
+}
+
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
@@ -2274,6 +2452,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->src_qp = (u8)roce_get_field(cqe->byte_32,
V2_CQE_BYTE_32_RMT_QPN_M,
V2_CQE_BYTE_32_RMT_QPN_S);
+ wc->slid = 0;
wc->wc_flags |= (roce_get_bit(cqe->byte_32,
V2_CQE_BYTE_32_GRH_S) ?
IB_WC_GRH : 0);
@@ -2287,7 +2466,14 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->smac[5] = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_SMAC_5_M,
V2_CQE_BYTE_28_SMAC_5_S);
- wc->vlan_id = 0xffff;
+ if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
+ wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_VID_M,
+ V2_CQE_BYTE_28_VID_S);
+ } else {
+ wc->vlan_id = 0xffff;
+ }
+
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
wc->network_hdr_type = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_PORT_TYPE_M,
@@ -2589,21 +2775,16 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
- roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
- V2_QPC_BYTE_60_MAPID_S, 0);
+ roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
+ V2_QPC_BYTE_60_TEMPID_S, 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid,
- V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
- 0);
+ roce_set_field(qpc_mask->byte_60_qpst_tempid,
+ V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_tempid,
+ V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_tempid,
+ V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
@@ -2685,7 +2866,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
- roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
+ roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
+ 0);
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
@@ -2694,8 +2876,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_144_raq,
V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
- roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
- 0);
roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
@@ -2721,14 +2901,12 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
-
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
@@ -2746,6 +2924,9 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
0);
+ roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
+ roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
+
roce_set_field(qpc_mask->byte_176_msg_pktn,
V2_QPC_BYTE_176_MSG_USE_PKTN_M,
V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
@@ -2790,6 +2971,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
+ roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
+ roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
+ 0);
+
qpc_mask->irrl_cur_sge_offset = 0;
roce_set_field(qpc_mask->byte_240_irrl_tail,
@@ -2955,13 +3143,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_56_dqpn_err,
V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
}
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
}
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
@@ -3271,13 +3452,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- roce_set_field(context->byte_60_qpst_mapid,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
- roce_set_field(qpc_mask->byte_60_qpst_mapid,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
-
context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
@@ -3538,6 +3712,17 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
}
+ if (is_vlan_dev(gid_attr->ndev)) {
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
+ roce_set_bit(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
+ }
+
roce_set_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, vlan);
@@ -3584,8 +3769,15 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, grh->traffic_class);
+ if (hr_dev->pci_dev->revision == 0x21 &&
+ gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ roce_set_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
+ grh->traffic_class >> 2);
+ else
+ roce_set_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
+ grh->traffic_class);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
@@ -3606,9 +3798,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
/* Every status migrate must change state */
- roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+ roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, new_state);
- roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+ roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, 0);
/* SW pass context to HW */
@@ -3728,7 +3920,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
goto out;
}
- state = roce_get_field(context->byte_60_qpst_mapid,
+ state = roce_get_field(context->byte_60_qpst_tempid,
V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
if (tmp_qp_state == -1) {
@@ -3995,13 +4187,103 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
{
struct hns_roce_work *irq_work =
container_of(work, struct hns_roce_work, work);
+ struct device *dev = irq_work->hr_dev->dev;
u32 qpn = irq_work->qpn;
+ u32 cqn = irq_work->cqn;
switch (irq_work->event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_info(dev, "Path migrated succeeded.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "Path migration failed.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_info(dev, "Communication established.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "Send queue drained.\n");
+ break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ dev_err(dev, "Local work queue catastrophic error.\n");
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ switch (irq_work->sub_type) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_err(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_err(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_err(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_err(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_err(dev, "QP %d, WQE shift error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n",
+ irq_work->sub_type);
+ break;
+ }
+ break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_err(dev, "Invalid request local work queue error.\n");
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ dev_err(dev, "Local access violation work queue error.\n");
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ switch (irq_work->sub_type) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_err(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_err(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_err(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_err(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_err(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_err(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n",
+ irq_work->sub_type);
+ break;
+ }
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ dev_warn(dev, "SRQ limit reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ dev_warn(dev, "SRQ last wqe reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ dev_err(dev, "SRQ catas error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_err(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ dev_warn(dev, "DB overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_FLR:
+ dev_warn(dev, "Function level reset.\n");
break;
default:
break;
@@ -4011,7 +4293,8 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
}
static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq, u32 qpn)
+ struct hns_roce_eq *eq,
+ u32 qpn, u32 cqn)
{
struct hns_roce_work *irq_work;
@@ -4022,6 +4305,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
irq_work->hr_dev = hr_dev;
irq_work->qpn = qpn;
+ irq_work->cqn = cqn;
irq_work->event_type = eq->event_type;
irq_work->sub_type = eq->sub_type;
queue_work(hr_dev->irq_workq, &(irq_work->work));
@@ -4058,124 +4342,6 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
hns_roce_write64_k(doorbell, eq->doorbell);
}
-static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- u32 qpn)
-{
- struct device *dev = hr_dev->dev;
- int sub_type;
-
- dev_warn(dev, "Local work queue catastrophic error.\n");
- sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
- HNS_ROCE_V2_AEQE_SUB_TYPE_S);
- switch (sub_type) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_warn(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_warn(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
- break;
- default:
- dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
- break;
- }
-}
-
-static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe, u32 qpn)
-{
- struct device *dev = hr_dev->dev;
- int sub_type;
-
- dev_warn(dev, "Local access violation work queue error.\n");
- sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
- HNS_ROCE_V2_AEQE_SUB_TYPE_S);
- switch (sub_type) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_warn(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_warn(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_warn(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_warn(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_warn(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_warn(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
- break;
- }
-}
-
-static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type, u32 qpn)
-{
- struct device *dev = hr_dev->dev;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- dev_warn(dev, "Communication established.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "Send queue drained.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "Invalid request local work queue error.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
- break;
- default:
- break;
- }
-
- hns_roce_qp_event(hr_dev, qpn, event_type);
-}
-
-static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type, u32 cqn)
-{
- struct device *dev = hr_dev->dev;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%x access err.\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
- break;
- default:
- break;
- }
-
- hns_roce_cq_event(hr_dev, cqn, event_type);
-}
-
static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
{
u32 buf_chk_sz;
@@ -4251,31 +4417,23 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_warn(dev, "Path migrated succeeded.\n");
- break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "Path migration failed.\n");
- break;
case HNS_ROCE_EVENT_TYPE_COMM_EST:
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type,
- qpn);
+ hns_roce_qp_event(hr_dev, qpn, event_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- dev_warn(dev, "SRQ not support.\n");
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type,
- cqn);
+ hns_roce_cq_event(hr_dev, cqn, event_type);
break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- dev_warn(dev, "DB overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_MB:
hns_roce_cmd_event(hr_dev,
@@ -4284,10 +4442,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
le64_to_cpu(aeqe->event.cmd.out_param));
break;
case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
- dev_warn(dev, "CEQ overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_FLR:
- dev_warn(dev, "Function level reset.\n");
break;
default:
dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
@@ -4304,7 +4460,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
dev_warn(dev, "cons_index overflow, set back to 0.\n");
eq->cons_index = 0;
}
- hns_roce_v2_init_irq_work(hr_dev, eq, qpn);
+ hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
}
set_eq_cons_index_v2(eq);
@@ -5125,6 +5281,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
create_singlethread_workqueue("hns_roce_irq_workqueue");
if (!hr_dev->irq_workq) {
dev_err(dev, "Create irq workqueue failed!\n");
+ ret = -ENOMEM;
goto err_request_irq_fail;
}
@@ -5195,6 +5352,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.set_mac = hns_roce_v2_set_mac,
.write_mtpt = hns_roce_v2_write_mtpt,
.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
+ .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
+ .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
.write_cqc = hns_roce_v2_write_cqc,
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 14aa308befef..8bc820635bbd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -50,6 +50,7 @@
#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
+#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
@@ -78,6 +79,7 @@
#define HNS_ROCE_INVALID_LKEY 0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
+#define HNS_ROCE_V2_RSV_QPS 8
#define HNS_ROCE_CONTEXT_HOP_NUM 1
#define HNS_ROCE_MTT_HOP_NUM 1
@@ -201,6 +203,7 @@ enum {
/* CMQ command */
enum hns_roce_opcode_type {
+ HNS_QUERY_FW_VER = 0x0001,
HNS_ROCE_OPC_QUERY_HW_VER = 0x8000,
HNS_ROCE_OPC_CFG_GLOBAL_PARAM = 0x8001,
HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
@@ -324,6 +327,7 @@ struct hns_roce_v2_cq_context {
enum {
V2_MPT_ST_VALID = 0x1,
+ V2_MPT_ST_FREE = 0x2,
};
enum hns_roce_v2_qp_state {
@@ -350,7 +354,7 @@ struct hns_roce_v2_qp_context {
__le32 dmac;
__le32 byte_52_udpspn_dmac;
__le32 byte_56_dqpn_err;
- __le32 byte_60_qpst_mapid;
+ __le32 byte_60_qpst_tempid;
__le32 qkey_xrcd;
__le32 byte_68_rq_db;
__le32 rq_db_record_addr;
@@ -492,26 +496,15 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
-#define V2_QPC_BYTE_60_MAPID_S 0
-#define V2_QPC_BYTE_60_MAPID_M GENMASK(12, 0)
+#define V2_QPC_BYTE_60_TEMPID_S 0
+#define V2_QPC_BYTE_60_TEMPID_M GENMASK(7, 0)
-#define V2_QPC_BYTE_60_INNER_MAP_IND_S 13
+#define V2_QPC_BYTE_60_SCC_TOKEN_S 8
+#define V2_QPC_BYTE_60_SCC_TOKEN_M GENMASK(26, 8)
-#define V2_QPC_BYTE_60_SQ_MAP_IND_S 14
+#define V2_QPC_BYTE_60_SQ_DB_DOING_S 27
-#define V2_QPC_BYTE_60_RQ_MAP_IND_S 15
-
-#define V2_QPC_BYTE_60_TEMPID_S 16
-#define V2_QPC_BYTE_60_TEMPID_M GENMASK(22, 16)
-
-#define V2_QPC_BYTE_60_EXT_MAP_IND_S 23
-
-#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S 24
-#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M GENMASK(26, 24)
-
-#define V2_QPC_BYTE_60_SQ_RLS_IND_S 27
-
-#define V2_QPC_BYTE_60_SQ_EXT_IND_S 28
+#define V2_QPC_BYTE_60_RQ_DB_DOING_S 28
#define V2_QPC_BYTE_60_QP_ST_S 29
#define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29)
@@ -534,6 +527,7 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_76_RQIE_S 28
+#define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30
#define V2_QPC_BYTE_80_RX_CQN_S 0
#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
@@ -588,7 +582,7 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_140_RR_MAX_S 12
#define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12)
-#define V2_QPC_BYTE_140_RSVD_RAQ_MAP_S 15
+#define V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S 15
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16)
@@ -599,8 +593,6 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0)
-#define V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S 24
-
#define V2_QPC_BYTE_144_RAQ_CREDIT_S 25
#define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25)
@@ -637,9 +629,10 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_168_LP_SGEN_INI_S 22
#define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 22)
-#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_S 24
-#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_M GENMASK(27, 24)
-
+#define V2_QPC_BYTE_168_SQ_VLAN_EN_S 24
+#define V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S 25
+#define V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S 26
+#define V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S 27
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28)
@@ -725,6 +718,10 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20)
+#define V2_QPC_BYTE_232_SO_LP_VLD_S 29
+#define V2_QPC_BYTE_232_FENCE_LP_VLD_S 30
+#define V2_QPC_BYTE_232_IRRL_LP_VLD_S 31
+
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0)
@@ -743,6 +740,9 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_244_RNR_CNT_S 27
#define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27)
+#define V2_QPC_BYTE_244_LCL_OP_FLG_S 30
+#define V2_QPC_BYTE_244_IRRL_RD_FLG_S 31
+
#define V2_QPC_BYTE_248_IRRL_PSN_S 0
#define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0)
@@ -818,6 +818,11 @@ struct hns_roce_v2_cqe {
#define V2_CQE_BYTE_28_PORT_TYPE_S 16
#define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16)
+#define V2_CQE_BYTE_28_VID_S 18
+#define V2_CQE_BYTE_28_VID_M GENMASK(29, 18)
+
+#define V2_CQE_BYTE_28_VID_VLD_S 30
+
#define V2_CQE_BYTE_32_RMT_QPN_S 0
#define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0)
@@ -878,8 +883,19 @@ struct hns_roce_v2_mpt_entry {
#define V2_MPT_BYTE_8_LW_EN_S 7
+#define V2_MPT_BYTE_8_MW_CNT_S 8
+#define V2_MPT_BYTE_8_MW_CNT_M GENMASK(31, 8)
+
+#define V2_MPT_BYTE_12_FRE_S 0
+
#define V2_MPT_BYTE_12_PA_S 1
+#define V2_MPT_BYTE_12_MR_MW_S 4
+
+#define V2_MPT_BYTE_12_BPD_S 5
+
+#define V2_MPT_BYTE_12_BQP_S 6
+
#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8
@@ -988,6 +1004,8 @@ struct hns_roce_v2_ud_send_wqe {
#define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24
#define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24)
+#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30
+
#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31
#define V2_UD_SEND_WQE_DMAC_0_S 0
@@ -1042,6 +1060,16 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
+#define V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S 19
+
+#define V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S 20
+
+#define V2_RC_FRMR_WQE_BYTE_4_RR_S 21
+
+#define V2_RC_FRMR_WQE_BYTE_4_RW_S 22
+
+#define V2_RC_FRMR_WQE_BYTE_4_LW_S 23
+
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)
@@ -1051,6 +1079,16 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+struct hns_roce_wqe_frmr_seg {
+ __le32 pbl_size;
+ __le32 mode_buf_pg_sz;
+};
+
+#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S 4
+#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M GENMASK(7, 4)
+
+#define V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S 8
+
struct hns_roce_v2_wqe_data_seg {
__le32 len;
__le32 lkey;
@@ -1068,6 +1106,11 @@ struct hns_roce_query_version {
__le32 rsv[5];
};
+struct hns_roce_query_fw_info {
+ __le32 fw_ver;
+ __le32 rsv[5];
+};
+
struct hns_roce_cfg_llm_a {
__le32 base_addr_l;
__le32 base_addr_h;
@@ -1564,4 +1607,9 @@ struct hns_roce_eq_context {
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
+struct hns_roce_wqe_atomic_seg {
+ __le64 fetchadd_swap_data;
+ __le64 cmp_data;
+};
+
#endif
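For reference, a minimal sketch (not part of this patch) of how the paired _S/_M definitions above are consumed; the helper below mirrors what the hns driver's roce_set_field() macro is assumed to do elsewhere in the tree, and the state value written is hypothetical:

static inline void set_ctx_field(__le32 *word, u32 mask, u32 shift, u32 val)
{
	u32 tmp = le32_to_cpu(*word);

	tmp &= ~mask;				/* clear the field */
	tmp |= (val << shift) & mask;		/* install the new value */
	*word = cpu_to_le32(tmp);
}

/* e.g. QP_ST occupies bits 31:29 of byte_60 per the defines above */
set_ctx_field(&context->byte_60_qpst_tempid,
	      V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S,
	      0x2 /* hypothetical state value */);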
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c5cae9a38c04..1b3ee514f2ef 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -196,6 +196,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
memset(props, 0, sizeof(*props));
+ props->fw_ver = hr_dev->caps.fw_ver;
props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
props->max_mr_size = (u64)(~(0ULL));
props->page_size_cap = hr_dev->caps.page_size_cap;
@@ -215,7 +216,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_pd = hr_dev->caps.num_pds;
props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
- props->atomic_cap = IB_ATOMIC_NONE;
+ props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE;
props->max_pkeys = 1;
props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
@@ -344,8 +346,6 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
if (ret)
goto error_fail_uar_alloc;
- INIT_LIST_HEAD(&context->vma_list);
- mutex_init(&context->vma_list_mutex);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
mutex_init(&context->page_mutex);
@@ -376,76 +376,34 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
return 0;
}
-static void hns_roce_vma_open(struct vm_area_struct *vma)
-{
- vma->vm_ops = NULL;
-}
-
-static void hns_roce_vma_close(struct vm_area_struct *vma)
-{
- struct hns_roce_vma_data *vma_data;
-
- vma_data = (struct hns_roce_vma_data *)vma->vm_private_data;
- vma_data->vma = NULL;
- mutex_lock(vma_data->vma_list_mutex);
- list_del(&vma_data->list);
- mutex_unlock(vma_data->vma_list_mutex);
- kfree(vma_data);
-}
-
-static const struct vm_operations_struct hns_roce_vm_ops = {
- .open = hns_roce_vma_open,
- .close = hns_roce_vma_close,
-};
-
-static int hns_roce_set_vma_data(struct vm_area_struct *vma,
- struct hns_roce_ucontext *context)
-{
- struct list_head *vma_head = &context->vma_list;
- struct hns_roce_vma_data *vma_data;
-
- vma_data = kzalloc(sizeof(*vma_data), GFP_KERNEL);
- if (!vma_data)
- return -ENOMEM;
-
- vma_data->vma = vma;
- vma_data->vma_list_mutex = &context->vma_list_mutex;
- vma->vm_private_data = vma_data;
- vma->vm_ops = &hns_roce_vm_ops;
-
- mutex_lock(&context->vma_list_mutex);
- list_add(&vma_data->list, vma_head);
- mutex_unlock(&context->vma_list_mutex);
-
- return 0;
-}
-
static int hns_roce_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma)
{
struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
- if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
- return -EINVAL;
+ switch (vma->vm_pgoff) {
+ case 0:
+ return rdma_user_mmap_io(context, vma,
+ to_hr_ucontext(context)->uar.pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
+
+ /* vm_pgoff: 1 -- TPTR */
+ case 1:
+ if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
+ return -EINVAL;
+ /*
+ * FIXME: using io_remap_pfn_range on the dma address returned
+ * by dma_alloc_coherent is totally wrong.
+ */
+ return rdma_user_mmap_io(context, vma,
+ hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+ hr_dev->tptr_size,
+ vma->vm_page_prot);
- if (vma->vm_pgoff == 0) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_hr_ucontext(context)->uar.pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
- } else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
- hr_dev->tptr_size) {
- /* vm_pgoff: 1 -- TPTR */
- if (io_remap_pfn_range(vma, vma->vm_start,
- hr_dev->tptr_dma_addr >> PAGE_SHIFT,
- hr_dev->tptr_size,
- vma->vm_page_prot))
- return -EAGAIN;
- } else
+ default:
return -EINVAL;
-
- return hns_roce_set_vma_data(vma, to_hr_ucontext(context));
+ }
}
static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
@@ -471,21 +429,6 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
- struct hns_roce_vma_data *vma_data, *n;
- struct vm_area_struct *vma;
-
- mutex_lock(&context->vma_list_mutex);
- list_for_each_entry_safe(vma_data, n, &context->vma_list, list) {
- vma = vma_data->vma;
- zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
-
- vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
- vma->vm_ops = NULL;
- list_del(&vma_data->list);
- kfree(vma_data);
- }
- mutex_unlock(&context->vma_list_mutex);
}
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
@@ -508,7 +451,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
- strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
ib_dev->owner = THIS_MODULE;
ib_dev->node_type = RDMA_NODE_IB_CA;
@@ -584,12 +526,27 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
}
+ /* MW */
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
+ ib_dev->alloc_mw = hns_roce_alloc_mw;
+ ib_dev->dealloc_mw = hns_roce_dealloc_mw;
+ ib_dev->uverbs_cmd_mask |=
+ (1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
+ (1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
+ }
+
+ /* FRMR */
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
+ ib_dev->alloc_mr = hns_roce_alloc_mr;
+ ib_dev->map_mr_sg = hns_roce_map_mr_sg;
+ }
+
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
ib_dev->driver_id = RDMA_DRIVER_HNS;
- ret = ib_register_device(ib_dev, NULL);
+ ret = ib_register_device(ib_dev, "hns_%d", NULL);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
return ret;
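The mmap conversion above replaces open-coded io_remap_pfn_range() calls and per-VMA bookkeeping with rdma_user_mmap_io(), which records each mapping so the core can zap it on disassociate. A minimal sketch of the calling pattern, with the pfn taken from a hypothetical per-context accessor:

static int example_mmap(struct ib_ucontext *context,
			struct vm_area_struct *vma)
{
	switch (vma->vm_pgoff) {
	case 0:	/* doorbell page must be mapped uncached */
		return rdma_user_mmap_io(context, vma,
					 ctx_uar_pfn(context), /* hypothetical */
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot));
	default:
		return -EINVAL;
	}
}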
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index eb26a5f6fc58..521ad2aa3a4e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -329,7 +329,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
u64 bt_idx;
u64 size;
- mhop_num = hr_dev->caps.pbl_hop_num;
+ mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
@@ -351,7 +351,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr;
- mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+ mr->pbl_hop_num = mhop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
@@ -511,7 +511,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->key = hw_index_to_key(index); /* MR key */
if (size == ~0ull) {
- mr->type = MR_TYPE_DMA;
mr->pbl_buf = NULL;
mr->pbl_dma_addr = 0;
/* PBL multi-hop addressing parameters */
@@ -522,7 +521,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->pbl_l1_dma_addr = NULL;
mr->pbl_l0_dma_addr = 0;
} else {
- mr->type = MR_TYPE_MR;
if (!hr_dev->caps.pbl_hop_num) {
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
@@ -548,9 +546,9 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
u32 mhop_num;
u64 bt_idx;
- npages = ib_umem_page_count(mr->umem);
+ npages = mr->pbl_size;
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- mhop_num = hr_dev->caps.pbl_hop_num;
+ mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return;
@@ -636,7 +634,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
}
if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
+ if (mr->type == MR_TYPE_MR)
+ npages = ib_umem_page_count(mr->umem);
if (!hr_dev->caps.pbl_hop_num)
dma_free_coherent(dev, (unsigned int)(npages * 8),
@@ -674,7 +673,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
goto err_table;
}
- ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ if (mr->type != MR_TYPE_FRMR)
+ ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ else
+ ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
if (ret) {
dev_err(dev, "Write mtpt fail!\n");
goto err_page;
@@ -855,6 +857,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
if (mr == NULL)
return ERR_PTR(-ENOMEM);
+ mr->type = MR_TYPE_DMA;
+
/* Allocate memory region key */
ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
~0ULL, acc, 0, mr);
@@ -1031,6 +1035,8 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
}
+ mr->type = MR_TYPE_MR;
+
ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
access_flags, n, mr);
if (ret)
@@ -1201,3 +1207,193 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr)
return ret;
}
+
+struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_mr *mr;
+ u64 length;
+ u32 page_size;
+ int ret;
+
+ page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
+ length = max_num_sg * page_size;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
+ dev_err(dev, "max_num_sg larger than %d\n",
+ HNS_ROCE_FRMR_MAX_PA);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->type = MR_TYPE_FRMR;
+
+ /* Allocate memory region key */
+ ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
+ 0, max_num_sg, mr);
+ if (ret)
+ goto err_free;
+
+ ret = hns_roce_mr_enable(hr_dev, mr);
+ if (ret)
+ goto err_mr;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+err_mr:
+ hns_roce_mr_free(to_hr_dev(pd->device), mr);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+
+ mr->pbl_buf[mr->npages++] = cpu_to_le64(addr);
+
+ return 0;
+}
+
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+
+ mr->npages = 0;
+
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+}
+
+static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mw *mw)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ if (mw->enabled) {
+ ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
+ & (hr_dev->caps.num_mtpts - 1));
+ if (ret)
+ dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);
+
+ hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
+ key_to_hw_index(mw->rkey));
+ }
+
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
+ key_to_hw_index(mw->rkey), BITMAP_NO_RR);
+}
+
+static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mw *mw)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = hr_dev->dev;
+ unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
+ int ret;
+
+ /* prepare HEM entry memory */
+ ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
+ if (ret)
+ return ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ret = PTR_ERR(mailbox);
+ goto err_table;
+ }
+
+ ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
+ if (ret) {
+ dev_err(dev, "MW write mtpt fail!\n");
+ goto err_page;
+ }
+
+ ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ if (ret) {
+ dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
+ goto err_page;
+ }
+
+ mw->enabled = 1;
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+err_page:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+err_table:
+ hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
+
+ return ret;
+}
+
+struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
+ struct hns_roce_mw *mw;
+ unsigned long index = 0;
+ int ret;
+
+ mw = kmalloc(sizeof(*mw), GFP_KERNEL);
+ if (!mw)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate a key for mw from bitmap */
+ ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
+ if (ret)
+ goto err_bitmap;
+
+ mw->rkey = hw_index_to_key(index);
+
+ mw->ibmw.rkey = mw->rkey;
+ mw->ibmw.type = type;
+ mw->pdn = to_hr_pd(ib_pd)->pdn;
+ mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+ mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+ mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+
+ ret = hns_roce_mw_enable(hr_dev, mw);
+ if (ret)
+ goto err_mw;
+
+ return &mw->ibmw;
+
+err_mw:
+ hns_roce_mw_free(hr_dev, mw);
+
+err_bitmap:
+ kfree(mw);
+
+ return ERR_PTR(ret);
+}
+
+int hns_roce_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
+ struct hns_roce_mw *mw = to_hr_mw(ibmw);
+
+ hns_roce_mw_free(hr_dev, mw);
+ kfree(mw);
+
+ return 0;
+}
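A hedged sketch of how a ULP would exercise the new FRMR entry points; ib_alloc_mr() and ib_map_mr_sg() are the standard core verbs that dispatch to hns_roce_alloc_mr() and hns_roce_map_mr_sg(), the max_num_sg value is illustrative, and the scatterlist is assumed to be DMA-mapped already:

struct ib_mr *mr;
int n;

mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16 /* max_num_sg, illustrative */);
if (IS_ERR(mr))
	return PTR_ERR(mr);

/* collects page addresses via the hns_roce_set_page() callback */
n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
/* a REG_MR work request is then posted to make the mapping live */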
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index efb7e961ca65..5ebf481a39d9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
@@ -343,6 +344,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
{
u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
u8 max_sq_stride = ilog2(roundup_sq_stride);
+ u32 ex_sge_num;
u32 page_size;
u32 max_cnt;
@@ -372,7 +374,18 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
if (hr_qp->sq.max_gs > 2)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
+
+ if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
+ if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
+ dev_err(hr_dev->dev,
+ "The extended sge cnt error! sge_cnt=%d\n",
+ hr_qp->sge.sge_cnt);
+ return -EINVAL;
+ }
+ }
+
hr_qp->sge.sge_shift = 4;
+ ex_sge_num = hr_qp->sge.sge_cnt;
/* Get buf size, SQ and RQ are aligned to page_size */
if (hr_dev->caps.max_sq_sg <= 2) {
@@ -386,6 +399,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ hr_qp->sge.sge_cnt =
+ max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num);
hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) +
HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
@@ -394,7 +409,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0;
- if (hr_qp->sge.sge_cnt) {
+ if (ex_sge_num) {
hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
@@ -465,6 +480,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sge.sge_shift = 4;
}
+ if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
+ if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
+ dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
+ hr_qp->sge.sge_cnt);
+ return -EINVAL;
+ }
+ }
+
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
@@ -472,6 +495,8 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+ hr_qp->sge.sge_cnt = max(page_size / (1 << hr_qp->sge.sge_shift),
+ (u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift, page_size);
@@ -952,8 +977,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
}
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask)) {
dev_err(dev, "ib_modify_qp_is_ok failed\n");
goto out;
}
@@ -1106,14 +1131,20 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
int reserved_from_top = 0;
+ int reserved_from_bot;
int ret;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
- /* A port include two SQP, six port total 12 */
+ /* In hw v1, each port includes two SQPs, so six ports reserve 12 in total */
+ if (hr_dev->caps.max_sq_sg <= 2)
+ reserved_from_bot = SQP_NUM;
+ else
+ reserved_from_bot = hr_dev->caps.reserved_qps;
+
ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
- hr_dev->caps.num_qps - 1, SQP_NUM,
+ hr_dev->caps.num_qps - 1, reserved_from_bot,
reserved_from_top);
if (ret) {
dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
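Illustrative numbers only (not from the patch): with num_qps = 256, hw v2 reserving 8 QPNs at the bottom and none at the top, the init above would leave QPNs 8..255 allocatable:

ret = hns_roce_bitmap_init(&qp_table->bitmap, 256 /* num */,
			   255 /* mask */, 8 /* reserved_bot */,
			   0 /* reserved_top */);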
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 423818a7d333..771eb6bd0785 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1689,7 +1689,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
unsigned long flags;
rtnl_lock();
- for_each_netdev_rcu(&init_net, ip_dev) {
+ for_each_netdev(&init_net, ip_dev) {
if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
(rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
(ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index e2e6c74a7452..102875872bea 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -2135,10 +2135,10 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
}
/**
- * i40iw_show_rev
+ * hw_rev_show
*/
-static ssize_t i40iw_show_rev(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i40iw_ib_device *iwibdev = container_of(dev,
struct i40iw_ib_device,
@@ -2147,34 +2147,37 @@ static ssize_t i40iw_show_rev(struct device *dev,
return sprintf(buf, "%x\n", hw_rev);
}
+static DEVICE_ATTR_RO(hw_rev);
/**
- * i40iw_show_hca
+ * hca_type_show
*/
-static ssize_t i40iw_show_hca(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "I40IW\n");
}
+static DEVICE_ATTR_RO(hca_type);
/**
- * i40iw_show_board
+ * board_id_show
*/
-static ssize_t i40iw_show_board(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
+static struct attribute *i40iw_dev_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *i40iw_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group i40iw_attr_group = {
+ .attrs = i40iw_dev_attributes,
};
/**
@@ -2752,7 +2755,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
i40iw_pr_err("iwdev == NULL\n");
return NULL;
}
- strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
iwibdev->ibdev.owner = THIS_MODULE;
iwdev->iwibdev = iwibdev;
iwibdev->iwdev = iwdev;
@@ -2851,20 +2853,6 @@ void i40iw_port_ibevent(struct i40iw_device *iwdev)
}
/**
- * i40iw_unregister_rdma_device - unregister of iwarp from IB
- * @iwibdev: rdma device ptr
- */
-static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
- device_remove_file(&iwibdev->ibdev.dev,
- i40iw_dev_attributes[i]);
- ib_unregister_device(&iwibdev->ibdev);
-}
-
-/**
* i40iw_destroy_rdma_device - destroy rdma device and free resources
* @iwibdev: IB device ptr
*/
@@ -2873,7 +2861,7 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
if (!iwibdev)
return;
- i40iw_unregister_rdma_device(iwibdev);
+ ib_unregister_device(&iwibdev->ibdev);
kfree(iwibdev->ibdev.iwcm);
iwibdev->ibdev.iwcm = NULL;
wait_event_timeout(iwibdev->iwdev->close_wq,
@@ -2888,32 +2876,19 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
*/
int i40iw_register_rdma_device(struct i40iw_device *iwdev)
{
- int i, ret;
+ int ret;
struct i40iw_ib_device *iwibdev;
iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
if (!iwdev->iwibdev)
return -ENOMEM;
iwibdev = iwdev->iwibdev;
-
+ rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
iwibdev->ibdev.driver_id = RDMA_DRIVER_I40IW;
- ret = ib_register_device(&iwibdev->ibdev, NULL);
+ ret = ib_register_device(&iwibdev->ibdev, "i40iw%d", NULL);
if (ret)
goto error;
- for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
- ret =
- device_create_file(&iwibdev->ibdev.dev,
- i40iw_dev_attributes[i]);
- if (ret) {
- while (i > 0) {
- i--;
- device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
- }
- ib_unregister_device(&iwibdev->ibdev);
- goto error;
- }
- }
return 0;
error:
kfree(iwdev->iwibdev->ibdev.iwcm);
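The sysfs rework above follows the standard pattern: DEVICE_ATTR_RO(name) requires a matching name_show() and emits dev_attr_name, and rdma_set_device_sysfs_group() hands the whole group to the core so ib_register_device() creates and removes the files itself. A compressed sketch with a hypothetical attribute:

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR_RO(foo);

static struct attribute *example_attrs[] = {
	&dev_attr_foo.attr,
	NULL
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

/* before registration: */
rdma_set_device_sysfs_group(ibdev, &example_attr_group);
ret = ib_register_device(ibdev, "example_%d", NULL);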
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index db4aa13ebae0..d1de3285fd88 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,7 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
depends on NETDEVICES && ETHERNET && PCI && INET
+ depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
depends on MAY_USE_DEVLINK
select NET_VENDOR_MELLANOX
select MLX4_CORE
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index e5466d786bb1..8942f5f7f04d 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -807,15 +807,17 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int err;
struct ib_port_attr pattr;
- if (in_wc && in_wc->qp->qp_num) {
- pr_debug("received MAD: slid:%d sqpn:%d "
- "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
- in_wc->slid, in_wc->src_qp,
- in_wc->dlid_path_bits,
- in_wc->qp->qp_num,
- in_wc->wc_flags,
- in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
- be16_to_cpu(in_mad->mad_hdr.attr_id));
+ if (in_wc && in_wc->qp) {
+ pr_debug("received MAD: port:%d slid:%d sqpn:%d "
+ "dlid_bits:%d dqpn:%d wc_flags:0x%x tid:%016llx cls:%x mtd:%x atr:%x\n",
+ port_num,
+ in_wc->slid, in_wc->src_qp,
+ in_wc->dlid_path_bits,
+ in_wc->qp->qp_num,
+ in_wc->wc_flags,
+ be64_to_cpu(in_mad->mad_hdr.tid),
+ in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
+ be16_to_cpu(in_mad->mad_hdr.attr_id));
if (in_wc->wc_flags & IB_WC_GRH) {
pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
be64_to_cpu(in_grh->sgid.global.subnet_prefix),
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0bbeaaae47e0..0def2323459c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1140,144 +1140,50 @@ static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
return 0;
}
-static void mlx4_ib_vma_open(struct vm_area_struct *area)
-{
- /* vma_open is called when a new VMA is created on top of our VMA.
- * This is done through either mremap flow or split_vma (usually due
- * to mlock, madvise, munmap, etc.). We do not support a clone of the
- * vma, as this VMA is strongly hardware related. Therefore we set the
- * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
- * calling us again and trying to do incorrect actions. We assume that
- * the original vma size is exactly a single page that there will be no
- * "splitting" operations on.
- */
- area->vm_ops = NULL;
-}
-
-static void mlx4_ib_vma_close(struct vm_area_struct *area)
-{
- struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
-
- /* It's guaranteed that all VMAs opened on a FD are closed before the
- * file itself is closed, therefore no sync is needed with the regular
- * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
- * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
- * The close operation is usually called under mm->mmap_sem except when
- * process is exiting. The exiting case is handled explicitly as part
- * of mlx4_ib_disassociate_ucontext.
- */
- mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
- area->vm_private_data;
-
- /* set the vma context pointer to null in the mlx4_ib driver's private
- * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
- */
- mlx4_ib_vma_priv_data->vma = NULL;
-}
-
-static const struct vm_operations_struct mlx4_ib_vm_ops = {
- .open = mlx4_ib_vma_open,
- .close = mlx4_ib_vma_close
-};
-
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- int i;
- struct vm_area_struct *vma;
- struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
-
- /* need to protect from a race on closing the vma as part of
- * mlx4_ib_vma_close().
- */
- for (i = 0; i < HW_BAR_COUNT; i++) {
- vma = context->hw_bar_info[i].vma;
- if (!vma)
- continue;
-
- zap_vma_ptes(context->hw_bar_info[i].vma,
- context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
-
- context->hw_bar_info[i].vma->vm_flags &=
- ~(VM_SHARED | VM_MAYSHARE);
- /* context going to be destroyed, should not access ops any more */
- context->hw_bar_info[i].vma->vm_ops = NULL;
- }
-}
-
-static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
- struct mlx4_ib_vma_private_data *vma_private_data)
-{
- vma_private_data->vma = vma;
- vma->vm_private_data = vma_private_data;
- vma->vm_ops = &mlx4_ib_vm_ops;
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct mlx4_ib_dev *dev = to_mdev(context->device);
- struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
- return -EINVAL;
-
- if (vma->vm_pgoff == 0) {
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_DB].vma)
- return -EINVAL;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_mucontext(context)->uar.pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
+ switch (vma->vm_pgoff) {
+ case 0:
+ return rdma_user_mmap_io(context, vma,
+ to_mucontext(context)->uar.pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
- } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_BF].vma)
+ case 1:
+ if (dev->dev->caps.bf_reg_size == 0)
return -EINVAL;
+ return rdma_user_mmap_io(
+ context, vma,
+ to_mucontext(context)->uar.pfn +
+ dev->dev->caps.num_uars,
+ PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_mucontext(context)->uar.pfn +
- dev->dev->caps.num_uars,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
-
- } else if (vma->vm_pgoff == 3) {
+ case 3: {
struct mlx4_clock_params params;
int ret;
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
- return -EINVAL;
-
ret = mlx4_get_internal_clock_params(dev->dev, &params);
-
if (ret)
return ret;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (pci_resource_start(dev->dev->persist->pdev,
- params.bar) +
- params.offset)
- >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma,
- &mucontext->hw_bar_info[HW_BAR_CLOCK]);
- } else {
- return -EINVAL;
+ return rdma_user_mmap_io(
+ context, vma,
+ (pci_resource_start(dev->dev->persist->pdev,
+ params.bar) +
+ params.offset) >>
+ PAGE_SHIFT,
+ PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
}
- return 0;
+ default:
+ return -EINVAL;
+ }
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
@@ -2133,39 +2039,43 @@ out:
return err;
}
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->dev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
dev->dev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mlx4_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *mlx4_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group mlx4_attr_group = {
+ .attrs = mlx4_class_attributes,
};
struct diag_counter {
@@ -2636,7 +2546,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->dev = dev;
ibdev->bond_next_port = 0;
- strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
ibdev->ib_dev.owner = THIS_MODULE;
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
@@ -2898,8 +2807,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_ib_alloc_diag_counters(ibdev))
goto err_steer_free_bitmap;
+ rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4;
- if (ib_register_device(&ibdev->ib_dev, NULL))
+ if (ib_register_device(&ibdev->ib_dev, "mlx4_%d", NULL))
goto err_diag_counters;
if (mlx4_ib_mad_init(ibdev))
@@ -2922,12 +2832,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_notif;
}
- for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
- if (device_create_file(&ibdev->ib_dev.dev,
- mlx4_class_attributes[j]))
- goto err_notif;
- }
-
ibdev->ib_active = true;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
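For orientation, the page-offset convention encoded in the mmap switch above, plus the matching (illustrative) userspace call for the BlueFlame page; cmd_fd and page_size are assumed to come from the opened uverbs context:

/*
 * vm_pgoff 0 -> UAR doorbell page, non-cached
 * vm_pgoff 1 -> BlueFlame page, write-combining (only if bf_reg_size != 0)
 * vm_pgoff 3 -> internal clock page, non-cached
 */
void *bf = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
		cmd_fd, 1 * page_size);	/* offset selects vm_pgoff = 1 */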
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 81ffc007e0a1..d844831179cf 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -673,7 +673,7 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
if (!list_empty(&group->pending_list))
req = list_first_entry(&group->pending_list,
struct mcast_req, group_list);
- if ((method == IB_MGMT_METHOD_GET_RESP)) {
+ if (method == IB_MGMT_METHOD_GET_RESP) {
if (req) {
send_reply_to_slave(req->func, group, &req->sa_mad, status);
--group->func[req->func].num_pend_reqs;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e10dccc7958f..8850dfc3826d 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -80,16 +80,11 @@ enum hw_bar_type {
HW_BAR_COUNT
};
-struct mlx4_ib_vma_private_data {
- struct vm_area_struct *vma;
-};
-
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
struct list_head db_page_list;
struct mutex db_page_mutex;
- struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
struct list_head wqn_ranges_list;
struct mutex wqn_ranges_mutex; /* protect wqn_ranges_list */
};
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 6dd3cd2c2f80..0711ca1dfb8f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2629,7 +2629,6 @@ enum {
static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
- enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
struct mlx4_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
@@ -2639,13 +2638,8 @@ static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (cur_state != new_state || cur_state != IB_QPS_RESET) {
- int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- ll = rdma_port_get_link_layer(&dev->ib_dev, port);
- }
-
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, ll)) {
+ attr_mask)) {
pr_debug("qpn 0x%x: invalid attribute mask specified "
"for transition %d to %d. qp_type %d,"
" attr_mask 0x%x\n",
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index e219093d2764..752bdd536130 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -818,9 +818,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
if (!mlx4_is_master(dev->dev))
return 0;
- dev->iov_parent =
- kobject_create_and_add("iov",
- kobject_get(dev->ib_dev.ports_parent->parent));
+ dev->iov_parent = kobject_create_and_add("iov", &dev->ib_dev.dev.kobj);
if (!dev->iov_parent) {
ret = -ENOMEM;
goto err;
@@ -850,7 +848,6 @@ err_add_entries:
err_ports:
kobject_put(dev->iov_parent);
err:
- kobject_put(dev->ib_dev.ports_parent->parent);
pr_err("mlx4_ib_device_register_sysfs error (%d)\n", ret);
return ret;
}
@@ -886,5 +883,4 @@ void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device)
kobject_put(device->ports_parent);
kobject_put(device->iov_parent);
kobject_put(device->iov_parent);
- kobject_put(device->ib_dev.ports_parent->parent);
}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index c84fef9a8a08..ca060a2e2b36 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -197,3 +197,132 @@ int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPCNT,
0, 0);
}
+
+void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {};
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+ MLX5_SET(destroy_tir_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+ MLX5_SET(destroy_tis_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {};
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(destroy_rqt_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
+ u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+ int err;
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+
+ return err;
+}
+
+void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
+ u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+ MLX5_SET(dealloc_pd_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
+ void *gid;
+
+ MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+ MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+ MLX5_SET(attach_to_mcg_in, in, uid, uid);
+ gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
+ void *gid;
+
+ MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+ MLX5_SET(detach_from_mcg_in, in, uid, uid);
+ gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+ MLX5_SET(alloc_xrcd_in, in, uid, uid);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
+ return err;
+}
+
+int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+ MLX5_SET(dealloc_xrcd_in, in, uid, uid);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
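All of the helpers added above thread a uid through MLX5_SET() so the firmware scopes each object to the user context that created it (uid 0 keeps the existing kernel-owned behaviour). A hedged sketch pairing alloc and dealloc under one uid:

u32 xrcdn;
int err;

err = mlx5_cmd_xrcd_alloc(mdev, &xrcdn, uid);
if (err)
	return err;
/* ... hand xrcdn to the consumer ... */
err = mlx5_cmd_xrcd_dealloc(mdev, xrcdn, uid); /* same uid on teardown */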
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 88cbb1c41703..c03c56455534 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -47,4 +47,18 @@ int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
u64 length, u32 alignment);
int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length);
+void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
+void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
+void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
+void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid);
+int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
+ u16 uid);
+void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
+ u16 uid);
+int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid);
+int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid);
+int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
+int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index cca1820802b8..7d769b5538b4 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -874,6 +874,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
}
+ MLX5_SET(create_cq_in, *cqb, uid, to_mucontext(context)->devx_uid);
return 0;
err_cqb:
@@ -1454,7 +1455,7 @@ ex:
return err;
}
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
struct mlx5_ib_cq *cq;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 66dc337e49a7..61aab7c0c513 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -19,7 +19,7 @@
#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
struct mlx5_core_dev *mdev;
- u32 obj_id;
+ u64 obj_id;
u32 dinlen; /* destroy inbox length */
u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
};
@@ -45,13 +45,14 @@ static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
return to_mucontext(ib_uverbs_get_ucontext(file));
}
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
u64 general_obj_types;
void *hdr;
int err;
+ u16 uid;
hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
@@ -60,9 +61,6 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *contex
!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
return -EINVAL;
- if (!capable(CAP_NET_RAW))
- return -EPERM;
-
MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
@@ -70,19 +68,18 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *contex
if (err)
return err;
- context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
- return 0;
+ uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return uid;
}
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context)
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, uid);
mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
@@ -109,150 +106,218 @@ bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
}
}
+/*
+ * Because the obj_id in the firmware is not globally unique, the object type
+ * must be considered when checking for a valid object id.
+ * To that end, the opcode of the creator command is encoded as part of the obj_id.
+ */
+static u64 get_enc_obj_id(u16 opcode, u32 obj_id)
+{
+ return ((u64)opcode << 32) | obj_id;
+}
+
static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
- u32 obj_id;
+ u64 obj_id;
switch (opcode) {
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
- obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT,
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_id));
break;
case MLX5_CMD_OP_QUERY_MKEY:
- obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
+ MLX5_GET(query_mkey_in, in,
+ mkey_index));
break;
case MLX5_CMD_OP_QUERY_CQ:
- obj_id = MLX5_GET(query_cq_in, in, cqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ MLX5_GET(query_cq_in, in, cqn));
break;
case MLX5_CMD_OP_MODIFY_CQ:
- obj_id = MLX5_GET(modify_cq_in, in, cqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ MLX5_GET(modify_cq_in, in, cqn));
break;
case MLX5_CMD_OP_QUERY_SQ:
- obj_id = MLX5_GET(query_sq_in, in, sqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ MLX5_GET(query_sq_in, in, sqn));
break;
case MLX5_CMD_OP_MODIFY_SQ:
- obj_id = MLX5_GET(modify_sq_in, in, sqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ MLX5_GET(modify_sq_in, in, sqn));
break;
case MLX5_CMD_OP_QUERY_RQ:
- obj_id = MLX5_GET(query_rq_in, in, rqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(query_rq_in, in, rqn));
break;
case MLX5_CMD_OP_MODIFY_RQ:
- obj_id = MLX5_GET(modify_rq_in, in, rqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(modify_rq_in, in, rqn));
break;
case MLX5_CMD_OP_QUERY_RMP:
- obj_id = MLX5_GET(query_rmp_in, in, rmpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
+ MLX5_GET(query_rmp_in, in, rmpn));
break;
case MLX5_CMD_OP_MODIFY_RMP:
- obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
+ MLX5_GET(modify_rmp_in, in, rmpn));
break;
case MLX5_CMD_OP_QUERY_RQT:
- obj_id = MLX5_GET(query_rqt_in, in, rqtn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ MLX5_GET(query_rqt_in, in, rqtn));
break;
case MLX5_CMD_OP_MODIFY_RQT:
- obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ MLX5_GET(modify_rqt_in, in, rqtn));
break;
case MLX5_CMD_OP_QUERY_TIR:
- obj_id = MLX5_GET(query_tir_in, in, tirn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ MLX5_GET(query_tir_in, in, tirn));
break;
case MLX5_CMD_OP_MODIFY_TIR:
- obj_id = MLX5_GET(modify_tir_in, in, tirn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ MLX5_GET(modify_tir_in, in, tirn));
break;
case MLX5_CMD_OP_QUERY_TIS:
- obj_id = MLX5_GET(query_tis_in, in, tisn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ MLX5_GET(query_tis_in, in, tisn));
break;
case MLX5_CMD_OP_MODIFY_TIS:
- obj_id = MLX5_GET(modify_tis_in, in, tisn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ MLX5_GET(modify_tis_in, in, tisn));
break;
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
- obj_id = MLX5_GET(query_flow_table_in, in, table_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
+ MLX5_GET(query_flow_table_in, in,
+ table_id));
break;
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
- obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
+ MLX5_GET(modify_flow_table_in, in,
+ table_id));
break;
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
- obj_id = MLX5_GET(query_flow_group_in, in, group_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
+ MLX5_GET(query_flow_group_in, in,
+ group_id));
break;
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
- obj_id = MLX5_GET(query_fte_in, in, flow_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
+ MLX5_GET(query_fte_in, in,
+ flow_index));
break;
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
- obj_id = MLX5_GET(set_fte_in, in, flow_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
+ MLX5_GET(set_fte_in, in, flow_index));
break;
case MLX5_CMD_OP_QUERY_Q_COUNTER:
- obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
+ MLX5_GET(query_q_counter_in, in,
+ counter_set_id));
break;
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
- obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
+ MLX5_GET(query_flow_counter_in, in,
+ flow_counter_id));
break;
case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
- obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_id));
break;
case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
- obj_id = MLX5_GET(query_scheduling_element_in, in,
- scheduling_element_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
+ MLX5_GET(query_scheduling_element_in,
+ in, scheduling_element_id));
break;
case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
- obj_id = MLX5_GET(modify_scheduling_element_in, in,
- scheduling_element_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
+ MLX5_GET(modify_scheduling_element_in,
+ in, scheduling_element_id));
break;
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
- obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
+ MLX5_GET(add_vxlan_udp_dport_in, in,
+ vxlan_udp_port));
break;
case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
- obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
+ MLX5_GET(query_l2_table_entry_in, in,
+ table_index));
break;
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
- obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
+ MLX5_GET(set_l2_table_entry_in, in,
+ table_index));
break;
case MLX5_CMD_OP_QUERY_QP:
- obj_id = MLX5_GET(query_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(query_qp_in, in, qpn));
break;
case MLX5_CMD_OP_RST2INIT_QP:
- obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rst2init_qp_in, in, qpn));
break;
case MLX5_CMD_OP_INIT2RTR_QP:
- obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(init2rtr_qp_in, in, qpn));
break;
case MLX5_CMD_OP_RTR2RTS_QP:
- obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rtr2rts_qp_in, in, qpn));
break;
case MLX5_CMD_OP_RTS2RTS_QP:
- obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rts2rts_qp_in, in, qpn));
break;
case MLX5_CMD_OP_SQERR2RTS_QP:
- obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(sqerr2rts_qp_in, in, qpn));
break;
case MLX5_CMD_OP_2ERR_QP:
- obj_id = MLX5_GET(qp_2err_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(qp_2err_in, in, qpn));
break;
case MLX5_CMD_OP_2RST_QP:
- obj_id = MLX5_GET(qp_2rst_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(qp_2rst_in, in, qpn));
break;
case MLX5_CMD_OP_QUERY_DCT:
- obj_id = MLX5_GET(query_dct_in, in, dctn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ MLX5_GET(query_dct_in, in, dctn));
break;
case MLX5_CMD_OP_QUERY_XRQ:
- obj_id = MLX5_GET(query_xrq_in, in, xrqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
+ MLX5_GET(query_xrq_in, in, xrqn));
break;
case MLX5_CMD_OP_QUERY_XRC_SRQ:
- obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
+ MLX5_GET(query_xrc_srq_in, in,
+ xrc_srqn));
break;
case MLX5_CMD_OP_ARM_XRC_SRQ:
- obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
+ MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
break;
case MLX5_CMD_OP_QUERY_SRQ:
- obj_id = MLX5_GET(query_srq_in, in, srqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
+ MLX5_GET(query_srq_in, in, srqn));
break;
case MLX5_CMD_OP_ARM_RQ:
- obj_id = MLX5_GET(arm_rq_in, in, srq_number);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(arm_rq_in, in, srq_number));
break;
case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
- obj_id = MLX5_GET(drain_dct_in, in, dctn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ MLX5_GET(drain_dct_in, in, dctn));
break;
case MLX5_CMD_OP_ARM_XRQ:
- obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
+ MLX5_GET(arm_xrq_in, in, xrqn));
break;
default:
return false;
@@ -264,11 +329,102 @@ static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
return false;
}
-static bool devx_is_obj_create_cmd(const void *in)
+static void devx_set_umem_valid(const void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
switch (opcode) {
+ case MLX5_CMD_OP_CREATE_MKEY:
+ MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
+ break;
+ case MLX5_CMD_OP_CREATE_CQ:
+ {
+ void *cqc;
+
+ MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
+ break;
+ }
+ case MLX5_CMD_OP_CREATE_QP:
+ {
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
+ MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_RQ:
+ {
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_SQ:
+ {
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_MODIFY_CQ:
+ MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
+ break;
+
+ case MLX5_CMD_OP_CREATE_RMP:
+ {
+ void *rmpc, *wq;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_XRQ:
+ {
+ void *xrqc, *wq;
+
+ xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
+ wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ {
+ void *xrc_srqc;
+
+ MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
+ xrc_srq_context_entry);
+ MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
+ break;
+ }
+
+ default:
+ return;
+ }
+}
+
+static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
+{
+ *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (*opcode) {
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
case MLX5_CMD_OP_CREATE_MKEY:
case MLX5_CMD_OP_CREATE_CQ:
@@ -385,12 +541,49 @@ static bool devx_is_obj_query_cmd(const void *in)
}
}
+static bool devx_is_whitelist_cmd(void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
+{
+ if (devx_is_whitelist_cmd(cmd_in)) {
+ struct mlx5_ib_dev *dev;
+
+ if (c->devx_uid)
+ return c->devx_uid;
+
+ dev = to_mdev(c->ibucontext.device);
+ if (dev->devx_whitelist_uid)
+ return dev->devx_whitelist_uid;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ return c->devx_uid;
+}
static bool devx_is_general_cmd(void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
switch (opcode) {
case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_VPORT_STATE:
case MLX5_CMD_OP_QUERY_ADAPTER:
case MLX5_CMD_OP_QUERY_ISSI:
@@ -498,14 +691,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
void *cmd_out;
int err;
+ int uid;
c = devx_ufile2uctx(file);
if (IS_ERR(c))
return PTR_ERR(c);
dev = to_mdev(c->ibucontext.device);
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
/* Only whitelisted general HCA commands are allowed for this method. */
if (!devx_is_general_cmd(cmd_in))
@@ -515,7 +710,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
if (IS_ERR(cmd_out))
return PTR_ERR(cmd_out);
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
err = mlx5_cmd_exec(dev->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
cmd_out, cmd_out_len);
@@ -726,11 +921,15 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj;
int err;
+ int uid;
+ u32 obj_id;
+ u16 opcode;
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
- if (!devx_is_obj_create_cmd(cmd_in))
+ if (!devx_is_obj_create_cmd(cmd_in, &opcode))
return -EINVAL;
cmd_out = uverbs_zalloc(attrs, cmd_out_len);
@@ -741,7 +940,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (!obj)
return -ENOMEM;
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ devx_set_umem_valid(cmd_in);
+
err = mlx5_cmd_exec(dev->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
cmd_out, cmd_out_len);
@@ -750,13 +951,15 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
uobj->object = obj;
obj->mdev = dev->mdev;
- devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
+ devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
+ &obj_id);
WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
if (err)
goto obj_destroy;
+ obj->obj_id = get_enc_obj_id(opcode, obj_id);
return 0;
obj_destroy:
@@ -778,9 +981,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
struct devx_obj *obj = uobj->object;
void *cmd_out;
int err;
+ int uid;
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
if (!devx_is_obj_modify_cmd(cmd_in))
return -EINVAL;
@@ -792,7 +997,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
if (IS_ERR(cmd_out))
return PTR_ERR(cmd_out);
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ devx_set_umem_valid(cmd_in);
+
err = mlx5_cmd_exec(obj->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
cmd_out, cmd_out_len);
@@ -815,9 +1022,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
struct devx_obj *obj = uobj->object;
void *cmd_out;
int err;
+ int uid;
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
if (!devx_is_obj_query_cmd(cmd_in))
return -EINVAL;
@@ -829,7 +1038,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
if (IS_ERR(cmd_out))
return PTR_ERR(cmd_out);
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
err = mlx5_cmd_exec(obj->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
cmd_out, cmd_out_len);
@@ -928,6 +1137,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
int err;
if (!c->devx_uid)
+ return -EINVAL;
+
+ if (!capable(CAP_NET_RAW))
return -EPERM;
obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
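[Editor's sketch, not part of the patch: the uid pattern the devx.c hunks above converge on. devx_get_uid(), devx_set_umem_valid() and MLX5_SET() are from the patched file; the wrapper function is hypothetical.]

	static int example_devx_cmd_prepare(struct mlx5_ib_ucontext *c,
					    void *cmd_in)
	{
		int uid;

		/* Whitelisted commands may fall back to the device-wide
		 * whitelist uid; all other commands require a per-context
		 * DEVX uid plus CAP_NET_RAW.
		 */
		uid = devx_get_uid(c, cmd_in);
		if (uid < 0)
			return uid;

		/* Stamp the resolved uid into the command header and mark
		 * umem handles valid on create/modify paths.
		 */
		MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
		devx_set_umem_valid(cmd_in);
		return 0;
	}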
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index 1a29f47f836e..f86cdcafdafc 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -7,7 +7,9 @@
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
+#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
@@ -16,6 +18,24 @@
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
+static int
+mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
+ enum mlx5_flow_namespace_type *namespace)
+{
+ switch (table_type) {
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
+ *namespace = MLX5_FLOW_NAMESPACE_BYPASS;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_EGRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
[MLX5_IB_FLOW_TYPE_NORMAL] = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
@@ -38,11 +58,15 @@ static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
},
};
+#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
+ struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
struct mlx5_ib_flow_handler *flow_handler;
struct mlx5_ib_flow_matcher *fs_matcher;
+ struct ib_uobject **arr_flow_actions;
+ struct ib_uflow_resources *uflow_res;
void *devx_obj;
int dest_id, dest_type;
void *cmd_in;
@@ -52,6 +76,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct ib_uobject *uobj =
uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ int len, ret, i;
if (!capable(CAP_NET_RAW))
return -EPERM;
@@ -61,7 +86,14 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
dest_qp = uverbs_attr_is_valid(attrs,
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
- if ((dest_devx && dest_qp) || (!dest_devx && !dest_qp))
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS &&
+ ((dest_devx && dest_qp) || (!dest_devx && !dest_qp)))
+ return -EINVAL;
+
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS &&
+ (dest_devx || dest_qp))
return -EINVAL;
if (dest_devx) {
@@ -75,7 +107,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
*/
if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
return -EINVAL;
- } else {
+ } else if (dest_qp) {
struct mlx5_ib_qp *mqp;
qp = uverbs_attr_get_obj(attrs,
@@ -92,6 +124,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
else
dest_id = mqp->raw_packet_qp.rq.tirn;
dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ } else {
+ dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
}
if (dev->rep)
@@ -101,16 +135,48 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
inlen = uverbs_attr_get_len(attrs,
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
- fs_matcher = uverbs_attr_get_obj(attrs,
- MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
- flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, cmd_in, inlen,
+
+ uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
+ if (!uflow_res)
+ return -ENOMEM;
+
+ len = uverbs_attr_get_uobjs_arr(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
+ for (i = 0; i < len; i++) {
+ struct mlx5_ib_flow_action *maction =
+ to_mflow_act(arr_flow_actions[i]->object);
+
+ ret = parse_flow_flow_action(maction, false, &flow_act);
+ if (ret)
+ goto err_out;
+ flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
+ arr_flow_actions[i]->object);
+ }
+
+ ret = uverbs_copy_from(&flow_act.flow_tag, attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_TAG);
+ if (!ret) {
+ if (flow_act.flow_tag >= BIT(24)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+ flow_act.flags |= FLOW_ACT_HAS_TAG;
+ }
+
+ flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, &flow_act,
+ cmd_in, inlen,
dest_id, dest_type);
- if (IS_ERR(flow_handler))
- return PTR_ERR(flow_handler);
+ if (IS_ERR(flow_handler)) {
+ ret = PTR_ERR(flow_handler);
+ goto err_out;
+ }
- ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev);
+ ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);
return 0;
+err_out:
+ ib_uverbs_flow_resources_free(uflow_res);
+ return ret;
}
static int flow_matcher_cleanup(struct ib_uobject *uobject,
@@ -134,12 +200,14 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
struct mlx5_ib_flow_matcher *obj;
+ u32 flags;
int err;
obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
if (!obj)
return -ENOMEM;
+ obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
obj->mask_len = uverbs_attr_get_len(
attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
err = uverbs_copy_from(&obj->matcher_mask,
@@ -165,6 +233,19 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
if (err)
goto end;
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ IB_FLOW_ATTR_FLAGS_EGRESS);
+ if (err)
+ goto end;
+
+ if (flags) {
+ err = mlx5_ib_ft_type_to_namespace(
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX, &obj->ns_type);
+ if (err)
+ goto end;
+ }
+
uobj->object = obj;
obj->mdev = dev->mdev;
atomic_set(&obj->usecnt, 0);
@@ -175,6 +256,248 @@ end:
return err;
}
+void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
+{
+ switch (maction->flow_action_raw.sub_type) {
+ case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
+ mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
+ maction->flow_action_raw.action_id);
+ break;
+ case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
+ mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
+ maction->flow_action_raw.action_id);
+ break;
+ case MLX5_IB_FLOW_ACTION_DECAP:
+ break;
+ default:
+ break;
+ }
+}
+
+static struct ib_flow_action *
+mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
+ enum mlx5_ib_uapi_flow_table_type ft_type,
+ u8 num_actions, void *in)
+{
+ enum mlx5_flow_namespace_type namespace;
+ struct mlx5_ib_flow_action *maction;
+ int ret;
+
+ ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ maction = kzalloc(sizeof(*maction), GFP_KERNEL);
+ if (!maction)
+ return ERR_PTR(-ENOMEM);
+
+ ret = mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in,
+ &maction->flow_action_raw.action_id);
+
+ if (ret) {
+ kfree(maction);
+ return ERR_PTR(ret);
+ }
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
+ maction->flow_action_raw.dev = dev;
+
+ return &maction->ib_action;
+}
+
+static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
+{
+ return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ max_modify_header_actions) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, max_modify_header_actions);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
+ struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ enum mlx5_ib_uapi_flow_table_type ft_type;
+ struct ib_flow_action *action;
+ size_t num_actions;
+ void *in;
+ int len;
+ int ret;
+
+ if (!mlx5_ib_modify_header_supported(mdev))
+ return -EOPNOTSUPP;
+
+ in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
+ len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
+
+ if (len % MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto))
+ return -EINVAL;
+
+ ret = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
+ if (ret)
+ return ret;
+
+ num_actions = len / MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
+ if (IS_ERR(action))
+ return PTR_ERR(action);
+
+ uverbs_flow_action_fill_action(action, uobj, uobj->context->device,
+ IB_FLOW_ACTION_UNSPECIFIED);
+
+ return 0;
+}
+
+static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
+ u8 packet_reformat_type,
+ u8 ft_type)
+{
+ switch (packet_reformat_type) {
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
+ return MLX5_CAP_FLOWTABLE(ibdev->mdev,
+ encap_general_header);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
+ return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
+ reformat_l2_to_l3_tunnel);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
+ return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
+ reformat_l3_tunnel_to_l2);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
+ return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int mlx5_ib_dv_to_prm_packet_reformat_type(u8 dv_prt, u8 *prm_prt)
+{
+ switch (dv_prt) {
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_flow_action_create_packet_reformat_ctx(
+ struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_action *maction,
+ u8 ft_type, u8 dv_prt,
+ void *in, size_t len)
+{
+ enum mlx5_flow_namespace_type namespace;
+ u8 prm_prt;
+ int ret;
+
+ ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
+ if (ret)
+ return ret;
+
+ ret = mlx5_ib_dv_to_prm_packet_reformat_type(dv_prt, &prm_prt);
+ if (ret)
+ return ret;
+
+ ret = mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
+ in, namespace,
+ &maction->flow_action_raw.action_id);
+ if (ret)
+ return ret;
+
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
+ maction->flow_action_raw.dev = dev;
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
+ struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
+ enum mlx5_ib_uapi_flow_table_type ft_type;
+ struct mlx5_ib_flow_action *maction;
+ int ret;
+
+ ret = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&dv_prt, attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
+ if (ret)
+ return ret;
+
+ if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
+ return -EOPNOTSUPP;
+
+ maction = kzalloc(sizeof(*maction), GFP_KERNEL);
+ if (!maction)
+ return -ENOMEM;
+
+ if (dv_prt ==
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_DECAP;
+ maction->flow_action_raw.dev = mdev;
+ } else {
+ void *in;
+ int len;
+
+ in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto free_maction;
+ }
+
+ len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
+
+ ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
+ maction, ft_type, dv_prt, in, len);
+ if (ret)
+ goto free_maction;
+ }
+
+ uverbs_flow_action_fill_action(&maction->ib_action, uobj,
+ uobj->context->device,
+ IB_FLOW_ACTION_UNSPECIFIED);
+ return 0;
+
+free_maction:
+ kfree(maction);
+ return ret;
+}
+
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_CREATE_FLOW,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
@@ -195,7 +518,15 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ACCESS_READ),
UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
MLX5_IB_OBJECT_DEVX_OBJ,
- UVERBS_ACCESS_READ));
+ UVERBS_ACCESS_READ),
+ UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_READ, 1,
+ MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_DESTROY_FLOW,
@@ -210,6 +541,44 @@ ADD_UVERBS_METHODS(mlx5_ib_fs,
&UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
+ UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
+ set_action_in_add_action_in_auto)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
+ UVERBS_ATTR_MIN_SIZE(1),
+ UA_ALLOC_AND_COPY,
+ UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
+ enum mlx5_ib_uapi_flow_action_packet_reformat_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(
+ mlx5_ib_flow_actions,
+ UVERBS_OBJECT_FLOW_ACTION,
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));
+
+DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
MLX5_IB_OBJECT_FLOW_MATCHER,
@@ -224,7 +593,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
UVERBS_ATTR_TYPE(u8),
- UA_MANDATORY));
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ enum ib_flow_flags,
+ UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
@@ -247,6 +619,7 @@ int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
root[i++] = &flow_objects;
root[i++] = &mlx5_ib_fs;
+ root[i++] = &mlx5_ib_flow_actions;
return i;
}
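[Editor's sketch of the table-type to namespace mapping flow.c now relies on; mlx5_ib_ft_type_to_namespace() is from the patch, the helper around it is hypothetical.]

	static int example_pick_namespace(bool egress,
					  enum mlx5_flow_namespace_type *ns)
	{
		/* NIC RX maps to the bypass namespace, NIC TX to egress;
		 * any other table type is rejected with -EINVAL. Matchers
		 * created with IB_FLOW_ATTR_FLAGS_EGRESS take the TX path
		 * and may not carry a QP or DEVX destination.
		 */
		return mlx5_ib_ft_type_to_namespace(egress ?
				MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX :
				MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX, ns);
	}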
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 35a0e04c38f2..584ff2ea7810 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -39,9 +39,6 @@ static const struct mlx5_ib_profile rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
mlx5_ib_stage_post_ib_reg_umr_init,
NULL),
- STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
- mlx5_ib_stage_class_attr_init,
- NULL),
};
static int
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index af32899bb72a..e9c428071df3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1571,14 +1571,57 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
}
-static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+{
+ int err = 0;
+
+ mutex_lock(&dev->lb.mutex);
+ if (td)
+ dev->lb.user_td++;
+ if (qp)
+ dev->lb.qps++;
+
+ if (dev->lb.user_td == 2 ||
+ dev->lb.qps == 1) {
+ if (!dev->lb.enabled) {
+ err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+ dev->lb.enabled = true;
+ }
+ }
+
+ mutex_unlock(&dev->lb.mutex);
+
+ return err;
+}
+
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+{
+ mutex_lock(&dev->lb.mutex);
+ if (td)
+ dev->lb.user_td--;
+ if (qp)
+ dev->lb.qps--;
+
+ if (dev->lb.user_td == 1 &&
+ dev->lb.qps == 0) {
+ if (dev->lb.enabled) {
+ mlx5_nic_vport_update_local_lb(dev->mdev, false);
+ dev->lb.enabled = false;
+ }
+ }
+
+ mutex_unlock(&dev->lb.mutex);
+}
+
+static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
+ u16 uid)
{
int err;
if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
return 0;
- err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
+ err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
if (err)
return err;
@@ -1587,35 +1630,23 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
!MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return err;
- mutex_lock(&dev->lb_mutex);
- dev->user_td++;
-
- if (dev->user_td == 2)
- err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
-
- mutex_unlock(&dev->lb_mutex);
- return err;
+ return mlx5_ib_enable_lb(dev, true, false);
}
-static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
+static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
+ u16 uid)
{
if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
return;
- mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
+ mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
(!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
!MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return;
- mutex_lock(&dev->lb_mutex);
- dev->user_td--;
-
- if (dev->user_td < 2)
- mlx5_nic_vport_update_local_lb(dev->mdev, false);
-
- mutex_unlock(&dev->lb_mutex);
+ mlx5_ib_disable_lb(dev, true, false);
}
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
@@ -1727,30 +1758,24 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif
- err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
- if (err)
- goto out_uars;
-
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
- /* Block DEVX on Infiniband as of SELinux */
- if (mlx5_ib_port_link_layer(ibdev, 1) != IB_LINK_LAYER_ETHERNET) {
- err = -EPERM;
- goto out_td;
- }
-
- err = mlx5_ib_devx_create(dev, context);
- if (err)
- goto out_td;
+ err = mlx5_ib_devx_create(dev);
+ if (err < 0)
+ goto out_uars;
+ context->devx_uid = err;
}
+ err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
+ context->devx_uid);
+ if (err)
+ goto out_devx;
+
if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
if (err)
goto out_mdev;
}
- INIT_LIST_HEAD(&context->vma_private_list);
- mutex_init(&context->vma_private_list_mutex);
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -1826,13 +1851,21 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->lib_caps = req.lib_caps;
print_lib_caps(dev, context->lib_caps);
+ if (mlx5_lag_is_active(dev->mdev)) {
+ u8 port = mlx5_core_native_port_num(dev->mdev);
+
+ atomic_set(&context->tx_port_affinity,
+ atomic_add_return(
+ 1, &dev->roce[port].tx_port_affinity));
+ }
+
return &context->ibucontext;
out_mdev:
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+out_devx:
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
- mlx5_ib_devx_destroy(dev, context);
-out_td:
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
out_uars:
deallocate_uars(dev, context);
@@ -1855,11 +1888,18 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_bfreg_info *bfregi;
- if (context->devx_uid)
- mlx5_ib_devx_destroy(dev, context);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /* All umems must be destroyed before destroying the ucontext. */
+ mutex_lock(&ibcontext->per_mm_list_lock);
+ WARN_ON(!list_empty(&ibcontext->per_mm_list));
+ mutex_unlock(&ibcontext->per_mm_list_lock);
+#endif
bfregi = &context->bfregi;
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+
+ if (context->devx_uid)
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
deallocate_uars(dev, context);
kfree(bfregi->sys_pages);
@@ -1900,94 +1940,9 @@ static int get_extended_index(unsigned long offset)
return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
}
-static void mlx5_ib_vma_open(struct vm_area_struct *area)
-{
- /* vma_open is called when a new VMA is created on top of our VMA. This
- * is done through either mremap flow or split_vma (usually due to
- * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
- * as this VMA is strongly hardware related. Therefore we set the
- * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
- * calling us again and trying to do incorrect actions. We assume that
- * the original VMA size is exactly a single page, and therefore all
- * "splitting" operation will not happen to it.
- */
- area->vm_ops = NULL;
-}
-
-static void mlx5_ib_vma_close(struct vm_area_struct *area)
-{
- struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
-
- /* It's guaranteed that all VMAs opened on a FD are closed before the
- * file itself is closed, therefore no sync is needed with the regular
- * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
- * However need a sync with accessing the vma as part of
- * mlx5_ib_disassociate_ucontext.
- * The close operation is usually called under mm->mmap_sem except when
- * process is exiting.
- * The exiting case is handled explicitly as part of
- * mlx5_ib_disassociate_ucontext.
- */
- mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
-
- /* setting the vma context pointer to null in the mlx5_ib driver's
- * private data, to protect a race condition in
- * mlx5_ib_disassociate_ucontext().
- */
- mlx5_ib_vma_priv_data->vma = NULL;
- mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
- list_del(&mlx5_ib_vma_priv_data->list);
- mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
- kfree(mlx5_ib_vma_priv_data);
-}
-
-static const struct vm_operations_struct mlx5_ib_vm_ops = {
- .open = mlx5_ib_vma_open,
- .close = mlx5_ib_vma_close
-};
-
-static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
- struct mlx5_ib_ucontext *ctx)
-{
- struct mlx5_ib_vma_private_data *vma_prv;
- struct list_head *vma_head = &ctx->vma_private_list;
-
- vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
- if (!vma_prv)
- return -ENOMEM;
-
- vma_prv->vma = vma;
- vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
- vma->vm_private_data = vma_prv;
- vma->vm_ops = &mlx5_ib_vm_ops;
-
- mutex_lock(&ctx->vma_private_list_mutex);
- list_add(&vma_prv->list, vma_head);
- mutex_unlock(&ctx->vma_private_list_mutex);
-
- return 0;
-}
static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- struct vm_area_struct *vma;
- struct mlx5_ib_vma_private_data *vma_private, *n;
- struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
-
- mutex_lock(&context->vma_private_list_mutex);
- list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
- list) {
- vma = vma_private->vma;
- zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
- /* context going to be destroyed, should
- * not access ops any more.
- */
- vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
- vma->vm_ops = NULL;
- list_del(&vma_private->list);
- kfree(vma_private);
- }
- mutex_unlock(&context->vma_private_list_mutex);
}
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
@@ -2010,9 +1965,6 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
{
- phys_addr_t pfn;
- int err;
-
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
@@ -2025,13 +1977,8 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
if (!dev->mdev->clock_info_page)
return -EOPNOTSUPP;
- pfn = page_to_pfn(dev->mdev->clock_info_page);
- err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
- vma->vm_page_prot);
- if (err)
- return err;
-
- return mlx5_ib_set_vma_data(vma, context);
+ return rdma_user_mmap_page(&context->ibucontext, vma,
+ dev->mdev->clock_info_page, PAGE_SIZE);
}
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
@@ -2121,21 +2068,15 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
pfn = uar_index2pfn(dev, uar_index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
- vma->vm_page_prot = prot;
- err = io_remap_pfn_range(vma, vma->vm_start, pfn,
- PAGE_SIZE, vma->vm_page_prot);
+ err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
+ prot);
if (err) {
mlx5_ib_err(dev,
- "io_remap_pfn_range failed with error=%d, mmap_cmd=%s\n",
+ "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
err, mmap_cmd2str(cmd));
- err = -EAGAIN;
goto err;
}
- err = mlx5_ib_set_vma_data(vma, context);
- if (err)
- goto err;
-
if (dyn_uar)
bfregi->sys_pages[idx] = uar_index;
return 0;
@@ -2160,7 +2101,6 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
size_t map_size = vma->vm_end - vma->vm_start;
u32 npages = map_size >> PAGE_SHIFT;
phys_addr_t pfn;
- pgprot_t prot;
if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
page_idx + npages)
@@ -2170,14 +2110,8 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
PAGE_SHIFT) +
page_idx;
- prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_page_prot = prot;
-
- if (io_remap_pfn_range(vma, vma->vm_start, pfn, map_size,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return mlx5_ib_set_vma_data(vma, mctx);
+ return rdma_user_mmap_io(context, vma, pfn, map_size,
+ pgprot_writecombine(vma->vm_page_prot));
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2318,21 +2252,30 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
struct mlx5_ib_alloc_pd_resp resp;
struct mlx5_ib_pd *pd;
int err;
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+ u16 uid = 0;
pd = kmalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
- err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
+ uid = context ? to_mucontext(context)->devx_uid : 0;
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+ MLX5_SET(alloc_pd_in, in, uid, uid);
+ err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
+ out, sizeof(out));
if (err) {
kfree(pd);
return ERR_PTR(err);
}
+ pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
+ pd->uid = uid;
if (context) {
resp.pdn = pd->pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
+ mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
kfree(pd);
return ERR_PTR(-EFAULT);
}
@@ -2346,7 +2289,7 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
- mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
+ mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
kfree(mpd);
return 0;
@@ -2452,20 +2395,50 @@ static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
offsetof(typeof(filter), field) -\
sizeof(filter.field))
-static int parse_flow_flow_action(const union ib_flow_spec *ib_spec,
- const struct ib_flow_attr *flow_attr,
- struct mlx5_flow_act *action)
+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+ bool is_egress,
+ struct mlx5_flow_act *action)
{
- struct mlx5_ib_flow_action *maction = to_mflow_act(ib_spec->action.act);
switch (maction->ib_action.type) {
case IB_FLOW_ACTION_ESP:
+ if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
+ return -EINVAL;
/* Currently only AES_GCM keymat is supported by the driver */
action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
- action->action |= flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS ?
+ action->action |= is_egress ?
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
return 0;
+ case IB_FLOW_ACTION_UNSPECIFIED:
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ return -EINVAL;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ action->modify_id = maction->flow_action_raw.action_id;
+ return 0;
+ }
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_DECAP) {
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ return -EINVAL;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ return 0;
+ }
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
+ if (action->action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+ return -EINVAL;
+ action->action |=
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ action->reformat_id =
+ maction->flow_action_raw.action_id;
+ return 0;
+ }
+ /* fall through */
default:
return -EOPNOTSUPP;
}
@@ -2802,7 +2775,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
break;
case IB_FLOW_SPEC_ACTION_HANDLE:
- ret = parse_flow_flow_action(ib_spec, flow_attr, action);
+ ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
+ flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
if (ret)
return ret;
break;
@@ -2883,7 +2857,7 @@ is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
* rules would be supported, always return VALID_SPEC_NA.
*/
if (!is_crypto)
- return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA;
+ return VALID_SPEC_NA;
return is_crypto && is_ipsec &&
(!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ?
@@ -3026,14 +3000,15 @@ enum flow_table_type {
static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
struct mlx5_ib_flow_prio *prio,
int priority,
- int num_entries, int num_groups)
+ int num_entries, int num_groups,
+ u32 flags)
{
struct mlx5_flow_table *ft;
ft = mlx5_create_auto_grouped_flow_table(ns, priority,
num_entries,
num_groups,
- 0, 0);
+ 0, flags);
if (IS_ERR(ft))
return ERR_CAST(ft);
@@ -3053,26 +3028,43 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
int max_table_size;
int num_entries;
int num_groups;
+ u32 flags = 0;
int priority;
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
log_max_ft_size));
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- if (ft_type == MLX5_IB_FT_TX)
- priority = 0;
- else if (flow_is_multicast_only(flow_attr) &&
- !dont_trap)
+ enum mlx5_flow_namespace_type fn_type;
+
+ if (flow_is_multicast_only(flow_attr) &&
+ !dont_trap)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
priority = ib_prio_to_core_prio(flow_attr->priority,
dont_trap);
- ns = mlx5_get_flow_namespace(dev->mdev,
- ft_type == MLX5_IB_FT_TX ?
- MLX5_FLOW_NAMESPACE_EGRESS :
- MLX5_FLOW_NAMESPACE_BYPASS);
+ if (ft_type == MLX5_IB_FT_RX) {
+ fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
+ prio = &dev->flow_db->prios[priority];
+ if (!dev->rep &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (!dev->rep &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ reformat_l3_tunnel_to_l2))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ } else {
+ max_table_size =
+ BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
+ log_max_ft_size));
+ fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ prio = &dev->flow_db->egress_prios[priority];
+ if (!dev->rep &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ }
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
num_entries = MLX5_FS_MAX_ENTRIES;
num_groups = MLX5_FS_MAX_TYPES;
- prio = &dev->flow_db->prios[priority];
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
ns = mlx5_get_flow_namespace(dev->mdev,
@@ -3104,7 +3096,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = prio->flow_table;
if (!ft)
- return _get_prio(ns, prio, priority, num_entries, num_groups);
+ return _get_prio(ns, prio, priority, num_entries, num_groups,
+ flags);
return prio;
}
@@ -3271,6 +3264,9 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!is_valid_attr(dev->mdev, flow_attr))
return ERR_PTR(-EINVAL);
+ if (dev->rep && is_egress)
+ return ERR_PTR(-EINVAL);
+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
handler = kzalloc(sizeof(*handler), GFP_KERNEL);
if (!handler || !spec) {
@@ -3661,34 +3657,54 @@ free_ucmd:
return ERR_PTR(err);
}
-static struct mlx5_ib_flow_prio *_get_flow_table(struct mlx5_ib_dev *dev,
- int priority, bool mcast)
+static struct mlx5_ib_flow_prio *
+_get_flow_table(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ bool mcast)
{
- int max_table_size;
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_ib_flow_prio *prio;
+ int max_table_size;
+ u32 flags = 0;
+ int priority;
+
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ log_max_ft_size));
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ reformat_l3_tunnel_to_l2))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
+ log_max_ft_size));
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ }
- max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
- log_max_ft_size));
if (max_table_size < MLX5_FS_MAX_ENTRIES)
return ERR_PTR(-ENOMEM);
if (mcast)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
- priority = ib_prio_to_core_prio(priority, false);
+ priority = ib_prio_to_core_prio(fs_matcher->priority, false);
- ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+ ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
if (!ns)
return ERR_PTR(-ENOTSUPP);
- prio = &dev->flow_db->prios[priority];
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
+ prio = &dev->flow_db->prios[priority];
+ else
+ prio = &dev->flow_db->egress_prios[priority];
if (prio->flow_table)
return prio;
return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
- MLX5_FS_MAX_TYPES);
+ MLX5_FS_MAX_TYPES, flags);
}
static struct mlx5_ib_flow_handler *
@@ -3696,10 +3712,10 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct mlx5_flow_destination *dst,
struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_act *flow_act,
void *cmd_in, int inlen)
{
struct mlx5_ib_flow_handler *handler;
- struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft = ft_prio->flow_table;
int err = 0;
@@ -3718,9 +3734,8 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
fs_matcher->mask_len);
spec->match_criteria_enable = fs_matcher->match_criteria_enable;
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
handler->rule = mlx5_add_flow_rules(ft, spec,
- &flow_act, dst, 1);
+ flow_act, dst, 1);
if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule);
@@ -3782,12 +3797,12 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
struct mlx5_ib_flow_handler *
mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_act *flow_act,
void *cmd_in, int inlen, int dest_id,
int dest_type)
{
struct mlx5_flow_destination *dst;
struct mlx5_ib_flow_prio *ft_prio;
- int priority = fs_matcher->priority;
struct mlx5_ib_flow_handler *handler;
bool mcast;
int err;
@@ -3805,7 +3820,7 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
mutex_lock(&dev->flow_db->lock);
- ft_prio = _get_flow_table(dev, priority, mcast);
+ ft_prio = _get_flow_table(dev, fs_matcher, mcast);
if (IS_ERR(ft_prio)) {
err = PTR_ERR(ft_prio);
goto unlock;
@@ -3814,13 +3829,18 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
dst->type = dest_type;
dst->tir_num = dest_id;
- } else {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
dst->ft_num = dest_id;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ } else {
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
}
- handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, cmd_in,
- inlen);
+ handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
+ cmd_in, inlen);
if (IS_ERR(handler)) {
err = PTR_ERR(handler);
@@ -3998,6 +4018,9 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
*/
mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
break;
+ case IB_FLOW_ACTION_UNSPECIFIED:
+ mlx5_ib_destroy_flow_action_raw(maction);
+ break;
default:
WARN_ON(true);
break;
@@ -4012,13 +4035,17 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *mqp = to_mqp(ibqp);
int err;
+ u16 uid;
+
+ uid = ibqp->pd ? to_mpd(ibqp->pd)->uid : 0;
if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
return -EOPNOTSUPP;
}
- err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
+ err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
if (err)
mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
ibqp->qp_num, gid->raw);
@@ -4030,8 +4057,11 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
int err;
+ u16 uid;
- err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
+ uid = ibqp->pd ? to_mpd(ibqp->pd)->uid : 0;
+ err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
if (err)
mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
ibqp->qp_num, gid->raw);
@@ -4052,16 +4082,17 @@ static int init_node_data(struct mlx5_ib_dev *dev)
return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
-static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t fw_pages_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}
+static DEVICE_ATTR_RO(fw_pages);
-static ssize_t show_reg_pages(struct device *device,
+static ssize_t reg_pages_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
@@ -4069,44 +4100,47 @@ static ssize_t show_reg_pages(struct device *device,
return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
+static DEVICE_ATTR_RO(reg_pages);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->mdev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
dev->mdev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
-static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
+static struct attribute *mlx5_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_fw_pages.attr,
+ &dev_attr_reg_pages.attr,
+ NULL,
+};
-static struct device_attribute *mlx5_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
- &dev_attr_fw_pages,
- &dev_attr_reg_pages,
+static const struct attribute_group mlx5_attr_group = {
+ .attrs = mlx5_class_attributes,
};
static void pkey_change_handler(struct work_struct *work)
@@ -5631,7 +5665,6 @@ void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
- const char *name;
int err;
int i;
@@ -5664,12 +5697,6 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
- if (!mlx5_lag_is_active(mdev))
- name = "mlx5_%d";
- else
- name = "mlx5_bond_%d";
-
- strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
@@ -5876,7 +5903,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
- mutex_init(&dev->lb_mutex);
+ mutex_init(&dev->lb.mutex);
return 0;
}
@@ -6083,7 +6110,14 @@ static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev)
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
- return ib_register_device(&dev->ib_dev, NULL);
+ const char *name;
+
+ rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
+ if (!mlx5_lag_is_active(dev->mdev))
+ name = "mlx5_%d";
+ else
+ name = "mlx5_bond_%d";
+ return ib_register_device(&dev->ib_dev, name, NULL);
}
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
@@ -6113,21 +6147,6 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
cancel_delay_drop(dev);
}
-int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
-{
- int err;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
- err = device_create_file(&dev->ib_dev.dev,
- mlx5_class_attributes[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
{
mlx5_ib_register_vport_reps(dev);
@@ -6151,6 +6170,8 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
profile->stage[stage].cleanup(dev);
}
+ if (dev->devx_whitelist_uid)
+ mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
ib_dealloc_device((struct ib_device *)dev);
}
@@ -6159,8 +6180,7 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
{
int err;
int i;
-
- printk_once(KERN_INFO "%s", mlx5_version);
+ int uid;
for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
if (profile->stage[i].init) {
@@ -6170,6 +6190,10 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
}
}
+ uid = mlx5_ib_devx_create(dev);
+ if (uid > 0)
+ dev->devx_whitelist_uid = uid;
+
dev->profile = profile;
dev->ib_active = true;
@@ -6230,9 +6254,6 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
mlx5_ib_stage_delay_drop_init,
mlx5_ib_stage_delay_drop_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
- mlx5_ib_stage_class_attr_init,
- NULL),
};
static const struct mlx5_ib_profile nic_rep_profile = {
@@ -6275,9 +6296,6 @@ static const struct mlx5_ib_profile nic_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
mlx5_ib_stage_post_ib_reg_umr_init,
NULL),
- STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
- mlx5_ib_stage_class_attr_init,
- NULL),
STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
mlx5_ib_stage_rep_reg_init,
mlx5_ib_stage_rep_reg_cleanup),
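[Editor's usage sketch for the shared loopback bookkeeping added in main.c; the two helpers are from the patch, the surrounding caller (with dev and err in scope) is hypothetical.]

	/* Local loopback is enabled once a second user transport domain
	 * appears, or the first loopback-dependent QP, and disabled again
	 * when the counts fall back to one TD and zero QPs.
	 */
	err = mlx5_ib_enable_lb(dev, true, false);	/* TD consumer */
	if (err)
		return err;
	/* ... use the transport domain ... */
	mlx5_ib_disable_lb(dev, true, false);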
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index f3dbd75a0a96..549234988bb4 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,7 +57,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
int entry;
unsigned long page_shift = umem->page_shift;
- if (umem->odp_data) {
+ if (umem->is_odp) {
*ncont = ib_umem_page_count(umem);
*count = *ncont << (page_shift - PAGE_SHIFT);
*shift = page_shift;
@@ -152,14 +152,13 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
struct scatterlist *sg;
int entry;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- const bool odp = umem->odp_data != NULL;
-
- if (odp) {
+ if (umem->is_odp) {
WARN_ON(shift != 0);
WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
for (i = 0; i < num_pages; ++i) {
- dma_addr_t pa = umem->odp_data->dma_list[offset + i];
+ dma_addr_t pa =
+ to_ib_umem_odp(umem)->dma_list[offset + i];
pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
}
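[Editor's sketch of the ODP accessor pattern mem.c is converted to: the embedded umem->odp_data pointer is replaced by an is_odp flag plus the to_ib_umem_odp() conversion helper. The lookup function is hypothetical.]

	static dma_addr_t example_odp_dma_addr(struct ib_umem *umem,
					       size_t idx)
	{
		/* Callers first test the flag, then convert to the
		 * ODP-specific type to reach dma_list.
		 */
		if (!umem->is_odp)
			return 0;
		return to_ib_umem_odp(umem)->dma_list[idx];
	}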
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 289c18db2611..b651a7a6fde9 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -39,8 +39,10 @@
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
+#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
@@ -48,17 +50,17 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
-#define mlx5_ib_dbg(dev, format, arg...) \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+#define mlx5_ib_dbg(_dev, format, arg...) \
+ dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
-#define mlx5_ib_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+#define mlx5_ib_err(_dev, format, arg...) \
+ dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
-#define mlx5_ib_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+#define mlx5_ib_warn(_dev, format, arg...) \
+ dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
#define field_avail(type, fld, sz) (offsetof(type, fld) + \
sizeof(((type *)0)->fld) <= (sz))
@@ -114,13 +116,6 @@ enum {
MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};
-struct mlx5_ib_vma_private_data {
- struct list_head list;
- struct vm_area_struct *vma;
- /* protect vma_private_list add/del */
- struct mutex *vma_private_list_mutex;
-};
-
struct mlx5_ib_ucontext {
struct ib_ucontext ibucontext;
struct list_head db_page_list;
@@ -132,13 +127,12 @@ struct mlx5_ib_ucontext {
u8 cqe_version;
/* Transport Domain number */
u32 tdn;
- struct list_head vma_private_list;
- /* protect vma_private_list add/del */
- struct mutex vma_private_list_mutex;
u64 lib_caps;
DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
u16 devx_uid;
+ /* For RoCE LAG TX affinity */
+ atomic_t tx_port_affinity;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -149,6 +143,13 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
struct mlx5_ib_pd {
struct ib_pd ibpd;
u32 pdn;
+ u16 uid;
+};
+
+enum {
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
+ MLX5_IB_FLOW_ACTION_DECAP,
};
#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
@@ -180,6 +181,7 @@ struct mlx5_ib_flow_matcher {
struct mlx5_ib_match_params matcher_mask;
int mask_len;
enum mlx5_ib_flow_type flow_type;
+ enum mlx5_flow_namespace_type ns_type;
u16 priority;
struct mlx5_core_dev *mdev;
atomic_t usecnt;
@@ -188,6 +190,7 @@ struct mlx5_ib_flow_matcher {
struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
struct mlx5_flow_table *lag_demux_ft;
@@ -322,6 +325,7 @@ enum {
struct mlx5_ib_rwq_ind_table {
struct ib_rwq_ind_table ib_rwq_ind_tbl;
u32 rqtn;
+ u16 uid;
};
struct mlx5_ib_ubuffer {
@@ -428,7 +432,7 @@ struct mlx5_ib_qp {
struct list_head cq_send_list;
struct mlx5_rate_limit rl;
u32 underlay_qpn;
- bool tunnel_offload_en;
+ u32 flags_en;
/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
enum ib_qp_type qp_sub_type;
};
@@ -536,6 +540,7 @@ struct mlx5_ib_srq {
struct mlx5_ib_xrcd {
struct ib_xrcd ibxrcd;
u32 xrcdn;
+ u16 uid;
};
enum mlx5_ib_mtt_access_flags {
@@ -700,7 +705,7 @@ struct mlx5_roce {
rwlock_t netdev_lock;
struct net_device *netdev;
struct notifier_block nb;
- atomic_t next_port;
+ atomic_t tx_port_affinity;
enum ib_port_state last_port_state;
struct mlx5_ib_dev *dev;
u8 native_port_num;
@@ -815,6 +820,11 @@ struct mlx5_ib_flow_action {
u64 ib_flags;
struct mlx5_accel_esp_xfrm *ctx;
} esp_aes_gcm;
+ struct {
+ struct mlx5_ib_dev *dev;
+ u32 sub_type;
+ u32 action_id;
+ } flow_action_raw;
};
};
@@ -859,9 +869,20 @@ to_mcounters(struct ib_counters *ibcntrs)
return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}
+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+ bool is_egress,
+ struct mlx5_flow_act *action);
+struct mlx5_ib_lb_state {
+ /* protect the user_td */
+ struct mutex mutex;
+ u32 user_td;
+ int qps;
+ bool enabled;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
- const struct uverbs_object_tree_def *driver_trees[6];
+ const struct uverbs_object_tree_def *driver_trees[7];
struct mlx5_core_dev *mdev;
struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
@@ -900,13 +921,12 @@ struct mlx5_ib_dev {
const struct mlx5_ib_profile *profile;
struct mlx5_eswitch_rep *rep;
- /* protect the user_td */
- struct mutex lb_mutex;
- u32 user_td;
+ struct mlx5_ib_lb_state lb;
u8 umr_fence;
struct list_head ib_dev_list;
u64 sys_image_guid;
struct mlx5_memic memic;
+ u16 devx_whitelist_uid;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1017,6 +1037,8 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -1106,7 +1128,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
@@ -1141,7 +1163,7 @@ void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
@@ -1180,7 +1202,6 @@ void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage);
@@ -1229,22 +1250,20 @@ void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
u8 port_num);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context);
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context);
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
- void *cmd_in, int inlen, int dest_id, int dest_type);
+ struct mlx5_flow_act *flow_act, void *cmd_in, int inlen,
+ int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
+void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
-mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; };
-static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context) {}
+mlx5_ib_devx_create(struct mlx5_ib_dev *dev) { return -EOPNOTSUPP; }
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
static inline const struct uverbs_object_tree_def *
mlx5_ib_get_devx_tree(void) { return NULL; }
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
@@ -1257,6 +1276,11 @@ mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
{
return 0;
}
+static inline void
+mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
+{
+ return;
+}
#endif
static inline void init_query_mad(struct ib_smp *mad)
{
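
The header change above folds the old lb_mutex/user_td pair into struct
mlx5_ib_lb_state and exposes mlx5_ib_enable_lb()/mlx5_ib_disable_lb() so the
TIR-creating paths in qp.c can reference-count loopback instead of toggling
it directly. A minimal sketch of the enable side under the new state (the
enable condition and the vport call are assumptions for illustration, not
code from this patch):

	int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
	{
		int err = 0;

		/* Count the new user under lb.mutex ... */
		mutex_lock(&dev->lb.mutex);
		if (td)
			dev->lb.user_td++;
		if (qp)
			dev->lb.qps++;

		/* ... and flip loopback only when a first real user appears. */
		if (!dev->lb.enabled && (dev->lb.user_td > 1 || dev->lb.qps)) {
			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
			if (!err)
				dev->lb.enabled = true;
		}
		mutex_unlock(&dev->lb.mutex);
		return err;
	}

mlx5_ib_disable_lb() would mirror this, decrementing the counters and
clearing loopback once both drop back to their idle values.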
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index e22314837645..9b195d65a13e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -98,7 +98,7 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
- if (mr->umem->odp_data) {
+ if (mr->umem->is_odp) {
/*
* This barrier prevents the compiler from moving the
* setting of umem->odp_data->private to point to our
@@ -107,7 +107,7 @@ static void update_odp_mr(struct mlx5_ib_mr *mr)
* handle invalidations.
*/
smp_wmb();
- mr->umem->odp_data->private = mr;
+ to_ib_umem_odp(mr->umem)->private = mr;
/*
* Make sure we will see the new
* umem->odp_data->private value in the invalidation
@@ -691,7 +691,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
init_completion(&ent->compl);
INIT_WORK(&ent->work, cache_work_func);
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
- queue_work(cache->wq, &ent->work);
if (i > MR_CACHE_LAST_STD_ENTRY) {
mlx5_odp_init_mr_cache_entry(ent);
@@ -711,6 +710,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->limit = dev->mdev->profile->mr_cache[i].limit;
else
ent->limit = 0;
+ queue_work(cache->wq, &ent->work);
}
err = mlx5_mr_cache_debugfs_init(dev);
@@ -1627,14 +1627,16 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
struct ib_umem *umem = mr->umem;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (umem && umem->odp_data) {
+ if (umem && umem->is_odp) {
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
+
/* Prevent new page faults from succeeding */
mr->live = 0;
/* Wait for all running page-fault handlers to finish. */
synchronize_srcu(&dev->mr_srcu);
/* Destroy all page mappings */
- if (umem->odp_data->page_list)
- mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
+ if (umem_odp->page_list)
+ mlx5_ib_invalidate_range(umem_odp, ib_umem_start(umem),
ib_umem_end(umem));
else
mlx5_ib_free_implicit_mr(mr);
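
The hunk above moves queue_work() from the top of the per-entry loop to the
bottom: the cache worker reads ent->limit, so queueing before the limit was
assigned left a window where the worker could act on a half-initialized
entry. The pattern in miniature (cache_limit() is an illustrative stand-in
for the profile lookup):

	/* Initialize everything the worker reads, then publish by queueing;
	 * the work item may start running as soon as queue_work() returns. */
	ent->dev = dev;
	ent->limit = cache_limit(dev, i);
	queue_work(cache->wq, &ent->work);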
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d216e0d2921d..b04eb6775326 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -61,13 +61,21 @@ static int check_parent(struct ib_umem_odp *odp,
return mr && mr->parent == parent && !odp->dying;
}
+struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
+{
+ if (WARN_ON(!mr || !mr->umem || !mr->umem->is_odp))
+ return NULL;
+
+ return to_ib_umem_odp(mr->umem)->per_mm;
+}
+
static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
{
struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
- struct ib_ucontext *ctx = odp->umem->context;
+ struct ib_ucontext_per_mm *per_mm = odp->per_mm;
struct rb_node *rb;
- down_read(&ctx->umem_rwsem);
+ down_read(&per_mm->umem_rwsem);
while (1) {
rb = rb_next(&odp->interval_tree.rb);
if (!rb)
@@ -79,19 +87,19 @@ static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
not_found:
odp = NULL;
end:
- up_read(&ctx->umem_rwsem);
+ up_read(&per_mm->umem_rwsem);
return odp;
}
-static struct ib_umem_odp *odp_lookup(struct ib_ucontext *ctx,
- u64 start, u64 length,
+static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
struct mlx5_ib_mr *parent)
{
+ struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
struct ib_umem_odp *odp;
struct rb_node *rb;
- down_read(&ctx->umem_rwsem);
- odp = rbt_ib_umem_lookup(&ctx->umem_tree, start, length);
+ down_read(&per_mm->umem_rwsem);
+ odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
if (!odp)
goto end;
@@ -102,13 +110,13 @@ static struct ib_umem_odp *odp_lookup(struct ib_ucontext *ctx,
if (!rb)
goto not_found;
odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (ib_umem_start(odp->umem) > start + length)
+ if (ib_umem_start(&odp->umem) > start + length)
goto not_found;
}
not_found:
odp = NULL;
end:
- up_read(&ctx->umem_rwsem);
+ up_read(&per_mm->umem_rwsem);
return odp;
}
@@ -116,7 +124,6 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
size_t nentries, struct mlx5_ib_mr *mr, int flags)
{
struct ib_pd *pd = mr->ibmr.pd;
- struct ib_ucontext *ctx = pd->uobject->context;
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_umem_odp *odp;
unsigned long va;
@@ -131,13 +138,13 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
return;
}
- odp = odp_lookup(ctx, offset * MLX5_IMR_MTT_SIZE,
- nentries * MLX5_IMR_MTT_SIZE, mr);
+ odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
+ nentries * MLX5_IMR_MTT_SIZE, mr);
for (i = 0; i < nentries; i++, pklm++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
va = (offset + i) * MLX5_IMR_MTT_SIZE;
- if (odp && odp->umem->address == va) {
+ if (odp && odp->umem.address == va) {
struct mlx5_ib_mr *mtt = odp->private;
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
@@ -153,13 +160,13 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
static void mr_leaf_free_action(struct work_struct *work)
{
struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
- int idx = ib_umem_start(odp->umem) >> MLX5_IMR_MTT_SHIFT;
+ int idx = ib_umem_start(&odp->umem) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
mr->parent = NULL;
synchronize_srcu(&mr->dev->mr_srcu);
- ib_umem_release(odp->umem);
+ ib_umem_release(&odp->umem);
if (imr->live)
mlx5_ib_update_xlt(imr, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
@@ -170,22 +177,24 @@ static void mr_leaf_free_action(struct work_struct *work)
wake_up(&imr->q_leaf_free);
}
-void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
unsigned long end)
{
struct mlx5_ib_mr *mr;
const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
sizeof(struct mlx5_mtt)) - 1;
u64 idx = 0, blk_start_idx = 0;
+ struct ib_umem *umem;
int in_block = 0;
u64 addr;
- if (!umem || !umem->odp_data) {
+ if (!umem_odp) {
pr_err("invalidation called on NULL umem or non-ODP umem\n");
return;
}
+ umem = &umem_odp->umem;
- mr = umem->odp_data->private;
+ mr = umem_odp->private;
if (!mr || !mr->ibmr.pd)
return;
@@ -208,7 +217,7 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
* estimate the cost of another UMR vs. the cost of bigger
* UMR.
*/
- if (umem->odp_data->dma_list[idx] &
+ if (umem_odp->dma_list[idx] &
(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
if (!in_block) {
blk_start_idx = idx;
@@ -237,13 +246,13 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
* needed.
*/
- ib_umem_odp_unmap_dma_pages(umem, start, end);
+ ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
if (unlikely(!umem->npages && mr->parent &&
- !umem->odp_data->dying)) {
- WRITE_ONCE(umem->odp_data->dying, 1);
+ !umem_odp->dying)) {
+ WRITE_ONCE(umem_odp->dying, 1);
atomic_inc(&mr->parent->num_leaf_free);
- schedule_work(&umem->odp_data->work);
+ schedule_work(&umem_odp->work);
}
}
@@ -366,16 +375,15 @@ fail:
static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
u64 io_virt, size_t bcnt)
{
- struct ib_ucontext *ctx = mr->ibmr.pd->uobject->context;
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
struct ib_umem_odp *odp, *result = NULL;
+ struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
u64 addr = io_virt & MLX5_IMR_MTT_MASK;
int nentries = 0, start_idx = 0, ret;
struct mlx5_ib_mr *mtt;
- struct ib_umem *umem;
- mutex_lock(&mr->umem->odp_data->umem_mutex);
- odp = odp_lookup(ctx, addr, 1, mr);
+ mutex_lock(&odp_mr->umem_mutex);
+ odp = odp_lookup(addr, 1, mr);
mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
io_virt, bcnt, addr, odp);
@@ -385,22 +393,23 @@ next_mr:
if (nentries)
nentries++;
} else {
- umem = ib_alloc_odp_umem(ctx, addr, MLX5_IMR_MTT_SIZE);
- if (IS_ERR(umem)) {
- mutex_unlock(&mr->umem->odp_data->umem_mutex);
- return ERR_CAST(umem);
+ odp = ib_alloc_odp_umem(odp_mr->per_mm, addr,
+ MLX5_IMR_MTT_SIZE);
+ if (IS_ERR(odp)) {
+ mutex_unlock(&odp_mr->umem_mutex);
+ return ERR_CAST(odp);
}
- mtt = implicit_mr_alloc(mr->ibmr.pd, umem, 0, mr->access_flags);
+ mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
+ mr->access_flags);
if (IS_ERR(mtt)) {
- mutex_unlock(&mr->umem->odp_data->umem_mutex);
- ib_umem_release(umem);
+ mutex_unlock(&odp_mr->umem_mutex);
+ ib_umem_release(&odp->umem);
return ERR_CAST(mtt);
}
- odp = umem->odp_data;
odp->private = mtt;
- mtt->umem = umem;
+ mtt->umem = &odp->umem;
mtt->mmkey.iova = addr;
mtt->parent = mr;
INIT_WORK(&odp->work, mr_leaf_free_action);
@@ -417,7 +426,7 @@ next_mr:
addr += MLX5_IMR_MTT_SIZE;
if (unlikely(addr < io_virt + bcnt)) {
odp = odp_next(odp);
- if (odp && odp->umem->address != addr)
+ if (odp && odp->umem.address != addr)
odp = NULL;
goto next_mr;
}
@@ -432,7 +441,7 @@ next_mr:
}
}
- mutex_unlock(&mr->umem->odp_data->umem_mutex);
+ mutex_unlock(&odp_mr->umem_mutex);
return result;
}
@@ -460,36 +469,36 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
return imr;
}
-static int mr_leaf_free(struct ib_umem *umem, u64 start,
- u64 end, void *cookie)
+static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
+ void *cookie)
{
- struct mlx5_ib_mr *mr = umem->odp_data->private, *imr = cookie;
+ struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
+ struct ib_umem *umem = &umem_odp->umem;
if (mr->parent != imr)
return 0;
- ib_umem_odp_unmap_dma_pages(umem,
- ib_umem_start(umem),
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
ib_umem_end(umem));
- if (umem->odp_data->dying)
+ if (umem_odp->dying)
return 0;
- WRITE_ONCE(umem->odp_data->dying, 1);
+ WRITE_ONCE(umem_odp->dying, 1);
atomic_inc(&imr->num_leaf_free);
- schedule_work(&umem->odp_data->work);
+ schedule_work(&umem_odp->work);
return 0;
}
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
- struct ib_ucontext *ctx = imr->ibmr.pd->uobject->context;
+ struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
- down_read(&ctx->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&ctx->umem_tree, 0, ULLONG_MAX,
+ down_read(&per_mm->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
mr_leaf_free, true, imr);
- up_read(&ctx->umem_rwsem);
+ up_read(&per_mm->umem_rwsem);
wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
}
@@ -497,6 +506,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
u64 io_virt, size_t bcnt, u32 *bytes_mapped)
{
+ struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
u64 access_mask = ODP_READ_ALLOWED_BIT;
int npages = 0, page_shift, np;
u64 start_idx, page_mask;
@@ -505,7 +515,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
size_t size;
int ret;
- if (!mr->umem->odp_data->page_list) {
+ if (!odp_mr->page_list) {
odp = implicit_mr_get_data(mr, io_virt, bcnt);
if (IS_ERR(odp))
@@ -513,11 +523,11 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
mr = odp->private;
} else {
- odp = mr->umem->odp_data;
+ odp = odp_mr;
}
next_mr:
- size = min_t(size_t, bcnt, ib_umem_end(odp->umem) - io_virt);
+ size = min_t(size_t, bcnt, ib_umem_end(&odp->umem) - io_virt);
page_shift = mr->umem->page_shift;
page_mask = ~(BIT(page_shift) - 1);
@@ -533,7 +543,7 @@ next_mr:
*/
smp_rmb();
- ret = ib_umem_odp_map_dma_pages(mr->umem, io_virt, size,
+ ret = ib_umem_odp_map_dma_pages(to_ib_umem_odp(mr->umem), io_virt, size,
access_mask, current_seq);
if (ret < 0)
@@ -542,7 +552,8 @@ next_mr:
np = ret;
mutex_lock(&odp->umem_mutex);
- if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
+ if (!ib_umem_mmu_notifier_retry(to_ib_umem_odp(mr->umem),
+ current_seq)) {
/*
* No need to check whether the MTTs really belong to
* this MR, since ib_umem_odp_map_dma_pages already
@@ -575,7 +586,7 @@ next_mr:
io_virt += size;
next = odp_next(odp);
- if (unlikely(!next || next->umem->address != io_virt)) {
+ if (unlikely(!next || next->umem.address != io_virt)) {
mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
io_virt, next);
return -EAGAIN;
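
Throughout odp.c the ucontext-wide umem tree and rwsem are replaced by the
per-mm variants reached through struct ib_umem_odp, so every lookup now finds
its interval tree via mr_to_per_mm() or odp->per_mm. The resulting locking
rule, isolated into a sketch (lookup_odp() is illustrative, not a function
added by the patch):

	static struct ib_umem_odp *lookup_odp(struct ib_ucontext_per_mm *per_mm,
					      u64 start, u64 length)
	{
		struct ib_umem_odp *odp;

		/* Readers of the interval tree take the per-mm rwsem. */
		down_read(&per_mm->umem_rwsem);
		odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
		up_read(&per_mm->umem_rwsem);
		return odp;
	}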
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index daf1eb84cd31..6841c0f9237f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
+#include "cmd.h"
/* not supported currently */
static int wq_signature;
@@ -850,6 +851,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_umem;
}
+ MLX5_SET(create_qp_in, *in, uid, to_mpd(pd)->uid);
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
@@ -1051,7 +1053,8 @@ static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
static int is_connected(enum ib_qp_type qp_type)
{
- if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
+ if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
+ qp_type == MLX5_IB_QPT_DCI)
return 1;
return 0;
@@ -1059,11 +1062,13 @@ static int is_connected(enum ib_qp_type qp_type)
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
- struct mlx5_ib_sq *sq, u32 tdn)
+ struct mlx5_ib_sq *sq, u32 tdn,
+ struct ib_pd *pd)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
MLX5_SET(tisc, tisc, transport_domain, tdn);
if (qp->flags & MLX5_IB_QP_UNDERLAY)
MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
@@ -1072,9 +1077,9 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
}
static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
- struct mlx5_ib_sq *sq)
+ struct mlx5_ib_sq *sq, struct ib_pd *pd)
{
- mlx5_core_destroy_tis(dev->mdev, sq->tisn);
+ mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
}
static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
@@ -1114,6 +1119,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
goto err_umem;
}
+ MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
@@ -1188,7 +1194,7 @@ static size_t get_rq_pas_size(void *qpc)
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, void *qpin,
- size_t qpinlen)
+ size_t qpinlen, struct ib_pd *pd)
{
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
@@ -1209,6 +1215,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
MLX5_SET(rqc, rqc, vsd, 1);
@@ -1256,10 +1263,23 @@ static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
}
+static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq,
+ u32 qp_flags_en,
+ struct ib_pd *pd)
+{
+ if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+ mlx5_ib_disable_lb(dev, false, true);
+ mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
+}
+
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, u32 tdn,
- bool tunnel_offload_en)
+ u32 *qp_flags_en,
+ struct ib_pd *pd)
{
+ u8 lb_flag = 0;
u32 *in;
void *tirc;
int inlen;
@@ -1270,33 +1290,45 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
MLX5_SET(tirc, tirc, transport_domain, tdn);
- if (tunnel_offload_en)
+ if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
- if (dev->rep)
- MLX5_SET(tirc, tirc, self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
+ if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+ if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ if (dev->rep) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ *qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
+ if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+ err = mlx5_ib_enable_lb(dev, false, true);
+
+ if (err)
+ destroy_raw_packet_qp_tir(dev, rq, 0, pd);
+ }
kvfree(in);
return err;
}
-static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq)
-{
- mlx5_core_destroy_tir(dev->mdev, rq->tirn);
-}
-
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 *in, size_t inlen,
- struct ib_pd *pd)
+ struct ib_pd *pd,
+ struct ib_udata *udata,
+ struct mlx5_ib_create_qp_resp *resp)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
@@ -1306,9 +1338,10 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
int err;
u32 tdn = mucontext->tdn;
+ u16 uid = to_mpd(pd)->uid;
if (qp->sq.wqe_cnt) {
- err = create_raw_packet_qp_tis(dev, qp, sq, tdn);
+ err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
if (err)
return err;
@@ -1316,6 +1349,13 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
goto err_destroy_tis;
+ if (uid) {
+ resp->tisn = sq->tisn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
+ resp->sqn = sq->base.mqp.qpn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
+ }
+
sq->base.container_mibqp = qp;
sq->base.mqp.event = mlx5_ib_qp_event;
}
@@ -1327,22 +1367,32 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
- err = create_raw_packet_qp_rq(dev, rq, in, inlen);
+ err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
if (err)
goto err_destroy_sq;
-
- err = create_raw_packet_qp_tir(dev, rq, tdn,
- qp->tunnel_offload_en);
+ err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd);
if (err)
goto err_destroy_rq;
+
+ if (uid) {
+ resp->rqn = rq->base.mqp.qpn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
+ resp->tirn = rq->tirn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ }
}
qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
rq->base.mqp.qpn;
+ err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
+ if (err)
+ goto err_destroy_tir;
return 0;
+err_destroy_tir:
+ destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
err_destroy_rq:
destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
@@ -1350,7 +1400,7 @@ err_destroy_sq:
return err;
destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
- destroy_raw_packet_qp_tis(dev, sq);
+ destroy_raw_packet_qp_tis(dev, sq, pd);
return err;
}
@@ -1363,13 +1413,13 @@ static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
if (qp->rq.wqe_cnt) {
- destroy_raw_packet_qp_tir(dev, rq);
+ destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
destroy_raw_packet_qp_rq(dev, rq);
}
if (qp->sq.wqe_cnt) {
destroy_raw_packet_qp_sq(dev, sq);
- destroy_raw_packet_qp_tis(dev, sq);
+ destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
}
}
@@ -1387,7 +1437,11 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
- mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+ if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+ mlx5_ib_disable_lb(dev, false, true);
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
+ to_mpd(qp->ibqp.pd)->uid);
}
static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
@@ -1410,6 +1464,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 tdn = mucontext->tdn;
struct mlx5_ib_create_qp_rss ucmd = {};
size_t required_cmd_sz;
+ u8 lb_flag = 0;
if (init_attr->qp_type != IB_QPT_RAW_PACKET)
return -EOPNOTSUPP;
@@ -1444,7 +1499,9 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EOPNOTSUPP;
}
- if (ucmd.flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
mlx5_ib_dbg(dev, "invalid flags\n");
return -EOPNOTSUPP;
}
@@ -1461,6 +1518,16 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EOPNOTSUPP;
}
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->rep) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
+ }
+
err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (err) {
mlx5_ib_dbg(dev, "copy failed\n");
@@ -1472,6 +1539,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type,
MLX5_TIRC_DISP_TYPE_INDIRECT);
@@ -1484,6 +1552,8 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
+
if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
else
@@ -1580,26 +1650,141 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
create_tir:
- if (dev->rep)
- MLX5_SET(tirc, tirc, self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
-
err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
+ if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+ err = mlx5_ib_enable_lb(dev, false, true);
+
+ if (err)
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
+ to_mpd(pd)->uid);
+ }
+
if (err)
goto err;
+ if (mucontext->devx_uid) {
+ resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ resp.tirn = qp->rss_qp.tirn;
+ }
+
+ err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
+ if (err)
+ goto err_copy;
+
kvfree(in);
/* qpn is reserved for that QP */
qp->trans_qp.base.mqp.qpn = 0;
qp->flags |= MLX5_IB_QP_RSS;
return 0;
+err_copy:
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
err:
kvfree(in);
return err;
}
+static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
+ void *qpc)
+{
+ int rcqe_sz;
+
+ if (init_attr->qp_type == MLX5_IB_QPT_DCI)
+ return;
+
+ rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+ if (rcqe_sz == 128) {
+ MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+ return;
+ }
+
+ if (init_attr->qp_type != MLX5_IB_QPT_DCT)
+ MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
+}
+
+static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_create_qp *ucmd,
+ void *qpc)
+{
+ enum ib_qp_type qpt = init_attr->qp_type;
+ int scqe_sz;
+ bool allow_scat_cqe = 0;
+
+ if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
+ return;
+
+ if (ucmd)
+ allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+
+ if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+ return;
+
+ scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
+ if (scqe_sz == 128) {
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
+ return;
+ }
+
+ if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
+ MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
+}
+
+static int atomic_size_to_mode(int size_mask)
+{
+	/* The driver does not support atomic operands larger than 256B
+	 * and does not know how to translate bigger sizes.
+	 */
+ int supported_size_mask = size_mask & 0x1ff;
+ int log_max_size;
+
+ if (!supported_size_mask)
+ return -EOPNOTSUPP;
+
+ log_max_size = __fls(supported_size_mask);
+
+ if (log_max_size > 3)
+ return log_max_size;
+
+ return MLX5_ATOMIC_MODE_8B;
+}
+
+static int get_atomic_mode(struct mlx5_ib_dev *dev,
+ enum ib_qp_type qp_type)
+{
+ u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
+ u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
+ int atomic_mode = -EOPNOTSUPP;
+ int atomic_size_mask;
+
+ if (!atomic)
+ return -EOPNOTSUPP;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+ else
+ atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
+ (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
+ atomic_mode = atomic_size_to_mode(atomic_size_mask);
+
+ if (atomic_mode <= 0 &&
+ (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
+ atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
+ atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
+
+ return atomic_mode;
+}
+
+static inline bool check_flags_mask(uint64_t input, uint64_t supported)
+{
+ return (input & ~supported) == 0;
+}
+
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, struct mlx5_ib_qp *qp)
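
Two of the helpers added above deserve a worked example. check_flags_mask()
simply rejects any user flag outside the supported set, and
atomic_size_to_mode() turns the firmware's size bitmask into a QP atomic
mode. For a hypothetical mask (the value below is illustrative, not a real
capability dump):

	/* size_mask = 0x148 -> bits 3, 6 and 8 set: 8B, 64B and 256B operands.
	 * 0x148 & 0x1ff == 0x148, __fls(0x148) == 8 and 8 > 3, so
	 * atomic_size_to_mode() returns 8 (log2 of 256B). A mask with only
	 * bit 3 set (0x8) gives __fls() == 3 and falls back to
	 * MLX5_ATOMIC_MODE_8B instead.
	 */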
@@ -1697,20 +1882,47 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EFAULT;
}
+ if (!check_flags_mask(ucmd.flags,
+ MLX5_QP_FLAG_SIGNATURE |
+ MLX5_QP_FLAG_SCATTER_CQE |
+ MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_BFREG_INDEX |
+ MLX5_QP_FLAG_TYPE_DCT |
+ MLX5_QP_FLAG_TYPE_DCI |
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE))
+ return -EINVAL;
+
err = get_qp_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
return err;
qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
- qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+ if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
+ qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
!tunnel_offload_supported(mdev)) {
mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
return -EOPNOTSUPP;
}
- qp->tunnel_offload_en = true;
+ qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
}
if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
@@ -1811,23 +2023,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, cd_slave_receive, 1);
if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
- int rcqe_sz;
- int scqe_sz;
-
- rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
- scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
-
- if (rcqe_sz == 128)
- MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
- else
- MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
-
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
- if (scqe_sz == 128)
- MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
- else
- MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
- }
+ configure_responder_scat_cqe(init_attr, qpc);
+ configure_requester_scat_cqe(dev, init_attr,
+ (pd && pd->uobject) ? &ucmd : NULL,
+ qpc);
}
if (qp->rq.wqe_cnt) {
@@ -1911,7 +2110,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->flags & MLX5_IB_QP_UNDERLAY) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
- err = create_raw_packet_qp(dev, qp, in, inlen, pd);
+ err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
+ &resp);
} else {
err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
}
@@ -2192,6 +2392,7 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
goto err_free;
}
+ MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
qp->qp_sub_type = MLX5_IB_QPT_DCT;
MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
@@ -2200,6 +2401,9 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
MLX5_SET(dctc, dctc, user_index, uidx);
+ if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
+ configure_responder_scat_cqe(attr, dctc);
+
qp->state = IB_QPS_RESET;
return &qp->ibqp;
@@ -2405,13 +2609,15 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
return 0;
}
-static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
- int attr_mask)
+static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, __be32 *hw_access_flags)
{
- u32 hw_access_flags = 0;
u8 dest_rd_atomic;
u32 access_flags;
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
dest_rd_atomic = attr->max_dest_rd_atomic;
else
@@ -2426,13 +2632,25 @@ static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_att
access_flags &= IB_ACCESS_REMOTE_WRITE;
if (access_flags & IB_ACCESS_REMOTE_READ)
- hw_access_flags |= MLX5_QP_BIT_RRE;
- if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
- hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
+ *hw_access_flags |= MLX5_QP_BIT_RRE;
+ if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ qp->ibqp.qp_type == IB_QPT_RC) {
+ int atomic_mode;
+
+ atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
+ if (atomic_mode < 0)
+ return -EOPNOTSUPP;
+
+ *hw_access_flags |= MLX5_QP_BIT_RAE;
+ *hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
+ }
+
if (access_flags & IB_ACCESS_REMOTE_WRITE)
- hw_access_flags |= MLX5_QP_BIT_RWE;
+ *hw_access_flags |= MLX5_QP_BIT_RWE;
+
+ *hw_access_flags = cpu_to_be32(*hw_access_flags);
- return cpu_to_be32(hw_access_flags);
+ return 0;
}
enum {
@@ -2458,7 +2676,8 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
}
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq, u8 sl)
+ struct mlx5_ib_sq *sq, u8 sl,
+ struct ib_pd *pd)
{
void *in;
void *tisc;
@@ -2471,6 +2690,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
return -ENOMEM;
MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
+ MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
@@ -2483,7 +2703,8 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
}
static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq, u8 tx_affinity)
+ struct mlx5_ib_sq *sq, u8 tx_affinity,
+ struct ib_pd *pd)
{
void *in;
void *tisc;
@@ -2496,6 +2717,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
return -ENOMEM;
MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
+ MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
@@ -2580,7 +2802,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
&qp->raw_packet_qp.sq,
- sl & 0xf);
+ sl & 0xf, qp->ibqp.pd);
return 0;
}
@@ -2728,9 +2950,9 @@ static int ib_mask_to_mlx5_opt(int ib_mask)
return result;
}
-static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq, int new_state,
- const struct mlx5_modify_raw_qp_param *raw_qp_param)
+static int modify_raw_packet_qp_rq(
+ struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
void *in;
void *rqc;
@@ -2743,6 +2965,7 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
return -ENOMEM;
MLX5_SET(modify_rq_in, in, rq_state, rq->state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(rqc, rqc, state, new_state);
@@ -2753,8 +2976,9 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
} else
- pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
- dev->ib_dev.name);
+ dev_info_once(
+ &dev->ib_dev.dev,
+ "RAW PACKET QP counters are not supported on current FW\n");
}
err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
@@ -2768,10 +2992,9 @@ out:
return err;
}
-static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq,
- int new_state,
- const struct mlx5_modify_raw_qp_param *raw_qp_param)
+static int modify_raw_packet_qp_sq(
+ struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
struct mlx5_rate_limit old_rl = ibqp->rl;
@@ -2788,6 +3011,7 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
if (!in)
return -ENOMEM;
+ MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
MLX5_SET(modify_sq_in, in, sq_state, sq->state);
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
@@ -2890,7 +3114,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
if (modify_rq) {
- err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
+ err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
+ qp->ibqp.pd);
if (err)
return err;
}
@@ -2898,17 +3123,50 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (modify_sq) {
if (tx_affinity) {
err = modify_raw_packet_tx_affinity(dev->mdev, sq,
- tx_affinity);
+ tx_affinity,
+ qp->ibqp.pd);
if (err)
return err;
}
- return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
+ return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
+ raw_qp_param, qp->ibqp.pd);
}
return 0;
}
+static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_pd *pd,
+ struct mlx5_ib_qp_base *qp_base,
+ u8 port_num)
+{
+ struct mlx5_ib_ucontext *ucontext = NULL;
+ unsigned int tx_port_affinity;
+
+ if (pd && pd->ibpd.uobject && pd->ibpd.uobject->context)
+ ucontext = to_mucontext(pd->ibpd.uobject->context);
+
+ if (ucontext) {
+ tx_port_affinity = (unsigned int)atomic_add_return(
+ 1, &ucontext->tx_port_affinity) %
+ MLX5_MAX_PORTS +
+ 1;
+ mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
+ tx_port_affinity, qp_base->mqp.qpn, ucontext);
+ } else {
+ tx_port_affinity =
+ (unsigned int)atomic_add_return(
+ 1, &dev->roce[port_num].tx_port_affinity) %
+ MLX5_MAX_PORTS +
+ 1;
+ mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
+ tx_port_affinity, qp_base->mqp.qpn);
+ }
+
+ return tx_port_affinity;
+}
+
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, enum ib_qp_state new_state,
@@ -2974,6 +3232,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (!context)
return -ENOMEM;
+ pd = get_pd(qp);
context->flags = cpu_to_be32(mlx5_st << 16);
if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
@@ -3002,9 +3261,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (mlx5_lag_is_active(dev->mdev)) {
u8 p = mlx5_core_native_port_num(dev->mdev);
- tx_affinity = (unsigned int)atomic_add_return(1,
- &dev->roce[p].next_port) %
- MLX5_MAX_PORTS + 1;
+ tx_affinity = get_tx_affinity(dev, pd, base, p);
context->flags |= cpu_to_be32(tx_affinity << 24);
}
}
@@ -3062,7 +3319,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
goto out;
}
- pd = get_pd(qp);
get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
@@ -3092,8 +3348,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
}
- if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
- context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+ __be32 access_flags = 0;
+
+ err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
+ if (err)
+ goto out;
+
+ context->params2 |= access_flags;
+ }
if (attr_mask & IB_QP_MIN_RNR_TIMER)
context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
@@ -3243,7 +3506,9 @@ static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new
int req = IB_QP_STATE;
int opt = 0;
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ if (new_state == IB_QPS_RESET) {
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
return is_valid_mask(attr_mask, req, opt);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
@@ -3307,10 +3572,14 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
MLX5_SET(dctc, dctc, rwe, 1);
if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
- if (!mlx5_ib_dc_atomic_is_supported(dev))
+ int atomic_mode;
+
+ atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
+ if (atomic_mode < 0)
return -EOPNOTSUPP;
+
+ MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
MLX5_SET(dctc, dctc, rae, 1);
- MLX5_SET(dctc, dctc, atomic_mode, MLX5_ATOMIC_MODE_DCT_CX);
}
MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
MLX5_SET(dctc, dctc, port, attr->port_num);
@@ -3367,7 +3636,6 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
size_t required_cmd_sz;
int err = -EINVAL;
int port;
- enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
if (ibqp->rwq_ind_tbl)
return -ENOSYS;
@@ -3413,7 +3681,6 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
}
if (qp->flags & MLX5_IB_QP_UNDERLAY) {
@@ -3424,7 +3691,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
qp_type != MLX5_IB_QPT_DCI &&
- !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
+ attr_mask)) {
mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
cur_state, new_state, ibqp->qp_type, attr_mask);
goto out;
@@ -4371,6 +4639,12 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
u8 next_fence = 0;
u8 fence;
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
@@ -4380,13 +4654,6 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
- err = -EIO;
- *bad_wr = wr;
- nreq = 0;
- goto out;
- }
-
for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
mlx5_ib_warn(dev, "\n");
@@ -4700,18 +4967,17 @@ static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int ind;
int i;
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
spin_lock_irqsave(&qp->rq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
- err = -EIO;
- *bad_wr = wr;
- nreq = 0;
- goto out;
- }
-
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; nreq++, wr = wr->next) {
@@ -5175,6 +5441,7 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_xrcd *xrcd;
int err;
+ u16 uid;
if (!MLX5_CAP_GEN(dev->mdev, xrc))
return ERR_PTR(-ENOSYS);
@@ -5183,12 +5450,14 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
if (!xrcd)
return ERR_PTR(-ENOMEM);
- err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
+ uid = context ? to_mucontext(context)->devx_uid : 0;
+ err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, uid);
if (err) {
kfree(xrcd);
return ERR_PTR(-ENOMEM);
}
+ xrcd->uid = uid;
return &xrcd->ibxrcd;
}
@@ -5196,9 +5465,10 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
+ u16 uid = to_mxrcd(xrcd)->uid;
int err;
- err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
+ err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, uid);
if (err)
mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
@@ -5268,6 +5538,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
MLX5_SET(rqc, rqc, mem_rq_type,
MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
@@ -5443,8 +5714,7 @@ static int prepare_user_rq(struct ib_pd *pd,
err = create_user_rq(dev, pd, rwq, &ucmd);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
- if (err)
- return err;
+ return err;
}
rwq->user_index = ucmd.user_index;
@@ -5573,6 +5843,9 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
for (i = 0; i < sz; i++)
MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
+ rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
+ MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
+
err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
kvfree(in);
@@ -5591,7 +5864,7 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
return &rwq_ind_tbl->ib_rwq_ind_tbl;
err_copy:
- mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+ mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
err:
kfree(rwq_ind_tbl);
return ERR_PTR(err);
@@ -5602,7 +5875,7 @@ int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
- mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+ mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
kfree(rwq_ind_tbl);
return 0;
@@ -5653,6 +5926,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
if (wq_state == IB_WQS_ERR)
wq_state = MLX5_RQC_STATE_ERR;
MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
MLX5_SET(rqc, rqc, state, wq_state);
if (wq_attr_mask & IB_WQ_FLAGS) {
@@ -5684,8 +5958,9 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
MLX5_SET(rqc, rqc, counter_set_id,
dev->port->cnts.set_id);
} else
- pr_info_once("%s: Receive WQ counters are not supported on current FW\n",
- dev->ib_dev.name);
+ dev_info_once(
+ &dev->ib_dev.dev,
+ "Receive WQ counters are not supported on current FW\n");
}
err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
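
get_tx_affinity() above spreads QPs across LAG ports by post-incrementing an
atomic counter: per ucontext when the QP comes from userspace, per port
otherwise. Reduced to its arithmetic (a standalone sketch, not the patch's
code):

	/* atomic_add_return() yields 1, 2, 3, ...; % binds tighter than +,
	 * so the result cycles through 1..MLX5_MAX_PORTS. */
	static unsigned int pick_tx_port(atomic_t *counter)
	{
		return (unsigned int)atomic_add_return(1, counter) %
		       MLX5_MAX_PORTS + 1;
	}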
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index d359fecf7a5b..d012e7dbcc38 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -144,6 +144,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->page_offset = offset;
+ in->uid = to_mpd(pd)->uid;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
in->type != IB_SRQT_BASIC)
in->user_index = uidx;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 093f7755c843..2e5dc0a67cfc 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -58,8 +58,9 @@ static int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
ret = ib_query_port(&dev->ib_dev, port_num, tprops);
if (ret) {
- printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n",
- ret, dev->ib_dev.name, port_num);
+ dev_warn(&dev->ib_dev.dev,
+ "ib_query_port failed (%d) forport %d\n", ret,
+ port_num);
goto out;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index f3e80dec1334..92c49bff22bc 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -986,7 +986,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
goto err_free_dev;
}
- if (mthca_cmd_init(mdev)) {
+ err = mthca_cmd_init(mdev);
+ if (err) {
mthca_err(mdev, "Failed to init command interface, aborting.\n");
goto err_free_dev;
}
@@ -1014,8 +1015,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
err = mthca_setup_hca(mdev);
if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
- if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
- pci_free_irq_vectors(pdev);
+ pci_free_irq_vectors(pdev);
mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
err = mthca_setup_hca(mdev);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 0d3473b4596e..691c6f048938 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1076,16 +1076,17 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
return err;
}
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
@@ -1103,23 +1104,26 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
return sprintf(buf, "unknown\n");
}
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mthca_dev_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *mthca_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group mthca_attr_group = {
+ .attrs = mthca_dev_attributes,
};
static int mthca_init_node_data(struct mthca_dev *dev)
@@ -1192,13 +1196,11 @@ static void get_dev_fw_str(struct ib_device *device, char *str)
int mthca_register_device(struct mthca_dev *dev)
{
int ret;
- int i;
ret = mthca_init_node_data(dev);
if (ret)
return ret;
- strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
@@ -1296,20 +1298,12 @@ int mthca_register_device(struct mthca_dev *dev)
mutex_init(&dev->cap_mask_mutex);
+ rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
dev->ib_dev.driver_id = RDMA_DRIVER_MTHCA;
- ret = ib_register_device(&dev->ib_dev, NULL);
+ ret = ib_register_device(&dev->ib_dev, "mthca%d", NULL);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
- ret = device_create_file(&dev->ib_dev.dev,
- mthca_dev_attributes[i]);
- if (ret) {
- ib_unregister_device(&dev->ib_dev);
- return ret;
- }
- }
-
mthca_start_catas_poll(dev);
return 0;
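
The mthca conversion above (and the matching nes and ocrdma hunks below)
follows one recipe: declare each attribute with DEVICE_ATTR_RO(), collect
the attributes in an attribute_group, and hand the group to the core before
registration so that ib_register_device() creates and removes the sysfs
files itself. The skeleton, with placeholder names:

	static ssize_t hw_rev_show(struct device *device,
				   struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%x\n", 0);	/* placeholder value */
	}
	static DEVICE_ATTR_RO(hw_rev);

	static struct attribute *example_attrs[] = {
		&dev_attr_hw_rev.attr,
		NULL
	};
	static const struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};

	/* at registration time: */
	rdma_set_device_sysfs_group(&dev->ib_dev, &example_attr_group);
	ret = ib_register_device(&dev->ib_dev, "example%d", NULL);

This also removes the error-unwinding loops the drivers previously needed
around device_create_file().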
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 3d37f2373d63..9d178ee3c96a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -872,8 +872,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_UNSPECIFIED)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask)) {
mthca_dbg(dev, "Bad QP transition (transport %d) "
"%d->%d with attr 0x%08x\n",
qp->transport, cur_state, new_state,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 42b68aa999fc..e00add6d78ec 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -456,9 +456,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
void __iomem *mmio_regs = NULL;
u8 hw_rev;
- assert(pcidev != NULL);
- assert(ent != NULL);
-
printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. (%s)\n",
DRV_VERSION, pci_name(pcidev));
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bedaa02749fb..a895fe980d10 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -149,18 +149,9 @@ do { \
printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args); \
} while (0)
-#define assert(expr) \
-do { \
- if (!(expr)) { \
- printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-
#define NES_EVENT_TIMEOUT 1200000
#else
#define nes_debug(level, fmt, args...) no_printk(fmt, ##args)
-#define assert(expr) do {} while (0)
#define NES_EVENT_TIMEOUT 100000
#endif
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index bd0675d8f298..5517e392bc01 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1443,7 +1443,7 @@ static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_inde
mdelay(1);
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- } while ((temp_phy_data2 == temp_phy_data));
+ } while (temp_phy_data2 == temp_phy_data);
/* wait for tracking */
counter = 0;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 61014e251555..16f33454c198 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -146,8 +146,6 @@ static int nes_netdev_open(struct net_device *netdev)
struct list_head *list_pos, *list_temp;
unsigned long flags;
- assert(nesdev != NULL);
-
if (nesvnic->netdev_open == 1)
return 0;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 6940c7215961..92d1cadd4cfd 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -687,7 +687,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
}
nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
- nespd, nesvnic->nesibdev->ibdev.name);
+ nespd, dev_name(&nesvnic->nesibdev->ibdev.dev));
nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;
@@ -2556,8 +2556,8 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
/**
* hw_rev_show
*/
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct nes_ib_device *nesibdev =
container_of(dev, struct nes_ib_device, ibdev.dev);
@@ -2566,40 +2566,40 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
nes_debug(NES_DBG_INIT, "\n");
return sprintf(buf, "%x\n", nesvnic->nesdev->nesadapter->hw_rev);
}
-
+static DEVICE_ATTR_RO(hw_rev);
/**
* hca_type_show
*/
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
nes_debug(NES_DBG_INIT, "\n");
return sprintf(buf, "NES020\n");
}
-
+static DEVICE_ATTR_RO(hca_type);
/**
* board_id_show
*/
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
nes_debug(NES_DBG_INIT, "\n");
return sprintf(buf, "%.*s\n", 32, "NES020 Board ID");
}
+static DEVICE_ATTR_RO(board_id);
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-
-static struct device_attribute *nes_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static struct attribute *nes_dev_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
};
+static const struct attribute_group nes_attr_group = {
+ .attrs = nes_dev_attributes,
+};
/**
* nes_query_qp
@@ -3640,7 +3640,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
if (nesibdev == NULL) {
return NULL;
}
- strlcpy(nesibdev->ibdev.name, "nes%d", IB_DEVICE_NAME_MAX);
nesibdev->ibdev.owner = THIS_MODULE;
nesibdev->ibdev.node_type = RDMA_NODE_RNIC;
@@ -3795,10 +3794,11 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
struct nes_vnic *nesvnic = nesibdev->nesvnic;
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- int i, ret;
+ int ret;
+ rdma_set_device_sysfs_group(&nesvnic->nesibdev->ibdev, &nes_attr_group);
nesvnic->nesibdev->ibdev.driver_id = RDMA_DRIVER_NES;
- ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL);
+ ret = ib_register_device(&nesvnic->nesibdev->ibdev, "nes%d", NULL);
if (ret) {
return ret;
}
@@ -3809,19 +3809,6 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
nesibdev->max_qp = (nesadapter->max_qp-NES_FIRST_QPN) / nesadapter->port_count;
nesibdev->max_pd = nesadapter->max_pd / nesadapter->port_count;
- for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
- ret = device_create_file(&nesibdev->ibdev.dev, nes_dev_attributes[i]);
- if (ret) {
- while (i > 0) {
- i--;
- device_remove_file(&nesibdev->ibdev.dev,
- nes_dev_attributes[i]);
- }
- ib_unregister_device(&nesibdev->ibdev);
- return ret;
- }
- }
-
nesvnic->of_device_registered = 1;
return 0;
@@ -3834,15 +3821,9 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
{
struct nes_vnic *nesvnic = nesibdev->nesvnic;
- int i;
- for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
- device_remove_file(&nesibdev->ibdev.dev, nes_dev_attributes[i]);
- }
-
- if (nesvnic->of_device_registered) {
+ if (nesvnic->of_device_registered)
ib_unregister_device(&nesibdev->ibdev);
- }
nesvnic->of_device_registered = 0;
}
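
The nes conversion above is the template for the rest of this series. A
minimal sketch of the pattern, using a hypothetical foo driver (the
foo_* names are illustrative only): DEVICE_ATTR_RO(<name>) pairs with a
<name>_show function, the attributes collect into one attribute_group,
and rdma_set_device_sysfs_group() hands that group to the RDMA core so
the files are created and removed inside ib_register_device() and
ib_unregister_device() rather than by per-driver loops.

static ssize_t hw_rev_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct foo_dev *fdev = dev_get_drvdata(dev);	/* hypothetical */

	return scnprintf(buf, PAGE_SIZE, "%x\n", fdev->hw_rev);
}
static DEVICE_ATTR_RO(hw_rev);

static struct attribute *foo_attributes[] = {
	&dev_attr_hw_rev.attr,
	NULL
};

static const struct attribute_group foo_attr_group = {
	.attrs = foo_attributes,
};

/* before ib_register_device(): */
rdma_set_device_sysfs_group(&fdev->ibdev, &foo_attr_group);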
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index e578281471af..241a57a07485 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -792,7 +792,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
qp->srq->ibsrq.
srq_context);
} else if (dev_event) {
- pr_err("%s: Fatal event received\n", dev->ibdev.name);
+ dev_err(&dev->ibdev.dev, "Fatal event received\n");
ib_dispatch_event(&ib_evt);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 7832ee3e0c84..873cc7f6fe61 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -114,9 +114,37 @@ static void get_dev_fw_str(struct ib_device *device, char *str)
snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", &dev->attr.fw_ver[0]);
}
+/* OCRDMA sysfs interface */
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *ocrdma_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group ocrdma_attr_group = {
+ .attrs = ocrdma_attributes,
+};
+
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
- strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
@@ -213,8 +241,9 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.destroy_srq = ocrdma_destroy_srq;
dev->ibdev.post_srq_recv = ocrdma_post_srq_recv;
}
+ rdma_set_device_sysfs_group(&dev->ibdev, &ocrdma_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_OCRDMA;
- return ib_register_device(&dev->ibdev, NULL);
+ return ib_register_device(&dev->ibdev, "ocrdma%d", NULL);
}
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
@@ -260,42 +289,9 @@ static void ocrdma_free_resources(struct ocrdma_dev *dev)
kfree(dev->cq_tbl);
}
-/* OCRDMA sysfs interface */
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
-}
-
-static ssize_t show_hca_type(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
-
-static struct device_attribute *ocrdma_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type
-};
-
-static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
- device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
-}
-
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
- int status = 0, i;
+ int status = 0;
u8 lstate = 0;
struct ocrdma_dev *dev;
@@ -331,9 +327,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (!status)
ocrdma_update_link_state(dev, lstate);
- for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
- if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
- goto sysfs_err;
/* Init stats */
ocrdma_add_port_stats(dev);
/* Interrupt Moderation */
@@ -348,8 +341,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
dev_name(&dev->nic_info.pdev->dev), dev->id);
return dev;
-sysfs_err:
- ocrdma_remove_sysfiles(dev);
alloc_err:
ocrdma_free_resources(dev);
ocrdma_cleanup_hw(dev);
@@ -376,7 +367,6 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
* of the registered clients.
*/
cancel_delayed_work_sync(&dev->eqd_work);
- ocrdma_remove_sysfiles(dev);
ib_unregister_device(&dev->ibdev);
ocrdma_rem_port_stats(dev);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 24d20a4aa262..290d776edf48 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -764,7 +764,8 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
return;
/* Create post stats base dir */
- dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
+ dev->dir =
+ debugfs_create_dir(dev_name(&dev->ibdev.dev), ocrdma_dbgfs_dir);
if (!dev->dir)
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c158ca9fde6d..06d2a7f3304c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1480,8 +1480,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_qps = old_qps;
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
__func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index a0af6d424aed..8d6ff9df49fe 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -133,6 +133,33 @@ static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+/* QEDR sysfs interface */
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qedr_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *qedr_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group qedr_attr_group = {
+ .attrs = qedr_attributes,
+};
+
static int qedr_iw_register_device(struct qedr_dev *dev)
{
dev->ibdev.node_type = RDMA_NODE_RNIC;
@@ -170,8 +197,6 @@ static int qedr_register_device(struct qedr_dev *dev)
{
int rc;
- strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
-
dev->ibdev.node_guid = dev->attr.node_guid;
memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
dev->ibdev.owner = THIS_MODULE;
@@ -262,9 +287,9 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.get_link_layer = qedr_link_layer;
dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
-
+ rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
- return ib_register_device(&dev->ibdev, NULL);
+ return ib_register_device(&dev->ibdev, "qedr%d", NULL);
}
/* This function allocates fast-path status block memory */
@@ -404,37 +429,6 @@ err1:
return rc;
}
-/* QEDR sysfs interface */
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct qedr_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
-}
-
-static ssize_t show_hca_type(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
-
-static struct device_attribute *qedr_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type
-};
-
-static void qedr_remove_sysfiles(struct qedr_dev *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
- device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
-}
-
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
int rc = pci_enable_atomic_ops_to_root(pdev,
@@ -855,7 +849,7 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
{
struct qed_dev_rdma_info dev_info;
struct qedr_dev *dev;
- int rc = 0, i;
+ int rc = 0;
dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
if (!dev) {
@@ -914,18 +908,12 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
goto reg_err;
}
- for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
- if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
- goto sysfs_err;
-
if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
return dev;
-sysfs_err:
- ib_unregister_device(&dev->ibdev);
reg_err:
qedr_sync_free_irqs(dev);
irq_err:
@@ -944,7 +932,6 @@ static void qedr_remove(struct qedr_dev *dev)
/* First unregister with stack to stop all the active traffic
* of the registered clients.
*/
- qedr_remove_sysfiles(dev);
ib_unregister_device(&dev->ibdev);
qedr_stop_hw(dev);
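
Taken together, qedr's registration now follows the same shape as nes
and ocrdma above. A consolidated sketch of the new calling convention
(error handling trimmed; names as in the patch): the "%d" format string
moves into ib_register_device(), which assigns the instance number and
owns the resulting name, so later code reads it back via dev_name().

	rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
	dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
	rc = ib_register_device(&dev->ibdev, "qedr%d", NULL);
	if (!rc)
		DP_DEBUG(dev, QEDR_MSG_INIT, "registered %s\n",
			 dev_name(&dev->ibdev.dev));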
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index a2d708dceb8d..53bbe6b4e6e6 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -43,7 +43,7 @@
#include "qedr_hsi_rdma.h"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
-#define DP_NAME(dev) ((dev)->ibdev.name)
+#define DP_NAME(_dev) dev_name(&(_dev)->ibdev.dev)
#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index 85578887421b..e1ac2fd60bb1 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -519,9 +519,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
}
if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
- packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+ packet->tx_dest = QED_LL2_TX_DEST_LB;
else
- packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+ packet->tx_dest = QED_LL2_TX_DEST_NW;
packet->roce_mode = roce_mode;
memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 8cc3df24e04e..82ee4b4a7084 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1447,7 +1447,6 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
u64 pbl_base_addr, phy_prod_pair_addr;
struct ib_ucontext *ib_ctx = NULL;
struct qedr_srq_hwq_info *hw_srq;
- struct qedr_ucontext *ctx = NULL;
u32 page_cnt, page_size;
struct qedr_srq *srq;
int rc = 0;
@@ -1473,7 +1472,6 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
if (udata && ibpd->uobject && ibpd->uobject->context) {
ib_ctx = ibpd->uobject->context;
- ctx = get_qedr_ucontext(ib_ctx);
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
DP_ERR(dev,
@@ -2240,8 +2238,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (rdma_protocol_roce(&dev->ibdev, 1)) {
if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
- ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ ibqp->qp_type, attr_mask)) {
DP_ERR(dev,
"modify qp: invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 3461df002f81..83d2349188db 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1390,13 +1390,13 @@ static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
*/
extern const char ib_qib_version[];
+extern const struct attribute_group qib_attr_group;
int qib_device_create(struct qib_devdata *);
void qib_device_remove(struct qib_devdata *);
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
struct kobject *kobj);
-int qib_verbs_register_sysfs(struct qib_devdata *);
void qib_verbs_unregister_sysfs(struct qib_devdata *);
/* Hook for sysfs read of QSFP */
extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 5ac7b31c346b..30595b358d8f 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -597,7 +597,6 @@ qib_pci_resume(struct pci_dev *pdev)
struct qib_devdata *dd = pci_get_drvdata(pdev);
qib_devinfo(pdev, "QIB resume function called\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
/*
* Running jobs will fail, since it's asynchronous
* unlike sysfs-requested reset. Better than
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 344e401915f7..a81905df2d0f 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -378,25 +378,22 @@ void qib_flush_qp_waiters(struct rvt_qp *qp)
* qib_check_send_wqe - validate wr/wqe
* @qp - The qp
* @wqe - The built wqe
+ * @call_send - Determine if the send should be posted or scheduled
*
- * validate wr/wqe. This is called
- * prior to inserting the wqe into
- * the ring but after the wqe has been
- * setup.
- *
- * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
+ * Returns 0 on success, -EINVAL on failure
*/
int qib_check_send_wqe(struct rvt_qp *qp,
- struct rvt_swqe *wqe)
+ struct rvt_swqe *wqe, bool *call_send)
{
struct rvt_ah *ah;
- int ret = 0;
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:
if (wqe->length > 0x80000000U)
return -EINVAL;
+ if (wqe->length > qp->pmtu)
+ *call_send = false;
break;
case IB_QPT_SMI:
case IB_QPT_GSI:
@@ -405,12 +402,12 @@ int qib_check_send_wqe(struct rvt_qp *qp,
if (wqe->length > (1 << ah->log_pmtu))
return -EINVAL;
/* progress hint */
- ret = 1;
+ *call_send = true;
break;
default:
break;
}
- return ret;
+ return 0;
}
#ifdef CONFIG_DEBUG_FS
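
A hedged sketch of how a caller honors the new qib_check_send_wqe()
contract (the real consumer is rdmavt's post-send path, which differs
in detail): the driver hook only validates the WQE and adjusts the
hint, and the caller decides between immediate progress and scheduling.

	int err;
	/* direct progress is only worthwhile when the queue was empty */
	bool call_send = (qp->s_head == qp->s_last);

	err = qib_check_send_wqe(qp, wqe, &call_send);
	if (err)
		return err;		/* WQE rejected, nothing posted */
	if (call_send)
		qib_do_send(qp);	/* push progress on this CPU */
	else
		qib_schedule_send(qp);	/* defer to the send engine */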
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index f35fdeb14347..6fa002940451 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -254,7 +254,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
/* will get called again */
goto done;
@@ -838,7 +838,7 @@ void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
qib_migrate_qp(qp);
qp->s_retry = qp->s_retry_cnt;
} else if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
return;
} else /* XXX need to handle delayed completion */
@@ -1221,7 +1221,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
ibp->rvp.n_other_naks++;
class_b:
if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
break;
@@ -1425,7 +1425,8 @@ read_middle:
qp->s_rdma_read_len -= pmtu;
update_last_psn(qp, psn);
spin_unlock_irqrestore(&qp->s_lock, flags);
- qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, pmtu, false, false);
goto bail;
case OP(RDMA_READ_RESPONSE_ONLY):
@@ -1471,7 +1472,8 @@ read_last:
if (unlikely(tlen != qp->s_rdma_read_len))
goto ack_len_err;
aeth = be32_to_cpu(ohdr->u.aeth);
- qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, tlen, false, false);
WARN_ON(qp->s_rdma_read_sge.num_sge);
(void) do_rc_ack(qp, aeth, psn,
OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
@@ -1490,7 +1492,7 @@ ack_len_err:
status = IB_WC_LOC_LEN_ERR;
ack_err:
if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
ack_done:
@@ -1844,7 +1846,7 @@ send_middle:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto nack_inv;
- qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -1890,7 +1892,7 @@ send_last:
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
qp->r_msn++;
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
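
The SGE copy helper moves from the driver into rdmavt with the same
semantics; a sketch of the argument mapping, under the assumption
(consistent with the hunks above) that the two trailing bools mean
"release the MR reference" and a copy-last hint that only the adaptive
SGE copy modes consume -- qib selects RVT_SGE_COPY_MEMCPY later in this
series, so it always passes false:

	/* old: qib_copy_sge(&qp->r_sge, data, pmtu, 1); */
	rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);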
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index f8a7de795beb..1fa21938f310 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -171,307 +171,6 @@ err:
}
/**
- * qib_ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from qib_do_send() to
- * forward a WQE addressed to the same HCA.
- * Note that although we are single threaded due to the tasklet, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void qib_ruc_loopback(struct rvt_qp *sqp)
-{
- struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- struct rvt_sge *sge;
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
- atomic64_t *maddr;
- enum ib_wc_status send_status;
- int release;
- int ret;
-
- rcu_read_lock();
- /*
- * Note that we check the responder QP state after
- * checking the requester's state.
- */
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
- if (!qp)
- goto done;
-
- spin_lock_irqsave(&sqp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
- !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- goto unlock;
-
- sqp->s_flags |= RVT_S_BUSY;
-
-again:
- if (sqp->s_last == READ_ONCE(sqp->s_head))
- goto clr_busy;
- wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
-
- /* Return if it is not OK to start a new work request. */
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
- goto clr_busy;
- /* We are in the error state, flush the work request. */
- send_status = IB_WC_WR_FLUSH_ERR;
- goto flush_send;
- }
-
- /*
- * We can rely on the entry not changing without the s_lock
- * being held until we update s_last.
- * We increment s_cur to indicate s_last is in progress.
- */
- if (sqp->s_last == sqp->s_cur) {
- if (++sqp->s_cur >= sqp->s_size)
- sqp->s_cur = 0;
- }
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-
- if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
- qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->rvp.n_pkt_drops++;
- /*
- * For RC, the requester would timeout and retry so
- * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
- goto serr;
- }
-
- memset(&wc, 0, sizeof(wc));
- send_status = IB_WC_SUCCESS;
-
- release = 1;
- sqp->s_sge.sge = wqe->sg_list[0];
- sqp->s_sge.sg_list = wqe->sg_list + 1;
- sqp->s_sge.num_sge = wqe->wr.num_sge;
- sqp->s_len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
- case IB_WR_SEND:
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- break;
-
- case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- /* FALLTHROUGH */
- case IB_WR_RDMA_WRITE:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- if (wqe->length == 0)
- break;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_WRITE)))
- goto acc_err;
- qp->r_sge.sg_list = NULL;
- qp->r_sge.num_sge = 1;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
- release = 0;
- sqp->s_sge.sg_list = NULL;
- sqp->s_sge.num_sge = 1;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->wr.num_sge;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- wqe->atomic_wr.remote_addr,
- wqe->atomic_wr.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
- sdata = wqe->atomic_wr.compare_add;
- *(u64 *) sqp->s_sge.sge.vaddr =
- (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- sdata, wqe->atomic_wr.swap);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- goto send_comp;
-
- default:
- send_status = IB_WC_LOC_QP_OP_ERR;
- goto serr;
- }
-
- sge = &sqp->s_sge.sge;
- while (sqp->s_len) {
- u32 len = sqp->s_len;
-
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (!release)
- rvt_put_mr(sge->mr);
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- sqp->s_len -= len;
- }
- if (release)
- rvt_put_ss(&qp->r_sge);
-
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto send_comp;
-
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
- spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->rvp.n_loop_pkts++;
-flush_send:
- sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- qib_send_complete(sqp, wqe, send_status);
- goto again;
-
-rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- ibp->rvp.n_rnr_naks++;
- /*
- * Note: we don't need the s_lock held since the BUSY flag
- * makes this single threaded.
- */
- if (sqp->s_rnr_retry == 0) {
- send_status = IB_WC_RNR_RETRY_EXC_ERR;
- goto serr;
- }
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
- goto clr_busy;
- rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
- IB_AETH_CREDIT_SHIFT);
- goto clr_busy;
-
-op_err:
- send_status = IB_WC_REM_OP_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-acc_err:
- send_status = IB_WC_REM_ACCESS_ERR;
- wc.status = IB_WC_LOC_PROT_ERR;
-err:
- /* responder goes to error state */
- rvt_rc_error(qp, wc.status);
-
-serr:
- spin_lock_irqsave(&sqp->s_lock, flags);
- qib_send_complete(sqp, wqe, send_status);
- if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
- sqp->s_flags &= ~RVT_S_BUSY;
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = sqp->ibqp.device;
- ev.element.qp = &sqp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
- }
- goto done;
- }
-clr_busy:
- sqp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
- rcu_read_unlock();
-}
-
-/**
* qib_make_grh - construct a GRH header
* @ibp: a pointer to the IB port
* @hdr: a pointer to the GRH header being constructed
@@ -573,7 +272,7 @@ void qib_do_send(struct rvt_qp *qp)
qp->ibqp.qp_type == IB_QPT_UC) &&
(rdma_ah_get_dlid(&qp->remote_ah_attr) &
~((1 << ppd->lmc) - 1)) == ppd->lid) {
- qib_ruc_loopback(qp);
+ rvt_ruc_loopback(qp);
return;
}
@@ -613,42 +312,3 @@ void qib_do_send(struct rvt_qp *qp)
spin_unlock_irqrestore(&qp->s_lock, flags);
}
-
-/*
- * This should be called with s_lock held.
- */
-void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status)
-{
- u32 old_last, last;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- return;
-
- last = qp->s_last;
- old_last = last;
- if (++last >= qp->s_size)
- last = 0;
- qp->s_last = last;
- /* See post_send() */
- barrier();
- rvt_put_swqe(wqe);
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
- rvt_qp_swqe_complete(qp,
- wqe,
- ib_qib_wc_opcode[wqe->wr.opcode],
- status);
-
- if (qp->s_acked == old_last)
- qp->s_acked = last;
- if (qp->s_cur == old_last)
- qp->s_cur = last;
- if (qp->s_tail == old_last)
- qp->s_tail = last;
- if (qp->state == IB_QPS_SQD && last == qp->s_cur)
- qp->s_draining = 0;
-}
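
For reference, the heart of the deleted qib_send_complete() is the
circular send-queue advance that the generic rvt_send_complete() now
performs; a minimal sketch of that index arithmetic, lifted from the
removed code:

	u32 last = qp->s_last;
	u32 old_last = last;

	if (++last >= qp->s_size)	/* wrap around the ring */
		last = 0;
	qp->s_last = last;
	/* cursors still parked on the retired slot advance with it */
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;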
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index d0723d4aef5c..757d4c9d713d 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -651,7 +651,7 @@ unmap:
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
rvt_error_qp(qp, IB_WC_GENERAL_ERR);
} else if (qp->s_wqe)
- qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock(&qp->s_lock);
spin_unlock(&qp->r_lock);
/* return zero to process the next send work request */
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index ca2638d8f35e..1cf4ca3f23e3 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -551,17 +551,18 @@ static struct kobj_type qib_diagc_ktype = {
* Start of per-unit (or driver, in some cases, but replicated
* per unit) functions (these get a device *)
*/
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
{
struct qib_ibdev *dev =
container_of(device, struct qib_ibdev, rdi.ibdev.dev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
container_of(device, struct qib_ibdev, rdi.ibdev.dev);
@@ -574,15 +575,18 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
return ret;
}
+static DEVICE_ATTR_RO(hca_type);
+static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);
-static ssize_t show_version(struct device *device,
+static ssize_t version_show(struct device *device,
struct device_attribute *attr, char *buf)
{
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
}
+static DEVICE_ATTR_RO(version);
-static ssize_t show_boardversion(struct device *device,
+static ssize_t boardversion_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -592,9 +596,9 @@ static ssize_t show_boardversion(struct device *device,
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
+static DEVICE_ATTR_RO(boardversion);
-
-static ssize_t show_localbus_info(struct device *device,
+static ssize_t localbus_info_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -604,9 +608,9 @@ static ssize_t show_localbus_info(struct device *device,
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
}
+static DEVICE_ATTR_RO(localbus_info);
-
-static ssize_t show_nctxts(struct device *device,
+static ssize_t nctxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -620,9 +624,10 @@ static ssize_t show_nctxts(struct device *device,
(dd->first_user_ctxt > dd->cfgctxts) ? 0 :
(dd->cfgctxts - dd->first_user_ctxt));
}
+static DEVICE_ATTR_RO(nctxts);
-static ssize_t show_nfreectxts(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t nfreectxts_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
container_of(device, struct qib_ibdev, rdi.ibdev.dev);
@@ -631,8 +636,9 @@ static ssize_t show_nfreectxts(struct device *device,
/* Return the number of free user ports (contexts) available. */
return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}
+static DEVICE_ATTR_RO(nfreectxts);
-static ssize_t show_serial(struct device *device,
+static ssize_t serial_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -644,8 +650,9 @@ static ssize_t show_serial(struct device *device,
strcat(buf, "\n");
return strlen(buf);
}
+static DEVICE_ATTR_RO(serial);
-static ssize_t store_chip_reset(struct device *device,
+static ssize_t chip_reset_store(struct device *device,
struct device_attribute *attr, const char *buf,
size_t count)
{
@@ -663,11 +670,12 @@ static ssize_t store_chip_reset(struct device *device,
bail:
return ret < 0 ? ret : count;
}
+static DEVICE_ATTR_WO(chip_reset);
/*
* Dump tempsense regs. in decimal, to ease shell-scripts.
*/
-static ssize_t show_tempsense(struct device *device,
+static ssize_t tempsense_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -695,6 +703,7 @@ static ssize_t show_tempsense(struct device *device,
*(signed char *)(regvals + 7));
return ret;
}
+static DEVICE_ATTR_RO(tempsense);
/*
* end of per-unit (or driver, in some cases, but replicated
@@ -702,30 +711,23 @@ static ssize_t show_tempsense(struct device *device,
*/
/* start of per-unit file structures and support code */
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
-static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
-static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
-static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
-static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-
-static struct device_attribute *qib_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
- &dev_attr_version,
- &dev_attr_nctxts,
- &dev_attr_nfreectxts,
- &dev_attr_serial,
- &dev_attr_boardversion,
- &dev_attr_tempsense,
- &dev_attr_localbus_info,
- &dev_attr_chip_reset,
+static struct attribute *qib_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_version.attr,
+ &dev_attr_nctxts.attr,
+ &dev_attr_nfreectxts.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_boardversion.attr,
+ &dev_attr_tempsense.attr,
+ &dev_attr_localbus_info.attr,
+ &dev_attr_chip_reset.attr,
+ NULL,
+};
+
+const struct attribute_group qib_attr_group = {
+ .attrs = qib_attributes,
};
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
@@ -827,27 +829,6 @@ bail:
}
/*
- * Register and create our files in /sys/class/infiniband.
- */
-int qib_verbs_register_sysfs(struct qib_devdata *dd)
-{
- struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
- ret = device_create_file(&dev->dev, qib_attributes[i]);
- if (ret)
- goto bail;
- }
-
- return 0;
-bail:
- for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
- device_remove_file(&dev->dev, qib_attributes[i]);
- return ret;
-}
-
-/*
* Unregister and remove our files in /sys/class/infiniband.
*/
void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 3e54bc11e0ae..30c70ad0f4bf 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -68,7 +68,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
@@ -359,7 +359,7 @@ send_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto rewind;
- qib_copy_sge(&qp->r_sge, data, pmtu, 0);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
break;
case OP(SEND_LAST_WITH_IMMEDIATE):
@@ -385,7 +385,7 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
wc.opcode = IB_WC_RECV;
- qib_copy_sge(&qp->r_sge, data, tlen, 0);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
@@ -449,7 +449,7 @@ rdma_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto drop;
- qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -479,7 +479,7 @@ rdma_last_imm:
}
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
goto last_imm;
@@ -495,7 +495,7 @@ rdma_last:
tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
break;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index f8d029a2390f..4d4c31ea4e2d 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -162,8 +162,8 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);
qib_make_grh(ibp, &grh, grd, 0, 0);
- qib_copy_sge(&qp->r_sge, &grh,
- sizeof(grh), 1);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
@@ -179,7 +179,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (len > sge->sge_length)
len = sge->sge_length;
BUG_ON(len == 0);
- qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
@@ -260,7 +260,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
@@ -304,7 +304,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
qib_ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, tflags);
*flags = tflags;
- qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
goto done;
}
}
@@ -551,12 +551,13 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
goto drop;
}
if (has_grh) {
- qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
- sizeof(struct ib_grh), 1);
+ rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
+ sizeof(struct ib_grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
- qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
+ true, false);
rvt_put_ss(&qp->r_sge);
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 41babbc0db58..4b0f5761a646 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -131,27 +131,6 @@ const enum ib_wc_opcode ib_qib_wc_opcode[] = {
*/
__be64 ib_qib_sys_image_guid;
-/**
- * qib_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
-{
- struct rvt_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = rvt_get_sge_length(sge, length);
-
- WARN_ON_ONCE(len == 0);
- memcpy(sge->vaddr, data, len);
- rvt_update_sge(ss, len, release);
- data += len;
- length -= len;
- }
-}
-
/*
* Count the number of DMA descriptors needed to send length bytes of data.
* Don't modify the qib_sge_state to get the count.
@@ -752,7 +731,7 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
spin_lock(&qp->s_lock);
if (tx->wqe)
- qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
else if (qp->ibqp.qp_type == IB_QPT_RC) {
struct ib_header *hdr;
@@ -1025,7 +1004,7 @@ done:
}
if (qp->s_wqe) {
spin_lock_irqsave(&qp->s_lock, flags);
- qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
spin_unlock_irqrestore(&qp->s_lock, flags);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1512,6 +1491,9 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_mcast_grp;
/* post send table */
dd->verbs_dev.rdi.post_parms = qib_post_parms;
+
+ /* opcode translation table */
+ dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode;
}
/**
@@ -1588,7 +1570,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
- dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
+ dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
@@ -1631,6 +1613,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
+ dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
qib_fill_device_attr(dd);
@@ -1642,19 +1625,14 @@ int qib_register_ib_device(struct qib_devdata *dd)
i,
dd->rcd[ctxt]->pkeys);
}
+ rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev, &qib_attr_group);
ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_QIB);
if (ret)
goto err_tx;
- ret = qib_verbs_register_sysfs(dd);
- if (ret)
- goto err_class;
-
return ret;
-err_class:
- rvt_unregister_device(&dd->verbs_dev.rdi);
err_tx:
while (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
@@ -1716,14 +1694,14 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
* It is only used in post send, which doesn't hold
* the s_lock.
*/
-void _qib_schedule_send(struct rvt_qp *qp)
+bool _qib_schedule_send(struct rvt_qp *qp)
{
struct qib_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_qp_priv *priv = qp->priv;
- queue_work(ppd->qib_wq, &priv->s_work);
+ return queue_work(ppd->qib_wq, &priv->s_work);
}
/**
@@ -1733,8 +1711,9 @@ void _qib_schedule_send(struct rvt_qp *qp)
* This schedules qp progress. The s_lock
* should be held.
*/
-void qib_schedule_send(struct rvt_qp *qp)
+bool qib_schedule_send(struct rvt_qp *qp)
{
if (qib_send_ok(qp))
- _qib_schedule_send(qp);
+ return _qib_schedule_send(qp);
+ return false;
}
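
The bool return costs nothing to produce: queue_work() already reports
whether the work item was newly queued, so _qib_schedule_send() simply
propagates that. A hypothetical caller can then tell whether it raced
with an already-scheduled send engine:

	if (!qib_schedule_send(qp)) {
		/* send engine was already pending; no extra kick needed */
	}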
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 666613eef88f..a4426c24b0d1 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012 - 2018 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -223,8 +223,8 @@ static inline int qib_send_ok(struct rvt_qp *qp)
!(qp->s_flags & RVT_S_ANY_WAIT_SEND));
}
-void _qib_schedule_send(struct rvt_qp *qp);
-void qib_schedule_send(struct rvt_qp *qp);
+bool _qib_schedule_send(struct rvt_qp *qp);
+bool qib_schedule_send(struct rvt_qp *qp);
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
@@ -292,9 +292,6 @@ void qib_put_txreq(struct qib_verbs_txreq *tx);
int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
u32 hdrwords, struct rvt_sge_state *ss, u32 len);
-void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
- int release);
-
void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
@@ -303,7 +300,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
-int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ bool *call_send);
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
@@ -333,9 +331,6 @@ void _qib_do_send(struct work_struct *work);
void qib_do_send(struct rvt_qp *qp);
-void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status);
-
void qib_send_rc_ack(struct rvt_qp *qp);
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c
index 92dc66cc2d50..a3115709fb03 100644
--- a/drivers/infiniband/hw/usnic/usnic_debugfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c
@@ -165,6 +165,5 @@ void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow)
void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow)
{
- if (!IS_ERR_OR_NULL(qp_flow->dbgfs_dentry))
- debugfs_remove(qp_flow->dbgfs_dentry);
+ debugfs_remove(qp_flow->dbgfs_dentry);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index f0538a460328..73bd00f8d2c8 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -76,7 +76,7 @@ static LIST_HEAD(usnic_ib_ibdev_list);
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
struct usnic_ib_vf *vf = obj;
- return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
+ return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
}
/* End callback dump funcs */
@@ -138,7 +138,7 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
netdev = us_ibdev->netdev;
switch (event) {
case NETDEV_REBOOT:
- usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_PORT_ERR;
ib_event.device = &us_ibdev->ib_dev;
@@ -151,7 +151,8 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
if (!us_ibdev->ufdev->link_up &&
netif_carrier_ok(netdev)) {
usnic_fwd_carrier_up(us_ibdev->ufdev);
- usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("Link UP on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
ib_event.event = IB_EVENT_PORT_ACTIVE;
ib_event.device = &us_ibdev->ib_dev;
ib_event.element.port_num = 1;
@@ -159,7 +160,8 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
} else if (us_ibdev->ufdev->link_up &&
!netif_carrier_ok(netdev)) {
usnic_fwd_carrier_down(us_ibdev->ufdev);
- usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("Link DOWN on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_PORT_ERR;
ib_event.device = &us_ibdev->ib_dev;
@@ -168,17 +170,17 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
} else {
usnic_dbg("Ignoring %s on %s\n",
netdev_cmd_to_name(event),
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
break;
case NETDEV_CHANGEADDR:
if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
sizeof(us_ibdev->ufdev->mac))) {
usnic_dbg("Ignoring addr change on %s\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
} else {
usnic_info(" %s old mac: %pM new mac: %pM\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
us_ibdev->ufdev->mac,
netdev->dev_addr);
usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
@@ -193,19 +195,19 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
case NETDEV_CHANGEMTU:
if (us_ibdev->ufdev->mtu != netdev->mtu) {
usnic_info("MTU Change on %s old: %u new: %u\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
us_ibdev->ufdev->mtu, netdev->mtu);
usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
} else {
usnic_dbg("Ignoring MTU change on %s\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
break;
default:
usnic_dbg("Ignoring event %s on %s",
netdev_cmd_to_name(event),
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
mutex_unlock(&us_ibdev->usdev_lock);
}
@@ -267,7 +269,7 @@ static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
default:
usnic_info("Ignoring event %s on %s",
netdev_cmd_to_name(event),
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
mutex_unlock(&us_ibdev->usdev_lock);
@@ -364,7 +366,6 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
us_ibdev->ib_dev.dev.parent = &dev->dev;
us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
- strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);
us_ibdev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -416,7 +417,9 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.driver_id = RDMA_DRIVER_USNIC;
- if (ib_register_device(&us_ibdev->ib_dev, NULL))
+ rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);
+
+ if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", NULL))
goto err_fwd_dealloc;
usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
@@ -437,9 +440,9 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
kref_init(&us_ibdev->vf_cnt);
usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
- us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
- us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
- us_ibdev->ufdev->mtu);
+ dev_name(&us_ibdev->ib_dev.dev),
+ netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac,
+ us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu);
return us_ibdev;
err_fwd_dealloc:
@@ -452,7 +455,7 @@ err_dealloc:
static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
- usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
+ usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_sysfs_unregister_usdev(us_ibdev);
usnic_fwd_dev_free(us_ibdev->ufdev);
ib_unregister_device(&us_ibdev->ib_dev);
@@ -591,7 +594,7 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
mutex_unlock(&pf->usdev_lock);
usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
- pf->ib_dev.name);
+ dev_name(&pf->ib_dev.dev));
usnic_ib_log_vf(vf);
return 0;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 4210ca14014d..a7e4b2ccfaf8 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -46,9 +46,8 @@
#include "usnic_ib_sysfs.h"
#include "usnic_log.h"
-static ssize_t usnic_ib_show_board(struct device *device,
- struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev =
container_of(device, struct usnic_ib_dev, ib_dev.dev);
@@ -60,13 +59,13 @@ static ssize_t usnic_ib_show_board(struct device *device,
return scnprintf(buf, PAGE_SIZE, "%hu\n", subsystem_device_id);
}
+static DEVICE_ATTR_RO(board_id);
/*
* Report the configuration for this PF
*/
static ssize_t
-usnic_ib_show_config(struct device *device, struct device_attribute *attr,
- char *buf)
+config_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
char *ptr;
@@ -94,7 +93,7 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
n = scnprintf(ptr, left,
"%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
busname,
PCI_SLOT(us_ibdev->pdev->devfn),
PCI_FUNC(us_ibdev->pdev->devfn),
@@ -119,17 +118,17 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
UPDATE_PTR_LEFT(n, ptr, left);
} else {
n = scnprintf(ptr, left, "%s: no VFs\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
UPDATE_PTR_LEFT(n, ptr, left);
}
mutex_unlock(&us_ibdev->usdev_lock);
return ptr - buf;
}
+static DEVICE_ATTR_RO(config);
static ssize_t
-usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
- char *buf)
+iface_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
@@ -138,10 +137,10 @@ usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%s\n",
netdev_name(us_ibdev->netdev));
}
+static DEVICE_ATTR_RO(iface);
static ssize_t
-usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+max_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
@@ -150,10 +149,10 @@ usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%u\n",
kref_read(&us_ibdev->vf_cnt));
}
+static DEVICE_ATTR_RO(max_vf);
static ssize_t
-usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+qp_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
int qp_per_vf;
@@ -165,10 +164,10 @@ usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE,
"%d\n", qp_per_vf);
}
+static DEVICE_ATTR_RO(qp_per_vf);
static ssize_t
-usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+cq_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
@@ -177,21 +176,20 @@ usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%d\n",
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
}
+static DEVICE_ATTR_RO(cq_per_vf);
+
+static struct attribute *usnic_class_attributes[] = {
+ &dev_attr_board_id.attr,
+ &dev_attr_config.attr,
+ &dev_attr_iface.attr,
+ &dev_attr_max_vf.attr,
+ &dev_attr_qp_per_vf.attr,
+ &dev_attr_cq_per_vf.attr,
+ NULL
+};
-static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL);
-static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL);
-static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);
-static DEVICE_ATTR(max_vf, S_IRUGO, usnic_ib_show_max_vf, NULL);
-static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL);
-static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL);
-
-static struct device_attribute *usnic_class_attributes[] = {
- &dev_attr_board_id,
- &dev_attr_config,
- &dev_attr_iface,
- &dev_attr_max_vf,
- &dev_attr_qp_per_vf,
- &dev_attr_cq_per_vf,
+const struct attribute_group usnic_attr_group = {
+ .attrs = usnic_class_attributes,
};
struct qpn_attribute {
@@ -278,18 +276,6 @@ static struct kobj_type usnic_ib_qpn_type = {
int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
{
- int i;
- int err;
- for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
- err = device_create_file(&us_ibdev->ib_dev.dev,
- usnic_class_attributes[i]);
- if (err) {
- usnic_err("Failed to create device file %d for %s eith err %d",
- i, us_ibdev->ib_dev.name, err);
- return -EINVAL;
- }
- }
-
/* create kernel object for looking at individual QPs */
kobject_get(&us_ibdev->ib_dev.dev.kobj);
us_ibdev->qpn_kobj = kobject_create_and_add("qpn",
@@ -304,12 +290,6 @@ int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
- device_remove_file(&us_ibdev->ib_dev.dev,
- usnic_class_attributes[i]);
- }
-
kobject_put(us_ibdev->qpn_kobj);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
index 3d98e16cfeaf..b1f064cec850 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
@@ -41,4 +41,6 @@ void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev);
void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp);
void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp);
+extern const struct attribute_group usnic_attr_group;
+
#endif /* !USNIC_IB_SYSFS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 9973ac893635..0b91ff36768a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -159,7 +159,8 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
err = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (err) {
- usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
+ usnic_err("Failed to copy udata for %s",
+ dev_name(&us_ibdev->ib_dev.dev));
return err;
}
@@ -197,7 +198,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
vnic = vf->vnic;
if (!usnic_vnic_check_room(vnic, res_spec)) {
usnic_dbg("Found used vnic %s from %s\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
pci_name(usnic_vnic_get_pdev(
vnic)));
qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
@@ -230,7 +231,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
spin_unlock(&vf->lock);
}
- usnic_info("No free qp grp found on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("No free qp grp found on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-ENOMEM);
qp_grp_check:
@@ -471,7 +473,7 @@ struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
}
usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
- pd, context, ibdev->name);
+ pd, context, dev_name(&ibdev->dev));
return &pd->ibpd;
}
@@ -508,20 +510,20 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
usnic_err("%s: cannot copy udata for create_qp\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-EINVAL);
}
err = create_qp_validate_user_data(cmd);
if (err) {
usnic_err("%s: Failed to validate user data\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-EINVAL);
}
if (init_attr->qp_type != IB_QPT_UD) {
usnic_err("%s asked to make a non-UD QP: %d\n",
- us_ibdev->ib_dev.name, init_attr->qp_type);
+ dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c
index e0a95538c364..82dd810bc000 100644
--- a/drivers/infiniband/hw/usnic/usnic_transport.c
+++ b/drivers/infiniband/hw/usnic/usnic_transport.c
@@ -121,7 +121,7 @@ void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num)
if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
spin_lock(&roce_bitmap_lock);
if (!port_num) {
- usnic_err("Unreserved unvalid port num 0 for %s\n",
+ usnic_err("Unreserved invalid port num 0 for %s\n",
usnic_transport_to_str(type));
goto out_roce_custom;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 9dd39daa602b..49275a548751 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -54,18 +54,6 @@ static struct workqueue_struct *usnic_uiom_wq;
((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
-static void usnic_uiom_reg_account(struct work_struct *work)
-{
- struct usnic_uiom_reg *umem = container_of(work,
- struct usnic_uiom_reg, work);
-
- down_write(&umem->mm->mmap_sem);
- umem->mm->locked_vm -= umem->diff;
- up_write(&umem->mm->mmap_sem);
- mmput(umem->mm);
- kfree(umem);
-}
-
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
struct device *dev,
unsigned long iova, int flags,
@@ -99,8 +87,9 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
}
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
- int dmasync, struct list_head *chunk_list)
+ int dmasync, struct usnic_uiom_reg *uiomr)
{
+ struct list_head *chunk_list = &uiomr->chunk_list;
struct page **page_list;
struct scatterlist *sg;
struct usnic_uiom_chunk *chunk;
@@ -114,6 +103,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int flags;
dma_addr_t pa;
unsigned int gup_flags;
+ struct mm_struct *mm;
/*
* If the combination of the addr and size requested for this memory
@@ -136,7 +126,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
- down_write(&current->mm->mmap_sem);
+ uiomr->owning_mm = mm = current->mm;
+ down_write(&mm->mmap_sem);
locked = npages + current->mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -196,10 +187,12 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
out:
if (ret < 0)
usnic_uiom_put_pages(chunk_list, 0);
- else
- current->mm->pinned_vm = locked;
+ else {
+ mm->pinned_vm = locked;
+ mmgrab(uiomr->owning_mm);
+ }
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
free_page((unsigned long) page_list);
return ret;
}
@@ -379,7 +372,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
uiomr->pd = pd;
err = usnic_uiom_get_pages(addr, size, writable, dmasync,
- &uiomr->chunk_list);
+ uiomr);
if (err) {
usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
vpn_start, vpn_last, err);
@@ -426,29 +419,39 @@ out_put_intervals:
out_put_pages:
usnic_uiom_put_pages(&uiomr->chunk_list, 0);
spin_unlock(&pd->lock);
+ mmdrop(uiomr->owning_mm);
out_free_uiomr:
kfree(uiomr);
return ERR_PTR(err);
}
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
- struct ib_ucontext *ucontext)
+static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
- struct task_struct *task;
- struct mm_struct *mm;
- unsigned long diff;
+ mmdrop(uiomr->owning_mm);
+ kfree(uiomr);
+}
- __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
+static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
+{
+ return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+}
- task = get_pid_task(ucontext->tgid, PIDTYPE_PID);
- if (!task)
- goto out;
- mm = get_task_mm(task);
- put_task_struct(task);
- if (!mm)
- goto out;
+static void usnic_uiom_release_defer(struct work_struct *work)
+{
+ struct usnic_uiom_reg *uiomr =
+ container_of(work, struct usnic_uiom_reg, work);
- diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+ down_write(&uiomr->owning_mm->mmap_sem);
+ uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+ up_write(&uiomr->owning_mm->mmap_sem);
+
+ __usnic_uiom_release_tail(uiomr);
+}
+
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
+ struct ib_ucontext *context)
+{
+ __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
/*
* We may be called with the mm's mmap_sem already held. This
@@ -456,25 +459,21 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
* the last reference to our file and calls our release
* method. If there are memory regions to destroy, we'll end
* up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting to the system workqueue.
+ * we defer the vm_locked accounting to a workqueue.
*/
- if (ucontext->closing) {
- if (!down_write_trylock(&mm->mmap_sem)) {
- INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
- uiomr->mm = mm;
- uiomr->diff = diff;
-
+ if (context->closing) {
+ if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
+ INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
queue_work(usnic_uiom_wq, &uiomr->work);
return;
}
- } else
- down_write(&mm->mmap_sem);
+ } else {
+ down_write(&uiomr->owning_mm->mmap_sem);
+ }
+ uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+ up_write(&uiomr->owning_mm->mmap_sem);
- mm->pinned_vm -= diff;
- up_write(&mm->mmap_sem);
- mmput(mm);
-out:
- kfree(uiomr);
+ __usnic_uiom_release_tail(uiomr);
}
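/*
 * Illustrative reduction of the release path above (names from this file):
 * the "trylock or defer" idiom avoids a deadlock when the caller already
 * holds mmap_sem (uverbs teardown), pushing the pinned_vm accounting to a
 * workqueue instead. Note mmgrab()/mmdrop() only pin the mm_struct itself
 * (mm_count), not the address space the way mmget()/mmput() would:
 *
 *	if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
 *		INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
 *		queue_work(usnic_uiom_wq, &uiomr->work);
 *		return;
 *	}
 */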
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index 8c096acff123..b86a9731071b 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -71,8 +71,7 @@ struct usnic_uiom_reg {
int writable;
struct list_head chunk_list;
struct work_struct work;
- struct mm_struct *mm;
- unsigned long diff;
+ struct mm_struct *owning_mm;
};
struct usnic_uiom_chunk {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index a5719899f49a..398443f43dc3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -65,32 +65,36 @@ static struct workqueue_struct *event_wq;
static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *pvrdma_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL,
+};
-static struct device_attribute *pvrdma_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group pvrdma_attr_group = {
+ .attrs = pvrdma_class_attributes,
};
static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
@@ -160,9 +164,7 @@ static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
static int pvrdma_register_device(struct pvrdma_dev *dev)
{
int ret = -1;
- int i = 0;
- strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
dev->flags = 0;
@@ -266,24 +268,16 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
}
dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
spin_lock_init(&dev->srq_tbl_lock);
+ rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
- ret = ib_register_device(&dev->ib_dev, NULL);
+ ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", NULL);
if (ret)
goto err_srq_free;
- for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
- ret = device_create_file(&dev->ib_dev.dev,
- pvrdma_class_attributes[i]);
- if (ret)
- goto err_class;
- }
-
dev->ib_active = true;
return 0;
-err_class:
- ib_unregister_device(&dev->ib_dev);
err_srq_free:
kfree(dev->srq_tbl);
err_qp_free:
@@ -735,7 +729,7 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
default:
dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
- event, dev->ib_dev.name);
+ event, dev_name(&dev->ib_dev.dev));
break;
}
}
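/*
 * Illustrative summary (assumption: 4.20-era RDMA core API): device naming
 * moved into the core, so drivers stop writing ib_dev.name directly and
 * instead pass a format string at registration time, with sysfs attributes
 * handed over as a group before the device becomes visible:
 *
 *	rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
 *	ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", NULL);
 *
 * Log messages correspondingly switch from ibdev->name to
 * dev_name(&ibdev->dev), which is valid once the core has named the device.
 */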
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 60083c0363a5..cf22f57a9f0d 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -499,7 +499,7 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
- attr_mask, IB_LINK_LAYER_ETHERNET)) {
+ attr_mask)) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index 98e798007f75..7df896a18d38 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library"
- depends on 64BIT && ARCH_DMA_ADDR_T_64BIT
+ depends on X86_64 && ARCH_DMA_ADDR_T_64BIT
depends on PCI
select DMA_VIRT_OPS
---help---
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 5ce403c6cddb..1735deb1a9d4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -118,6 +118,187 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
};
EXPORT_SYMBOL(ib_rvt_state_ops);
+/* platform specific: return the last level cache (llc) size, in KiB */
+static int rvt_wss_llc_size(void)
+{
+ /* assume that the boot CPU value is universal for all CPUs */
+ return boot_cpu_data.x86_cache_size;
+}
+
+/* platform specific: cacheless copy */
+static void cacheless_memcpy(void *dst, void *src, size_t n)
+{
+ /*
+ * Use the only available X64 cacheless copy. Add a __user cast
+ * to quiet sparse. The src agument is already in the kernel so
+ * there are no security issues. The extra fault recovery machinery
+ * is not invoked.
+ */
+ __copy_user_nocache(dst, (void __user *)src, n, 0);
+}
+
+void rvt_wss_exit(struct rvt_dev_info *rdi)
+{
+ struct rvt_wss *wss = rdi->wss;
+
+ if (!wss)
+ return;
+
+ /* coded to handle partially initialized and repeat callers */
+ kfree(wss->entries);
+ wss->entries = NULL;
+ kfree(rdi->wss);
+ rdi->wss = NULL;
+}
+
+/**
+ * rvt_wss_init - Init wss data structures
+ * @rdi: rvt device info on which to initialize the working set state
+ *
+ * Return: 0 on success
+ */
+int rvt_wss_init(struct rvt_dev_info *rdi)
+{
+ unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
+ unsigned int wss_threshold = rdi->dparms.wss_threshold;
+ unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
+ long llc_size;
+ long llc_bits;
+ long table_size;
+ long table_bits;
+ struct rvt_wss *wss;
+ int node = rdi->dparms.node;
+
+ if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
+ rdi->wss = NULL;
+ return 0;
+ }
+
+ rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
+ if (!rdi->wss)
+ return -ENOMEM;
+ wss = rdi->wss;
+
+ /* check for a valid percent range - default to 80 if none or invalid */
+ if (wss_threshold < 1 || wss_threshold > 100)
+ wss_threshold = 80;
+
+ /* reject a wildly large period */
+ if (wss_clean_period > 1000000)
+ wss_clean_period = 256;
+
+ /* reject a zero period */
+ if (wss_clean_period == 0)
+ wss_clean_period = 1;
+
+ /*
+ * Calculate the table size - the next power of 2 larger than the
+ * LLC size. LLC size is in KiB.
+ */
+ llc_size = rvt_wss_llc_size() * 1024;
+ table_size = roundup_pow_of_two(llc_size);
+
+ /* one bit per page in rounded up table */
+ llc_bits = llc_size / PAGE_SIZE;
+ table_bits = table_size / PAGE_SIZE;
+ wss->pages_mask = table_bits - 1;
+ wss->num_entries = table_bits / BITS_PER_LONG;
+
+ wss->threshold = (llc_bits * wss_threshold) / 100;
+ if (wss->threshold == 0)
+ wss->threshold = 1;
+
+ wss->clean_period = wss_clean_period;
+ atomic_set(&wss->clean_counter, wss_clean_period);
+
+ wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
+ GFP_KERNEL, node);
+ if (!wss->entries) {
+ rvt_wss_exit(rdi);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
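/*
 * Worked example for the sizing above (illustrative, assuming a 30720 KiB
 * LLC reported by boot_cpu_data, 4 KiB pages, and a 64-bit kernel):
 *
 *	llc_size    = 30720 * 1024         = 31457280 bytes
 *	table_size  = roundup_pow_of_two() = 33554432 (32 MiB)
 *	llc_bits    = 31457280 / 4096      = 7680 (one bit per page)
 *	table_bits  = 33554432 / 4096      = 8192, so pages_mask = 8191
 *	num_entries = 8192 / BITS_PER_LONG = 128 longs
 *	threshold   = 7680 * 80 / 100      = 6144 pages at the default 80%
 */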
+
+/*
+ * Advance the clean counter. When the clean period has expired,
+ * clean an entry.
+ *
+ * This is implemented in atomics to avoid locking. Because multiple
+ * variables are involved, it can be racy which can lead to slightly
+ * inaccurate information. Since this is only a heuristic, this is
+ * OK. Any inaccuracies will clean themselves out as the counter
+ * advances. That said, it is unlikely the entry clean operation will
+ * race - the next possible racer will not start until the next clean
+ * period.
+ *
+ * The clean counter is implemented as a decrement to zero. When zero
+ * is reached an entry is cleaned.
+ */
+static void wss_advance_clean_counter(struct rvt_wss *wss)
+{
+ int entry;
+ int weight;
+ unsigned long bits;
+
+ /* become the cleaner if we decrement the counter to zero */
+ if (atomic_dec_and_test(&wss->clean_counter)) {
+ /*
+ * Set, not add, the clean period. This avoids an issue
+ * where the counter could decrement below the clean period.
+ * Doing a set can result in lost decrements, slowing the
+ * clean advance. Since this is a heuristic, this possible
+ * slowdown is OK.
+ *
+ * An alternative is to loop, advancing the counter by a
+ * clean period until the result is > 0. However, this could
+ * lead to several threads keeping another in the clean loop.
+ * This could be mitigated by limiting the number of times
+ * we stay in the loop.
+ */
+ atomic_set(&wss->clean_counter, wss->clean_period);
+
+ /*
+ * Uniquely grab the entry to clean and move to next.
+ * The current entry is always the lower bits of
+ * wss.clean_entry. The table size, wss.num_entries,
+ * is always a power-of-2.
+ */
+ entry = (atomic_inc_return(&wss->clean_entry) - 1)
+ & (wss->num_entries - 1);
+
+ /* clear the entry and count the bits */
+ bits = xchg(&wss->entries[entry], 0);
+ weight = hweight64((u64)bits);
+ /* only adjust the contended total count if needed */
+ if (weight)
+ atomic_sub(weight, &wss->total_count);
+ }
+}
+
+/*
+ * Insert the given address into the working set array.
+ */
+static void wss_insert(struct rvt_wss *wss, void *address)
+{
+ u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
+ u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
+ u32 nr = page & (BITS_PER_LONG - 1);
+
+ if (!test_and_set_bit(nr, &wss->entries[entry]))
+ atomic_inc(&wss->total_count);
+
+ wss_advance_clean_counter(wss);
+}
+
+/*
+ * Is the working set larger than the threshold?
+ */
+static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
+{
+ return atomic_read(&wss->total_count) >= wss->threshold;
+}
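/*
 * Illustrative sketch of how the pieces above combine; this mirrors the
 * adaptive branch of rvt_copy_sge() further down (names from this file):
 */
static bool wss_prefer_cacheless(struct rvt_wss *wss, void *vaddr, u32 len)
{
	if (len < PAGE_SIZE) {
		wss_advance_clean_counter(wss); /* keep aging the set */
		return false;
	}
	/* note the first (and, if large, second) destination page as hot */
	wss_insert(wss, vaddr);
	if (len >= 2 * PAGE_SIZE)
		wss_insert(wss, vaddr + PAGE_SIZE);
	/* working set no longer fits in the LLC: bypass the caches */
	return wss_exceeds_threshold(wss);
}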
+
static void get_map_page(struct rvt_qpn_table *qpt,
struct rvt_qpn_map *map)
{
@@ -1164,11 +1345,8 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int lastwqe = 0;
int mig = 0;
int pmtu = 0; /* for gcc warning only */
- enum rdma_link_layer link;
int opa_ah;
- link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
-
spin_lock_irq(&qp->r_lock);
spin_lock(&qp->s_hlock);
spin_lock(&qp->s_lock);
@@ -1179,7 +1357,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, link))
+ attr_mask))
goto inval;
if (rdi->driver_f.check_modify_qp &&
@@ -1718,7 +1896,7 @@ static inline int rvt_qp_is_avail(
*/
static int rvt_post_one_wr(struct rvt_qp *qp,
const struct ib_send_wr *wr,
- int *call_send)
+ bool *call_send)
{
struct rvt_swqe *wqe;
u32 next;
@@ -1823,15 +2001,11 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
wqe->wr.num_sge = j;
}
- /* general part of wqe valid - allow for driver checks */
- if (rdi->driver_f.check_send_wqe) {
- ret = rdi->driver_f.check_send_wqe(qp, wqe);
- if (ret < 0)
- goto bail_inval_free;
- if (ret)
- *call_send = ret;
- }
-
+ /*
+ * Calculate and set SWQE PSN values prior to handing it off
+ * to the driver's check routine. This gives the driver the
+ * opportunity to adjust PSN values based on internal checks.
+ */
log_pmtu = qp->log_pmtu;
if (qp->ibqp.qp_type != IB_QPT_UC &&
qp->ibqp.qp_type != IB_QPT_RC) {
@@ -1856,8 +2030,18 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
(wqe->length ?
((wqe->length - 1) >> log_pmtu) :
0);
- qp->s_next_psn = wqe->lpsn + 1;
}
+
+ /* general part of wqe valid - allow for driver checks */
+ if (rdi->driver_f.setup_wqe) {
+ ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
+ if (ret < 0)
+ goto bail_inval_free_ref;
+ }
+
+ if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
+ qp->s_next_psn = wqe->lpsn + 1;
+
if (unlikely(reserved_op)) {
wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
rvt_qp_wqe_reserve(qp, wqe);
@@ -1871,6 +2055,10 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
return 0;
+bail_inval_free_ref:
+ if (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC)
+ atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
bail_inval_free:
/* release mr holds */
while (j) {
@@ -1897,7 +2085,7 @@ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
unsigned long flags = 0;
- int call_send;
+ bool call_send;
unsigned nreq = 0;
int err = 0;
@@ -1930,7 +2118,11 @@ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
bail:
spin_unlock_irqrestore(&qp->s_hlock, flags);
if (nreq) {
- if (call_send)
+ /*
+ * Only call do_send if there is exactly one packet, and the
+ * driver said it was ok.
+ */
+ if (nreq == 1 && call_send)
rdi->driver_f.do_send(qp);
else
rdi->driver_f.schedule_send_no_lock(qp);
@@ -2465,3 +2657,454 @@ void rvt_qp_iter(struct rvt_dev_info *rdi,
rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
+
+/*
+ * This should be called with s_lock held.
+ */
+void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ enum ib_wc_status status)
+{
+ u32 old_last, last;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ last = qp->s_last;
+ old_last = last;
+ trace_rvt_qp_send_completion(qp, wqe, last);
+ if (++last >= qp->s_size)
+ last = 0;
+ trace_rvt_qp_send_completion(qp, wqe, last);
+ qp->s_last = last;
+ /* See post_send() */
+ barrier();
+ rvt_put_swqe(wqe);
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+
+ rvt_qp_swqe_complete(qp,
+ wqe,
+ rdi->wc_opcode[wqe->wr.opcode],
+ status);
+
+ if (qp->s_acked == old_last)
+ qp->s_acked = last;
+ if (qp->s_cur == old_last)
+ qp->s_cur = last;
+ if (qp->s_tail == old_last)
+ qp->s_tail = last;
+ if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+ qp->s_draining = 0;
+}
+EXPORT_SYMBOL(rvt_send_complete);
+
+/**
+ * rvt_copy_sge - copy data to SGE memory
+ * @qp: associated QP
+ * @ss: the SGE state
+ * @data: the data to copy
+ * @length: the length of the data
+ * @release: boolean to release MR
+ * @copy_last: do a separate copy of the last 8 bytes
+ */
+void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
+ void *data, u32 length,
+ bool release, bool copy_last)
+{
+ struct rvt_sge *sge = &ss->sge;
+ int i;
+ bool in_last = false;
+ bool cacheless_copy = false;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ struct rvt_wss *wss = rdi->wss;
+ unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
+
+ if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
+ cacheless_copy = length >= PAGE_SIZE;
+ } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
+ if (length >= PAGE_SIZE) {
+ /*
+ * NOTE: this *assumes*:
+ * o The first vaddr is the dest.
+ * o If multiple pages, then vaddr is sequential.
+ */
+ wss_insert(wss, sge->vaddr);
+ if (length >= (2 * PAGE_SIZE))
+ wss_insert(wss, (sge->vaddr + PAGE_SIZE));
+
+ cacheless_copy = wss_exceeds_threshold(wss);
+ } else {
+ wss_advance_clean_counter(wss);
+ }
+ }
+
+ if (copy_last) {
+ if (length > 8) {
+ length -= 8;
+ } else {
+ copy_last = false;
+ in_last = true;
+ }
+ }
+
+again:
+ while (length) {
+ u32 len = rvt_get_sge_length(sge, length);
+
+ WARN_ON_ONCE(len == 0);
+ if (unlikely(in_last)) {
+ /* enforce byte transfer ordering */
+ for (i = 0; i < len; i++)
+ ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
+ } else if (cacheless_copy) {
+ cacheless_memcpy(sge->vaddr, data, len);
+ } else {
+ memcpy(sge->vaddr, data, len);
+ }
+ rvt_update_sge(ss, len, release);
+ data += len;
+ length -= len;
+ }
+
+ if (copy_last) {
+ copy_last = false;
+ in_last = true;
+ length = 8;
+ goto again;
+ }
+}
+EXPORT_SYMBOL(rvt_copy_sge);
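/*
 * Illustrative caller (assumption drawn from rvt_ruc_loopback() below): an
 * RDMA WRITE targeting a user QP requests the split tail copy so a consumer
 * polling the last 8 bytes of the buffer cannot observe them before the
 * body has landed:
 *
 *	rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len,
 *		     true,                  release MR refs as SGEs drain
 *		     rvt_is_user_qp(qp));   copy_last only for user QPs
 *
 * With copy_last set and length > 8, the body is copied first (possibly via
 * the cacheless path), then the final 8 bytes are copied byte by byte to
 * enforce ordering.
 */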
+
+/**
+ * rvt_ruc_loopback - handle UC and RC loopback requests
+ * @sqp: the sending QP
+ *
+ * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
+ * Note that although we are single threaded due to the send engine, we still
+ * have to protect against post_send(). We don't have to worry about
+ * receive interrupts since this is a connected protocol and all packets
+ * will pass through here.
+ */
+void rvt_ruc_loopback(struct rvt_qp *sqp)
+{
+ struct rvt_ibport *rvp = NULL;
+ struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
+ struct rvt_sge *sge;
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+ atomic64_t *maddr;
+ enum ib_wc_status send_status;
+ bool release;
+ int ret;
+ bool copy_last = false;
+ int local_ops = 0;
+
+ rcu_read_lock();
+ rvp = rdi->ports[sqp->port_num - 1];
+
+ /*
+ * Note that we check the responder QP state after
+ * checking the requester's state.
+ */
+
+ qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
+ sqp->remote_qpn);
+
+ spin_lock_irqsave(&sqp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
+ !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ goto unlock;
+
+ sqp->s_flags |= RVT_S_BUSY;
+
+again:
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
+ goto clr_busy;
+ wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
+
+ /* Return if it is not OK to start a new work request. */
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
+ goto clr_busy;
+ /* We are in the error state, flush the work request. */
+ send_status = IB_WC_WR_FLUSH_ERR;
+ goto flush_send;
+ }
+
+ /*
+ * We can rely on the entry not changing without the s_lock
+ * being held until we update s_last.
+ * We increment s_cur to indicate s_last is in progress.
+ */
+ if (sqp->s_last == sqp->s_cur) {
+ if (++sqp->s_cur >= sqp->s_size)
+ sqp->s_cur = 0;
+ }
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+
+ if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
+ qp->ibqp.qp_type != sqp->ibqp.qp_type) {
+ rvp->n_pkt_drops++;
+ /*
+ * For RC, the requester would timeout and retry so
+ * shortcut the timeouts and just signal too many retries.
+ */
+ if (sqp->ibqp.qp_type == IB_QPT_RC)
+ send_status = IB_WC_RETRY_EXC_ERR;
+ else
+ send_status = IB_WC_SUCCESS;
+ goto serr;
+ }
+
+ memset(&wc, 0, sizeof(wc));
+ send_status = IB_WC_SUCCESS;
+
+ release = true;
+ sqp->s_sge.sge = wqe->sg_list[0];
+ sqp->s_sge.sg_list = wqe->sg_list + 1;
+ sqp->s_sge.num_sge = wqe->wr.num_sge;
+ sqp->s_len = wqe->length;
+ switch (wqe->wr.opcode) {
+ case IB_WR_REG_MR:
+ goto send_comp;
+
+ case IB_WR_LOCAL_INV:
+ if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
+ if (rvt_invalidate_rkey(sqp,
+ wqe->wr.ex.invalidate_rkey))
+ send_status = IB_WC_LOC_PROT_ERR;
+ local_ops = 1;
+ }
+ goto send_comp;
+
+ case IB_WR_SEND_WITH_INV:
+ if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
+ }
+ goto send;
+
+ case IB_WR_SEND_WITH_IMM:
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ /* FALLTHROUGH */
+ case IB_WR_SEND:
+send:
+ ret = rvt_get_rwqe(qp, false);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ ret = rvt_get_rwqe(qp, true);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ /* skip copy_last set and qp_access_flags recheck */
+ goto do_write;
+ case IB_WR_RDMA_WRITE:
+ copy_last = rvt_is_user_qp(qp);
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+do_write:
+ if (wqe->length == 0)
+ break;
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ wqe->rdma_wr.remote_addr,
+ wqe->rdma_wr.rkey,
+ IB_ACCESS_REMOTE_WRITE)))
+ goto acc_err;
+ qp->r_sge.sg_list = NULL;
+ qp->r_sge.num_sge = 1;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_RDMA_READ:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto inv_err;
+ if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ wqe->rdma_wr.remote_addr,
+ wqe->rdma_wr.rkey,
+ IB_ACCESS_REMOTE_READ)))
+ goto acc_err;
+ release = false;
+ sqp->s_sge.sg_list = NULL;
+ sqp->s_sge.num_sge = 1;
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->wr.num_sge;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto inv_err;
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ wqe->atomic_wr.remote_addr,
+ wqe->atomic_wr.rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
+ sdata = wqe->atomic_wr.compare_add;
+ *(u64 *)sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+ (u64)atomic64_add_return(sdata, maddr) - sdata :
+ (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
+ sdata, wqe->atomic_wr.swap);
+ rvt_put_mr(qp->r_sge.sge.mr);
+ qp->r_sge.num_sge = 0;
+ goto send_comp;
+
+ default:
+ send_status = IB_WC_LOC_QP_OP_ERR;
+ goto serr;
+ }
+
+ sge = &sqp->s_sge.sge;
+ while (sqp->s_len) {
+ u32 len = sqp->s_len;
+
+ if (len > sge->length)
+ len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ WARN_ON_ONCE(len == 0);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
+ len, release, copy_last);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (!release)
+ rvt_put_mr(sge->mr);
+ if (--sqp->s_sge.num_sge)
+ *sge = *sqp->s_sge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= RVT_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ sqp->s_len -= len;
+ }
+ if (release)
+ rvt_put_ss(&qp->r_sge);
+
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
+ goto send_comp;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
+ wc.port_num = 1;
+ /* Signal completion event if the solicited bit is set. */
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
+
+send_comp:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ rvp->n_loop_pkts++;
+flush_send:
+ sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+ rvt_send_complete(sqp, wqe, send_status);
+ if (local_ops) {
+ atomic_dec(&sqp->local_ops_pending);
+ local_ops = 0;
+ }
+ goto again;
+
+rnr_nak:
+ /* Handle RNR NAK */
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ goto send_comp;
+ rvp->n_rnr_naks++;
+ /*
+ * Note: we don't need the s_lock held since the BUSY flag
+ * makes this single threaded.
+ */
+ if (sqp->s_rnr_retry == 0) {
+ send_status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto serr;
+ }
+ if (sqp->s_rnr_retry_cnt < 7)
+ sqp->s_rnr_retry--;
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
+ goto clr_busy;
+ rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
+ IB_AETH_CREDIT_SHIFT);
+ goto clr_busy;
+
+op_err:
+ send_status = IB_WC_REM_OP_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+inv_err:
+ send_status = IB_WC_REM_INV_REQ_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+acc_err:
+ send_status = IB_WC_REM_ACCESS_ERR;
+ wc.status = IB_WC_LOC_PROT_ERR;
+err:
+ /* responder goes to error state */
+ rvt_rc_error(qp, wc.status);
+
+serr:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ rvt_send_complete(sqp, wqe, send_status);
+ if (sqp->ibqp.qp_type == IB_QPT_RC) {
+ int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+
+ sqp->s_flags &= ~RVT_S_BUSY;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = sqp->ibqp.device;
+ ev.element.qp = &sqp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+ }
+ goto done;
+ }
+clr_busy:
+ sqp->s_flags &= ~RVT_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+done:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(rvt_ruc_loopback);
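/*
 * Illustrative sketch of the software atomic emulation in the
 * IB_WR_ATOMIC_* arm above (assumption: the 8-byte target has already been
 * validated and aligned by rvt_rkey_ok()):
 */
static u64 rvt_sw_atomic_sketch(u64 *target, u64 compare_add, u64 swap,
				bool fetch_add)
{
	atomic64_t *maddr = (atomic64_t *)target;

	if (fetch_add)
		/* FETCH_AND_ADD returns the value prior to the add */
		return (u64)atomic64_add_return(compare_add, maddr) -
		       compare_add;
	/* CMP_AND_SWP stores 'swap' iff *target == compare_add and
	 * returns the prior value either way */
	return cmpxchg(target, compare_add, swap);
}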
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
index 264811fdc530..6d883972e0b8 100644
--- a/drivers/infiniband/sw/rdmavt/qp.h
+++ b/drivers/infiniband/sw/rdmavt/qp.h
@@ -66,4 +66,6 @@ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
+int rvt_wss_init(struct rvt_dev_info *rdi);
+void rvt_wss_exit(struct rvt_dev_info *rdi);
#endif /* DEF_RVTQP_H */
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
index 0ef25fc49f25..d5df352eadb1 100644
--- a/drivers/infiniband/sw/rdmavt/trace_tx.h
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -153,6 +153,48 @@ TRACE_EVENT(
)
);
+TRACE_EVENT(
+ rvt_qp_send_completion,
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
+ TP_ARGS(qp, wqe, idx),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(struct rvt_swqe *, wqe)
+ __field(u64, wr_id)
+ __field(u32, qpn)
+ __field(u32, qpt)
+ __field(u32, length)
+ __field(u32, idx)
+ __field(u32, ssn)
+ __field(enum ib_wr_opcode, opcode)
+ __field(int, send_flags)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->wqe = wqe;
+ __entry->wr_id = wqe->wr.wr_id;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->qpt = qp->ibqp.qp_type;
+ __entry->length = wqe->length;
+ __entry->idx = idx;
+ __entry->ssn = wqe->ssn;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->send_flags = wqe->wr.send_flags;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->qpt,
+ __entry->wqe,
+ __entry->idx,
+ __entry->wr_id,
+ __entry->length,
+ __entry->ssn,
+ __entry->opcode,
+ __entry->send_flags
+ )
+);
#endif /* __RVT_TRACE_TX_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 17e4abc067af..723d3daf2eba 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -774,6 +774,13 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
goto bail_no_mr;
}
+ /* Memory Working Set Size */
+ ret = rvt_wss_init(rdi);
+ if (ret) {
+ rvt_pr_err(rdi, "Error in WSS init.\n");
+ goto bail_mr;
+ }
+
/* Completion queues */
spin_lock_init(&rdi->n_cqs_lock);
@@ -828,10 +835,11 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
rdi->ibdev.driver_id = driver_id;
/* We are now good to announce we exist */
- ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
+ ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev),
+ rdi->driver_f.port_callback);
if (ret) {
rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
- goto bail_mr;
+ goto bail_wss;
}
rvt_create_mad_agents(rdi);
@@ -839,6 +847,8 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
rvt_pr_info(rdi, "Registration with rdmavt done.\n");
return ret;
+bail_wss:
+ rvt_wss_exit(rdi);
bail_mr:
rvt_mr_exit(rdi);
@@ -862,6 +872,7 @@ void rvt_unregister_device(struct rvt_dev_info *rdi)
rvt_free_mad_agents(rdi);
ib_unregister_device(&rdi->ibdev);
+ rvt_wss_exit(rdi);
rvt_mr_exit(rdi);
rvt_qp_exit(rdi);
}
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 10999fa69281..383e65c7bbc0 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -103,7 +103,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
rxe->attr.max_ee_init_rd_atom = RXE_MAX_EE_INIT_RD_ATOM;
- rxe->attr.atomic_cap = RXE_ATOMIC_CAP;
+ rxe->attr.atomic_cap = IB_ATOMIC_HCA;
rxe->attr.max_ee = RXE_MAX_EE;
rxe->attr.max_rdd = RXE_MAX_RDD;
rxe->attr.max_mw = RXE_MAX_MW;
@@ -128,9 +128,9 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
/* initialize port attributes */
static int rxe_init_port_param(struct rxe_port *port)
{
- port->attr.state = RXE_PORT_STATE;
- port->attr.max_mtu = RXE_PORT_MAX_MTU;
- port->attr.active_mtu = RXE_PORT_ACTIVE_MTU;
+ port->attr.state = IB_PORT_DOWN;
+ port->attr.max_mtu = IB_MTU_4096;
+ port->attr.active_mtu = IB_MTU_256;
port->attr.gid_tbl_len = RXE_PORT_GID_TBL_LEN;
port->attr.port_cap_flags = RXE_PORT_PORT_CAP_FLAGS;
port->attr.max_msg_sz = RXE_PORT_MAX_MSG_SZ;
@@ -147,8 +147,7 @@ static int rxe_init_port_param(struct rxe_port *port)
port->attr.active_width = RXE_PORT_ACTIVE_WIDTH;
port->attr.active_speed = RXE_PORT_ACTIVE_SPEED;
port->attr.phys_state = RXE_PORT_PHYS_STATE;
- port->mtu_cap =
- ib_mtu_enum_to_int(RXE_PORT_ACTIVE_MTU);
+ port->mtu_cap = ib_mtu_enum_to_int(IB_MTU_256);
port->subnet_prefix = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);
return 0;
@@ -300,7 +299,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
mtu = eth_mtu_int_to_enum(ndev_mtu);
/* Make sure that new MTU in range */
- mtu = mtu ? min_t(enum ib_mtu, mtu, RXE_PORT_MAX_MTU) : IB_MTU_256;
+ mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256;
port->attr.active_mtu = mtu;
port->mtu_cap = ib_mtu_enum_to_int(mtu);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 83311dd07019..ea089cb091ad 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -191,6 +191,7 @@ static inline void reset_retry_counters(struct rxe_qp *qp)
{
qp->comp.retry_cnt = qp->attr.retry_cnt;
qp->comp.rnr_retry = qp->attr.rnr_retry;
+ qp->comp.started_retry = 0;
}
static inline enum comp_state check_psn(struct rxe_qp *qp,
@@ -253,6 +254,17 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
+ /* read retries of partial data may restart from
+ * read response first or response only.
+ */
+ if ((pkt->psn == wqe->first_psn &&
+ pkt->opcode ==
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
+ (wqe->first_psn == wqe->last_psn &&
+ pkt->opcode ==
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
+ break;
+
return COMPST_ERROR;
}
break;
@@ -499,11 +511,11 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- qp->comp.opcode = -1;
-
- if (pkt) {
- if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
- qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ if (pkt && wqe->state == wqe_state_pending) {
+ if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
+ qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
+ qp->comp.opcode = -1;
+ }
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
@@ -676,6 +688,20 @@ int rxe_completer(void *arg)
goto exit;
}
+ /* if we've started a retry, don't start another
+ * retry sequence, unless this is a timeout.
+ */
+ if (qp->comp.started_retry &&
+ !qp->comp.timeout_retry) {
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ skb = NULL;
+ }
+
+ goto done;
+ }
+
if (qp->comp.retry_cnt > 0) {
if (qp->comp.retry_cnt != 7)
qp->comp.retry_cnt--;
@@ -692,6 +718,7 @@ int rxe_completer(void *arg)
rxe_counter_inc(rxe,
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
+ qp->comp.started_retry = 1;
rxe_run_task(&qp->req.task, 1);
}
@@ -701,7 +728,7 @@ int rxe_completer(void *arg)
skb = NULL;
}
- goto exit;
+ goto done;
} else {
rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
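/*
 * Illustrative view of the retry gating added above (names from this
 * file): once a retry sequence has started, further NAKs or implicit NAK
 * conditions must not restart it; only a retransmit timeout may.
 * started_retry is set when the requester is rescheduled and cleared in
 * reset_retry_counters() when forward progress resumes:
 *
 *	if (qp->comp.started_retry && !qp->comp.timeout_retry)
 *		goto done;              a retry is already in flight
 *	...
 *	qp->req.need_retry = 1;
 *	qp->comp.started_retry = 1;
 *	rxe_run_task(&qp->req.task, 1);
 */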
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 2ee4b08b00ea..a57276f2cb84 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -30,7 +30,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
@@ -97,7 +97,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
if (err) {
- kvfree(cq->queue->buf);
+ vfree(cq->queue->buf);
kfree(cq->queue);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 87d14f7ef21b..afd53f57a62b 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -144,8 +144,7 @@ void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt);
-int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, u32 *crc);
+int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
@@ -196,7 +195,7 @@ static inline int qp_mtu(struct rxe_qp *qp)
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
return qp->attr.path_mtu;
else
- return RXE_PORT_MAX_MTU;
+ return IB_MTU_4096;
}
static inline int rcv_wqe_size(int max_sge)
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index dff605fdf60f..9d3916b93f23 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -573,33 +573,20 @@ struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
int index = key >> 8;
- if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
- mem = rxe_pool_get_index(&rxe->mr_pool, index);
- if (!mem)
- goto err1;
- } else {
- goto err1;
+ mem = rxe_pool_get_index(&rxe->mr_pool, index);
+ if (!mem)
+ return NULL;
+
+ if (unlikely((type == lookup_local && mem->lkey != key) ||
+ (type == lookup_remote && mem->rkey != key) ||
+ mem->pd != pd ||
+ (access && !(access & mem->access)) ||
+ mem->state != RXE_MEM_STATE_VALID)) {
+ rxe_drop_ref(mem);
+ mem = NULL;
}
- if ((type == lookup_local && mem->lkey != key) ||
- (type == lookup_remote && mem->rkey != key))
- goto err2;
-
- if (mem->pd != pd)
- goto err2;
-
- if (access && !(access & mem->access))
- goto err2;
-
- if (mem->state != RXE_MEM_STATE_VALID)
- goto err2;
-
return mem;
-
-err2:
- rxe_drop_ref(mem);
-err1:
- return NULL;
}
int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 8094cbaa54a9..40e82e0f6c2d 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -72,7 +72,7 @@ struct rxe_dev *get_rxe_by_name(const char *name)
spin_lock_bh(&dev_list_lock);
list_for_each_entry(rxe, &rxe_dev_list, list) {
- if (!strcmp(name, rxe->ib_dev.name)) {
+ if (!strcmp(name, dev_name(&rxe->ib_dev.dev))) {
found = rxe;
break;
}
@@ -182,19 +182,11 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
#endif
-static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
+static struct dst_entry *rxe_find_route(struct net_device *ndev,
struct rxe_qp *qp,
struct rxe_av *av)
{
- const struct ib_gid_attr *attr;
struct dst_entry *dst = NULL;
- struct net_device *ndev;
-
- attr = rdma_get_gid_attr(&rxe->ib_dev, qp->attr.port_num,
- av->grh.sgid_index);
- if (IS_ERR(attr))
- return NULL;
- ndev = attr->ndev;
if (qp_type(qp) == IB_QPT_RC)
dst = sk_dst_get(qp->sk->sk);
@@ -229,7 +221,6 @@ static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
sk_dst_set(qp->sk->sk, dst);
}
}
- rdma_put_gid_attr(attr);
return dst;
}
@@ -377,8 +368,8 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
-static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, struct rxe_av *av)
+static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb,
+ struct rxe_av *av)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
@@ -387,7 +378,7 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
- dst = rxe_find_route(rxe, qp, av);
+ dst = rxe_find_route(skb->dev, qp, av);
if (!dst) {
pr_err("Host not reachable\n");
return -EHOSTUNREACH;
@@ -396,8 +387,8 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
if (!memcmp(saddr, daddr, sizeof(*daddr)))
pkt->mask |= RXE_LOOPBACK_MASK;
- prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
- htons(ROCE_V2_UDP_DPORT));
+ prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
+ cpu_to_be16(ROCE_V2_UDP_DPORT));
prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
av->grh.traffic_class, av->grh.hop_limit, df, xnet);
@@ -406,15 +397,15 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return 0;
}
-static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, struct rxe_av *av)
+static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb,
+ struct rxe_av *av)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
- dst = rxe_find_route(rxe, qp, av);
+ dst = rxe_find_route(skb->dev, qp, av);
if (!dst) {
pr_err("Host not reachable\n");
return -EHOSTUNREACH;
@@ -423,8 +414,8 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
if (!memcmp(saddr, daddr, sizeof(*daddr)))
pkt->mask |= RXE_LOOPBACK_MASK;
- prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
- htons(ROCE_V2_UDP_DPORT));
+ prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
+ cpu_to_be16(ROCE_V2_UDP_DPORT));
prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
av->grh.traffic_class,
@@ -434,16 +425,15 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return 0;
}
-int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, u32 *crc)
+int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
{
int err = 0;
struct rxe_av *av = rxe_get_av(pkt);
if (av->network_type == RDMA_NETWORK_IPV4)
- err = prepare4(rxe, pkt, skb, av);
+ err = prepare4(pkt, skb, av);
else if (av->network_type == RDMA_NETWORK_IPV6)
- err = prepare6(rxe, pkt, skb, av);
+ err = prepare6(pkt, skb, av);
*crc = rxe_icrc_hdr(pkt, skb);
@@ -501,11 +491,6 @@ void rxe_loopback(struct sk_buff *skb)
rxe_rcv(skb);
}
-static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
-{
- return rxe->port.port_guid == av->grh.dgid.global.interface_id;
-}
-
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt)
{
@@ -625,7 +610,7 @@ void rxe_port_up(struct rxe_dev *rxe)
port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
- pr_info("set %s active\n", rxe->ib_dev.name);
+ dev_info(&rxe->ib_dev.dev, "set active\n");
}
/* Caller must hold net_info_lock */
@@ -638,7 +623,7 @@ void rxe_port_down(struct rxe_dev *rxe)
port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
rxe_port_event(rxe, IB_EVENT_PORT_ERR);
- pr_info("set %s down\n", rxe->ib_dev.name);
+ dev_info(&rxe->ib_dev.dev, "set down\n");
}
static int rxe_notify(struct notifier_block *not_blk,
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 4555510d86c4..bdea899a58ac 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -90,7 +90,6 @@ enum rxe_device_param {
RXE_MAX_RES_RD_ATOM = 0x3f000,
RXE_MAX_QP_INIT_RD_ATOM = 128,
RXE_MAX_EE_INIT_RD_ATOM = 0,
- RXE_ATOMIC_CAP = 1,
RXE_MAX_EE = 0,
RXE_MAX_RDD = 0,
RXE_MAX_MW = 0,
@@ -139,9 +138,6 @@ enum rxe_device_param {
/* default/initial rxe port parameters */
enum rxe_port_param {
- RXE_PORT_STATE = IB_PORT_DOWN,
- RXE_PORT_MAX_MTU = IB_MTU_4096,
- RXE_PORT_ACTIVE_MTU = IB_MTU_256,
RXE_PORT_GID_TBL_LEN = 1024,
RXE_PORT_PORT_CAP_FLAGS = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP,
RXE_PORT_MAX_MSG_SZ = 0x800000,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index b4a8acc7bb7d..36b53fb94a49 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -207,7 +207,7 @@ int rxe_pool_init(
kref_init(&pool->ref_cnt);
- spin_lock_init(&pool->pool_lock);
+ rwlock_init(&pool->pool_lock);
if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
err = rxe_pool_init_index(pool,
@@ -222,7 +222,7 @@ int rxe_pool_init(
pool->key_size = rxe_type_info[type].key_size;
}
- pool->state = rxe_pool_valid;
+ pool->state = RXE_POOL_STATE_VALID;
out:
return err;
@@ -232,7 +232,7 @@ static void rxe_pool_release(struct kref *kref)
{
struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
- pool->state = rxe_pool_invalid;
+ pool->state = RXE_POOL_STATE_INVALID;
kfree(pool->table);
}
@@ -245,12 +245,12 @@ int rxe_pool_cleanup(struct rxe_pool *pool)
{
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
- pool->state = rxe_pool_invalid;
+ write_lock_irqsave(&pool->pool_lock, flags);
+ pool->state = RXE_POOL_STATE_INVALID;
if (atomic_read(&pool->num_elem) > 0)
pr_warn("%s pool destroyed with unfree'd elem\n",
pool_name(pool));
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
rxe_pool_put(pool);
@@ -336,10 +336,10 @@ void rxe_add_key(void *arg, void *key)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
insert_key(pool, elem);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_drop_key(void *arg)
@@ -348,9 +348,9 @@ void rxe_drop_key(void *arg)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
rb_erase(&elem->node, &pool->tree);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_add_index(void *arg)
@@ -359,10 +359,10 @@ void rxe_add_index(void *arg)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
elem->index = alloc_index(pool);
insert_index(pool, elem);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_drop_index(void *arg)
@@ -371,10 +371,10 @@ void rxe_drop_index(void *arg)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
clear_bit(elem->index - pool->min_index, pool->table);
rb_erase(&elem->node, &pool->tree);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void *rxe_alloc(struct rxe_pool *pool)
@@ -384,13 +384,13 @@ void *rxe_alloc(struct rxe_pool *pool)
might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
- spin_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != rxe_pool_valid) {
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_lock_irqsave(&pool->pool_lock, flags);
+ if (pool->state != RXE_POOL_STATE_VALID) {
+ read_unlock_irqrestore(&pool->pool_lock, flags);
return NULL;
}
kref_get(&pool->ref_cnt);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
kref_get(&pool->rxe->ref_cnt);
@@ -436,9 +436,9 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
struct rxe_pool_entry *elem = NULL;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != rxe_pool_valid)
+ if (pool->state != RXE_POOL_STATE_VALID)
goto out;
node = pool->tree.rb_node;
@@ -450,15 +450,14 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
node = node->rb_left;
else if (elem->index < index)
node = node->rb_right;
- else
+ else {
+ kref_get(&elem->ref_cnt);
break;
+ }
}
- if (node)
- kref_get(&elem->ref_cnt);
-
out:
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
return node ? elem : NULL;
}
@@ -469,9 +468,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
int cmp;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != rxe_pool_valid)
+ if (pool->state != RXE_POOL_STATE_VALID)
goto out;
node = pool->tree.rb_node;
@@ -494,6 +493,6 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
kref_get(&elem->ref_cnt);
out:
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
return node ? elem : NULL;
}
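/*
 * Illustrative reader-side sketch of the lock split above (a minimal
 * variant of rxe_pool_get_index(); names from this file): lookups only
 * walk the rb-tree, so they now share the lock, while add/drop paths keep
 * exclusive access via write_lock_irqsave(). Taking the kref inside the
 * locked region closes the race with a concurrent drop.
 */
static void *pool_get_sketch(struct rxe_pool *pool, u32 index)
{
	struct rxe_pool_entry *elem = NULL;
	struct rb_node *node;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
	for (node = pool->tree.rb_node; node;) {
		elem = rb_entry(node, struct rxe_pool_entry, node);
		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else {
			kref_get(&elem->ref_cnt); /* ref taken under lock */
			break;
		}
	}
	read_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}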
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 47df28e43acf..aa4ba307097b 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -74,8 +74,8 @@ struct rxe_type_info {
extern struct rxe_type_info rxe_type_info[];
enum rxe_pool_state {
- rxe_pool_invalid,
- rxe_pool_valid,
+ RXE_POOL_STATE_INVALID,
+ RXE_POOL_STATE_VALID,
};
struct rxe_pool_entry {
@@ -90,7 +90,7 @@ struct rxe_pool_entry {
struct rxe_pool {
struct rxe_dev *rxe;
- spinlock_t pool_lock; /* pool spinlock */
+ rwlock_t pool_lock; /* protects pool add/del/search */
size_t elem_size;
struct kref ref_cnt;
void (*cleanup)(struct rxe_pool_entry *obj);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index c58452daffc7..b9710907dac2 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -34,6 +34,7 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
@@ -227,6 +228,16 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
qp->sk->sk->sk_user_data = qp;
+ /* pick a source UDP port number for this QP based on
+ * the source QPN. this spreads traffic for different QPs
+ * across different NIC RX queues (while using a single
+ * flow for a given QP to maintain packet order).
+ * the port number must be in the Dynamic Ports range
+ * (0xc000 - 0xffff).
+ */
+ qp->src_port = RXE_ROCE_V2_SPORT +
+ (hash_32_generic(qp_num(qp), 14) & 0x3fff);
+
qp->sq.max_wr = init->cap.max_send_wr;
qp->sq.max_sge = init->cap.max_send_sge;
qp->sq.max_inline = init->cap.max_inline_data;
@@ -247,7 +258,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
&qp->sq.queue->ip);
if (err) {
- kvfree(qp->sq.queue->buf);
+ vfree(qp->sq.queue->buf);
kfree(qp->sq.queue);
return err;
}
@@ -300,7 +311,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->rq.queue->buf, qp->rq.queue->buf_size,
&qp->rq.queue->ip);
if (err) {
- kvfree(qp->rq.queue->buf);
+ vfree(qp->rq.queue->buf);
kfree(qp->rq.queue);
return err;
}
@@ -408,8 +419,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
- IB_LINK_LAYER_ETHERNET)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
pr_warn("invalid mask or state for qp\n");
goto err1;
}
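/*
 * Worked example for the source-port selection above (illustrative,
 * assuming RXE_ROCE_V2_SPORT's conventional value of 0xc000):
 * hash_32_generic(qpn, 14) yields a value in [0, 0x3fff], so
 *
 *	src_port = 0xc000 + (hash & 0x3fff)  ->  [0xc000, 0xffff]
 *
 * always lands in the IANA dynamic range. E.g. a QPN hashing to 0x2a31
 * gives src_port 0xea31. Different QPs thus tend to spread across NIC RX
 * queues, while each QP keeps a single 5-tuple and hence in-order delivery.
 */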
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index d30dbac24583..5c29a1bb575a 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -122,7 +122,7 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
set_bad_pkey_cntr(port);
goto err1;
}
- } else if (qpn != 0) {
+ } else {
if (unlikely(!pkey_match(pkey,
port->pkey_tbl[qp->attr.pkey_index]
))) {
@@ -134,7 +134,7 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
}
if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
- qpn != 0 && pkt->mask) {
+ pkt->mask) {
u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
if (unlikely(deth_qkey(pkt) != qkey)) {
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 8be27238a86e..6c361d70d7cd 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -73,9 +73,6 @@ static void req_retry(struct rxe_qp *qp)
int npsn;
int first = 1;
- wqe = queue_head(qp->sq.queue);
- npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;
-
qp->req.wqe_index = consumer_index(qp->sq.queue);
qp->req.psn = qp->comp.psn;
qp->req.opcode = -1;
@@ -107,11 +104,17 @@ static void req_retry(struct rxe_qp *qp)
if (first) {
first = 0;
- if (mask & WR_WRITE_OR_SEND_MASK)
+ if (mask & WR_WRITE_OR_SEND_MASK) {
+ npsn = (qp->comp.psn - wqe->first_psn) &
+ BTH_PSN_MASK;
retry_first_write_send(qp, wqe, mask, npsn);
+ }
- if (mask & WR_READ_MASK)
+ if (mask & WR_READ_MASK) {
+ npsn = (wqe->dma.length - wqe->dma.resid) /
+ qp->mtu;
wqe->iova += npsn * qp->mtu;
+ }
}
wqe->state = wqe_state_posted;
@@ -435,7 +438,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
if (pkt->mask & RXE_RETH_MASK) {
reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
reth_set_va(pkt, wqe->iova);
- reth_set_len(pkt, wqe->dma.length);
+ reth_set_len(pkt, wqe->dma.resid);
}
if (pkt->mask & RXE_IMMDT_MASK)
@@ -476,7 +479,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
u32 *p;
int err;
- err = rxe_prepare(rxe, pkt, skb, &crc);
+ err = rxe_prepare(pkt, skb, &crc);
if (err)
return err;
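
For context on the req_retry() rework above: npsn is now computed where it is used, per work-request class. For a partially-completed RDMA READ it is derived from the bytes that are no longer outstanding, so the retried request can advance its iova past data already delivered. A worked sketch with illustrative values (field names follow rxe_send_wqe):

#include <stdio.h>

int main(void)
{
	unsigned int length = 16384;	/* wqe->dma.length: total bytes  */
	unsigned int resid  = 4096;	/* wqe->dma.resid: still pending */
	unsigned int mtu    = 1024;	/* qp->mtu                       */

	/* packets already received before the retry */
	unsigned int npsn = (length - resid) / mtu;

	printf("skip %u packets, advance iova by %u bytes\n",
	       npsn, npsn * mtu);
	return 0;
}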
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index aa5833318372..c962160292f4 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -637,7 +637,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
if (ack->mask & RXE_ATMACK_MASK)
atmack_set_orig(ack, qp->resp.atomic_orig);
- err = rxe_prepare(rxe, ack, skb, &crc);
+ err = rxe_prepare(ack, skb, &crc);
if (err) {
kfree_skb(skb);
return NULL;
@@ -682,6 +682,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
rxe_advance_resp_resource(qp);
res->type = RXE_READ_MASK;
+ res->replay = 0;
res->read.va = qp->resp.va;
res->read.va_org = qp->resp.va;
@@ -752,7 +753,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
state = RESPST_DONE;
} else {
qp->resp.res = NULL;
- qp->resp.opcode = -1;
+ if (!res->replay)
+ qp->resp.opcode = -1;
if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
qp->resp.psn = res->cur_psn;
state = RESPST_CLEANUP;
@@ -814,6 +816,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
/* next expected psn, read handles this separately */
qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
qp->resp.opcode = pkt->opcode;
qp->resp.status = IB_WC_SUCCESS;
@@ -1065,7 +1068,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
enum resp_states rc;
- u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;
+ u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
@@ -1108,6 +1111,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
res->state = (pkt->psn == res->first_psn) ?
rdatm_res_state_new :
rdatm_res_state_replay;
+ res->replay = 1;
/* Reset the resource, except length. */
res->read.va_org = iova;
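
The new ack_psn field above lets duplicate_request() compute prev_psn from the last acknowledged PSN rather than one the read-reply path may already have advanced. The (ack_psn - 1) & BTH_PSN_MASK arithmetic is only safe because PSNs are compared as 24-bit serial numbers. A sketch of a wraparound-safe compare in the same shift-into-the-sign-bits style (the in-tree psn_compare() may differ in detail):

#include <stdint.h>
#include <stdio.h>

#define BTH_PSN_MASK 0xffffffu

/* <0: a precedes b; 0: equal; >0: a follows b; all modulo 2^24 */
static int32_t psn_compare(uint32_t psn_a, uint32_t psn_b)
{
	return (int32_t)((psn_a - psn_b) << 8);
}

int main(void)
{
	uint32_t ack_psn = 0;
	uint32_t prev_psn = (ack_psn - 1) & BTH_PSN_MASK;	/* 0xffffff */

	printf("prev_psn=0x%06x precedes ack_psn: %d\n",
	       prev_psn, psn_compare(prev_psn, ack_psn) < 0);
	return 0;
}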
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 0d6c04ba7fc3..c41a5fee81f7 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
@@ -129,13 +130,18 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
q->buf_size, &q->ip);
- if (err)
+ if (err) {
+ vfree(q->buf);
+ kfree(q);
return err;
+ }
if (uresp) {
if (copy_to_user(&uresp->srq_num, &srq->srq_num,
- sizeof(uresp->srq_num)))
+ sizeof(uresp->srq_num))) {
+ rxe_queue_cleanup(q);
return -EFAULT;
+ }
}
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
index d5ed7571128f..73a19f808e1b 100644
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -105,7 +105,7 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
}
rxe_set_port_state(ndev);
- pr_info("added %s to %s\n", rxe->ib_dev.name, intf);
+ dev_info(&rxe->ib_dev.dev, "added %s\n", intf);
err:
if (ndev)
dev_put(ndev);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index f5b1e0ad6142..9c19f2027511 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1148,18 +1148,21 @@ static ssize_t parent_show(struct device *device,
static DEVICE_ATTR_RO(parent);
-static struct device_attribute *rxe_dev_attributes[] = {
- &dev_attr_parent,
+static struct attribute *rxe_dev_attributes[] = {
+ &dev_attr_parent.attr,
+ NULL
+};
+
+static const struct attribute_group rxe_attr_group = {
+ .attrs = rxe_dev_attributes,
};
int rxe_register_device(struct rxe_dev *rxe)
{
int err;
- int i;
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
- strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
dev->owner = THIS_MODULE;
@@ -1260,26 +1263,16 @@ int rxe_register_device(struct rxe_dev *rxe)
}
rxe->tfm = tfm;
+ rdma_set_device_sysfs_group(dev, &rxe_attr_group);
dev->driver_id = RDMA_DRIVER_RXE;
- err = ib_register_device(dev, NULL);
+ err = ib_register_device(dev, "rxe%d", NULL);
if (err) {
pr_warn("%s failed with error %d\n", __func__, err);
goto err1;
}
- for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
- err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
- if (err) {
- pr_warn("%s failed with error %d for attr number %d\n",
- __func__, err, i);
- goto err2;
- }
- }
-
return 0;
-err2:
- ib_unregister_device(dev);
err1:
crypto_free_shash(rxe->tfm);
@@ -1288,12 +1281,8 @@ err1:
int rxe_unregister_device(struct rxe_dev *rxe)
{
- int i;
struct ib_device *dev = &rxe->ib_dev;
- for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
- device_remove_file(&dev->dev, rxe_dev_attributes[i]);
-
ib_unregister_device(dev);
return 0;
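
The sysfs conversion above drops the post-registration device_create_file() loop, along with its partial-failure unwinding, in favour of one attribute group that the core instantiates together with the device. A compilable toy of the pattern, using simplified stand-ins for the driver-core types:

#include <stdio.h>

struct attribute {
	const char *name;
};

struct attribute_group {
	const struct attribute **attrs;
};

static const struct attribute dev_attr_parent = { .name = "parent" };

/* NULL-terminated, matching the kernel convention */
static const struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

int main(void)
{
	const struct attribute **a;

	/* the core walks the group once at registration time, so the
	 * driver needs no per-file create calls or error unwinding
	 */
	for (a = rxe_attr_group.attrs; *a; a++)
		printf("would create sysfs file: %s\n", (*a)->name);
	return 0;
}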
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index af1470d29391..82e670d6eeea 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -158,6 +158,7 @@ struct rxe_comp_info {
int opcode;
int timeout;
int timeout_retry;
+ int started_retry;
u32 retry_cnt;
u32 rnr_retry;
struct rxe_task task;
@@ -171,6 +172,7 @@ enum rdatm_res_state {
struct resp_res {
int type;
+ int replay;
u32 first_psn;
u32 last_psn;
u32 cur_psn;
@@ -195,6 +197,7 @@ struct rxe_resp_info {
enum rxe_qp_state state;
u32 msn;
u32 psn;
+ u32 ack_psn;
int opcode;
int drop_msg;
int goto_error;
@@ -248,6 +251,7 @@ struct rxe_qp {
struct socket *sk;
u32 dst_cookie;
+ u16 src_port;
struct rxe_av pri_av;
struct rxe_av alt_av;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 3d5424f335cb..0428e01e8f69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1438,11 +1438,15 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
- if (skb->protocol == htons(ETH_P_IP))
+ if (skb->protocol == htons(ETH_P_IP)) {
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ }
#if IS_ENABLED(CONFIG_IPV6)
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ }
#endif
dev_kfree_skb_any(skb);
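
The memset() calls above matter because skb->cb is a 48-byte scratch area reused by every layer: at this point it still holds IPoIB-private state, yet icmp_send() and icmpv6_send() read it through IPCB()/IP6CB() as the IP control block. A simplified userspace illustration of that aliasing hazard (the real structure layouts differ):

#include <stdio.h>
#include <string.h>

struct sk_buff {
	char cb[48];	/* per-layer scratch space, never auto-cleared */
};

struct inet_skb_parm {
	int flags;	/* stand-in for the real IPCB layout */
};

#define IPCB(skb) ((struct inet_skb_parm *)&(skb)->cb[0])

int main(void)
{
	struct sk_buff skb;

	memset(skb.cb, 0xAA, sizeof(skb.cb));	/* stale driver-private data */
	printf("stale flags: %#x\n", (unsigned int)IPCB(&skb)->flags);

	memset(IPCB(&skb), 0, sizeof(*IPCB(&skb)));	/* the fix */
	printf("after memset: %#x\n", (unsigned int)IPCB(&skb)->flags);
	return 0;
}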
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8baa75a705c5..8710214594d8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -243,7 +243,8 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
- if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
+ if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
+ new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
return -EINVAL;
priv->admin_mtu = new_mtu;
@@ -1880,6 +1881,8 @@ static int ipoib_parent_init(struct net_device *ndev)
sizeof(union ib_gid));
SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
+ priv->dev->dev_port = priv->port - 1;
+ /* Let's set this one too for backwards compatibility. */
priv->dev->dev_id = priv->port - 1;
return 0;
@@ -2385,6 +2388,35 @@ int ipoib_add_pkey_attr(struct net_device *dev)
return device_create_file(&dev->dev, &dev_attr_pkey);
}
+/*
+ * We erroneously exposed the iface's port number in the dev_id
+ * sysfs field long after dev_port was introduced for that purpose[1],
+ * and we need to stop everyone from relying on that.
+ * Let's overload the show routine for the dev_id file here
+ * to gently bring the issue up.
+ *
+ * [1] https://www.spinics.net/lists/netdev/msg272123.html
+ */
+static ssize_t dev_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+
+ if (ndev->dev_id == ndev->dev_port)
+ netdev_info_once(ndev,
+ "\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
+ current->comm);
+
+ return sprintf(buf, "%#x\n", ndev->dev_id);
+}
+static DEVICE_ATTR_RO(dev_id);
+
+int ipoib_intercept_dev_id_attr(struct net_device *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_dev_id);
+ return device_create_file(&dev->dev, &dev_attr_dev_id);
+}
+
static struct net_device *ipoib_add_port(const char *format,
struct ib_device *hca, u8 port)
{
@@ -2437,6 +2469,8 @@ static struct net_device *ipoib_add_port(const char *format,
*/
ndev->priv_destructor = ipoib_intf_free;
+ if (ipoib_intercept_dev_id_attr(ndev))
+ goto sysfs_failed;
if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(ndev))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 9f36ca786df8..1e88213459f2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -277,7 +277,7 @@ void ipoib_event(struct ib_event_handler *handler,
return;
ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
- record->device->name, record->element.port_num);
+ dev_name(&record->device->dev), record->element.port_num);
if (record->event == IB_EVENT_SM_CHANGE ||
record->event == IB_EVENT_CLIENT_REREGISTER) {
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2f6388596f88..96af06cfe0af 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -589,13 +589,19 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
ib_conn->post_recv_buf_count--;
}
-static inline void
+static inline int
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
- if (likely(rkey == desc->rsc.mr->rkey))
+ if (likely(rkey == desc->rsc.mr->rkey)) {
desc->rsc.mr_valid = 0;
- else if (likely(rkey == desc->pi_ctx->sig_mr->rkey))
+ } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) {
desc->pi_ctx->sig_mr_valid = 0;
+ } else {
+ iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
+ return -EINVAL;
+ }
+
+ return 0;
}
static int
@@ -623,12 +629,14 @@ iser_check_remote_inv(struct iser_conn *iser_conn,
if (iser_task->dir[ISER_DIR_IN]) {
desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
- iser_inv_desc(desc, rkey);
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
}
if (iser_task->dir[ISER_DIR_OUT]) {
desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
- iser_inv_desc(desc, rkey);
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
}
} else {
iser_err("failed to get task for itt=%d\n", hdr->itt);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b686a4aaffe8..946b623ba5eb 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -55,7 +55,7 @@ static void iser_event_handler(struct ib_event_handler *handler,
{
iser_err("async event %s (%d) on device %s port %d\n",
ib_event_msg(event->event), event->event,
- event->device->name, event->element.port_num);
+ dev_name(&event->device->dev), event->element.port_num);
}
/**
@@ -85,7 +85,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);
iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
- device->comps_used, ib_dev->name,
+ device->comps_used, dev_name(&ib_dev->dev),
ib_dev->num_comp_vectors, max_cqe);
device->pd = ib_alloc_pd(ib_dev,
@@ -468,7 +468,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
iser_conn->max_cmds =
ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
iser_dbg("device %s supports max_send_wr %d\n",
- device->ib_device->name, ib_dev->attrs.max_qp_wr);
+ dev_name(&device->ib_device->dev),
+ ib_dev->attrs.max_qp_wr);
}
}
@@ -764,7 +765,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
IB_DEVICE_SIGNATURE_HANDOVER)) {
iser_warn("T10-PI requested but not supported on %s, "
"continue without T10-PI\n",
- ib_conn->device->ib_device->name);
+ dev_name(&ib_conn->device->ib_device->dev));
ib_conn->pi_support = false;
} else {
ib_conn->pi_support = true;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index f39670c5c25c..e3dd13798d79 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -262,7 +262,7 @@ isert_alloc_comps(struct isert_device *device)
isert_info("Using %d CQs, %s supports %d vectors support "
"pi_capable %d\n",
- device->comps_used, device->ib_device->name,
+ device->comps_used, dev_name(&device->ib_device->dev),
device->ib_device->num_comp_vectors,
device->pi_capable);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index 267da8215e08..31cd361416ac 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -351,7 +351,8 @@ static uint32_t opa_vnic_get_dlid(struct opa_vnic_adapter *adapter,
if (unlikely(!dlid))
v_warn("Null dlid in MAC address\n");
} else if (def_port != OPA_VNIC_INVALID_PORT) {
- dlid = info->vesw.u_ucast_dlid[def_port];
+ if (def_port < OPA_VESW_MAX_NUM_DEF_PORT)
+ dlid = info->vesw.u_ucast_dlid[def_port];
}
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 15711dcc6f58..d119d9afa845 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -888,7 +888,8 @@ static void opa_vnic_event(struct ib_event_handler *handler,
return;
c_dbg("OPA_VNIC received event %d on device %s port %d\n",
- record->event, record->device->name, record->element.port_num);
+ record->event, dev_name(&record->device->dev),
+ record->element.port_num);
if (record->event == IB_EVENT_PORT_ERR)
idr_for_each(&port->vport_idr, vema_disable_vport, NULL);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 0b34e909505f..eed0eb3bb04c 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1330,17 +1330,8 @@ static void srp_terminate_io(struct srp_rport *rport)
{
struct srp_target_port *target = rport->lld_data;
struct srp_rdma_ch *ch;
- struct Scsi_Host *shost = target->scsi_host;
- struct scsi_device *sdev;
int i, j;
- /*
- * Invoking srp_terminate_io() while srp_queuecommand() is running
- * is not safe. Hence the warning statement below.
- */
- shost_for_each_device(sdev, shost)
- WARN_ON_ONCE(sdev->request_queue->request_fn_active);
-
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
@@ -3124,7 +3115,8 @@ static ssize_t show_local_ib_device(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
+ return sprintf(buf, "%s\n",
+ dev_name(&target->srp_host->srp_dev->dev->dev));
}
static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
@@ -3987,7 +3979,7 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
- return sprintf(buf, "%s\n", host->srp_dev->dev->name);
+ return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -4019,7 +4011,8 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
host->dev.class = &srp_class;
host->dev.parent = device->dev->dev.parent;
- dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
+ dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
+ port);
if (device_register(&host->dev))
goto free_host;
@@ -4095,7 +4088,7 @@ static void srp_add_one(struct ib_device *device)
srp_dev->mr_max_size = srp_dev->mr_page_size *
srp_dev->max_pages_per_mr;
pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
- device->name, mr_page_shift, attr->max_mr_size,
+ dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
attr->max_fast_reg_page_list_len,
srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index f37cbad022a2..2357aa727dcf 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -148,7 +148,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
return;
pr_debug("ASYNC event= %d on device= %s\n", event->event,
- sdev->device->name);
+ dev_name(&sdev->device->dev));
switch (event->event) {
case IB_EVENT_PORT_ERR:
@@ -1941,7 +1941,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport)
if (srpt_disconnect_ch(ch) >= 0)
pr_info("Closing channel %s because target %s_%d has been disabled\n",
ch->sess_name,
- sport->sdev->device->name, sport->port);
+ dev_name(&sport->sdev->device->dev),
+ sport->port);
srpt_close_ch(ch);
}
}
@@ -2127,7 +2128,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
if (!sport->enabled) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
- sport->sdev->device->name, port_num);
+ dev_name(&sport->sdev->device->dev), port_num);
goto reject;
}
@@ -2267,7 +2268,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
- sdev->device->name, port_num);
+ dev_name(&sdev->device->dev), port_num);
mutex_unlock(&sport->mutex);
goto reject;
}
@@ -2708,7 +2709,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
break;
}
- if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
+ if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))
return;
/* For read commands, transfer the data to the initiator. */
@@ -2842,7 +2843,7 @@ static int srpt_release_sport(struct srpt_port *sport)
while (wait_event_timeout(sport->ch_releaseQ,
srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
pr_info("%s_%d: waiting for session unregistration ...\n",
- sport->sdev->device->name, sport->port);
+ dev_name(&sport->sdev->device->dev), sport->port);
rcu_read_lock();
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
@@ -2932,7 +2933,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
}
pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
- sdev->device->attrs.max_srq_wr, device->name);
+ sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
sdev->ioctx_ring = (struct srpt_recv_ioctx **)
srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
@@ -2965,8 +2966,8 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
} else if (use_srq && !sdev->srq) {
ret = srpt_alloc_srq(sdev);
}
- pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
- sdev->use_srq, ret);
+ pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
+ dev_name(&device->dev), sdev->use_srq, ret);
return ret;
}
@@ -3052,7 +3053,7 @@ static void srpt_add_one(struct ib_device *device)
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
- sdev->device->name, i);
+ dev_name(&sdev->device->dev), i);
goto err_event;
}
}
@@ -3063,7 +3064,7 @@ static void srpt_add_one(struct ib_device *device)
out:
ib_set_client_data(device, &srpt_client, sdev);
- pr_debug("added %s.\n", device->name);
+ pr_debug("added %s.\n", dev_name(&device->dev));
return;
err_event:
@@ -3078,7 +3079,7 @@ free_dev:
kfree(sdev);
err:
sdev = NULL;
- pr_info("%s(%s) failed.\n", __func__, device->name);
+ pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
goto out;
}
@@ -3093,7 +3094,8 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
int i;
if (!sdev) {
- pr_info("%s(%s): nothing to do.\n", __func__, device->name);
+ pr_info("%s(%s): nothing to do.\n", __func__,
+ dev_name(&device->dev));
return;
}
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index b86c1e5fbc11..9e8684ab48f4 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/sort.h>
+#include <linux/pm_wakeirq.h>
#include <linux/mfd/ti_am335x_tscadc.h>
@@ -46,6 +47,7 @@ static const int config_pins[] = {
struct titsc {
struct input_dev *input;
struct ti_tscadc_dev *mfd_tscadc;
+ struct device *dev;
unsigned int irq;
unsigned int wires;
unsigned int x_plate_resistance;
@@ -276,7 +278,7 @@ static irqreturn_t titsc_irq(int irq, void *dev)
if (status & IRQENB_HW_PEN) {
ts_dev->pen_down = true;
irqclr |= IRQENB_HW_PEN;
- pm_stay_awake(ts_dev->mfd_tscadc->dev);
+ pm_stay_awake(ts_dev->dev);
}
if (status & IRQENB_PENUP) {
@@ -286,7 +288,7 @@ static irqreturn_t titsc_irq(int irq, void *dev)
input_report_key(input_dev, BTN_TOUCH, 0);
input_report_abs(input_dev, ABS_PRESSURE, 0);
input_sync(input_dev);
- pm_relax(ts_dev->mfd_tscadc->dev);
+ pm_relax(ts_dev->dev);
} else {
ts_dev->pen_down = true;
}
@@ -422,6 +424,7 @@ static int titsc_probe(struct platform_device *pdev)
ts_dev->mfd_tscadc = tscadc_dev;
ts_dev->input = input_dev;
ts_dev->irq = tscadc_dev->irq;
+ ts_dev->dev = &pdev->dev;
err = titsc_parse_dt(pdev, ts_dev);
if (err) {
@@ -436,6 +439,11 @@ static int titsc_probe(struct platform_device *pdev)
goto err_free_mem;
}
+ device_init_wakeup(&pdev->dev, true);
+ err = dev_pm_set_wake_irq(&pdev->dev, ts_dev->irq);
+ if (err)
+ dev_err(&pdev->dev, "irq wake enable failed.\n");
+
titsc_writel(ts_dev, REG_IRQSTATUS, TSC_IRQENB_MASK);
titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO0THRES);
titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_EOS);
@@ -467,6 +475,8 @@ static int titsc_probe(struct platform_device *pdev)
return 0;
err_free_irq:
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
free_irq(ts_dev->irq, ts_dev);
err_free_mem:
input_free_device(input_dev);
@@ -479,6 +489,8 @@ static int titsc_remove(struct platform_device *pdev)
struct titsc *ts_dev = platform_get_drvdata(pdev);
u32 steps;
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
free_irq(ts_dev->irq, ts_dev);
/* total steps followed by the enable mask */
@@ -499,7 +511,7 @@ static int __maybe_unused titsc_suspend(struct device *dev)
unsigned int idle;
tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
- if (device_may_wakeup(tscadc_dev->dev)) {
+ if (device_may_wakeup(dev)) {
titsc_writel(ts_dev, REG_IRQSTATUS, TSC_IRQENB_MASK);
idle = titsc_readl(ts_dev, REG_IRQENABLE);
titsc_writel(ts_dev, REG_IRQENABLE,
@@ -515,11 +527,11 @@ static int __maybe_unused titsc_resume(struct device *dev)
struct ti_tscadc_dev *tscadc_dev;
tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
- if (device_may_wakeup(tscadc_dev->dev)) {
+ if (device_may_wakeup(dev)) {
titsc_writel(ts_dev, REG_IRQWAKEUP,
0x00);
titsc_writel(ts_dev, REG_IRQCLR, IRQENB_HW_PEN);
- pm_relax(ts_dev->mfd_tscadc->dev);
+ pm_relax(dev);
}
titsc_step_config(ts_dev);
titsc_writel(ts_dev, REG_FIFO0THR,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c60395b7470f..d9a25715650e 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -186,6 +186,19 @@ config INTEL_IOMMU
and include PCI device scope covered by these DMA
remapping devices.
+config INTEL_IOMMU_DEBUGFS
+ bool "Export Intel IOMMU internals in Debugfs"
+ depends on INTEL_IOMMU && IOMMU_DEBUGFS
+ help
+ !!!WARNING!!!
+
+ DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
+
+ Expose Intel IOMMU internals in Debugfs.
+
+ This option is -NOT- intended for production environments, and should
+ only be enabled for debugging Intel IOMMU.
+
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on INTEL_IOMMU && X86
@@ -372,6 +385,14 @@ config S390_CCW_IOMMU
Enables bits of IOMMU API required by VFIO. The iommu_ops
is not implemented as it is not necessary for VFIO.
+config S390_AP_IOMMU
+ bool "S390 AP IOMMU Support"
+ depends on S390 && ZCRYPT
+ select IOMMU_API
+ help
+ Enables bits of IOMMU API required by VFIO. The iommu_ops
+ is not implemented as it is not necessary for VFIO.
+
config MTK_IOMMU
bool "MTK IOMMU Support"
depends on ARM || ARM64
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index ab5eba6edf82..a158a68c8ea8 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
+obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index bee0dfb7b93b..1167ff0416cf 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3083,6 +3083,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return (irq_remapping_enabled == 1);
case IOMMU_CAP_NOEXEC:
return false;
+ default:
+ break;
}
return false;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 3931c7de7c69..bb2cd29e1658 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1719,7 +1719,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
NULL,
};
-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
u32 range, misc, low, high;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5059d09f3202..6947ccf26512 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IOMMU API for ARM architected SMMUv3 implementations.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Copyright (C) 2015 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
@@ -567,7 +556,8 @@ struct arm_smmu_device {
int gerr_irq;
int combined_irq;
- atomic_t sync_nr;
+ u32 sync_nr;
+ u8 prev_cmd_opcode;
unsigned long ias; /* IPA */
unsigned long oas; /* PA */
@@ -611,6 +601,7 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
+ bool non_strict;
enum arm_smmu_domain_stage stage;
union {
@@ -708,7 +699,7 @@ static void queue_inc_prod(struct arm_smmu_queue *q)
}
/*
- * Wait for the SMMU to consume items. If drain is true, wait until the queue
+ * Wait for the SMMU to consume items. If sync is true, wait until the queue
* is empty. Otherwise, wait until there is at least one free slot.
*/
static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
@@ -901,6 +892,8 @@ static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
struct arm_smmu_queue *q = &smmu->cmdq.q;
bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+ smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]);
+
while (queue_insert_raw(q, cmd) == -ENOSPC) {
if (queue_poll_cons(q, false, wfe))
dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
@@ -948,15 +941,21 @@ static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
struct arm_smmu_cmdq_ent ent = {
.opcode = CMDQ_OP_CMD_SYNC,
.sync = {
- .msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
.msiaddr = virt_to_phys(&smmu->sync_count),
},
};
- arm_smmu_cmdq_build_cmd(cmd, &ent);
-
spin_lock_irqsave(&smmu->cmdq.lock, flags);
- arm_smmu_cmdq_insert_cmd(smmu, cmd);
+
+ /* Piggy-back on the previous command if it's a SYNC */
+ if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) {
+ ent.sync.msidata = smmu->sync_nr;
+ } else {
+ ent.sync.msidata = ++smmu->sync_nr;
+ arm_smmu_cmdq_build_cmd(cmd, &ent);
+ arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ }
+
spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
@@ -1372,15 +1371,11 @@ static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
}
/* IO_PGTABLE API */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
-{
- arm_smmu_cmdq_issue_sync(smmu);
-}
-
static void arm_smmu_tlb_sync(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- __arm_smmu_tlb_sync(smmu_domain->smmu);
+
+ arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
@@ -1398,8 +1393,14 @@ static void arm_smmu_tlb_inv_context(void *cookie)
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
}
+ /*
+ * NOTE: when io-pgtable is in non-strict mode, we may get here with
+ * PTEs previously cleared by unmaps on the current CPU not yet visible
+ * to the SMMU. We are relying on the DSB implicit in queue_inc_prod()
+ * to guarantee those are observed before the TLBI. Do be careful, 007.
+ */
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- __arm_smmu_tlb_sync(smmu);
+ arm_smmu_cmdq_issue_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
@@ -1624,6 +1625,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
+ if (smmu_domain->non_strict)
+ pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops)
return -ENOMEM;
@@ -1772,12 +1776,20 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
return ops->unmap(ops, iova, size);
}
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->smmu)
+ arm_smmu_tlb_inv_context(smmu_domain);
+}
+
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
if (smmu)
- __arm_smmu_tlb_sync(smmu);
+ arm_smmu_cmdq_issue_sync(smmu);
}
static phys_addr_t
@@ -1917,15 +1929,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = smmu_domain->non_strict;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
default:
- return -ENODEV;
+ return -EINVAL;
}
}
@@ -1935,26 +1959,37 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
mutex_lock(&smmu_domain->init_mutex);
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ if (*(int *)data)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ smmu_domain->non_strict = *(int *)data;
+ break;
+ default:
+ ret = -ENODEV;
}
-
- if (*(int *)data)
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
- else
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
break;
default:
- ret = -ENODEV;
+ ret = -EINVAL;
}
out_unlock:
@@ -1999,7 +2034,7 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
- .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
@@ -2180,7 +2215,6 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
- atomic_set(&smmu->sync_nr, 0);
ret = arm_smmu_init_queues(smmu);
if (ret)
return ret;
@@ -2353,8 +2387,8 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
irq = smmu->combined_irq;
if (irq) {
/*
- * Cavium ThunderX2 implementation doesn't not support unique
- * irq lines. Use single irq line for all the SMMUv3 interrupts.
+ * Cavium ThunderX2 implementation doesn't support unique irq
+ * lines. Use a single irq line for all the SMMUv3 interrupts.
*/
ret = devm_request_threaded_irq(smmu->dev, irq,
arm_smmu_combined_irq_handler,
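
On the CMD_SYNC batching above: with the queue lock held, a caller that finds the previous command was itself a CMD_SYNC reuses that command's MSI sequence number instead of inserting another SYNC, then polls for the same value. A toy model of the elision (simplified stand-in types; the real driver serialises this path with cmdq.lock):

#include <stdint.h>
#include <stdio.h>

enum opcode { OP_TLBI, OP_SYNC };

struct cmdq {
	uint32_t sync_nr;	/* last MSI sequence number handed out */
	enum opcode prev_opcode;
};

static uint32_t issue_sync(struct cmdq *q)
{
	uint32_t msidata;

	if (q->prev_opcode == OP_SYNC) {
		msidata = q->sync_nr;	/* piggy-back: queue nothing new */
	} else {
		msidata = ++q->sync_nr;	/* build and insert a fresh SYNC */
		q->prev_opcode = OP_SYNC;
	}
	return msidata;			/* caller polls for this value */
}

int main(void)
{
	struct cmdq q = { .sync_nr = 0, .prev_opcode = OP_TLBI };

	printf("first sync waits on %u\n", issue_sync(&q));
	printf("back-to-back sync reuses %u\n", issue_sync(&q));
	return 0;
}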
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fd1b80ef9490..5a28ae892504 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -52,6 +52,7 @@
#include <linux/spinlock.h>
#include <linux/amba/bus.h>
+#include <linux/fsl/mc.h>
#include "io-pgtable.h"
#include "arm-smmu-regs.h"
@@ -246,6 +247,7 @@ struct arm_smmu_domain {
const struct iommu_gather_ops *tlb_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
+ bool non_strict;
struct mutex init_mutex; /* Protects smmu pointer */
spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
struct iommu_domain domain;
@@ -447,7 +449,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
- writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+ /*
+ * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
+ * cleared by the current CPU are visible to the SMMU before the TLBI.
+ */
+ writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
arm_smmu_tlb_sync_context(cookie);
}
@@ -457,7 +463,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *base = ARM_SMMU_GR0(smmu);
- writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+ /* NOTE: see above */
+ writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
arm_smmu_tlb_sync_global(smmu);
}
@@ -469,6 +476,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
+
if (stage1) {
reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
@@ -510,6 +520,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
struct arm_smmu_domain *smmu_domain = cookie;
void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
+
writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}
@@ -863,6 +876,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
+ if (smmu_domain->non_strict)
+ pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
smmu_domain->smmu = smmu;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops) {
@@ -1252,6 +1268,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return ops->unmap(ops, iova, size);
}
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->tlb_ops)
+ smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+}
+
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -1459,6 +1483,8 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
if (dev_is_pci(dev))
group = pci_device_group(dev);
+ else if (dev_is_fsl_mc(dev))
+ group = fsl_mc_device_group(dev);
else
group = generic_device_group(dev);
@@ -1470,15 +1496,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = smmu_domain->non_strict;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
default:
- return -ENODEV;
+ return -EINVAL;
}
}
@@ -1488,28 +1526,38 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
mutex_lock(&smmu_domain->init_mutex);
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ if (*(int *)data)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ smmu_domain->non_strict = *(int *)data;
+ break;
+ default:
+ ret = -ENODEV;
}
-
- if (*(int *)data)
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
- else
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
break;
default:
- ret = -ENODEV;
+ ret = -EINVAL;
}
-
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -1562,7 +1610,7 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
- .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
@@ -2036,6 +2084,10 @@ static void arm_smmu_bus_init(void)
bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}
#endif
+#ifdef CONFIG_FSL_MC_BUS
+ if (!iommu_present(&fsl_mc_bus_type))
+ bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+#endif
}
static int arm_smmu_device_probe(struct platform_device *pdev)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 511ff9a1d6d9..d1b04753b204 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -55,6 +55,9 @@ struct iommu_dma_cookie {
};
struct list_head msi_page_list;
spinlock_t msi_lock;
+
+ /* Domain for flush queue callback; NULL if flush queue not in use */
+ struct iommu_domain *fq_domain;
};
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
@@ -257,6 +260,20 @@ static int iova_reserve_iommu_regions(struct device *dev,
return ret;
}
+static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
+{
+ struct iommu_dma_cookie *cookie;
+ struct iommu_domain *domain;
+
+ cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
+ domain = cookie->fq_domain;
+ /*
+ * An IOMMU driver that supports DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
+ * is expected to provide a non-NULL ops->flush_iotlb_all.
+ */
+ domain->ops->flush_iotlb_all(domain);
+}
+
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -275,6 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
+ int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -308,6 +326,13 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
init_iova_domain(iovad, 1UL << order, base_pfn);
+
+ if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
+ cookie->fq_domain = domain;
+ init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
+ }
+
if (!dev)
return 0;
@@ -393,6 +418,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
/* The MSI case is only ever cleaning up its most recent allocation */
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
+ else if (cookie->fq_domain) /* non-strict mode */
+ queue_iova(iovad, iova_pfn(iovad, iova),
+ size >> iova_shift(iovad), 0);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
@@ -408,7 +436,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
dma_addr -= iova_off;
size = iova_align(iovad, size + iova_off);
- WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
+ WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
+ if (!cookie->fq_domain)
+ iommu_tlb_sync(domain);
iommu_dma_free_iova(cookie, dma_addr, size);
}
@@ -491,7 +521,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
*handle = IOMMU_MAPPING_ERROR;
}
@@ -518,7 +548,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
unsigned long attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
struct page **pages;
@@ -606,9 +636,8 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot)
+ size_t size, int prot, struct iommu_domain *domain)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
size_t iova_off = 0;
dma_addr_t iova;
@@ -632,13 +661,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, int prot)
{
- return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
+ return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
+ iommu_get_dma_domain(dev));
}
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}
/*
@@ -726,7 +756,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int prot)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
@@ -811,20 +841,21 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
sg = tmp;
}
end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
}
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+ iommu_get_dma_domain(dev));
}
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -850,7 +881,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = __iommu_dma_map(dev, msi_addr, size, prot);
+ iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
if (iommu_dma_mapping_error(dev, iova))
goto out_free_page;
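
Pulling the dma-iommu changes above together: every unmap now goes through iommu_unmap_fast(); a strict domain then syncs the TLB inline before its IOVA is freed for reuse, while a flush-queue (non-strict) domain defers the IOVA until the queue's callback runs the driver's flush_iotlb_all(). A schematic sketch of that branch, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

struct cookie {
	bool fq_domain;	/* flush queue in use for this domain? */
};

static void unmap_one(struct cookie *c, unsigned long pfn,
		      unsigned long npages)
{
	/* iommu_unmap_fast(): tear down the mapping, no TLB sync yet */
	printf("unmap pfn %lu (+%lu pages)\n", pfn, npages);

	if (!c->fq_domain) {
		/* strict: invalidate before the IOVA can be reused */
		printf("iommu_tlb_sync(); free_iova_fast(%lu)\n", pfn);
	} else {
		/* non-strict: defer; the flush queue invalidates later */
		printf("queue_iova(%lu)\n", pfn);
	}
}

int main(void)
{
	struct cookie strict = { .fq_domain = false };
	struct cookie lazy   = { .fq_domain = true };

	unmap_one(&strict, 0x1000, 4);
	unmap_one(&lazy, 0x2000, 4);
	return 0;
}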
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 8540625796a1..1b955aea44dd 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -543,7 +543,7 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
return ~(u32)0;
}
- for_each_node_by_type(node, "cpu") {
+ for_each_of_cpu_node(node) {
prop = of_get_property(node, "reg", &len);
for (i = 0; i < len / sizeof(u32); i++) {
if (be32_to_cpup(&prop[i]) == vcpu) {
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index f089136e9c3f..9b528cfcc6c3 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -814,6 +814,55 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
return 0;
}
+static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
+{
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
+ if (dma_domain->enabled) {
+ pr_debug("Can't set geometry attributes as domain is active\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EBUSY;
+ }
+
+ /* Ensure that the geometry has been set for the domain */
+ if (!dma_domain->geom_size) {
+ pr_debug("Please configure geometry before setting the number of windows\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure the window count is valid, i.e. no greater than the
+ * maximum permissible limit and a power of two.
+ */
+ if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
+ pr_debug("Invalid window count\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
+ w_count > 1 ? w_count : 0);
+ if (!ret) {
+ kfree(dma_domain->win_arr);
+ dma_domain->win_arr = kcalloc(w_count,
+ sizeof(*dma_domain->win_arr),
+ GFP_ATOMIC);
+ if (!dma_domain->win_arr) {
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -ENOMEM;
+ }
+ dma_domain->win_cnt = w_count;
+ }
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+ return ret;
+}
+
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
{
@@ -830,6 +879,9 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
case DOMAIN_ATTR_FSL_PAMU_ENABLE:
ret = configure_domain_dma_state(dma_domain, *(int *)data);
break;
+ case DOMAIN_ATTR_WINDOWS:
+ ret = fsl_pamu_set_windows(domain, *(u32 *)data);
+ break;
default:
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
@@ -856,6 +908,9 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
case DOMAIN_ATTR_FSL_PAMUV1:
*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
break;
+ case DOMAIN_ATTR_WINDOWS:
+ *(u32 *)data = dma_domain->win_cnt;
+ break;
default:
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
@@ -916,13 +971,13 @@ static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
struct pci_controller *pci_ctl;
- bool pci_endpt_partioning;
+ bool pci_endpt_partitioning;
struct iommu_group *group = NULL;
pci_ctl = pci_bus_to_host(pdev->bus);
- pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
+ pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
/* We can partition PCIe devices so assign device group to the device */
- if (pci_endpt_partioning) {
+ if (pci_endpt_partitioning) {
group = pci_device_group(&pdev->dev);
/*
@@ -994,62 +1049,6 @@ static void fsl_pamu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
-static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
- if (dma_domain->enabled) {
- pr_debug("Can't set geometry attributes as domain is active\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EBUSY;
- }
-
- /* Ensure that the geometry has been set for the domain */
- if (!dma_domain->geom_size) {
- pr_debug("Please configure geometry before setting the number of windows\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- /*
- * Ensure we have valid window count i.e. it should be less than
- * maximum permissible limit and should be a power of two.
- */
- if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
- pr_debug("Invalid window count\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
- w_count > 1 ? w_count : 0);
- if (!ret) {
- kfree(dma_domain->win_arr);
- dma_domain->win_arr = kcalloc(w_count,
- sizeof(*dma_domain->win_arr),
- GFP_ATOMIC);
- if (!dma_domain->win_arr) {
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -ENOMEM;
- }
- dma_domain->win_cnt = w_count;
- }
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return ret;
-}
-
-static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
-
- return dma_domain->win_cnt;
-}
-
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
@@ -1058,8 +1057,6 @@ static const struct iommu_ops fsl_pamu_ops = {
.detach_dev = fsl_pamu_detach_device,
.domain_window_enable = fsl_pamu_window_enable,
.domain_window_disable = fsl_pamu_window_disable,
- .domain_get_windows = fsl_pamu_get_windows,
- .domain_set_windows = fsl_pamu_set_windows,
.iova_to_phys = fsl_pamu_iova_to_phys,
.domain_set_attr = fsl_pamu_set_domain_attr,
.domain_get_attr = fsl_pamu_get_domain_attr,
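
The window-count validation in the relocated fsl_pamu_set_windows() above reduces to two checks: the count must be a power of two and must not exceed the PAMU limit. A tiny sketch, with an illustrative limit standing in for pamu_get_max_subwin_cnt():

#include <stdbool.h>
#include <stdio.h>

#define MAX_SUBWIN_CNT 16u	/* stand-in for pamu_get_max_subwin_cnt() */

static bool is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned int w;

	for (w = 0; w <= 20; w++)
		if (w <= MAX_SUBWIN_CNT && is_power_of_2(w))
			printf("window count %u: ok\n", w);
	return 0;
}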
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
new file mode 100644
index 000000000000..7fabf9b1c2dc
--- /dev/null
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2018 Intel Corporation.
+ *
+ * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
+ * Sohil Mehta <sohil.mehta@intel.com>
+ * Jacob Pan <jacob.jun.pan@linux.intel.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dmar.h>
+#include <linux/intel-iommu.h>
+#include <linux/pci.h>
+
+#include <asm/irq_remapping.h>
+
+struct iommu_regset {
+ int offset;
+ const char *regs;
+};
+
+#define IOMMU_REGSET_ENTRY(_reg_) \
+ { DMAR_##_reg_##_REG, __stringify(_reg_) }
+static const struct iommu_regset iommu_regs[] = {
+ IOMMU_REGSET_ENTRY(VER),
+ IOMMU_REGSET_ENTRY(CAP),
+ IOMMU_REGSET_ENTRY(ECAP),
+ IOMMU_REGSET_ENTRY(GCMD),
+ IOMMU_REGSET_ENTRY(GSTS),
+ IOMMU_REGSET_ENTRY(RTADDR),
+ IOMMU_REGSET_ENTRY(CCMD),
+ IOMMU_REGSET_ENTRY(FSTS),
+ IOMMU_REGSET_ENTRY(FECTL),
+ IOMMU_REGSET_ENTRY(FEDATA),
+ IOMMU_REGSET_ENTRY(FEADDR),
+ IOMMU_REGSET_ENTRY(FEUADDR),
+ IOMMU_REGSET_ENTRY(AFLOG),
+ IOMMU_REGSET_ENTRY(PMEN),
+ IOMMU_REGSET_ENTRY(PLMBASE),
+ IOMMU_REGSET_ENTRY(PLMLIMIT),
+ IOMMU_REGSET_ENTRY(PHMBASE),
+ IOMMU_REGSET_ENTRY(PHMLIMIT),
+ IOMMU_REGSET_ENTRY(IQH),
+ IOMMU_REGSET_ENTRY(IQT),
+ IOMMU_REGSET_ENTRY(IQA),
+ IOMMU_REGSET_ENTRY(ICS),
+ IOMMU_REGSET_ENTRY(IRTA),
+ IOMMU_REGSET_ENTRY(PQH),
+ IOMMU_REGSET_ENTRY(PQT),
+ IOMMU_REGSET_ENTRY(PQA),
+ IOMMU_REGSET_ENTRY(PRS),
+ IOMMU_REGSET_ENTRY(PECTL),
+ IOMMU_REGSET_ENTRY(PEDATA),
+ IOMMU_REGSET_ENTRY(PEADDR),
+ IOMMU_REGSET_ENTRY(PEUADDR),
+ IOMMU_REGSET_ENTRY(MTRRCAP),
+ IOMMU_REGSET_ENTRY(MTRRDEF),
+ IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
+ IOMMU_REGSET_ENTRY(VCCAP),
+ IOMMU_REGSET_ENTRY(VCMD),
+ IOMMU_REGSET_ENTRY(VCRSP),
+};
+
+static int iommu_regset_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ unsigned long flag;
+ int i, ret = 0;
+ u64 value;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!drhd->reg_base_addr) {
+ seq_puts(m, "IOMMU: Invalid base address\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
+ iommu->name, drhd->reg_base_addr);
+ seq_puts(m, "Name\t\t\tOffset\t\tContents\n");
+ /*
+ * Publish the contents of the 64-bit hardware registers
+ * by adding the offset to the pointer (virtual address).
+ */
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+	for (i = 0; i < ARRAY_SIZE(iommu_regs); i++) {
+ value = dmar_readq(iommu->reg + iommu_regs[i].offset);
+ seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
+ iommu_regs[i].regs, iommu_regs[i].offset,
+ value);
+ }
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ seq_putc(m, '\n');
+ }
+out:
+ rcu_read_unlock();
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_regset);
+
+static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
+ int bus)
+{
+ struct context_entry *context;
+ int devfn;
+
+ seq_printf(m, " Context Table Entries for Bus: %d\n", bus);
+ seq_puts(m, " Entry\tB:D.F\tHigh\tLow\n");
+
+ for (devfn = 0; devfn < 256; devfn++) {
+ context = iommu_context_addr(iommu, bus, devfn, 0);
+ if (!context)
+ return;
+
+ if (!context_present(context))
+ continue;
+
+ seq_printf(m, " %-5d\t%02x:%02x.%x\t%-6llx\t%llx\n", devfn,
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ context[0].hi, context[0].lo);
+ }
+}
+
+static void root_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu)
+{
+ unsigned long flags;
+ int bus;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ seq_printf(m, "IOMMU %s: Root Table Address:%llx\n", iommu->name,
+ (u64)virt_to_phys(iommu->root_entry));
+ seq_puts(m, "Root Table Entries:\n");
+
+ for (bus = 0; bus < 256; bus++) {
+ if (!(iommu->root_entry[bus].lo & 1))
+ continue;
+
+ seq_printf(m, " Bus: %d H: %llx L: %llx\n", bus,
+ iommu->root_entry[bus].hi,
+ iommu->root_entry[bus].lo);
+
+ ctx_tbl_entry_show(m, iommu, bus);
+ seq_putc(m, '\n');
+ }
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static int dmar_translation_struct_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ root_tbl_entry_show(m, iommu);
+ seq_putc(m, '\n');
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);
+
+#ifdef CONFIG_IRQ_REMAP
+static void ir_tbl_remap_entry_show(struct seq_file *m,
+ struct intel_iommu *iommu)
+{
+ struct irte *ri_entry;
+ unsigned long flags;
+ int idx;
+
+ seq_puts(m, " Entry SrcID DstID Vct IRTE_high\t\tIRTE_low\n");
+
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
+ ri_entry = &iommu->ir_table->base[idx];
+ if (!ri_entry->present || ri_entry->p_pst)
+ continue;
+
+ seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x %016llx\t%016llx\n",
+ idx, PCI_BUS_NUM(ri_entry->sid),
+ PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid),
+ ri_entry->dest_id, ri_entry->vector,
+ ri_entry->high, ri_entry->low);
+ }
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+}
+
+static void ir_tbl_posted_entry_show(struct seq_file *m,
+ struct intel_iommu *iommu)
+{
+ struct irte *pi_entry;
+ unsigned long flags;
+ int idx;
+
+ seq_puts(m, " Entry SrcID PDA_high PDA_low Vct IRTE_high\t\tIRTE_low\n");
+
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
+ pi_entry = &iommu->ir_table->base[idx];
+ if (!pi_entry->present || !pi_entry->p_pst)
+ continue;
+
+ seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x %016llx\t%016llx\n",
+ idx, PCI_BUS_NUM(pi_entry->sid),
+ PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid),
+ pi_entry->pda_h, pi_entry->pda_l << 6,
+ pi_entry->vector, pi_entry->high,
+ pi_entry->low);
+ }
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+}
+
+/*
+ * For active IOMMUs, go through the interrupt remapping
+ * table and print the valid entries in a table format for
+ * remapped and posted interrupts.
+ */
+static int ir_translation_struct_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ u64 irta;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!ecap_ir_support(iommu->ecap))
+ continue;
+
+ seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
+ iommu->name);
+
+ if (iommu->ir_table) {
+ irta = virt_to_phys(iommu->ir_table->base);
+ seq_printf(m, " IR table address:%llx\n", irta);
+ ir_tbl_remap_entry_show(m, iommu);
+ } else {
+ seq_puts(m, "Interrupt Remapping is not enabled\n");
+ }
+ seq_putc(m, '\n');
+ }
+
+ seq_puts(m, "****\n\n");
+
+ for_each_active_iommu(iommu, drhd) {
+ if (!cap_pi_support(iommu->cap))
+ continue;
+
+ seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
+ iommu->name);
+
+ if (iommu->ir_table) {
+ irta = virt_to_phys(iommu->ir_table->base);
+ seq_printf(m, " IR table address:%llx\n", irta);
+ ir_tbl_posted_entry_show(m, iommu);
+ } else {
+ seq_puts(m, "Interrupt Remapping is not enabled\n");
+ }
+ seq_putc(m, '\n');
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
+#endif
+
+void __init intel_iommu_debugfs_init(void)
+{
+ struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
+ iommu_debugfs_dir);
+
+ debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
+ &iommu_regset_fops);
+ debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
+ NULL, &dmar_translation_struct_fops);
+#ifdef CONFIG_IRQ_REMAP
+ debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
+ NULL, &ir_translation_struct_fops);
+#endif
+}
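
The new file leans on DEFINE_SHOW_ATTRIBUTE() to cut the usual seq_file boilerplate. As a sketch of what each use provides (approximating the macro's definition in <linux/seq_file.h>; details may vary by kernel version), DEFINE_SHOW_ATTRIBUTE(iommu_regset) expands to roughly:

static int iommu_regset_open(struct inode *inode, struct file *file)
{
	return single_open(file, iommu_regset_show, inode->i_private);
}

static const struct file_operations iommu_regset_fops = {
	.owner		= THIS_MODULE,
	.open		= iommu_regset_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

Once intel_iommu_debugfs_init() has run, the dumps are typically readable under /sys/kernel/debug/iommu/intel/ (iommu_regset, dmar_translation_struct and, with CONFIG_IRQ_REMAP, ir_translation_struct).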
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 76f0a5d16ed3..f3ccf025108b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -185,16 +185,6 @@ static int rwbf_quirk;
static int force_on = 0;
int intel_iommu_tboot_noforce;
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
- u64 lo;
- u64 hi;
-};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
/*
@@ -220,21 +210,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
- u64 lo;
- u64 hi;
-};
static inline void context_clear_pasid_enable(struct context_entry *context)
{
@@ -261,7 +236,7 @@ static inline bool __context_present(struct context_entry *context)
return (context->lo & 1);
}
-static inline bool context_present(struct context_entry *context)
+bool context_present(struct context_entry *context)
{
return context_pasid_enabled(context) ?
__context_present(context) :
@@ -788,8 +763,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
-static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
- u8 bus, u8 devfn, int alloc)
+struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+ u8 devfn, int alloc)
{
struct root_entry *root = &iommu->root_entry[bus];
struct context_entry *context;
@@ -4860,6 +4835,7 @@ int __init intel_iommu_init(void)
cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
intel_iommu_cpu_dead);
intel_iommu_enabled = 1;
+ intel_iommu_debugfs_init();
return 0;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 967450bd421a..c2d6c11431de 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -76,7 +76,7 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS];
 * in single-threaded environment with interrupt disabled, so no need to take
* the dmar_global_lock.
*/
-static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
+DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static const struct irq_domain_ops intel_ir_domain_ops;
static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index b5948ba6b3b3..445c3bde0480 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -587,6 +587,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
}
io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+ io_pgtable_tlb_sync(&data->iop);
return size;
}
@@ -642,6 +643,13 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
io_pgtable_tlb_sync(iop);
ptep = iopte_deref(pte[i], lvl);
__arm_v7s_free_table(ptep, lvl + 1, data);
+ } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+ /*
+ * Order the PTE update against queueing the IOVA, to
+ * guarantee that a flush callback from a different CPU
+ * has observed it before the TLBIALL can be issued.
+ */
+ smp_wmb();
} else {
io_pgtable_tlb_add_flush(iop, iova, blk_size,
blk_size, true);
@@ -712,7 +720,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
IO_PGTABLE_QUIRK_NO_PERMS |
IO_PGTABLE_QUIRK_TLBI_ON_MAP |
IO_PGTABLE_QUIRK_ARM_MTK_4GB |
- IO_PGTABLE_QUIRK_NO_DMA))
+ IO_PGTABLE_QUIRK_NO_DMA |
+ IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
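
The smp_wmb() added to both page-table formats is one half of a publish/observe pairing: the cleared PTE must be visible before the IOVA can be seen in the flush queue, so the eventual TLBIALL only ever invalidates a dead entry. A toy model of that ordering, with hypothetical names (pending_iova stands in for the real per-CPU flush queue, whose locking provides the matching barrier in the actual code):

#include <linux/kernel.h>

static u64 pte;
static unsigned long pending_iova;

/* Unmap path: kill the translation, then publish the IOVA. */
static void unmap_one(unsigned long iova)
{
	WRITE_ONCE(pte, 0);		/* clear the PTE */
	smp_wmb();			/* order the clear before the publish */
	WRITE_ONCE(pending_iova, iova);	/* queue for delayed invalidation */
}

/* Flush-queue drain on another CPU: once the IOVA is visible, the
 * cleared PTE is guaranteed visible too. */
static void drain_one(void)
{
	unsigned long iova = READ_ONCE(pending_iova);

	smp_rmb();			/* pairs with the smp_wmb() above */
	WARN_ON(iova && READ_ONCE(pte));	/* cannot fire */
}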
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 88641b4560bc..237cacd4a62b 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -574,13 +574,13 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
return 0;
tablep = iopte_deref(pte, data);
+ } else if (unmap_idx >= 0) {
+ io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+ io_pgtable_tlb_sync(&data->iop);
+ return size;
}
- if (unmap_idx < 0)
- return __arm_lpae_unmap(data, iova, size, lvl, tablep);
-
- io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
- return size;
+ return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
@@ -610,6 +610,13 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
io_pgtable_tlb_sync(iop);
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
+ } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+ /*
+ * Order the PTE update against queueing the IOVA, to
+ * guarantee that a flush callback from a different CPU
+ * has observed it before the TLBIALL can be issued.
+ */
+ smp_wmb();
} else {
io_pgtable_tlb_add_flush(iop, iova, size, size, true);
}
@@ -772,7 +779,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
u64 reg;
struct arm_lpae_io_pgtable *data;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
+ IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -864,7 +872,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
/* The NS quirk doesn't apply at stage 2 */
- if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
+ IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 2df79093cad9..47d5ae559329 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -71,12 +71,17 @@ struct io_pgtable_cfg {
* be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
* software-emulated IOMMU), such that pagetable updates need not
* be treated as explicit DMA data.
+ *
+ * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
+ * on unmap, for DMA domains using the flush queue mechanism for
+ * delayed invalidation.
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
#define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
#define IO_PGTABLE_QUIRK_NO_DMA BIT(4)
+ #define IO_PGTABLE_QUIRK_NON_STRICT BIT(5)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
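
Opting in happens per page-table instance, at allocation time. A minimal sketch of how an IOMMU driver might wire this up (my_tlb_ops and the strict/non-strict decision are hypothetical; the SZ_* constants come from <linux/sizes.h>):

static struct io_pgtable_ops *my_alloc_pgtable(void *cookie, bool strict)
{
	struct io_pgtable_cfg cfg = {
		.quirks		= IO_PGTABLE_QUIRK_NO_DMA,
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.tlb		= &my_tlb_ops,	/* hypothetical driver callbacks */
	};

	/* e.g. a default DMA domain that uses a flush queue */
	if (!strict)
		cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
}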
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8c15c5980299..edbdf5d6962c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -32,6 +32,7 @@
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
+#include <linux/fsl/mc.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
@@ -41,6 +42,7 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
#else
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
#endif
+static bool iommu_dma_strict __read_mostly = true;
struct iommu_callback_data {
const struct iommu_ops *ops;
@@ -131,6 +133,12 @@ static int __init iommu_set_def_domain_type(char *str)
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
+static int __init iommu_dma_setup(char *str)
+{
+ return kstrtobool(str, &iommu_dma_strict);
+}
+early_param("iommu.strict", iommu_dma_setup);
+
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -1024,6 +1032,18 @@ struct iommu_group *pci_device_group(struct device *dev)
return iommu_group_alloc();
}
+/* Get the IOMMU group for device on fsl-mc bus */
+struct iommu_group *fsl_mc_device_group(struct device *dev)
+{
+ struct device *cont_dev = fsl_mc_cont_dev(dev);
+ struct iommu_group *group;
+
+ group = iommu_group_get(cont_dev);
+ if (!group)
+ group = iommu_group_alloc();
+ return group;
+}
+
/**
* iommu_group_get_for_dev - Find or create the IOMMU group for a device
* @dev: target device
@@ -1072,6 +1092,13 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
group->default_domain = dom;
if (!group->domain)
group->domain = dom;
+
+ if (dom && !iommu_dma_strict) {
+ int attr = 1;
+ iommu_domain_set_attr(dom,
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+ &attr);
+ }
}
ret = iommu_group_add_device(group, dev);
@@ -1416,7 +1443,16 @@ struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
/*
- * IOMMU groups are really the natrual working unit of the IOMMU, but
+ * For IOMMU_DOMAIN_DMA implementations which already provide their own
+ * guarantees that the group and its default domain are valid and correct,
+ * return the default domain directly without locking or refcounting.
+ */
+struct iommu_domain *iommu_get_dma_domain(struct device *dev)
+{
+ return dev->iommu_group->default_domain;
+}
+
+/*
+ * IOMMU groups are really the natural working unit of the IOMMU, but
* the IOMMU API works on domains and devices. Bridge that gap by
* iterating over the devices in a group. Ideally we'd have a single
* device which represents the requestor ID of the group, but we also
@@ -1796,7 +1832,6 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
struct iommu_domain_geometry *geometry;
bool *paging;
int ret = 0;
- u32 *count;
switch (attr) {
case DOMAIN_ATTR_GEOMETRY:
@@ -1808,15 +1843,6 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
paging = data;
*paging = (domain->pgsize_bitmap != 0UL);
break;
- case DOMAIN_ATTR_WINDOWS:
- count = data;
-
- if (domain->ops->domain_get_windows != NULL)
- *count = domain->ops->domain_get_windows(domain);
- else
- ret = -ENODEV;
-
- break;
default:
if (!domain->ops->domain_get_attr)
return -EINVAL;
@@ -1832,18 +1858,8 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
int ret = 0;
- u32 *count;
switch (attr) {
- case DOMAIN_ATTR_WINDOWS:
- count = data;
-
- if (domain->ops->domain_set_windows != NULL)
- ret = domain->ops->domain_set_windows(domain, *count);
- else
- ret = -ENODEV;
-
- break;
default:
if (domain->ops->domain_set_attr == NULL)
return -EINVAL;
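
Two usage notes on the above. First, iommu.strict=0 on the kernel command line now selects lazy invalidation for DMA domains; kstrtobool() accepts the usual 0/1, y/n and on/off spellings. Second, iommu_get_dma_domain() is deliberately a bare pointer chase with no group refcounting, so it is only safe for callers that already know the device sits in its default DMA domain for its whole lifetime. A hedged sketch of such a caller (my_iommu_map() and the prot choice are hypothetical):

static int my_iommu_map(struct device *dev, unsigned long iova,
			phys_addr_t paddr, size_t size)
{
	/* Safe only because the default domain cannot change underneath
	 * us while the device is bound to its driver. */
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return iommu_map(domain, iova, paddr, size,
			 IOMMU_READ | IOMMU_WRITE);
}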
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 83fe2621effe..f8d3ba247523 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -56,6 +56,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->granule = granule;
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
+ iovad->max32_alloc_size = iovad->dma_32bit_pfn;
iovad->flush_cb = NULL;
iovad->fq = NULL;
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
@@ -139,8 +140,10 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
if (free->pfn_hi < iovad->dma_32bit_pfn &&
- free->pfn_lo >= cached_iova->pfn_lo)
+ free->pfn_lo >= cached_iova->pfn_lo) {
iovad->cached32_node = rb_next(&free->node);
+ iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+ }
cached_iova = rb_entry(iovad->cached_node, struct iova, node);
if (free->pfn_lo >= cached_iova->pfn_lo)
@@ -190,6 +193,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ if (limit_pfn <= iovad->dma_32bit_pfn &&
+ size >= iovad->max32_alloc_size)
+ goto iova32_full;
+
curr = __get_cached_rbnode(iovad, limit_pfn);
curr_iova = rb_entry(curr, struct iova, node);
do {
@@ -200,10 +207,8 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
curr_iova = rb_entry(curr, struct iova, node);
} while (curr && new_pfn <= curr_iova->pfn_hi);
- if (limit_pfn < size || new_pfn < iovad->start_pfn) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return -ENOMEM;
- }
+ if (limit_pfn < size || new_pfn < iovad->start_pfn)
+ goto iova32_full;
/* pfn_lo will point to size aligned address if size_aligned is set */
new->pfn_lo = new_pfn;
@@ -214,9 +219,12 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
__cached_rbnode_insert_update(iovad, new);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
return 0;
+
+iova32_full:
+ iovad->max32_alloc_size = size;
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return -ENOMEM;
}
static struct kmem_cache *iova_cache;
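
The iova change is a fail-fast cache: remember the smallest 32-bit allocation size that failed, reject equal-or-larger requests without walking the rbtree at all, and widen the limit again when a free below the 4 GiB boundary may have opened a hole. A toy model of the idea (try_tree_alloc() is hypothetical):

#define DMA32_PFN_LIMIT	(1UL << 20)	/* 4 GiB in 4 KiB pages, for the toy */

static unsigned long max32_alloc_size = DMA32_PFN_LIMIT;

static bool alloc32(unsigned long size)
{
	/* A request this big (or bigger) already failed: bail out early. */
	if (size >= max32_alloc_size)
		return false;

	if (!try_tree_alloc(size)) {		/* hypothetical rbtree walk */
		max32_alloc_size = size;	/* cache the failure threshold */
		return false;
	}
	return true;
}

static void free_below_4g(void)
{
	max32_alloc_size = DMA32_PFN_LIMIT;	/* a hole may exist again */
}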
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 22b94f8a9a04..b98a03189580 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IPMMU VMSA
*
* Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/bitmap.h>
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index f7787e757244..c5dd63072529 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -24,6 +24,7 @@
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
+#include <linux/fsl/mc.h>
#define NO_IOMMU 1
@@ -132,9 +133,8 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
struct of_phandle_args iommu_spec = { .args_count = 1 };
int err;
- err = of_pci_map_rid(info->np, alias, "iommu-map",
- "iommu-map-mask", &iommu_spec.np,
- iommu_spec.args);
+ err = of_map_rid(info->np, alias, "iommu-map", "iommu-map-mask",
+ &iommu_spec.np, iommu_spec.args);
if (err)
return err == -ENODEV ? NO_IOMMU : err;
@@ -143,6 +143,23 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
return err;
}
+static int of_fsl_mc_iommu_init(struct fsl_mc_device *mc_dev,
+ struct device_node *master_np)
+{
+ struct of_phandle_args iommu_spec = { .args_count = 1 };
+ int err;
+
+ err = of_map_rid(master_np, mc_dev->icid, "iommu-map",
+ "iommu-map-mask", &iommu_spec.np,
+ iommu_spec.args);
+ if (err)
+ return err == -ENODEV ? NO_IOMMU : err;
+
+ err = of_iommu_xlate(&mc_dev->dev, &iommu_spec);
+ of_node_put(iommu_spec.np);
+ return err;
+}
+
const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
@@ -174,6 +191,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
err = pci_for_each_dma_alias(to_pci_dev(dev),
of_pci_iommu_init, &info);
+ } else if (dev_is_fsl_mc(dev)) {
+ err = of_fsl_mc_iommu_init(to_fsl_mc_device(dev), master_np);
} else {
struct of_phandle_args iommu_spec;
int idx = 0;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 383e7b70221d..96451b581452 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -310,6 +310,9 @@ config MVEBU_ODMI
config MVEBU_PIC
bool
+config MVEBU_SEI
+ bool
+
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
depends on PCI && PCI_MSI
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index fbd1ec8070ef..b822199445ff 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_MVEBU_GICP) += irq-mvebu-gicp.o
obj-$(CONFIG_MVEBU_ICU) += irq-mvebu-icu.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o
+obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c2df341ff6fa..db20e992a40f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -19,13 +19,16 @@
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
+#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
+#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/log2.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
@@ -52,6 +55,7 @@
#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
+#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
static u32 lpi_id_bits;
@@ -64,7 +68,7 @@ static u32 lpi_id_bits;
#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
-#define LPI_PROP_DEFAULT_PRIO 0xa0
+#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
/*
* Collection structure - just an ID, and a redistributor address to
@@ -173,6 +177,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
@@ -1028,7 +1033,7 @@ static inline u32 its_get_event_id(struct irq_data *d)
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
irq_hw_number_t hwirq;
- struct page *prop_page;
+ void *va;
u8 *cfg;
if (irqd_is_forwarded_to_vcpu(d)) {
@@ -1036,7 +1041,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
u32 event = its_get_event_id(d);
struct its_vlpi_map *map;
- prop_page = its_dev->event_map.vm->vprop_page;
+ va = page_address(its_dev->event_map.vm->vprop_page);
map = &its_dev->event_map.vlpi_maps[event];
hwirq = map->vintid;
@@ -1044,11 +1049,11 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
map->properties &= ~clr;
map->properties |= set | LPI_PROP_GROUP1;
} else {
- prop_page = gic_rdists->prop_page;
+ va = gic_rdists->prop_table_va;
hwirq = d->hwirq;
}
- cfg = page_address(prop_page) + hwirq - 8192;
+ cfg = va + hwirq - 8192;
*cfg &= ~clr;
*cfg |= set | LPI_PROP_GROUP1;
@@ -1597,6 +1602,15 @@ static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
kfree(bitmap);
}
+static void gic_reset_prop_table(void *va)
+{
+ /* Priority 0xa0, Group-1, disabled */
+ memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
+
+ /* Make sure the GIC will observe the written configuration */
+ gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
+}
+
static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
struct page *prop_page;
@@ -1605,13 +1619,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
if (!prop_page)
return NULL;
- /* Priority 0xa0, Group-1, disabled */
- memset(page_address(prop_page),
- LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
- LPI_PROPBASE_SZ);
-
- /* Make sure the GIC will observe the written configuration */
- gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
+ gic_reset_prop_table(page_address(prop_page));
return prop_page;
}
@@ -1622,20 +1630,74 @@ static void its_free_prop_table(struct page *prop_page)
get_order(LPI_PROPBASE_SZ));
}
-static int __init its_alloc_lpi_tables(void)
+static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
{
- phys_addr_t paddr;
+ phys_addr_t start, end, addr_end;
+ u64 i;
- lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
- ITS_MAX_LPI_NRBITS);
- gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
- if (!gic_rdists->prop_page) {
- pr_err("Failed to allocate PROPBASE\n");
- return -ENOMEM;
+ /*
+ * We don't bother checking for a kdump kernel as by
+ * construction, the LPI tables are out of this kernel's
+ * memory map.
+ */
+ if (is_kdump_kernel())
+ return true;
+
+ addr_end = addr + size - 1;
+
+ for_each_reserved_mem_region(i, &start, &end) {
+ if (addr >= start && addr_end <= end)
+ return true;
+ }
+
+ /* Not found, not a good sign... */
+ pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
+ &addr, &addr_end);
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+ return false;
+}
+
+static int gic_reserve_range(phys_addr_t addr, unsigned long size)
+{
+ if (efi_enabled(EFI_CONFIG_TABLES))
+ return efi_mem_reserve_persistent(addr, size);
+
+ return 0;
+}
+
+static int __init its_setup_lpi_prop_table(void)
+{
+ if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
+ u64 val;
+
+ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
+ lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
+
+ gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
+ gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
+ LPI_PROPBASE_SZ,
+ MEMREMAP_WB);
+ gic_reset_prop_table(gic_rdists->prop_table_va);
+ } else {
+ struct page *page;
+
+ lpi_id_bits = min_t(u32,
+ GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+ ITS_MAX_LPI_NRBITS);
+ page = its_allocate_prop_table(GFP_NOWAIT);
+ if (!page) {
+ pr_err("Failed to allocate PROPBASE\n");
+ return -ENOMEM;
+ }
+
+ gic_rdists->prop_table_pa = page_to_phys(page);
+ gic_rdists->prop_table_va = page_address(page);
+ WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
+ LPI_PROPBASE_SZ));
}
- paddr = page_to_phys(gic_rdists->prop_page);
- pr_info("GIC: using LPI property table @%pa\n", &paddr);
+ pr_info("GICv3: using LPI property table @%pa\n",
+ &gic_rdists->prop_table_pa);
return its_lpi_init(lpi_id_bits);
}
@@ -1924,12 +1986,9 @@ static int its_alloc_collections(struct its_node *its)
static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
struct page *pend_page;
- /*
- * The pending pages have to be at least 64kB aligned,
- * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
- */
+
pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
- get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+ get_order(LPI_PENDBASE_SZ));
if (!pend_page)
return NULL;
@@ -1941,36 +2000,103 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
static void its_free_pending_table(struct page *pt)
{
- free_pages((unsigned long)page_address(pt),
- get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+ free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
+}
+
+/*
+ * Booting with kdump and LPIs enabled is generally fine. Any other
+ * case is wrong in the absence of firmware/EFI support.
+ */
+static bool enabled_lpis_allowed(void)
+{
+ phys_addr_t addr;
+ u64 val;
+
+ /* Check whether the property table is in a reserved region */
+ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
+ addr = val & GENMASK_ULL(51, 12);
+
+ return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
+}
+
+static int __init allocate_lpi_tables(void)
+{
+ u64 val;
+ int err, cpu;
+
+ /*
+ * If LPIs are enabled while we run this from the boot CPU,
+ * flag the RD tables as pre-allocated if the stars do align.
+ */
+ val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
+ if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
+ gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
+ RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
+ pr_info("GICv3: Using preallocated redistributor tables\n");
+ }
+
+ err = its_setup_lpi_prop_table();
+ if (err)
+ return err;
+
+ /*
+ * We allocate all the pending tables anyway, as we may have a
+ * mix of RDs that have had LPIs enabled, and some that
+ * don't. We'll free the unused ones as each CPU comes online.
+ */
+ for_each_possible_cpu(cpu) {
+ struct page *pend_page;
+
+ pend_page = its_allocate_pending_table(GFP_NOWAIT);
+ if (!pend_page) {
+ pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
+ return -ENOMEM;
+ }
+
+ gic_data_rdist_cpu(cpu)->pend_page = pend_page;
+ }
+
+ return 0;
}
static void its_cpu_init_lpis(void)
{
void __iomem *rbase = gic_data_rdist_rd_base();
struct page *pend_page;
+ phys_addr_t paddr;
u64 val, tmp;
- /* If we didn't allocate the pending table yet, do it now */
- pend_page = gic_data_rdist()->pend_page;
- if (!pend_page) {
- phys_addr_t paddr;
+ if (gic_data_rdist()->lpi_enabled)
+ return;
- pend_page = its_allocate_pending_table(GFP_NOWAIT);
- if (!pend_page) {
- pr_err("Failed to allocate PENDBASE for CPU%d\n",
- smp_processor_id());
- return;
- }
+ val = readl_relaxed(rbase + GICR_CTLR);
+ if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
+ (val & GICR_CTLR_ENABLE_LPIS)) {
+ /*
+ * Check that we get the same property table on all
+ * RDs. If we don't, this is hopeless.
+ */
+ paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
+ paddr &= GENMASK_ULL(51, 12);
+ if (WARN_ON(gic_rdists->prop_table_pa != paddr))
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+ paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
+ paddr &= GENMASK_ULL(51, 16);
- paddr = page_to_phys(pend_page);
- pr_info("CPU%d: using LPI pending table @%pa\n",
- smp_processor_id(), &paddr);
- gic_data_rdist()->pend_page = pend_page;
+ WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
+ its_free_pending_table(gic_data_rdist()->pend_page);
+ gic_data_rdist()->pend_page = NULL;
+
+ goto out;
}
+ pend_page = gic_data_rdist()->pend_page;
+ paddr = page_to_phys(pend_page);
+ WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
+
/* set PROPBASE */
- val = (page_to_phys(gic_rdists->prop_page) |
+ val = (gic_rdists->prop_table_pa |
GICR_PROPBASER_InnerShareable |
GICR_PROPBASER_RaWaWb |
((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
@@ -2020,6 +2146,12 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
+out:
+ gic_data_rdist()->lpi_enabled = true;
+ pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
+ smp_processor_id(),
+ gic_data_rdist()->pend_page ? "allocated" : "reserved",
+ &paddr);
}
static void its_cpu_init_collection(struct its_node *its)
@@ -3498,16 +3630,6 @@ static int redist_disable_lpis(void)
u64 timeout = USEC_PER_SEC;
u64 val;
- /*
- * If coming via a CPU hotplug event, we don't need to disable
- * LPIs before trying to re-enable them. They are already
- * configured and all is well in the world. Detect this case
- * by checking the allocation of the pending table for the
- * current CPU.
- */
- if (gic_data_rdist()->pend_page)
- return 0;
-
if (!gic_rdists_supports_plpis()) {
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
return -ENXIO;
@@ -3517,7 +3639,21 @@ static int redist_disable_lpis(void)
if (!(val & GICR_CTLR_ENABLE_LPIS))
return 0;
- pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
+ /*
+ * If coming via a CPU hotplug event, we don't need to disable
+ * LPIs before trying to re-enable them. They are already
+ * configured and all is well in the world.
+ *
+ * If running with preallocated tables, there is nothing to do.
+ */
+ if (gic_data_rdist()->lpi_enabled ||
+ (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
+ return 0;
+
+ /*
+ * From that point on, we only try to do some damage control.
+ */
+ pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
smp_processor_id());
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
@@ -3773,7 +3909,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
}
gic_rdists = rdists;
- err = its_alloc_lpi_tables();
+
+ err = allocate_lpi_tables();
if (err)
return err;
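
The table sizes reserved above follow directly from the LPI ID width, which the allocation path caps at ITS_MAX_LPI_NRBITS (16). A worked sketch of the arithmetic (not kernel code); it also shows why the old max(LPI_PENDBASE_SZ, SZ_64K) could go away, since the LPI_PENDBASE_SZ definition already carries the 64 KiB alignment:

#include <linux/kernel.h>	/* ALIGN()  */
#include <linux/bits.h>		/* BIT()    */
#include <linux/sizes.h>	/* SZ_64K   */

static void lpi_table_sizes(void)
{
	unsigned int id_bits = 16;	/* ITS_MAX_LPI_NRBITS cap */

	/* One configuration byte per possible LPI: 64 KiB. */
	unsigned long prop = ALIGN(BIT(id_bits), SZ_64K);

	/* One pending bit per possible LPI: 8 KiB, rounded up to 64 KiB
	 * by the alignment baked into the define. */
	unsigned long pend = ALIGN(BIT(id_bits) / 8, SZ_64K);

	pr_info("prop %lu KiB, pend %lu KiB\n", prop / SZ_1K, pend / SZ_1K);
}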
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d5912f1ec884..8f87f40c9460 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -348,48 +348,45 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
{
u32 irqnr;
- do {
- irqnr = gic_read_iar();
+ irqnr = gic_read_iar();
- if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
- int err;
+ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
+ int err;
- if (static_branch_likely(&supports_deactivate_key))
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_write_eoir(irqnr);
+ else
+ isb();
+
+ err = handle_domain_irq(gic_data.domain, irqnr, regs);
+ if (err) {
+ WARN_ONCE(true, "Unexpected interrupt received!\n");
+ if (static_branch_likely(&supports_deactivate_key)) {
+ if (irqnr < 8192)
+ gic_write_dir(irqnr);
+ } else {
gic_write_eoir(irqnr);
- else
- isb();
-
- err = handle_domain_irq(gic_data.domain, irqnr, regs);
- if (err) {
- WARN_ONCE(true, "Unexpected interrupt received!\n");
- if (static_branch_likely(&supports_deactivate_key)) {
- if (irqnr < 8192)
- gic_write_dir(irqnr);
- } else {
- gic_write_eoir(irqnr);
- }
}
- continue;
}
- if (irqnr < 16) {
- gic_write_eoir(irqnr);
- if (static_branch_likely(&supports_deactivate_key))
- gic_write_dir(irqnr);
+ return;
+ }
+ if (irqnr < 16) {
+ gic_write_eoir(irqnr);
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_write_dir(irqnr);
#ifdef CONFIG_SMP
- /*
- * Unlike GICv2, we don't need an smp_rmb() here.
- * The control dependency from gic_read_iar to
- * the ISB in gic_write_eoir is enough to ensure
- * that any shared data read by handle_IPI will
- * be read after the ACK.
- */
- handle_IPI(irqnr, regs);
+ /*
+ * Unlike GICv2, we don't need an smp_rmb() here.
+ * The control dependency from gic_read_iar to
+ * the ISB in gic_write_eoir is enough to ensure
+ * that any shared data read by handle_IPI will
+ * be read after the ACK.
+ */
+ handle_IPI(irqnr, regs);
#else
- WARN_ONCE(true, "Unexpected SGI received!\n");
+ WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
- continue;
- }
- } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
+ }
}
static void __init gic_dist_init(void)
@@ -653,7 +650,9 @@ early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
static int gic_dist_supports_lpis(void)
{
- return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi;
+ return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
+ !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
+ !gicv3_nolpi);
}
static void gic_cpu_init(void)
@@ -673,10 +672,6 @@ static void gic_cpu_init(void)
gic_cpu_config(rbase, gic_redist_wait_for_rwp);
- /* Give LPIs a spin */
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
- its_cpu_init();
-
/* initialise system registers */
gic_cpu_sys_reg_init();
}
@@ -689,6 +684,10 @@ static void gic_cpu_init(void)
static int gic_starting_cpu(unsigned int cpu)
{
gic_cpu_init();
+
+ if (gic_dist_supports_lpis())
+ its_cpu_init();
+
return 0;
}
@@ -1127,14 +1126,16 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_update_vlpi_properties();
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
- its_init(handle, &gic_data.rdists, gic_data.domain);
-
gic_smp_init();
gic_dist_init();
gic_cpu_init();
gic_cpu_pm_init();
+ if (gic_dist_supports_lpis()) {
+ its_init(handle, &gic_data.rdists, gic_data.domain);
+ its_cpu_init();
+ }
+
return 0;
out_free:
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 13063339b416..547045d89c4b 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
@@ -26,6 +27,10 @@
#define ICU_SETSPI_NSR_AH 0x14
#define ICU_CLRSPI_NSR_AL 0x18
#define ICU_CLRSPI_NSR_AH 0x1c
+#define ICU_SET_SEI_AL 0x50
+#define ICU_SET_SEI_AH 0x54
+#define ICU_CLR_SEI_AL 0x58
+#define ICU_CLR_SEI_AH 0x5C
#define ICU_INT_CFG(x) (0x100 + 4 * (x))
#define ICU_INT_ENABLE BIT(24)
#define ICU_IS_EDGE BIT(28)
@@ -36,12 +41,23 @@
#define ICU_SATA0_ICU_ID 109
#define ICU_SATA1_ICU_ID 107
+struct mvebu_icu_subset_data {
+ unsigned int icu_group;
+ unsigned int offset_set_ah;
+ unsigned int offset_set_al;
+ unsigned int offset_clr_ah;
+ unsigned int offset_clr_al;
+};
+
struct mvebu_icu {
- struct irq_chip irq_chip;
void __iomem *base;
- struct irq_domain *domain;
struct device *dev;
+};
+
+struct mvebu_icu_msi_data {
+ struct mvebu_icu *icu;
atomic_t initialized;
+ const struct mvebu_icu_subset_data *subset_data;
};
struct mvebu_icu_irq_data {
@@ -50,28 +66,40 @@ struct mvebu_icu_irq_data {
unsigned int type;
};
-static void mvebu_icu_init(struct mvebu_icu *icu, struct msi_msg *msg)
+DEFINE_STATIC_KEY_FALSE(legacy_bindings);
+
+static void mvebu_icu_init(struct mvebu_icu *icu,
+ struct mvebu_icu_msi_data *msi_data,
+ struct msi_msg *msg)
{
- if (atomic_cmpxchg(&icu->initialized, false, true))
+ const struct mvebu_icu_subset_data *subset = msi_data->subset_data;
+
+ if (atomic_cmpxchg(&msi_data->initialized, false, true))
return;
- /* Set Clear/Set ICU SPI message address in AP */
- writel_relaxed(msg[0].address_hi, icu->base + ICU_SETSPI_NSR_AH);
- writel_relaxed(msg[0].address_lo, icu->base + ICU_SETSPI_NSR_AL);
- writel_relaxed(msg[1].address_hi, icu->base + ICU_CLRSPI_NSR_AH);
- writel_relaxed(msg[1].address_lo, icu->base + ICU_CLRSPI_NSR_AL);
+ /* Set 'SET' ICU SPI message address in AP */
+ writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
+ writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);
+
+ if (subset->icu_group != ICU_GRP_NSR)
+ return;
+
+ /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
+ writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
+ writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
}
static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct irq_data *d = irq_get_irq_data(desc->irq);
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d->domain);
struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
struct mvebu_icu *icu = icu_irqd->icu;
unsigned int icu_int;
if (msg->address_lo || msg->address_hi) {
- /* One off initialization */
- mvebu_icu_init(icu, msg);
+ /* One off initialization per domain */
+ mvebu_icu_init(icu, msi_data, msg);
/* Configure the ICU with irq number & type */
icu_int = msg->data | ICU_INT_ENABLE;
if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
@@ -101,37 +129,66 @@ static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
}
}
+static struct irq_chip mvebu_icu_nsr_chip = {
+ .name = "ICU-NSR",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static struct irq_chip mvebu_icu_sei_chip = {
+ .name = "ICU-SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
static int
mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
- struct mvebu_icu *icu = d->host_data;
- unsigned int icu_group;
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d);
+	struct mvebu_icu *icu = msi_data->icu;
+ unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
/* Check the count of the parameters in dt */
- if (WARN_ON(fwspec->param_count < 3)) {
+ if (WARN_ON(fwspec->param_count != param_count)) {
dev_err(icu->dev, "wrong ICU parameter count %d\n",
fwspec->param_count);
return -EINVAL;
}
- /* Only ICU group type is handled */
- icu_group = fwspec->param[0];
- if (icu_group != ICU_GRP_NSR && icu_group != ICU_GRP_SR &&
- icu_group != ICU_GRP_SEI && icu_group != ICU_GRP_REI) {
- dev_err(icu->dev, "wrong ICU group type %x\n", icu_group);
- return -EINVAL;
+ if (static_branch_unlikely(&legacy_bindings)) {
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ if (fwspec->param[0] != ICU_GRP_NSR) {
+ dev_err(icu->dev, "wrong ICU group type %x\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+ } else {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ /*
+ * The ICU receives level interrupts. While the NSR are also
+ * level interrupts, SEI are edge interrupts. Force the type
+ * here in this case. Please note that this makes the interrupt
+ * handling unreliable.
+ */
+ if (msi_data->subset_data->icu_group == ICU_GRP_SEI)
+ *type = IRQ_TYPE_EDGE_RISING;
}
- *hwirq = fwspec->param[1];
if (*hwirq >= ICU_MAX_IRQS) {
dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
return -EINVAL;
}
- /* Mask the type to prevent wrong DT configuration */
- *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
-
return 0;
}
@@ -142,8 +199,10 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
int err;
unsigned long hwirq;
struct irq_fwspec *fwspec = args;
- struct mvebu_icu *icu = platform_msi_get_host_data(domain);
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain);
+ struct mvebu_icu *icu = msi_data->icu;
struct mvebu_icu_irq_data *icu_irqd;
+ struct irq_chip *chip = &mvebu_icu_nsr_chip;
icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
if (!icu_irqd)
@@ -156,7 +215,10 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
goto free_irqd;
}
- icu_irqd->icu_group = fwspec->param[0];
+ if (static_branch_unlikely(&legacy_bindings))
+ icu_irqd->icu_group = fwspec->param[0];
+ else
+ icu_irqd->icu_group = msi_data->subset_data->icu_group;
icu_irqd->icu = icu;
err = platform_msi_domain_alloc(domain, virq, nr_irqs);
@@ -170,8 +232,11 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (err)
goto free_msi;
+ if (icu_irqd->icu_group == ICU_GRP_SEI)
+ chip = &mvebu_icu_sei_chip;
+
err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- &icu->irq_chip, icu_irqd);
+ chip, icu_irqd);
if (err) {
dev_err(icu->dev, "failed to set the data to IRQ domain\n");
goto free_msi;
@@ -204,11 +269,84 @@ static const struct irq_domain_ops mvebu_icu_domain_ops = {
.free = mvebu_icu_irq_domain_free,
};
+static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
+ .icu_group = ICU_GRP_NSR,
+ .offset_set_ah = ICU_SETSPI_NSR_AH,
+ .offset_set_al = ICU_SETSPI_NSR_AL,
+ .offset_clr_ah = ICU_CLRSPI_NSR_AH,
+ .offset_clr_al = ICU_CLRSPI_NSR_AL,
+};
+
+static const struct mvebu_icu_subset_data mvebu_icu_sei_subset_data = {
+ .icu_group = ICU_GRP_SEI,
+ .offset_set_ah = ICU_SET_SEI_AH,
+ .offset_set_al = ICU_SET_SEI_AL,
+};
+
+static const struct of_device_id mvebu_icu_subset_of_match[] = {
+ {
+ .compatible = "marvell,cp110-icu-nsr",
+ .data = &mvebu_icu_nsr_subset_data,
+ },
+ {
+ .compatible = "marvell,cp110-icu-sei",
+ .data = &mvebu_icu_sei_subset_data,
+ },
+ {},
+};
+
+static int mvebu_icu_subset_probe(struct platform_device *pdev)
+{
+ struct mvebu_icu_msi_data *msi_data;
+ struct device_node *msi_parent_dn;
+ struct device *dev = &pdev->dev;
+ struct irq_domain *irq_domain;
+
+ msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
+ if (static_branch_unlikely(&legacy_bindings)) {
+ msi_data->icu = dev_get_drvdata(dev);
+ msi_data->subset_data = &mvebu_icu_nsr_subset_data;
+ } else {
+ msi_data->icu = dev_get_drvdata(dev->parent);
+ msi_data->subset_data = of_device_get_match_data(dev);
+ }
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_PLATFORM_MSI);
+ if (!dev->msi_domain)
+ return -EPROBE_DEFER;
+
+ msi_parent_dn = irq_domain_get_of_node(dev->msi_domain);
+ if (!msi_parent_dn)
+ return -ENODEV;
+
+ irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
+ mvebu_icu_write_msg,
+ &mvebu_icu_domain_ops,
+ msi_data);
+ if (!irq_domain) {
+ dev_err(dev, "Failed to create ICU MSI domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct platform_driver mvebu_icu_subset_driver = {
+ .probe = mvebu_icu_subset_probe,
+ .driver = {
+ .name = "mvebu-icu-subset",
+ .of_match_table = mvebu_icu_subset_of_match,
+ },
+};
+builtin_platform_driver(mvebu_icu_subset_driver);
+
static int mvebu_icu_probe(struct platform_device *pdev)
{
struct mvebu_icu *icu;
- struct device_node *node = pdev->dev.of_node;
- struct device_node *gicp_dn;
struct resource *res;
int i;
@@ -226,53 +364,38 @@ static int mvebu_icu_probe(struct platform_device *pdev)
return PTR_ERR(icu->base);
}
- icu->irq_chip.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "ICU.%x",
- (unsigned int)res->start);
- if (!icu->irq_chip.name)
- return -ENOMEM;
-
- icu->irq_chip.irq_mask = irq_chip_mask_parent;
- icu->irq_chip.irq_unmask = irq_chip_unmask_parent;
- icu->irq_chip.irq_eoi = irq_chip_eoi_parent;
- icu->irq_chip.irq_set_type = irq_chip_set_type_parent;
-#ifdef CONFIG_SMP
- icu->irq_chip.irq_set_affinity = irq_chip_set_affinity_parent;
-#endif
-
/*
- * We're probed after MSI domains have been resolved, so force
- * resolution here.
+	 * Legacy bindings: the ICU is a single node with a single MSI parent;
+	 * manually force the probe of the NSR interrupt side.
+ * New bindings: ICU node has children, one per interrupt controller
+ * having its own MSI parent: call platform_populate().
+ * All ICU instances should use the same bindings.
*/
- pdev->dev.msi_domain = of_msi_get_domain(&pdev->dev, node,
- DOMAIN_BUS_PLATFORM_MSI);
- if (!pdev->dev.msi_domain)
- return -EPROBE_DEFER;
-
- gicp_dn = irq_domain_get_of_node(pdev->dev.msi_domain);
- if (!gicp_dn)
- return -ENODEV;
+ if (!of_get_child_count(pdev->dev.of_node))
+ static_branch_enable(&legacy_bindings);
/*
- * Clean all ICU interrupts with type SPI_NSR, required to
+ * Clean all ICU interrupts of type NSR and SEI, required to
* avoid unpredictable SPI assignments done by firmware.
*/
for (i = 0 ; i < ICU_MAX_IRQS ; i++) {
- u32 icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
- if ((icu_int >> ICU_GROUP_SHIFT) == ICU_GRP_NSR)
+ u32 icu_int, icu_grp;
+
+ icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
+ icu_grp = icu_int >> ICU_GROUP_SHIFT;
+
+ if (icu_grp == ICU_GRP_NSR ||
+ (icu_grp == ICU_GRP_SEI &&
+ !static_branch_unlikely(&legacy_bindings)))
writel_relaxed(0x0, icu->base + ICU_INT_CFG(i));
}
- icu->domain =
- platform_msi_create_device_domain(&pdev->dev, ICU_MAX_IRQS,
- mvebu_icu_write_msg,
- &mvebu_icu_domain_ops, icu);
- if (!icu->domain) {
- dev_err(&pdev->dev, "Failed to create ICU domain\n");
- return -ENOMEM;
- }
+ platform_set_drvdata(pdev, icu);
- return 0;
+ if (static_branch_unlikely(&legacy_bindings))
+ return mvebu_icu_subset_probe(pdev);
+ else
+ return devm_of_platform_populate(&pdev->dev);
}
static const struct of_device_id mvebu_icu_of_match[] = {
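
One design note on the above: legacy_bindings is decided exactly once at probe time (a node without children implies the old binding), after which static_branch_unlikely() compiles down to a patched straight-line branch rather than a load-and-test of a flag variable. A self-contained sketch of the pattern, independent of the ICU specifics:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(legacy_mode);	/* default: new behaviour */

static void decide_once(bool old_dt)
{
	if (old_dt)
		static_branch_enable(&legacy_mode);	/* patches call sites */
}

static unsigned int expected_param_count(void)
{
	if (static_branch_unlikely(&legacy_mode))
		return 3;	/* old binding: <group, irq, type> */
	return 2;		/* new binding: <irq, type>        */
}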
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
new file mode 100644
index 000000000000..566d69a2edbc
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "mvebu-sei: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/* Cause register */
+#define GICP_SECR(idx) (0x0 + ((idx) * 0x4))
+/* Mask register */
+#define GICP_SEMR(idx) (0x20 + ((idx) * 0x4))
+#define GICP_SET_SEI_OFFSET 0x30
+
+#define SEI_IRQ_COUNT_PER_REG 32
+#define SEI_IRQ_REG_COUNT 2
+#define SEI_IRQ_COUNT (SEI_IRQ_COUNT_PER_REG * SEI_IRQ_REG_COUNT)
+#define SEI_IRQ_REG_IDX(irq_id) ((irq_id) / SEI_IRQ_COUNT_PER_REG)
+#define SEI_IRQ_REG_BIT(irq_id) ((irq_id) % SEI_IRQ_COUNT_PER_REG)
+
+struct mvebu_sei_interrupt_range {
+ u32 first;
+ u32 size;
+};
+
+struct mvebu_sei_caps {
+ struct mvebu_sei_interrupt_range ap_range;
+ struct mvebu_sei_interrupt_range cp_range;
+};
+
+struct mvebu_sei {
+ struct device *dev;
+ void __iomem *base;
+ struct resource *res;
+ struct irq_domain *sei_domain;
+ struct irq_domain *ap_domain;
+ struct irq_domain *cp_domain;
+ const struct mvebu_sei_caps *caps;
+
+ /* Lock on MSI allocations/releases */
+ struct mutex cp_msi_lock;
+ DECLARE_BITMAP(cp_msi_bitmap, SEI_IRQ_COUNT);
+
+ /* Lock on IRQ masking register */
+ raw_spinlock_t mask_lock;
+};
+
+static void mvebu_sei_ack_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+
+ writel_relaxed(BIT(SEI_IRQ_REG_BIT(d->hwirq)),
+ sei->base + GICP_SECR(reg_idx));
+}
+
+static void mvebu_sei_mask_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+ unsigned long flags;
+
+ /* 1 disables the interrupt */
+ raw_spin_lock_irqsave(&sei->mask_lock, flags);
+ reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
+ reg |= BIT(SEI_IRQ_REG_BIT(d->hwirq));
+ writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
+ raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
+}
+
+static void mvebu_sei_unmask_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+ unsigned long flags;
+
+ /* 0 enables the interrupt */
+ raw_spin_lock_irqsave(&sei->mask_lock, flags);
+ reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
+ reg &= ~BIT(SEI_IRQ_REG_BIT(d->hwirq));
+ writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
+ raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
+}
+
+static int mvebu_sei_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static int mvebu_sei_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ /* We can only clear the pending state by acking the interrupt */
+ if (which != IRQCHIP_STATE_PENDING || state)
+ return -EINVAL;
+
+ mvebu_sei_ack_irq(d);
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_irq_chip = {
+ .name = "SEI",
+ .irq_ack = mvebu_sei_ack_irq,
+ .irq_mask = mvebu_sei_mask_irq,
+ .irq_unmask = mvebu_sei_unmask_irq,
+ .irq_set_affinity = mvebu_sei_set_affinity,
+ .irq_set_irqchip_state = mvebu_sei_set_irqchip_state,
+};
+
+static int mvebu_sei_ap_set_type(struct irq_data *data, unsigned int type)
+{
+ if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_ap_irq_chip = {
+ .name = "AP SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = mvebu_sei_ap_set_type,
+};
+
+static void mvebu_sei_cp_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct mvebu_sei *sei = data->chip_data;
+ phys_addr_t set = sei->res->start + GICP_SET_SEI_OFFSET;
+
+ msg->data = data->hwirq + sei->caps->cp_range.first;
+ msg->address_lo = lower_32_bits(set);
+ msg->address_hi = upper_32_bits(set);
+}
+
+static int mvebu_sei_cp_set_type(struct irq_data *data, unsigned int type)
+{
+ if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_cp_irq_chip = {
+ .name = "CP SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = mvebu_sei_cp_set_type,
+ .irq_compose_msi_msg = mvebu_sei_cp_compose_msi_msg,
+};
+
+static int mvebu_sei_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+
+ /* Not much to do, just setup the irqdata */
+ irq_domain_set_hwirq_and_chip(domain, virq, fwspec->param[0],
+ &mvebu_sei_irq_chip, sei);
+
+ return 0;
+}
+
+static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops mvebu_sei_domain_ops = {
+ .alloc = mvebu_sei_domain_alloc,
+ .free = mvebu_sei_domain_free,
+};
+
+static int mvebu_sei_ap_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ *hwirq = fwspec->param[0];
+ *type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int mvebu_sei_ap_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec fwspec;
+ unsigned long hwirq;
+ unsigned int type;
+ int err;
+
+ mvebu_sei_ap_translate(domain, arg, &hwirq, &type);
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq + sei->caps->ap_range.first;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &mvebu_sei_ap_irq_chip, sei,
+ handle_level_irq, NULL, NULL);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mvebu_sei_ap_domain_ops = {
+ .translate = mvebu_sei_ap_translate,
+ .alloc = mvebu_sei_ap_alloc,
+ .free = irq_domain_free_irqs_parent,
+};
+
+static void mvebu_sei_cp_release_irq(struct mvebu_sei *sei, unsigned long hwirq)
+{
+ mutex_lock(&sei->cp_msi_lock);
+ clear_bit(hwirq, sei->cp_msi_bitmap);
+ mutex_unlock(&sei->cp_msi_lock);
+}
+
+static int mvebu_sei_cp_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec fwspec;
+ unsigned long hwirq;
+ int ret;
+
+ /* The software only supports single allocations for now */
+ if (nr_irqs != 1)
+ return -ENOTSUPP;
+
+ mutex_lock(&sei->cp_msi_lock);
+ hwirq = find_first_zero_bit(sei->cp_msi_bitmap,
+ sei->caps->cp_range.size);
+ if (hwirq < sei->caps->cp_range.size)
+ set_bit(hwirq, sei->cp_msi_bitmap);
+ mutex_unlock(&sei->cp_msi_lock);
+
+ if (hwirq == sei->caps->cp_range.size)
+ return -ENOSPC;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq + sei->caps->cp_range.first;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ goto free_irq;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &mvebu_sei_cp_irq_chip, sei,
+ handle_edge_irq, NULL, NULL);
+
+ return 0;
+
+free_irq:
+ mvebu_sei_cp_release_irq(sei, hwirq);
+ return ret;
+}
+
+static void mvebu_sei_cp_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ if (nr_irqs != 1 || d->hwirq >= sei->caps->cp_range.size) {
+ dev_err(sei->dev, "Invalid hwirq %lu\n", d->hwirq);
+ return;
+ }
+
+ mvebu_sei_cp_release_irq(sei, d->hwirq);
+ irq_domain_free_irqs_parent(domain, virq, 1);
+}
+
+static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
+ .alloc = mvebu_sei_cp_domain_alloc,
+ .free = mvebu_sei_cp_domain_free,
+};
+
+static struct irq_chip mvebu_sei_msi_irq_chip = {
+ .name = "SEI pMSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+};
+
+static struct msi_domain_ops mvebu_sei_msi_ops = {
+};
+
+static struct msi_domain_info mvebu_sei_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
+ .ops = &mvebu_sei_msi_ops,
+ .chip = &mvebu_sei_msi_irq_chip,
+};
+
+static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
+{
+ struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 idx;
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < SEI_IRQ_REG_COUNT; idx++) {
+ unsigned long irqmap;
+ int bit;
+
+ irqmap = readl_relaxed(sei->base + GICP_SECR(idx));
+ for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) {
+ unsigned long hwirq;
+ unsigned int virq;
+
+ hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit;
+ virq = irq_find_mapping(sei->sei_domain, hwirq);
+ if (likely(virq)) {
+ generic_handle_irq(virq);
+ continue;
+ }
+
+ dev_warn(sei->dev,
+ "Spurious IRQ detected (hwirq %lu)\n", hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void mvebu_sei_reset(struct mvebu_sei *sei)
+{
+ u32 reg_idx;
+
+ /* Clear IRQ cause registers, mask all interrupts */
+ for (reg_idx = 0; reg_idx < SEI_IRQ_REG_COUNT; reg_idx++) {
+ writel_relaxed(0xFFFFFFFF, sei->base + GICP_SECR(reg_idx));
+ writel_relaxed(0xFFFFFFFF, sei->base + GICP_SEMR(reg_idx));
+ }
+}
+
+static int mvebu_sei_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct irq_domain *plat_domain;
+ struct mvebu_sei *sei;
+ u32 parent_irq;
+ int ret;
+
+ sei = devm_kzalloc(&pdev->dev, sizeof(*sei), GFP_KERNEL);
+ if (!sei)
+ return -ENOMEM;
+
+ sei->dev = &pdev->dev;
+
+ mutex_init(&sei->cp_msi_lock);
+ raw_spin_lock_init(&sei->mask_lock);
+
+ sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sei->base = devm_ioremap_resource(sei->dev, sei->res);
+ if (IS_ERR(sei->base)) {
+ dev_err(sei->dev, "Failed to remap SEI resource\n");
+ return PTR_ERR(sei->base);
+ }
+
+ /* Retrieve the SEI capabilities with the interrupt ranges */
+ sei->caps = of_device_get_match_data(&pdev->dev);
+ if (!sei->caps) {
+ dev_err(sei->dev,
+ "Could not retrieve controller capabilities\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Reserve the single (top-level) parent SPI IRQ from which all the
+ * interrupts handled by this driver will be signaled.
+ */
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ dev_err(sei->dev, "Failed to retrieve top-level SPI IRQ\n");
+ return -ENODEV;
+ }
+
+ /* Create the root SEI domain */
+ sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ (sei->caps->ap_range.size +
+ sei->caps->cp_range.size),
+ &mvebu_sei_domain_ops,
+ sei);
+ if (!sei->sei_domain) {
+ dev_err(sei->dev, "Failed to create SEI IRQ domain\n");
+ ret = -ENOMEM;
+ goto dispose_irq;
+ }
+
+ irq_domain_update_bus_token(sei->sei_domain, DOMAIN_BUS_NEXUS);
+
+ /* Create the 'wired' domain */
+ sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
+ sei->caps->ap_range.size,
+ of_node_to_fwnode(node),
+ &mvebu_sei_ap_domain_ops,
+ sei);
+ if (!sei->ap_domain) {
+ dev_err(sei->dev, "Failed to create AP IRQ domain\n");
+ ret = -ENOMEM;
+ goto remove_sei_domain;
+ }
+
+ irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED);
+
+ /* Create the 'MSI' domain */
+ sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
+ sei->caps->cp_range.size,
+ of_node_to_fwnode(node),
+ &mvebu_sei_cp_domain_ops,
+ sei);
+ if (!sei->cp_domain) {
+ pr_err("Failed to create CPs IRQ domain\n");
+ ret = -ENOMEM;
+ goto remove_ap_domain;
+ }
+
+ irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
+
+ plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+ &mvebu_sei_msi_domain_info,
+ sei->cp_domain);
+ if (!plat_domain) {
+ pr_err("Failed to create CPs MSI domain\n");
+ ret = -ENOMEM;
+ goto remove_cp_domain;
+ }
+
+ mvebu_sei_reset(sei);
+
+ irq_set_chained_handler_and_data(parent_irq,
+ mvebu_sei_handle_cascade_irq,
+ sei);
+
+ return 0;
+
+remove_cp_domain:
+ irq_domain_remove(sei->cp_domain);
+remove_ap_domain:
+ irq_domain_remove(sei->ap_domain);
+remove_sei_domain:
+ irq_domain_remove(sei->sei_domain);
+dispose_irq:
+ irq_dispose_mapping(parent_irq);
+
+ return ret;
+}
+
+static const struct mvebu_sei_caps mvebu_sei_ap806_caps = {
+ .ap_range = {
+ .first = 0,
+ .size = 21,
+ },
+ .cp_range = {
+ .first = 21,
+ .size = 43,
+ },
+};
+
+static const struct of_device_id mvebu_sei_of_match[] = {
+ {
+ .compatible = "marvell,ap806-sei",
+ .data = &mvebu_sei_ap806_caps,
+ },
+ {},
+};
+
+static struct platform_driver mvebu_sei_driver = {
+ .probe = mvebu_sei_probe,
+ .driver = {
+ .name = "mvebu-sei",
+ .of_match_table = mvebu_sei_of_match,
+ },
+};
+builtin_platform_driver(mvebu_sei_driver);
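
The CP domain above hands out MSI hwirqs from a small bitmap guarded by a
mutex (mvebu_sei_cp_domain_alloc() paired with mvebu_sei_cp_release_irq()).
A minimal userspace sketch of the same allocate/release pattern, with
illustrative names and a byte-per-slot array standing in for the kernel's
bitops:

#include <pthread.h>
#include <stdio.h>

#define CP_RANGE_SIZE 43 /* mirrors mvebu_sei_ap806_caps.cp_range.size */

static pthread_mutex_t msi_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char msi_bitmap[CP_RANGE_SIZE];

/* Return the first free hwirq, or -1 when the range is exhausted. */
static int msi_alloc(void)
{
	int hwirq = -1, i;

	pthread_mutex_lock(&msi_lock);
	for (i = 0; i < CP_RANGE_SIZE; i++) {
		if (!msi_bitmap[i]) {
			msi_bitmap[i] = 1;
			hwirq = i;
			break;
		}
	}
	pthread_mutex_unlock(&msi_lock);
	return hwirq;
}

static void msi_release(int hwirq)
{
	pthread_mutex_lock(&msi_lock);
	msi_bitmap[hwirq] = 0;
	pthread_mutex_unlock(&msi_lock);
}

int main(void)
{
	int a = msi_alloc(), b = msi_alloc();

	printf("allocated hwirqs %d and %d\n", a, b); /* 0 and 1 */
	msi_release(a);
	printf("reused hwirq %d\n", msi_alloc()); /* 0 again */
	return 0;
}
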
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 532e9d68c704..357e9daf94ae 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -15,6 +15,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
+#include <asm/smp.h>
/*
* This driver implements a version of the RISC-V PLIC with the actual layout
@@ -176,7 +177,7 @@ static int plic_find_hart_id(struct device_node *node)
{
for (; node; node = node->parent) {
if (of_device_is_compatible(node, "riscv"))
- return riscv_of_processor_hart(node);
+ return riscv_of_processor_hartid(node);
}
return -1;
@@ -218,7 +219,7 @@ static int __init plic_init(struct device_node *node,
struct of_phandle_args parent;
struct plic_handler *handler;
irq_hw_number_t hwirq;
- int cpu;
+ int cpu, hartid;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
@@ -229,12 +230,13 @@ static int __init plic_init(struct device_node *node,
if (parent.args[0] == -1)
continue;
- cpu = plic_find_hart_id(parent.np);
- if (cpu < 0) {
+ hartid = plic_find_hart_id(parent.np);
+ if (hartid < 0) {
pr_warn("failed to parse hart ID for context %d.\n", i);
continue;
}
+ cpu = riscv_hartid_to_cpuid(hartid);
handler = per_cpu_ptr(&plic_handlers, cpu);
handler->present = true;
handler->ctxid = i;
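
The PLIC fix above hinges on the difference between a hart ID (the hardware
thread number found in the devicetree) and the kernel's logical CPU number:
per_cpu_ptr() must be indexed by the latter, hence the added
riscv_hartid_to_cpuid() translation. A hedged userspace sketch of that
reverse lookup (the mapping table below is hypothetical):

#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical firmware-assigned hart IDs; they need not start at 0. */
static const int cpuid_to_hartid[NR_CPUS] = { 1, 2, 3, 4 };

/* Analogue of riscv_hartid_to_cpuid(): reverse lookup, -1 if unknown. */
static int hartid_to_cpuid(int hartid)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpuid_to_hartid[cpu] == hartid)
			return cpu;
	return -1;
}

int main(void)
{
	int hartid = 3; /* as parsed from the PLIC's parent context */

	printf("hart %d is logical cpu %d\n", hartid, hartid_to_cpuid(hartid));
	return 0;
}
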
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index b1b47a40a278..faa7d61b9d6c 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -124,6 +124,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
break;
case IRQ_TYPE_EDGE_BOTH:
pdc_type = PDC_EDGE_DUAL;
+ type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_HIGH:
pdc_type = PDC_LEVEL_HIGH;
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index ca623e6446e4..fca31640e3ef 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -20,13 +20,13 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
-#include <asm/macintosh.h>
-#include <asm/macints.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
#include <asm/mac_iop.h>
#include <asm/mac_oss.h>
#include <asm/adb_iop.h>
-#include <linux/adb.h>
+#include <linux/adb.h>
/*#define DEBUG_ADB_IOP*/
@@ -38,9 +38,9 @@ static unsigned char *reply_ptr;
#endif
static enum adb_iop_state {
- idle,
- sending,
- awaiting_reply
+ idle,
+ sending,
+ awaiting_reply
} adb_iop_state;
static void adb_iop_start(void);
@@ -66,7 +66,8 @@ static void adb_iop_end_req(struct adb_request *req, int state)
{
req->complete = 1;
current_req = req->next;
- if (req->done) (*req->done)(req);
+ if (req->done)
+ (*req->done)(req);
adb_iop_state = state;
}
@@ -100,7 +101,7 @@ static void adb_iop_complete(struct iop_msg *msg)
static void adb_iop_listen(struct iop_msg *msg)
{
- struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message;
+ struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message;
struct adb_request *req;
unsigned long flags;
#ifdef DEBUG_ADB_IOP
@@ -113,9 +114,9 @@ static void adb_iop_listen(struct iop_msg *msg)
#ifdef DEBUG_ADB_IOP
printk("adb_iop_listen %p: rcvd packet, %d bytes: %02X %02X", req,
- (uint) amsg->count + 2, (uint) amsg->flags, (uint) amsg->cmd);
+ (uint)amsg->count + 2, (uint)amsg->flags, (uint)amsg->cmd);
for (i = 0; i < amsg->count; i++)
- printk(" %02X", (uint) amsg->data[i]);
+ printk(" %02X", (uint)amsg->data[i]);
printk("\n");
#endif
@@ -168,14 +169,15 @@ static void adb_iop_start(void)
/* get the packet to send */
req = current_req;
- if (!req) return;
+ if (!req)
+ return;
local_irq_save(flags);
#ifdef DEBUG_ADB_IOP
printk("adb_iop_start %p: sending packet, %d bytes:", req, req->nbytes);
- for (i = 0 ; i < req->nbytes ; i++)
- printk(" %02X", (uint) req->data[i]);
+ for (i = 0; i < req->nbytes; i++)
+ printk(" %02X", (uint)req->data[i]);
printk("\n");
#endif
@@ -196,19 +198,20 @@ static void adb_iop_start(void)
/* Now send it. The IOP manager will call adb_iop_complete */
/* when the packet has been sent. */
- iop_send_message(ADB_IOP, ADB_CHAN, req,
- sizeof(amsg), (__u8 *) &amsg, adb_iop_complete);
+ iop_send_message(ADB_IOP, ADB_CHAN, req, sizeof(amsg), (__u8 *)&amsg,
+ adb_iop_complete);
}
int adb_iop_probe(void)
{
- if (!iop_ism_present) return -ENODEV;
+ if (!iop_ism_present)
+ return -ENODEV;
return 0;
}
int adb_iop_init(void)
{
- printk("adb: IOP ISM driver v0.4 for Unified ADB.\n");
+ pr_info("adb: IOP ISM driver v0.4 for Unified ADB\n");
iop_listen(ADB_IOP, ADB_CHAN, adb_iop_listen, "ADB");
return 0;
}
@@ -218,10 +221,12 @@ int adb_iop_send_request(struct adb_request *req, int sync)
int err;
err = adb_iop_write(req);
- if (err) return err;
+ if (err)
+ return err;
if (sync) {
- while (!req->complete) adb_iop_poll();
+ while (!req->complete)
+ adb_iop_poll();
}
return 0;
}
@@ -251,7 +256,9 @@ static int adb_iop_write(struct adb_request *req)
}
local_irq_restore(flags);
- if (adb_iop_state == idle) adb_iop_start();
+
+ if (adb_iop_state == idle)
+ adb_iop_start();
return 0;
}
@@ -263,7 +270,8 @@ int adb_iop_autopoll(int devs)
void adb_iop_poll(void)
{
- if (adb_iop_state == idle) adb_iop_start();
+ if (adb_iop_state == idle)
+ adb_iop_start();
iop_ism_irq_poll(ADB_IOP);
}
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 76e98f0f7a3e..e49d1f287a17 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -203,15 +203,15 @@ static int adb_scan_bus(void)
}
/* Now fill in the handler_id field of the adb_handler entries. */
- pr_debug("adb devices:\n");
for (i = 1; i < 16; i++) {
if (adb_handler[i].original_address == 0)
continue;
adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
(i << 4) | 0xf);
adb_handler[i].handler_id = req.reply[2];
- pr_debug(" [%d]: %d %x\n", i, adb_handler[i].original_address,
- adb_handler[i].handler_id);
+ printk(KERN_DEBUG "adb device [%d]: %d 0x%X\n", i,
+ adb_handler[i].original_address,
+ adb_handler[i].handler_id);
devmask |= 1 << i;
}
return devmask;
@@ -579,6 +579,8 @@ adb_try_handler_change(int address, int new_id)
mutex_lock(&adb_handler_mutex);
ret = try_handler_change(address, new_id);
mutex_unlock(&adb_handler_mutex);
+ if (ret)
+ pr_debug("adb handler change: [%d] 0x%X\n", address, new_id);
return ret;
}
EXPORT_SYMBOL(adb_try_handler_change);
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index a261892c03b3..75482eeab2c4 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -757,6 +757,7 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
struct input_dev *input_dev;
int err;
int i;
+ char *keyboard_type;
if (adbhid[id]) {
pr_err("Trying to reregister ADB HID on ID %d\n", id);
@@ -798,24 +799,23 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
memcpy(hid->keycode, adb_to_linux_keycodes, sizeof(adb_to_linux_keycodes));
- pr_info("Detected ADB keyboard, type ");
switch (original_handler_id) {
default:
- pr_cont("<unknown>.\n");
+ keyboard_type = "<unknown>";
input_dev->id.version = ADB_KEYBOARD_UNKNOWN;
break;
case 0x01: case 0x02: case 0x03: case 0x06: case 0x08:
case 0x0C: case 0x10: case 0x18: case 0x1B: case 0x1C:
case 0xC0: case 0xC3: case 0xC6:
- pr_cont("ANSI.\n");
+ keyboard_type = "ANSI";
input_dev->id.version = ADB_KEYBOARD_ANSI;
break;
case 0x04: case 0x05: case 0x07: case 0x09: case 0x0D:
case 0x11: case 0x14: case 0x19: case 0x1D: case 0xC1:
case 0xC4: case 0xC7:
- pr_cont("ISO, swapping keys.\n");
+ keyboard_type = "ISO, swapping keys";
input_dev->id.version = ADB_KEYBOARD_ISO;
i = hid->keycode[10];
hid->keycode[10] = hid->keycode[50];
@@ -824,10 +824,11 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
case 0x12: case 0x15: case 0x16: case 0x17: case 0x1A:
case 0x1E: case 0xC2: case 0xC5: case 0xC8: case 0xC9:
- pr_cont("JIS.\n");
+ keyboard_type = "JIS";
input_dev->id.version = ADB_KEYBOARD_JIS;
break;
}
+ pr_info("Detected ADB keyboard, type %s.\n", keyboard_type);
for (i = 0; i < 128; i++)
if (hid->keycode[i])
@@ -972,16 +973,13 @@ adbhid_probe(void)
->get it to send separate codes for left and right shift,
control, option keys */
#if 0 /* handler 5 doesn't send separate codes for R modifiers */
- if (adb_try_handler_change(id, 5))
- printk("ADB keyboard at %d, handler set to 5\n", id);
- else
+ if (!adb_try_handler_change(id, 5))
#endif
- if (adb_try_handler_change(id, 3))
- printk("ADB keyboard at %d, handler set to 3\n", id);
- else
- printk("ADB keyboard at %d, handler 1\n", id);
+ adb_try_handler_change(id, 3);
adb_get_infos(id, &default_id, &cur_handler_id);
+ printk(KERN_DEBUG "ADB keyboard at %d has handler 0x%X\n",
+ id, cur_handler_id);
reg |= adbhid_input_reregister(id, default_id, org_handler_id,
cur_handler_id, 0);
}
@@ -999,48 +997,44 @@ adbhid_probe(void)
for (i = 0; i < mouse_ids.nids; i++) {
int id = mouse_ids.id[i];
int mouse_kind;
+ char *desc = "standard";
adb_get_infos(id, &default_id, &org_handler_id);
if (adb_try_handler_change(id, 4)) {
- printk("ADB mouse at %d, handler set to 4", id);
mouse_kind = ADBMOUSE_EXTENDED;
}
else if (adb_try_handler_change(id, 0x2F)) {
- printk("ADB mouse at %d, handler set to 0x2F", id);
mouse_kind = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 0x42)) {
- printk("ADB mouse at %d, handler set to 0x42", id);
mouse_kind = ADBMOUSE_TRACKBALLPRO;
}
else if (adb_try_handler_change(id, 0x66)) {
- printk("ADB mouse at %d, handler set to 0x66", id);
mouse_kind = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 0x5F)) {
- printk("ADB mouse at %d, handler set to 0x5F", id);
mouse_kind = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 3)) {
- printk("ADB mouse at %d, handler set to 3", id);
mouse_kind = ADBMOUSE_MS_A3;
}
else if (adb_try_handler_change(id, 2)) {
- printk("ADB mouse at %d, handler set to 2", id);
mouse_kind = ADBMOUSE_STANDARD_200;
}
else {
- printk("ADB mouse at %d, handler 1", id);
mouse_kind = ADBMOUSE_STANDARD_100;
}
if ((mouse_kind == ADBMOUSE_TRACKBALLPRO)
|| (mouse_kind == ADBMOUSE_MICROSPEED)) {
+ desc = "Microspeed/MacPoint or compatible";
init_microspeed(id);
} else if (mouse_kind == ADBMOUSE_MS_A3) {
+ desc = "Mouse Systems A3 Mouse or compatible";
init_ms_a3(id);
} else if (mouse_kind == ADBMOUSE_EXTENDED) {
+ desc = "extended";
/*
* Register 1 is usually used for device
* identification. Here, we try to identify
@@ -1054,32 +1048,36 @@ adbhid_probe(void)
(req.reply[1] == 0x9a) && ((req.reply[2] == 0x21)
|| (req.reply[2] == 0x20))) {
mouse_kind = ADBMOUSE_TRACKBALL;
+ desc = "trackman/mouseman";
init_trackball(id);
}
else if ((req.reply_len >= 4) &&
(req.reply[1] == 0x74) && (req.reply[2] == 0x70) &&
(req.reply[3] == 0x61) && (req.reply[4] == 0x64)) {
mouse_kind = ADBMOUSE_TRACKPAD;
+ desc = "trackpad";
init_trackpad(id);
}
else if ((req.reply_len >= 4) &&
(req.reply[1] == 0x4b) && (req.reply[2] == 0x4d) &&
(req.reply[3] == 0x4c) && (req.reply[4] == 0x31)) {
mouse_kind = ADBMOUSE_TURBOMOUSE5;
+ desc = "TurboMouse 5";
init_turbomouse(id);
}
else if ((req.reply_len == 9) &&
(req.reply[1] == 0x4b) && (req.reply[2] == 0x4f) &&
(req.reply[3] == 0x49) && (req.reply[4] == 0x54)) {
if (adb_try_handler_change(id, 0x42)) {
- pr_cont("\nADB MacAlly 2-button mouse at %d, handler set to 0x42", id);
mouse_kind = ADBMOUSE_MACALLY2;
+ desc = "MacAlly 2-button";
}
}
}
- pr_cont("\n");
adb_get_infos(id, &default_id, &cur_handler_id);
+ printk(KERN_DEBUG "ADB mouse (%s) at %d has handler 0x%X\n",
+ desc, id, cur_handler_id);
reg |= adbhid_input_reregister(id, default_id, org_handler_id,
cur_handler_id, mouse_kind);
}
@@ -1092,12 +1090,10 @@ init_trackpad(int id)
struct adb_request req;
unsigned char r1_buffer[8];
- pr_cont(" (trackpad)");
-
adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
ADB_READREG(id,1));
if (req.reply_len < 8)
- pr_cont("bad length for reg. 1\n");
+ pr_err("%s: bad length for reg. 1\n", __func__);
else
{
memcpy(r1_buffer, &req.reply[1], 8);
@@ -1145,8 +1141,6 @@ init_trackball(int id)
{
struct adb_request req;
- pr_cont(" (trackman/mouseman)");
-
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 00,0x81);
@@ -1177,8 +1171,6 @@ init_turbomouse(int id)
{
struct adb_request req;
- pr_cont(" (TurboMouse 5)");
-
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(3));
@@ -1213,8 +1205,6 @@ init_microspeed(int id)
{
struct adb_request req;
- pr_cont(" (Microspeed/MacPoint or compatible)");
-
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
/* This will initialize mice using the Microspeed, MacPoint and
@@ -1253,7 +1243,6 @@ init_ms_a3(int id)
{
struct adb_request req;
- pr_cont(" (Mouse Systems A3 Mouse, or compatible)");
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id, 0x2),
0x00,
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 07074820a167..17d3bc917562 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -360,9 +360,10 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
struct macio_dev *in_bay,
struct resource *parent_res)
{
+ char name[MAX_NODE_NAME_SIZE + 1];
struct macio_dev *dev;
const u32 *reg;
-
+
if (np == NULL)
return NULL;
@@ -402,6 +403,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
#endif
/* MacIO itself has a different reg; we use its PCI base */
+ snprintf(name, sizeof(name), "%pOFn", np);
if (np == chip->of_node) {
dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
chip->lbus.index,
@@ -410,12 +412,12 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
#else
0, /* NuBus may want to do something better here */
#endif
- MAX_NODE_NAME_SIZE, np->name);
+ MAX_NODE_NAME_SIZE, name);
} else {
reg = of_get_property(np, "reg", NULL);
dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
chip->lbus.index,
- reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name);
+ reg ? *reg : 0, MAX_NODE_NAME_SIZE, name);
}
/* Setup interrupts & resources */
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index ca4fcffe454b..d2451e58acb9 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -58,7 +58,13 @@ static ssize_t devspec_show(struct device *dev,
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RO(devspec);
-macio_config_of_attr (name, "%s\n");
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%pOFn\n", dev->of_node);
+}
+static DEVICE_ATTR_RO(name);
+
macio_config_of_attr (type, "%s\n");
static struct attribute *macio_dev_attrs[] = {
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 98dd702eb867..bbec6ac0a966 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -766,3 +766,38 @@ cuda_input(unsigned char *buf, int nb)
buf, nb, false);
}
}
+
+/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+#define RTC_OFFSET 2082844800
+
+time64_t cuda_get_time(void)
+{
+ struct adb_request req;
+ u32 now;
+
+ if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
+ return 0;
+ while (!req.complete)
+ cuda_poll();
+ if (req.reply_len != 7)
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ now = (req.reply[3] << 24) + (req.reply[4] << 16) +
+ (req.reply[5] << 8) + req.reply[6];
+ return (time64_t)now - RTC_OFFSET;
+}
+
+int cuda_set_rtc_time(struct rtc_time *tm)
+{
+ u32 now;
+ struct adb_request req;
+
+ now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
+ if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
+ now >> 24, now >> 16, now >> 8, now) < 0)
+ return -ENXIO;
+ while (!req.complete)
+ cuda_poll();
+ if ((req.reply_len != 3) && (req.reply_len != 7))
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ return 0;
+}
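
RTC_OFFSET is the span between the Mac epoch (1904-01-01) and the Unix epoch
(1970-01-01): 66 years, 17 of them leap years (1904 through 1968; 1900 falls
outside the range). A quick standalone check of the constant:

#include <stdio.h>

int main(void)
{
	/* 66 years of 365 days plus 17 leap days, in seconds */
	long days = 66L * 365 + 17; /* 24107 days */
	long offset = days * 86400;

	printf("%ld\n", offset); /* prints 2082844800 == RTC_OFFSET */
	return 0;
}
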
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index cf6f7d52d6be..ac824d7b2dcf 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -12,7 +12,7 @@
*
* 1999-08-02 (jmt) - Initial rewrite for Unified ADB.
* 2000-03-29 Tony Mantler <tonym@mac.linux-m68k.org>
- * - Big overhaul, should actually work now.
+ * - Big overhaul, should actually work now.
* 2006-12-31 Finn Thain - Another overhaul.
*
* Suggested reading:
@@ -23,7 +23,7 @@
* Apple's "ADB Analyzer" bus sniffer is invaluable:
* ftp://ftp.apple.com/developer/Tool_Chest/Devices_-_Hardware/Apple_Desktop_Bus/
*/
-
+
#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -77,7 +77,7 @@ static volatile unsigned char *via;
#define ST_ODD 0x20 /* ADB state: odd data byte */
#define ST_IDLE 0x30 /* ADB state: idle, nothing to send */
-static int macii_init_via(void);
+static int macii_init_via(void);
static void macii_start(void);
static irqreturn_t macii_interrupt(int irq, void *arg);
static void macii_queue_poll(void);
@@ -120,31 +120,15 @@ static int srq_asserted; /* have to poll for the device that asserted it */
static int command_byte; /* the most recent command byte transmitted */
static int autopoll_devs; /* bits set are device addresses to be polled */
-/* Sanity check for request queue. Doesn't check for cycles. */
-static int request_is_queued(struct adb_request *req) {
- struct adb_request *cur;
- unsigned long flags;
- local_irq_save(flags);
- cur = current_req;
- while (cur) {
- if (cur == req) {
- local_irq_restore(flags);
- return 1;
- }
- cur = cur->next;
- }
- local_irq_restore(flags);
- return 0;
-}
-
/* Check for MacII style ADB */
static int macii_probe(void)
{
- if (macintosh_config->adb_type != MAC_ADB_II) return -ENODEV;
+ if (macintosh_config->adb_type != MAC_ADB_II)
+ return -ENODEV;
via = via1;
- printk("adb: Mac II ADB Driver v1.0 for Unified ADB\n");
+ pr_info("adb: Mac II ADB Driver v1.0 for Unified ADB\n");
return 0;
}
@@ -153,15 +137,17 @@ int macii_init(void)
{
unsigned long flags;
int err;
-
+
local_irq_save(flags);
-
+
err = macii_init_via();
- if (err) goto out;
+ if (err)
+ goto out;
err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
macii_interrupt);
- if (err) goto out;
+ if (err)
+ goto out;
macii_state = idle;
out:
@@ -169,7 +155,7 @@ out:
return err;
}
-/* initialize the hardware */
+/* initialize the hardware */
static int macii_init_via(void)
{
unsigned char x;
@@ -179,7 +165,7 @@ static int macii_init_via(void)
/* Set up state: idle */
via[B] |= ST_IDLE;
- last_status = via[B] & (ST_MASK|CTLR_IRQ);
+ last_status = via[B] & (ST_MASK | CTLR_IRQ);
/* Shift register on input */
via[ACR] = (via[ACR] & ~SR_CTRL) | SR_EXT;
@@ -205,7 +191,8 @@ static void macii_queue_poll(void)
int next_device;
static struct adb_request req;
- if (!autopoll_devs) return;
+ if (!autopoll_devs)
+ return;
device_mask = (1 << (((command_byte & 0xF0) >> 4) + 1)) - 1;
if (autopoll_devs & ~device_mask)
@@ -213,10 +200,7 @@ static void macii_queue_poll(void)
else
next_device = ffs(autopoll_devs) - 1;
- BUG_ON(request_is_queued(&req));
-
- adb_request(&req, NULL, ADBREQ_NOSEND, 1,
- ADB_READREG(next_device, 0));
+ adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_READREG(next_device, 0));
req.sent = 0;
req.complete = 0;
@@ -235,45 +219,47 @@ static void macii_queue_poll(void)
static int macii_send_request(struct adb_request *req, int sync)
{
int err;
- unsigned long flags;
- BUG_ON(request_is_queued(req));
-
- local_irq_save(flags);
err = macii_write(req);
- local_irq_restore(flags);
+ if (err)
+ return err;
- if (!err && sync) {
- while (!req->complete) {
+ if (sync)
+ while (!req->complete)
macii_poll();
- }
- BUG_ON(request_is_queued(req));
- }
- return err;
+ return 0;
}
/* Send an ADB request (append to request queue) */
static int macii_write(struct adb_request *req)
{
+ unsigned long flags;
+
if (req->nbytes < 2 || req->data[0] != ADB_PACKET || req->nbytes > 15) {
req->complete = 1;
return -EINVAL;
}
-
+
req->next = NULL;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
+ local_irq_save(flags);
+
if (current_req != NULL) {
last_req->next = req;
last_req = req;
} else {
current_req = req;
last_req = req;
- if (macii_state == idle) macii_start();
+ if (macii_state == idle)
+ macii_start();
}
+
+ local_irq_restore(flags);
+
return 0;
}
@@ -287,7 +273,8 @@ static int macii_autopoll(int devs)
/* bit 1 == device 1, and so on. */
autopoll_devs = devs & 0xFFFE;
- if (!autopoll_devs) return 0;
+ if (!autopoll_devs)
+ return 0;
local_irq_save(flags);
@@ -304,7 +291,8 @@ static int macii_autopoll(int devs)
return err;
}
-static inline int need_autopoll(void) {
+static inline int need_autopoll(void)
+{
/* Was the last command Talk Reg 0
* and is the target on the autopoll list?
*/
@@ -317,21 +305,17 @@ static inline int need_autopoll(void) {
/* Prod the chip without interrupts */
static void macii_poll(void)
{
- disable_irq(IRQ_MAC_ADB);
macii_interrupt(0, NULL);
- enable_irq(IRQ_MAC_ADB);
}
/* Reset the bus */
static int macii_reset_bus(void)
{
static struct adb_request req;
-
- if (request_is_queued(&req))
- return 0;
/* Command = 0, Address = ignored */
- adb_request(&req, NULL, 0, 1, ADB_BUSRESET);
+ adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_BUSRESET);
+ macii_send_request(&req, 1);
/* Don't want any more requests during the Global Reset low time. */
udelay(3000);
@@ -346,10 +330,6 @@ static void macii_start(void)
req = current_req;
- BUG_ON(req == NULL);
-
- BUG_ON(macii_state != idle);
-
/* Now send it. Be careful though, that first byte of the request
* is actually ADB_PACKET; the real data begins at index 1!
* And req->nbytes is the number of bytes of real data plus one.
@@ -375,7 +355,7 @@ static void macii_start(void)
* to be activity on the ADB bus. The chip will poll to achieve this.
*
* The basic ADB state machine was left unchanged from the original MacII code
- * by Alan Cox, which was based on the CUDA driver for PowerMac.
+ * by Alan Cox, which was based on the CUDA driver for PowerMac.
* The syntax of the ADB status lines is totally different on MacII,
* though. MacII uses the states Command -> Even -> Odd -> Even ->...-> Idle
* for sending and Idle -> Even -> Odd -> Even ->...-> Idle for receiving.
@@ -387,164 +367,166 @@ static void macii_start(void)
static irqreturn_t macii_interrupt(int irq, void *arg)
{
int x;
- static int entered;
struct adb_request *req;
+ unsigned long flags;
+
+ local_irq_save(flags);
if (!arg) {
/* Clear the SR IRQ flag when polling. */
if (via[IFR] & SR_INT)
via[IFR] = SR_INT;
- else
+ else {
+ local_irq_restore(flags);
return IRQ_NONE;
+ }
}
- BUG_ON(entered++);
-
last_status = status;
- status = via[B] & (ST_MASK|CTLR_IRQ);
+ status = via[B] & (ST_MASK | CTLR_IRQ);
switch (macii_state) {
- case idle:
- if (reading_reply) {
- reply_ptr = current_req->reply;
- } else {
- BUG_ON(current_req != NULL);
- reply_ptr = reply_buf;
- }
+ case idle:
+ if (reading_reply) {
+ reply_ptr = current_req->reply;
+ } else {
+ WARN_ON(current_req);
+ reply_ptr = reply_buf;
+ }
- x = via[SR];
+ x = via[SR];
- if ((status & CTLR_IRQ) && (x == 0xFF)) {
- /* Bus timeout without SRQ sequence:
- * data is "FF" while CTLR_IRQ is "H"
- */
- reply_len = 0;
- srq_asserted = 0;
- macii_state = read_done;
- } else {
- macii_state = reading;
- *reply_ptr = x;
- reply_len = 1;
- }
+ if ((status & CTLR_IRQ) && (x == 0xFF)) {
+ /* Bus timeout without SRQ sequence:
+ * data is "FF" while CTLR_IRQ is "H"
+ */
+ reply_len = 0;
+ srq_asserted = 0;
+ macii_state = read_done;
+ } else {
+ macii_state = reading;
+ *reply_ptr = x;
+ reply_len = 1;
+ }
- /* set ADB state = even for first data byte */
- via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
- break;
+ /* set ADB state = even for first data byte */
+ via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
+ break;
- case sending:
- req = current_req;
- if (data_index >= req->nbytes) {
- req->sent = 1;
- macii_state = idle;
-
- if (req->reply_expected) {
- reading_reply = 1;
- } else {
- req->complete = 1;
- current_req = req->next;
- if (req->done) (*req->done)(req);
-
- if (current_req)
- macii_start();
- else
- if (need_autopoll())
- macii_autopoll(autopoll_devs);
- }
+ case sending:
+ req = current_req;
+ if (data_index >= req->nbytes) {
+ req->sent = 1;
+ macii_state = idle;
- if (macii_state == idle) {
- /* reset to shift in */
- via[ACR] &= ~SR_OUT;
- x = via[SR];
- /* set ADB state idle - might get SRQ */
- via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
- }
+ if (req->reply_expected) {
+ reading_reply = 1;
} else {
- via[SR] = req->data[data_index++];
-
- if ( (via[B] & ST_MASK) == ST_CMD ) {
- /* just sent the command byte, set to EVEN */
- via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
- } else {
- /* invert state bits, toggle ODD/EVEN */
- via[B] ^= ST_MASK;
- }
- }
- break;
-
- case reading:
- x = via[SR];
- BUG_ON((status & ST_MASK) == ST_CMD ||
- (status & ST_MASK) == ST_IDLE);
-
- /* Bus timeout with SRQ sequence:
- * data is "XX FF" while CTLR_IRQ is "L L"
- * End of packet without SRQ sequence:
- * data is "XX...YY 00" while CTLR_IRQ is "L...H L"
- * End of packet SRQ sequence:
- * data is "XX...YY 00" while CTLR_IRQ is "L...L L"
- * (where XX is the first response byte and
- * YY is the last byte of valid response data.)
- */
+ req->complete = 1;
+ current_req = req->next;
+ if (req->done)
+ (*req->done)(req);
- srq_asserted = 0;
- if (!(status & CTLR_IRQ)) {
- if (x == 0xFF) {
- if (!(last_status & CTLR_IRQ)) {
- macii_state = read_done;
- reply_len = 0;
- srq_asserted = 1;
- }
- } else if (x == 0x00) {
- macii_state = read_done;
- if (!(last_status & CTLR_IRQ))
- srq_asserted = 1;
- }
+ if (current_req)
+ macii_start();
+ else if (need_autopoll())
+ macii_autopoll(autopoll_devs);
}
- if (macii_state == reading) {
- BUG_ON(reply_len > 15);
- reply_ptr++;
- *reply_ptr = x;
- reply_len++;
+ if (macii_state == idle) {
+ /* reset to shift in */
+ via[ACR] &= ~SR_OUT;
+ x = via[SR];
+ /* set ADB state idle - might get SRQ */
+ via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
}
+ } else {
+ via[SR] = req->data[data_index++];
- /* invert state bits, toggle ODD/EVEN */
- via[B] ^= ST_MASK;
- break;
+ if ((via[B] & ST_MASK) == ST_CMD) {
+ /* just sent the command byte, set to EVEN */
+ via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
+ } else {
+ /* invert state bits, toggle ODD/EVEN */
+ via[B] ^= ST_MASK;
+ }
+ }
+ break;
- case read_done:
- x = via[SR];
+ case reading:
+ x = via[SR];
+ WARN_ON((status & ST_MASK) == ST_CMD ||
+ (status & ST_MASK) == ST_IDLE);
+
+ /* Bus timeout with SRQ sequence:
+ * data is "XX FF" while CTLR_IRQ is "L L"
+ * End of packet without SRQ sequence:
+ * data is "XX...YY 00" while CTLR_IRQ is "L...H L"
+ * End of packet SRQ sequence:
+ * data is "XX...YY 00" while CTLR_IRQ is "L...L L"
+ * (where XX is the first response byte and
+ * YY is the last byte of valid response data.)
+ */
- if (reading_reply) {
- reading_reply = 0;
- req = current_req;
- req->reply_len = reply_len;
- req->complete = 1;
- current_req = req->next;
- if (req->done) (*req->done)(req);
- } else if (reply_len && autopoll_devs)
- adb_input(reply_buf, reply_len, 0);
+ srq_asserted = 0;
+ if (!(status & CTLR_IRQ)) {
+ if (x == 0xFF) {
+ if (!(last_status & CTLR_IRQ)) {
+ macii_state = read_done;
+ reply_len = 0;
+ srq_asserted = 1;
+ }
+ } else if (x == 0x00) {
+ macii_state = read_done;
+ if (!(last_status & CTLR_IRQ))
+ srq_asserted = 1;
+ }
+ }
- macii_state = idle;
+ if (macii_state == reading &&
+ reply_len < ARRAY_SIZE(reply_buf)) {
+ reply_ptr++;
+ *reply_ptr = x;
+ reply_len++;
+ }
- /* SRQ seen before, initiate poll now */
- if (srq_asserted)
- macii_queue_poll();
+ /* invert state bits, toggle ODD/EVEN */
+ via[B] ^= ST_MASK;
+ break;
- if (current_req)
- macii_start();
- else
- if (need_autopoll())
- macii_autopoll(autopoll_devs);
+ case read_done:
+ x = via[SR];
- if (macii_state == idle)
- via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
- break;
+ if (reading_reply) {
+ reading_reply = 0;
+ req = current_req;
+ req->reply_len = reply_len;
+ req->complete = 1;
+ current_req = req->next;
+ if (req->done)
+ (*req->done)(req);
+ } else if (reply_len && autopoll_devs)
+ adb_input(reply_buf, reply_len, 0);
+
+ macii_state = idle;
+
+ /* SRQ seen before, initiate poll now */
+ if (srq_asserted)
+ macii_queue_poll();
+
+ if (current_req)
+ macii_start();
+ else if (need_autopoll())
+ macii_autopoll(autopoll_devs);
+
+ if (macii_state == idle)
+ via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
+ break;
- default:
+ default:
break;
}
- entered--;
+ local_irq_restore(flags);
return IRQ_HANDLED;
}
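
Two details of the rewritten handler are worth noting: the re-entrancy
assertion BUG_ON(entered++) is gone because the whole state machine now runs
under local_irq_save(), and writes into reply_buf are bounds-checked rather
than asserted. A toy sketch of the bounded append that replaced
BUG_ON(reply_len > 15) (names mirror the driver, behavior is illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned char reply_buf[16];
static int reply_len;

/* Silently drop bytes past the end instead of crashing the machine. */
static void reply_append(unsigned char x)
{
	if (reply_len < (int)ARRAY_SIZE(reply_buf))
		reply_buf[reply_len++] = x;
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++) /* four bytes too many; all are tolerated */
		reply_append(0x40 + i);
	printf("kept %d of 20 bytes\n", reply_len); /* kept 16 of 20 */
	return 0;
}
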
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index d72c450aebe5..60f57e2abf21 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1737,6 +1737,39 @@ pmu_enable_irled(int on)
pmu_wait_complete(&req);
}
+/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+#define RTC_OFFSET 2082844800
+
+time64_t pmu_get_time(void)
+{
+ struct adb_request req;
+ u32 now;
+
+ if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
+ return 0;
+ pmu_wait_complete(&req);
+ if (req.reply_len != 4)
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ now = (req.reply[0] << 24) + (req.reply[1] << 16) +
+ (req.reply[2] << 8) + req.reply[3];
+ return (time64_t)now - RTC_OFFSET;
+}
+
+int pmu_set_rtc_time(struct rtc_time *tm)
+{
+ u32 now;
+ struct adb_request req;
+
+ now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
+ if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
+ now >> 24, now >> 16, now >> 8, now) < 0)
+ return -ENXIO;
+ pmu_wait_complete(&req);
+ if (req.reply_len != 0)
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ return 0;
+}
+
void
pmu_restart(void)
{
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index d174c7437337..86d65462a61c 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -277,7 +277,7 @@ static int __init smu_controls_init(void)
fct = smu_fan_create(fan, 0);
if (fct == NULL) {
printk(KERN_WARNING "windfarm: Failed to create SMU "
- "RPM fan %s\n", fan->name);
+ "RPM fan %pOFn\n", fan);
continue;
}
list_add(&fct->link, &smu_fans);
@@ -296,7 +296,7 @@ static int __init smu_controls_init(void)
fct = smu_fan_create(fan, 1);
if (fct == NULL) {
printk(KERN_WARNING "windfarm: Failed to create SMU "
- "PWM fan %s\n", fan->name);
+ "PWM fan %pOFn\n", fan);
continue;
}
list_add(&fct->link, &smu_fans);
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index da7f4fc1a51d..a0f61eb853c5 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -22,14 +22,6 @@
#define VERSION "1.0"
-#define DEBUG
-
-#ifdef DEBUG
-#define DBG(args...) printk(args)
-#else
-#define DBG(args...) do { } while(0)
-#endif
-
/* If the cache is older than 800ms we'll refetch it */
#define MAX_AGE msecs_to_jiffies(800)
@@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
buf[i+2] = data[3];
buf[i+3] = data[2];
}
-#ifdef DEBUG
- DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id);
- for (i = 0; i < len; ++i)
- DBG(" %x", buf[i]);
- DBG("\n");
-#endif
+ printk(KERN_DEBUG "sat %d partition %x:", sat_id, id);
+ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
if (size)
*size = len;
return (struct smu_sdbp_header *) buf;
@@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat)
if (err < 0)
return err;
sat->last_read = jiffies;
+
#ifdef LOTSA_DEBUG
{
int i;
- DBG(KERN_DEBUG "wf_sat_get: data is");
- for (i = 0; i < 16; ++i)
- DBG(" %.2x", sat->cache[i]);
- DBG("\n");
+ printk(KERN_DEBUG "wf_sat_get: data is");
+ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
+ 16, 1, sat->cache, 16, false);
}
#endif
return 0;
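
print_hex_dump() replaces the open-coded byte loops; with DUMP_PREFIX_OFFSET,
rowsize 16 and groupsize 1 it emits one line per 16 bytes, each prefixed by
the offset into the buffer. A rough userspace equivalent of that layout:

#include <stdio.h>

static void hex_dump(const unsigned char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (i % 16 == 0)
			printf("%s %08x:", i ? "\n" : "", i);
		printf(" %02x", buf[i]);
	}
	printf("\n");
}

int main(void)
{
	unsigned char part[20] = { 0x4b, 0x4d, 0x4c, 0x31 };

	hex_dump(part, sizeof(part)); /* two lines: offsets 00000000, 00000010 */
	return 0;
}
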
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 8b8c123cae66..3db222509e44 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -215,17 +215,6 @@ config BLK_DEV_DM
If unsure, say N.
-config DM_MQ_DEFAULT
- bool "request-based DM: use blk-mq I/O path by default"
- depends on BLK_DEV_DM
- ---help---
- This option enables the blk-mq based I/O path for request-based
- DM devices by default. With the option the dm_mod.use_blk_mq
- module/boot option defaults to Y, without it to N, but it can
- still be overriden either way.
-
- If unsure say N.
-
config DM_DEBUG
bool "Device mapper debugging support"
depends on BLK_DEV_DM
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 1b5b9ad9e492..b61aac00ff40 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1200,7 +1200,7 @@ static void queue_demotion(struct smq_policy *mq)
struct policy_work work;
struct entry *e;
- if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
+ if (WARN_ON_ONCE(!mq->migrations_allowed))
return;
e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 7d480c930eaf..224d44503a06 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -112,18 +112,8 @@ struct mapped_device {
struct dm_stats stats;
- struct kthread_worker kworker;
- struct task_struct *kworker_task;
-
- /* for request-based merge heuristic in dm_request_fn() */
- unsigned seq_rq_merge_deadline_usecs;
- int last_rq_rw;
- sector_t last_rq_pos;
- ktime_t last_rq_start_time;
-
/* for blk-mq request-based DM support */
struct blk_mq_tag_set *tag_set;
- bool use_blk_mq:1;
bool init_tio_pdu:1;
struct srcu_struct io_barrier;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0481223b1deb..b8eec515a003 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2661,6 +2661,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
+ const char *devname = dm_table_device_name(ti->table);
int key_size;
unsigned int align_mask;
unsigned long long tmpll;
@@ -2806,18 +2807,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->io_queue = alloc_workqueue("kcryptd_io/%s",
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ 1, devname);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ 1, devname);
else
- cc->crypt_queue = alloc_workqueue("kcryptd",
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
- num_online_cpus());
+ num_online_cpus(), devname);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
@@ -2826,7 +2831,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
spin_lock_init(&cc->write_thread_lock);
cc->write_tree = RB_ROOT;
- cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
+ cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
if (IS_ERR(cc->write_thread)) {
ret = PTR_ERR(cc->write_thread);
cc->write_thread = NULL;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 32aabe27b37c..3cb97fa4c11d 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -315,10 +315,6 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
if (bio_op(bio) == REQ_OP_ZONE_RESET)
goto map_bio;
- /* We need to remap reported zones, so remember the BIO iter */
- if (bio_op(bio) == REQ_OP_ZONE_REPORT)
- goto map_bio;
-
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
@@ -380,11 +376,6 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
if (bio_op(bio) == REQ_OP_ZONE_RESET)
return DM_ENDIO_DONE;
- if (bio_op(bio) == REQ_OP_ZONE_REPORT) {
- dm_remap_zone_report(ti, bio, fc->start);
- return DM_ENDIO_DONE;
- }
-
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc)) {
@@ -457,6 +448,26 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
return 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int flakey_report_zones(struct dm_target *ti, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+ struct flakey_c *fc = ti->private;
+ int ret;
+
+ /* Do report and remap it */
+ ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
+ zones, nr_zones, gfp_mask);
+ if (ret != 0)
+ return ret;
+
+ if (*nr_zones)
+ dm_remap_zone_report(ti, fc->start, zones, nr_zones);
+ return 0;
+}
+#endif
+
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
struct flakey_c *fc = ti->private;
@@ -469,6 +480,7 @@ static struct target_type flakey_target = {
.version = {1, 5, 0},
#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM,
+ .report_zones = flakey_report_zones,
#endif
.module = THIS_MODULE,
.ctr = flakey_ctr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index e1fa6baf4e8e..bb3096bf2cc6 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -559,7 +559,12 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
}
memset(result + size, 0, JOURNAL_MAC_SIZE - size);
} else {
- __u8 digest[size];
+ __u8 digest[HASH_MAX_DIGESTSIZE];
+
+ if (WARN_ON(size > sizeof(digest))) {
+ dm_integrity_io_error(ic, "digest_size", -EINVAL);
+ goto err;
+ }
r = crypto_shash_final(desc, digest);
if (unlikely(r)) {
dm_integrity_io_error(ic, "crypto_shash_final", r);
@@ -1324,7 +1329,7 @@ static void integrity_metadata(struct work_struct *w)
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- char checksums_onstack[ic->tag_size + extra_space];
+ char checksums_onstack[HASH_MAX_DIGESTSIZE];
unsigned sectors_to_process = dio->range.n_sectors;
sector_t sector = dio->range.logical_sector;
@@ -1333,8 +1338,14 @@ static void integrity_metadata(struct work_struct *w)
checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
- if (!checksums)
+ if (!checksums) {
checksums = checksums_onstack;
+ if (WARN_ON(extra_space &&
+ digest_size > sizeof(checksums_onstack))) {
+ r = -EINVAL;
+ goto error;
+ }
+ }
__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
unsigned pos;
@@ -1546,7 +1557,7 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
if (ic->internal_hash) {
- char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
+ char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
@@ -1596,7 +1607,7 @@ retry_kmap:
if (ic->internal_hash) {
unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
if (unlikely(digest_size > ic->tag_size)) {
- char checksums_onstack[digest_size];
+ char checksums_onstack[HASH_MAX_DIGESTSIZE];
integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
} else
@@ -2023,7 +2034,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
unlikely(from_replay) &&
#endif
ic->internal_hash) {
- char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
+ char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
(char *)access_journal_data(ic, i, l), test_tag);
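
The dm-integrity hunks above replace variable-length arrays sized by
crypto_shash_digestsize() with fixed HASH_MAX_DIGESTSIZE buffers plus explicit
WARN_ON bounds checks, part of the kernel-wide VLA removal. A standalone
sketch of the guard pattern (buffer size and names are illustrative):

#include <stdio.h>
#include <string.h>

#define HASH_MAX_DIGESTSIZE 64 /* compile-time bound replacing the VLA size */

static int store_digest(const unsigned char *src, size_t size)
{
	unsigned char digest[HASH_MAX_DIGESTSIZE]; /* fixed size, no VLA */

	if (size > sizeof(digest)) /* the check the VLA made implicit */
		return -1;
	memcpy(digest, src, size);
	printf("stored %zu-byte digest\n", size);
	return 0;
}

int main(void)
{
	unsigned char d[32] = { 0 };

	return store_digest(d, sizeof(d));
}
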
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index b810ea77e6b1..f666778ad237 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1720,8 +1720,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
}
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
- int ioctl_flags,
- struct dm_ioctl **param, int *param_flags)
+ int ioctl_flags, struct dm_ioctl **param, int *param_flags)
{
struct dm_ioctl *dmi;
int secure_data;
@@ -1762,18 +1761,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
*param_flags |= DM_PARAMS_MALLOC;
- if (copy_from_user(dmi, user, param_kernel->data_size))
- goto bad;
+ /* Copy from param_kernel (which was already copied from user) */
+ memcpy(dmi, param_kernel, minimum_data_size);
-data_copied:
- /*
- * Abort if something changed the ioctl data while it was being copied.
- */
- if (dmi->data_size != param_kernel->data_size) {
- DMERR("rejecting ioctl: data size modified while processing parameters");
+ if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
+ param_kernel->data_size - minimum_data_size))
goto bad;
- }
-
+data_copied:
/* Wipe the user buffer so we do not return it to userspace */
if (secure_data && clear_user(user, param_kernel->data_size))
goto bad;
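
The copy_params() rework closes a double-fetch window: the ioctl header is
copied in from userspace once, validated, and then reused via memcpy(), so
only the payload past minimum_data_size is fetched a second time and a
concurrently rewritten data_size can no longer bypass the validation. A
much-simplified userspace sketch of the pattern (types and sizes are
illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ioctl_hdr {
	unsigned int data_size; /* total size, validated exactly once */
	char data[];
};

/*
 * 'user' stands in for an untrusted, concurrently writable buffer; all
 * header fields are taken from the already-validated first copy only.
 */
static struct ioctl_hdr *copy_params(const struct ioctl_hdr *validated,
				     const char *user)
{
	size_t min = sizeof(*validated);
	struct ioctl_hdr *dmi = malloc(validated->data_size);

	if (!dmi)
		return NULL;
	memcpy(dmi, validated, min);          /* trusted header copy */
	memcpy(dmi->data, user + min,         /* tail copied only once */
	       validated->data_size - min);
	return dmi;
}

int main(void)
{
	char raw[64] = { 0 };
	struct ioctl_hdr hdr = { .data_size = sizeof(raw) };
	struct ioctl_hdr *dmi = copy_params(&hdr, raw);

	printf("copied %u bytes\n", dmi ? dmi->data_size : 0);
	free(dmi);
	return 0;
}
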
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 2f7c44a006c4..8d7ddee6ac4d 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -102,19 +102,6 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
-#ifdef CONFIG_BLK_DEV_ZONED
-static int linear_end_io(struct dm_target *ti, struct bio *bio,
- blk_status_t *error)
-{
- struct linear_c *lc = ti->private;
-
- if (!*error && bio_op(bio) == REQ_OP_ZONE_REPORT)
- dm_remap_zone_report(ti, bio, lc->start);
-
- return DM_ENDIO_DONE;
-}
-#endif
-
static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
@@ -148,6 +135,26 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
return 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int linear_report_zones(struct dm_target *ti, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+ struct linear_c *lc = (struct linear_c *) ti->private;
+ int ret;
+
+ /* Do report and remap it */
+ ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
+ zones, nr_zones, gfp_mask);
+ if (ret != 0)
+ return ret;
+
+ if (*nr_zones)
+ dm_remap_zone_report(ti, lc->start, zones, nr_zones);
+ return 0;
+}
+#endif
+
static int linear_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -211,8 +218,8 @@ static struct target_type linear_target = {
.name = "linear",
.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
- .end_io = linear_end_io,
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+ .report_zones = linear_report_zones,
#else
.features = DM_TARGET_PASSES_INTEGRITY,
#endif
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 419362c2d8ac..d6a66921daf4 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -203,14 +203,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
if (m->queue_mode == DM_TYPE_NONE) {
- /*
- * Default to request-based.
- */
- if (dm_use_blk_mq(dm_table_get_md(ti->table)))
- m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
- else
- m->queue_mode = DM_TYPE_REQUEST_BASED;
-
+ m->queue_mode = DM_TYPE_REQUEST_BASED;
} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
/*
@@ -537,10 +530,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
* get the queue busy feedback (via BLK_STS_RESOURCE),
* otherwise I/O merging can suffer.
*/
- if (q->mq_ops)
- return DM_MAPIO_REQUEUE;
- else
- return DM_MAPIO_DELAY_REQUEUE;
+ return DM_MAPIO_REQUEUE;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
@@ -668,7 +658,7 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
static void process_queued_io_list(struct multipath *m)
{
- if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+ if (m->queue_mode == DM_TYPE_REQUEST_BASED)
dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
else if (m->queue_mode == DM_TYPE_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
@@ -1089,10 +1079,9 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
if (!strcasecmp(queue_mode_name, "bio"))
m->queue_mode = DM_TYPE_BIO_BASED;
- else if (!strcasecmp(queue_mode_name, "rq"))
+ else if (!strcasecmp(queue_mode_name, "rq") ||
+ !strcasecmp(queue_mode_name, "mq"))
m->queue_mode = DM_TYPE_REQUEST_BASED;
- else if (!strcasecmp(queue_mode_name, "mq"))
- m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else {
ti->error = "Unknown 'queue_mode' requested";
r = -EINVAL;
@@ -1726,9 +1715,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
break;
- case DM_TYPE_MQ_REQUEST_BASED:
- DMEMIT("queue_mode mq ");
- break;
default:
WARN_ON_ONCE(true);
break;
@@ -1972,7 +1958,7 @@ static int multipath_busy(struct dm_target *ti)
/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
- return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
+ return (m->queue_mode != DM_TYPE_REQUEST_BASED);
/* Guess which priority_group will be used at next mapping time */
pg = READ_ONCE(m->current_pg);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c44925e4e481..e1dd1622a290 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2475,7 +2475,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
}
/* Enable bitmap creation for RAID levels != 0 */
- mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
+ mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e547b8dd298..7cd36e4d1310 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -23,19 +23,6 @@ static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
-
-bool dm_use_blk_mq_default(void)
-{
- return use_blk_mq;
-}
-
-bool dm_use_blk_mq(struct mapped_device *md)
-{
- return md->use_blk_mq;
-}
-EXPORT_SYMBOL_GPL(dm_use_blk_mq);
-
unsigned dm_get_reserved_rq_based_ios(void)
{
return __dm_get_module_param(&reserved_rq_based_ios,
@@ -59,41 +46,13 @@ int dm_request_based(struct mapped_device *md)
return queue_is_rq_based(md->queue);
}
-static void dm_old_start_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_mq_start_queue(struct request_queue *q)
+void dm_start_queue(struct request_queue *q)
{
blk_mq_unquiesce_queue(q);
blk_mq_kick_requeue_list(q);
}
-void dm_start_queue(struct request_queue *q)
-{
- if (!q->mq_ops)
- dm_old_start_queue(q);
- else
- dm_mq_start_queue(q);
-}
-
-static void dm_old_stop_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (!blk_queue_stopped(q))
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_mq_stop_queue(struct request_queue *q)
+void dm_stop_queue(struct request_queue *q)
{
if (blk_mq_queue_stopped(q))
return;
@@ -101,14 +60,6 @@ static void dm_mq_stop_queue(struct request_queue *q)
blk_mq_quiesce_queue(q);
}
-void dm_stop_queue(struct request_queue *q)
-{
- if (!q->mq_ops)
- dm_old_stop_queue(q);
- else
- dm_mq_stop_queue(q);
-}
-
/*
* Partial completion handling for request-based dm
*/
@@ -179,9 +130,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
- struct request_queue *q = md->queue;
- unsigned long flags;
-
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
@@ -189,18 +137,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
wake_up(&md->wait);
/*
- * Run this off this callpath, as drivers could invoke end_io while
- * inside their request_fn (and holding the queue lock). Calling
- * back into ->request_fn() could deadlock attempting to grab the
- * queue lock again.
- */
- if (!q->mq_ops && run_queue) {
- spin_lock_irqsave(q->queue_lock, flags);
- blk_run_queue_async(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
-
- /*
* dm_put() must be at the end of this function. See the comment above
*/
dm_put(md);
@@ -222,27 +158,10 @@ static void dm_end_request(struct request *clone, blk_status_t error)
tio->ti->type->release_clone_rq(clone);
rq_end_stats(md, rq);
- if (!rq->q->mq_ops)
- blk_end_request_all(rq, error);
- else
- blk_mq_end_request(rq, error);
+ blk_mq_end_request(rq, error);
rq_completed(md, rw, true);
}
-/*
- * Requeue the original request of a clone.
- */
-static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
-{
- struct request_queue *q = rq->q;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_requeue_request(q, rq);
- blk_delay_queue(q, delay_ms);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
blk_mq_delay_kick_requeue_list(q, msecs);
@@ -273,11 +192,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
tio->ti->type->release_clone_rq(tio->clone);
}
- if (!rq->q->mq_ops)
- dm_old_requeue_request(rq, delay_ms);
- else
- dm_mq_delay_requeue_request(rq, delay_ms);
-
+ dm_mq_delay_requeue_request(rq, delay_ms);
rq_completed(md, rw, false);
}
@@ -340,10 +255,7 @@ static void dm_softirq_done(struct request *rq)
rq_end_stats(md, rq);
rw = rq_data_dir(rq);
- if (!rq->q->mq_ops)
- blk_end_request_all(rq, tio->error);
- else
- blk_mq_end_request(rq, tio->error);
+ blk_mq_end_request(rq, tio->error);
rq_completed(md, rw, false);
return;
}
@@ -363,17 +275,14 @@ static void dm_complete_request(struct request *rq, blk_status_t error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- if (!rq->q->mq_ops)
- blk_complete_request(rq);
- else
- blk_mq_complete_request(rq);
+ blk_mq_complete_request(rq);
}
/*
* Complete the not-mapped clone and the original request with the error status
* through softirq context.
* Target's rq_end_io() function isn't called.
- * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
+ * This may be used when the target's clone_and_map_rq() function fails.
*/
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
@@ -381,21 +290,10 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
dm_complete_request(rq, error);
}
-/*
- * Called with the clone's queue lock held (in the case of .request_fn)
- */
static void end_clone_request(struct request *clone, blk_status_t error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
- /*
- * Actual request completion is done in a softirq context which doesn't
- * hold the clone's queue lock. Otherwise, deadlock could occur because:
- * - another request may be submitted by the upper level driver
- * of the stacking during the completion
- * - the submission which requires queue lock may be done
- * against this clone's queue
- */
dm_complete_request(tio->orig, error);
}
@@ -446,8 +344,6 @@ static int setup_clone(struct request *clone, struct request *rq,
return 0;
}
-static void map_tio_request(struct kthread_work *work);
-
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
struct mapped_device *md)
{
@@ -464,8 +360,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
*/
if (!md->init_tio_pdu)
memset(&tio->info, 0, sizeof(tio->info));
- if (md->kworker_task)
- kthread_init_work(&tio->work, map_tio_request);
}
/*
@@ -504,10 +398,7 @@ check_again:
blk_rq_unprep_clone(clone);
tio->ti->type->release_clone_rq(clone);
tio->clone = NULL;
- if (!rq->q->mq_ops)
- r = DM_MAPIO_DELAY_REQUEUE;
- else
- r = DM_MAPIO_REQUEUE;
+ r = DM_MAPIO_REQUEUE;
goto check_again;
}
break;
@@ -530,20 +421,23 @@ check_again:
return r;
}
+/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+ return sprintf(buf, "%u\n", 0);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count)
+{
+ return count;
+}
+
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
- if (!orig->q->mq_ops)
- blk_start_request(orig);
- else
- blk_mq_start_request(orig);
+ blk_mq_start_request(orig);
atomic_inc(&md->pending[rq_data_dir(orig)]);
- if (md->seq_rq_merge_deadline_usecs) {
- md->last_rq_pos = rq_end_sector(orig);
- md->last_rq_rw = rq_data_dir(orig);
- md->last_rq_start_time = ktime_get();
- }
-
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
tio->duration_jiffies = jiffies;
@@ -563,8 +457,10 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
dm_get(md);
}
-static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
+static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
+ struct mapped_device *md = set->driver_data;
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
/*
@@ -581,163 +477,6 @@ static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
return 0;
}
-static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
-{
- return __dm_rq_init_rq(q->rq_alloc_data, rq);
-}
-
-static void map_tio_request(struct kthread_work *work)
-{
- struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
-
- if (map_request(tio) == DM_MAPIO_REQUEUE)
- dm_requeue_original_request(tio, false);
-}
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
-{
- return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
-}
-
-#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
- const char *buf, size_t count)
-{
- unsigned deadline;
-
- if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
- return count;
-
- if (kstrtouint(buf, 10, &deadline))
- return -EINVAL;
-
- if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
- deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
-
- md->seq_rq_merge_deadline_usecs = deadline;
-
- return count;
-}
-
-static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
-{
- ktime_t kt_deadline;
-
- if (!md->seq_rq_merge_deadline_usecs)
- return false;
-
- kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
- kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
-
- return !ktime_after(ktime_get(), kt_deadline);
-}
-
-/*
- * q->request_fn for old request-based dm.
- * Called with the queue lock held.
- */
-static void dm_old_request_fn(struct request_queue *q)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_target *ti = md->immutable_target;
- struct request *rq;
- struct dm_rq_target_io *tio;
- sector_t pos = 0;
-
- if (unlikely(!ti)) {
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-
- if (unlikely(!map)) {
- dm_put_live_table(md, srcu_idx);
- return;
- }
- ti = dm_table_find_target(map, pos);
- dm_put_live_table(md, srcu_idx);
- }
-
- /*
- * For suspend, check blk_queue_stopped() and increment
- * ->pending within a single queue_lock not to increment the
- * number of in-flight I/Os after the queue is stopped in
- * dm_suspend().
- */
- while (!blk_queue_stopped(q)) {
- rq = blk_peek_request(q);
- if (!rq)
- return;
-
- /* always use block 0 to find the target for flushes for now */
- pos = 0;
- if (req_op(rq) != REQ_OP_FLUSH)
- pos = blk_rq_pos(rq);
-
- if ((dm_old_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
- md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
- (ti->type->busy && ti->type->busy(ti))) {
- blk_delay_queue(q, 10);
- return;
- }
-
- dm_start_request(md, rq);
-
- tio = tio_from_request(rq);
- init_tio(tio, rq, md);
- /* Establish tio->ti before queuing work (map_tio_request) */
- tio->ti = ti;
- kthread_queue_work(&md->kworker, &tio->work);
- BUG_ON(!irqs_disabled());
- }
-}
-
-/*
- * Fully initialize a .request_fn request-based queue.
- */
-int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
-{
- struct dm_target *immutable_tgt;
-
- /* Fully initialize the queue */
- md->queue->cmd_size = sizeof(struct dm_rq_target_io);
- md->queue->rq_alloc_data = md;
- md->queue->request_fn = dm_old_request_fn;
- md->queue->init_rq_fn = dm_rq_init_rq;
-
- immutable_tgt = dm_table_get_immutable_target(t);
- if (immutable_tgt && immutable_tgt->per_io_data_size) {
- /* any target-specific per-io data is immediately after the tio */
- md->queue->cmd_size += immutable_tgt->per_io_data_size;
- md->init_tio_pdu = true;
- }
- if (blk_init_allocated_queue(md->queue) < 0)
- return -EINVAL;
-
- /* disable dm_old_request_fn's merge heuristic by default */
- md->seq_rq_merge_deadline_usecs = 0;
-
- blk_queue_softirq_done(md->queue, dm_softirq_done);
-
- /* Initialize the request-based DM worker thread */
- kthread_init_worker(&md->kworker);
- md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
- "kdmwork-%s", dm_device_name(md));
- if (IS_ERR(md->kworker_task)) {
- int error = PTR_ERR(md->kworker_task);
- md->kworker_task = NULL;
- return error;
- }
-
- return 0;
-}
-
-static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- return __dm_rq_init_rq(set->driver_data, rq);
-}
-
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -790,11 +529,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
struct dm_target *immutable_tgt;
int err;
- if (!dm_table_all_blk_mq_devices(t)) {
- DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
- return -EINVAL;
- }
-
md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
if (!md->tag_set)
return -ENOMEM;
@@ -845,6 +579,8 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+/* Unused, but preserved for userspace compatibility */
+static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
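
The hunks above keep dm-rq's removed tunables visible to userspace: the rq_based_seq_io_merge_deadline attribute now reports a constant 0 and swallows writes, and use_blk_mq survives as an unread module parameter. A minimal userspace sketch of this compat-shim pattern — all names below are illustrative, not kernel API:

    #include <stdio.h>

    static int shim_show(char *buf, size_t len)
    {
        return snprintf(buf, len, "%u\n", 0);   /* always report 0 */
    }

    static long shim_store(const char *buf, size_t count)
    {
        (void)buf;            /* value is parsed nowhere: silently accepted */
        return (long)count;   /* pretend the whole write succeeded */
    }

    int main(void)
    {
        char buf[16];

        shim_show(buf, sizeof(buf));
        printf("read back: %s", buf);
        printf("store consumed %ld bytes\n", shim_store("100000\n", 7));
        return 0;
    }
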
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index f43c45460aac..b39245545229 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -46,10 +46,6 @@ struct dm_rq_clone_bio_info {
struct bio clone;
};
-bool dm_use_blk_mq_default(void);
-bool dm_use_blk_mq(struct mapped_device *md);
-
-int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
void dm_mq_cleanup_mapped_device(struct mapped_device *md);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index c209b8a19b84..a05fcd50e1b9 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -92,7 +92,8 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
{
- sprintf(buf, "%d\n", dm_use_blk_mq(md));
+ /* Purely for userspace compatibility */
+ sprintf(buf, "%d\n", true);
return strlen(buf);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3d0e2c198f06..9038c302d5c2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -47,7 +47,6 @@ struct dm_table {
bool integrity_supported:1;
bool singleton:1;
- bool all_blk_mq:1;
unsigned integrity_added:1;
/*
@@ -872,8 +871,7 @@ static bool __table_type_bio_based(enum dm_queue_mode table_type)
static bool __table_type_request_based(enum dm_queue_mode table_type)
{
- return (table_type == DM_TYPE_REQUEST_BASED ||
- table_type == DM_TYPE_MQ_REQUEST_BASED);
+ return table_type == DM_TYPE_REQUEST_BASED;
}
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
@@ -999,10 +997,6 @@ verify_bio_based:
BUG_ON(!request_based); /* No targets in this table */
- /*
- * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
- * having a compatible target use dm_table_set_type.
- */
t->type = DM_TYPE_REQUEST_BASED;
verify_rq_based:
@@ -1022,11 +1016,9 @@ verify_rq_based:
int srcu_idx;
struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
- /* inherit live table's type and all_blk_mq */
- if (live_table) {
+ /* inherit live table's type */
+ if (live_table)
t->type = live_table->type;
- t->all_blk_mq = live_table->all_blk_mq;
- }
dm_put_live_table(t->md, srcu_idx);
return 0;
}
@@ -1046,17 +1038,10 @@ verify_rq_based:
DMERR("table load rejected: including non-request-stackable devices");
return -EINVAL;
}
- if (v.sq_count && v.mq_count) {
+ if (v.sq_count > 0) {
DMERR("table load rejected: not all devices are blk-mq request-stackable");
return -EINVAL;
}
- t->all_blk_mq = v.mq_count > 0;
-
- if (!t->all_blk_mq &&
- (t->type == DM_TYPE_MQ_REQUEST_BASED || t->type == DM_TYPE_NVME_BIO_BASED)) {
- DMERR("table load rejected: all devices are not blk-mq request-stackable");
- return -EINVAL;
- }
return 0;
}
@@ -1105,11 +1090,6 @@ bool dm_table_request_based(struct dm_table *t)
return __table_type_request_based(dm_table_get_type(t));
}
-bool dm_table_all_blk_mq_devices(struct dm_table *t)
-{
- return t->all_blk_mq;
-}
-
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
enum dm_queue_mode type = dm_table_get_type(t);
@@ -1937,6 +1917,16 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
*/
if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+
+ /*
+ * For a zoned target, the number of zones should be updated for the
+ * correct value to be exposed in sysfs queue/nr_zones. For a BIO based
+ * target, this is all that is needed. For a request based target, the
+ * queue zone bitmaps must also be updated.
+ * Use blk_revalidate_disk_zones() to handle this.
+ */
+ if (blk_queue_is_zoned(q))
+ blk_revalidate_disk_zones(t->md->disk);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -2079,26 +2069,24 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
}
EXPORT_SYMBOL(dm_table_get_md);
+const char *dm_table_device_name(struct dm_table *t)
+{
+ return dm_device_name(t->md);
+}
+EXPORT_SYMBOL_GPL(dm_table_device_name);
+
void dm_table_run_md_queue_async(struct dm_table *t)
{
struct mapped_device *md;
struct request_queue *queue;
- unsigned long flags;
if (!dm_table_request_based(t))
return;
md = dm_table_get_md(t);
queue = dm_get_md_queue(md);
- if (queue) {
- if (queue->mq_ops)
- blk_mq_run_hw_queues(queue, true);
- else {
- spin_lock_irqsave(queue->queue_lock, flags);
- blk_run_queue_async(queue);
- spin_unlock_irqrestore(queue->queue_lock, flags);
- }
- }
+ if (queue)
+ blk_mq_run_hw_queues(queue, true);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index aaf1ad481ee8..0bd8d498b3b9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -325,7 +325,7 @@ struct thin_c {
* Ensures the thin is not destroyed until the worker has finished
* iterating the active_thins list.
*/
- atomic_t refcount;
+ refcount_t refcount;
struct completion can_destroy;
};
@@ -4044,12 +4044,12 @@ static struct target_type pool_target = {
*--------------------------------------------------------------*/
static void thin_get(struct thin_c *tc)
{
- atomic_inc(&tc->refcount);
+ refcount_inc(&tc->refcount);
}
static void thin_put(struct thin_c *tc)
{
- if (atomic_dec_and_test(&tc->refcount))
+ if (refcount_dec_and_test(&tc->refcount))
complete(&tc->can_destroy);
}
@@ -4193,7 +4193,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -EINVAL;
goto bad;
}
- atomic_set(&tc->refcount, 1);
+ refcount_set(&tc->refcount, 1);
init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
spin_unlock_irqrestore(&tc->pool->lock, flags);
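
The dm-thin hunks (and the dm-zoned-target ones further down) convert object lifetimes from atomic_t to refcount_t. A small userspace model of the inc/dec_and_test contract using C11 atomics; this is only a sketch — the kernel's refcount_t additionally saturates and warns on overflow or underflow instead of wrapping like a plain atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    struct thin {
        atomic_int refcount;
        int can_destroy;     /* stands in for the completion */
    };

    static void thin_get(struct thin *tc)
    {
        atomic_fetch_add(&tc->refcount, 1);
    }

    static void thin_put(struct thin *tc)
    {
        /* dec_and_test: true only for the thread dropping the last ref */
        if (atomic_fetch_sub(&tc->refcount, 1) == 1)
            tc->can_destroy = 1;   /* complete(&tc->can_destroy) */
    }

    int main(void)
    {
        struct thin tc = { .refcount = 1, .can_destroy = 0 };

        thin_get(&tc);   /* worker takes a reference */
        thin_put(&tc);   /* worker drops it */
        thin_put(&tc);   /* initial reference: last put signals destroy */
        printf("can_destroy = %d\n", tc.can_destroy);   /* prints 1 */
        return 0;
    }
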
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 684af08d0747..0ce04e5b4afb 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -212,12 +212,15 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
struct dm_verity_fec_io *fio = fec_io(io);
u64 block, ileaved;
u8 *bbuf, *rs_block;
- u8 want_digest[v->digest_size];
+ u8 want_digest[HASH_MAX_DIGESTSIZE];
unsigned n, k;
if (neras)
*neras = 0;
+ if (WARN_ON(v->digest_size > sizeof(want_digest)))
+ return -EINVAL;
+
/*
* read each of the rsn data blocks that are part of the RS block, and
* interleave contents to available bufs
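
The dm-verity-fec hunk above replaces a variable-length array sized by v->digest_size with a fixed worst-case buffer plus a WARN_ON guard. A hedged sketch of the pattern, with MAX_DIGEST standing in for HASH_MAX_DIGESTSIZE; the explicit check replaces the implicit, unbounded stack allocation of the VLA:

    #include <stdio.h>
    #include <string.h>

    #define MAX_DIGEST 64   /* stand-in for HASH_MAX_DIGESTSIZE */

    static int read_bufs(unsigned int digest_size)
    {
        unsigned char want_digest[MAX_DIGEST];

        if (digest_size > sizeof(want_digest))   /* was: u8 want_digest[digest_size] */
            return -1;                           /* -EINVAL in the kernel */

        memset(want_digest, 0, digest_size);
        return 0;
    }

    int main(void)
    {
        printf("sha256-sized digest: %d\n", read_bufs(32));   /* ok: 0 */
        printf("oversized digest:    %d\n", read_bufs(128));  /* rejected: -1 */
        return 0;
    }
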
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 5f1f80d424dd..2d50eec94cd7 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -350,10 +350,7 @@ static struct wc_memory_superblock *sb(struct dm_writecache *wc)
static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
- if (is_power_of_2(sizeof(struct wc_entry)) && 0)
- return &sb(wc)->entries[e - wc->entries];
- else
- return &sb(wc)->entries[e->index];
+ return &sb(wc)->entries[e->index];
}
static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 969954915566..fa68336560c3 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -99,7 +99,7 @@ struct dmz_mblock {
struct rb_node node;
struct list_head link;
sector_t no;
- atomic_t ref;
+ unsigned int ref;
unsigned long state;
struct page *page;
void *data;
@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
RB_CLEAR_NODE(&mblk->node);
INIT_LIST_HEAD(&mblk->link);
- atomic_set(&mblk->ref, 0);
+ mblk->ref = 0;
mblk->state = 0;
mblk->no = mblk_no;
mblk->data = page_address(mblk->page);
@@ -339,10 +339,11 @@ static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
}
/*
- * Lookup a metadata block in the rbtree.
+ * Lookup a metadata block in the rbtree. If the block is found, increment
+ * its reference count.
*/
-static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
- sector_t mblk_no)
+static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
+ sector_t mblk_no)
{
struct rb_root *root = &zmd->mblk_rbtree;
struct rb_node *node = root->rb_node;
@@ -350,8 +351,17 @@ static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
while (node) {
mblk = container_of(node, struct dmz_mblock, node);
- if (mblk->no == mblk_no)
+ if (mblk->no == mblk_no) {
+ /*
+ * If this is the first reference to the block,
+ * remove it from the LRU list.
+ */
+ mblk->ref++;
+ if (mblk->ref == 1 &&
+ !test_bit(DMZ_META_DIRTY, &mblk->state))
+ list_del_init(&mblk->link);
return mblk;
+ }
 node = (mblk->no < mblk_no) ? node->rb_right : node->rb_left;
}
@@ -382,32 +392,47 @@ static void dmz_mblock_bio_end_io(struct bio *bio)
}
/*
- * Read a metadata block from disk.
+ * Read an uncached metadata block from disk and add it to the cache.
*/
-static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
- sector_t mblk_no)
+static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
+ sector_t mblk_no)
{
- struct dmz_mblock *mblk;
+ struct dmz_mblock *mblk, *m;
sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
struct bio *bio;
- /* Get block and insert it */
+ /* Get a new block and a BIO to read it */
mblk = dmz_alloc_mblock(zmd, mblk_no);
if (!mblk)
return NULL;
- spin_lock(&zmd->mblk_lock);
- atomic_inc(&mblk->ref);
- set_bit(DMZ_META_READING, &mblk->state);
- dmz_insert_mblock(zmd, mblk);
- spin_unlock(&zmd->mblk_lock);
-
bio = bio_alloc(GFP_NOIO, 1);
if (!bio) {
dmz_free_mblock(zmd, mblk);
return NULL;
}
+ spin_lock(&zmd->mblk_lock);
+
+ /*
+ * Make sure that another context did not start reading
+ * the block already.
+ */
+ m = dmz_get_mblock_fast(zmd, mblk_no);
+ if (m) {
+ spin_unlock(&zmd->mblk_lock);
+ dmz_free_mblock(zmd, mblk);
+ bio_put(bio);
+ return m;
+ }
+
+ mblk->ref++;
+ set_bit(DMZ_META_READING, &mblk->state);
+ dmz_insert_mblock(zmd, mblk);
+
+ spin_unlock(&zmd->mblk_lock);
+
+ /* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio_set_dev(bio, zmd->dev->bdev);
bio->bi_private = mblk;
@@ -484,7 +509,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd,
spin_lock(&zmd->mblk_lock);
- if (atomic_dec_and_test(&mblk->ref)) {
+ mblk->ref--;
+ if (mblk->ref == 0) {
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
rb_erase(&mblk->node, &zmd->mblk_rbtree);
dmz_free_mblock(zmd, mblk);
@@ -508,18 +534,12 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
/* Check rbtree */
spin_lock(&zmd->mblk_lock);
- mblk = dmz_lookup_mblock(zmd, mblk_no);
- if (mblk) {
- /* Cache hit: remove block from LRU list */
- if (atomic_inc_return(&mblk->ref) == 1 &&
- !test_bit(DMZ_META_DIRTY, &mblk->state))
- list_del_init(&mblk->link);
- }
+ mblk = dmz_get_mblock_fast(zmd, mblk_no);
spin_unlock(&zmd->mblk_lock);
if (!mblk) {
/* Cache miss: read the block from disk */
- mblk = dmz_fetch_mblock(zmd, mblk_no);
+ mblk = dmz_get_mblock_slow(zmd, mblk_no);
if (!mblk)
return ERR_PTR(-ENOMEM);
}
@@ -753,7 +773,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
spin_lock(&zmd->mblk_lock);
clear_bit(DMZ_META_DIRTY, &mblk->state);
- if (atomic_read(&mblk->ref) == 0)
+ if (mblk->ref == 0)
list_add_tail(&mblk->link, &zmd->mblk_lru_list);
spin_unlock(&zmd->mblk_lock);
}
@@ -2308,7 +2328,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
mblk = list_first_entry(&zmd->mblk_dirty_list,
struct dmz_mblock, link);
dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
- (u64)mblk->no, atomic_read(&mblk->ref));
+ (u64)mblk->no, mblk->ref);
list_del_init(&mblk->link);
rb_erase(&mblk->node, &zmd->mblk_rbtree);
dmz_free_mblock(zmd, mblk);
@@ -2326,8 +2346,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
root = &zmd->mblk_rbtree;
rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
- (u64)mblk->no, atomic_read(&mblk->ref));
- atomic_set(&mblk->ref, 0);
+ (u64)mblk->no, mblk->ref);
+ mblk->ref = 0;
dmz_free_mblock(zmd, mblk);
}
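
The dmz_get_mblock_fast()/dmz_get_mblock_slow() split above is the classic allocate-outside-the-lock, recheck-under-the-lock cache pattern; it also shows why mblk->ref can drop from atomic_t to a plain unsigned int — every access now happens under mblk_lock. A userspace model, with a pthread mutex standing in for the spinlock and a one-slot cache standing in for the rbtree:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    struct mblock {
        long no;
        unsigned int ref;   /* plain int: only touched under 'lock' */
    };

    static struct mblock *cache;   /* one-slot stand-in for the rbtree */

    static struct mblock *get_fast(long no)   /* caller holds 'lock' */
    {
        if (cache && cache->no == no) {
            cache->ref++;
            return cache;
        }
        return NULL;
    }

    static struct mblock *get_slow(long no)
    {
        struct mblock *m, *won;

        m = calloc(1, sizeof(*m));   /* allocate outside the lock */
        m->no = no;

        pthread_mutex_lock(&lock);
        won = get_fast(no);          /* did another context insert it? */
        if (won) {
            pthread_mutex_unlock(&lock);
            free(m);                 /* lost the race: drop our copy */
            return won;
        }
        m->ref = 1;
        cache = m;                   /* insert while still locked */
        pthread_mutex_unlock(&lock);
        return m;
    }

    int main(void)
    {
        struct mblock *a = get_slow(42);
        struct mblock *b = get_slow(42);   /* hits the fast path */

        printf("same block: %d, ref = %u\n", a == b, a->ref);   /* 1, 2 */
        free(a);
        return 0;
    }
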
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index a44183ff4be0..981154e59461 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -19,7 +19,7 @@ struct dmz_bioctx {
struct dmz_target *target;
struct dm_zone *zone;
struct bio *bio;
- atomic_t ref;
+ refcount_t ref;
blk_status_t status;
};
@@ -28,7 +28,7 @@ struct dmz_bioctx {
*/
struct dm_chunk_work {
struct work_struct work;
- atomic_t refcount;
+ refcount_t refcount;
struct dmz_target *target;
unsigned int chunk;
struct bio_list bio_list;
@@ -115,7 +115,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
if (nr_blocks == dmz_bio_blocks(bio)) {
/* Setup and submit the BIO */
bio->bi_iter.bi_sector = sector;
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(bio);
return 0;
}
@@ -134,7 +134,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
bio_advance(bio, clone->bi_iter.bi_size);
/* Submit the clone */
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(clone);
return 0;
@@ -240,7 +240,7 @@ static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
/* Setup and submit the BIO */
bio_set_dev(bio, dmz->dev->bdev);
bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(bio);
if (dmz_is_seq(zone))
@@ -456,7 +456,7 @@ out:
*/
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
- atomic_inc(&cw->refcount);
+ refcount_inc(&cw->refcount);
}
/*
@@ -465,7 +465,7 @@ static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
*/
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
- if (atomic_dec_and_test(&cw->refcount)) {
+ if (refcount_dec_and_test(&cw->refcount)) {
WARN_ON(!bio_list_empty(&cw->bio_list));
radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
kfree(cw);
@@ -546,7 +546,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
goto out;
INIT_WORK(&cw->work, dmz_chunk_work);
- atomic_set(&cw->refcount, 0);
+ refcount_set(&cw->refcount, 0);
cw->target = dmz;
cw->chunk = chunk;
bio_list_init(&cw->bio_list);
@@ -599,7 +599,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->target = dmz;
bioctx->zone = NULL;
bioctx->bio = bio;
- atomic_set(&bioctx->ref, 1);
+ refcount_set(&bioctx->ref, 1);
bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
@@ -633,7 +633,7 @@ static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error
if (bioctx->status == BLK_STS_OK && *error)
bioctx->status = *error;
- if (!atomic_dec_and_test(&bioctx->ref))
+ if (!refcount_dec_and_test(&bioctx->ref))
return DM_ENDIO_INCOMPLETE;
/* Done */
@@ -702,8 +702,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
- dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1)
- >> dev->zone_nr_sectors_shift;
+ dev->nr_zones = blkdev_nr_zones(dev->bdev);
dmz->dev = dev;
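
The dmz_get_zoned_device() hunk above swaps an open-coded computation for blkdev_nr_zones(). The expression it replaces, (capacity + zone_nr_sectors - 1) >> zone_nr_sectors_shift, is a round-up division; a quick check that the generic ceil form behaves as expected (values here are arbitrary):

    #include <assert.h>
    #include <stdio.h>

    static unsigned long nr_zones(unsigned long long capacity,
                                  unsigned long long zone_sectors)
    {
        return (capacity + zone_sectors - 1) / zone_sectors;   /* ceil */
    }

    int main(void)
    {
        unsigned long long zone = 1ULL << 19;   /* 256 MiB zone, in sectors */

        assert(nr_zones(zone * 4, zone) == 4);       /* exact multiple */
        assert(nr_zones(zone * 4 + 1, zone) == 5);   /* runt zone counts */
        printf("round-up zone count checks pass\n");
        return 0;
    }
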
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 45abb54037fc..c510179a7f84 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -458,6 +458,57 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
+static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+ struct mapped_device *md = disk->private_data;
+ struct dm_target *tgt;
+ struct dm_table *map;
+ int srcu_idx, ret;
+
+ if (dm_suspended_md(md))
+ return -EAGAIN;
+
+ map = dm_get_live_table(md, &srcu_idx);
+ if (!map)
+ return -EIO;
+
+ tgt = dm_table_find_target(map, sector);
+ if (!dm_target_is_valid(tgt)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * If we are executing this, we already know that the block device
+ * is a zoned device and so each target should have support for that
+ * type of drive. A missing report_zones method means that the target
+ * driver has a problem.
+ */
+ if (WARN_ON(!tgt->type->report_zones)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * blkdev_report_zones() will loop and call this again to cover all the
+ * zones of the target, eventually moving on to the next target.
+ * So there is no need to loop here trying to fill the entire array
+ * of zones.
+ */
+ ret = tgt->type->report_zones(tgt, sector, zones,
+ nr_zones, gfp_mask);
+
+out:
+ dm_put_live_table(md, srcu_idx);
+ return ret;
+#else
+ return -ENOTSUPP;
+#endif
+}
+
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
__acquires(md->io_barrier)
@@ -1155,93 +1206,49 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/*
- * The zone descriptors obtained with a zone report indicate zone positions
- * within the target backing device, regardless of that device is a partition
- * and regardless of the target mapping start sector on the device or partition.
- * The zone descriptors start sector and write pointer position must be adjusted
- * to match their relative position within the dm device.
- * A target may call dm_remap_zone_report() after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
- * backing device.
+ * The zone descriptors obtained with a zone report indicate
+ * zone positions within the underlying device of the target. The zone
+ * descriptors must be remapped to match their position within the dm device.
+ * The calling target should obtain the zone information using
+ * blkdev_report_zones() so that the remapping for any partition
+ * offset is already handled.
*/
-void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+void dm_remap_zone_report(struct dm_target *ti, sector_t start,
+ struct blk_zone *zones, unsigned int *nr_zones)
{
#ifdef CONFIG_BLK_DEV_ZONED
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct bio *report_bio = tio->io->orig_bio;
- struct blk_zone_report_hdr *hdr = NULL;
struct blk_zone *zone;
- unsigned int nr_rep = 0;
- unsigned int ofst;
- sector_t part_offset;
- struct bio_vec bvec;
- struct bvec_iter iter;
- void *addr;
-
- if (bio->bi_status)
- return;
-
- /*
- * bio sector was incremented by the request size on completion. Taking
- * into account the original request sector, the target start offset on
- * the backing device and the target mapping offset (ti->begin), the
- * start sector of the backing device. The partition offset is always 0
- * if the target uses a whole device.
- */
- part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
+ unsigned int nrz = *nr_zones;
+ int i;
/*
- * Remap the start sector of the reported zones. For sequential zones,
- * also remap the write pointer position.
+ * Remap the start sector and write pointer position of the zones in
+ * the array. Since the target's underlying device may have reported
+ * more zones than fit within the target size, also adjust the number
+ * of zones.
*/
- bio_for_each_segment(bvec, report_bio, iter) {
- addr = kmap_atomic(bvec.bv_page);
-
- /* Remember the report header in the first page */
- if (!hdr) {
- hdr = addr;
- ofst = sizeof(struct blk_zone_report_hdr);
- } else
- ofst = 0;
-
- /* Set zones start sector */
- while (hdr->nr_zones && ofst < bvec.bv_len) {
- zone = addr + ofst;
- zone->start -= part_offset;
- if (zone->start >= start + ti->len) {
- hdr->nr_zones = 0;
- break;
- }
- zone->start = zone->start + ti->begin - start;
- if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
- if (zone->cond == BLK_ZONE_COND_FULL)
- zone->wp = zone->start + zone->len;
- else if (zone->cond == BLK_ZONE_COND_EMPTY)
- zone->wp = zone->start;
- else
- zone->wp = zone->wp + ti->begin - start - part_offset;
- }
- ofst += sizeof(struct blk_zone);
- hdr->nr_zones--;
- nr_rep++;
+ for (i = 0; i < nrz; i++) {
+ zone = zones + i;
+ if (zone->start >= start + ti->len) {
+ memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
+ break;
}
- if (addr != hdr)
- kunmap_atomic(addr);
-
- if (!hdr->nr_zones)
- break;
- }
+ zone->start = zone->start + ti->begin - start;
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ continue;
- if (hdr) {
- hdr->nr_zones = nr_rep;
- kunmap_atomic(hdr);
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ zone->wp = zone->start + zone->len;
+ else if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->wp = zone->start;
+ else
+ zone->wp = zone->wp + ti->begin - start;
}
- bio_advance(report_bio, report_bio->bi_iter.bi_size);
-
+ *nr_zones = i;
#else /* !CONFIG_BLK_DEV_ZONED */
- bio->bi_status = BLK_STS_NOTSUPP;
+ *nr_zones = 0;
#endif
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
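
The arithmetic in the new dm_remap_zone_report() can be read in isolation: shift each zone's start (and write pointer) by ti->begin - start, and truncate nr_zones at the first zone past the target. A sketch under simplifying assumptions — struct zone is a stand-in for struct blk_zone, and the conventional/full/empty write-pointer special cases are omitted:

    #include <stdio.h>

    struct zone { unsigned long long start, wp, len; };

    static void remap(struct zone *zones, unsigned int *nr_zones,
                      unsigned long long begin,   /* ti->begin */
                      unsigned long long start,   /* target start on device */
                      unsigned long long len)     /* ti->len */
    {
        unsigned int i;

        for (i = 0; i < *nr_zones; i++) {
            if (zones[i].start >= start + len)
                break;                    /* zone lies past the target */
            /* unsigned wraparound makes this work even when begin < start,
             * the same modular-sector trick the kernel relies on */
            zones[i].start += begin - start;
            zones[i].wp    += begin - start;
        }
        *nr_zones = i;   /* drop the zones we did not remap */
    }

    int main(void)
    {
        struct zone z[3] = {
            { .start = 1000, .wp = 1100, .len = 100 },
            { .start = 1100, .wp = 1100, .len = 100 },
            { .start = 1200, .wp = 1200, .len = 100 },   /* beyond target */
        };
        unsigned int nr = 3;

        remap(z, &nr, 0 /* begin */, 1000 /* start */, 200 /* len */);
        printf("nr_zones = %u, zone0 start = %llu wp = %llu\n",
               nr, z[0].start, z[0].wp);   /* 2, 0, 100 */
        return 0;
    }
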
@@ -1327,8 +1334,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
return r;
}
- if (bio_op(bio) != REQ_OP_ZONE_REPORT)
- bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+ bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone->bi_iter.bi_size = to_bytes(len);
if (unlikely(bio_integrity(bio) != NULL))
@@ -1541,7 +1547,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
*/
static int __split_and_process_non_flush(struct clone_info *ci)
{
- struct bio *bio = ci->bio;
struct dm_target *ti;
unsigned len;
int r;
@@ -1553,11 +1558,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (unlikely(__process_abnormal_io(ci, ti, &r)))
return r;
- if (bio_op(bio) == REQ_OP_ZONE_REPORT)
- len = ci->sector_count;
- else
- len = min_t(sector_t, max_io_len(ci->sector, ti),
- ci->sector_count);
+ len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
if (r < 0)
@@ -1616,9 +1617,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* We take a clone of the original to store in
* ci.io->orig_bio to be used by end_io_acct() and
* for dec_pending to use for completion handling.
- * As this path is not used for REQ_OP_ZONE_REPORT,
- * the usage of io->orig_bio in dm_remap_zone_report()
- * won't be affected by this reassignment.
*/
struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
GFP_NOIO, &md->queue->bio_split);
@@ -1666,7 +1664,7 @@ static blk_qc_t __process_bio(struct mapped_device *md,
* Defend against IO still getting in during teardown
* - as was seen for a time with nvme-fcloop
*/
- if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) {
+ if (WARN_ON_ONCE(!ti || !dm_target_is_valid(ti))) {
error = -EIO;
goto out;
}
@@ -1808,8 +1806,6 @@ static void dm_wq_work(struct work_struct *work);
static void dm_init_normal_md_queue(struct mapped_device *md)
{
- md->use_blk_mq = false;
-
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
@@ -1820,8 +1816,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
{
if (md->wq)
destroy_workqueue(md->wq);
- if (md->kworker_task)
- kthread_stop(md->kworker_task);
bioset_exit(&md->bs);
bioset_exit(&md->io_bs);
@@ -1888,7 +1882,6 @@ static struct mapped_device *alloc_dev(int minor)
goto bad_io_barrier;
md->numa_node_id = numa_node_id;
- md->use_blk_mq = dm_use_blk_mq_default();
md->init_tio_pdu = false;
md->type = DM_TYPE_NONE;
mutex_init(&md->suspend_lock);
@@ -1919,7 +1912,6 @@ static struct mapped_device *alloc_dev(int minor)
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
init_completion(&md->kobj_holder.completion);
- md->kworker_task = NULL;
md->disk->major = _major;
md->disk->first_minor = minor;
@@ -2219,14 +2211,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
switch (type) {
case DM_TYPE_REQUEST_BASED:
- dm_init_normal_md_queue(md);
- r = dm_old_init_request_queue(md, t);
- if (r) {
- DMERR("Cannot initialize queue for request-based mapped device");
- return r;
- }
- break;
- case DM_TYPE_MQ_REQUEST_BASED:
r = dm_mq_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based dm-mq mapped device");
@@ -2331,9 +2315,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
blk_set_queue_dying(md->queue);
- if (dm_request_based(md) && md->kworker_task)
- kthread_flush_worker(&md->kworker);
-
/*
* Take suspend_lock so that presuspend and postsuspend methods
* do not race with internal suspend.
@@ -2586,11 +2567,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
- if (dm_request_based(md)) {
+ if (dm_request_based(md))
dm_stop_queue(md->queue);
- if (md->kworker_task)
- kthread_flush_worker(&md->kworker);
- }
flush_workqueue(md->wq);
@@ -2965,7 +2943,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
goto out;
break;
case DM_TYPE_REQUEST_BASED:
- case DM_TYPE_MQ_REQUEST_BASED:
pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_io_data_size is used for blk-mq pdu at queue allocation */
@@ -3167,6 +3144,7 @@ static const struct block_device_operations dm_blk_dops = {
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
+ .report_zones = dm_blk_report_zones,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 114a81b27c37..2d539b82ec08 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -70,7 +70,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
-bool dm_table_all_blk_mq_devices(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 2fc8c113977f..1cd4f991792c 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2288,9 +2288,9 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
goto out;
}
if (mddev->pers) {
- mddev->pers->quiesce(mddev, 1);
+ mddev_suspend(mddev);
md_bitmap_destroy(mddev);
- mddev->pers->quiesce(mddev, 0);
+ mddev_resume(mddev);
}
mddev->bitmap_info.offset = 0;
if (mddev->bitmap_info.file) {
@@ -2327,8 +2327,8 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
mddev->bitmap_info.offset = offset;
if (mddev->pers) {
struct bitmap *bitmap;
- mddev->pers->quiesce(mddev, 1);
bitmap = md_bitmap_create(mddev, -1);
+ mddev_suspend(mddev);
if (IS_ERR(bitmap))
rv = PTR_ERR(bitmap);
else {
@@ -2337,11 +2337,12 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
if (rv)
mddev->bitmap_info.offset = 0;
}
- mddev->pers->quiesce(mddev, 0);
if (rv) {
md_bitmap_destroy(mddev);
+ mddev_resume(mddev);
goto out;
}
+ mddev_resume(mddev);
}
}
}
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 0b2af6e74fc3..8dff19d5502e 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -33,13 +33,6 @@ struct dlm_lock_resource {
int mode;
};
-struct suspend_info {
- int slot;
- sector_t lo;
- sector_t hi;
- struct list_head list;
-};
-
struct resync_info {
__le64 lo;
__le64 hi;
@@ -80,7 +73,13 @@ struct md_cluster_info {
struct dlm_lock_resource **other_bitmap_lockres;
struct dlm_lock_resource *resync_lockres;
struct list_head suspend_list;
+
spinlock_t suspend_lock;
+ /* record the region in which writes should be suspended */
+ sector_t suspend_lo;
+ sector_t suspend_hi;
+ int suspend_from; /* the slot which broadcast suspend_lo/hi */
+
struct md_thread *recovery_thread;
unsigned long recovery_map;
/* communication loc resources */
@@ -105,6 +104,7 @@ enum msg_type {
RE_ADD,
BITMAP_NEEDS_SYNC,
CHANGE_CAPACITY,
+ BITMAP_RESIZE,
};
struct cluster_msg {
@@ -270,25 +270,22 @@ static void add_resync_info(struct dlm_lock_resource *lockres,
ri->hi = cpu_to_le64(hi);
}
-static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres)
+static int read_resync_info(struct mddev *mddev,
+ struct dlm_lock_resource *lockres)
{
struct resync_info ri;
- struct suspend_info *s = NULL;
- sector_t hi = 0;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int ret = 0;
dlm_lock_sync(lockres, DLM_LOCK_CR);
memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
- hi = le64_to_cpu(ri.hi);
- if (hi > 0) {
- s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
- if (!s)
- goto out;
- s->hi = hi;
- s->lo = le64_to_cpu(ri.lo);
+ if (le64_to_cpu(ri.hi) > 0) {
+ cinfo->suspend_hi = le64_to_cpu(ri.hi);
+ cinfo->suspend_lo = le64_to_cpu(ri.lo);
+ ret = 1;
}
dlm_unlock_sync(lockres);
-out:
- return s;
+ return ret;
}
static void recover_bitmaps(struct md_thread *thread)
@@ -298,7 +295,6 @@ static void recover_bitmaps(struct md_thread *thread)
struct dlm_lock_resource *bm_lockres;
char str[64];
int slot, ret;
- struct suspend_info *s, *tmp;
sector_t lo, hi;
while (cinfo->recovery_map) {
@@ -325,13 +321,17 @@ static void recover_bitmaps(struct md_thread *thread)
/* Clear suspend_area associated with the bitmap */
spin_lock_irq(&cinfo->suspend_lock);
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- }
+ cinfo->suspend_hi = 0;
+ cinfo->suspend_lo = 0;
+ cinfo->suspend_from = -1;
spin_unlock_irq(&cinfo->suspend_lock);
+ /* Kick off a reshape if needed */
+ if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+ test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ mddev->reshape_position != MaxSector)
+ md_wakeup_thread(mddev->sync_thread);
+
if (hi > 0) {
if (lo < mddev->recovery_cp)
mddev->recovery_cp = lo;
@@ -434,34 +434,23 @@ static void ack_bast(void *arg, int mode)
}
}
-static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
-{
- struct suspend_info *s, *tmp;
-
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- break;
- }
-}
-
static void remove_suspend_info(struct mddev *mddev, int slot)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
mddev->pers->quiesce(mddev, 1);
spin_lock_irq(&cinfo->suspend_lock);
- __remove_suspend_info(cinfo, slot);
+ cinfo->suspend_hi = 0;
+ cinfo->suspend_lo = 0;
spin_unlock_irq(&cinfo->suspend_lock);
mddev->pers->quiesce(mddev, 0);
}
-
static void process_suspend_info(struct mddev *mddev,
int slot, sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
- struct suspend_info *s;
+ struct mdp_superblock_1 *sb = NULL;
+ struct md_rdev *rdev;
if (!hi) {
/*
@@ -475,6 +464,12 @@ static void process_suspend_info(struct mddev *mddev,
return;
}
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) {
+ sb = page_address(rdev->sb_page);
+ break;
+ }
+
/*
* The bitmaps are not same for different nodes
* if RESYNCING is happening in one node, then
@@ -487,26 +482,26 @@ static void process_suspend_info(struct mddev *mddev,
* sync_low/hi is used to record the region which
* arrived in the previous RESYNCING message,
*
- * Call bitmap_sync_with_cluster to clear
- * NEEDED_MASK and set RESYNC_MASK since
- * resync thread is running in another node,
- * so we don't need to do the resync again
- * with the same section */
- md_bitmap_sync_with_cluster(mddev, cinfo->sync_low, cinfo->sync_hi, lo, hi);
+ * Call md_bitmap_sync_with_cluster to clear NEEDED_MASK
+ * and set RESYNC_MASK, since the resync thread is running
+ * on another node and we don't need to do the resync
+ * again with the same section.
+ *
+ * Skip md_bitmap_sync_with_cluster if a reshape is
+ * happening, because the reshaped region is small and
+ * we don't want to trigger lots of WARN()s.
+ */
+ if (sb && !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE))
+ md_bitmap_sync_with_cluster(mddev, cinfo->sync_low,
+ cinfo->sync_hi, lo, hi);
cinfo->sync_low = lo;
cinfo->sync_hi = hi;
- s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
- if (!s)
- return;
- s->slot = slot;
- s->lo = lo;
- s->hi = hi;
mddev->pers->quiesce(mddev, 1);
spin_lock_irq(&cinfo->suspend_lock);
- /* Remove existing entry (if exists) before adding */
- __remove_suspend_info(cinfo, slot);
- list_add(&s->list, &cinfo->suspend_list);
+ cinfo->suspend_from = slot;
+ cinfo->suspend_lo = lo;
+ cinfo->suspend_hi = hi;
spin_unlock_irq(&cinfo->suspend_lock);
mddev->pers->quiesce(mddev, 0);
}
@@ -612,6 +607,11 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
case BITMAP_NEEDS_SYNC:
__recover_slot(mddev, le32_to_cpu(msg->slot));
break;
+ case BITMAP_RESIZE:
+ if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0))
+ ret = md_bitmap_resize(mddev->bitmap,
+ le64_to_cpu(msg->high), 0, 0);
+ break;
default:
ret = -1;
pr_warn("%s:%d Received unknown message from %d\n",
@@ -800,7 +800,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
struct md_cluster_info *cinfo = mddev->cluster_info;
int i, ret = 0;
struct dlm_lock_resource *bm_lockres;
- struct suspend_info *s;
char str[64];
sector_t lo, hi;
@@ -819,16 +818,13 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
bm_lockres->flags |= DLM_LKF_NOQUEUE;
ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
if (ret == -EAGAIN) {
- s = read_resync_info(mddev, bm_lockres);
- if (s) {
+ if (read_resync_info(mddev, bm_lockres)) {
pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
__func__, __LINE__,
- (unsigned long long) s->lo,
- (unsigned long long) s->hi, i);
- spin_lock_irq(&cinfo->suspend_lock);
- s->slot = i;
- list_add(&s->list, &cinfo->suspend_list);
- spin_unlock_irq(&cinfo->suspend_lock);
+ (unsigned long long) cinfo->suspend_lo,
+ (unsigned long long) cinfo->suspend_hi,
+ i);
+ cinfo->suspend_from = i;
}
ret = 0;
lockres_free(bm_lockres);
@@ -1001,10 +997,17 @@ static int leave(struct mddev *mddev)
if (!cinfo)
return 0;
- /* BITMAP_NEEDS_SYNC message should be sent when node
+ /*
+ * BITMAP_NEEDS_SYNC message should be sent when node
* is leaving the cluster with dirty bitmap, also we
- * can only deliver it when dlm connection is available */
- if (cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector)
+ * can only deliver it when dlm connection is available.
+ *
+ * Also, we should send the BITMAP_NEEDS_SYNC message when
+ * reshaping is interrupted.
+ */
+ if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) ||
+ (mddev->reshape_position != MaxSector &&
+ test_bit(MD_CLOSING, &mddev->flags)))
resync_bitmap(mddev);
set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
@@ -1102,6 +1105,80 @@ static void metadata_update_cancel(struct mddev *mddev)
unlock_comm(cinfo);
}
+static int update_bitmap_size(struct mddev *mddev, sector_t size)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg = {0};
+ int ret;
+
+ cmsg.type = cpu_to_le32(BITMAP_RESIZE);
+ cmsg.high = cpu_to_le64(size);
+ ret = sendmsg(cinfo, &cmsg, 0);
+ if (ret)
+ pr_err("%s:%d: failed to send BITMAP_RESIZE message (%d)\n",
+ __func__, __LINE__, ret);
+ return ret;
+}
+
+static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsize)
+{
+ struct bitmap_counts *counts;
+ char str[64];
+ struct dlm_lock_resource *bm_lockres;
+ struct bitmap *bitmap = mddev->bitmap;
+ unsigned long my_pages = bitmap->counts.pages;
+ int i, rv;
+
+ /*
+ * We need to ensure all the nodes can grow to a larger
+ * bitmap size before starting the reshape.
+ */
+ rv = update_bitmap_size(mddev, newsize);
+ if (rv)
+ return rv;
+
+ for (i = 0; i < mddev->bitmap_info.nodes; i++) {
+ if (i == md_cluster_ops->slot_number(mddev))
+ continue;
+
+ bitmap = get_bitmap_from_slot(mddev, i);
+ if (IS_ERR(bitmap)) {
+ pr_err("can't get bitmap from slot %d\n", i);
+ goto out;
+ }
+ counts = &bitmap->counts;
+
+ /*
+ * If we can hold the bitmap lock of one node then
+ * the slot is not occupied, so update the pages.
+ */
+ snprintf(str, 64, "bitmap%04d", i);
+ bm_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!bm_lockres) {
+ pr_err("Cannot initialize %s lock\n", str);
+ goto out;
+ }
+ bm_lockres->flags |= DLM_LKF_NOQUEUE;
+ rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
+ if (!rv)
+ counts->pages = my_pages;
+ lockres_free(bm_lockres);
+
+ if (my_pages != counts->pages)
+ /*
+ * Let's revert the bitmap size if one node
+ * can't resize its bitmap.
+ */
+ goto out;
+ }
+
+ return 0;
+out:
+ md_bitmap_free(bitmap);
+ update_bitmap_size(mddev, oldsize);
+ return -1;
+}
+
/*
* return 0 if all the bitmaps have the same sync_size
*/
@@ -1243,6 +1320,16 @@ static int resync_start(struct mddev *mddev)
return dlm_lock_sync_interruptible(cinfo->resync_lockres, DLM_LOCK_EX, mddev);
}
+static void resync_info_get(struct mddev *mddev, sector_t *lo, sector_t *hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ spin_lock_irq(&cinfo->suspend_lock);
+ *lo = cinfo->suspend_lo;
+ *hi = cinfo->suspend_hi;
+ spin_unlock_irq(&cinfo->suspend_lock);
+}
+
static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
@@ -1295,21 +1382,14 @@ static int area_resyncing(struct mddev *mddev, int direction,
{
struct md_cluster_info *cinfo = mddev->cluster_info;
int ret = 0;
- struct suspend_info *s;
if ((direction == READ) &&
test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
return 1;
spin_lock_irq(&cinfo->suspend_lock);
- if (list_empty(&cinfo->suspend_list))
- goto out;
- list_for_each_entry(s, &cinfo->suspend_list, list)
- if (hi > s->lo && lo < s->hi) {
- ret = 1;
- break;
- }
-out:
+ if (hi > cinfo->suspend_lo && lo < cinfo->suspend_hi)
+ ret = 1;
spin_unlock_irq(&cinfo->suspend_lock);
return ret;
}
@@ -1482,6 +1562,7 @@ static struct md_cluster_operations cluster_ops = {
.resync_start = resync_start,
.resync_finish = resync_finish,
.resync_info_update = resync_info_update,
+ .resync_info_get = resync_info_get,
.metadata_update_start = metadata_update_start,
.metadata_update_finish = metadata_update_finish,
.metadata_update_cancel = metadata_update_cancel,
@@ -1492,6 +1573,7 @@ static struct md_cluster_operations cluster_ops = {
.remove_disk = remove_disk,
.load_bitmaps = load_bitmaps,
.gather_bitmaps = gather_bitmaps,
+ .resize_bitmaps = resize_bitmaps,
.lock_all_bitmaps = lock_all_bitmaps,
.unlock_all_bitmaps = unlock_all_bitmaps,
.update_size = update_size,
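
With a single suspended region per cluster, the list walk in area_resyncing() collapses to one interval-overlap test: two half-open ranges [lo1, hi1) and [lo2, hi2) intersect exactly when hi1 > lo2 && lo1 < hi2, and the cleared state (suspend_lo == suspend_hi == 0) can never match. A few checks of the predicate:

    #include <assert.h>
    #include <stdio.h>

    static int overlaps(unsigned long long lo1, unsigned long long hi1,
                        unsigned long long lo2, unsigned long long hi2)
    {
        return hi1 > lo2 && lo1 < hi2;
    }

    int main(void)
    {
        assert(overlaps(0, 100, 50, 150));     /* partial overlap */
        assert(!overlaps(0, 100, 100, 200));   /* touching ends: disjoint */
        assert(!overlaps(50, 150, 0, 0));      /* cleared region never matches */
        printf("overlap predicate checks pass\n");
        return 0;
    }
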
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index c0240708f443..a78e3021775d 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -14,6 +14,7 @@ struct md_cluster_operations {
int (*leave)(struct mddev *mddev);
int (*slot_number)(struct mddev *mddev);
int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
+ void (*resync_info_get)(struct mddev *mddev, sector_t *lo, sector_t *hi);
int (*metadata_update_start)(struct mddev *mddev);
int (*metadata_update_finish)(struct mddev *mddev);
void (*metadata_update_cancel)(struct mddev *mddev);
@@ -26,6 +27,7 @@ struct md_cluster_operations {
int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
void (*load_bitmaps)(struct mddev *mddev, int total_slots);
int (*gather_bitmaps)(struct md_rdev *rdev);
+ int (*resize_bitmaps)(struct mddev *mddev, sector_t newsize, sector_t oldsize);
int (*lock_all_bitmaps)(struct mddev *mddev);
void (*unlock_all_bitmaps)(struct mddev *mddev);
void (*update_size)(struct mddev *mddev, sector_t old_dev_sectors);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 63ceabb4e020..fc488cb30a94 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -452,10 +452,11 @@ static void md_end_flush(struct bio *fbio)
rdev_dec_pending(rdev, mddev);
if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0)
+ if (bio->bi_iter.bi_size == 0) {
/* an empty barrier - all done */
bio_endio(bio);
- else {
+ mempool_free(fi, mddev->flush_pool);
+ } else {
INIT_WORK(&fi->flush_work, submit_flushes);
queue_work(md_wq, &fi->flush_work);
}
@@ -509,10 +510,11 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
rcu_read_unlock();
if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0)
+ if (bio->bi_iter.bi_size == 0) {
/* an empty barrier - all done */
bio_endio(bio);
- else {
+ mempool_free(fi, mddev->flush_pool);
+ } else {
INIT_WORK(&fi->flush_work, submit_flushes);
queue_work(md_wq, &fi->flush_work);
}
@@ -5904,14 +5906,6 @@ static void __md_stop(struct mddev *mddev)
mddev->to_remove = &md_redundancy_group;
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-}
-
-void md_stop(struct mddev *mddev)
-{
- /* stop the array and free an attached data structures.
- * This is called from dm-raid
- */
- __md_stop(mddev);
if (mddev->flush_bio_pool) {
mempool_destroy(mddev->flush_bio_pool);
mddev->flush_bio_pool = NULL;
@@ -5920,6 +5914,14 @@ void md_stop(struct mddev *mddev)
mempool_destroy(mddev->flush_pool);
mddev->flush_pool = NULL;
}
+}
+
+void md_stop(struct mddev *mddev)
+{
+ /* stop the array and free any attached data structures.
+ * This is called from dm-raid
+ */
+ __md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
}
@@ -8370,9 +8372,17 @@ void md_do_sync(struct md_thread *thread)
else if (!mddev->bitmap)
j = mddev->recovery_cp;
- } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
max_sectors = mddev->resync_max_sectors;
- else {
+ /*
+ * If the original node aborts reshaping then we continue the
+ * reshaping, so set j again to avoid restarting the reshape from
+ * the very beginning.
+ */
+ if (mddev_is_clustered(mddev) &&
+ mddev->reshape_position != MaxSector)
+ j = mddev->reshape_position;
+ } else {
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
j = MaxSector;
@@ -8623,8 +8633,10 @@ void md_do_sync(struct md_thread *thread)
mddev_lock_nointr(mddev);
md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
mddev_unlock(mddev);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ if (!mddev_is_clustered(mddev)) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
}
spin_lock(&mddev->lock);
@@ -8790,6 +8802,18 @@ static void md_start_sync(struct work_struct *ws)
*/
void md_check_recovery(struct mddev *mddev)
{
+ if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
+ /* Write superblock - thread that called mddev_suspend()
+ * holds reconfig_mutex for us.
+ */
+ set_bit(MD_UPDATING_SB, &mddev->flags);
+ smp_mb__after_atomic();
+ if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
+ md_update_sb(mddev, 0);
+ clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
+ wake_up(&mddev->sb_wait);
+ }
+
if (mddev->suspended)
return;
@@ -8949,16 +8973,6 @@ void md_check_recovery(struct mddev *mddev)
unlock:
wake_up(&mddev->sb_wait);
mddev_unlock(mddev);
- } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
- /* Write superblock - thread that called mddev_suspend()
- * holds reconfig_mutex for us.
- */
- set_bit(MD_UPDATING_SB, &mddev->flags);
- smp_mb__after_atomic();
- if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
- md_update_sb(mddev, 0);
- clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
- wake_up(&mddev->sb_wait);
}
}
EXPORT_SYMBOL(md_check_recovery);
@@ -8966,6 +8980,8 @@ EXPORT_SYMBOL(md_check_recovery);
void md_reap_sync_thread(struct mddev *mddev)
{
struct md_rdev *rdev;
+ sector_t old_dev_sectors = mddev->dev_sectors;
+ bool is_reshaped = false;
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
@@ -8980,8 +8996,11 @@ void md_reap_sync_thread(struct mddev *mddev)
}
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- mddev->pers->finish_reshape)
+ mddev->pers->finish_reshape) {
mddev->pers->finish_reshape(mddev);
+ if (mddev_is_clustered(mddev))
+ is_reshaped = true;
+ }
/* If array is no-longer degraded, then any saved_raid_disk
* information must be scrapped.
@@ -9002,6 +9021,14 @@ void md_reap_sync_thread(struct mddev *mddev)
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ /*
+ * We call md_cluster_ops->update_size here because sync_size could
+ * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
+ * so it is time to update the size across the cluster.
+ */
+ if (mddev_is_clustered(mddev) && is_reshaped
+ && !test_bit(MD_CLOSING, &mddev->flags))
+ md_cluster_ops->update_size(mddev, old_dev_sectors);
wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -9201,8 +9228,12 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
}
if (role != rdev2->raid_disk) {
- /* got activated */
- if (rdev2->raid_disk == -1 && role != 0xffff) {
+ /*
+ * got activated, except when a reshape is happening.
+ */
+ if (rdev2->raid_disk == -1 && role != 0xffff &&
+ !(le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RESHAPE_ACTIVE)) {
rdev2->saved_raid_disk = role;
ret = remove_and_add_spares(mddev, rdev2);
pr_info("Activated spare: %s\n",
@@ -9228,6 +9259,30 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+ /*
+ * Since mddev->delta_disks has already been updated in
+ * update_raid_disks, it is time to check for a reshape.
+ */
+ if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+ (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
+ /*
+ * A reshape is happening on the remote node, so we need to
+ * update reshape_position and call start_reshape.
+ */
+ mddev->reshape_position = sb->reshape_position;
+ if (mddev->pers->update_reshape_pos)
+ mddev->pers->update_reshape_pos(mddev);
+ if (mddev->pers->start_reshape)
+ mddev->pers->start_reshape(mddev);
+ } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+ mddev->reshape_position != MaxSector &&
+ !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
+ /* The reshape has just finished on another node. */
+ mddev->reshape_position = MaxSector;
+ if (mddev->pers->update_reshape_pos)
+ mddev->pers->update_reshape_pos(mddev);
+ }
+
/* Finally set the event to be up to date */
mddev->events = le64_to_cpu(sb->events);
}
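
The md_end_flush()/md_flush_request() hunks above fix a leak: flush_info is shared across the per-rdev flush bios via flush_pending, and the last dropper previously freed it only on the non-empty path. A userspace model of the fixed control flow — mempool_free is modeled with free, and the names are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct flush_info {
        atomic_int flush_pending;
        int bio_size;   /* 0 models an empty barrier */
    };

    static int freed;

    static void end_flush(struct flush_info *fi)
    {
        if (atomic_fetch_sub(&fi->flush_pending, 1) != 1)
            return;   /* not the last flush bio */

        if (fi->bio_size == 0) {
            /* an empty barrier - all done; the fix adds this free */
            free(fi);
            freed = 1;
        } else {
            /* non-empty: hand fi to the submit_flushes work,
             * which frees it later */
        }
    }

    int main(void)
    {
        struct flush_info *fi = calloc(1, sizeof(*fi));

        atomic_store(&fi->flush_pending, 2);   /* two member devices */
        end_flush(fi);
        end_flush(fi);                         /* last ref: frees fi */
        printf("flush_info freed: %d\n", freed);
        return 0;
    }
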
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8afd6bfdbfb9..c52afb52c776 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -557,6 +557,7 @@ struct md_personality
int (*check_reshape) (struct mddev *mddev);
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
+ void (*update_reshape_pos) (struct mddev *mddev);
/* quiesce suspends or resumes internal processing.
* 1 - stop new actions and wait for action io to complete
* 0 - return to normal behaviour
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4e990246225e..1d54109071cc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1734,6 +1734,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
*/
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk < conf->raid_disks &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
first = last = rdev->saved_raid_disk;
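
The raid1 hunk above (and the matching raid10 one below) adds an upper bound before saved_raid_disk indexes the mirrors array, which matters when a device rejoins an array that has since shrunk. A compact model of the guarded lookup:

    #include <stdio.h>

    #define RAID_DISKS 4

    struct mirror { int in_use; };

    static int slot_reusable(struct mirror *mirrors, int raid_disks, int saved)
    {
        /* the added check: saved must index inside mirrors[] */
        return saved >= 0 && saved < raid_disks && !mirrors[saved].in_use;
    }

    int main(void)
    {
        struct mirror mirrors[RAID_DISKS] = { {1}, {0}, {1}, {1} };

        printf("saved=1: %d\n", slot_reusable(mirrors, RAID_DISKS, 1));  /* 1 */
        printf("saved=7: %d\n", slot_reusable(mirrors, RAID_DISKS, 7));  /* 0, was an OOB read */
        return 0;
    }
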
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d6f7978b4449..b98e746e7fc4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -25,6 +25,7 @@
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
+#include <linux/raid/md_p.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
@@ -1808,6 +1809,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
first = last = rdev->raid_disk;
if (rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk < conf->geo.raid_disks &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
mirror = rdev->saved_raid_disk;
else
@@ -3079,6 +3081,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sect;
int must_sync;
int any_working;
+ int need_recover = 0;
+ int need_replace = 0;
struct raid10_info *mirror = &conf->mirrors[i];
struct md_rdev *mrdev, *mreplace;
@@ -3086,11 +3090,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
mrdev = rcu_dereference(mirror->rdev);
mreplace = rcu_dereference(mirror->replacement);
- if ((mrdev == NULL ||
- test_bit(Faulty, &mrdev->flags) ||
- test_bit(In_sync, &mrdev->flags)) &&
- (mreplace == NULL ||
- test_bit(Faulty, &mreplace->flags))) {
+ if (mrdev != NULL &&
+ !test_bit(Faulty, &mrdev->flags) &&
+ !test_bit(In_sync, &mrdev->flags))
+ need_recover = 1;
+ if (mreplace != NULL &&
+ !test_bit(Faulty, &mreplace->flags))
+ need_replace = 1;
+
+ if (!need_recover && !need_replace) {
rcu_read_unlock();
continue;
}
@@ -3213,7 +3221,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->devs[1].devnum = i;
r10_bio->devs[1].addr = to_addr;
- if (!test_bit(In_sync, &mrdev->flags)) {
+ if (need_recover) {
bio = r10_bio->devs[1].bio;
bio->bi_next = biolist;
biolist = bio;
@@ -3230,16 +3238,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r10_bio->devs[1].repl_bio;
if (bio)
bio->bi_end_io = NULL;
- /* Note: if mreplace != NULL, then bio
+ /* Note: if need_replace, then bio
* cannot be NULL as r10buf_pool_alloc will
* have allocated it.
- * So the second test here is pointless.
- * But it keeps semantic-checkers happy, and
- * this comment keeps human reviewers
- * happy.
*/
- if (mreplace == NULL || bio == NULL ||
- test_bit(Faulty, &mreplace->flags))
+ if (!need_replace)
break;
bio->bi_next = biolist;
biolist = bio;
@@ -4286,12 +4289,46 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock);
if (mddev->delta_disks && mddev->bitmap) {
- ret = md_bitmap_resize(mddev->bitmap,
- raid10_size(mddev, 0, conf->geo.raid_disks),
- 0, 0);
+ struct mdp_superblock_1 *sb = NULL;
+ sector_t oldsize, newsize;
+
+ oldsize = raid10_size(mddev, 0, 0);
+ newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
+
+ if (!mddev_is_clustered(mddev)) {
+ ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
+ if (ret)
+ goto abort;
+ else
+ goto out;
+ }
+
+ rdev_for_each(rdev, mddev) {
+ if (rdev->raid_disk > -1 &&
+ !test_bit(Faulty, &rdev->flags))
+ sb = page_address(rdev->sb_page);
+ }
+
+ /*
+ * Some node is already performing reshape; no need to call
+ * md_bitmap_resize again since it will be called when the
+ * BITMAP_RESIZE msg is received.
+ */
+ if ((sb && (le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
+ goto out;
+
+ ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
if (ret)
goto abort;
+
+ ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
+ if (ret) {
+ md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
+ goto abort;
+ }
}
+out:
if (mddev->delta_disks > 0) {
rdev_for_each(rdev, mddev)
if (rdev->raid_disk < 0 &&
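
The clustered branch added above follows a resize-then-broadcast shape with rollback: the local bitmap is resized first, the other nodes are then asked to resize theirs via md_cluster_ops->resize_bitmaps(), and a failure in the second step undoes the first by resizing back to the old size. A hedged, self-contained sketch of that error-handling shape (resize_local and resize_remote are illustrative stand-ins, not the md API):

#include <stdio.h>

static int resize_local(long size)  { (void)size; return 0; }
static int resize_remote(long size) { (void)size; return -1; /* simulate failure */ }

static int resize_with_rollback(long oldsize, long newsize)
{
	int ret = resize_local(newsize);

	if (ret)
		return ret;		/* nothing to undo yet */

	ret = resize_remote(newsize);
	if (ret)
		resize_local(oldsize);	/* roll the local resize back */
	return ret;
}

int main(void)
{
	printf("result: %d\n", resize_with_rollback(100, 200));
	return 0;
}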
@@ -4568,6 +4605,32 @@ read_more:
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
+ /*
+ * Broadcast the RESYNC message to other nodes so that they do not
+ * write to the region and cause conflicts.
+ */
+ if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
+ struct mdp_superblock_1 *sb = NULL;
+ int sb_reshape_pos = 0;
+
+ conf->cluster_sync_low = sector_nr;
+ conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
+ sb = page_address(rdev->sb_page);
+ if (sb) {
+ sb_reshape_pos = le64_to_cpu(sb->reshape_position);
+ /*
+ * Set cluster_sync_low again if the next address for the array
+ * reshape is less than cluster_sync_low, since we can't update
+ * cluster_sync_low until the reshape has finished.
+ */
+ if (sb_reshape_pos < conf->cluster_sync_low)
+ conf->cluster_sync_low = sb_reshape_pos;
+ }
+
+ md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
+ conf->cluster_sync_high);
+ }
+
/* Now find the locations in the new layout */
__raid10_find_phys(&conf->geo, r10_bio);
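
The broadcast added above maintains a [cluster_sync_low, cluster_sync_high) window that other nodes must not write to, and clamps the low edge down to the superblock's reshape_position so that nothing the reshape has not yet committed is ever exposed. A hedged sketch of just that window arithmetic (names and the window span are illustrative):

typedef unsigned long long demo_sector_t;

#define DEMO_WINDOW_SECTORS (32 * 2048ULL)

static void advance_window(demo_sector_t sector_nr,
			   demo_sector_t sb_reshape_pos,
			   demo_sector_t *low, demo_sector_t *high)
{
	*low = sector_nr;
	*high = sector_nr + DEMO_WINDOW_SECTORS;

	/* Never advertise past what the reshape has committed. */
	if (sb_reshape_pos < *low)
		*low = sb_reshape_pos;
}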
@@ -4719,6 +4782,19 @@ static void end_reshape(struct r10conf *conf)
conf->fullsync = 0;
}
+static void raid10_update_reshape_pos(struct mddev *mddev)
+{
+ struct r10conf *conf = mddev->private;
+ sector_t lo, hi;
+
+ md_cluster_ops->resync_info_get(mddev, &lo, &hi);
+ if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
+ || mddev->reshape_position == MaxSector)
+ conf->reshape_progress = mddev->reshape_position;
+ else
+ WARN_ON_ONCE(1);
+}
+
static int handle_reshape_read_error(struct mddev *mddev,
struct r10bio *r10_bio)
{
@@ -4887,6 +4963,7 @@ static struct md_personality raid10_personality =
.check_reshape = raid10_check_reshape,
.start_reshape = raid10_start_reshape,
.finish_reshape = raid10_finish_reshape,
+ .update_reshape_pos = raid10_update_reshape_pos,
.congested = raid10_congested,
};
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index e6e925add700..ec3a5ef7fee0 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -3151,8 +3151,6 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
- rcu_assign_pointer(conf->log, NULL);
- md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
mempool_exit(&log->meta_pool);
out_mempool:
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e4e98f47865d..4990f0319f6c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2681,6 +2681,18 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
pr_debug("raid456: error called\n");
spin_lock_irqsave(&conf->device_lock, flags);
+
+ if (test_bit(In_sync, &rdev->flags) &&
+ mddev->degraded == conf->max_degraded) {
+ /*
+ * Don't allow the array to reach the failed state;
+ * don't try to recover this device.
+ */
+ conf->recovery_disabled = mddev->recovery_disabled;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ return;
+ }
+
set_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
mddev->degraded = raid5_calc_degraded(conf);
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 8e799ae1df69..67481fc82445 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -116,6 +116,7 @@ static void em28xx_audio_isocirq(struct urb *urb)
stride = runtime->frame_bits >> 3;
for (i = 0; i < urb->number_of_packets; i++) {
+ unsigned long flags;
int length =
urb->iso_frame_desc[i].actual_length / stride;
cp = (unsigned char *)urb->transfer_buffer +
@@ -137,7 +138,7 @@ static void em28xx_audio_isocirq(struct urb *urb)
length * stride);
}
- snd_pcm_stream_lock(substream);
+ snd_pcm_stream_lock_irqsave(substream, flags);
dev->adev.hwptr_done_capture += length;
if (dev->adev.hwptr_done_capture >=
@@ -153,7 +154,7 @@ static void em28xx_audio_isocirq(struct urb *urb)
period_elapsed = 1;
}
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
}
if (period_elapsed)
snd_pcm_period_elapsed(substream);
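
This hunk, and the em28xx-core and tm6000 hunks below, apply the same fix: a USB completion handler may run in interrupt context, so a lock shared with it must be taken with the _irqsave variants, which save and restore the caller's interrupt state instead of assuming interrupts were enabled. A minimal sketch of the pattern (generic lock and state, not the em28xx structures):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Safe from any context: the saved flags make unlock restore whatever
 * interrupt state the caller had, rather than unconditionally enabling. */
static void touch_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... update state shared with the URB completion handler ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}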
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 5657f8710ca6..2b8c84a5c9a8 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -777,6 +777,7 @@ EXPORT_SYMBOL_GPL(em28xx_set_mode);
static void em28xx_irq_callback(struct urb *urb)
{
struct em28xx *dev = urb->context;
+ unsigned long flags;
int i;
switch (urb->status) {
@@ -793,9 +794,9 @@ static void em28xx_irq_callback(struct urb *urb)
}
/* Copy data from URB */
- spin_lock(&dev->slock);
+ spin_lock_irqsave(&dev->slock, flags);
dev->usb_ctl.urb_data_copy(dev, urb);
- spin_unlock(&dev->slock);
+ spin_unlock_irqrestore(&dev->slock, flags);
/* Reset urb buffers */
for (i = 0; i < urb->number_of_packets; i++) {
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 96055de6e8ce..7d268f2404e1 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -419,6 +419,7 @@ static void tm6000_irq_callback(struct urb *urb)
{
struct tm6000_dmaqueue *dma_q = urb->context;
struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ unsigned long flags;
int i;
switch (urb->status) {
@@ -436,9 +437,9 @@ static void tm6000_irq_callback(struct urb *urb)
break;
}
- spin_lock(&dev->slock);
+ spin_lock_irqsave(&dev->slock, flags);
tm6000_isoc_copy(urb);
- spin_unlock(&dev->slock);
+ spin_unlock_irqrestore(&dev->slock, flags);
/* Reset urb buffers */
for (i = 0; i < urb->number_of_packets; i++) {
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 059997f8ebce..178f414ea8f9 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -2004,7 +2004,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_6
U64 LinkFailureCount; /* 50h */
U64 LossOfSyncCount; /* 58h */
U64 LossOfSignalCount; /* 60h */
- U64 PrimativeSeqErrCount; /* 68h */
+ U64 PrimitiveSeqErrCount; /* 68h */
U64 InvalidTxWordCount; /* 70h */
U64 InvalidCrcCount; /* 78h */
U64 FcpInitiatorIoCount; /* 80h */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e6b4ae558767..ba551d8dfba4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -335,11 +335,11 @@ static int mpt_remove_dead_ioc_func(void *arg)
MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
struct pci_dev *pdev;
- if ((ioc == NULL))
+ if (!ioc)
return -1;
pdev = ioc->pcidev;
- if ((pdev == NULL))
+ if (!pdev)
return -1;
pci_stop_and_remove_bus_device_locked(pdev);
@@ -7570,11 +7570,11 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
u8 phy_num = (u8)(evData0);
u8 port_num = (u8)(evData0 >> 8);
u8 port_width = (u8)(evData0 >> 16);
- u8 primative = (u8)(evData0 >> 24);
+ u8 primitive = (u8)(evData0 >> 24);
snprintf(evStr, EVENT_DESCR_STR_SZ,
- "SAS Broadcase Primative: phy=%d port=%d "
- "width=%d primative=0x%02x",
- phy_num, port_num, port_width, primative);
+ "SAS Broadcast Primitive: phy=%d port=%d "
+ "width=%d primitive=0x%02x",
+ phy_num, port_num, port_width, primitive);
break;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b8cf2658649e..9b404fc69c90 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -129,7 +129,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc,
static void mptsas_send_expander_event(struct fw_event_work *fw_event);
static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
-static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
+static void mptsas_broadcast_primitive_work(struct fw_event_work *fw_event);
static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
void mptsas_schedule_target_reset(void *ioc);
@@ -1665,7 +1665,7 @@ mptsas_firmware_event_work(struct work_struct *work)
mptsas_free_fw_event(ioc, fw_event);
break;
case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
- mptsas_broadcast_primative_work(fw_event);
+ mptsas_broadcast_primitive_work(fw_event);
break;
case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
mptsas_send_expander_event(fw_event);
@@ -4826,13 +4826,13 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
}
/**
- * mptsas_broadcast_primative_work - Handle broadcast primitives
+ * mptsas_broadcast_primitive_work - Handle broadcast primitives
* @work: work queue payload containing info describing the event
*
* This will be handled in workqueue context.
*/
static void
-mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
+mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc = fw_event->ioc;
MPT_FRAME_HDR *mf;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 11841f4b7b2b..8c5dfdce4326 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -99,6 +99,15 @@ config MFD_AAT2870_CORE
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_AT91_USART
+ tristate "AT91 USART Driver"
+ select MFD_CORE
+ help
+ Select this to get support for the AT91 USART IP. This is a wrapper
+ over the at91-usart-serial driver and the usart-spi driver. Only one
+ function can be used at a time; the choice is made at boot time by
+ the probe function of this MFD driver according to a device tree
+ property.
+
config MFD_ATMEL_FLEXCOM
tristate "Atmel Flexcom (Flexible Serial Communication Unit)"
select MFD_CORE
@@ -1023,16 +1032,23 @@ config MFD_RN5T618
functionality of the device.
config MFD_SEC_CORE
- bool "SAMSUNG Electronics PMIC Series Support"
+ tristate "SAMSUNG Electronics PMIC Series Support"
depends on I2C=y
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
help
- Support for the Samsung Electronics MFD series.
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the functionality
- of the device
+ Support for the Samsung Electronics PMIC devices, usually found
+ alongside Samsung Exynos SoC chipsets.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sec-core.
+ Keep in mind that important core drivers (like regulators) depend
+ on this driver, so building it as a module might require a proper
+ initial ramdisk or might fail to boot in certain scenarios.
config MFD_SI476X_CORE
tristate "Silicon Laboratories 4761/64/68 AM/FM radio."
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 5856a9489cbd..12980a4ad460 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -196,6 +196,7 @@ obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_TPS65090) += tps65090.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
+obj-$(CONFIG_MFD_AT91_USART) += at91-usart.o
obj-$(CONFIG_MFD_ATMEL_FLEXCOM) += atmel-flexcom.o
obj-$(CONFIG_MFD_ATMEL_HLCDC) += atmel-hlcdc.o
obj-$(CONFIG_MFD_ATMEL_SMC) += atmel-smc.o
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index d817f202da5b..be0497b96720 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -360,6 +360,6 @@ static struct i2c_driver adp5520_driver = {
module_i2c_driver(adp5520_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP5520(01) PMIC-MFD Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 5f1e37d23943..27b61639cdc7 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -52,8 +52,10 @@ int arizona_clk32k_enable(struct arizona *arizona)
if (ret != 0)
goto err_ref;
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
- if (ret != 0)
- goto err_pm;
+ if (ret != 0) {
+ pm_runtime_put_sync(arizona->dev);
+ goto err_ref;
+ }
break;
case ARIZONA_32KZ_MCLK2:
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]);
@@ -67,8 +69,6 @@ int arizona_clk32k_enable(struct arizona *arizona)
ARIZONA_CLK_32K_ENA);
}
-err_pm:
- pm_runtime_put_sync(arizona->dev);
err_ref:
if (ret != 0)
arizona->clk32k_ref--;
@@ -990,7 +990,7 @@ static const struct mfd_cell wm8998_devs[] = {
int arizona_dev_init(struct arizona *arizona)
{
- const char * const mclk_name[] = { "mclk1", "mclk2" };
+ static const char * const mclk_name[] = { "mclk1", "mclk2" };
struct device *dev = arizona->dev;
const char *type_name = NULL;
unsigned int reg, val;
diff --git a/drivers/mfd/at91-usart.c b/drivers/mfd/at91-usart.c
new file mode 100644
index 000000000000..d20747f612c1
--- /dev/null
+++ b/drivers/mfd/at91-usart.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for AT91 USART
+ *
+ * Copyright (C) 2018 Microchip Technology
+ *
+ * Author: Radu Pirea <radu.pirea@microchip.com>
+ *
+ */
+
+#include <dt-bindings/mfd/at91-usart.h>
+
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/of.h>
+#include <linux/property.h>
+
+static struct mfd_cell at91_usart_spi_subdev = {
+ .name = "at91_usart_spi",
+ .of_compatible = "microchip,at91sam9g45-usart-spi",
+ };
+
+static struct mfd_cell at91_usart_serial_subdev = {
+ .name = "atmel_usart_serial",
+ .of_compatible = "atmel,at91rm9200-usart-serial",
+ };
+
+static int at91_usart_mode_probe(struct platform_device *pdev)
+{
+ struct mfd_cell cell;
+ u32 opmode = AT91_USART_MODE_SERIAL;
+
+ device_property_read_u32(&pdev->dev, "atmel,usart-mode", &opmode);
+
+ switch (opmode) {
+ case AT91_USART_MODE_SPI:
+ cell = at91_usart_spi_subdev;
+ break;
+ case AT91_USART_MODE_SERIAL:
+ cell = at91_usart_serial_subdev;
+ break;
+ default:
+ dev_err(&pdev->dev, "atmel,usart-mode has an invalid value %u\n",
+ opmode);
+ return -EINVAL;
+ }
+
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, &cell, 1,
+ NULL, 0, NULL);
+}
+
+static const struct of_device_id at91_usart_mode_of_match[] = {
+ { .compatible = "atmel,at91rm9200-usart" },
+ { .compatible = "atmel,at91sam9260-usart" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91_usart_mode_of_match);
+
+static struct platform_driver at91_usart_mfd = {
+ .probe = at91_usart_mode_probe,
+ .driver = {
+ .name = "at91_usart_mode",
+ .of_match_table = at91_usart_mode_of_match,
+ },
+};
+
+module_platform_driver(at91_usart_mfd);
+
+MODULE_AUTHOR("Radu Pirea <radu.pirea@microchip.com>");
+MODULE_DESCRIPTION("AT91 USART MFD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index 65a9757a6d21..fe6f83766144 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -218,7 +218,8 @@ EXPORT_SYMBOL(cros_ec_suspend);
static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
- while (cros_ec_get_next_event(ec_dev, NULL) > 0)
+ while (ec_dev->mkbp_event_supported &&
+ cros_ec_get_next_event(ec_dev, NULL) > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
1, ec_dev);
}
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 999dac752bcc..8f9d6964173e 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -546,6 +546,7 @@ static struct platform_driver cros_ec_dev_driver = {
.name = DRV_NAME,
.pm = &cros_ec_dev_pm_ops,
},
+ .id_table = cros_ec_id,
.probe = ec_device_probe,
.remove = ec_device_remove,
.shutdown = ec_device_shutdown,
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 2017446c5b4b..bb24c2a07900 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Intel MSIC
*
* Copyright (C) 2011, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/err.h>
@@ -54,68 +51,44 @@ struct intel_msic {
};
static struct resource msic_touch_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_adc_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_battery_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_gpio_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_audio_resources[] = {
- {
- .name = "IRQ",
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(0, "IRQ"),
/*
* We will pass IRQ_BASE to the driver now but this can be removed
* when/if the driver starts to use intel_msic_irq_read().
*/
- {
- .name = "IRQ_BASE",
- .flags = IORESOURCE_MEM,
- .start = MSIC_IRQ_STATUS_ACCDET,
- .end = MSIC_IRQ_STATUS_ACCDET,
- },
+ DEFINE_RES_MEM_NAMED(MSIC_IRQ_STATUS_ACCDET, 1, "IRQ_BASE"),
};
static struct resource msic_hdmi_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_thermal_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_power_btn_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_ocd_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
/*
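
The conversions above lean on the resource helpers from <linux/ioport.h>. Paraphrased (check your tree for the exact definitions), that macro family reduces to a designated initializer, so DEFINE_RES_IRQ(0) is the one-line spelling of the struct the old code wrote out by hand; the MY_-prefixed names below are an illustrative re-derivation, not the kernel macros:

#include <linux/ioport.h>

#define MY_RES_NAMED(_start, _size, _name, _flags)	\
	{						\
		.start = (_start),			\
		.end   = (_start) + (_size) - 1,	\
		.name  = (_name),			\
		.flags = (_flags),			\
	}

/* A one-entry IRQ resource, equivalent to the old open-coded form. */
#define MY_RES_IRQ(_irq) MY_RES_NAMED((_irq), 1, NULL, IORESOURCE_IRQ)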
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 15bc052704a6..6310c3bdb991 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -1,27 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MFD core driver for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
-#include <linux/module.h>
#include <linux/acpi.h>
-#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/mfd/intel_soc_pmic_bxtwc.h>
+#include <linux/module.h>
+
#include <asm/intel_pmc_ipc.h>
/* PMIC device registers */
@@ -31,8 +24,8 @@
/* Interrupt Status Registers */
#define BXTWC_IRQLVL1 0x4E02
-#define BXTWC_PWRBTNIRQ 0x4E03
+#define BXTWC_PWRBTNIRQ 0x4E03
#define BXTWC_THRM0IRQ 0x4E04
#define BXTWC_THRM1IRQ 0x4E05
#define BXTWC_THRM2IRQ 0x4E06
@@ -47,10 +40,9 @@
/* Interrupt MASK Registers */
#define BXTWC_MIRQLVL1 0x4E0E
-#define BXTWC_MPWRTNIRQ 0x4E0F
-
#define BXTWC_MIRQLVL1_MCHGR BIT(5)
+#define BXTWC_MPWRBTNIRQ 0x4E0F
#define BXTWC_MTHRM0IRQ 0x4E12
#define BXTWC_MTHRM1IRQ 0x4E13
#define BXTWC_MTHRM2IRQ 0x4E14
@@ -66,9 +58,7 @@
/* Whiskey Cove PMIC shares the same ACPI ID between different platforms */
#define BROXTON_PMIC_WC_HRV 4
-/* Manage in two IRQ chips since mask registers are not consecutive */
enum bxtwc_irqs {
- /* Level 1 */
BXTWC_PWRBTN_LVL1_IRQ = 0,
BXTWC_TMU_LVL1_IRQ,
BXTWC_THRM_LVL1_IRQ,
@@ -77,9 +67,11 @@ enum bxtwc_irqs {
BXTWC_CHGR_LVL1_IRQ,
BXTWC_GPIO_LVL1_IRQ,
BXTWC_CRIT_LVL1_IRQ,
+};
- /* Level 2 */
- BXTWC_PWRBTN_IRQ,
+enum bxtwc_irqs_pwrbtn {
+ BXTWC_PWRBTN_IRQ = 0,
+ BXTWC_UIBTN_IRQ,
};
enum bxtwc_irqs_bcu {
@@ -113,7 +105,10 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = {
REGMAP_IRQ_REG(BXTWC_CHGR_LVL1_IRQ, 0, BIT(5)),
REGMAP_IRQ_REG(BXTWC_GPIO_LVL1_IRQ, 0, BIT(6)),
REGMAP_IRQ_REG(BXTWC_CRIT_LVL1_IRQ, 0, BIT(7)),
- REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 1, 0x03),
+};
+
+static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = {
+ REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01),
};
static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = {
@@ -125,7 +120,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_adc[] = {
};
static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = {
- REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)),
+ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20),
REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f),
REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f),
};
@@ -144,7 +139,16 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.mask_base = BXTWC_MIRQLVL1,
.irqs = bxtwc_regmap_irqs,
.num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs),
- .num_regs = 2,
+ .num_regs = 1,
+};
+
+static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
+ .name = "bxtwc_irq_chip_pwrbtn",
+ .status_base = BXTWC_PWRBTNIRQ,
+ .mask_base = BXTWC_MPWRBTNIRQ,
+ .irqs = bxtwc_regmap_irqs_pwrbtn,
+ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_pwrbtn),
+ .num_regs = 1,
};
static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
@@ -473,6 +477,16 @@ static int bxtwc_probe(struct platform_device *pdev)
}
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+ BXTWC_PWRBTN_LVL1_IRQ,
+ IRQF_ONESHOT,
+ &bxtwc_regmap_irq_chip_pwrbtn,
+ &pmic->irq_chip_data_pwrbtn);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n");
+ return ret;
+ }
+
+ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_TMU_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_tmu,
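
Because the PWRBTN mask register (0x4E0F) is not adjacent to the level-1 mask register, the power-button interrupt can no longer share a two-register regmap-irq chip with the level-1 bits; the patch splits it into its own chip chained off BXTWC_PWRBTN_LVL1_IRQ. A hedged sketch of what such chaining boils down to (bxtwc_add_chained_irq_chip() is local to this driver; its behavior is assumed from context):

#include <linux/interrupt.h>
#include <linux/regmap.h>

static int add_chained_chip(struct regmap *regmap,
			    struct regmap_irq_chip_data *parent,
			    int parent_irq_offset,
			    const struct regmap_irq_chip *chip,
			    struct regmap_irq_chip_data **data)
{
	/* Resolve the virq of the level-1 summary bit... */
	int irq = regmap_irq_get_virq(parent, parent_irq_offset);

	if (irq < 0)
		return irq;
	/* ...and hang the second-level chip off that interrupt. */
	return regmap_add_irq_chip(regmap, irq, IRQF_ONESHOT, 0, chip, data);
}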
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 861277c6580a..64b5c3cc30e7 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Device access for Dollar Cove TI PMIC
*
@@ -6,10 +7,6 @@
*
* Cleanup and forward-ported
* Copyright (c) 2017 Takashi Iwai <tiwai@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index b35da01d5bcf..64a3aece9c5e 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MFD core driver for Intel Cherrytrail Whiskey Cove PMIC
*
@@ -5,10 +6,6 @@
*
* Based on various non-upstream patches to support the CHT Whiskey Cove PMIC:
* Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 274306d98ac1..c9f35378d391 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -1,31 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_soc_pmic_core.c - Intel SoC PMIC MFD Driver
+ * Intel SoC PMIC MFD Driver
*
* Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/module.h>
-#include <linux/mfd/core.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/gpio/consumer.h>
-#include <linux/acpi.h>
-#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
-#include <linux/gpio/machine.h>
#include <linux/pwm.h>
+#include <linux/regmap.h>
+
#include "intel_soc_pmic_core.h"
/* Crystal Cove PMIC shares same ACPI ID between different platforms */
diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h
index 90a1416d4dac..d490685845eb 100644
--- a/drivers/mfd/intel_soc_pmic_core.h
+++ b/drivers/mfd/intel_soc_pmic_core.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * intel_soc_pmic_core.h - Intel SoC PMIC MFD Driver
+ * Intel SoC PMIC MFD Driver
*
* Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 6d19a6d0fb97..b6ab72fa0569 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -1,25 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_soc_pmic_crc.c - Device access for Crystal Cove PMIC
+ * Device access for Crystal Cove PMIC
*
* Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/mfd/core.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
+
#include "intel_soc_pmic_core.h"
#define CRYSTAL_COVE_MAX_REGISTER 0xC6
@@ -36,48 +29,23 @@
#define CRYSTAL_COVE_IRQ_VHDMIOCP 6
static struct resource gpio_resources[] = {
- {
- .name = "GPIO",
- .start = CRYSTAL_COVE_IRQ_GPIO,
- .end = CRYSTAL_COVE_IRQ_GPIO,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_GPIO, "GPIO"),
};
static struct resource pwrsrc_resources[] = {
- {
- .name = "PWRSRC",
- .start = CRYSTAL_COVE_IRQ_PWRSRC,
- .end = CRYSTAL_COVE_IRQ_PWRSRC,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_PWRSRC, "PWRSRC"),
};
static struct resource adc_resources[] = {
- {
- .name = "ADC",
- .start = CRYSTAL_COVE_IRQ_ADC,
- .end = CRYSTAL_COVE_IRQ_ADC,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_ADC, "ADC"),
};
static struct resource thermal_resources[] = {
- {
- .name = "THERMAL",
- .start = CRYSTAL_COVE_IRQ_THRM,
- .end = CRYSTAL_COVE_IRQ_THRM,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_THRM, "THERMAL"),
};
static struct resource bcu_resources[] = {
- {
- .name = "BCU",
- .start = CRYSTAL_COVE_IRQ_BCU,
- .end = CRYSTAL_COVE_IRQ_BCU,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_BCU, "BCU"),
};
static struct mfd_cell crystal_cove_byt_dev[] = {
@@ -134,27 +102,13 @@ static const struct regmap_config crystal_cove_regmap_config = {
};
static const struct regmap_irq crystal_cove_irqs[] = {
- [CRYSTAL_COVE_IRQ_PWRSRC] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_PWRSRC),
- },
- [CRYSTAL_COVE_IRQ_THRM] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_THRM),
- },
- [CRYSTAL_COVE_IRQ_BCU] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_BCU),
- },
- [CRYSTAL_COVE_IRQ_ADC] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_ADC),
- },
- [CRYSTAL_COVE_IRQ_CHGR] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_CHGR),
- },
- [CRYSTAL_COVE_IRQ_GPIO] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_GPIO),
- },
- [CRYSTAL_COVE_IRQ_VHDMIOCP] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_VHDMIOCP),
- },
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_PWRSRC, 0, BIT(CRYSTAL_COVE_IRQ_PWRSRC)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_THRM, 0, BIT(CRYSTAL_COVE_IRQ_THRM)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_BCU, 0, BIT(CRYSTAL_COVE_IRQ_BCU)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_ADC, 0, BIT(CRYSTAL_COVE_IRQ_ADC)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_CHGR, 0, BIT(CRYSTAL_COVE_IRQ_CHGR)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_GPIO, 0, BIT(CRYSTAL_COVE_IRQ_GPIO)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_VHDMIOCP, 0, BIT(CRYSTAL_COVE_IRQ_VHDMIOCP)),
};
static const struct regmap_irq_chip crystal_cove_irq_chip = {
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 8cfea969b060..440030cecbbd 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -132,32 +132,39 @@ const char *madera_name_from_type(enum madera_type type)
}
EXPORT_SYMBOL_GPL(madera_name_from_type);
-#define MADERA_BOOT_POLL_MAX_INTERVAL_US 5000
-#define MADERA_BOOT_POLL_TIMEOUT_US 25000
+#define MADERA_BOOT_POLL_INTERVAL_USEC 5000
+#define MADERA_BOOT_POLL_TIMEOUT_USEC 25000
static int madera_wait_for_boot(struct madera *madera)
{
+ ktime_t timeout;
unsigned int val;
- int ret;
+ int ret = 0;
/*
* We can't use an interrupt as we need to runtime resume to do so,
* so we poll the status bit. This won't race with the interrupt
* handler because it will be blocked on runtime resume.
+ * The chip could NAK a read request while it is booting, so ignore
+ * errors from regmap_read().
*/
- ret = regmap_read_poll_timeout(madera->regmap,
- MADERA_IRQ1_RAW_STATUS_1,
- val,
- (val & MADERA_BOOT_DONE_STS1),
- MADERA_BOOT_POLL_MAX_INTERVAL_US,
- MADERA_BOOT_POLL_TIMEOUT_US);
-
- if (ret)
- dev_err(madera->dev, "Polling BOOT_DONE_STS failed: %d\n", ret);
+ timeout = ktime_add_us(ktime_get(), MADERA_BOOT_POLL_TIMEOUT_USEC);
+ regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
+ while (!(val & MADERA_BOOT_DONE_STS1) &&
+ !ktime_after(ktime_get(), timeout)) {
+ usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2,
+ MADERA_BOOT_POLL_INTERVAL_USEC);
+ regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
+ }
+
+ if (!(val & MADERA_BOOT_DONE_STS1)) {
+ dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n");
+ ret = -ETIMEDOUT;
+ }
/*
* BOOT_DONE defaults to unmasked on boot so we must ack it.
- * Do this unconditionally to avoid interrupt storms.
+ * Do this even after a timeout to avoid interrupt storms.
*/
regmap_write(madera->regmap, MADERA_IRQ1_STATUS_1,
MADERA_BOOT_DONE_EINT1);
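
The rewrite above drops regmap_read_poll_timeout() because that helper bails out on the first read error, while this chip may legitimately NAK reads during boot; the replacement polls against a ktime deadline and ignores read errors. A hedged, generic sketch of that polling shape (the callback-based interface is illustrative, not a kernel API):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>

static int poll_bit_ignoring_errors(int (*read_cb)(void *arg, unsigned int *val),
				    void *arg, unsigned int mask,
				    unsigned int interval_us,
				    unsigned int timeout_us)
{
	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);
	unsigned int val = 0;

	do {
		read_cb(arg, &val);	/* errors intentionally ignored */
		if (val & mask)
			return 0;
		usleep_range(interval_us / 2, interval_us);
	} while (!ktime_after(ktime_get(), deadline));

	return -ETIMEDOUT;
}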
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 6cbe96b28f42..ebb13d5de530 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -1,22 +1,12 @@
-/*
- * max14577.c - mfd core driver for the Maxim 14577/77836
- *
- * Copyright (C) 2014 Samsung Electronics
- * Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max14577.c - mfd core driver for the Maxim 14577/77836
+//
+// Copyright (C) 2014 Samsung Electronics
+// Chanwoo Choi <cw00.choi@samsung.com>
+// Krzysztof Kozlowski <krzk@kernel.org>
+//
+// This driver is based on max8997.c
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index b1700b5fa640..d8217366ed36 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -285,7 +285,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
}
if (fps_id == MAX77620_FPS_COUNT) {
- dev_err(dev, "FPS node name %s is not valid\n", fps_np->name);
+ dev_err(dev, "FPS node name %pOFn is not valid\n", fps_np);
return -EINVAL;
}
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index b0e8e13c0049..71faf503844b 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -1,26 +1,12 @@
-/*
- * max77686.c - mfd core driver for the Maxim 77686/802
- *
- * Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@samsung.com>
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77686.c - mfd core driver for the Maxim 77686/802
+//
+// Copyright (C) 2012 Samsung Electronics
+// Chiwoong Byun <woong.byun@samsung.com>
+// Jonghwa Lee <jonghwa3.lee@samsung.com>
+//
+// This driver is based on max8997.c
#include <linux/export.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 1c05ea0cba61..901d99d65924 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -1,27 +1,13 @@
-/*
- * max77693.c - mfd core driver for the MAX 77693
- *
- * Copyright (C) 2012 Samsung Electronics
- * SangYoung Son <hello.son@samsung.com>
- *
- * This program is not provided / owned by Maxim Integrated Products.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77693.c - mfd core driver for the MAX 77693
+//
+// Copyright (C) 2012 Samsung Electronics
+// SangYoung Son <hello.son@samsung.com>
+//
+// This program is not provided / owned by Maxim Integrated Products.
+//
+// This driver is based on max8997.c
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index da9612dbb222..25cbb2242b26 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -1,15 +1,10 @@
-/*
- * MFD core driver for the Maxim MAX77843
- *
- * Copyright (C) 2015 Samsung Electronics
- * Author: Jaewon Kim <jaewon02.kim@samsung.com>
- * Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// MFD core driver for the Maxim MAX77843
+//
+// Copyright (C) 2015 Samsung Electronics
+// Author: Jaewon Kim <jaewon02.kim@samsung.com>
+// Author: Beomho Seo <beomho.seo@samsung.com>
#include <linux/err.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index 326f17b632a7..93a3b1698d9c 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -1,25 +1,11 @@
-/*
- * max8997-irq.c - Interrupt controller support for MAX8997
- *
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8998-irq.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997-irq.c - Interrupt controller support for MAX8997
+//
+// Copyright (C) 2011 Samsung Electronics Co.Ltd
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max8998-irq.c
#include <linux/err.h>
#include <linux/irq.h>
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 3f554c447521..8c06c09e36d1 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -1,25 +1,11 @@
-/*
- * max8997.c - mfd core driver for the Maxim 8966 and 8997
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8998.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997.c - mfd core driver for the Maxim 8966 and 8997
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max8998.c
#include <linux/err.h>
#include <linux/slab.h>
@@ -153,12 +139,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
pd->ono = irq_of_parse_and_map(dev->of_node, 1);
- /*
- * ToDo: the 'wakeup' member in the platform data is more of a linux
- * specfic information. Hence, there is no binding for that yet and
- * not parsed here.
- */
-
return pd;
}
@@ -246,7 +226,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
*/
/* MAX8997 has a power button input. */
- device_init_wakeup(max8997->dev, pdata->wakeup);
+ device_init_wakeup(max8997->dev, true);
return ret;
@@ -468,6 +448,7 @@ static int max8997_suspend(struct device *dev)
struct i2c_client *i2c = to_i2c_client(dev);
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ disable_irq(max8997->irq);
if (device_may_wakeup(dev))
irq_set_irq_wake(max8997->irq, 1);
return 0;
@@ -480,6 +461,7 @@ static int max8997_resume(struct device *dev)
if (device_may_wakeup(dev))
irq_set_irq_wake(max8997->irq, 0);
+ enable_irq(max8997->irq);
return max8997_irq_resume(max8997);
}
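
The suspend/resume hunks above bracket the suspended period with disable_irq()/enable_irq() so that no PMIC interrupt is serviced while the I2C bus may be unavailable, while still arming the interrupt as a wakeup source when the device is allowed to wake the system. A minimal sketch of the pairing (demo_* names are illustrative):

#include <linux/device.h>
#include <linux/interrupt.h>

static int demo_suspend(struct device *dev, int irq)
{
	disable_irq(irq);		/* no handler runs while suspended */
	if (device_may_wakeup(dev))
		irq_set_irq_wake(irq, 1);
	return 0;
}

static int demo_resume(struct device *dev, int irq)
{
	if (device_may_wakeup(dev))
		irq_set_irq_wake(irq, 0);
	enable_irq(irq);		/* balanced with the disable above */
	return 0;
}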
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 90bad9ffa7e2..83b6f510bc05 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -1,15 +1,9 @@
-/*
- * Interrupt controller support for MAX8998
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Interrupt controller support for MAX8998
+//
+// Copyright (C) 2010 Samsung Electronics Co.Ltd
+// Author: Joonyoung Shim <jy0922.shim@samsung.com>
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index b1d3f70782d9..56409df120f8 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -1,24 +1,10 @@
-/*
- * max8998.c - mfd core driver for the Maxim 8998
- *
- * Copyright (C) 2009-2010 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- * Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8998.c - mfd core driver for the Maxim 8998
+//
+// Copyright (C) 2009-2010 Samsung Electronics
+// Kyungmin Park <kyungmin.park@samsung.com>
+// Marek Szyprowski <m.szyprowski@samsung.com>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index c63e331738c1..f475e848252f 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -276,7 +276,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
- adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
+ adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
+ MC13XXX_ADC0_CHRGRAWDIV;
adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
/*
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 29b7164a823b..d28ebe7ecd21 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1094,6 +1094,7 @@ static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
static inline void menelaus_rtc_init(struct menelaus_chip *m)
{
int alarm = (m->client->irq > 0);
+ int err;
/* assume 32KDETEN pin is pulled high */
if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
@@ -1101,6 +1102,12 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
return;
}
+ m->rtc = devm_rtc_allocate_device(&m->client->dev);
+ if (IS_ERR(m->rtc))
+ return;
+
+ m->rtc->ops = &menelaus_rtc_ops;
+
/* support RTC alarm; it can issue wakeups */
if (alarm) {
if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
@@ -1125,10 +1132,8 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
}
- m->rtc = rtc_device_register(DRIVER_NAME,
- &m->client->dev,
- &menelaus_rtc_ops, THIS_MODULE);
- if (IS_ERR(m->rtc)) {
+ err = rtc_register_device(m->rtc);
+ if (err) {
if (alarm) {
menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
device_init_wakeup(&m->client->dev, 0);
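
The menelaus change above moves to the allocate-early/register-late RTC pattern: devm_rtc_allocate_device() returns a device whose ops can be filled in before rtc_register_device() makes it visible to userspace, and devres handles the teardown that the old register/unregister pairing used to need. A hedged sketch of the pattern (demo_ops is an assumed, empty rtc_class_ops table):

#include <linux/err.h>
#include <linux/rtc.h>

static const struct rtc_class_ops demo_ops;

static int demo_rtc_setup(struct device *dev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &demo_ops;	/* configure before going visible */

	return rtc_register_device(rtc);
}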
diff --git a/drivers/mfd/motorola-cpcap.c b/drivers/mfd/motorola-cpcap.c
index 5276911caaec..20d9692640e1 100644
--- a/drivers/mfd/motorola-cpcap.c
+++ b/drivers/mfd/motorola-cpcap.c
@@ -18,6 +18,7 @@
#include <linux/regmap.h>
#include <linux/sysfs.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/motorola-cpcap.h>
#include <linux/spi/spi.h>
@@ -216,6 +217,53 @@ static const struct regmap_config cpcap_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+static const struct mfd_cell cpcap_mfd_devices[] = {
+ {
+ .name = "cpcap_adc",
+ .of_compatible = "motorola,mapphone-cpcap-adc",
+ }, {
+ .name = "cpcap_battery",
+ .of_compatible = "motorola,cpcap-battery",
+ }, {
+ .name = "cpcap-charger",
+ .of_compatible = "motorola,mapphone-cpcap-charger",
+ }, {
+ .name = "cpcap-regulator",
+ .of_compatible = "motorola,mapphone-cpcap-regulator",
+ }, {
+ .name = "cpcap-rtc",
+ .of_compatible = "motorola,cpcap-rtc",
+ }, {
+ .name = "cpcap-pwrbutton",
+ .of_compatible = "motorola,cpcap-pwrbutton",
+ }, {
+ .name = "cpcap-usb-phy",
+ .of_compatible = "motorola,mapphone-cpcap-usb-phy",
+ }, {
+ .name = "cpcap-led",
+ .id = 0,
+ .of_compatible = "motorola,cpcap-led-red",
+ }, {
+ .name = "cpcap-led",
+ .id = 1,
+ .of_compatible = "motorola,cpcap-led-green",
+ }, {
+ .name = "cpcap-led",
+ .id = 2,
+ .of_compatible = "motorola,cpcap-led-blue",
+ }, {
+ .name = "cpcap-led",
+ .id = 3,
+ .of_compatible = "motorola,cpcap-led-adl",
+ }, {
+ .name = "cpcap-led",
+ .id = 4,
+ .of_compatible = "motorola,cpcap-led-cp",
+ }, {
+ .name = "cpcap-codec",
+ }
+};
+
static int cpcap_probe(struct spi_device *spi)
{
const struct of_device_id *match;
@@ -260,7 +308,8 @@ static int cpcap_probe(struct spi_device *spi)
if (ret)
return ret;
- return devm_of_platform_populate(&cpcap->spi->dev);
+ return devm_mfd_add_devices(&spi->dev, 0, cpcap_mfd_devices,
+ ARRAY_SIZE(cpcap_mfd_devices), NULL, 0, NULL);
}
static struct spi_driver cpcap_driver = {
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 9613b4257302..e0835c9df7a1 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -1,15 +1,7 @@
-/*
- * sec-core.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 5eb59c233d52..ad0099077e7e 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -1,19 +1,12 @@
-/*
- * sec-irq.c
- *
- * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/mfd/samsung/core.h>
@@ -501,3 +494,10 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
return 0;
}
+EXPORT_SYMBOL_GPL(sec_irq_init);
+
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_DESCRIPTION("Interrupt support for the S5M MFD");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ti-lmu.c b/drivers/mfd/ti-lmu.c
index cfb411cde51c..37d0bdb291c3 100644
--- a/drivers/mfd/ti-lmu.c
+++ b/drivers/mfd/ti-lmu.c
@@ -12,7 +12,7 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
@@ -21,28 +21,18 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
struct ti_lmu_data {
- struct mfd_cell *cells;
+ const struct mfd_cell *cells;
int num_cells;
unsigned int max_register;
};
static int ti_lmu_enable_hw(struct ti_lmu *lmu, enum ti_lmu_id id)
{
- int ret;
-
- if (gpio_is_valid(lmu->en_gpio)) {
- ret = devm_gpio_request_one(lmu->dev, lmu->en_gpio,
- GPIOF_OUT_INIT_HIGH, "lmu_hwen");
- if (ret) {
- dev_err(lmu->dev, "Can not request enable GPIO: %d\n",
- ret);
- return ret;
- }
- }
+ if (lmu->en_gpio)
+ gpiod_set_value(lmu->en_gpio, 1);
/* Delay about 1ms after HW enable pin control */
usleep_range(1000, 1500);
@@ -57,13 +47,14 @@ static int ti_lmu_enable_hw(struct ti_lmu *lmu, enum ti_lmu_id id)
return 0;
}
-static void ti_lmu_disable_hw(struct ti_lmu *lmu)
+static void ti_lmu_disable_hw(void *data)
{
- if (gpio_is_valid(lmu->en_gpio))
- gpio_set_value(lmu->en_gpio, 0);
+ struct ti_lmu *lmu = data;
+
+ if (lmu->en_gpio)
+ gpiod_set_value(lmu->en_gpio, 0);
}
-static struct mfd_cell lm3532_devices[] = {
+static const struct mfd_cell lm3532_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3532,
@@ -78,7 +69,7 @@ static struct mfd_cell lm3532_devices[] = {
.of_compatible = "ti,lm363x-regulator", \
} \
-static struct mfd_cell lm3631_devices[] = {
+static const struct mfd_cell lm3631_devices[] = {
LM363X_REGULATOR(LM3631_BOOST),
LM363X_REGULATOR(LM3631_LDO_CONT),
LM363X_REGULATOR(LM3631_LDO_OREF),
@@ -91,7 +82,7 @@ static struct mfd_cell lm3631_devices[] = {
},
};
-static struct mfd_cell lm3632_devices[] = {
+static const struct mfd_cell lm3632_devices[] = {
LM363X_REGULATOR(LM3632_BOOST),
LM363X_REGULATOR(LM3632_LDO_POS),
LM363X_REGULATOR(LM3632_LDO_NEG),
@@ -102,7 +93,7 @@ static struct mfd_cell lm3632_devices[] = {
},
};
-static struct mfd_cell lm3633_devices[] = {
+static const struct mfd_cell lm3633_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3633,
@@ -120,7 +111,7 @@ static struct mfd_cell lm3633_devices[] = {
},
};
-static struct mfd_cell lm3695_devices[] = {
+static const struct mfd_cell lm3695_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3695,
@@ -128,7 +119,7 @@ static struct mfd_cell lm3695_devices[] = {
},
};
-static struct mfd_cell lm3697_devices[] = {
+static const struct mfd_cell lm3697_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3697,
@@ -157,34 +148,21 @@ TI_LMU_DATA(lm3633, LM3633_MAX_REG);
TI_LMU_DATA(lm3695, LM3695_MAX_REG);
TI_LMU_DATA(lm3697, LM3697_MAX_REG);
-static const struct of_device_id ti_lmu_of_match[] = {
- { .compatible = "ti,lm3532", .data = &lm3532_data },
- { .compatible = "ti,lm3631", .data = &lm3631_data },
- { .compatible = "ti,lm3632", .data = &lm3632_data },
- { .compatible = "ti,lm3633", .data = &lm3633_data },
- { .compatible = "ti,lm3695", .data = &lm3695_data },
- { .compatible = "ti,lm3697", .data = &lm3697_data },
- { }
-};
-MODULE_DEVICE_TABLE(of, ti_lmu_of_match);
-
static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct device *dev = &cl->dev;
- const struct of_device_id *match;
const struct ti_lmu_data *data;
struct regmap_config regmap_cfg;
struct ti_lmu *lmu;
int ret;
- match = of_match_device(ti_lmu_of_match, dev);
- if (!match)
- return -ENODEV;
/*
* Get device specific data from of_match table.
* This data is defined by using TI_LMU_DATA() macro.
*/
- data = (struct ti_lmu_data *)match->data;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
lmu = devm_kzalloc(dev, sizeof(*lmu), GFP_KERNEL);
if (!lmu)
@@ -204,11 +182,21 @@ static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
return PTR_ERR(lmu->regmap);
/* HW enable pin control and additional power up sequence if required */
- lmu->en_gpio = of_get_named_gpio(dev->of_node, "enable-gpios", 0);
+ lmu->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(lmu->en_gpio)) {
+ ret = PTR_ERR(lmu->en_gpio);
+ dev_err(dev, "Can not request enable GPIO: %d\n", ret);
+ return ret;
+ }
+
ret = ti_lmu_enable_hw(lmu, id->driver_data);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, ti_lmu_disable_hw, lmu);
+ if (ret)
+ return ret;
+
/*
* Fault circuit(open/short) can be detected by ti-lmu-fault-monitor.
* After fault detection is done, some devices should re-initialize
@@ -218,18 +206,20 @@ static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
i2c_set_clientdata(cl, lmu);
- return mfd_add_devices(lmu->dev, 0, data->cells,
- data->num_cells, NULL, 0, NULL);
+ return devm_mfd_add_devices(lmu->dev, 0, data->cells,
+ data->num_cells, NULL, 0, NULL);
}
-static int ti_lmu_remove(struct i2c_client *cl)
-{
- struct ti_lmu *lmu = i2c_get_clientdata(cl);
-
- ti_lmu_disable_hw(lmu);
- mfd_remove_devices(lmu->dev);
- return 0;
-}
+static const struct of_device_id ti_lmu_of_match[] = {
+ { .compatible = "ti,lm3532", .data = &lm3532_data },
+ { .compatible = "ti,lm3631", .data = &lm3631_data },
+ { .compatible = "ti,lm3632", .data = &lm3632_data },
+ { .compatible = "ti,lm3633", .data = &lm3633_data },
+ { .compatible = "ti,lm3695", .data = &lm3695_data },
+ { .compatible = "ti,lm3697", .data = &lm3697_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ti_lmu_of_match);
static const struct i2c_device_id ti_lmu_ids[] = {
{ "lm3532", LM3532 },
@@ -244,7 +234,6 @@ MODULE_DEVICE_TABLE(i2c, ti_lmu_ids);
static struct i2c_driver ti_lmu_driver = {
.probe = ti_lmu_probe,
- .remove = ti_lmu_remove,
.driver = {
.name = "ti-lmu",
.of_match_table = ti_lmu_of_match,
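
With devm_gpiod_get_optional() plus devm_add_action_or_reset(), the explicit .remove callback above becomes unnecessary: devres unwinds the MFD children and then runs ti_lmu_disable_hw() automatically, in reverse registration order, on driver unbind or on any later probe failure. A minimal sketch of the idiom (demo_* names are illustrative):

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static void demo_disable(void *data)
{
	gpiod_set_value((struct gpio_desc *)data, 0);
}

static int demo_enable(struct device *dev, struct gpio_desc *en)
{
	gpiod_set_value(en, 1);

	/* Runs demo_disable() on unbind or if a later probe step fails;
	 * on registration failure it runs the action immediately. */
	return devm_add_action_or_reset(dev, demo_disable, en);
}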
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 7a30546880a4..c2d47d78705b 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -269,7 +269,6 @@ static int ti_tscadc_probe(struct platform_device *pdev)
if (err < 0)
goto err_disable_clk;
- device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, tscadc);
return 0;
@@ -294,11 +293,24 @@ static int ti_tscadc_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused ti_tscadc_can_wakeup(struct device *dev, void *data)
+{
+ return device_may_wakeup(dev);
+}
+
static int __maybe_unused tscadc_suspend(struct device *dev)
{
struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev);
regmap_write(tscadc->regmap, REG_SE, 0x00);
+ if (device_for_each_child(dev, NULL, ti_tscadc_can_wakeup)) {
+ u32 ctrl;
+
+ regmap_read(tscadc->regmap, REG_CTRL, &ctrl);
+ ctrl &= ~(CNTRLREG_POWERDOWN);
+ ctrl |= CNTRLREG_TSCSSENB;
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
+ }
pm_runtime_put_sync(dev);
return 0;
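
The suspend hook above relies on device_for_each_child(), which stops iterating and returns the first nonzero value the callback produces; here a nonzero return means at least one child (touchscreen or ADC) is wakeup-enabled, so the controller is kept powered instead of being shut down. A minimal sketch of that "does any child match" idiom:

#include <linux/device.h>

static int child_may_wakeup(struct device *child, void *unused)
{
	return device_may_wakeup(child);
}

/* True if ANY child of @parent is wakeup-enabled. */
static bool any_child_may_wakeup(struct device *parent)
{
	return device_for_each_child(parent, NULL, child_may_wakeup) != 0;
}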
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 4f832002d116..1827c69959fb 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -114,6 +114,6 @@ static struct i2c_driver ad_dpot_i2c_driver = {
module_i2c_driver(ad_dpot_i2c_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index 39a7f517ee7e..0383ec153725 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -140,7 +140,7 @@ static struct spi_driver ad_dpot_spi_driver = {
module_spi_driver(ad_dpot_spi_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index bc591b7168db..a0afadefcc49 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -1,7 +1,7 @@
/*
* ad525x_dpot: Driver for the Analog Devices digital potentiometers
* Copyright (c) 2009-2010 Analog Devices, Inc.
- * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Author: Michael Hennerich <michael.hennerich@analog.com>
*
* DEVID #Wipers #Positions Resistor Options (kOhm)
* AD5258 1 64 1, 10, 50, 100
@@ -64,7 +64,7 @@
* Author: Chris Verges <chrisv@cyberswitching.com>
*
* derived from ad5252.c
- * Copyright (c) 2006-2011 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Copyright (c) 2006-2011 Michael Hennerich <michael.hennerich@analog.com>
*
* Licensed under the GPL-2 or later.
*/
@@ -760,6 +760,6 @@ EXPORT_SYMBOL(ad_dpot_remove);
MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
- "Michael Hennerich <hennerich@blackfin.uclinux.org>");
+ "Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Digital potentiometer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index ed9412d750b7..24876c615c3c 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -188,7 +188,6 @@ struct apds990x_chip {
#define APDS_LUX_DEFAULT_RATE 200
static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */
-static const u8 ir_currents[] = {100, 50, 25, 12}; /* IRled currents in mA */
/* Following two tables must match i.e 10Hz rate means 1 as persistence value */
static const u16 arates_hz[] = {10, 5, 2, 1};
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 9c62bf064f77..17e81ce9925b 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -180,9 +180,6 @@ static const char reg_vleds[] = "Vleds";
static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
-/* Supported IR-led currents in mA */
-static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
-
/*
* Supported stand alone rates in ms from chip data sheet
* {100, 200, 500, 1000, 2000};
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 43917898fb9a..4d6836f19489 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -92,8 +92,8 @@ static int update_property(struct device_node *dn, const char *name,
val = (u32 *)new_prop->value;
rc = cxl_update_properties(dn, new_prop);
- pr_devel("%s: update property (%s, length: %i, value: %#x)\n",
- dn->name, name, vd, be32_to_cpu(*val));
+ pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n",
+ dn, name, vd, be32_to_cpu(*val));
if (rc) {
kfree(new_prop->name);
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3bc0c15d4d85..5d28d9e454f5 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1018,8 +1018,6 @@ err1:
void cxl_guest_remove_afu(struct cxl_afu *afu)
{
- pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);
-
if (!afu)
return;
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
index 8a5adc0d2e88..3ebe5d75ad6a 100644
--- a/drivers/misc/echo/echo.c
+++ b/drivers/misc/echo/echo.c
@@ -381,7 +381,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
*/
ec->factor = 0;
ec->shift = 0;
- if ((ec->nonupdate_dwell == 0)) {
+ if (!ec->nonupdate_dwell) {
int p, logp, shift;
/* Determine:
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 68a1ac929917..fe7a1d27a017 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -111,4 +111,15 @@ config EEPROM_IDT_89HPESX
This driver can also be built as a module. If so, the module
will be called idt_89hpesx.
+config EEPROM_EE1004
+ tristate "SPD EEPROMs on DDR4 memory modules"
+ depends on I2C && SYSFS
+ help
+	  Enable this driver to get read support for SPD EEPROMs following
+ the JEDEC EE1004 standard. These are typically found on DDR4
+ SDRAM memory modules.
+
+ This driver can also be built as a module. If so, the module
+ will be called ee1004.
+
endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 2aab60ef3e3e..a9b4b6579b75 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
+obj-$(CONFIG_EEPROM_EE1004) += ee1004.o
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 840afb398f9e..99de6939cd5a 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -366,7 +366,7 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.word_size = 1;
at25->nvmem_config.size = chip.byte_len;
- at25->nvmem = nvmem_register(&at25->nvmem_config);
+ at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
if (IS_ERR(at25->nvmem))
return PTR_ERR(at25->nvmem);
@@ -379,16 +379,6 @@ static int at25_probe(struct spi_device *spi)
return 0;
}
-static int at25_remove(struct spi_device *spi)
-{
- struct at25_data *at25;
-
- at25 = spi_get_drvdata(spi);
- nvmem_unregister(at25->nvmem);
-
- return 0;
-}
-
/*-------------------------------------------------------------------------*/
static const struct of_device_id at25_of_match[] = {
@@ -403,7 +393,6 @@ static struct spi_driver at25_driver = {
.of_match_table = at25_of_match,
},
.probe = at25_probe,
- .remove = at25_remove,
};
module_spi_driver(at25_driver);
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
new file mode 100644
index 000000000000..276c1690ea1b
--- /dev/null
+++ b/drivers/misc/eeprom/ee1004.c
@@ -0,0 +1,281 @@
+/*
+ * ee1004 - driver for DDR4 SPD EEPROMs
+ *
+ * Copyright (C) 2017 Jean Delvare
+ *
+ * Based on the at24 driver:
+ * Copyright (C) 2005-2007 David Brownell
+ * Copyright (C) 2008 Wolfram Sang, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+/*
+ * DDR4 memory modules use special EEPROMs following the Jedec EE1004
+ * specification. These are 512-byte EEPROMs using a single I2C address
+ * in the 0x50-0x57 range for data. One of the two 256-byte pages is selected
+ * by writing a command to I2C address 0x36 or 0x37 on the same I2C bus.
+ *
+ * Therefore we need to request these 2 additional addresses, and serialize
+ * access to all such EEPROMs with a single mutex.
+ *
+ * We assume it is safe to read up to 32 bytes at once from these EEPROMs.
+ * We use SMBus access even if I2C is available: these EEPROMs are small
+ * enough, and reads from them infrequent enough, that we favor simplicity
+ * over performance.
+ */
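
For illustration, here is a minimal sketch of the two-step access sequence the comment above describes, using the kernel SMBus helpers. Both client handles are hypothetical stand-ins; the actual driver below keeps its page-select dummies in ee1004_set_page[]:

#include <linux/i2c.h>

/*
 * Sketch only: select the page, then read data from the module's own
 * address. A write of any byte to 0x36 (page 0) or 0x37 (page 1)
 * selects that page; the data byte itself is ignored by the device.
 */
static int ee1004_sketch_read(struct i2c_client *set_page_client,
			      struct i2c_client *data_client,
			      u8 offset, u8 count, u8 *buf)
{
	int status;

	status = i2c_smbus_write_byte(set_page_client, 0x00);
	if (status < 0)
		return status;

	/* Data comes from the EEPROM's own address in the 0x50-0x57 range */
	return i2c_smbus_read_i2c_block_data(data_client, offset, count, buf);
}
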
+
+#define EE1004_ADDR_SET_PAGE 0x36
+#define EE1004_EEPROM_SIZE 512
+#define EE1004_PAGE_SIZE 256
+#define EE1004_PAGE_SHIFT 8
+
+/*
+ * Mutex protects ee1004_set_page and ee1004_dev_count, and must be held
+ * from page selection to end of read.
+ */
+static DEFINE_MUTEX(ee1004_bus_lock);
+static struct i2c_client *ee1004_set_page[2];
+static unsigned int ee1004_dev_count;
+static int ee1004_current_page;
+
+static const struct i2c_device_id ee1004_ids[] = {
+ { "ee1004", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ee1004_ids);
+
+/*-------------------------------------------------------------------------*/
+
+static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
+ unsigned int offset, size_t count)
+{
+ int status;
+
+ if (count > I2C_SMBUS_BLOCK_MAX)
+ count = I2C_SMBUS_BLOCK_MAX;
+ /* Can't cross page boundaries */
+ if (unlikely(offset + count > EE1004_PAGE_SIZE))
+ count = EE1004_PAGE_SIZE - offset;
+
+ status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
+ count, buf);
+ dev_dbg(&client->dev, "read %zu@%d --> %d\n", count, offset, status);
+
+ return status;
+}
+
+static ssize_t ee1004_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ size_t requested = count;
+ int page;
+
+ if (unlikely(!count))
+ return count;
+
+ page = off >> EE1004_PAGE_SHIFT;
+ if (unlikely(page > 1))
+ return 0;
+ off &= (1 << EE1004_PAGE_SHIFT) - 1;
+
+ /*
+ * Read data from chip, protecting against concurrent access to
+ * other EE1004 SPD EEPROMs on the same adapter.
+ */
+ mutex_lock(&ee1004_bus_lock);
+
+ while (count) {
+ int status;
+
+ /* Select page */
+ if (page != ee1004_current_page) {
+ /* Data is ignored */
+ status = i2c_smbus_write_byte(ee1004_set_page[page],
+ 0x00);
+ if (status < 0) {
+ dev_err(dev, "Failed to select page %d (%d)\n",
+ page, status);
+ mutex_unlock(&ee1004_bus_lock);
+ return status;
+ }
+ dev_dbg(dev, "Selected page %d\n", page);
+ ee1004_current_page = page;
+ }
+
+ status = ee1004_eeprom_read(client, buf, off, count);
+ if (status < 0) {
+ mutex_unlock(&ee1004_bus_lock);
+ return status;
+ }
+ buf += status;
+ off += status;
+ count -= status;
+
+ if (off == EE1004_PAGE_SIZE) {
+ page++;
+ off = 0;
+ }
+ }
+
+ mutex_unlock(&ee1004_bus_lock);
+
+ return requested;
+}
+
+static const struct bin_attribute eeprom_attr = {
+ .attr = {
+ .name = "eeprom",
+ .mode = 0444,
+ },
+ .size = EE1004_EEPROM_SIZE,
+ .read = ee1004_read,
+};
+
+static int ee1004_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err, cnr = 0;
+ const char *slow = NULL;
+
+ /* Make sure we can operate on this adapter */
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ slow = "word";
+ else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ slow = "byte";
+ else
+ return -EPFNOSUPPORT;
+ }
+
+ /* Use 2 dummy devices for page select command */
+ mutex_lock(&ee1004_bus_lock);
+ if (++ee1004_dev_count == 1) {
+ for (cnr = 0; cnr < 2; cnr++) {
+ ee1004_set_page[cnr] = i2c_new_dummy(client->adapter,
+ EE1004_ADDR_SET_PAGE + cnr);
+ if (!ee1004_set_page[cnr]) {
+ dev_err(&client->dev,
+ "address 0x%02x unavailable\n",
+ EE1004_ADDR_SET_PAGE + cnr);
+ err = -EADDRINUSE;
+ goto err_clients;
+ }
+ }
+ } else if (i2c_adapter_id(client->adapter) !=
+ i2c_adapter_id(ee1004_set_page[0]->adapter)) {
+ dev_err(&client->dev,
+ "Driver only supports devices on a single I2C bus\n");
+ err = -EOPNOTSUPP;
+ goto err_clients;
+ }
+
+ /* Remember current page to avoid unneeded page select */
+ err = i2c_smbus_read_byte(ee1004_set_page[0]);
+ if (err == -ENXIO) {
+ /* Nack means page 1 is selected */
+ ee1004_current_page = 1;
+ } else if (err < 0) {
+ /* Anything else is a real error, bail out */
+ goto err_clients;
+ } else {
+ /* Ack means page 0 is selected, returned value meaningless */
+ ee1004_current_page = 0;
+ }
+ dev_dbg(&client->dev, "Currently selected page: %d\n",
+ ee1004_current_page);
+ mutex_unlock(&ee1004_bus_lock);
+
+ /* Create the sysfs eeprom file */
+ err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
+ if (err)
+ goto err_clients_lock;
+
+ dev_info(&client->dev,
+ "%u byte EE1004-compliant SPD EEPROM, read-only\n",
+ EE1004_EEPROM_SIZE);
+ if (slow)
+ dev_notice(&client->dev,
+ "Falling back to %s reads, performance will suffer\n",
+ slow);
+
+ return 0;
+
+ err_clients_lock:
+ mutex_lock(&ee1004_bus_lock);
+ err_clients:
+ if (--ee1004_dev_count == 0) {
+ for (cnr--; cnr >= 0; cnr--) {
+ i2c_unregister_device(ee1004_set_page[cnr]);
+ ee1004_set_page[cnr] = NULL;
+ }
+ }
+ mutex_unlock(&ee1004_bus_lock);
+
+ return err;
+}
+
+static int ee1004_remove(struct i2c_client *client)
+{
+ int i;
+
+ sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
+
+ /* Remove page select clients if this is the last device */
+ mutex_lock(&ee1004_bus_lock);
+ if (--ee1004_dev_count == 0) {
+ for (i = 0; i < 2; i++) {
+ i2c_unregister_device(ee1004_set_page[i]);
+ ee1004_set_page[i] = NULL;
+ }
+ }
+ mutex_unlock(&ee1004_bus_lock);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct i2c_driver ee1004_driver = {
+ .driver = {
+ .name = "ee1004",
+ },
+ .probe = ee1004_probe,
+ .remove = ee1004_remove,
+ .id_table = ee1004_ids,
+};
+
+static int __init ee1004_init(void)
+{
+ return i2c_add_driver(&ee1004_driver);
+}
+module_init(ee1004_init);
+
+static void __exit ee1004_exit(void)
+{
+ i2c_del_driver(&ee1004_driver);
+}
+module_exit(ee1004_exit);
+
+MODULE_DESCRIPTION("Driver for EE1004-compliant DDR4 SPD EEPROMs");
+MODULE_AUTHOR("Jean Delvare");
+MODULE_LICENSE("GPL");
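
As a usage note, the driver exposes the EEPROM contents through the read-only sysfs file named "eeprom" created above. A minimal userspace sketch that dumps the first bytes follows; the 2-0050 path is an assumed example, since the bus number and the 0x50-0x57 address depend on the system:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n, i;
	int fd;

	/* Path is illustrative: <bus>-00<addr> varies per system */
	fd = open("/sys/bus/i2c/devices/2-0050/eeprom", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));
	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	close(fd);
	return n == (ssize_t)sizeof(buf) ? 0 : 1;
}
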
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 38766968bfa2..c6dd9ad9bf7b 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -439,7 +439,7 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
return -ENODEV;
}
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ edev = devm_kzalloc(&spi->dev, sizeof(*edev), GFP_KERNEL);
if (!edev)
return -ENOMEM;
@@ -449,8 +449,7 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->addrlen = 6;
else {
dev_err(&spi->dev, "unspecified address type\n");
- err = -EINVAL;
- goto fail;
+ return -EINVAL;
}
mutex_init(&edev->lock);
@@ -473,11 +472,9 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->nvmem_config.word_size = 1;
edev->nvmem_config.size = edev->size;
- edev->nvmem = nvmem_register(&edev->nvmem_config);
- if (IS_ERR(edev->nvmem)) {
- err = PTR_ERR(edev->nvmem);
- goto fail;
- }
+ edev->nvmem = devm_nvmem_register(&spi->dev, &edev->nvmem_config);
+ if (IS_ERR(edev->nvmem))
+ return PTR_ERR(edev->nvmem);
dev_info(&spi->dev, "%d-bit eeprom %s\n",
(pd->flags & EE_ADDR8) ? 8 : 16,
@@ -490,21 +487,15 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
spi_set_drvdata(spi, edev);
return 0;
-fail:
- kfree(edev);
- return err;
}
static int eeprom_93xx46_remove(struct spi_device *spi)
{
struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
- nvmem_unregister(edev->nvmem);
-
if (!(edev->pdata->flags & EE_READONLY))
device_remove_file(&spi->dev, &dev_attr_erase);
- kfree(edev);
return 0;
}
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index c7cd3675bcd1..d137d0fab9bf 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -24,7 +24,6 @@
* controlled from here.
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/err.h>
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 656449cb4476..9a65bd9d6152 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -27,7 +27,6 @@
*/
#include <linux/types.h>
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pci.h>
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 8679e0bd8ec2..3fcb9a2fe1c9 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -23,14 +23,12 @@
*/
#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
-#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
@@ -298,7 +296,7 @@ static int genwqe_sgl_size(int num_pages)
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
void __user *user_addr, size_t user_size, int write)
{
- int rc;
+ int ret = -ENOMEM;
struct pci_dev *pci_dev = cd->pci_dev;
sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
@@ -318,7 +316,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (get_order(sgl->sgl_size) > MAX_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
- return -ENOMEM;
+ return ret;
}
sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
@@ -326,7 +324,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (sgl->sgl == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: no memory available!\n", __func__);
- return -ENOMEM;
+ return ret;
}
/* Only use buffering on incomplete pages */
@@ -339,7 +337,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/* Sync with user memory */
if (copy_from_user(sgl->fpage + sgl->fpage_offs,
user_addr, sgl->fpage_size)) {
- rc = -EFAULT;
+ ret = -EFAULT;
goto err_out;
}
}
@@ -352,7 +350,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/* Sync with user memory */
if (copy_from_user(sgl->lpage, user_addr + user_size -
sgl->lpage_size, sgl->lpage_size)) {
- rc = -EFAULT;
+ ret = -EFAULT;
goto err_out2;
}
}
@@ -374,7 +372,8 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->sgl = NULL;
sgl->sgl_dma_addr = 0;
sgl->sgl_size = 0;
- return -ENOMEM;
+
+ return ret;
}
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 6193270e7b3d..de20bdaa148d 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -985,6 +985,12 @@ static void kgdbts_run_tests(void)
int nmi_sleep = 0;
int i;
+ verbose = 0;
+ if (strstr(config, "V1"))
+ verbose = 1;
+ if (strstr(config, "V2"))
+ verbose = 2;
+
ptr = strchr(config, 'F');
if (ptr)
fork_test = simple_strtol(ptr + 1, NULL, 10);
@@ -1068,13 +1074,6 @@ static int kgdbts_option_setup(char *opt)
return -ENOSPC;
}
strcpy(config, opt);
-
- verbose = 0;
- if (strstr(config, "V1"))
- verbose = 1;
- if (strstr(config, "V2"))
- verbose = 2;
-
return 0;
}
@@ -1086,9 +1085,6 @@ static int configure_kgdbts(void)
if (!strlen(config) || isspace(config[0]))
goto noconfig;
- err = kgdbts_option_setup(config);
- if (err)
- goto noconfig;
final_ack = 0;
run_plant_and_detach_test(1);
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 389475b25bb7..d5a0e7f1813b 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -18,7 +18,7 @@
* hardened usercopy checks by added "unconst" to all the const copies,
* and making sure "cache_size" isn't optimized into a const.
*/
-static volatile size_t unconst = 0;
+static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index a6f41f96f2a1..80215c312f0e 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 4d77a6ae183a..87281b3695e6 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -599,10 +599,10 @@ static __poll_t mei_poll(struct file *file, poll_table *wait)
mei_cl_read_start(cl, mei_cl_mtu(cl), file);
}
- if (req_events & (POLLOUT | POLLWRNORM)) {
+ if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
poll_wait(file, &cl->tx_wait, wait);
if (cl->tx_cb_queued < dev->tx_queue_limit)
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
}
out:
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 6369aeaa7056..18b8ed57c4ac 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -1035,8 +1035,6 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
}
dma_async_issue_pending(chan);
}
- if (ret < 0)
- goto err;
offset += loop_len;
temp += loop_len;
temp_phys += loop_len;
@@ -1553,9 +1551,8 @@ static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
int src_cache_off, dst_cache_off;
s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
u8 *temp = NULL;
- bool src_local = true, dst_local = false;
+ bool src_local = true;
struct scif_dma_comp_cb *comp_cb;
- dma_addr_t src_dma_addr, dst_dma_addr;
int err;
if (is_dma_copy_aligned(chan->device, 1, 1, 1))
@@ -1569,12 +1566,8 @@ static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
if (work->loopback)
return scif_rma_list_cpu_copy(work);
- src_dma_addr = __scif_off_to_dma_addr(work->src_window, src_offset);
- dst_dma_addr = __scif_off_to_dma_addr(work->dst_window, dst_offset);
src_local = work->src_window->type == SCIF_WINDOW_SELF;
- dst_local = work->dst_window->type == SCIF_WINDOW_SELF;
- dst_local = dst_local;
/* Allocate dma_completion cb */
comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
if (!comp_cb)
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
index cac3bcc308a7..7bb929f05d85 100644
--- a/drivers/misc/mic/scif/scif_fence.c
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
dma_fail:
if (!x100)
dma_pool_free(ep->remote_dev->signal_pool, status,
- status->src_dma_addr);
+ src - offsetof(struct scif_status, val));
alloc_fail:
return err;
}
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index 2e30de9c694a..57a6bb1fd3c9 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -280,7 +280,9 @@ int ocxl_config_check_afu_index(struct pci_dev *dev,
u32 val;
int rc, templ_major, templ_minor, len;
- pci_write_config_word(dev, fn->dvsec_afu_info_pos, afu_idx);
+ pci_write_config_byte(dev,
+ fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX,
+ afu_idx);
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val);
if (rc)
return rc;
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 030769018461..4b23d586fc3f 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -634,7 +634,7 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_noop_page_overflow);
- /* fallthru */
+ /* fall through */
default:
BUG();
}
@@ -792,7 +792,7 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_page_overflow);
- /* fallthru */
+ /* fall through */
default:
BUG();
}
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 05a890ce2ab8..8e6607fc8a67 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -28,7 +28,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
enum xp_retval ret;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (!(ch->flags & XPC_C_OPENREQUEST) ||
!(ch->flags & XPC_C_ROPENREQUEST)) {
@@ -82,7 +82,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
struct xpc_partition *part = &xpc_partitions[ch->partid];
u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (!(ch->flags & XPC_C_DISCONNECTING))
return;
@@ -755,7 +755,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
{
u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
return;
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 0c3ef6f1df54..3eba1c420cc0 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -98,8 +98,7 @@ xpc_get_rsvd_page_pa(int nasid)
len = L1_CACHE_ALIGN(len);
if (len > buf_len) {
- if (buf_base != NULL)
- kfree(buf_base);
+ kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
&buf_base);
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 5a12d2a54049..0ae69b9390ce 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -1671,7 +1671,7 @@ xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
{
struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
ch_sn2->remote_msgqueue_pa = 0;
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 340b44d9e8cf..0441abe87880 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -1183,7 +1183,7 @@ xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
kfree(ch_uv->cached_notify_gru_mq_desc);
ch_uv->cached_notify_gru_mq_desc = NULL;
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 74b183baf044..80d8cbe8c01a 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -323,10 +323,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
cur_start = block->start + block->size;
}
- err_chunks:
- if (child)
- of_node_put(child);
-
+err_chunks:
+ of_node_put(child);
kfree(rblocks);
return ret;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2543ef1ece17..9b0b3fa4f836 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -25,6 +25,9 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>
@@ -37,20 +40,20 @@ MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
/*
- * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
- * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
- * __GFP_NOWARN, to suppress page allocation failure warnings.
+ * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't allow wait
+ * (__GFP_RECLAIM) for huge page allocations. Use __GFP_NOWARN, to suppress page
+ * allocation failure warnings. Disallow access to emergency low-memory pools.
*/
-#define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)
+#define VMW_HUGE_PAGE_ALLOC_FLAGS (__GFP_HIGHMEM|__GFP_NOWARN| \
+ __GFP_NOMEMALLOC)
/*
- * Use GFP_HIGHUSER when executing in a separate kernel thread
- * context and allocation can sleep. This is less stressful to
- * the guest memory system, since it allows the thread to block
- * while memory is reclaimed, and won't take pages from emergency
- * low-memory pools.
+ * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We allow lightweight
+ * reclamation (__GFP_NORETRY). Use __GFP_NOWARN, to suppress page allocation
+ * failure warnings. Disallow access to emergency low-memory pools.
*/
-#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)
+#define VMW_PAGE_ALLOC_FLAGS (__GFP_HIGHMEM|__GFP_NOWARN| \
+ __GFP_NOMEMALLOC|__GFP_NORETRY)
/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED 16
@@ -77,225 +80,420 @@ enum vmwballoon_capabilities {
| VMW_BALLOON_BATCHED_2M_CMDS \
| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
-#define VMW_BALLOON_2M_SHIFT (9)
-#define VMW_BALLOON_NUM_PAGE_SIZES (2)
+#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
-/*
- * Backdoor commands availability:
- *
- * START, GET_TARGET and GUEST_ID are always available,
- *
- * VMW_BALLOON_BASIC_CMDS:
- * LOCK and UNLOCK commands,
- * VMW_BALLOON_BATCHED_CMDS:
- * BATCHED_LOCK and BATCHED_UNLOCK commands.
- * VMW BALLOON_BATCHED_2M_CMDS:
- * BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
- * VMW VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
- * VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
- */
-#define VMW_BALLOON_CMD_START 0
-#define VMW_BALLOON_CMD_GET_TARGET 1
-#define VMW_BALLOON_CMD_LOCK 2
-#define VMW_BALLOON_CMD_UNLOCK 3
-#define VMW_BALLOON_CMD_GUEST_ID 4
-#define VMW_BALLOON_CMD_BATCHED_LOCK 6
-#define VMW_BALLOON_CMD_BATCHED_UNLOCK 7
-#define VMW_BALLOON_CMD_BATCHED_2M_LOCK 8
-#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK 9
-#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET 10
-
-
-/* error codes */
-#define VMW_BALLOON_SUCCESS 0
-#define VMW_BALLOON_FAILURE -1
-#define VMW_BALLOON_ERROR_CMD_INVALID 1
-#define VMW_BALLOON_ERROR_PPN_INVALID 2
-#define VMW_BALLOON_ERROR_PPN_LOCKED 3
-#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
-#define VMW_BALLOON_ERROR_PPN_PINNED 5
-#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
-#define VMW_BALLOON_ERROR_RESET 7
-#define VMW_BALLOON_ERROR_BUSY 8
+enum vmballoon_page_size_type {
+ VMW_BALLOON_4K_PAGE,
+ VMW_BALLOON_2M_PAGE,
+ VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
+};
-#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
+#define VMW_BALLOON_NUM_PAGE_SIZES (VMW_BALLOON_LAST_SIZE + 1)
-/* Batch page description */
+static const char * const vmballoon_page_size_names[] = {
+ [VMW_BALLOON_4K_PAGE] = "4k",
+ [VMW_BALLOON_2M_PAGE] = "2M"
+};
-/*
- * Layout of a page in the batch page:
+enum vmballoon_op {
+ VMW_BALLOON_INFLATE,
+ VMW_BALLOON_DEFLATE
+};
+
+enum vmballoon_op_stat_type {
+ VMW_BALLOON_OP_STAT,
+ VMW_BALLOON_OP_FAIL_STAT
+};
+
+#define VMW_BALLOON_OP_STAT_TYPES (VMW_BALLOON_OP_FAIL_STAT + 1)
+
+/**
+ * enum vmballoon_cmd_type - backdoor commands.
*
- * +-------------+----------+--------+
- * | | | |
- * | Page number | Reserved | Status |
- * | | | |
- * +-------------+----------+--------+
- * 64 PAGE_SHIFT 6 0
+ * Availability of the commands is as follows:
*
- * The reserved field should be set to 0.
+ * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
+ * %VMW_BALLOON_CMD_GUEST_ID are always available.
+ *
+ * If the host reports %VMW_BALLOON_BASIC_CMDS is supported then the
+ * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
+ *
+ * If the host reports %VMW_BALLOON_BATCHED_CMDS is supported then the
+ * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
+ * are available.
+ *
+ * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS is supported then the
+ * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
+ * commands are available.
+ *
+ * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then the
+ * %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is available.
+ *
+ * @VMW_BALLOON_CMD_START: Communicates the supported version to the hypervisor.
+ * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
+ * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
+ * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
+ * to be deflated from the balloon.
+ * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
+ * runs in the VM.
+ * @VMW_BALLOON_CMD_BATCHED_LOCK: Informs the hypervisor about a batch of
+ *				  ballooned pages (up to 512).
+ * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Informs the hypervisor about a batch of
+ * pages that are about to be deflated from the
+ * balloon (up to 512).
+ * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
+ * for 2MB pages.
+ * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
+ * @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
+ * pages.
+ * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
+ * that would be invoked when the balloon
+ * size changes.
+ * @VMW_BALLOON_CMD_LAST: Value of the last command.
*/
-#define VMW_BALLOON_BATCH_MAX_PAGES (PAGE_SIZE / sizeof(u64))
-#define VMW_BALLOON_BATCH_STATUS_MASK ((1UL << 5) - 1)
-#define VMW_BALLOON_BATCH_PAGE_MASK (~((1UL << PAGE_SHIFT) - 1))
-
-struct vmballoon_batch_page {
- u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
+enum vmballoon_cmd_type {
+ VMW_BALLOON_CMD_START,
+ VMW_BALLOON_CMD_GET_TARGET,
+ VMW_BALLOON_CMD_LOCK,
+ VMW_BALLOON_CMD_UNLOCK,
+ VMW_BALLOON_CMD_GUEST_ID,
+ /* No command 5 */
+ VMW_BALLOON_CMD_BATCHED_LOCK = 6,
+ VMW_BALLOON_CMD_BATCHED_UNLOCK,
+ VMW_BALLOON_CMD_BATCHED_2M_LOCK,
+ VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
+ VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+ VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};
-static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
-{
- return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
-}
+#define VMW_BALLOON_CMD_NUM (VMW_BALLOON_CMD_LAST + 1)
+
+enum vmballoon_error_codes {
+ VMW_BALLOON_SUCCESS,
+ VMW_BALLOON_ERROR_CMD_INVALID,
+ VMW_BALLOON_ERROR_PPN_INVALID,
+ VMW_BALLOON_ERROR_PPN_LOCKED,
+ VMW_BALLOON_ERROR_PPN_UNLOCKED,
+ VMW_BALLOON_ERROR_PPN_PINNED,
+ VMW_BALLOON_ERROR_PPN_NOTNEEDED,
+ VMW_BALLOON_ERROR_RESET,
+ VMW_BALLOON_ERROR_BUSY
+};
-static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
- int idx)
-{
- return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
-}
+#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
-static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
- u64 pa)
-{
- batch->pages[idx] = pa;
-}
+#define VMW_BALLOON_CMD_WITH_TARGET_MASK \
+ ((1UL << VMW_BALLOON_CMD_GET_TARGET) | \
+ (1UL << VMW_BALLOON_CMD_LOCK) | \
+ (1UL << VMW_BALLOON_CMD_UNLOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_LOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
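
The mask above lets the common command path decide, with a single AND, whether a command's result carries a new balloon target; __vmballoon_cmd() further down uses it exactly that way. A tiny sketch of the membership test, with a helper name made up for illustration:

/* Sketch: does this backdoor command report a new target in its result? */
static inline bool vmballoon_cmd_has_target(unsigned long cmd)
{
	return (1UL << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK;
}
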
+
+static const char * const vmballoon_cmd_names[] = {
+ [VMW_BALLOON_CMD_START] = "start",
+ [VMW_BALLOON_CMD_GET_TARGET] = "target",
+ [VMW_BALLOON_CMD_LOCK] = "lock",
+ [VMW_BALLOON_CMD_UNLOCK] = "unlock",
+ [VMW_BALLOON_CMD_GUEST_ID] = "guestType",
+ [VMW_BALLOON_CMD_BATCHED_LOCK] = "batchLock",
+ [VMW_BALLOON_CMD_BATCHED_UNLOCK] = "batchUnlock",
+ [VMW_BALLOON_CMD_BATCHED_2M_LOCK] = "2m-lock",
+ [VMW_BALLOON_CMD_BATCHED_2M_UNLOCK] = "2m-unlock",
+ [VMW_BALLOON_CMD_VMCI_DOORBELL_SET] = "doorbellSet"
+};
+enum vmballoon_stat_page {
+ VMW_BALLOON_PAGE_STAT_ALLOC,
+ VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
+ VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
+ VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
+ VMW_BALLOON_PAGE_STAT_FREE,
+ VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
+};
-#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result) \
-({ \
- unsigned long __status, __dummy1, __dummy2, __dummy3; \
- __asm__ __volatile__ ("inl %%dx" : \
- "=a"(__status), \
- "=c"(__dummy1), \
- "=d"(__dummy2), \
- "=b"(result), \
- "=S" (__dummy3) : \
- "0"(VMW_BALLOON_HV_MAGIC), \
- "1"(VMW_BALLOON_CMD_##cmd), \
- "2"(VMW_BALLOON_HV_PORT), \
- "3"(arg1), \
- "4" (arg2) : \
- "memory"); \
- if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START) \
- result = __dummy1; \
- result &= -1UL; \
- __status & -1UL; \
-})
+#define VMW_BALLOON_PAGE_STAT_NUM (VMW_BALLOON_PAGE_STAT_LAST + 1)
-#ifdef CONFIG_DEBUG_FS
-struct vmballoon_stats {
- unsigned int timer;
- unsigned int doorbell;
-
- /* allocation statistics */
- unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int sleep_alloc;
- unsigned int sleep_alloc_fail;
- unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];
-
- /* monitor operations */
- unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int target;
- unsigned int target_fail;
- unsigned int start;
- unsigned int start_fail;
- unsigned int guest_type;
- unsigned int guest_type_fail;
- unsigned int doorbell_set;
- unsigned int doorbell_unset;
+enum vmballoon_stat_general {
+ VMW_BALLOON_STAT_TIMER,
+ VMW_BALLOON_STAT_DOORBELL,
+ VMW_BALLOON_STAT_RESET,
+ VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_RESET
};
-#define STATS_INC(stat) (stat)++
-#else
-#define STATS_INC(stat)
-#endif
+#define VMW_BALLOON_STAT_NUM (VMW_BALLOON_STAT_LAST + 1)
-struct vmballoon;
-struct vmballoon_ops {
- void (*add_page)(struct vmballoon *b, int idx, struct page *p);
- int (*lock)(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target);
- int (*unlock)(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target);
+static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
+static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
+
+struct vmballoon_ctl {
+ struct list_head pages;
+ struct list_head refused_pages;
+ unsigned int n_refused_pages;
+ unsigned int n_pages;
+ enum vmballoon_page_size_type page_size;
+ enum vmballoon_op op;
};
struct vmballoon_page_size {
/* list of reserved physical pages */
struct list_head pages;
-
- /* transient list of non-balloonable pages */
- struct list_head refused_pages;
- unsigned int n_refused_pages;
};
+/**
+ * struct vmballoon_batch_entry - a batch entry for lock or unlock.
+ *
+ * @status: the status of the operation, which is written by the hypervisor.
+ * @reserved: reserved for future use. Must be set to zero.
+ * @pfn: the physical frame number of the page to be locked or unlocked.
+ */
+struct vmballoon_batch_entry {
+ u64 status : 5;
+ u64 reserved : PAGE_SHIFT - 5;
+ u64 pfn : 52;
+} __packed;
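
Since @status takes 5 bits, @reserved takes PAGE_SHIFT - 5 bits, and @pfn takes 52 bits, each entry packs into a single 64-bit word on a system with 4k pages (5 + 7 + 52 = 64). A minimal sketch of how one entry round-trips, with hypothetical locals:

#include <linux/bug.h>
#include <linux/mm.h>

/* Sketch: fill one batch entry for a page, then fetch the status the
 * hypervisor writes back after a batched lock/unlock command. */
static unsigned long sketch_batch_entry(struct vmballoon_batch_entry *batch,
					struct page *p)
{
	BUILD_BUG_ON(sizeof(struct vmballoon_batch_entry) != sizeof(u64));

	batch[0] = (struct vmballoon_batch_entry){ .pfn = page_to_pfn(p) };
	/* ... the batch page is handed to the hypervisor here ... */
	return batch[0].status;
}
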
+
struct vmballoon {
struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];
- /* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
- unsigned supported_page_sizes;
+ /**
+ * @max_page_size: maximum supported page size for ballooning.
+ *
+ * Protected by @conf_sem
+ */
+ enum vmballoon_page_size_type max_page_size;
+
+ /**
+ * @size: balloon actual size in basic page size (frames).
+ *
+ * While we currently do not support size which is bigger than 32-bit,
+ * in preparation for future support, use 64-bits.
+ */
+ atomic64_t size;
- /* balloon size in pages */
- unsigned int size;
- unsigned int target;
+ /**
+ * @target: balloon target size in basic page size (frames).
+ *
+ * We do not protect the target under the assumption that setting the
+ * value is always done through a single write. If this assumption ever
+ * breaks, we would have to use X_ONCE for accesses, and suffer the less
+ * optimized code. Although we may read stale target value if multiple
+ * accesses happen at once, the performance impact should be minor.
+ */
+ unsigned long target;
- /* reset flag */
+ /**
+ * @reset_required: reset flag
+ *
+ * Setting this flag may introduce races, but the code is expected to
+ * handle them gracefully. In the worst case, another operation will
+ * fail as reset did not take place. Clearing the flag is done while
+ * holding @conf_sem for write.
+ */
bool reset_required;
+ /**
+ * @capabilities: hypervisor balloon capabilities.
+ *
+ * Protected by @conf_sem.
+ */
unsigned long capabilities;
- struct vmballoon_batch_page *batch_page;
+ /**
+ * @batch_page: pointer to communication batch page.
+ *
+ * When batching is used, batch_page points to a page, which holds up to
+ * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
+ */
+ struct vmballoon_batch_entry *batch_page;
+
+ /**
+ * @batch_max_pages: maximum pages that can be locked/unlocked.
+ *
+ * Indicates the number of pages that the hypervisor can lock or unlock
+ * at once, according to whether batching is enabled. If batching is
+	 * disabled, only a single page can be locked/unlocked on each operation.
+ *
+ * Protected by @conf_sem.
+ */
unsigned int batch_max_pages;
- struct page *page;
- const struct vmballoon_ops *ops;
+ /**
+ * @page: page to be locked/unlocked by the hypervisor
+ *
+ * @page is only used when batching is disabled and a single page is
+ * reclaimed on each iteration.
+ *
+ * Protected by @comm_lock.
+ */
+ struct page *page;
-#ifdef CONFIG_DEBUG_FS
/* statistics */
- struct vmballoon_stats stats;
+ struct vmballoon_stats *stats;
+#ifdef CONFIG_DEBUG_FS
/* debugfs file exporting statistics */
struct dentry *dbg_entry;
#endif
- struct sysinfo sysinfo;
-
struct delayed_work dwork;
+ /**
+	 * @vmci_doorbell: VMCI doorbell handle, signalled on balloon size changes.
+ *
+ * Protected by @conf_sem.
+ */
struct vmci_handle vmci_doorbell;
+
+ /**
+ * @conf_sem: semaphore to protect the configuration and the statistics.
+ */
+ struct rw_semaphore conf_sem;
+
+ /**
+ * @comm_lock: lock to protect the communication with the host.
+ *
+ * Lock ordering: @conf_sem -> @comm_lock .
+ */
+ spinlock_t comm_lock;
};
static struct vmballoon balloon;
+struct vmballoon_stats {
+ /* timer / doorbell operations */
+ atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
+
+ /* allocation statistics for huge and small pages */
+ atomic64_t
+ page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
+
+ /* Monitor operations: total operations, and failures */
+ atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
+};
+
+static inline bool is_vmballoon_stats_on(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) &&
+ static_branch_unlikely(&balloon_stat_enabled);
+}
+
+static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
+ enum vmballoon_op_stat_type type)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_inc(&b->stats->ops[op][type]);
+}
+
+static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
+ enum vmballoon_stat_general stat)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_inc(&b->stats->general_stat[stat]);
+}
+
+static inline void vmballoon_stats_gen_add(struct vmballoon *b,
+ enum vmballoon_stat_general stat,
+ unsigned int val)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_add(val, &b->stats->general_stat[stat]);
+}
+
+static inline void vmballoon_stats_page_inc(struct vmballoon *b,
+ enum vmballoon_stat_page stat,
+ enum vmballoon_page_size_type size)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_inc(&b->stats->page_stat[stat][size]);
+}
+
+static inline void vmballoon_stats_page_add(struct vmballoon *b,
+ enum vmballoon_stat_page stat,
+ enum vmballoon_page_size_type size,
+ unsigned int val)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_add(val, &b->stats->page_stat[stat][size]);
+}
+
+static inline unsigned long
+__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
+ unsigned long arg2, unsigned long *result)
+{
+ unsigned long status, dummy1, dummy2, dummy3, local_result;
+
+ vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);
+
+ asm volatile ("inl %%dx" :
+ "=a"(status),
+ "=c"(dummy1),
+ "=d"(dummy2),
+ "=b"(local_result),
+ "=S"(dummy3) :
+ "0"(VMW_BALLOON_HV_MAGIC),
+ "1"(cmd),
+ "2"(VMW_BALLOON_HV_PORT),
+ "3"(arg1),
+ "4"(arg2) :
+ "memory");
+
+ /* update the result if needed */
+ if (result)
+ *result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
+ local_result;
+
+ /* update target when applicable */
+ if (status == VMW_BALLOON_SUCCESS &&
+ ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
+ WRITE_ONCE(b->target, local_result);
+
+ if (status != VMW_BALLOON_SUCCESS &&
+ status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
+ vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
+ pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
+ __func__, vmballoon_cmd_names[cmd], arg1, arg2,
+ status);
+ }
+
+ /* mark reset required accordingly */
+ if (status == VMW_BALLOON_ERROR_RESET)
+ b->reset_required = true;
+
+ return status;
+}
+
+static __always_inline unsigned long
+vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
+ unsigned long arg2)
+{
+ unsigned long dummy;
+
+ return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
+}
+
/*
* Send "start" command to the host, communicating supported version
* of the protocol.
*/
-static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
+static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
- unsigned long status, capabilities, dummy = 0;
- bool success;
-
- STATS_INC(b->stats.start);
+ unsigned long status, capabilities;
- status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);
+ status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
+ &capabilities);
switch (status) {
case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
b->capabilities = capabilities;
- success = true;
break;
case VMW_BALLOON_SUCCESS:
b->capabilities = VMW_BALLOON_BASIC_CMDS;
- success = true;
break;
default:
- success = false;
+ return -EIO;
}
/*
@@ -303,626 +501,693 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
* reason disabled, do not use 2MB pages, since otherwise the legacy
* mechanism is used with 2MB pages, causing a failure.
*/
+ b->max_page_size = VMW_BALLOON_4K_PAGE;
if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
(b->capabilities & VMW_BALLOON_BATCHED_CMDS))
- b->supported_page_sizes = 2;
- else
- b->supported_page_sizes = 1;
-
- if (!success) {
- pr_debug("%s - failed, hv returns %ld\n", __func__, status);
- STATS_INC(b->stats.start_fail);
- }
- return success;
-}
+ b->max_page_size = VMW_BALLOON_2M_PAGE;
-static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
-{
- switch (status) {
- case VMW_BALLOON_SUCCESS:
- return true;
- case VMW_BALLOON_ERROR_RESET:
- b->reset_required = true;
- /* fall through */
-
- default:
- return false;
- }
+ return 0;
}
-/*
+/**
+ * vmballoon_send_guest_id - communicate guest type to the host.
+ *
+ * @b: pointer to the balloon.
+ *
* Communicate guest type to the host so that it can adjust ballooning
* algorithm to the one most appropriate for the guest. This command
* is normally issued after sending "start" command and is part of
* standard reset sequence.
+ *
+ * Return: zero on success or appropriate error code.
*/
-static bool vmballoon_send_guest_id(struct vmballoon *b)
+static int vmballoon_send_guest_id(struct vmballoon *b)
{
- unsigned long status, dummy = 0;
-
- status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
- dummy);
-
- STATS_INC(b->stats.guest_type);
+ unsigned long status;
- if (vmballoon_check_status(b, status))
- return true;
+ status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
+ VMW_BALLOON_GUEST_ID, 0);
- pr_debug("%s - failed, hv returns %ld\n", __func__, status);
- STATS_INC(b->stats.guest_type_fail);
- return false;
+ return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
-static u16 vmballoon_page_size(bool is_2m_page)
+/**
+ * vmballoon_page_order() - return the order of the page
+ * @page_size: the size of the page.
+ *
+ * Return: the allocation order.
+ */
+static inline
+unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
- if (is_2m_page)
- return 1 << VMW_BALLOON_2M_SHIFT;
+ return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
+}
- return 1;
+/**
+ * vmballoon_page_in_frames() - returns the number of frames in a page.
+ * @page_size: the size of the page.
+ *
+ * Return: the number of 4k frames.
+ */
+static inline unsigned int
+vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
+{
+ return 1 << vmballoon_page_order(page_size);
}
-/*
- * Retrieve desired balloon size from the host.
+/**
+ * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
+ *
+ * @b: pointer to the balloon.
+ *
+ * Return: zero on success, -EINVAL if the limit does not fit in 32 bits, as
+ * required by the host-guest protocol, and -EIO if an error occurred while
+ * communicating with the host.
*/
-static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
+static int vmballoon_send_get_target(struct vmballoon *b)
{
unsigned long status;
- unsigned long target;
unsigned long limit;
- unsigned long dummy = 0;
- u32 limit32;
- /*
- * si_meminfo() is cheap. Moreover, we want to provide dynamic
- * max balloon size later. So let us call si_meminfo() every
- * iteration.
- */
- si_meminfo(&b->sysinfo);
- limit = b->sysinfo.totalram;
+ limit = totalram_pages;
/* Ensure limit fits in 32-bits */
- limit32 = (u32)limit;
- if (limit != limit32)
- return false;
-
- /* update stats */
- STATS_INC(b->stats.target);
+ if (limit != (u32)limit)
+ return -EINVAL;
- status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
- if (vmballoon_check_status(b, status)) {
- *new_target = target;
- return true;
- }
+ status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
- pr_debug("%s - failed, hv returns %ld\n", __func__, status);
- STATS_INC(b->stats.target_fail);
- return false;
+ return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
-/*
- * Notify the host about allocated page so that host can use it without
- * fear that guest will need it. Host may reject some pages, we need to
- * check the return value and maybe submit a different page.
+/**
+ * vmballoon_alloc_page_list - allocates a list of pages.
+ *
+ * @b: pointer to the balloon.
+ * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
+ * @req_n_pages: the number of requested pages.
+ *
+ * Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
+ * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
+ *
+ * Return: zero on success or error code otherwise.
*/
-static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
- unsigned int *hv_status, unsigned int *target)
+static int vmballoon_alloc_page_list(struct vmballoon *b,
+ struct vmballoon_ctl *ctl,
+ unsigned int req_n_pages)
{
- unsigned long status, dummy = 0;
- u32 pfn32;
-
- pfn32 = (u32)pfn;
- if (pfn32 != pfn)
- return -EINVAL;
-
- STATS_INC(b->stats.lock[false]);
+ struct page *page;
+ unsigned int i;
- *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
- if (vmballoon_check_status(b, status))
- return 0;
+ for (i = 0; i < req_n_pages; i++) {
+ if (ctl->page_size == VMW_BALLOON_2M_PAGE)
+ page = alloc_pages(VMW_HUGE_PAGE_ALLOC_FLAGS,
+ VMW_BALLOON_2M_ORDER);
+ else
+ page = alloc_page(VMW_PAGE_ALLOC_FLAGS);
- pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.lock_fail[false]);
- return -EIO;
-}
+ /* Update statistics */
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
+ ctl->page_size);
-static int vmballoon_send_batched_lock(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages, unsigned int *target)
-{
- unsigned long status;
- unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
+ if (page) {
+ /* Success. Add the page to the list and continue. */
+ list_add(&page->lru, &ctl->pages);
+ continue;
+ }
- STATS_INC(b->stats.lock[is_2m_pages]);
+ /* Allocation failed. Update statistics and stop. */
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
+ ctl->page_size);
+ break;
+ }
- if (is_2m_pages)
- status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
- *target);
- else
- status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
- *target);
+ ctl->n_pages = i;
- if (vmballoon_check_status(b, status))
- return 0;
-
- pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.lock_fail[is_2m_pages]);
- return 1;
+ return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}
-/*
- * Notify the host that guest intends to release given page back into
- * the pool of available (to the guest) pages.
+/**
+ * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
+ *
+ * @b: pointer for %struct vmballoon.
+ * @page: pointer for the page whose result should be handled.
+ * @page_size: size of the page.
+ * @status: status of the operation as provided by the hypervisor.
*/
-static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
- unsigned int *target)
+static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
+ enum vmballoon_page_size_type page_size,
+ unsigned long status)
{
- unsigned long status, dummy = 0;
- u32 pfn32;
-
- pfn32 = (u32)pfn;
- if (pfn32 != pfn)
- return false;
+ /* On success do nothing. The page is already on the balloon list. */
+ if (likely(status == VMW_BALLOON_SUCCESS))
+ return 0;
- STATS_INC(b->stats.unlock[false]);
+ pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
+ page_to_pfn(page), status,
+ vmballoon_page_size_names[page_size]);
- status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
- if (vmballoon_check_status(b, status))
- return true;
+ /* Error occurred */
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
+ page_size);
- pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.unlock_fail[false]);
- return false;
+ return -EIO;
}
-static bool vmballoon_send_batched_unlock(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages, unsigned int *target)
+/**
+ * vmballoon_status_page - returns the status of (un)lock operation
+ *
+ * @b: pointer to the balloon.
+ * @idx: index for the page for which the operation is performed.
+ * @p: pointer to where the page struct is returned.
+ *
+ * Following a lock or unlock operation, returns the status of the operation for
+ * an individual page. Provides the page that the operation was performed on
+ * in the @p argument.
+ *
+ * Returns: The status of a lock or unlock operation for an individual page.
+ */
+static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
+ struct page **p)
{
- unsigned long status;
- unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
-
- STATS_INC(b->stats.unlock[is_2m_pages]);
-
- if (is_2m_pages)
- status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
- *target);
- else
- status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
- *target);
+ if (static_branch_likely(&vmw_balloon_batching)) {
+ /* batching mode */
+ *p = pfn_to_page(b->batch_page[idx].pfn);
+ return b->batch_page[idx].status;
+ }
- if (vmballoon_check_status(b, status))
- return true;
+ /* non-batching mode */
+ *p = b->page;
- pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.unlock_fail[is_2m_pages]);
- return false;
+ /*
+ * If a failure occurs, the indication will be provided in the status
+ * of the entire operation, which is considered before the individual
+ * page status. So for non-batching mode, the indication is always of
+ * success.
+ */
+ return VMW_BALLOON_SUCCESS;
}
-static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
+/**
+ * vmballoon_lock_op - notifies the host about inflated/deflated pages.
+ * @b: pointer to the balloon.
+ * @num_pages: number of inflated/deflated pages.
+ * @page_size: size of the page.
+ * @op: the type of operation (lock or unlock).
+ *
+ * Notify the host about page(s) that were ballooned (or removed from the
+ * balloon) so that the host can use them without fear that the guest will
+ * need them (or can stop using them, since the VM does). The host may reject
+ * some pages; we need to check the return value and maybe submit a different
+ * page. The pages that are inflated/deflated are pointed to by @b->page.
+ *
+ * Return: result as provided by the hypervisor.
+ */
+static unsigned long vmballoon_lock_op(struct vmballoon *b,
+ unsigned int num_pages,
+ enum vmballoon_page_size_type page_size,
+ enum vmballoon_op op)
{
- if (is_2m_page)
- return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);
+ unsigned long cmd, pfn;
- return alloc_page(flags);
-}
+ lockdep_assert_held(&b->comm_lock);
-static void vmballoon_free_page(struct page *page, bool is_2m_page)
-{
- if (is_2m_page)
- __free_pages(page, VMW_BALLOON_2M_SHIFT);
- else
- __free_page(page);
+ if (static_branch_likely(&vmw_balloon_batching)) {
+ if (op == VMW_BALLOON_INFLATE)
+ cmd = page_size == VMW_BALLOON_2M_PAGE ?
+ VMW_BALLOON_CMD_BATCHED_2M_LOCK :
+ VMW_BALLOON_CMD_BATCHED_LOCK;
+ else
+ cmd = page_size == VMW_BALLOON_2M_PAGE ?
+ VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
+ VMW_BALLOON_CMD_BATCHED_UNLOCK;
+
+ pfn = PHYS_PFN(virt_to_phys(b->batch_page));
+ } else {
+ cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
+ VMW_BALLOON_CMD_UNLOCK;
+ pfn = page_to_pfn(b->page);
+
+ /* In non-batching mode, PFNs must fit in 32-bit */
+ if (unlikely(pfn != (u32)pfn))
+ return VMW_BALLOON_ERROR_PPN_INVALID;
+ }
+
+ return vmballoon_cmd(b, cmd, pfn, num_pages);
}
-/*
- * Quickly release all pages allocated for the balloon. This function is
- * called when host decides to "reset" balloon for one reason or another.
- * Unlike normal "deflate" we do not (shall not) notify host of the pages
- * being released.
+/**
+ * vmballoon_add_page - adds a page towards lock/unlock operation.
+ *
+ * @b: pointer to the balloon.
+ * @idx: index of the page to be ballooned in this batch.
+ * @p: pointer to the page that is about to be ballooned.
+ *
+ * Adds the page to be ballooned. Must be called while holding @comm_lock.
*/
-static void vmballoon_pop(struct vmballoon *b)
+static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
+ struct page *p)
{
- struct page *page, *next;
- unsigned is_2m_pages;
-
- for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
- is_2m_pages++) {
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
- u16 size_per_page = vmballoon_page_size(is_2m_pages);
-
- list_for_each_entry_safe(page, next, &page_size->pages, lru) {
- list_del(&page->lru);
- vmballoon_free_page(page, is_2m_pages);
- STATS_INC(b->stats.free[is_2m_pages]);
- b->size -= size_per_page;
- cond_resched();
- }
- }
+ lockdep_assert_held(&b->comm_lock);
- /* Clearing the batch_page unconditionally has no adverse effect */
- free_page((unsigned long)b->batch_page);
- b->batch_page = NULL;
+ if (static_branch_likely(&vmw_balloon_batching))
+ b->batch_page[idx] = (struct vmballoon_batch_entry)
+ { .pfn = page_to_pfn(p) };
+ else
+ b->page = p;
}
-/*
- * Notify the host of a ballooned page. If host rejects the page put it on the
- * refuse list, those refused page are then released at the end of the
- * inflation cycle.
+/**
+ * vmballoon_lock - lock or unlock a batch of pages.
+ *
+ * @b: pointer to the balloon.
+ * @ctl: pointer to the &struct vmballoon_ctl, which defines the operation.
+ *
+ * Notifies the host about ballooned pages (after inflation or deflation,
+ * according to @ctl). If the host rejects a page, it is moved to the @ctl
+ * refused-pages list. These refused pages are then released when moving to the
+ * next page size.
+ *
+ * Note that we neither free any page here nor put refused pages back on the
+ * ballooned pages list. Instead, they are queued for later processing. We do
+ * that for several reasons. First, we do not want to free a page under the
+ * lock. Second, it allows us to unify the handling of lock and unlock. In the
+ * inflate case, the caller will check whether there are too many refused pages
+ * and release them. Although this is not identical to the past behavior, it
+ * should not affect performance.
*/
-static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target)
+static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
- int locked, hv_status;
- struct page *page = b->page;
- struct vmballoon_page_size *page_size = &b->page_sizes[false];
-
- /* is_2m_pages can never happen as 2m pages support implies batching */
-
- locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
- target);
- if (locked) {
- STATS_INC(b->stats.refused_alloc[false]);
-
- if (locked == -EIO &&
- (hv_status == VMW_BALLOON_ERROR_RESET ||
- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
- vmballoon_free_page(page, false);
- return -EIO;
- }
-
- /*
- * Place page on the list of non-balloonable pages
- * and retry allocation, unless we already accumulated
- * too many of them, in which case take a breather.
- */
- if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
- page_size->n_refused_pages++;
- list_add(&page->lru, &page_size->refused_pages);
- } else {
- vmballoon_free_page(page, false);
- }
- return locked;
- }
-
- /* track allocated page */
- list_add(&page->lru, &page_size->pages);
+ unsigned long batch_status;
+ struct page *page;
+ unsigned int i, num_pages;
- /* update balloon size */
- b->size++;
+ num_pages = ctl->n_pages;
+ if (num_pages == 0)
+ return 0;
- return 0;
-}
+ /* communication with the host is done under the communication lock */
+ spin_lock(&b->comm_lock);
-static int vmballoon_lock_batched_page(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages, unsigned int *target)
-{
- int locked, i;
- u16 size_per_page = vmballoon_page_size(is_2m_pages);
+ i = 0;
+ list_for_each_entry(page, &ctl->pages, lru)
+ vmballoon_add_page(b, i++, page);
- locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
- target);
- if (locked > 0) {
- for (i = 0; i < num_pages; i++) {
- u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
- struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
+ batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
+ ctl->op);
- vmballoon_free_page(p, is_2m_pages);
- }
+ /*
+ * Iterate over the pages in the provided list. Since @ctl->n_pages may
+ * change during the loop, the original value was saved in @num_pages
+ * and is used to bound the loop.
+ */
+ for (i = 0; i < num_pages; i++) {
+ unsigned long status;
- return -EIO;
- }
+ status = vmballoon_status_page(b, i, &page);
- for (i = 0; i < num_pages; i++) {
- u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
- struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
+ /*
+ * Failure of the whole batch overrides the result of any
+ * individual operation.
+ */
+ if (batch_status != VMW_BALLOON_SUCCESS)
+ status = batch_status;
- locked = vmballoon_batch_get_status(b->batch_page, i);
+ /* Continue if no error happened */
+ if (!vmballoon_handle_one_result(b, page, ctl->page_size,
+ status))
+ continue;
- switch (locked) {
- case VMW_BALLOON_SUCCESS:
- list_add(&p->lru, &page_size->pages);
- b->size += size_per_page;
- break;
- case VMW_BALLOON_ERROR_PPN_PINNED:
- case VMW_BALLOON_ERROR_PPN_INVALID:
- if (page_size->n_refused_pages
- < VMW_BALLOON_MAX_REFUSED) {
- list_add(&p->lru, &page_size->refused_pages);
- page_size->n_refused_pages++;
- break;
- }
- /* Fallthrough */
- case VMW_BALLOON_ERROR_RESET:
- case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
- vmballoon_free_page(p, is_2m_pages);
- break;
- default:
- /* This should never happen */
- WARN_ON_ONCE(true);
- }
+ /*
+ * An error occurred. Move the page to the refused list and
+ * update the page counts.
+ */
+ list_move(&page->lru, &ctl->refused_pages);
+ ctl->n_pages--;
+ ctl->n_refused_pages++;
}
- return 0;
+ spin_unlock(&b->comm_lock);
+
+ return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
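
A condensed sketch of the caller contract, mirroring the inflate path later in this patch: a non-zero return signals a batch-level failure, while per-page refusals accumulate on the @ctl refused-pages list for the caller to dispose of.

	if (vmballoon_lock(b, &ctl))
		break;	/* the whole batch failed; stop inflating */

	/* too many per-page refusals: free them and move to the next size */
	if (ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
		vmballoon_release_refused_pages(b, &ctl);
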
-/*
- * Release the page allocated for the balloon. Note that we first notify
- * the host so it can make sure the page will be available for the guest
- * to use, if needed.
+/**
+ * vmballoon_release_page_list() - Releases a page list
+ *
+ * @page_list: list of pages to release.
+ * @n_pages: pointer to the number of pages.
+ * @page_size: whether the pages in the list are 2MB (or else 4KB).
+ *
+ * Releases the list of pages and zeros the number of pages.
*/
-static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target)
+static void vmballoon_release_page_list(struct list_head *page_list,
+ int *n_pages,
+ enum vmballoon_page_size_type page_size)
{
- struct page *page = b->page;
- struct vmballoon_page_size *page_size = &b->page_sizes[false];
-
- /* is_2m_pages can never happen as 2m pages support implies batching */
+ struct page *page, *tmp;
- if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
- list_add(&page->lru, &page_size->pages);
- return -EIO;
+ list_for_each_entry_safe(page, tmp, page_list, lru) {
+ list_del(&page->lru);
+ __free_pages(page, vmballoon_page_order(page_size));
}
- /* deallocate page */
- vmballoon_free_page(page, false);
- STATS_INC(b->stats.free[false]);
+ *n_pages = 0;
+}
- /* update balloon size */
- b->size--;
- return 0;
+/*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another.
+ */
+static void vmballoon_release_refused_pages(struct vmballoon *b,
+ struct vmballoon_ctl *ctl)
+{
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
+ ctl->page_size);
+
+ vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
+ ctl->page_size);
}
-static int vmballoon_unlock_batched_page(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages,
- unsigned int *target)
+/**
+ * vmballoon_change - retrieve the required balloon change
+ *
+ * @b: pointer for the balloon.
+ *
+ * Return: the required change for the balloon size. A positive number
+ * indicates inflation, a negative number indicates deflation.
+ */
+static int64_t vmballoon_change(struct vmballoon *b)
{
- int locked, i, ret = 0;
- bool hv_success;
- u16 size_per_page = vmballoon_page_size(is_2m_pages);
+ int64_t size, target;
- hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
- target);
- if (!hv_success)
- ret = -EIO;
+ size = atomic64_read(&b->size);
+ target = READ_ONCE(b->target);
- for (i = 0; i < num_pages; i++) {
- u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
- struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
+ /*
+ * We must cast first because of the different integer sizes;
+ * otherwise we might get huge positive values instead of negatives.
+ */
- locked = vmballoon_batch_get_status(b->batch_page, i);
- if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
- /*
- * That page wasn't successfully unlocked by the
- * hypervisor, re-add it to the list of pages owned by
- * the balloon driver.
- */
- list_add(&p->lru, &page_size->pages);
- } else {
- /* deallocate page */
- vmballoon_free_page(p, is_2m_pages);
- STATS_INC(b->stats.free[is_2m_pages]);
-
- /* update balloon size */
- b->size -= size_per_page;
- }
- }
+ if (b->reset_required)
+ return 0;
+
+ /* consider a 2MB slack on deflate, unless the balloon is emptied */
+ if (target < size && target != 0 &&
+ size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
+ return 0;
- return ret;
+ return target - size;
}
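
A worked example of the slack rule, assuming 4 KiB frames so that one 2MB page spans 512 frames:

	/*
	 * size = 10240, target = 10000: the 240-frame difference is below
	 * the 512-frame slack, so 0 is returned and no deflation happens.
	 * size = 10240, target = 0: the slack is bypassed when emptying,
	 * so -10240 is returned and the balloon deflates completely.
	 */
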
-/*
- * Release pages that were allocated while attempting to inflate the
- * balloon but were refused by the host for one reason or another.
+/**
+ * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
+ *
+ * @b: pointer to balloon.
+ * @pages: list of pages to enqueue.
+ * @n_pages: pointer to number of pages in list. The value is zeroed.
+ * @page_size: whether the pages are 2MB or 4KB pages.
+ *
+ * Enqueues the provided list of pages into the ballooned page list, clears the
+ * list, and zeroes the provided page count.
*/
-static void vmballoon_release_refused_pages(struct vmballoon *b,
- bool is_2m_pages)
+static void vmballoon_enqueue_page_list(struct vmballoon *b,
+ struct list_head *pages,
+ unsigned int *n_pages,
+ enum vmballoon_page_size_type page_size)
{
- struct page *page, *next;
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
+ struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
- list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
- list_del(&page->lru);
- vmballoon_free_page(page, is_2m_pages);
- STATS_INC(b->stats.refused_free[is_2m_pages]);
- }
-
- page_size->n_refused_pages = 0;
+ list_splice_init(pages, &page_size_info->pages);
+ *n_pages = 0;
}
-static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
+/**
+ * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
+ *
+ * @b: pointer to balloon.
+ * @pages: list into which the dequeued pages are moved.
+ * @n_pages: pointer to number of pages in list. The value is zeroed.
+ * @page_size: whether the pages are 2MB or 4KB pages.
+ * @n_req_pages: the number of requested pages.
+ *
+ * Dequeues the requested number of pages from the balloon for deflation. The
+ * number of dequeued pages may be lower if not enough pages of the requested
+ * size are available.
+ */
+static void vmballoon_dequeue_page_list(struct vmballoon *b,
+ struct list_head *pages,
+ unsigned int *n_pages,
+ enum vmballoon_page_size_type page_size,
+ unsigned int n_req_pages)
{
- b->page = p;
-}
+ struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
+ struct page *page, *tmp;
+ unsigned int i = 0;
-static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
- struct page *p)
-{
- vmballoon_batch_set_pa(b->batch_page, idx,
- (u64)page_to_pfn(p) << PAGE_SHIFT);
+ list_for_each_entry_safe(page, tmp, &page_size_info->pages, lru) {
+ list_move(&page->lru, pages);
+ if (++i == n_req_pages)
+ break;
+ }
+ *n_pages = i;
}
-/*
- * Inflate the balloon towards its target size. Note that we try to limit
- * the rate of allocation to make sure we are not choking the rest of the
- * system.
+/**
+ * vmballoon_inflate() - Inflate the balloon towards its target size.
+ *
+ * @b: pointer to the balloon.
*/
static void vmballoon_inflate(struct vmballoon *b)
{
- unsigned int num_pages = 0;
- int error = 0;
- gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
- bool is_2m_pages;
+ int64_t to_inflate_frames;
+ struct vmballoon_ctl ctl = {
+ .pages = LIST_HEAD_INIT(ctl.pages),
+ .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
+ .page_size = b->max_page_size,
+ .op = VMW_BALLOON_INFLATE
+ };
- pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+ while ((to_inflate_frames = vmballoon_change(b)) > 0) {
+ unsigned int to_inflate_pages, page_in_frames;
+ int alloc_error, lock_error = 0;
- /*
- * First try NOSLEEP page allocations to inflate balloon.
- *
- * If we do not throttle nosleep allocations, we can drain all
- * free pages in the guest quickly (if the balloon target is high).
- * As a side-effect, draining free pages helps to inform (force)
- * the guest to start swapping if balloon target is not met yet,
- * which is a desired behavior. However, balloon driver can consume
- * all available CPU cycles if too many pages are allocated in a
- * second. Therefore, we throttle nosleep allocations even when
- * the guest is not under memory pressure. OTOH, if we have already
- * predicted that the guest is under memory pressure, then we
- * slowdown page allocations considerably.
- */
+ VM_BUG_ON(!list_empty(&ctl.pages));
+ VM_BUG_ON(ctl.n_pages != 0);
- /*
- * Start with no sleep allocation rate which may be higher
- * than sleeping allocation rate.
- */
- is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
+ page_in_frames = vmballoon_page_in_frames(ctl.page_size);
- pr_debug("%s - goal: %d", __func__, b->target - b->size);
+ to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
+ DIV_ROUND_UP_ULL(to_inflate_frames,
+ page_in_frames));
- while (!b->reset_required &&
- b->size + num_pages * vmballoon_page_size(is_2m_pages)
- < b->target) {
- struct page *page;
+ /* Start by allocating */
+ alloc_error = vmballoon_alloc_page_list(b, &ctl,
+ to_inflate_pages);
- if (flags == VMW_PAGE_ALLOC_NOSLEEP)
- STATS_INC(b->stats.alloc[is_2m_pages]);
- else
- STATS_INC(b->stats.sleep_alloc);
-
- page = vmballoon_alloc_page(flags, is_2m_pages);
- if (!page) {
- STATS_INC(b->stats.alloc_fail[is_2m_pages]);
-
- if (is_2m_pages) {
- b->ops->lock(b, num_pages, true, &b->target);
-
- /*
- * ignore errors from locking as we now switch
- * to 4k pages and we might get different
- * errors.
- */
-
- num_pages = 0;
- is_2m_pages = false;
- continue;
- }
-
- if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
- /*
- * CANSLEEP page allocation failed, so guest
- * is under severe memory pressure. We just log
- * the event, but do not stop the inflation
- * due to its negative impact on performance.
- */
- STATS_INC(b->stats.sleep_alloc_fail);
+ /* Actually lock the pages by telling the hypervisor */
+ lock_error = vmballoon_lock(b, &ctl);
+
+ /*
+ * If an error indicates that something serious went wrong,
+ * stop the inflation.
+ */
+ if (lock_error)
+ break;
+
+ /* Update the balloon size */
+ atomic64_add(ctl.n_pages * page_in_frames, &b->size);
+
+ vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
+ ctl.page_size);
+
+ /*
+ * If allocation failed or the number of refused pages exceeds
+ * the maximum allowed, move to the next page size.
+ */
+ if (alloc_error ||
+ ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
+ if (ctl.page_size == VMW_BALLOON_4K_PAGE)
break;
- }
/*
- * NOSLEEP page allocation failed, so the guest is
- * under memory pressure. Slowing down page alloctions
- * seems to be reasonable, but doing so might actually
- * cause the hypervisor to throttle us down, resulting
- * in degraded performance. We will count on the
- * scheduler and standard memory management mechanisms
- * for now.
+ * Ignore errors from locking as we now switch to 4k
+ * pages and we might get different errors.
*/
- flags = VMW_PAGE_ALLOC_CANSLEEP;
- continue;
- }
-
- b->ops->add_page(b, num_pages++, page);
- if (num_pages == b->batch_max_pages) {
- error = b->ops->lock(b, num_pages, is_2m_pages,
- &b->target);
- num_pages = 0;
- if (error)
- break;
+ vmballoon_release_refused_pages(b, &ctl);
+ ctl.page_size--;
}
cond_resched();
}
- if (num_pages > 0)
- b->ops->lock(b, num_pages, is_2m_pages, &b->target);
-
- vmballoon_release_refused_pages(b, true);
- vmballoon_release_refused_pages(b, false);
+ /*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another,
+ * and update the statistics.
+ */
+ if (ctl.n_refused_pages != 0)
+ vmballoon_release_refused_pages(b, &ctl);
}
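
To make the page-size fallback concrete, a hypothetical trace of the inflate loop when 2MB pages are supported:

	/*
	 * round 1..n:  ctl.page_size = 2MB; allocate and lock up to
	 *              batch_max_pages huge pages per round.
	 * on trouble:  allocation fails, or n_refused_pages reaches
	 *              VMW_BALLOON_MAX_REFUSED -> release the refused pages
	 *              and do ctl.page_size-- (now 4KB).
	 * round n+1..: continue with 4KB pages; a further allocation
	 *              failure breaks out, as no smaller size exists.
	 */
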
-/*
+/**
+ * vmballoon_deflate() - Decrease the size of the balloon.
+ *
+ * @b: pointer to the balloon
+ * @n_frames: the number of frames to deflate. If zero, automatically
+ * calculated according to the target size.
+ * @coordinated: whether to coordinate with the host
+ *
* Decrease the size of the balloon allowing guest to use more memory.
+ *
+ * Return: The number of deflated frames (i.e., basic page size units)
*/
-static void vmballoon_deflate(struct vmballoon *b)
+static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
+ bool coordinated)
{
- unsigned is_2m_pages;
-
- pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+ unsigned long deflated_frames = 0;
+ unsigned long tried_frames = 0;
+ struct vmballoon_ctl ctl = {
+ .pages = LIST_HEAD_INIT(ctl.pages),
+ .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
+ .page_size = VMW_BALLOON_4K_PAGE,
+ .op = VMW_BALLOON_DEFLATE
+ };
/* free pages to reach target */
- for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
- is_2m_pages++) {
- struct page *page, *next;
- unsigned int num_pages = 0;
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
-
- list_for_each_entry_safe(page, next, &page_size->pages, lru) {
- if (b->reset_required ||
- (b->target > 0 &&
- b->size - num_pages
- * vmballoon_page_size(is_2m_pages)
- < b->target + vmballoon_page_size(true)))
- break;
+ while (true) {
+ unsigned int to_deflate_pages, n_unlocked_frames;
+ unsigned int page_in_frames;
+ int64_t to_deflate_frames;
+ bool deflated_all;
+
+ page_in_frames = vmballoon_page_in_frames(ctl.page_size);
+
+ VM_BUG_ON(!list_empty(&ctl.pages));
+ VM_BUG_ON(ctl.n_pages);
+ VM_BUG_ON(!list_empty(&ctl.refused_pages));
+ VM_BUG_ON(ctl.n_refused_pages);
+
+ /*
+ * If a specific number of frames was requested, we try to
+ * deflate that many frames. Otherwise, deflation is performed
+ * according to the target and balloon size.
+ */
+ to_deflate_frames = n_frames ? n_frames - tried_frames :
+ -vmballoon_change(b);
+
+ /* break if no work to do */
+ if (to_deflate_frames <= 0)
+ break;
+
+ /*
+ * Calculate the number of pages to deflate based on the current
+ * page size, but limit them to a single batch.
+ */
+ to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
+ DIV_ROUND_UP_ULL(to_deflate_frames,
+ page_in_frames));
+
+ /* First take the pages from the balloon pages. */
+ vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
+ ctl.page_size, to_deflate_pages);
+
+ /*
+ * Before any pages are moved to the refused list, count their
+ * frames as frames that we tried to deflate.
+ */
+ tried_frames += ctl.n_pages * page_in_frames;
+
+ /*
+ * Unlock the pages by communicating with the hypervisor if the
+ * communication is coordinated (i.e., not pop). We ignore the
+ * return code. Instead, we check whether all the pages were
+ * unlocked. If not, we move to the next page size and will
+ * eventually try again later.
+ */
+ if (coordinated)
+ vmballoon_lock(b, &ctl);
+
+ /*
+ * Check if we deflated enough. We will move to the next page
+ * size if we did not manage to do so. This calculation takes
+ * place now, as once the pages are released, the number of
+ * pages is zeroed.
+ */
+ deflated_all = (ctl.n_pages == to_deflate_pages);
+
+ /* Update local and global counters */
+ n_unlocked_frames = ctl.n_pages * page_in_frames;
+ atomic64_sub(n_unlocked_frames, &b->size);
+ deflated_frames += n_unlocked_frames;
- list_del(&page->lru);
- b->ops->add_page(b, num_pages++, page);
+ vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
+ ctl.page_size, ctl.n_pages);
- if (num_pages == b->batch_max_pages) {
- int error;
+ /* free the ballooned pages */
+ vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
+ ctl.page_size);
- error = b->ops->unlock(b, num_pages,
- is_2m_pages, &b->target);
- num_pages = 0;
- if (error)
- return;
- }
+ /* Return the refused pages to the ballooned list. */
+ vmballoon_enqueue_page_list(b, &ctl.refused_pages,
+ &ctl.n_refused_pages,
+ ctl.page_size);
- cond_resched();
+ /* If we failed to unlock all the pages, move to the next size. */
+ if (!deflated_all) {
+ if (ctl.page_size == b->max_page_size)
+ break;
+ ctl.page_size++;
}
- if (num_pages > 0)
- b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
+ cond_resched();
}
-}
-static const struct vmballoon_ops vmballoon_basic_ops = {
- .add_page = vmballoon_add_page,
- .lock = vmballoon_lock_page,
- .unlock = vmballoon_unlock_page
-};
+ return deflated_frames;
+}
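
Both deflation modes appear in this patch; schematically, the two call sites are:

	vmballoon_deflate(b, 0, true);	   /* worker: toward the target,
					    * coordinated with the host */
	vmballoon_deflate(b, size, false); /* pop/reset: free @size frames
					    * without notifying the host */
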
-static const struct vmballoon_ops vmballoon_batched_ops = {
- .add_page = vmballoon_add_batched_page,
- .lock = vmballoon_lock_batched_page,
- .unlock = vmballoon_unlock_batched_page
-};
+/**
+ * vmballoon_deinit_batching - disables batching mode.
+ *
+ * @b: pointer to &struct vmballoon.
+ *
+ * Disables batching by deallocating the page used for communication with the
+ * hypervisor and disabling the static key to indicate that batching is off.
+ */
+static void vmballoon_deinit_batching(struct vmballoon *b)
+{
+ free_page((unsigned long)b->batch_page);
+ b->batch_page = NULL;
+ static_branch_disable(&vmw_balloon_batching);
+ b->batch_max_pages = 1;
+}
-static bool vmballoon_init_batching(struct vmballoon *b)
+/**
+ * vmballoon_init_batching - enable batching mode.
+ *
+ * @b: pointer to &struct vmballoon.
+ *
+ * Enables batching by allocating a page for communication with the hypervisor
+ * and enabling the static key to use batching.
+ *
+ * Return: zero on success or an appropriate error code.
+ */
+static int vmballoon_init_batching(struct vmballoon *b)
{
struct page *page;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
- return false;
+ return -ENOMEM;
b->batch_page = page_address(page);
- return true;
+ b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
+
+ static_branch_enable(&vmw_balloon_batching);
+
+ return 0;
}
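
As a sanity check of the capacity computed above, assuming 4 KiB pages and the 8-byte struct vmballoon_batch_entry implied by its 64-bit PFN-plus-status layout (the struct definition is outside this hunk):

	/*
	 * batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry)
	 *		   = 4096 / 8 = 512 entries per hypercall
	 */
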
/*
@@ -932,7 +1197,7 @@ static void vmballoon_doorbell(void *client_data)
{
struct vmballoon *b = client_data;
- STATS_INC(b->stats.doorbell);
+ vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}
@@ -942,11 +1207,8 @@ static void vmballoon_doorbell(void *client_data)
*/
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
- int error;
-
- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
- VMCI_INVALID_ID, error);
- STATS_INC(b->stats.doorbell_unset);
+ vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+ VMCI_INVALID_ID, VMCI_INVALID_ID);
if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
vmci_doorbell_destroy(b->vmci_doorbell);
@@ -954,12 +1216,19 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
}
}
-/*
- * Initialize vmci doorbell, to get notified as soon as balloon changes
+/**
+ * vmballoon_vmci_init - Initialize vmci doorbell.
+ *
+ * @b: pointer to the balloon.
+ *
+ * Return: zero on success or when the wakeup command is not supported; an
+ * error code otherwise.
+ *
+ * Initialize the vmci doorbell to get notified as soon as the balloon target
+ * changes.
*/
static int vmballoon_vmci_init(struct vmballoon *b)
{
- unsigned long error, dummy;
+ unsigned long error;
if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
return 0;
@@ -971,10 +1240,9 @@ static int vmballoon_vmci_init(struct vmballoon *b)
if (error != VMCI_SUCCESS)
goto fail;
- error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
- b->vmci_doorbell.resource, dummy);
-
- STATS_INC(b->stats.doorbell_set);
+ error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+ b->vmci_doorbell.context,
+ b->vmci_doorbell.resource, NULL);
if (error != VMW_BALLOON_SUCCESS)
goto fail;
@@ -985,6 +1253,23 @@ fail:
return -EIO;
}
+/**
+ * vmballoon_pop - Quickly release all pages allocated for the balloon.
+ *
+ * @b: pointer to the balloon.
+ *
+ * This function is called when the host decides to "reset" the balloon for one
+ * reason or another. Unlike a normal "deflate", we do not (and shall not)
+ * notify the host of the pages being released.
+ */
+static void vmballoon_pop(struct vmballoon *b)
+{
+ unsigned long size;
+
+ while ((size = atomic64_read(&b->size)))
+ vmballoon_deflate(b, size, false);
+}
+
/*
* Perform standard reset sequence by popping the balloon (in case it
* is not empty) and then restarting protocol. This operation normally
@@ -994,18 +1279,18 @@ static void vmballoon_reset(struct vmballoon *b)
{
int error;
+ down_write(&b->conf_sem);
+
vmballoon_vmci_cleanup(b);
/* free all pages, skipping monitor unlock */
vmballoon_pop(b);
- if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
+ if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
return;
if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
- b->ops = &vmballoon_batched_ops;
- b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
- if (!vmballoon_init_batching(b)) {
+ if (vmballoon_init_batching(b)) {
/*
* We failed to initialize batching, inform the monitor
* about it by sending a null capability.
@@ -1016,52 +1301,70 @@ static void vmballoon_reset(struct vmballoon *b)
return;
}
} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
- b->ops = &vmballoon_basic_ops;
- b->batch_max_pages = 1;
+ vmballoon_deinit_batching(b);
}
+ vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
b->reset_required = false;
error = vmballoon_vmci_init(b);
if (error)
pr_err("failed to initialize vmci doorbell\n");
- if (!vmballoon_send_guest_id(b))
+ if (vmballoon_send_guest_id(b))
pr_err("failed to send guest ID to the host\n");
+
+ up_write(&b->conf_sem);
}
-/*
- * Balloon work function: reset protocol, if needed, get the new size and
- * adjust balloon as needed. Repeat in 1 sec.
+/**
+ * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
+ *
+ * @work: pointer to the &work_struct which is provided by the workqueue.
+ *
+ * Resets the protocol if needed, gets the new target size and adjusts the
+ * balloon as needed. Repeats every second.
*/
static void vmballoon_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
- unsigned int target;
-
- STATS_INC(b->stats.timer);
+ int64_t change = 0;
if (b->reset_required)
vmballoon_reset(b);
- if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
- /* update target, adjust size */
- b->target = target;
+ down_read(&b->conf_sem);
+
+ /*
+ * Update the stats while holding the semaphore to ensure that
+ * @stats_enabled is consistent with whether the stats are actually
+ * enabled
+ */
+ vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
+
+ if (!vmballoon_send_get_target(b))
+ change = vmballoon_change(b);
+
+ if (change != 0) {
+ pr_debug("%s - size: %llu, target %lu\n", __func__,
+ atomic64_read(&b->size), READ_ONCE(b->target));
- if (b->size < target)
+ if (change > 0)
vmballoon_inflate(b);
- else if (target == 0 ||
- b->size > target + vmballoon_page_size(true))
- vmballoon_deflate(b);
+ else /* (change < 0) */
+ vmballoon_deflate(b, 0, true);
}
+ up_read(&b->conf_sem);
+
/*
* We are using a freezable workqueue so that balloon operations are
* stopped while the system transitions to/from sleep/hibernation.
*/
queue_delayed_work(system_freezable_wq,
dwork, round_jiffies_relative(HZ));
}
/*
@@ -1069,64 +1372,100 @@ static void vmballoon_work(struct work_struct *work)
*/
#ifdef CONFIG_DEBUG_FS
+static const char * const vmballoon_stat_page_names[] = {
+ [VMW_BALLOON_PAGE_STAT_ALLOC] = "alloc",
+ [VMW_BALLOON_PAGE_STAT_ALLOC_FAIL] = "allocFail",
+ [VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC] = "errAlloc",
+ [VMW_BALLOON_PAGE_STAT_REFUSED_FREE] = "errFree",
+ [VMW_BALLOON_PAGE_STAT_FREE] = "free"
+};
+
+static const char * const vmballoon_stat_names[] = {
+ [VMW_BALLOON_STAT_TIMER] = "timer",
+ [VMW_BALLOON_STAT_DOORBELL] = "doorbell",
+ [VMW_BALLOON_STAT_RESET] = "reset",
+};
+
+static int vmballoon_enable_stats(struct vmballoon *b)
+{
+ int r = 0;
+
+ down_write(&b->conf_sem);
+
+ /* did we somehow race with another reader which enabled stats? */
+ if (b->stats)
+ goto out;
+
+ b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
+
+ if (!b->stats) {
+ /* allocation failed */
+ r = -ENOMEM;
+ goto out;
+ }
+ static_key_enable(&balloon_stat_enabled.key);
+out:
+ up_write(&b->conf_sem);
+ return r;
+}
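
The stat helpers used throughout (vmballoon_stats_gen_inc() and friends) are defined outside this hunk; a plausible sketch, guarded by the same balloon_stat_enabled static key so that disabled statistics cost only a patched-out branch, could look like:

static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
					   enum vmballoon_stat_general stat)
{
	/* assumes: static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled); */
	if (static_branch_unlikely(&balloon_stat_enabled))
		atomic64_inc(&b->stats->general_stat[stat]);
}
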
+
+/**
+ * vmballoon_debug_show - shows statistics of balloon operations.
+ * @f: pointer to the &struct seq_file.
+ * @offset: ignored.
+ *
+ * Provides the statistics that can be accessed in vmmemctl in debugfs.
+ * To avoid the overhead (mainly that of memory) of collecting the statistics,
+ * statistics are only collected after the first time the counters are read.
+ *
+ * Return: zero on success or an error code.
+ */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
struct vmballoon *b = f->private;
- struct vmballoon_stats *stats = &b->stats;
+ int i, j;
+
+ /* enables stats if they are disabled */
+ if (!b->stats) {
+ int r = vmballoon_enable_stats(b);
+
+ if (r)
+ return r;
+ }
/* format capabilities info */
- seq_printf(f,
- "balloon capabilities: %#4x\n"
- "used capabilities: %#4lx\n"
- "is resetting: %c\n",
- VMW_BALLOON_CAPABILITIES, b->capabilities,
- b->reset_required ? 'y' : 'n');
+ seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
+ VMW_BALLOON_CAPABILITIES);
+ seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
+ seq_printf(f, "%-22s: %16s\n", "is resetting",
+ b->reset_required ? "y" : "n");
/* format size info */
- seq_printf(f,
- "target: %8d pages\n"
- "current: %8d pages\n",
- b->target, b->size);
-
- seq_printf(f,
- "\n"
- "timer: %8u\n"
- "doorbell: %8u\n"
- "start: %8u (%4u failed)\n"
- "guestType: %8u (%4u failed)\n"
- "2m-lock: %8u (%4u failed)\n"
- "lock: %8u (%4u failed)\n"
- "2m-unlock: %8u (%4u failed)\n"
- "unlock: %8u (%4u failed)\n"
- "target: %8u (%4u failed)\n"
- "prim2mAlloc: %8u (%4u failed)\n"
- "primNoSleepAlloc: %8u (%4u failed)\n"
- "primCanSleepAlloc: %8u (%4u failed)\n"
- "prim2mFree: %8u\n"
- "primFree: %8u\n"
- "err2mAlloc: %8u\n"
- "errAlloc: %8u\n"
- "err2mFree: %8u\n"
- "errFree: %8u\n"
- "doorbellSet: %8u\n"
- "doorbellUnset: %8u\n",
- stats->timer,
- stats->doorbell,
- stats->start, stats->start_fail,
- stats->guest_type, stats->guest_type_fail,
- stats->lock[true], stats->lock_fail[true],
- stats->lock[false], stats->lock_fail[false],
- stats->unlock[true], stats->unlock_fail[true],
- stats->unlock[false], stats->unlock_fail[false],
- stats->target, stats->target_fail,
- stats->alloc[true], stats->alloc_fail[true],
- stats->alloc[false], stats->alloc_fail[false],
- stats->sleep_alloc, stats->sleep_alloc_fail,
- stats->free[true],
- stats->free[false],
- stats->refused_alloc[true], stats->refused_alloc[false],
- stats->refused_free[true], stats->refused_free[false],
- stats->doorbell_set, stats->doorbell_unset);
+ seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
+ seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
+
+ for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
+ if (vmballoon_cmd_names[i] == NULL)
+ continue;
+
+ seq_printf(f, "%-22s: %16llu (%llu failed)\n",
+ vmballoon_cmd_names[i],
+ atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
+ atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
+ }
+
+ for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
+ seq_printf(f, "%-22s: %16llu\n",
+ vmballoon_stat_names[i],
+ atomic64_read(&b->stats->general_stat[i]));
+
+ for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
+ for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
+ seq_printf(f, "%-18s(%s): %16llu\n",
+ vmballoon_stat_page_names[i],
+ vmballoon_page_size_names[j],
+ atomic64_read(&b->stats->page_stat[i][j]));
+ }
return 0;
}
@@ -1161,7 +1500,10 @@ static int __init vmballoon_debugfs_init(struct vmballoon *b)
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
+ static_key_disable(&balloon_stat_enabled.key);
debugfs_remove(b->dbg_entry);
+ kfree(b->stats);
+ b->stats = NULL;
}
#else
@@ -1179,8 +1521,9 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
static int __init vmballoon_init(void)
{
+ enum vmballoon_page_size_type page_size;
int error;
- unsigned is_2m_pages;
+
/*
* Check if we are running on VMware's hypervisor and bail out
* if we are not.
@@ -1188,11 +1531,10 @@ static int __init vmballoon_init(void)
if (x86_hyper_type != X86_HYPER_VMWARE)
return -ENODEV;
- for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
- is_2m_pages++) {
- INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
- INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
- }
+ for (page_size = VMW_BALLOON_4K_PAGE;
+ page_size <= VMW_BALLOON_LAST_SIZE; page_size++)
+ INIT_LIST_HEAD(&balloon.page_sizes[page_size].pages);
+
INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
@@ -1200,6 +1542,8 @@ static int __init vmballoon_init(void)
if (error)
return error;
+ spin_lock_init(&balloon.comm_lock);
+ init_rwsem(&balloon.conf_sem);
balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
balloon.batch_page = NULL;
balloon.page = NULL;
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index d7eaf1eb11e7..003bfba40758 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.5.0-k");
+MODULE_VERSION("1.1.6.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 83e0c95d20a4..edfffc9699ba 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -15,7 +15,6 @@
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
-#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
@@ -448,15 +447,12 @@ static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
struct vmci_handle handle;
int vmci_status;
int __user *retptr;
- u32 cid;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
- cid = vmci_ctx_get_id(vmci_host_dev->context);
-
if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
struct vmci_qp_alloc_info_vmvm alloc_info;
struct vmci_qp_alloc_info_vmvm __user *info = uptr;
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
index 1ab6e8737a5f..da1ee2e1ba99 100644
--- a/drivers/misc/vmw_vmci/vmci_resource.c
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -57,7 +57,8 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
if (r->type == type &&
rid == handle.resource &&
- (cid == handle.context || cid == VMCI_INVALID_ID)) {
+ (cid == handle.context || cid == VMCI_INVALID_ID ||
+ handle.context == VMCI_INVALID_ID)) {
resource = r;
break;
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 6d3221134927..7968c644ad86 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1964,8 +1964,6 @@ static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
if (!alx_reset_mac(hw))
rc = PCI_ERS_RESULT_RECOVERED;
out:
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
rtnl_unlock();
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 122fdb80a789..bbb247116045 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8793,13 +8793,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
return result;
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err); /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 40093d88353f..95309b27c7d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14380,14 +14380,6 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
- /* If AER, perform cleanup of the PCIe registers */
- if (bp->flags & AER_ENABLED) {
- if (pci_cleanup_aer_uncorrect_error_status(pdev))
- BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
- else
- DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
- }
-
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index de987ccdcf1f..dd85d790f638 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -10354,13 +10354,6 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err); /* non-fatal, continue */
- }
-
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 35564a8a48f9..a6cbaca37e94 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -341,7 +341,7 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv)
if (!compat)
return NULL;
- priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ priv->mdio_dn = of_get_compatible_child(dn, compat);
kfree(compat);
if (!priv->mdio_dn) {
dev_err(kdev, "unable to find MDIO bus node\n");
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2de0590a62c4..05a46926016a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4767,7 +4767,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 267322693ed5..9a6065a3fa46 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -520,10 +520,20 @@ setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
if (!txq_info)
return -ENOMEM;
+ if (uld_type == CXGB4_ULD_CRYPTO) {
+ i = min_t(int, adap->vres.ncrypto_fc,
+ num_online_cpus());
+ txq_info->ntxq = rounddown(i, adap->params.nports);
+ if (txq_info->ntxq <= 0) {
+ dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
+ kfree(txq_info);
+ return -EINVAL;
+ }
- i = min_t(int, uld_info->ntxq, num_online_cpus());
- txq_info->ntxq = roundup(i, adap->params.nports);
-
+ } else {
+ i = min_t(int, uld_info->ntxq, num_online_cpus());
+ txq_info->ntxq = roundup(i, adap->params.nports);
+ }
txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
GFP_KERNEL);
if (!txq_info->uldtxq) {
@@ -546,11 +556,14 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
struct cxgb4_lld_info *lli)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int tx_uld_type = TX_ULD(uld_type);
+ struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];
lli->rxq_ids = rxq_info->rspq_id;
lli->nrxq = rxq_info->nrxq;
lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
lli->nciq = rxq_info->nciq;
+ lli->ntxq = txq_info->ntxq;
}
int t4_uld_mem_alloc(struct adapter *adap)
@@ -634,7 +647,6 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->ports = adap->port;
lld->vr = &adap->vres;
lld->mtus = adap->params.mtus;
- lld->ntxq = adap->sge.ofldqsets;
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index bff74752cef1..c5ad7a4f4d83 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -6146,7 +6146,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
if (status)
return PCI_ERS_RESULT_DISCONNECT;
- pci_cleanup_aer_uncorrect_error_status(pdev);
be_clear_error(adapter, BE_CLEAR_ALL);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index c0f9faca70c4..16a73bd9f4cb 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6854,8 +6854,6 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
return result;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index c859ababeed5..02345d381303 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2440,8 +2440,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
return result;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 330bafe6a689..bc71a21c1dc2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -14552,7 +14552,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
pci_ers_result_t result;
- int err;
u32 reg;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -14573,14 +14572,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_info(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 0d29df8accd8..5df88ad8ac81 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -9086,7 +9086,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
pci_ers_result_t result;
- int err;
if (pci_enable_device_mem(pdev)) {
dev_err(&pdev->dev,
@@ -9110,14 +9109,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 51268772a999..0049a2becd7e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -11322,8 +11322,6 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
/* Free device reference count */
pci_dev_put(vfdev);
}
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
/*
@@ -11373,7 +11371,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
pci_ers_result_t result;
- int err;
if (pci_enable_device_mem(pdev)) {
e_err(probe, "Cannot re-enable PCI device after reset.\n");
@@ -11393,13 +11390,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- e_dev_err("pci_cleanup_aer_uncorrect_error_status "
- "failed 0x%0x\n", err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 59c70be22a84..7d9819d80e44 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1784,11 +1784,6 @@ static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
-static void netxen_io_resume(struct pci_dev *pdev)
-{
- pci_cleanup_aer_uncorrect_error_status(pdev);
-}
-
static void netxen_nic_shutdown(struct pci_dev *pdev)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
@@ -3465,7 +3460,6 @@ netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
static const struct pci_error_handlers netxen_err_handler = {
.error_detected = netxen_io_error_detected,
.slot_reset = netxen_io_slot_reset,
- .resume = netxen_io_resume,
};
static struct pci_driver netxen_driver = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index a79d84f99102..2a533280b124 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -4233,7 +4233,6 @@ static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
qlcnic_83xx_aer_start_poll_work(adapter);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index dbd48012224f..d42ba2293d8c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -3930,7 +3930,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
&adapter->state))
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3d0dd39c289e..98fe7e762e17 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3821,7 +3821,6 @@ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
struct efx_nic *efx = pci_get_drvdata(pdev);
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int rc;
if (pci_enable_device(pdev)) {
netif_err(efx, hw, efx->net_dev,
@@ -3829,13 +3828,6 @@ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
status = PCI_ERS_RESULT_DISCONNECT;
}
- rc = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
- /* Non-fatal error. Continue. */
- }
-
return status;
}
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 03e2455c502e..8b1f94d7a6c5 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -3160,7 +3160,6 @@ static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
{
struct ef4_nic *efx = pci_get_drvdata(pdev);
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int rc;
if (pci_enable_device(pdev)) {
netif_err(efx, hw, efx->net_dev,
@@ -3168,13 +3167,6 @@ static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
status = PCI_ERS_RESULT_DISCONNECT;
}
- rc = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
- /* Non-fatal error. Continue. */
- }
-
return status;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index f9a61f90cfbc..0f660af01a4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -714,8 +714,9 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
return -ENODEV;
}
- mdio_internal = of_find_compatible_node(mdio_mux, NULL,
+ mdio_internal = of_get_compatible_child(mdio_mux,
"allwinner,sun8i-h3-mdio-internal");
+ of_node_put(mdio_mux);
if (!mdio_internal) {
dev_err(priv->device, "Cannot get internal_mdio node\n");
return -ENODEV;
@@ -729,13 +730,20 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
gmac->rst_ephy = of_reset_control_get_exclusive(iphynode, NULL);
if (IS_ERR(gmac->rst_ephy)) {
ret = PTR_ERR(gmac->rst_ephy);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(iphynode);
+ of_node_put(mdio_internal);
return ret;
+ }
continue;
}
dev_info(priv->device, "Found internal PHY node\n");
+ of_node_put(iphynode);
+ of_node_put(mdio_internal);
return 0;
}
+
+ of_node_put(mdio_internal);
return -ENODEV;
}
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index a205750b431b..7ccdc62c6052 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -95,7 +95,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
- struct crypto_skcipher *arc4;
+ struct crypto_sync_skcipher *arc4;
struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
@@ -155,15 +155,15 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
struct scatterlist sg_in[1], sg_out[1];
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
get_new_key_from_sha(state);
if (!initial_key) {
- crypto_skcipher_setkey(state->arc4, state->sha1_digest,
- state->keylen);
+ crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest,
+ state->keylen);
sg_init_table(sg_in, 1);
sg_init_table(sg_out, 1);
setup_sg(sg_in, state->sha1_digest, state->keylen);
@@ -181,7 +181,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
- crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen);
+ crypto_sync_skcipher_setkey(state->arc4, state->session_key,
+ state->keylen);
skcipher_request_zero(req);
}
@@ -203,7 +204,7 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out;
- state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(state->arc4)) {
state->arc4 = NULL;
goto out_free;
@@ -250,7 +251,7 @@ out_free:
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
}
- crypto_free_skcipher(state->arc4);
+ crypto_free_sync_skcipher(state->arc4);
kfree(state);
out:
return NULL;
@@ -266,7 +267,7 @@ static void mppe_free(void *arg)
kfree(state->sha1_digest);
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
- crypto_free_skcipher(state->arc4);
+ crypto_free_sync_skcipher(state->arc4);
kfree(state);
}
}
@@ -366,7 +367,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
int proto;
int err;
struct scatterlist sg_in[1], sg_out[1];
@@ -426,7 +427,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
setup_sg(sg_in, ibuf, isize);
setup_sg(sg_out, obuf, osize);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
err = crypto_skcipher_encrypt(req);
@@ -480,7 +481,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
struct scatterlist sg_in[1], sg_out[1];
@@ -615,7 +616,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
setup_sg(sg_in, ibuf, 1);
setup_sg(sg_out, obuf, 1);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
if (crypto_skcipher_decrypt(req)) {
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 91162f8e0366..9a22056e8d9e 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
struct device_node *matched_node;
int ret;
- matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart");
+ matched_node = of_get_compatible_child(node, "marvell,nfc-uart");
if (!matched_node) {
- matched_node = of_find_compatible_node(node, NULL,
- "mrvl,nfc-uart");
+ matched_node = of_get_compatible_child(node, "mrvl,nfc-uart");
if (!matched_node)
return -ENODEV;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 8aae6dcc839f..f1fb39921236 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -54,12 +54,6 @@ static int to_nd_device_type(struct device *dev)
static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- /*
- * Ensure that region devices always have their numa node set as
- * early as possible.
- */
- if (is_nd_region(dev))
- set_dev_node(dev, to_nd_region(dev)->numa_node);
return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
to_nd_device_type(dev));
}
@@ -488,6 +482,8 @@ static void nd_async_device_register(void *d, async_cookie_t cookie)
put_device(dev);
}
put_device(dev);
+ if (dev->parent)
+ put_device(dev->parent);
}
static void nd_async_device_unregister(void *d, async_cookie_t cookie)
@@ -506,7 +502,19 @@ void __nd_device_register(struct device *dev)
{
if (!dev)
return;
+
+ /*
+ * Ensure that region devices always have their NUMA node set as
+ * early as possible. This way we are able to make certain that
+ * any memory associated with the creation of the region, as well
+ * as the creation itself, is associated with the correct node.
+ */
+ if (is_nd_region(dev))
+ set_dev_node(dev, to_nd_region(dev)->numa_node);
+
dev->bus = &nvdimm_bus_type;
+ if (dev->parent)
+ get_device(dev->parent);
get_device(dev);
async_schedule_domain(nd_async_device_register, dev,
&nd_async_domain);
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 6c8fb7590838..9899c97138a3 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -75,7 +75,7 @@ static int nvdimm_probe(struct device *dev)
* DIMM capacity. We fail the dimm probe to prevent regions from
* attempting to parse the label area.
*/
- rc = nvdimm_init_config_data(ndd);
+ rc = nd_label_data_init(ndd);
if (rc == -EACCES)
nvdimm_set_locked(dev);
if (rc)
@@ -84,10 +84,6 @@ static int nvdimm_probe(struct device *dev)
dev_dbg(dev, "config data size: %d\n", ndd->nsarea.config_size);
nvdimm_bus_lock(dev);
- ndd->ns_current = nd_label_validate(ndd);
- ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
- nd_label_copy(ndd, to_next_namespace_index(ndd),
- to_current_namespace_index(ndd));
if (ndd->ns_current >= 0) {
rc = nd_label_reserve_dpa(ndd);
if (rc == 0)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 863cabc35215..6c3de2317390 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -85,56 +85,48 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
return cmd_rc;
}
-int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
+int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
+ size_t offset, size_t len)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
int rc = validate_dimm(ndd), cmd_rc = 0;
struct nd_cmd_get_config_data_hdr *cmd;
- struct nvdimm_bus_descriptor *nd_desc;
- u32 max_cmd_size, config_size;
- size_t offset;
+ size_t max_cmd_size, buf_offset;
if (rc)
return rc;
- if (ndd->data)
- return 0;
-
- if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
- || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
- dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
- ndd->nsarea.max_xfer, ndd->nsarea.config_size);
+ if (offset + len > ndd->nsarea.config_size)
return -ENXIO;
- }
- ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
- if (!ndd->data)
- return -ENOMEM;
-
- max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
- cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
+ max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
+ cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
- nd_desc = nvdimm_bus->nd_desc;
- for (config_size = ndd->nsarea.config_size, offset = 0;
- config_size; config_size -= cmd->in_length,
- offset += cmd->in_length) {
- cmd->in_length = min(config_size, max_cmd_size);
- cmd->in_offset = offset;
+ for (buf_offset = 0; len;
+ len -= cmd->in_length, buf_offset += cmd->in_length) {
+ size_t cmd_size;
+
+ cmd->in_offset = offset + buf_offset;
+ cmd->in_length = min(max_cmd_size, len);
+
+ cmd_size = sizeof(*cmd) + cmd->in_length;
+
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_GET_CONFIG_DATA, cmd,
- cmd->in_length + sizeof(*cmd), &cmd_rc);
+ ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
rc = cmd_rc;
break;
}
- memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
+
+ /* out_buf should be valid, copy it into our output buffer */
+ memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
}
- dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
- kfree(cmd);
+ kvfree(cmd);
return rc;
}
@@ -151,15 +143,11 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
if (rc)
return rc;
- if (!ndd->data)
- return -ENXIO;
-
if (offset + len > ndd->nsarea.config_size)
return -ENXIO;
- max_cmd_size = min_t(u32, PAGE_SIZE, len);
- max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
- cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
+ max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
+ cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -183,7 +171,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
break;
}
}
- kfree(cmd);
+ kvfree(cmd);
return rc;
}
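
After this refactor both config-data helpers share one shape: each command is bounded by min(len, max_xfer) rather than being capped at PAGE_SIZE (hence kvzalloc() for the now potentially large command buffer), and the requested range is walked in chunks. The loop skeleton, with the DSM round trip abstracted behind a callback:

    #include <linux/kernel.h>

    /* read_chunk() stands in for the ND_CMD_GET_CONFIG_DATA round trip. */
    static int example_chunked_read(void *buf, size_t offset, size_t len,
                                    size_t max_xfer,
                                    int (*read_chunk)(size_t off, void *dst,
                                                      size_t n))
    {
            size_t buf_offset, n;
            int rc = 0;

            for (buf_offset = 0; len; len -= n, buf_offset += n) {
                    n = min(max_xfer, len);
                    rc = read_chunk(offset + buf_offset, buf + buf_offset, n);
                    if (rc)
                            break;
            }
            return rc;
    }
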
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 1d28cd656536..750dbaa6ce82 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -75,7 +75,8 @@ size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
/*
* Per UEFI 2.7, the minimum size of the Label Storage Area is large
* enough to hold 2 index blocks and 2 labels. The minimum index
- * block size is 256 bytes, and the minimum label size is 256 bytes.
+ * block size is 256 bytes. The label size is 128 for namespaces
+ * prior to version 1.2 and at minimum 256 for version 1.2 and later.
*/
nslot = nvdimm_num_label_slots(ndd);
space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
@@ -183,6 +184,13 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
__le64_to_cpu(nsindex[i]->otheroff));
continue;
}
+ if (__le64_to_cpu(nsindex[i]->labeloff)
+ != 2 * sizeof_namespace_index(ndd)) {
+ dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
+ i, (unsigned long long)
+ __le64_to_cpu(nsindex[i]->labeloff));
+ continue;
+ }
size = __le64_to_cpu(nsindex[i]->mysize);
if (size > sizeof_namespace_index(ndd)
@@ -227,7 +235,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
return -1;
}
-int nd_label_validate(struct nvdimm_drvdata *ndd)
+static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
/*
* In order to probe for and validate namespace index blocks we
@@ -250,12 +258,12 @@ int nd_label_validate(struct nvdimm_drvdata *ndd)
return -1;
}
-void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
- struct nd_namespace_index *src)
+static void nd_label_copy(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_index *dst,
+ struct nd_namespace_index *src)
{
- if (dst && src)
- /* pass */;
- else
+ /* just exit if either destination or source is NULL */
+ if (!dst || !src)
return;
memcpy(dst, src, sizeof_namespace_index(ndd));
@@ -410,6 +418,128 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
return 0;
}
+int nd_label_data_init(struct nvdimm_drvdata *ndd)
+{
+ size_t config_size, read_size, max_xfer, offset;
+ struct nd_namespace_index *nsindex;
+ unsigned int i;
+ int rc = 0;
+ u32 nslot;
+
+ if (ndd->data)
+ return 0;
+
+ if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
+ dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
+ ndd->nsarea.max_xfer, ndd->nsarea.config_size);
+ return -ENXIO;
+ }
+
+ /*
+ * We need to determine the maximum index area as this is the section
+ * we must read and validate before we can start processing labels.
+ *
+	 * If the area is too small to contain the two index blocks and
+	 * two labels then we abort.
+ *
+ * Start at a label size of 128 as this should result in the largest
+ * possible namespace index size.
+ */
+ ndd->nslabel_size = 128;
+ read_size = sizeof_namespace_index(ndd) * 2;
+ if (!read_size)
+ return -ENXIO;
+
+ /* Allocate config data */
+ config_size = ndd->nsarea.config_size;
+ ndd->data = kvzalloc(config_size, GFP_KERNEL);
+ if (!ndd->data)
+ return -ENOMEM;
+
+ /*
+ * We want to guarantee as few reads as possible while conserving
+ * memory. To do that we figure out how much unused space will be left
+ * in the last read, divide that by the total number of reads it is
+ * going to take given our maximum transfer size, and then reduce our
+ * maximum transfer size based on that result.
+ */
+ max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
+ if (read_size < max_xfer) {
+ /* trim waste */
+ max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
+ DIV_ROUND_UP(config_size, max_xfer);
+ /* make certain we read indexes in exactly 1 read */
+ if (max_xfer < read_size)
+ max_xfer = read_size;
+ }
+
+ /* Make our initial read size a multiple of max_xfer size */
+ read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
+ config_size);
+
+ /* Read the index data */
+ rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
+ if (rc)
+ goto out_err;
+
+ /* Validate index data, if not valid assume all labels are invalid */
+ ndd->ns_current = nd_label_validate(ndd);
+ if (ndd->ns_current < 0)
+ return 0;
+
+ /* Record our index values */
+ ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
+
+ /* Copy "current" index on top of the "next" index */
+ nsindex = to_current_namespace_index(ndd);
+ nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);
+
+ /* Determine starting offset for label data */
+ offset = __le64_to_cpu(nsindex->labeloff);
+ nslot = __le32_to_cpu(nsindex->nslot);
+
+ /* Loop through the free list pulling in any active labels */
+ for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
+ size_t label_read_size;
+
+ /* zero out the unused labels */
+ if (test_bit_le(i, nsindex->free)) {
+ memset(ndd->data + offset, 0, ndd->nslabel_size);
+ continue;
+ }
+
+ /* if we already read past here then just continue */
+ if (offset + ndd->nslabel_size <= read_size)
+ continue;
+
+ /* if we haven't read in a while reset our read_size offset */
+ if (read_size < offset)
+ read_size = offset;
+
+ /* determine how much more will be read after this next call. */
+ label_read_size = offset + ndd->nslabel_size - read_size;
+ label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
+ max_xfer;
+
+ /* truncate last read if needed */
+ if (read_size + label_read_size > config_size)
+ label_read_size = config_size - read_size;
+
+ /* Read the label data */
+ rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
+ read_size, label_read_size);
+ if (rc)
+ goto out_err;
+
+ /* push read_size to next read offset */
+ read_size += label_read_size;
+ }
+
+ dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
+out_err:
+ return rc;
+}
+
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
struct nd_namespace_index *nsindex;
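
The "trim waste" adjustment in nd_label_data_init() is the least obvious step, so a worked example helps. With R = DIV_ROUND_UP(config_size, max_xfer) reads, the last read leaves ((max_xfer - 1) - (config_size - 1) % max_xfer) bytes unused; dividing that slack by R and shaving it off max_xfer keeps the read count at R while filling each read more completely. For illustrative values:

    /*
     * config_size = 130048, max_xfer = 8192:
     *   reads = DIV_ROUND_UP(130048, 8192)       = 16
     *   waste = (8192 - 1) - (130047 % 8192)     = 8191 - 7167 = 1024
     *   trim  = waste / reads                    = 1024 / 16   = 64
     *   max_xfer -= trim                         ->  8128
     * and 16 * 8128 = 130048 exactly: still 16 reads, but the final
     * one is now full instead of 1024 bytes short.
     */
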
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index 18bbe183b3a9..e9a2ad3c2150 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -138,9 +138,7 @@ static inline int nd_label_next_nsindex(int index)
}
struct nvdimm_drvdata;
-int nd_label_validate(struct nvdimm_drvdata *ndd);
-void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
- struct nd_namespace_index *src);
+int nd_label_data_init(struct nvdimm_drvdata *ndd);
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd);
int nd_label_active_count(struct nvdimm_drvdata *ndd);
struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 4a4266250c28..681af3a8fd62 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2099,7 +2099,6 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
return NULL;
}
dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
- dev->parent = &nd_region->dev;
dev->groups = nd_namespace_attribute_groups;
nd_namespace_pmem_set_resource(nd_region, nspm, 0);
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index ac68072fb8cd..182258f64417 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -14,7 +14,6 @@
#define __ND_CORE_H__
#include <linux/libnvdimm.h>
#include <linux/device.h>
-#include <linux/libnvdimm.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 98317e7ce5b5..e79cc8e5c114 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -241,6 +241,8 @@ struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
+int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
+ size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 3f7ad5bc443e..24c64090169e 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -361,6 +361,65 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
return dev;
}
+/*
+ * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
+ * space associated with the namespace. If the memmap is set to DRAM, then
+ * this is a no-op. Since the memmap area is freshly initialized during
+ * probe, we have an opportunity to clear any badblocks in this area.
+ */
+static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
+{
+ struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ void *zero_page = page_address(ZERO_PAGE(0));
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ int num_bad, meta_num, rc, bb_present;
+ sector_t first_bad, meta_start;
+ struct nd_namespace_io *nsio;
+
+ if (nd_pfn->mode != PFN_MODE_PMEM)
+ return 0;
+
+ nsio = to_nd_namespace_io(&ndns->dev);
+ meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
+ meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;
+
+ do {
+ unsigned long zero_len;
+ u64 nsoff;
+
+ bb_present = badblocks_check(&nd_region->bb, meta_start,
+ meta_num, &first_bad, &num_bad);
+ if (bb_present) {
+ dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %lx\n",
+ num_bad, first_bad);
+ nsoff = ALIGN_DOWN((nd_region->ndr_start
+ + (first_bad << 9)) - nsio->res.start,
+ PAGE_SIZE);
+ zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
+ while (zero_len) {
+ unsigned long chunk = min(zero_len, PAGE_SIZE);
+
+ rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
+ chunk, 0);
+ if (rc)
+ break;
+
+ zero_len -= chunk;
+ nsoff += chunk;
+ }
+ if (rc) {
+ dev_err(&nd_pfn->dev,
+ "error clearing %x badblocks at %lx\n",
+ num_bad, first_bad);
+ return rc;
+ }
+ }
+ } while (bb_present);
+
+ return 0;
+}
+
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
@@ -477,7 +536,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -ENXIO;
}
- return 0;
+ return nd_pfn_clear_memmap_errors(nd_pfn);
}
EXPORT_SYMBOL(nd_pfn_validate);
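
A note on the unit juggling in nd_pfn_clear_memmap_errors(): badblocks_check() reports region-relative 512-byte sectors, while nvdimm_write_bytes() takes a namespace-relative byte offset, so the bad range is translated and padded out to whole pages before being overwritten with ZERO_PAGE data (the writes are what trigger poison clearing in the pmem path). With hypothetical numbers:

    /*
     * region ndr_start = 0x100000000, namespace res.start = 0x100200000,
     * first_bad = 0x1234 sectors (region-relative):
     *   byte addr = 0x100000000 + (0x1234 << 9)           = 0x100246800
     *   nsoff     = ALIGN_DOWN(0x100246800 - 0x100200000,
     *                          PAGE_SIZE)                 = 0x46000
     *   zero_len  = ALIGN(num_bad << 9, PAGE_SIZE)
     * The loop then writes one page of zeroes at a time over that span.
     */
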
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index a75d10c23d80..0e39e3d1846f 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -421,9 +421,11 @@ static int pmem_attach_disk(struct device *dev,
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
- } else
+ } else {
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
+ memcpy(&bb_res, &nsio->res, sizeof(bb_res));
+ }
/*
* At release time the queue must be frozen before
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index fa37afcd43ff..174a418cb171 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -560,10 +560,17 @@ static ssize_t region_badblocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
+ ssize_t rc;
- return badblocks_show(&nd_region->bb, buf, 0);
-}
+ device_lock(dev);
+ if (dev->driver)
+ rc = badblocks_show(&nd_region->bb, buf, 0);
+ else
+ rc = -ENXIO;
+ device_unlock(dev);
+ return rc;
+}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
static ssize_t resource_show(struct device *dev,
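
The badblocks fix closes a use-after-unbind: nd_region->bb is driver state, so reading the sysfs attribute after the region driver detached dereferenced stale data. Holding device_lock() while checking dev->driver is the standard idiom for attributes that touch driver-owned state. A generic sketch:

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            ssize_t rc;

            device_lock(dev);       /* serializes against probe()/remove() */
            if (dev->driver)
                    rc = sprintf(buf, "%d\n", 42);  /* driver state here */
            else
                    rc = -ENXIO;
            device_unlock(dev);

            return rc;
    }
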
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9e4a30b05bd2..2e65be8b1387 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3064,7 +3064,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue))
goto out_free_ns;
+
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
+ if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+ blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
+
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9fefba039d1e..cee79cb388af 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -343,6 +343,7 @@ struct nvme_ctrl_ops {
unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
+#define NVME_F_PCI_P2PDMA (1 << 2)
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4e023cd007e1..f30031945ee4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>
+#include <linux/pci-p2pdma.h>
#include "nvme.h"
@@ -99,9 +100,8 @@ struct nvme_dev {
struct work_struct remove_work;
struct mutex shutdown_lock;
bool subsystem;
- void __iomem *cmb;
- pci_bus_addr_t cmb_bus_addr;
u64 cmb_size;
+ bool cmb_use_sqes;
u32 cmbsz;
u32 cmbloc;
struct nvme_ctrl ctrl;
@@ -158,7 +158,7 @@ struct nvme_queue {
struct nvme_dev *dev;
spinlock_t sq_lock;
struct nvme_command *sq_cmds;
- struct nvme_command __iomem *sq_cmds_io;
+ bool sq_cmds_is_io;
spinlock_t cq_lock ____cacheline_aligned_in_smp;
volatile struct nvme_completion *cqes;
struct blk_mq_tags **tags;
@@ -447,11 +447,8 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
spin_lock(&nvmeq->sq_lock);
- if (nvmeq->sq_cmds_io)
- memcpy_toio(&nvmeq->sq_cmds_io[nvmeq->sq_tail], cmd,
- sizeof(*cmd));
- else
- memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
+
+ memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
@@ -748,8 +745,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out;
ret = BLK_STS_RESOURCE;
- nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
- DMA_ATTR_NO_WARN);
+
+ if (is_pci_p2pdma_page(sg_page(iod->sg)))
+ nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
+ dma_dir);
+ else
+ nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
+ dma_dir, DMA_ATTR_NO_WARN);
if (!nr_mapped)
goto out;
@@ -791,7 +793,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
DMA_TO_DEVICE : DMA_FROM_DEVICE;
if (iod->nents) {
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+ /* P2PDMA requests do not need to be unmapped */
+ if (!is_pci_p2pdma_page(sg_page(iod->sg)))
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+
if (blk_integrity_rq(req))
dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
}
@@ -1232,9 +1237,18 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
- if (nvmeq->sq_cmds)
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
- nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+
+ if (nvmeq->sq_cmds) {
+ if (nvmeq->sq_cmds_is_io)
+ pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
+ nvmeq->sq_cmds,
+ SQ_SIZE(nvmeq->q_depth));
+ else
+ dma_free_coherent(nvmeq->q_dmadev,
+ SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds,
+ nvmeq->sq_dma_addr);
+ }
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1323,12 +1337,21 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
int qid, int depth)
{
- /* CMB SQEs will be mapped before creation */
- if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
- return 0;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+ nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
+ nvmeq->sq_cmds);
+ nvmeq->sq_cmds_is_io = true;
+ }
+
+ if (!nvmeq->sq_cmds) {
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ nvmeq->sq_cmds_is_io = false;
+ }
- nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
- &nvmeq->sq_dma_addr, GFP_KERNEL);
if (!nvmeq->sq_cmds)
return -ENOMEM;
return 0;
@@ -1405,13 +1428,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
int result;
s16 vector;
- if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
- unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
- dev->ctrl.page_size);
- nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
- nvmeq->sq_cmds_io = dev->cmb + offset;
- }
-
/*
* A queue's vector matches the queue identifier unless the controller
* has only one vector available.
@@ -1652,9 +1668,6 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
- if (!use_cmb_sqes)
- return;
-
size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
bar = NVME_CMB_BIR(dev->cmbloc);
@@ -1671,11 +1684,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
- if (!dev->cmb)
+ if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
+ dev_warn(dev->ctrl.device,
+ "failed to register the CMB\n");
return;
- dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
+ }
+
dev->cmb_size = size;
+ dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);
+
+ if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
+ (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
+ pci_p2pmem_publish(pdev, true);
if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL))
@@ -1685,12 +1705,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
static inline void nvme_release_cmb(struct nvme_dev *dev)
{
- if (dev->cmb) {
- iounmap(dev->cmb);
- dev->cmb = NULL;
+ if (dev->cmb_size) {
sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL);
- dev->cmbsz = 0;
+ dev->cmb_size = 0;
}
}
@@ -1889,13 +1907,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
- if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
if (result > 0)
dev->q_depth = result;
else
- nvme_release_cmb(dev);
+ dev->cmb_use_sqes = false;
}
do {
@@ -2390,7 +2408,8 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
- .flags = NVME_F_METADATA_SUPPORTED,
+ .flags = NVME_F_METADATA_SUPPORTED |
+ NVME_F_PCI_P2PDMA,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
@@ -2648,7 +2667,6 @@ static void nvme_error_resume(struct pci_dev *pdev)
struct nvme_dev *dev = pci_get_drvdata(pdev);
flush_work(&dev->ctrl.reset_work);
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
static const struct pci_error_handlers nvme_err_handler = {
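
The net effect of the pci.c rework: the CMB is no longer ioremap'd and filled with memcpy_toio(); it is registered as PCI peer-to-peer DMA memory, and SQ allocation simply tries p2p memory first, falling back to ordinary coherent memory. The alloc/free pairing reduces to roughly the following sketch (the queue struct and size handling stand in for the driver's own):

    #include <linux/pci-p2pdma.h>
    #include <linux/dma-mapping.h>

    struct example_queue {
            void *cmds;
            dma_addr_t dma_addr;
            bool cmds_is_io;        /* true when backed by p2p (CMB) memory */
    };

    static int example_alloc_sq(struct pci_dev *pdev, struct example_queue *q,
                                size_t size, bool want_cmb)
    {
            q->cmds = NULL;
            if (want_cmb) {
                    q->cmds = pci_alloc_p2pmem(pdev, size);
                    q->dma_addr = pci_p2pmem_virt_to_bus(pdev, q->cmds);
                    q->cmds_is_io = true;
            }
            if (!q->cmds) {         /* p2p unavailable: fall back */
                    q->cmds = dma_alloc_coherent(&pdev->dev, size,
                                                 &q->dma_addr, GFP_KERNEL);
                    q->cmds_is_io = false;
            }
            return q->cmds ? 0 : -ENOMEM;
    }

    static void example_free_sq(struct pci_dev *pdev, struct example_queue *q,
                                size_t size)
    {
            if (q->cmds_is_io)
                    pci_free_p2pmem(pdev, q->cmds, size);
            else
                    dma_free_coherent(&pdev->dev, size, q->cmds, q->dma_addr);
    }

Because p2p memory is ordinary kernel-mapped memory from the CPU's point of view, nvme_submit_cmd() can drop its memcpy_toio() branch entirely, as the hunk above shows.
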
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index b37a8e3e3f80..d895579b6c5d 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -17,6 +17,8 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
#include "nvmet.h"
@@ -340,6 +342,48 @@ out_unlock:
CONFIGFS_ATTR(nvmet_ns_, device_path);
+#ifdef CONFIG_PCI_P2PDMA
+static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+
+ return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
+}
+
+static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct pci_dev *p2p_dev = NULL;
+ bool use_p2pmem;
+ int ret = count;
+ int error;
+
+ mutex_lock(&ns->subsys->lock);
+ if (ns->enabled) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
+ if (error) {
+ ret = error;
+ goto out_unlock;
+ }
+
+ ns->use_p2pmem = use_p2pmem;
+ pci_dev_put(ns->p2p_dev);
+ ns->p2p_dev = p2p_dev;
+
+out_unlock:
+ mutex_unlock(&ns->subsys->lock);
+
+ return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, p2pmem);
+#endif /* CONFIG_PCI_P2PDMA */
+
static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
@@ -509,6 +553,9 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_ana_grpid,
&nvmet_ns_attr_enable,
&nvmet_ns_attr_buffered_io,
+#ifdef CONFIG_PCI_P2PDMA
+ &nvmet_ns_attr_p2pmem,
+#endif
NULL,
};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 0acdff9e6842..f4efe289dc7b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
+#include <linux/pci-p2pdma.h>
#include "nvmet.h"
@@ -365,9 +366,93 @@ static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
nvmet_file_ns_disable(ns);
}
+static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
+{
+ int ret;
+ struct pci_dev *p2p_dev;
+
+ if (!ns->use_p2pmem)
+ return 0;
+
+ if (!ns->bdev) {
+ pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
+ return -EINVAL;
+ }
+
+ if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
+ pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
+ if (ret < 0)
+ return -EINVAL;
+ } else {
+ /*
+ * Right now we just check that there is p2pmem available so
+ * we can report an error to the user right away if there
+ * is not. We'll find the actual device to use once we
+		 * set up the controller when the port's device is available.
+ */
+
+ p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available for %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ pci_dev_put(p2p_dev);
+ }
+
+ return 0;
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
+ struct nvmet_ns *ns)
+{
+ struct device *clients[2];
+ struct pci_dev *p2p_dev;
+ int ret;
+
+ if (!ctrl->p2p_client)
+ return;
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
+ if (ret < 0)
+ return;
+
+ p2p_dev = pci_dev_get(ns->p2p_dev);
+ } else {
+ clients[0] = ctrl->p2p_client;
+ clients[1] = nvmet_ns_dev(ns);
+
+ p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
+ dev_name(ctrl->p2p_client), ns->device_path);
+ return;
+ }
+ }
+
+ ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
+ if (ret < 0)
+ pci_dev_put(p2p_dev);
+
+ pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
+ ns->nsid);
+}
+
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
int ret;
mutex_lock(&subsys->lock);
@@ -384,6 +469,13 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
if (ret)
goto out_unlock;
+ ret = nvmet_p2pmem_ns_enable(ns);
+ if (ret)
+ goto out_unlock;
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+
ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
0, GFP_KERNEL);
if (ret)
@@ -418,6 +510,9 @@ out_unlock:
mutex_unlock(&subsys->lock);
return ret;
out_dev_put:
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
nvmet_ns_dev_disable(ns);
goto out_unlock;
}
@@ -425,6 +520,7 @@ out_dev_put:
void nvmet_ns_disable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
mutex_lock(&subsys->lock);
if (!ns->enabled)
@@ -434,6 +530,10 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
list_del_rcu(&ns->dev_link);
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
mutex_unlock(&subsys->lock);
/*
@@ -450,6 +550,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
percpu_ref_exit(&ns->ref);
mutex_lock(&subsys->lock);
+
subsys->nr_namespaces--;
nvmet_ns_changed(subsys, ns->nsid);
nvmet_ns_dev_disable(ns);
@@ -725,6 +826,51 @@ void nvmet_req_execute(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_execute);
+int nvmet_req_alloc_sgl(struct nvmet_req *req)
+{
+ struct pci_dev *p2p_dev = NULL;
+
+ if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
+ if (req->sq->ctrl && req->ns)
+ p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
+ req->ns->nsid);
+
+ req->p2p_dev = NULL;
+ if (req->sq->qid && p2p_dev) {
+ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+ req->transfer_len);
+ if (req->sg) {
+ req->p2p_dev = p2p_dev;
+ return 0;
+ }
+ }
+
+ /*
+		 * If no P2P memory was available, we fall back to using
+		 * regular memory.
+ */
+ }
+
+ req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
+ if (!req->sg)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
+
+void nvmet_req_free_sgl(struct nvmet_req *req)
+{
+ if (req->p2p_dev)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+ else
+ sgl_free(req->sg);
+
+ req->sg = NULL;
+ req->sg_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
+
static inline bool nvmet_cc_en(u32 cc)
{
return (cc >> NVME_CC_EN_SHIFT) & 0x1;
@@ -921,6 +1067,37 @@ bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
return __nvmet_host_allowed(subsys, hostnqn);
}
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
+ struct nvmet_req *req)
+{
+ struct nvmet_ns *ns;
+
+ if (!req->p2p_client)
+ return;
+
+ ctrl->p2p_client = get_device(req->p2p_client);
+
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
+ pci_dev_put(radix_tree_deref_slot(slot));
+
+ put_device(ctrl->p2p_client);
+}
+
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
@@ -962,6 +1139,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
+ INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1026,6 +1204,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
mutex_lock(&subsys->lock);
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ nvmet_setup_p2p_ns_map(ctrl, req);
mutex_unlock(&subsys->lock);
*ctrlp = ctrl;
@@ -1053,6 +1232,7 @@ static void nvmet_ctrl_free(struct kref *ref)
struct nvmet_subsys *subsys = ctrl->subsys;
mutex_lock(&subsys->lock);
+ nvmet_release_p2p_ns_map(ctrl);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
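
The p2p_ns_map radix tree threaded through these hunks is keyed by nsid and stores a counted reference to the pci_dev chosen for that namespace: entries are added on namespace enable and controller setup, and deleted (dropping the reference) on disable and controller teardown. The lifecycle in isolation, with hypothetical names:

    #include <linux/radix-tree.h>
    #include <linux/pci.h>

    static RADIX_TREE(example_ns_map, GFP_KERNEL);

    static int example_map_ns(u32 nsid, struct pci_dev *p2p_dev)
    {
            int ret;

            /* The tree owns one reference per stored entry. */
            ret = radix_tree_insert(&example_ns_map, nsid,
                                    pci_dev_get(p2p_dev));
            if (ret < 0)
                    pci_dev_put(p2p_dev);   /* insert failed: undo our ref */
            return ret;
    }

    static void example_unmap_ns(u32 nsid)
    {
            /* radix_tree_delete() returns the entry or NULL; both are
             * safe to pass to pci_dev_put(). */
            pci_dev_put(radix_tree_delete(&example_ns_map, nsid));
    }
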
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index f93fb5711142..c1ec3475a140 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -78,6 +78,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
op = REQ_OP_READ;
}
+ if (is_pci_p2pdma_page(sg_page(req->sg)))
+ op_flags |= REQ_NOMERGE;
+
sector = le64_to_cpu(req->cmd->rw.slba);
sector <<= (req->ns->blksize_shift - 9);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 08f7b57a1203..c2b4d9ee6391 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -26,6 +26,7 @@
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
+#include <linux/radix-tree.h>
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
@@ -77,6 +78,9 @@ struct nvmet_ns {
struct completion disable_done;
mempool_t *bvec_pool;
struct kmem_cache *bvec_cache;
+
+ int use_p2pmem;
+ struct pci_dev *p2p_dev;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -84,6 +88,11 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
return container_of(to_config_group(item), struct nvmet_ns, group);
}
+static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
+{
+ return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
+}
+
struct nvmet_cq {
u16 qid;
u16 size;
@@ -184,6 +193,9 @@ struct nvmet_ctrl {
char subsysnqn[NVMF_NQN_FIELD_LEN];
char hostnqn[NVMF_NQN_FIELD_LEN];
+
+ struct device *p2p_client;
+ struct radix_tree_root p2p_ns_map;
};
struct nvmet_subsys {
@@ -295,6 +307,9 @@ struct nvmet_req {
void (*execute)(struct nvmet_req *req);
const struct nvmet_fabrics_ops *ops;
+
+ struct pci_dev *p2p_dev;
+ struct device *p2p_client;
};
extern struct workqueue_struct *buffered_io_wq;
@@ -337,6 +352,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
+int nvmet_req_alloc_sgl(struct nvmet_req *req);
+void nvmet_req_free_sgl(struct nvmet_req *req);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
u16 size);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index bd265aceb90c..ddce100be57a 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -504,7 +504,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
}
if (rsp->req.sg != rsp->cmd->inline_sg)
- sgl_free(rsp->req.sg);
+ nvmet_req_free_sgl(&rsp->req);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -653,24 +653,24 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
{
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
u64 addr = le64_to_cpu(sgl->addr);
- u32 len = get_unaligned_le24(sgl->length);
u32 key = get_unaligned_le32(sgl->key);
int ret;
+ rsp->req.transfer_len = get_unaligned_le24(sgl->length);
+
/* no data command? */
- if (!len)
+ if (!rsp->req.transfer_len)
return 0;
- rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
- if (!rsp->req.sg)
- return NVME_SC_INTERNAL;
+ ret = nvmet_req_alloc_sgl(&rsp->req);
+ if (ret < 0)
+ goto error_out;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
nvmet_data_dir(&rsp->req));
if (ret < 0)
- return NVME_SC_INTERNAL;
- rsp->req.transfer_len += len;
+ goto error_out;
rsp->n_rdma += ret;
if (invalidate) {
@@ -679,6 +679,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
}
return 0;
+
+error_out:
+ rsp->req.transfer_len = 0;
+ return NVME_SC_INTERNAL;
}
static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
@@ -746,6 +750,8 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
cmd->send_sge.addr, cmd->send_sge.length,
DMA_TO_DEVICE);
+ cmd->req.p2p_client = &queue->dev->device->dev;
+
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_rdma_ops))
return;
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index aa1657831b70..9b18ce90f907 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nvmem framework core.
*
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
* Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
@@ -19,6 +11,7 @@
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
+#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
@@ -26,18 +19,18 @@
#include <linux/slab.h>
struct nvmem_device {
- const char *name;
struct module *owner;
struct device dev;
int stride;
int word_size;
int id;
- int users;
+ struct kref refcnt;
size_t size;
bool read_only;
int flags;
struct bin_attribute eeprom;
struct device *base_dev;
+ struct list_head cells;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
void *priv;
@@ -58,8 +51,13 @@ struct nvmem_cell {
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);
-static LIST_HEAD(nvmem_cells);
-static DEFINE_MUTEX(nvmem_cells_mutex);
+static DEFINE_MUTEX(nvmem_cell_mutex);
+static LIST_HEAD(nvmem_cell_tables);
+
+static DEFINE_MUTEX(nvmem_lookup_mutex);
+static LIST_HEAD(nvmem_lookup_list);
+
+static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
@@ -156,7 +154,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
static struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
.name = "nvmem",
- .mode = S_IWUSR | S_IRUGO,
+ .mode = 0644,
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
@@ -180,7 +178,7 @@ static const struct attribute_group *nvmem_rw_dev_groups[] = {
static struct bin_attribute bin_attr_ro_nvmem = {
.attr = {
.name = "nvmem",
- .mode = S_IRUGO,
+ .mode = 0444,
},
.read = bin_attr_nvmem_read,
};
@@ -203,7 +201,7 @@ static const struct attribute_group *nvmem_ro_dev_groups[] = {
static struct bin_attribute bin_attr_rw_root_nvmem = {
.attr = {
.name = "nvmem",
- .mode = S_IWUSR | S_IRUSR,
+ .mode = 0600,
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
@@ -227,7 +225,7 @@ static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
static struct bin_attribute bin_attr_ro_root_nvmem = {
.attr = {
.name = "nvmem",
- .mode = S_IRUSR,
+ .mode = 0400,
},
.read = bin_attr_nvmem_read,
};
@@ -282,48 +280,42 @@ static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
return to_nvmem_device(d);
}
-static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
+static struct nvmem_device *nvmem_find(const char *name)
{
- struct nvmem_cell *p;
-
- mutex_lock(&nvmem_cells_mutex);
+ struct device *d;
- list_for_each_entry(p, &nvmem_cells, node)
- if (!strcmp(p->name, cell_id)) {
- mutex_unlock(&nvmem_cells_mutex);
- return p;
- }
+ d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
- mutex_unlock(&nvmem_cells_mutex);
+ if (!d)
+ return NULL;
- return NULL;
+ return to_nvmem_device(d);
}
static void nvmem_cell_drop(struct nvmem_cell *cell)
{
- mutex_lock(&nvmem_cells_mutex);
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
+ mutex_lock(&nvmem_mutex);
list_del(&cell->node);
- mutex_unlock(&nvmem_cells_mutex);
+ mutex_unlock(&nvmem_mutex);
+ kfree(cell->name);
kfree(cell);
}
static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
- struct nvmem_cell *cell;
- struct list_head *p, *n;
+ struct nvmem_cell *cell, *p;
- list_for_each_safe(p, n, &nvmem_cells) {
- cell = list_entry(p, struct nvmem_cell, node);
- if (cell->nvmem == nvmem)
- nvmem_cell_drop(cell);
- }
+ list_for_each_entry_safe(cell, p, &nvmem->cells, node)
+ nvmem_cell_drop(cell);
}
static void nvmem_cell_add(struct nvmem_cell *cell)
{
- mutex_lock(&nvmem_cells_mutex);
- list_add_tail(&cell->node, &nvmem_cells);
- mutex_unlock(&nvmem_cells_mutex);
+ mutex_lock(&nvmem_mutex);
+ list_add_tail(&cell->node, &cell->nvmem->cells);
+ mutex_unlock(&nvmem_mutex);
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}
static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
@@ -361,7 +353,7 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
*
* Return: 0 or negative error code on failure.
*/
-int nvmem_add_cells(struct nvmem_device *nvmem,
+static int nvmem_add_cells(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info,
int ncells)
{
@@ -400,7 +392,6 @@ err:
return rval;
}
-EXPORT_SYMBOL_GPL(nvmem_add_cells);
/*
* nvmem_setup_compat() - Create an additional binary entry in
@@ -440,6 +431,136 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
}
/**
+ * nvmem_register_notifier() - Register a notifier block for nvmem events.
+ *
+ * @nb: notifier block to be called on nvmem events.
+ *
+ * Return: 0 on success, negative error number on failure.
+ */
+int nvmem_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&nvmem_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(nvmem_register_notifier);
+
+/**
+ * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
+ *
+ * @nb: notifier block to be unregistered.
+ *
+ * Return: 0 on success, negative error number on failure.
+ */
+int nvmem_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
+
+static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
+{
+ const struct nvmem_cell_info *info;
+ struct nvmem_cell_table *table;
+ struct nvmem_cell *cell;
+ int rval = 0, i;
+
+ mutex_lock(&nvmem_cell_mutex);
+ list_for_each_entry(table, &nvmem_cell_tables, node) {
+ if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
+ for (i = 0; i < table->ncells; i++) {
+ info = &table->cells[i];
+
+ cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ if (!cell) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ rval = nvmem_cell_info_to_nvmem_cell(nvmem,
+ info,
+ cell);
+ if (rval) {
+ kfree(cell);
+ goto out;
+ }
+
+ nvmem_cell_add(cell);
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&nvmem_cell_mutex);
+ return rval;
+}
+
+static struct nvmem_cell *
+nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
+{
+ struct nvmem_cell *cell = NULL;
+
+ mutex_lock(&nvmem_mutex);
+ list_for_each_entry(cell, &nvmem->cells, node) {
+ if (strcmp(cell_id, cell->name) == 0)
+ break;
+ }
+ mutex_unlock(&nvmem_mutex);
+
+ return cell;
+}
+
+static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+{
+ struct device_node *parent, *child;
+ struct device *dev = &nvmem->dev;
+ struct nvmem_cell *cell;
+ const __be32 *addr;
+ int len;
+
+ parent = dev->of_node;
+
+ for_each_child_of_node(parent, child) {
+ addr = of_get_property(child, "reg", &len);
+ if (!addr || (len < 2 * sizeof(u32))) {
+ dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+ return -EINVAL;
+ }
+
+ cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ if (!cell)
+ return -ENOMEM;
+
+ cell->nvmem = nvmem;
+ cell->offset = be32_to_cpup(addr++);
+ cell->bytes = be32_to_cpup(addr);
+ cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
+
+ addr = of_get_property(child, "bits", &len);
+ if (addr && len == (2 * sizeof(u32))) {
+ cell->bit_offset = be32_to_cpup(addr++);
+ cell->nbits = be32_to_cpup(addr);
+ }
+
+ if (cell->nbits)
+ cell->bytes = DIV_ROUND_UP(
+ cell->nbits + cell->bit_offset,
+ BITS_PER_BYTE);
+
+ if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+ dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
+ cell->name, nvmem->stride);
+ /* Cells already added will be freed later. */
+ kfree(cell->name);
+ kfree(cell);
+ return -EINVAL;
+ }
+
+ nvmem_cell_add(cell);
+ }
+
+ return 0;
+}
+
+/**
 * nvmem_register() - Register an nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
*
@@ -467,6 +588,9 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
return ERR_PTR(rval);
}
+ kref_init(&nvmem->refcnt);
+ INIT_LIST_HEAD(&nvmem->cells);
+
nvmem->id = rval;
nvmem->owner = config->owner;
if (!nvmem->owner && config->dev->driver)
@@ -516,11 +640,31 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
goto err_device_del;
}
- if (config->cells)
- nvmem_add_cells(nvmem, config->cells, config->ncells);
+ if (config->cells) {
+ rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
+ if (rval)
+ goto err_teardown_compat;
+ }
+
+ rval = nvmem_add_cells_from_table(nvmem);
+ if (rval)
+ goto err_remove_cells;
+
+ rval = nvmem_add_cells_from_of(nvmem);
+ if (rval)
+ goto err_remove_cells;
+
+ rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
+ if (rval)
+ goto err_remove_cells;
return nvmem;
+err_remove_cells:
+ nvmem_device_remove_all_cells(nvmem);
+err_teardown_compat:
+ if (config->compat)
+ device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
err_device_del:
device_del(&nvmem->dev);
err_put_device:
@@ -530,21 +674,13 @@ err_put_device:
}
EXPORT_SYMBOL_GPL(nvmem_register);
-/**
- * nvmem_unregister() - Unregister previously registered nvmem device
- *
- * @nvmem: Pointer to previously registered nvmem device.
- *
- * Return: Will be an negative on error or a zero on success.
- */
-int nvmem_unregister(struct nvmem_device *nvmem)
+static void nvmem_device_release(struct kref *kref)
{
- mutex_lock(&nvmem_mutex);
- if (nvmem->users) {
- mutex_unlock(&nvmem_mutex);
- return -EBUSY;
- }
- mutex_unlock(&nvmem_mutex);
+ struct nvmem_device *nvmem;
+
+ nvmem = container_of(kref, struct nvmem_device, refcnt);
+
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
if (nvmem->flags & FLAG_COMPAT)
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
@@ -552,14 +688,22 @@ int nvmem_unregister(struct nvmem_device *nvmem)
nvmem_device_remove_all_cells(nvmem);
device_del(&nvmem->dev);
put_device(&nvmem->dev);
+}
- return 0;
+/**
+ * nvmem_unregister() - Unregister previously registered nvmem device
+ *
+ * @nvmem: Pointer to previously registered nvmem device.
+ */
+void nvmem_unregister(struct nvmem_device *nvmem)
+{
+ kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
static void devm_nvmem_release(struct device *dev, void *res)
{
- WARN_ON(nvmem_unregister(*(struct nvmem_device **)res));
+ nvmem_unregister(*(struct nvmem_device **)res);
}
/**
@@ -617,71 +761,34 @@ int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
}
EXPORT_SYMBOL(devm_nvmem_unregister);
-
static struct nvmem_device *__nvmem_device_get(struct device_node *np,
- struct nvmem_cell **cellp,
- const char *cell_id)
+ const char *nvmem_name)
{
struct nvmem_device *nvmem = NULL;
mutex_lock(&nvmem_mutex);
-
- if (np) {
- nvmem = of_nvmem_find(np);
- if (!nvmem) {
- mutex_unlock(&nvmem_mutex);
- return ERR_PTR(-EPROBE_DEFER);
- }
- } else {
- struct nvmem_cell *cell = nvmem_find_cell(cell_id);
-
- if (cell) {
- nvmem = cell->nvmem;
- *cellp = cell;
- }
-
- if (!nvmem) {
- mutex_unlock(&nvmem_mutex);
- return ERR_PTR(-ENOENT);
- }
- }
-
- nvmem->users++;
+ nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
mutex_unlock(&nvmem_mutex);
+ if (!nvmem)
+ return ERR_PTR(-EPROBE_DEFER);
if (!try_module_get(nvmem->owner)) {
dev_err(&nvmem->dev,
"could not increase module refcount for cell %s\n",
- nvmem->name);
-
- mutex_lock(&nvmem_mutex);
- nvmem->users--;
- mutex_unlock(&nvmem_mutex);
+ nvmem_dev_name(nvmem));
return ERR_PTR(-EINVAL);
}
+ kref_get(&nvmem->refcnt);
+
return nvmem;
}
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
module_put(nvmem->owner);
- mutex_lock(&nvmem_mutex);
- nvmem->users--;
- mutex_unlock(&nvmem_mutex);
-}
-
-static struct nvmem_device *nvmem_find(const char *name)
-{
- struct device *d;
-
- d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
-
- if (!d)
- return NULL;
-
- return to_nvmem_device(d);
+ kref_put(&nvmem->refcnt, nvmem_device_release);
}
#if IS_ENABLED(CONFIG_OF)
@@ -706,7 +813,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
if (!nvmem_np)
return ERR_PTR(-EINVAL);
- return __nvmem_device_get(nvmem_np, NULL, NULL);
+ return __nvmem_device_get(nvmem_np, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
@@ -810,44 +917,86 @@ struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
-static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
+static struct nvmem_cell *
+nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
- struct nvmem_cell *cell = NULL;
+ struct nvmem_cell *cell = ERR_PTR(-ENOENT);
+ struct nvmem_cell_lookup *lookup;
struct nvmem_device *nvmem;
+ const char *dev_id;
- nvmem = __nvmem_device_get(NULL, &cell, cell_id);
- if (IS_ERR(nvmem))
- return ERR_CAST(nvmem);
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+ dev_id = dev_name(dev);
+
+ mutex_lock(&nvmem_lookup_mutex);
+
+ list_for_each_entry(lookup, &nvmem_lookup_list, node) {
+ if ((strcmp(lookup->dev_id, dev_id) == 0) &&
+ (strcmp(lookup->con_id, con_id) == 0)) {
+ /* This is the right entry. */
+ nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
+ if (IS_ERR(nvmem)) {
+ /* Provider may not be registered yet. */
+ cell = ERR_CAST(nvmem);
+ goto out;
+ }
+
+ cell = nvmem_find_cell_by_name(nvmem,
+ lookup->cell_name);
+ if (!cell) {
+ __nvmem_device_put(nvmem);
+ cell = ERR_PTR(-ENOENT);
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&nvmem_lookup_mutex);
return cell;
}
#if IS_ENABLED(CONFIG_OF)
+static struct nvmem_cell *
+nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index)
+{
+ struct nvmem_cell *cell = NULL;
+ int i = 0;
+
+ mutex_lock(&nvmem_mutex);
+ list_for_each_entry(cell, &nvmem->cells, node) {
+ if (index == i++)
+ break;
+ }
+ mutex_unlock(&nvmem_mutex);
+
+ return cell;
+}
+
/**
* of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
*
* @np: Device tree node that uses the nvmem cell.
- * @name: nvmem cell name from nvmem-cell-names property, or NULL
- * for the cell at index 0 (the lone cell with no accompanying
- * nvmem-cell-names property).
+ * @id: nvmem cell name from nvmem-cell-names property, or NULL
+ * for the cell at index 0 (the lone cell with no accompanying
+ * nvmem-cell-names property).
*
* Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
*/
-struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
- const char *name)
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
struct device_node *cell_np, *nvmem_np;
- struct nvmem_cell *cell;
struct nvmem_device *nvmem;
- const __be32 *addr;
- int rval, len;
+ struct nvmem_cell *cell;
int index = 0;
/* if cell name exists, find index to the name */
- if (name)
- index = of_property_match_string(np, "nvmem-cell-names", name);
+ if (id)
+ index = of_property_match_string(np, "nvmem-cell-names", id);
cell_np = of_parse_phandle(np, "nvmem-cells", index);
if (!cell_np)
@@ -857,59 +1006,18 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
if (!nvmem_np)
return ERR_PTR(-EINVAL);
- nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
+ nvmem = __nvmem_device_get(nvmem_np, NULL);
of_node_put(nvmem_np);
if (IS_ERR(nvmem))
return ERR_CAST(nvmem);
- addr = of_get_property(cell_np, "reg", &len);
- if (!addr || (len < 2 * sizeof(u32))) {
- dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
- cell_np);
- rval = -EINVAL;
- goto err_mem;
- }
-
- cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ cell = nvmem_find_cell_by_index(nvmem, index);
if (!cell) {
- rval = -ENOMEM;
- goto err_mem;
- }
-
- cell->nvmem = nvmem;
- cell->offset = be32_to_cpup(addr++);
- cell->bytes = be32_to_cpup(addr);
- cell->name = cell_np->name;
-
- addr = of_get_property(cell_np, "bits", &len);
- if (addr && len == (2 * sizeof(u32))) {
- cell->bit_offset = be32_to_cpup(addr++);
- cell->nbits = be32_to_cpup(addr);
+ __nvmem_device_put(nvmem);
+ return ERR_PTR(-ENOENT);
}
- if (cell->nbits)
- cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
- BITS_PER_BYTE);
-
- if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
- dev_err(&nvmem->dev,
- "cell %s unaligned to nvmem stride %d\n",
- cell->name, nvmem->stride);
- rval = -EINVAL;
- goto err_sanity;
- }
-
- nvmem_cell_add(cell);
-
return cell;
-
-err_sanity:
- kfree(cell);
-
-err_mem:
- __nvmem_device_put(nvmem);
-
- return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
@@ -918,27 +1026,29 @@ EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
*
* @dev: Device that requests the nvmem cell.
- * @cell_id: nvmem cell name to get.
+ * @id: nvmem cell name to get (this corresponds with the name from the
+ * nvmem-cell-names property for DT systems and with the con_id from
+ * the lookup entry for non-DT systems).
*
* Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
*/
-struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
struct nvmem_cell *cell;
if (dev->of_node) { /* try dt first */
- cell = of_nvmem_cell_get(dev->of_node, cell_id);
+ cell = of_nvmem_cell_get(dev->of_node, id);
if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
return cell;
}
- /* NULL cell_id only allowed for device tree; invalid otherwise */
- if (!cell_id)
+ /* NULL cell id only allowed for device tree; invalid otherwise */
+ if (!id)
return ERR_PTR(-EINVAL);
- return nvmem_cell_get_from_list(cell_id);
+ return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
@@ -1015,7 +1125,6 @@ void nvmem_cell_put(struct nvmem_cell *cell)
struct nvmem_device *nvmem = cell->nvmem;
__nvmem_device_put(nvmem);
- nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
@@ -1267,7 +1376,7 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
* @buf: buffer to be written to cell.
*
* Return: length of bytes written or negative error code on failure.
- * */
+ */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf)
{
@@ -1323,7 +1432,7 @@ EXPORT_SYMBOL_GPL(nvmem_device_read);
* @buf: buffer to be written.
*
* Return: length of bytes written or negative error code on failure.
- * */
+ */
int nvmem_device_write(struct nvmem_device *nvmem,
unsigned int offset,
size_t bytes, void *buf)
@@ -1343,6 +1452,80 @@ int nvmem_device_write(struct nvmem_device *nvmem,
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
+/**
+ * nvmem_add_cell_table() - register a table of cell info entries
+ *
+ * @table: table of cell info entries
+ */
+void nvmem_add_cell_table(struct nvmem_cell_table *table)
+{
+ mutex_lock(&nvmem_cell_mutex);
+ list_add_tail(&table->node, &nvmem_cell_tables);
+ mutex_unlock(&nvmem_cell_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
+
+/**
+ * nvmem_del_cell_table() - remove a previously registered cell info table
+ *
+ * @table: table of cell info entries
+ */
+void nvmem_del_cell_table(struct nvmem_cell_table *table)
+{
+ mutex_lock(&nvmem_cell_mutex);
+ list_del(&table->node);
+ mutex_unlock(&nvmem_cell_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
+
+/**
+ * nvmem_add_cell_lookups() - register a list of cell lookup entries
+ *
+ * @entries: array of cell lookup entries
+ * @nentries: number of cell lookup entries in the array
+ */
+void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
+{
+ int i;
+
+ mutex_lock(&nvmem_lookup_mutex);
+ for (i = 0; i < nentries; i++)
+ list_add_tail(&entries[i].node, &nvmem_lookup_list);
+ mutex_unlock(&nvmem_lookup_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
+
+/**
+ * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
+ * entries
+ *
+ * @entries: array of cell lookup entries
+ * @nentries: number of cell lookup entries in the array
+ */
+void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
+{
+ int i;
+
+ mutex_lock(&nvmem_lookup_mutex);
+ for (i = 0; i < nentries; i++)
+ list_del(&entries[i].node);
+ mutex_unlock(&nvmem_lookup_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
+
+/**
+ * nvmem_dev_name() - Get the name of a given nvmem device.
+ *
+ * @nvmem: nvmem device.
+ *
+ * Return: name of the nvmem device.
+ */
+const char *nvmem_dev_name(struct nvmem_device *nvmem)
+{
+ return dev_name(&nvmem->dev);
+}
+EXPORT_SYMBOL_GPL(nvmem_dev_name);
+
static int __init nvmem_init(void)
{
return bus_register(&nvmem_bus_type);
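
With the global cell list gone, non-DT platforms wire consumers to cells through lookup tables instead: board code registers nvmem_cell_lookup entries, and nvmem_cell_get() resolves the con_id against them when there is no of_node. A sketch assuming a provider registered as "qfprom0" exposing a "mac-address" cell (all names hypothetical):

    #include <linux/err.h>
    #include <linux/kernel.h>
    #include <linux/nvmem-consumer.h>
    #include <linux/slab.h>

    /* Board code: route con_id "mac-address" on device "foo.0" to a cell. */
    static struct nvmem_cell_lookup example_lookups[] = {
            {
                    .nvmem_name = "qfprom0",
                    .cell_name  = "mac-address",
                    .dev_id     = "foo.0",
                    .con_id     = "mac-address",
            },
    };

    static void example_board_init(void)
    {
            nvmem_add_cell_lookups(example_lookups,
                                   ARRAY_SIZE(example_lookups));
    }

    /* Consumer: identical call whether the cell came from DT or a lookup. */
    static int example_read_mac(struct device *dev)
    {
            struct nvmem_cell *cell;
            size_t len;
            void *mac;

            cell = nvmem_cell_get(dev, "mac-address");
            if (IS_ERR(cell))
                    return PTR_ERR(cell);

            mac = nvmem_cell_read(cell, &len);      /* kzalloc'd buffer */
            nvmem_cell_put(cell);
            if (IS_ERR(mac))
                    return PTR_ERR(mac);

            /* ... consume len bytes of mac ... */
            kfree(mac);
            return 0;
    }
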
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index a9534a6e8636..66cff1e2147a 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -236,7 +236,7 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev)
lpc18xx_nvmem_config.dev = dev;
lpc18xx_nvmem_config.priv = eeprom;
- eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config);
+ eeprom->nvmem = devm_nvmem_register(dev, &lpc18xx_nvmem_config);
if (IS_ERR(eeprom->nvmem)) {
ret = PTR_ERR(eeprom->nvmem);
goto err_clk;
@@ -255,11 +255,6 @@ err_clk:
static int lpc18xx_eeprom_remove(struct platform_device *pdev)
{
struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev);
- int ret;
-
- ret = nvmem_unregister(eeprom->nvmem);
- if (ret < 0)
- return ret;
clk_disable_unprepare(eeprom->clk);
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 7018e2ef5714..53122f59c4b2 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -177,7 +177,7 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
ocotp_config.size = data->size;
ocotp_config.priv = otp;
ocotp_config.dev = dev;
- otp->nvmem = nvmem_register(&ocotp_config);
+ otp->nvmem = devm_nvmem_register(dev, &ocotp_config);
if (IS_ERR(otp->nvmem)) {
ret = PTR_ERR(otp->nvmem);
goto err_clk;
@@ -199,7 +199,7 @@ static int mxs_ocotp_remove(struct platform_device *pdev)
clk_unprepare(otp->clk);
- return nvmem_unregister(otp->nvmem);
+ return 0;
}
static struct platform_driver mxs_ocotp_driver = {
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index d020f89248fd..570a2e354f30 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -154,7 +154,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
struct resource *res;
struct nvmem_device *nvmem;
struct sunxi_sid *sid;
- int ret, i, size;
+ int i, size;
char *randomness;
const struct sunxi_sid_cfg *cfg;
@@ -181,15 +181,13 @@ static int sunxi_sid_probe(struct platform_device *pdev)
else
econfig.reg_read = sunxi_sid_read;
econfig.priv = sid;
- nvmem = nvmem_register(&econfig);
+ nvmem = devm_nvmem_register(dev, &econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
randomness = kzalloc(size, GFP_KERNEL);
- if (!randomness) {
- ret = -EINVAL;
- goto err_unreg_nvmem;
- }
+ if (!randomness)
+ return -ENOMEM;
for (i = 0; i < size; i++)
econfig.reg_read(sid, i, &randomness[i], 1);
@@ -200,17 +198,6 @@ static int sunxi_sid_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nvmem);
return 0;
-
-err_unreg_nvmem:
- nvmem_unregister(nvmem);
- return ret;
-}
-
-static int sunxi_sid_remove(struct platform_device *pdev)
-{
- struct nvmem_device *nvmem = platform_get_drvdata(pdev);
-
- return nvmem_unregister(nvmem);
}
static const struct sunxi_sid_cfg sun4i_a10_cfg = {
@@ -243,7 +230,6 @@ MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
static struct platform_driver sunxi_sid_driver = {
.probe = sunxi_sid_probe,
- .remove = sunxi_sid_remove,
.driver = {
.name = "eeprom-sunxi-sid",
.of_match_table = sunxi_sid_of_match,
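
The devm_nvmem_register() conversions above are what let all three drivers drop their unregistration paths: the nvmem device is released by devres when the underlying struct device goes away. The resulting probe-side pattern is simply (sketch, assuming an already-populated config):

	nvmem = devm_nvmem_register(dev, &config);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);
	/* no matching nvmem_unregister() needed in the remove path */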
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 74eaedd5b860..13ebb16be64e 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -67,6 +67,7 @@ bool of_node_name_eq(const struct device_node *np, const char *name)
return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}
+EXPORT_SYMBOL(of_node_name_eq);
bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
@@ -75,6 +76,7 @@ bool of_node_name_prefix(const struct device_node *np, const char *prefix)
return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}
+EXPORT_SYMBOL(of_node_name_prefix);
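
Exporting these two helpers lets modular code test node names without dereferencing np->name directly; typical uses look like this (sketch):

	if (of_node_name_eq(np, "cpu"))
		/* node is named "cpu", ignoring any @unit-address */;

	if (of_node_name_prefix(np, "memory"))
		/* matches "memory", "memory@0", "memory-controller", ... */;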
int of_n_addr_cells(struct device_node *np)
{
@@ -330,6 +332,8 @@ static bool __of_find_n_match_cpu_property(struct device_node *cpun,
ac = of_n_addr_cells(cpun);
cell = of_get_property(cpun, prop_name, &prop_len);
+ if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
+ return true;
if (!cell || !ac)
return false;
prop_len /= sizeof(*cell) * ac;
@@ -390,7 +394,7 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
struct device_node *cpun;
- for_each_node_by_type(cpun, "cpu") {
+ for_each_of_cpu_node(cpun) {
if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
return cpun;
}
@@ -745,6 +749,45 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
EXPORT_SYMBOL(of_get_next_available_child);
/**
+ * of_get_next_cpu_node - Iterate on cpu nodes
+ * @prev: previous child of the /cpus node, or NULL to get first
+ *
+ * Returns a cpu node pointer with refcount incremented, use of_node_put()
+ * on it when done. Returns NULL when prev is the last child. Decrements
+ * the refcount of prev.
+ */
+struct device_node *of_get_next_cpu_node(struct device_node *prev)
+{
+ struct device_node *next = NULL;
+ unsigned long flags;
+ struct device_node *node;
+
+ if (!prev)
+ node = of_find_node_by_path("/cpus");
+
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+ if (prev)
+ next = prev->sibling;
+ else if (node) {
+ next = node->child;
+ of_node_put(node);
+ }
+ for (; next; next = next->sibling) {
+ if (!(of_node_name_eq(next, "cpu") ||
+ (next->type && !of_node_cmp(next->type, "cpu"))))
+ continue;
+ if (!__of_device_is_available(next))
+ continue;
+ if (of_node_get(next))
+ break;
+ }
+ of_node_put(prev);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return next;
+}
+EXPORT_SYMBOL(of_get_next_cpu_node);
+
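Callers are expected to reach this helper through the for_each_of_cpu_node() iterator added alongside it (as the of_get_cpu_node() and of_numa conversions in this patch do); a minimal sketch:

	struct device_node *cpu;

	for_each_of_cpu_node(cpu) {
		/*
		 * Each iteration holds a reference; of_get_next_cpu_node()
		 * drops the previous one, so only an early break needs an
		 * explicit of_node_put().
		 */
		pr_info("found cpu node %pOFn\n", cpu);
	}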
+/**
* of_get_compatible_child - Find compatible child node
* @parent: parent node
* @compatible: compatible string
@@ -2013,7 +2056,7 @@ struct device_node *of_find_next_cache_node(const struct device_node *np)
/* OF on pmac has nodes instead of properties named "l2-cache"
* beneath CPU nodes.
*/
- if (!strcmp(np->type, "cpu"))
+ if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu"))
for_each_child_of_node(np, child)
if (!strcmp(child->type, "cache"))
return child;
@@ -2045,3 +2088,105 @@ int of_find_last_cache_level(unsigned int cpu)
return cache_level;
}
+
+/**
+ * of_map_rid - Translate a requester ID through a downstream mapping.
+ * @np: root complex device node.
+ * @rid: device requester ID to map.
+ * @map_name: property name of the map to use.
+ * @map_mask_name: optional property name of the mask to use.
+ * @target: optional pointer to a target device node.
+ * @id_out: optional pointer to receive the translated ID.
+ *
+ * Given a device requester ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
+ * @id_out may be NULL if only the other is required. If @target points to
+ * a non-NULL device node pointer, only entries targeting that node will be
+ * matched; if it points to a NULL value, it will receive the device node of
+ * the first matching target phandle, with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_map_rid(struct device_node *np, u32 rid,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out)
+{
+ u32 map_mask, masked_rid;
+ int map_len;
+ const __be32 *map = NULL;
+
+ if (!np || !map_name || (!target && !id_out))
+ return -EINVAL;
+
+ map = of_get_property(np, map_name, &map_len);
+ if (!map) {
+ if (target)
+ return -ENODEV;
+ /* Otherwise, no map implies no translation */
+ *id_out = rid;
+ return 0;
+ }
+
+ if (!map_len || map_len % (4 * sizeof(*map))) {
+ pr_err("%pOF: Error: Bad %s length: %d\n", np,
+ map_name, map_len);
+ return -EINVAL;
+ }
+
+ /* The default is to select all bits. */
+ map_mask = 0xffffffff;
+
+ /*
+ * Can be overridden by "{iommu,msi}-map-mask" property.
+ * If of_property_read_u32() fails, the default is used.
+ */
+ if (map_mask_name)
+ of_property_read_u32(np, map_mask_name, &map_mask);
+
+ masked_rid = map_mask & rid;
+ for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+ struct device_node *phandle_node;
+ u32 rid_base = be32_to_cpup(map + 0);
+ u32 phandle = be32_to_cpup(map + 1);
+ u32 out_base = be32_to_cpup(map + 2);
+ u32 rid_len = be32_to_cpup(map + 3);
+
+ if (rid_base & ~map_mask) {
+ pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
+ np, map_name, map_name,
+ map_mask, rid_base);
+ return -EFAULT;
+ }
+
+ if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
+ continue;
+
+ phandle_node = of_find_node_by_phandle(phandle);
+ if (!phandle_node)
+ return -ENODEV;
+
+ if (target) {
+ if (*target)
+ of_node_put(phandle_node);
+ else
+ *target = phandle_node;
+
+ if (*target != phandle_node)
+ continue;
+ }
+
+ if (id_out)
+ *id_out = masked_rid - rid_base + out_base;
+
+ pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
+ np, map_name, map_mask, rid_base, out_base,
+ rid_len, rid, masked_rid - rid_base + out_base);
+ return 0;
+ }
+
+ pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
+ np, map_name, rid, target && *target ? *target : NULL);
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(of_map_rid);
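
A typical caller (see the __of_msi_map_rid() conversion below) passes the PCI requester ID together with the "msi-map"/"msi-map-mask" property pair; a hedged sketch:

	struct device_node *msi_np = NULL;
	u32 msi_rid;

	/* e.g. rid 0x0008 = bus 0, device 1, function 0 */
	if (!of_map_rid(np, 0x0008, "msi-map", "msi-map-mask",
			&msi_np, &msi_rid)) {
		/*
		 * msi_np now holds a reference to the matching MSI
		 * controller node, msi_rid the translated device ID.
		 */
		of_node_put(msi_np);
	}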
diff --git a/drivers/of/device.c b/drivers/of/device.c
index c7fa5a9697c9..0f27fad9fe94 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -207,7 +207,8 @@ static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len
return -ENODEV;
/* Name & Type */
- csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name,
+ /* %p eats all alphanum characters, so %c must be used here */
+ csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T',
dev->of_node->type);
tsize = csize;
len -= csize;
@@ -286,7 +287,7 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
if ((!dev) || (!dev->of_node))
return;
- add_uevent_var(env, "OF_NAME=%s", dev->of_node->name);
+ add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
if (dev->of_node->type && strcmp("<NULL>", dev->of_node->type) != 0)
add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 02ad93a304a4..e1f6f392a4c0 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_pci.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -588,8 +587,8 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
* "msi-map" property.
*/
for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
- if (!of_pci_map_rid(parent_dev->of_node, rid_in, "msi-map",
- "msi-map-mask", np, &rid_out))
+ if (!of_map_rid(parent_dev->of_node, rid_in, "msi-map",
+ "msi-map-mask", np, &rid_out))
break;
return rid_out;
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index e92391d6d1bd..5ad1342f5682 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -97,8 +97,8 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
return rc;
}
- dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
- child->name, addr);
+ dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
+ child, addr);
return 0;
}
@@ -127,8 +127,8 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
return rc;
}
- dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
- child->name, addr);
+ dev_dbg(&mdio->dev, "registered mdio device %pOFn at address %i\n",
+ child, addr);
return 0;
}
@@ -263,8 +263,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
continue;
/* be noisy to encourage people to set reg property */
- dev_info(&mdio->dev, "scan phy %s at address %i\n",
- child->name, addr);
+ dev_info(&mdio->dev, "scan phy %pOFn at address %i\n",
+ child, addr);
if (of_mdiobus_child_is_phy(child)) {
rc = of_mdiobus_register_phy(mdio, child, addr);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 27d9b4bba535..35c64a4295e0 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -24,18 +24,9 @@ static void __init of_numa_parse_cpu_nodes(void)
{
u32 nid;
int r;
- struct device_node *cpus;
- struct device_node *np = NULL;
-
- cpus = of_find_node_by_path("/cpus");
- if (!cpus)
- return;
-
- for_each_child_of_node(cpus, np) {
- /* Skip things that are not CPUs */
- if (of_node_cmp(np->type, "cpu") != 0)
- continue;
+ struct device_node *np;
+ for_each_of_cpu_node(np) {
r = of_property_read_u32(np, "numa-node-id", &nid);
if (r)
continue;
@@ -46,8 +37,6 @@ static void __init of_numa_parse_cpu_nodes(void)
else
node_set(nid, numa_nodes_parsed);
}
-
- of_node_put(cpus);
}
static int __init of_numa_parse_memory_nodes(void)
@@ -163,8 +152,8 @@ int of_node_to_nid(struct device_node *device)
np = of_get_next_parent(np);
}
if (np && r)
- pr_warn("Invalid \"numa-node-id\" property in node %s\n",
- np->name);
+ pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n",
+ np);
of_node_put(np);
/*
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 216175d11d3d..5d1567025358 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -27,6 +27,14 @@ struct alias_prop {
char stem[0];
};
+#if defined(CONFIG_SPARC)
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2
+#else
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
+#endif
+
+#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
+
extern struct mutex of_mutex;
extern struct list_head aliases_lookup;
extern struct kset *of_kset;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index eda57ef12fd0..42b1f73ac5f6 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -425,8 +425,8 @@ static int build_changeset_next_level(struct overlay_changeset *ovcs,
for_each_child_of_node(overlay_node, child) {
ret = add_changeset_node(ovcs, target_node, child);
if (ret) {
- pr_debug("Failed to apply node @%pOF/%s, err=%d\n",
- target_node, child->name, ret);
+ pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
+ target_node, child, ret);
of_node_put(child);
return ret;
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 6c59673933e9..04ad312fd85b 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -91,8 +91,8 @@ static void of_device_make_bus_id(struct device *dev)
*/
reg = of_get_property(node, "reg", NULL);
if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
- dev_set_name(dev, dev_name(dev) ? "%llx.%s:%s" : "%llx.%s",
- (unsigned long long)addr, node->name,
+ dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
+ (unsigned long long)addr, node,
dev_name(dev));
return;
}
@@ -142,8 +142,8 @@ struct platform_device *of_device_alloc(struct device_node *np,
WARN_ON(rc);
}
if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
- pr_debug("not all legacy IRQ resources mapped for %s\n",
- np->name);
+ pr_debug("not all legacy IRQ resources mapped for %pOFn\n",
+ np);
}
dev->dev.of_node = of_node_get(np);
diff --git a/drivers/of/unittest-data/overlay_15.dts b/drivers/of/unittest-data/overlay_15.dts
index b98f2514df4b..5728490474f6 100644
--- a/drivers/of/unittest-data/overlay_15.dts
+++ b/drivers/of/unittest-data/overlay_15.dts
@@ -20,8 +20,8 @@
#size-cells = <0>;
reg = <0>;
- test-mux-dev {
- reg = <32>;
+ test-mux-dev@20 {
+ reg = <0x20>;
compatible = "unittest-i2c-dev";
status = "okay";
};
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
index 25cf397b8f6b..4ea024d908ee 100644
--- a/drivers/of/unittest-data/tests-overlay.dtsi
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -103,8 +103,8 @@
#size-cells = <0>;
reg = <0>;
- test-mux-dev {
- reg = <32>;
+ test-mux-dev@20 {
+ reg = <0x20>;
compatible = "unittest-i2c-dev";
status = "okay";
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 41b49716ac75..a3a6866765f2 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -212,8 +212,8 @@ static int __init of_unittest_check_node_linkage(struct device_node *np)
for_each_child_of_node(np, child) {
if (child->parent != np) {
- pr_err("Child node %s links to wrong parent %s\n",
- child->name, np->name);
+ pr_err("Child node %pOFn links to wrong parent %pOFn\n",
+ child, np);
rc = -EINVAL;
goto put_child;
}
@@ -299,6 +299,10 @@ static void __init of_unittest_printf(void)
of_unittest_printf_one(np, "%pOF", full_name);
of_unittest_printf_one(np, "%pOFf", full_name);
+ of_unittest_printf_one(np, "%pOFn", "dev");
+ of_unittest_printf_one(np, "%2pOFn", "dev");
+ of_unittest_printf_one(np, "%5pOFn", " dev");
+ of_unittest_printf_one(np, "%pOFnc", "dev:test-sub-device");
of_unittest_printf_one(np, "%pOFp", phandle_str);
of_unittest_printf_one(np, "%pOFP", "dev@100");
of_unittest_printf_one(np, "ABC %pOFP ABC", "ABC dev@100 ABC");
@@ -1046,16 +1050,16 @@ static void __init of_unittest_platform_populate(void)
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild)
unittest(of_find_device_by_node(grandchild),
- "Could not create device for node '%s'\n",
- grandchild->name);
+ "Could not create device for node '%pOFn'\n",
+ grandchild);
}
of_platform_depopulate(&test_bus->dev);
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild)
unittest(!of_find_device_by_node(grandchild),
- "device didn't get destroyed '%s'\n",
- grandchild->name);
+ "device didn't get destroyed '%pOFn'\n",
+ grandchild);
}
platform_device_unregister(test_bus);
@@ -2357,11 +2361,14 @@ static __init void of_unittest_overlay_high_level(void)
}
}
- for (np = overlay_base_root->child; np; np = np->sibling) {
- if (of_get_child_by_name(of_root, np->name)) {
- unittest(0, "illegal node name in overlay_base %s",
- np->name);
- return;
+ for_each_child_of_node(overlay_base_root, np) {
+ struct device_node *base_child;
+ for_each_child_of_node(of_root, base_child) {
+ if (!strcmp(np->full_name, base_child->full_name)) {
+ unittest(0, "illegal node name in overlay_base %pOFn",
+ np);
+ return;
+ }
}
}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 56ff8f6d31fc..2dcc30429e8b 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -98,6 +98,9 @@ config PCI_ECAM
config PCI_LOCKLESS_CONFIG
bool
+config PCI_BRIDGE_EMUL
+ bool
+
config PCI_IOV
bool "PCI IOV support"
depends on PCI
@@ -132,6 +135,23 @@ config PCI_PASID
If unsure, say N.
+config PCI_P2PDMA
+ bool "PCI peer-to-peer transfer support"
+ depends on PCI && ZONE_DEVICE
+ select GENERIC_ALLOCATOR
+ help
+ Enables drivers to do PCI peer-to-peer transactions to and from
+ BARs that are exposed in other devices that are part of the
+ hierarchy where peer-to-peer DMA is guaranteed by the PCI
+ specification to work (i.e. anything below a single PCI bridge).
+
+ Many PCIe root complexes do not support P2P transactions and
+ it's hard to tell which ones support it at all, so at this time,
+ P2P DMA transactions must be between devices behind the same root
+ port.
+
+ If unsure, say N.
+
config PCI_LABEL
def_bool y if (DMI || ACPI)
depends on PCI
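
The p2pdma.c core this option builds exposes a provider/consumer split; a rough sketch of the intended flow (helper names per the new file, exact signatures hedged):

	/* provider: publish part of a BAR as p2p memory */
	rc = pci_p2pdma_add_resource(pdev, bar, size, offset);

	/* consumer: allocate from a publishing device's pool */
	buf = pci_alloc_p2pmem(pdev, len);
	if (buf) {
		/* ... DMA to/from buf ... */
		pci_free_p2pmem(pdev, buf, len);
	}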
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 1b2cfe51e8d7..f2bda77a2df1 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
+obj-$(CONFIG_PCI_BRIDGE_EMUL) += pci-bridge-emul.o
obj-$(CONFIG_ACPI) += pci-acpi.o
obj-$(CONFIG_PCI_LABEL) += pci-label.o
obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
obj-$(CONFIG_PCI_ECAM) += ecam.o
+obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
# Endpoint library must be initialized before its users
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index a3ad2fe185b9..544922f097c0 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -33,7 +33,7 @@ DEFINE_RAW_SPINLOCK(pci_lock);
#endif
#define PCI_OP_READ(size, type, len) \
-int pci_bus_read_config_##size \
+int noinline pci_bus_read_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
int res; \
@@ -48,7 +48,7 @@ int pci_bus_read_config_##size \
}
#define PCI_OP_WRITE(size, type, len) \
-int pci_bus_write_config_##size \
+int noinline pci_bus_write_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
int res; \
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 028b287466fb..6671946dbf66 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -9,12 +9,14 @@ config PCI_MVEBU
depends on MVEBU_MBUS
depends on ARM
depends on OF
+ select PCI_BRIDGE_EMUL
config PCI_AARDVARK
bool "Aardvark PCIe controller"
depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
+ select PCI_BRIDGE_EMUL
help
Add support for Aardvark 64bit PCIe Host Controller. This
controller is part of the South Bridge of the Marvell Armada
@@ -231,7 +233,7 @@ config PCIE_ROCKCHIP_EP
available to support GEN2 with 4 slots.
config PCIE_MEDIATEK
- bool "MediaTek PCIe controller"
+ tristate "MediaTek PCIe controller"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 5d2ce72c7a52..fcf91eacfc63 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
-obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index ce9224a36f62..a32d6dde7a57 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -542,7 +542,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
};
/*
- * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
* @dra7xx: the dra7xx device where the workaround should be applied
*
* Access to the PCIe slave port that are not 32-bit aligned will result
@@ -552,7 +552,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
*
* To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
*/
-static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
+static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
@@ -704,6 +704,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
DEVICE_TYPE_RC);
+
+ ret = dra7xx_pcie_unaligned_memaccess(dev);
+ if (ret)
+ dev_err(dev, "WA for Errata i870 not applied\n");
+
ret = dra7xx_add_pcie_port(dra7xx, pdev);
if (ret < 0)
goto err_gpio;
@@ -717,7 +722,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
DEVICE_TYPE_EP);
- ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
+ ret = dra7xx_pcie_unaligned_memaccess(dev);
if (ret)
goto err_gpio;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 4a9a673b4777..2cbef2d7c207 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -50,6 +50,7 @@ struct imx6_pcie {
struct regmap *iomuxc_gpr;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
+ struct reset_control *turnoff_reset;
enum imx6_pcie_variants variant;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
@@ -97,6 +98,16 @@ struct imx6_pcie {
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_ATEOVRD 0x10
+#define PCIE_PHY_ATEOVRD_EN (0x1 << 2)
+#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
+#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
+
+#define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
+#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
+#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
+#define PCIE_PHY_MPLL_MULTIPLIER_OVRD (0x1 << 9)
+
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
@@ -508,6 +519,50 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+ unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ int mult, div;
+ u32 val;
+
+ switch (phy_rate) {
+ case 125000000:
+ /*
+ * The default settings of the MPLL are for a 125MHz input
+ * clock, so no need to reconfigure anything in that case.
+ */
+ return 0;
+ case 100000000:
+ mult = 25;
+ div = 0;
+ break;
+ case 200000000:
+ mult = 25;
+ div = 1;
+ break;
+ default:
+ dev_err(imx6_pcie->pci->dev,
+ "Unsupported PHY reference clock rate %lu\n", phy_rate);
+ return -EINVAL;
+ }
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+ PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+ val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+ val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+ PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+ val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+ val |= PCIE_PHY_ATEOVRD_EN;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+ return 0;
+}
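A quick sanity check of the table above: with mult = 25, a 100 MHz reference gives 100 x 25 = 2500 MHz, and a 200 MHz reference with the extra /2 implied by div = 1 gives (200 / 2) x 25 = 2500 MHz, matching the rate the PHY derives from the default 125 MHz input and the 2.5 GT/s Gen1 line rate (assuming the divider field halves the reference clock when set).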
+
static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -542,6 +597,24 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -EINVAL;
}
+static void imx6_pcie_ltssm_enable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->variant) {
+ case IMX6Q:
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2,
+ IMX6Q_GPR12_PCIE_CTL_2);
+ break;
+ case IMX7D:
+ reset_control_deassert(imx6_pcie->apps_reset);
+ break;
+ }
+}
+
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -560,11 +633,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
/* Start LTSSM. */
- if (imx6_pcie->variant == IMX7D)
- reset_control_deassert(imx6_pcie->apps_reset);
- else
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+ imx6_pcie_ltssm_enable(dev);
ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret)
@@ -632,6 +701,7 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
imx6_pcie_assert_core_reset(imx6_pcie);
imx6_pcie_init_phy(imx6_pcie);
imx6_pcie_deassert_core_reset(imx6_pcie);
+ imx6_setup_phy_mpll(imx6_pcie);
dw_pcie_setup_rc(pp);
imx6_pcie_establish_link(imx6_pcie);
@@ -682,6 +752,94 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = imx6_pcie_link_up,
};
+#ifdef CONFIG_PM_SLEEP
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0);
+ break;
+ case IMX7D:
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ default:
+ dev_err(dev, "ltssm_disable not supported\n");
+ }
+}
+
+static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+{
+ reset_control_assert(imx6_pcie->turnoff_reset);
+ reset_control_deassert(imx6_pcie->turnoff_reset);
+
+ /*
+ * Components with an upstream port must respond to
+ * PME_Turn_Off with PME_TO_Ack but we can't check.
+ *
+ * The standard recommends a 1-10ms timeout after which to
+ * proceed anyway as if acks were received.
+ */
+ usleep_range(1000, 10000);
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+ clk_disable_unprepare(imx6_pcie->pcie);
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+
+ if (imx6_pcie->variant == IMX7D) {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ }
+}
+
+static int imx6_pcie_suspend_noirq(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ if (imx6_pcie->variant != IMX7D)
+ return 0;
+
+ imx6_pcie_pm_turnoff(imx6_pcie);
+ imx6_pcie_clk_disable(imx6_pcie);
+ imx6_pcie_ltssm_disable(dev);
+
+ return 0;
+}
+
+static int imx6_pcie_resume_noirq(struct device *dev)
+{
+ int ret;
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct pcie_port *pp = &imx6_pcie->pci->pp;
+
+ if (imx6_pcie->variant != IMX7D)
+ return 0;
+
+ imx6_pcie_assert_core_reset(imx6_pcie);
+ imx6_pcie_init_phy(imx6_pcie);
+ imx6_pcie_deassert_core_reset(imx6_pcie);
+ dw_pcie_setup_rc(pp);
+
+ ret = imx6_pcie_establish_link(imx6_pcie);
+ if (ret < 0)
+ dev_info(dev, "pcie link is down after resume.\n");
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx6_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+ imx6_pcie_resume_noirq)
+};
+
static int imx6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -776,6 +934,13 @@ static int imx6_pcie_probe(struct platform_device *pdev)
break;
}
+ /* Grab turnoff reset */
+ imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
+ if (IS_ERR(imx6_pcie->turnoff_reset)) {
+ dev_err(dev, "Failed to get TURNOFF reset control\n");
+ return PTR_ERR(imx6_pcie->turnoff_reset);
+ }
+
/* Grab GPR config register range */
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
@@ -848,6 +1013,7 @@ static struct platform_driver imx6_pcie_driver = {
.name = "imx6q-pcie",
.of_match_table = imx6_pcie_of_match,
.suppress_bind_attrs = true,
+ .pm = &imx6_pcie_pm_ops,
},
.probe = imx6_pcie_probe,
.shutdown = imx6_pcie_shutdown,
diff --git a/drivers/pci/controller/dwc/pci-keystone-dw.c b/drivers/pci/controller/dwc/pci-keystone-dw.c
deleted file mode 100644
index 0682213328e9..000000000000
--- a/drivers/pci/controller/dwc/pci-keystone-dw.c
+++ /dev/null
@@ -1,484 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DesignWare application register space functions for Keystone PCI controller
- *
- * Copyright (C) 2013-2014 Texas Instruments., Ltd.
- * http://www.ti.com
- *
- * Author: Murali Karicheri <m-karicheri2@ti.com>
- */
-
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/irqreturn.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_pci.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-
-#include "pcie-designware.h"
-#include "pci-keystone.h"
-
-/* Application register defines */
-#define LTSSM_EN_VAL 1
-#define LTSSM_STATE_MASK 0x1f
-#define LTSSM_STATE_L0 0x11
-#define DBI_CS2_EN_VAL 0x20
-#define OB_XLAT_EN_VAL 2
-
-/* Application registers */
-#define CMD_STATUS 0x004
-#define CFG_SETUP 0x008
-#define OB_SIZE 0x030
-#define CFG_PCIM_WIN_SZ_IDX 3
-#define CFG_PCIM_WIN_CNT 32
-#define SPACE0_REMOTE_CFG_OFFSET 0x1000
-#define OB_OFFSET_INDEX(n) (0x200 + (8 * n))
-#define OB_OFFSET_HI(n) (0x204 + (8 * n))
-
-/* IRQ register defines */
-#define IRQ_EOI 0x050
-#define IRQ_STATUS 0x184
-#define IRQ_ENABLE_SET 0x188
-#define IRQ_ENABLE_CLR 0x18c
-
-#define MSI_IRQ 0x054
-#define MSI0_IRQ_STATUS 0x104
-#define MSI0_IRQ_ENABLE_SET 0x108
-#define MSI0_IRQ_ENABLE_CLR 0x10c
-#define IRQ_STATUS 0x184
-#define MSI_IRQ_OFFSET 4
-
-/* Error IRQ bits */
-#define ERR_AER BIT(5) /* ECRC error */
-#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
-#define ERR_CORR BIT(3) /* Correctable error */
-#define ERR_NONFATAL BIT(2) /* Non-fatal error */
-#define ERR_FATAL BIT(1) /* Fatal error */
-#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */
-#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
- ERR_NONFATAL | ERR_FATAL | ERR_SYS)
-#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI)
-#define ERR_IRQ_STATUS_RAW 0x1c0
-#define ERR_IRQ_STATUS 0x1c4
-#define ERR_IRQ_ENABLE_SET 0x1c8
-#define ERR_IRQ_ENABLE_CLR 0x1cc
-
-/* Config space registers */
-#define DEBUG0 0x728
-
-#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
-
-static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
- u32 *bit_pos)
-{
- *reg_offset = offset % 8;
- *bit_pos = offset >> 3;
-}
-
-phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- return ks_pcie->app.start + MSI_IRQ;
-}
-
-static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
-{
- return readl(ks_pcie->va_app_base + offset);
-}
-
-static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
-{
- writel(val, ks_pcie->va_app_base + offset);
-}
-
-void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- u32 pending, vector;
- int src, virq;
-
- pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
-
- /*
- * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
- * shows 1, 9, 17, 25 and so forth
- */
- for (src = 0; src < 4; src++) {
- if (BIT(src) & pending) {
- vector = offset + (src << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
- src, vector, virq);
- generic_handle_irq(virq);
- }
- }
-}
-
-void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
-{
- u32 reg_offset, bit_pos;
- struct keystone_pcie *ks_pcie;
- struct dw_pcie *pci;
-
- pci = to_dw_pcie_from_pp(pp);
- ks_pcie = to_keystone_pcie(pci);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
-
- ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
- BIT(bit_pos));
- ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
-}
-
-void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
-{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
- BIT(bit_pos));
-}
-
-void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
-{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
- BIT(bit_pos));
-}
-
-int ks_dw_pcie_msi_host_init(struct pcie_port *pp)
-{
- return dw_pcie_allocate_domains(pp);
-}
-
-void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
-{
- int i;
-
- for (i = 0; i < PCI_NUM_INTX; i++)
- ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
-}
-
-void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct device *dev = pci->dev;
- u32 pending;
- int virq;
-
- pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
-
- if (BIT(0) & pending) {
- virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
- generic_handle_irq(virq);
- }
-
- /* EOI the INTx interrupt */
- ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
-}
-
-void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
-{
- ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
-}
-
-irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
-{
- u32 status;
-
- status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
- if (!status)
- return IRQ_NONE;
-
- if (status & ERR_FATAL_IRQ)
- dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n",
- status);
-
- /* Ack the IRQ; status bits are RW1C */
- ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
- return IRQ_HANDLED;
-}
-
-static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
-{
-}
-
-static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
-{
-}
-
-static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
-{
-}
-
-static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
- .name = "Keystone-PCI-Legacy-IRQ",
- .irq_ack = ks_dw_pcie_ack_legacy_irq,
- .irq_mask = ks_dw_pcie_mask_legacy_irq,
- .irq_unmask = ks_dw_pcie_unmask_legacy_irq,
-};
-
-static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
- unsigned int irq, irq_hw_number_t hw_irq)
-{
- irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
- handle_level_irq);
- irq_set_chip_data(irq, d->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
- .map = ks_dw_pcie_init_legacy_irq_map,
- .xlate = irq_domain_xlate_onetwocell,
-};
-
-/**
- * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
- * registers
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
-
- do {
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- } while (!(val & DBI_CS2_EN_VAL));
-}
-
-/**
- * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
-
- do {
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- } while (val & DBI_CS2_EN_VAL);
-}
-
-void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- u32 start = pp->mem->start, end = pp->mem->end;
- int i, tr_size;
- u32 val;
-
- /* Disable BARs for inbound access */
- ks_dw_pcie_set_dbi_mode(ks_pcie);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
- ks_dw_pcie_clear_dbi_mode(ks_pcie);
-
- /* Set outbound translation size per window division */
- ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
-
- tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
-
- /* Using Direct 1:1 mapping of RC <-> PCI memory space */
- for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
- ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
- ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
- start += tr_size;
- }
-
- /* Enable OB translation */
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
-}
-
-/**
- * ks_pcie_cfg_setup() - Set up configuration space address for a device
- *
- * @ks_pcie: ptr to keystone_pcie structure
- * @bus: Bus number the device is residing on
- * @devfn: device, function number info
- *
- * Forms and returns the address of configuration space mapped in PCIESS
- * address space 0. Also configures CFG_SETUP for remote configuration space
- * access.
- *
- * The address space has two regions to access configuration - local and remote.
- * We access local region for bus 0 (as RC is attached on bus 0) and remote
- * region for others with TYPE 1 access when bus > 1. As for device on bus = 1,
- * we will do TYPE 0 access as it will be on our secondary bus (logical).
- * CFG_SETUP is needed only for remote configuration access.
- */
-static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
- unsigned int devfn)
-{
- u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- u32 regval;
-
- if (bus == 0)
- return pci->dbi_base;
-
- regval = (bus << 16) | (device << 8) | function;
-
- /*
- * Since Bus#1 will be a virtual bus, we need to have TYPE0
- * access only.
- * TYPE 1
- */
- if (bus != 1)
- regval |= BIT(24);
-
- ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
- return pp->va_cfg0_base;
-}
-
-int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u8 bus_num = bus->number;
- void __iomem *addr;
-
- addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
-
- return dw_pcie_read(addr + where, size, val);
-}
-
-int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u8 bus_num = bus->number;
- void __iomem *addr;
-
- addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
-
- return dw_pcie_write(addr + where, size, val);
-}
-
-/**
- * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
- *
- * This sets BAR0 to enable inbound access for MSI_IRQ register
- */
-void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- /* Configure and set up BAR0 */
- ks_dw_pcie_set_dbi_mode(ks_pcie);
-
- /* Enable BAR0 */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
-
- ks_dw_pcie_clear_dbi_mode(ks_pcie);
-
- /*
- * For BAR0, just setting bus address for inbound writes (MSI) should
- * be sufficient. Use physical address to avoid any conflicts.
- */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
-}
-
-/**
- * ks_dw_pcie_link_up() - Check if link up
- */
-int ks_dw_pcie_link_up(struct dw_pcie *pci)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(pci, DEBUG0);
- return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
-}
-
-void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- /* Disable Link training */
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- val &= ~LTSSM_EN_VAL;
- ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-
- /* Initiate Link Training */
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-}
-
-/**
- * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
- *
- * Ioremap the register resources, initialize legacy irq domain
- * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
- * PCI host controller.
- */
-int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
- struct device_node *msi_intc_np)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
-
- /* Index 0 is the config reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- /*
- * We set these same and is used in pcie rd/wr_other_conf
- * functions
- */
- pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
- pp->va_cfg1_base = pp->va_cfg0_base;
-
- /* Index 1 is the application reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ks_pcie->va_app_base))
- return PTR_ERR(ks_pcie->va_app_base);
-
- ks_pcie->app = *res;
-
- /* Create legacy IRQ domain */
- ks_pcie->legacy_irq_domain =
- irq_domain_add_linear(ks_pcie->legacy_intc_np,
- PCI_NUM_INTX,
- &ks_dw_pcie_legacy_irq_domain_ops,
- NULL);
- if (!ks_pcie->legacy_irq_domain) {
- dev_err(dev, "Failed to add irq domain for legacy irqs\n");
- return -EINVAL;
- }
-
- return dw_pcie_host_init(pp);
-}
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index e88bd221fffe..14f2b0b4ed5e 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -9,40 +9,510 @@
* Implementation based on pci-exynos.c and pcie-designware.c
*/
-#include <linux/irqchip/chained_irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/init.h>
+#include <linux/mfd/syscon.h>
#include <linux/msi.h>
-#include <linux/of_irq.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
-#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include "pcie-designware.h"
-#include "pci-keystone.h"
-#define DRIVER_NAME "keystone-pcie"
+#define PCIE_VENDORID_MASK 0xffff
+#define PCIE_DEVICEID_SHIFT 16
+
+/* Application registers */
+#define CMD_STATUS 0x004
+#define LTSSM_EN_VAL BIT(0)
+#define OB_XLAT_EN_VAL BIT(1)
+#define DBI_CS2 BIT(5)
+
+#define CFG_SETUP 0x008
+#define CFG_BUS(x) (((x) & 0xff) << 16)
+#define CFG_DEVICE(x) (((x) & 0x1f) << 8)
+#define CFG_FUNC(x) ((x) & 0x7)
+#define CFG_TYPE1 BIT(24)
+
+#define OB_SIZE 0x030
+#define SPACE0_REMOTE_CFG_OFFSET 0x1000
+#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
+#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
+#define OB_ENABLEN BIT(0)
+#define OB_WIN_SIZE 8 /* 8MB */
+
+/* IRQ register defines */
+#define IRQ_EOI 0x050
+#define IRQ_STATUS 0x184
+#define IRQ_ENABLE_SET 0x188
+#define IRQ_ENABLE_CLR 0x18c
+
+#define MSI_IRQ 0x054
+#define MSI0_IRQ_STATUS 0x104
+#define MSI0_IRQ_ENABLE_SET 0x108
+#define MSI0_IRQ_ENABLE_CLR 0x10c
+#define IRQ_STATUS 0x184
+#define MSI_IRQ_OFFSET 4
+
+#define ERR_IRQ_STATUS 0x1c4
+#define ERR_IRQ_ENABLE_SET 0x1c8
+#define ERR_AER BIT(5) /* ECRC error */
+#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
+#define ERR_CORR BIT(3) /* Correctable error */
+#define ERR_NONFATAL BIT(2) /* Non-fatal error */
+#define ERR_FATAL BIT(1) /* Fatal error */
+#define ERR_SYS BIT(0) /* System error */
+#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
+ ERR_NONFATAL | ERR_FATAL | ERR_SYS)
+
+#define MAX_MSI_HOST_IRQS 8
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK 0xb008
+#define PCIE_RC_K2E 0xb009
+#define PCIE_RC_K2L 0xb00a
+#define PCIE_RC_K2G 0xb00b
+
+#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+
+struct keystone_pcie {
+ struct dw_pcie *pci;
+ /* PCI Device ID */
+ u32 device_id;
+ int num_legacy_host_irqs;
+ int legacy_host_irqs[PCI_NUM_INTX];
+ struct device_node *legacy_intc_np;
+
+ int num_msi_host_irqs;
+ int msi_host_irqs[MAX_MSI_HOST_IRQS];
+ int num_lanes;
+ u32 num_viewport;
+ struct phy **phy;
+ struct device_link **link;
+ struct device_node *msi_intc_np;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np;
+
+ int error_irq;
+
+ /* Application register space */
+ void __iomem *va_app_base; /* DT 1st resource */
+ struct resource app;
+};
-/* DEV_STAT_CTRL */
-#define PCIE_CAP_BASE 0x70
+static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
+ u32 *bit_pos)
+{
+ *reg_offset = offset % 8;
+ *bit_pos = offset >> 3;
+}
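Worked example of the helper above: MSI vector 10 gives reg_offset = 10 % 8 = 2 and bit_pos = 10 >> 3 = 1, i.e. the vector is reported in bit 1 of the MSI2 status register (0x104 + (2 << 4) = 0x124), consistent with the "MSI0 status bits 0-3 show vectors 0, 8, 16, 24" layout described in ks_pcie_handle_msi_irq() below.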
-/* PCIE controller device IDs */
-#define PCIE_RC_K2HK 0xb008
-#define PCIE_RC_K2E 0xb009
-#define PCIE_RC_K2L 0xb00a
+static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ return ks_pcie->app.start + MSI_IRQ;
+}
+
+static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+ return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
+ u32 val)
+{
+ writel(val, ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 pending, vector;
+ int src, virq;
+
+ pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
+
+ /*
+ * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
+ * shows 1, 9, 17, 25 and so forth
+ */
+ for (src = 0; src < 4; src++) {
+ if (BIT(src) & pending) {
+ vector = offset + (src << 3);
+ virq = irq_linear_revmap(pp->irq_domain, vector);
+ dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
+ src, vector, virq);
+ generic_handle_irq(virq);
+ }
+ }
+}
+
+static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
+{
+ u32 reg_offset, bit_pos;
+ struct keystone_pcie *ks_pcie;
+ struct dw_pcie *pci;
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+
+ ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
+ BIT(bit_pos));
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+}
+
+static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+ u32 reg_offset, bit_pos;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+ ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+ BIT(bit_pos));
+}
+
+static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+ u32 reg_offset, bit_pos;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+ ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+ BIT(bit_pos));
+}
+
+static int ks_pcie_msi_host_init(struct pcie_port *pp)
+{
+ return dw_pcie_allocate_domains(pp);
+}
+
+static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
+{
+ int i;
+
+ for (i = 0; i < PCI_NUM_INTX; i++)
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
+}
+
+static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
+ int offset)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 pending;
+ int virq;
+
+ pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
+
+ if (BIT(0) & pending) {
+ virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+ dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
+ generic_handle_irq(virq);
+ }
+
+ /* EOI the INTx interrupt */
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
+}
+
+static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
+{
+ ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
+}
+
+static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
+{
+ u32 reg;
+ struct device *dev = ks_pcie->pci->dev;
+
+ reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
+ if (!reg)
+ return IRQ_NONE;
+
+ if (reg & ERR_SYS)
+ dev_err(dev, "System Error\n");
+
+ if (reg & ERR_FATAL)
+ dev_err(dev, "Fatal Error\n");
+
+ if (reg & ERR_NONFATAL)
+ dev_dbg(dev, "Non Fatal Error\n");
+
+ if (reg & ERR_CORR)
+ dev_dbg(dev, "Correctable Error\n");
+
+ if (reg & ERR_AXI)
+ dev_err(dev, "AXI tag lookup fatal Error\n");
+
+ if (reg & ERR_AER)
+ dev_err(dev, "ECRC Error\n");
+
+ ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
+
+ return IRQ_HANDLED;
+}
+
+static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_pcie_legacy_irq_chip = {
+ .name = "Keystone-PCI-Legacy-IRQ",
+ .irq_ack = ks_pcie_ack_legacy_irq,
+ .irq_mask = ks_pcie_mask_legacy_irq,
+ .irq_unmask = ks_pcie_unmask_legacy_irq,
+};
+
+static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+ .map = ks_pcie_init_legacy_irq_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
+
+static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+ u32 num_viewport = ks_pcie->num_viewport;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ u64 start = pp->mem->start;
+ u64 end = pp->mem->end;
+ int i;
+
+ /* Disable BARs for inbound access */
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ val = ilog2(OB_WIN_SIZE);
+ ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
+
+ /* Using Direct 1:1 mapping of RC <-> PCI memory space */
+ for (i = 0; i < num_viewport && (start < end); i++) {
+ ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
+ lower_32_bits(start) | OB_ENABLEN);
+ ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
+ upper_32_bits(start));
+ start += OB_WIN_SIZE;
+ }
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= OB_XLAT_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
+static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size,
+ u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 reg;
+
+ reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+ CFG_FUNC(PCI_FUNC(devfn));
+ if (bus->parent->number != pp->root_bus_nr)
+ reg |= CFG_TYPE1;
+ ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+ return dw_pcie_read(pp->va_cfg0_base + where, size, val);
+}
+
+static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size,
+ u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 reg;
+
+ reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+ CFG_FUNC(PCI_FUNC(devfn));
+ if (bus->parent->number != pp->root_bus_nr)
+ reg |= CFG_TYPE1;
+ ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+ return dw_pcie_write(pp->va_cfg0_base + where, size, val);
+}
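For illustration, a config access to bus 2, device 3, function 1 behind a deeper bridge programs CFG_SETUP with CFG_BUS(2) | CFG_DEVICE(3) | CFG_FUNC(1) | CFG_TYPE1 = 0x20301 | BIT(24) = 0x1020301 before the read or write goes out through the remote config window at va_cfg0_base.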
+
+/**
+ * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access for MSI_IRQ register
+ */
+static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+}
+
+/**
+ * ks_pcie_link_up() - Check if link up
+ */
+static int ks_pcie_link_up(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+ val &= PORT_LOGIC_LTSSM_STATE_MASK;
+ return (val == PORT_LOGIC_LTSSM_STATE_L0);
+}
+
+static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
-#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+ /* Disable Link training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~LTSSM_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-static void quirk_limit_mrrs(struct pci_dev *dev)
+ /* Initiate Link Training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+}
+
+/**
+ * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
+ *
+ * Ioremap the register resources, initialize legacy irq domain
+ * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
+ * PCI host controller.
+ */
+static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ /* Index 0 is the config reg. space address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ /*
+ * We set these to the same value and use them in the pcie
+ * rd/wr_other_conf functions
+ */
+ pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
+ pp->va_cfg1_base = pp->va_cfg0_base;
+
+ /* Index 1 is the application reg. space address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ ks_pcie->app = *res;
+
+ /* Create legacy IRQ domain */
+ ks_pcie->legacy_irq_domain =
+ irq_domain_add_linear(ks_pcie->legacy_intc_np,
+ PCI_NUM_INTX,
+ &ks_pcie_legacy_irq_domain_ops,
+ NULL);
+ if (!ks_pcie->legacy_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ return -EINVAL;
+ }
+
+ return dw_pcie_host_init(pp);
+}
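
The linear domain registered here is what the chained legacy-interrupt handler resolves hardware IRQ numbers against. The driver's actual demux lives in ks_pcie_handle_legacy_irq() (not visible in this hunk); its assumed general shape is:

    /* Sketch: translate a legacy INTx offset through the linear domain
     * and hand the resulting virq to the generic IRQ layer.
     */
    static void ks_pcie_demux_intx(struct keystone_pcie *ks_pcie, int offset)
    {
        unsigned int virq;

        virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
        if (virq)
            generic_handle_irq(virq);
    }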
+
+static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
- struct pci_dev *bridge = bus->self;
+ struct pci_dev *bridge;
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
@@ -50,11 +520,13 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ 0, },
};
if (pci_is_root_bus(bus))
- return;
+ bridge = dev;
/* look for the host bridge */
while (!pci_is_root_bus(bus)) {
@@ -62,43 +534,39 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
bus = bus->parent;
}
- if (bridge) {
- /*
- * Keystone PCI controller has a h/w limitation of
- * 256 bytes maximum read request size. It can't handle
- * anything higher than this. So force this limit on
- * all downstream devices.
- */
- if (pci_match_id(rc_pci_devids, bridge)) {
- if (pcie_get_readrq(dev) > 256) {
- dev_info(&dev->dev, "limiting MRRS to 256\n");
- pcie_set_readrq(dev, 256);
- }
+ if (!bridge)
+ return;
+
+ /*
+ * Keystone PCI controller has a h/w limitation of
+ * 256 bytes maximum read request size. It can't handle
+ * anything higher than this. So force this limit on
+ * all downstream devices.
+ */
+ if (pci_match_id(rc_pci_devids, bridge)) {
+ if (pcie_get_readrq(dev) > 256) {
+ dev_info(&dev->dev, "limiting MRRS to 256\n");
+ pcie_set_readrq(dev, 256);
}
}
}
-DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
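
DECLARE_PCI_FIXUP_ENABLE() runs ks_pcie_quirk() for every device being enabled; the walk above then applies the clamp only when the host bridge is one of the listed Keystone root complexes. Reduced to its essence, an MRRS clamp quirk is just this (standalone illustration; the vendor-only match below is hypothetical):

    static void demo_limit_mrrs(struct pci_dev *dev)
    {
        if (pcie_get_readrq(dev) > 256) {
            dev_info(&dev->dev, "limiting MRRS to 256\n");
            pcie_set_readrq(dev, 256);
        }
    }
    DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, PCI_ANY_ID, demo_limit_mrrs);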
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
- unsigned int retries;
-
- dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pci)) {
dev_info(dev, "Link already up\n");
return 0;
}
+ ks_pcie_initiate_link_train(ks_pcie);
+
/* check if the link is up or not */
- for (retries = 0; retries < 5; retries++) {
- ks_dw_pcie_initiate_link_train(ks_pcie);
- if (!dw_pcie_wait_for_link(pci))
- return 0;
- }
+ if (!dw_pcie_wait_for_link(pci))
+ return 0;
dev_err(dev, "phy link never came up\n");
return -ETIMEDOUT;
@@ -121,7 +589,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
+ ks_pcie_handle_msi_irq(ks_pcie, offset);
chained_irq_exit(chip, desc);
}
@@ -150,7 +618,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
chained_irq_exit(chip, desc);
}
@@ -222,7 +690,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
ks_pcie_legacy_irq_handler,
ks_pcie);
}
- ks_dw_pcie_enable_legacy_irqs(ks_pcie);
+ ks_pcie_enable_legacy_irqs(ks_pcie);
/* MSI IRQ */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -234,7 +702,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
}
if (ks_pcie->error_irq > 0)
- ks_dw_pcie_enable_error_irq(ks_pcie);
+ ks_pcie_enable_error_irq(ks_pcie);
}
/*
@@ -242,8 +710,8 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
* bus error instead of returning 0xffffffff. This handler always returns 0
 * for this kind of fault.
*/
-static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
- struct pt_regs *regs)
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
{
unsigned long instr = *(unsigned long *) instruction_pointer(regs);
@@ -257,59 +725,78 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
return 0;
}
+static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+{
+ int ret;
+ unsigned int id;
+ struct regmap *devctrl_regs;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+
+ devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
+ if (IS_ERR(devctrl_regs))
+ return PTR_ERR(devctrl_regs);
+
+ ret = regmap_read(devctrl_regs, 0, &id);
+ if (ret)
+ return ret;
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+
+ return 0;
+}
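
The syscon word read back here packs the vendor ID in the low 16 bits and the device ID in the high 16 bits, which is what the PCIE_VENDORID_MASK/PCIE_DEVICEID_SHIFT pair is assumed to encode (0xffff and 16). A self-contained check of the split, with a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t id = 0xb00c104c;  /* hypothetical: device 0xb00c, TI vendor 0x104c */

        printf("vendor 0x%04x device 0x%04x\n",
               (unsigned)(id & 0xffff), (unsigned)(id >> 16));
        return 0;
    }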
+
static int __init ks_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u32 val;
+ int ret;
+
+ dw_pcie_setup_rc(pp);
ks_pcie_establish_link(ks_pcie);
- ks_dw_pcie_setup_rc_app_regs(ks_pcie);
+ ks_pcie_setup_rc_app_regs(ks_pcie);
ks_pcie_setup_interrupts(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
- /* update the Vendor ID */
- writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID);
-
- /* update the DEV_STAT_CTRL to publish right mrrs */
- val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
- val &= ~PCI_EXP_DEVCTL_READRQ;
- /* set the mrrs to 256 bytes */
- val |= BIT(12);
- writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+ ret = ks_pcie_init_id(ks_pcie);
+ if (ret < 0)
+ return ret;
/*
 * PCIe access errors that result in OCP errors are caught by ARM as
* "External aborts"
*/
- hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+ hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
"Asynchronous external abort");
return 0;
}
-static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
- .rd_other_conf = ks_dw_pcie_rd_other_conf,
- .wr_other_conf = ks_dw_pcie_wr_other_conf,
+static const struct dw_pcie_host_ops ks_pcie_host_ops = {
+ .rd_other_conf = ks_pcie_rd_other_conf,
+ .wr_other_conf = ks_pcie_wr_other_conf,
.host_init = ks_pcie_host_init,
- .msi_set_irq = ks_dw_pcie_msi_set_irq,
- .msi_clear_irq = ks_dw_pcie_msi_clear_irq,
- .get_msi_addr = ks_dw_pcie_get_msi_addr,
- .msi_host_init = ks_dw_pcie_msi_host_init,
- .msi_irq_ack = ks_dw_pcie_msi_irq_ack,
- .scan_bus = ks_dw_pcie_v3_65_scan_bus,
+ .msi_set_irq = ks_pcie_msi_set_irq,
+ .msi_clear_irq = ks_pcie_msi_clear_irq,
+ .get_msi_addr = ks_pcie_get_msi_addr,
+ .msi_host_init = ks_pcie_msi_host_init,
+ .msi_irq_ack = ks_pcie_msi_irq_ack,
+ .scan_bus = ks_pcie_v3_65_scan_bus,
};
-static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
+static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
- return ks_dw_pcie_handle_error_irq(ks_pcie);
+ return ks_pcie_handle_error_irq(ks_pcie);
}
-static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
+static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
+ struct platform_device *pdev)
{
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
@@ -338,7 +825,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
if (ks_pcie->error_irq <= 0)
dev_info(dev, "no error IRQ defined\n");
else {
- ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
+ ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler,
IRQF_SHARED, "pcie-error-irq", ks_pcie);
if (ret < 0) {
dev_err(dev, "failed to request error IRQ %d\n",
@@ -347,8 +834,8 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
}
}
- pp->ops = &keystone_pcie_host_ops;
- ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
+ pp->ops = &ks_pcie_host_ops;
+ ret = ks_pcie_dw_host_init(ks_pcie);
if (ret) {
dev_err(dev, "failed to initialize host\n");
return ret;
@@ -365,28 +852,62 @@ static const struct of_device_id ks_pcie_of_match[] = {
{ },
};
-static const struct dw_pcie_ops dw_pcie_ops = {
- .link_up = ks_dw_pcie_link_up,
+static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+ .link_up = ks_pcie_link_up,
};
-static int __exit ks_pcie_remove(struct platform_device *pdev)
+static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
- struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ int num_lanes = ks_pcie->num_lanes;
- clk_disable_unprepare(ks_pcie->clk);
+ while (num_lanes--) {
+ phy_power_off(ks_pcie->phy[num_lanes]);
+ phy_exit(ks_pcie->phy[num_lanes]);
+ }
+}
+
+static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
+{
+ int i;
+ int ret;
+ int num_lanes = ks_pcie->num_lanes;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = phy_init(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(ks_pcie->phy[i]);
+ if (ret < 0) {
+ phy_exit(ks_pcie->phy[i]);
+ goto err_phy;
+ }
+ }
return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(ks_pcie->phy[i]);
+ phy_exit(ks_pcie->phy[i]);
+ }
+
+ return ret;
}
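
The error path uses the standard acquire-with-rollback idiom: on failure at lane i, only the i lanes that were fully brought up are torn down again, in reverse order. A self-contained demonstration of the idiom outside any kernel context:

    #include <stdio.h>

    static int acquire(int i) { return i == 3 ? -1 : 0; }  /* fail at #3 */
    static void release(int i) { printf("release %d\n", i); }

    int main(void)
    {
        int i, n = 5;

        for (i = 0; i < n; i++) {
            if (acquire(i) < 0)
                goto err;
        }
        return 0;
    err:
        while (--i >= 0)
            release(i);  /* prints release 2, 1, 0 */
        return 1;
    }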
static int __init ks_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct dw_pcie *pci;
struct keystone_pcie *ks_pcie;
- struct resource *res;
- void __iomem *reg_p;
- struct phy *phy;
+ struct device_link **link;
+ u32 num_viewport;
+ struct phy **phy;
+ u32 num_lanes;
+ char name[10];
int ret;
+ int i;
ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
@@ -397,54 +918,99 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
- pci->ops = &dw_pcie_ops;
+ pci->ops = &ks_pcie_dw_pcie_ops;
- ks_pcie->pci = pci;
+ ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-viewport* property\n");
+ return ret;
+ }
- /* initialize SerDes Phy if present */
- phy = devm_phy_get(dev, "pcie-phy");
- if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER)
- return PTR_ERR(phy);
+ ret = of_property_read_u32(np, "num-lanes", &num_lanes);
+ if (ret)
+ num_lanes = 1;
- if (!IS_ERR_OR_NULL(phy)) {
- ret = phy_init(phy);
- if (ret < 0)
- return ret;
+ phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ for (i = 0; i < num_lanes; i++) {
+ snprintf(name, sizeof(name), "pcie-phy%d", i);
+ phy[i] = devm_phy_optional_get(dev, name);
+ if (IS_ERR(phy[i])) {
+ ret = PTR_ERR(phy[i]);
+ goto err_link;
+ }
+
+ if (!phy[i])
+ continue;
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
}
- /* index 2 is to read PCI DEVICE_ID */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- reg_p = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg_p))
- return PTR_ERR(reg_p);
- ks_pcie->device_id = readl(reg_p) >> 16;
- devm_iounmap(dev, reg_p);
- devm_release_mem_region(dev, res->start, resource_size(res));
+ ks_pcie->np = np;
+ ks_pcie->pci = pci;
+ ks_pcie->link = link;
+ ks_pcie->num_lanes = num_lanes;
+ ks_pcie->num_viewport = num_viewport;
+ ks_pcie->phy = phy;
- ks_pcie->np = dev->of_node;
- platform_set_drvdata(pdev, ks_pcie);
- ks_pcie->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ks_pcie->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ks_pcie->clk);
+ ret = ks_pcie_enable_phy(ks_pcie);
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ goto err_link;
}
- ret = clk_prepare_enable(ks_pcie->clk);
- if (ret)
- return ret;
platform_set_drvdata(pdev, ks_pcie);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
- ret = ks_add_pcie_port(ks_pcie, pdev);
+ ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
if (ret < 0)
- goto fail_clk;
+ goto err_get_sync;
return 0;
-fail_clk:
- clk_disable_unprepare(ks_pcie->clk);
+
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ ks_pcie_disable_phy(ks_pcie);
+
+err_link:
+ while (--i >= 0 && link[i])
+ device_link_del(link[i]);
return ret;
}
+static int __exit ks_pcie_remove(struct platform_device *pdev)
+{
+ struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ struct device_link **link = ks_pcie->link;
+ int num_lanes = ks_pcie->num_lanes;
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ ks_pcie_disable_phy(ks_pcie);
+ while (num_lanes--)
+ device_link_del(link[num_lanes]);
+
+ return 0;
+}
+
static struct platform_driver ks_pcie_driver __refdata = {
.probe = ks_pcie_probe,
.remove = __exit_p(ks_pcie_remove),
diff --git a/drivers/pci/controller/dwc/pci-keystone.h b/drivers/pci/controller/dwc/pci-keystone.h
deleted file mode 100644
index 8a13da391543..000000000000
--- a/drivers/pci/controller/dwc/pci-keystone.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Keystone PCI Controller's common includes
- *
- * Copyright (C) 2013-2014 Texas Instruments., Ltd.
- * http://www.ti.com
- *
- * Author: Murali Karicheri <m-karicheri2@ti.com>
- */
-
-#define MAX_MSI_HOST_IRQS 8
-
-struct keystone_pcie {
- struct dw_pcie *pci;
- struct clk *clk;
- /* PCI Device ID */
- u32 device_id;
- int num_legacy_host_irqs;
- int legacy_host_irqs[PCI_NUM_INTX];
- struct device_node *legacy_intc_np;
-
- int num_msi_host_irqs;
- int msi_host_irqs[MAX_MSI_HOST_IRQS];
- struct device_node *msi_intc_np;
- struct irq_domain *legacy_irq_domain;
- struct device_node *np;
-
- int error_irq;
-
- /* Application register space */
- void __iomem *va_app_base; /* DT 1st resource */
- struct resource app;
-};
-
-/* Keystone DW specific MSI controller APIs/definitions */
-void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
-phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
-
-/* Keystone specific PCI controller APIs */
-void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
-void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
-void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie);
-irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie);
-int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
- struct device_node *msi_intc_np);
-int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val);
-int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val);
-void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
-void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
-void ks_dw_pcie_msi_irq_ack(int i, struct pcie_port *pp);
-void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
-void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
-void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
-int ks_dw_pcie_msi_host_init(struct pcie_port *pp);
-int ks_dw_pcie_link_up(struct dw_pcie *pci);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 9f1a5e399b70..0989d880ac46 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -36,6 +36,10 @@
#define PORT_LINK_MODE_4_LANES (0x7 << 16)
#define PORT_LINK_MODE_8_LANES (0xf << 16)
+#define PCIE_PORT_DEBUG0 0x728
+#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
+#define PORT_LOGIC_LTSSM_STATE_L0 0x11
+
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 5352e0c3be82..9b599296205d 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -467,8 +467,8 @@ static int kirin_pcie_add_msi(struct dw_pcie *pci,
return 0;
}
-static int __init kirin_add_pcie_port(struct dw_pcie *pci,
- struct platform_device *pdev)
+static int kirin_add_pcie_port(struct dw_pcie *pci,
+ struct platform_device *pdev)
{
int ret;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 4352c1cb926d..d185ea5fe996 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1089,7 +1089,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
struct qcom_pcie *pcie = to_qcom_pcie(pci);
int ret;
- pm_runtime_get_sync(pci->dev);
qcom_ep_reset_assert(pcie);
ret = pcie->ops->init(pcie);
@@ -1126,7 +1125,6 @@ err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
pcie->ops->deinit(pcie);
- pm_runtime_put(pci->dev);
return ret;
}
@@ -1216,6 +1214,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
@@ -1225,44 +1229,56 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->ops = of_device_get_match_data(dev);
pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
- if (IS_ERR(pcie->reset))
- return PTR_ERR(pcie->reset);
+ if (IS_ERR(pcie->reset)) {
+ ret = PTR_ERR(pcie->reset);
+ goto err_pm_runtime_put;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
pcie->parf = devm_ioremap_resource(dev, res);
- if (IS_ERR(pcie->parf))
- return PTR_ERR(pcie->parf);
+ if (IS_ERR(pcie->parf)) {
+ ret = PTR_ERR(pcie->parf);
+ goto err_pm_runtime_put;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
+ if (IS_ERR(pci->dbi_base)) {
+ ret = PTR_ERR(pci->dbi_base);
+ goto err_pm_runtime_put;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
pcie->elbi = devm_ioremap_resource(dev, res);
- if (IS_ERR(pcie->elbi))
- return PTR_ERR(pcie->elbi);
+ if (IS_ERR(pcie->elbi)) {
+ ret = PTR_ERR(pcie->elbi);
+ goto err_pm_runtime_put;
+ }
pcie->phy = devm_phy_optional_get(dev, "pciephy");
- if (IS_ERR(pcie->phy))
- return PTR_ERR(pcie->phy);
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
+ goto err_pm_runtime_put;
+ }
ret = pcie->ops->get_resources(pcie);
if (ret)
- return ret;
+ goto err_pm_runtime_put;
pp->ops = &qcom_pcie_dw_ops;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
+ if (pp->msi_irq < 0) {
+ ret = pp->msi_irq;
+ goto err_pm_runtime_put;
+ }
}
ret = phy_init(pcie->phy);
-	if (ret) {
-		pm_runtime_disable(&pdev->dev);
-		return ret;
-	}
+	if (ret)
+		goto err_pm_runtime_put;
platform_set_drvdata(pdev, pcie);
@@ -1271,10 +1287,16 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "cannot initialize host\n");
-		pm_runtime_disable(&pdev->dev);
- return ret;
+ goto err_pm_runtime_put;
}
return 0;
+
+err_pm_runtime_put:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
}
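
The probe rework funnels every failure after pm_runtime_get_sync() through one unwind label so that the get/put and enable/disable pairs stay balanced. The skeleton of the pattern (generic sketch; claim_resources() is a hypothetical stand-in for the ioremaps and IRQ lookups above):

    static int example_probe(struct device *dev)
    {
        int ret;

        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
        if (ret < 0)
            goto err_pm_runtime_put;

        ret = claim_resources(dev);
        if (ret)
            goto err_pm_runtime_put;

        return 0;

    err_pm_runtime_put:
        pm_runtime_put(dev);
        pm_runtime_disable(dev);
        return ret;
    }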
static const struct of_device_id qcom_pcie_match[] = {
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 6b4555ff2548..750081c1cb48 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -20,12 +20,16 @@
#include <linux/of_pci.h>
#include "../pci.h"
+#include "../pci-bridge-emul.h"
/* PCIe core registers */
+#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
+#define PCIE_CORE_DEV_REV_REG 0x8
+#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
@@ -41,7 +45,10 @@
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
-
+#define PCIE_CORE_INT_A_ASSERT_ENABLE 1
+#define PCIE_CORE_INT_B_ASSERT_ENABLE 2
+#define PCIE_CORE_INT_C_ASSERT_ENABLE 3
+#define PCIE_CORE_INT_D_ASSERT_ENABLE 4
/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR 0x4000
#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
@@ -93,7 +100,9 @@
#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
+#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
+#define PCIE_MSG_PM_PME_MASK BIT(7)
#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
@@ -189,6 +198,7 @@ struct advk_pcie {
struct mutex msi_used_lock;
u16 msi_msg;
int root_bus_nr;
+ struct pci_bridge_emul bridge;
};
static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
@@ -390,6 +400,109 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
+
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_EXP_SLTCTL:
+ *value = PCI_EXP_SLTSTA_PDS << 16;
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_EXP_RTCTL: {
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+ case PCI_EXP_RTSTA: {
+ u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
+ u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
+ *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16);
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+ case PCI_CAP_LIST_ID:
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_LNKCAP:
+ case PCI_EXP_LNKCTL:
+ *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+ return PCI_BRIDGE_EMUL_HANDLED;
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+}
+
+static void
+advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_LNKCTL:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ break;
+
+ case PCI_EXP_RTCTL:
+ new = (new & PCI_EXP_RTCTL_PMEIE) << 3;
+ advk_writel(pcie, new, PCIE_ISR0_MASK_REG);
+ break;
+
+ case PCI_EXP_RTSTA:
+ new = (new & PCI_EXP_RTSTA_PME) >> 9;
+ advk_writel(pcie, new, PCIE_ISR0_REG);
+ break;
+
+ default:
+ break;
+ }
+}
+
+struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+ .read_pcie = advk_pci_bridge_emul_pcie_conf_read,
+ .write_pcie = advk_pci_bridge_emul_pcie_conf_write,
+};
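
The old/new/mask signature implies a contract in which pci-bridge-emul itself merges the CPU's write into its cached copy of the register and then lets the driver react to the changed bits. A sketch of that assumed dispatch (an illustration of the contract, not the library's actual code):

    static void emul_conf_write(struct pci_bridge_emul *bridge, int reg,
                                u32 old, u32 value, u32 mask)
    {
        u32 new = (old & ~mask) | (value & mask);

        /* the core caches 'new', then notifies the driver */
        if (bridge->ops->write_pcie)
            bridge->ops->write_pcie(bridge, reg, old, new, mask);
    }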
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+{
+ struct pci_bridge_emul *bridge = &pcie->bridge;
+
+ bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff;
+ bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16;
+ bridge->conf.class_revision =
+ advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff;
+
+ /* Support 32-bit I/O addressing */
+ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+ bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
+
+ /* Support 64-bit prefetchable memory */
+ bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64;
+ bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64;
+
+ /* Support interrupt A for MSI feature */
+ bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+
+ bridge->has_pcie = true;
+ bridge->data = pcie;
+ bridge->ops = &advk_pci_bridge_emul_ops;
+
+ pci_bridge_emul_init(bridge);
+}
+
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
int devfn)
{
@@ -411,6 +524,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
}
+ if (bus->number == pcie->root_bus_nr)
+ return pci_bridge_emul_conf_read(&pcie->bridge, where,
+ size, val);
+
/* Start PIO */
advk_writel(pcie, 0, PIO_START);
advk_writel(pcie, 1, PIO_ISR);
@@ -418,7 +535,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
reg &= ~PIO_CTRL_TYPE_MASK;
- if (bus->number == pcie->root_bus_nr)
+ if (bus->primary == pcie->root_bus_nr)
reg |= PCIE_CONFIG_RD_TYPE0;
else
reg |= PCIE_CONFIG_RD_TYPE1;
@@ -463,6 +580,10 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (!advk_pcie_valid_device(pcie, bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
+ if (bus->number == pcie->root_bus_nr)
+ return pci_bridge_emul_conf_write(&pcie->bridge, where,
+ size, val);
+
if (where % size)
return PCIBIOS_SET_FAILED;
@@ -473,7 +594,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
reg &= ~PIO_CTRL_TYPE_MASK;
- if (bus->number == pcie->root_bus_nr)
+ if (bus->primary == pcie->root_bus_nr)
reg |= PCIE_CONFIG_WR_TYPE0;
else
reg |= PCIE_CONFIG_WR_TYPE1;
@@ -875,6 +996,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
advk_pcie_setup_hw(pcie);
+ advk_sw_pci_bridge_init(pcie);
+
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(dev, "Failed to initialize irq\n");
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index d8f10451f273..c742881b5061 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -58,9 +58,7 @@ err_out:
int pci_host_common_probe(struct platform_device *pdev,
struct pci_ecam_ops *ops)
{
- const char *type;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
struct list_head resources;
@@ -70,12 +68,6 @@ int pci_host_common_probe(struct platform_device *pdev,
if (!bridge)
return -ENOMEM;
- type = of_get_property(np, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
-
of_pci_check_probe_only();
/* Parse and map our Configuration Space windows */
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index a41d79b8d46a..fa0fc46edb0c 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -22,6 +22,7 @@
#include <linux/of_platform.h>
#include "../pci.h"
+#include "../pci-bridge-emul.h"
/*
* PCIe unit register offsets.
@@ -63,61 +64,6 @@
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
-enum {
- PCISWCAP = PCI_BRIDGE_CONTROL + 2,
- PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID,
- PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP,
- PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL,
- PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP,
- PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL,
- PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP,
- PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL,
- PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL,
- PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA,
- PCISWCAP_EXP_DEVCAP2 = PCISWCAP + PCI_EXP_DEVCAP2,
- PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2,
- PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2,
- PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2,
- PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2,
- PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2,
-};
-
-/* PCI configuration space of a PCI-to-PCI bridge */
-struct mvebu_sw_pci_bridge {
- u16 vendor;
- u16 device;
- u16 command;
- u16 status;
- u16 class;
- u8 interface;
- u8 revision;
- u8 bist;
- u8 header_type;
- u8 latency_timer;
- u8 cache_line_size;
- u32 bar[2];
- u8 primary_bus;
- u8 secondary_bus;
- u8 subordinate_bus;
- u8 secondary_latency_timer;
- u8 iobase;
- u8 iolimit;
- u16 secondary_status;
- u16 membase;
- u16 memlimit;
- u16 iobaseupper;
- u16 iolimitupper;
- u32 romaddr;
- u8 intline;
- u8 intpin;
- u16 bridgectrl;
-
- /* PCI express capability */
- u32 pcie_sltcap;
- u16 pcie_devctl;
- u16 pcie_rtctl;
-};
-
struct mvebu_pcie_port;
/* Structure representing all PCIe interfaces */
@@ -153,7 +99,7 @@ struct mvebu_pcie_port {
struct clk *clk;
struct gpio_desc *reset_gpio;
char *reset_name;
- struct mvebu_sw_pci_bridge bridge;
+ struct pci_bridge_emul bridge;
struct device_node *dn;
struct mvebu_pcie *pcie;
struct mvebu_pcie_window memwin;
@@ -415,11 +361,12 @@ static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {};
+ struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new iobase/iolimit values invalid? */
- if (port->bridge.iolimit < port->bridge.iobase ||
- port->bridge.iolimitupper < port->bridge.iobaseupper ||
- !(port->bridge.command & PCI_COMMAND_IO)) {
+ if (conf->iolimit < conf->iobase ||
+ conf->iolimitupper < conf->iobaseupper ||
+ !(conf->command & PCI_COMMAND_IO)) {
mvebu_pcie_set_window(port, port->io_target, port->io_attr,
&desired, &port->iowin);
return;
@@ -438,11 +385,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
* specifications. iobase is the bus address, port->iowin_base
* is the CPU address.
*/
- desired.remap = ((port->bridge.iobase & 0xF0) << 8) |
- (port->bridge.iobaseupper << 16);
+ desired.remap = ((conf->iobase & 0xF0) << 8) |
+ (conf->iobaseupper << 16);
desired.base = port->pcie->io.start + desired.remap;
- desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
- (port->bridge.iolimitupper << 16)) -
+ desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
+ (conf->iolimitupper << 16)) -
desired.remap) +
1;
@@ -453,10 +400,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
+ struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new membase/memlimit values invalid? */
- if (port->bridge.memlimit < port->bridge.membase ||
- !(port->bridge.command & PCI_COMMAND_MEMORY)) {
+ if (conf->memlimit < conf->membase ||
+ !(conf->command & PCI_COMMAND_MEMORY)) {
mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
&desired, &port->memwin);
return;
@@ -468,130 +416,32 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
* window to setup, according to the PCI-to-PCI bridge
* specifications.
*/
- desired.base = ((port->bridge.membase & 0xFFF0) << 16);
- desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ desired.base = ((conf->membase & 0xFFF0) << 16);
+ desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
desired.base + 1;
mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
&port->memwin);
}
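
The membase/memlimit arithmetic above follows the PCI-to-PCI bridge spec: each 16-bit register holds bits 31:20 of an address, and the limit's low 20 bits are implicitly all-ones. A self-contained check of the window math with hypothetical register values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t membase = 0xe000, memlimit = 0xe7f0;  /* hypothetical */
        uint32_t base = (uint32_t)(membase & 0xfff0) << 16;
        uint32_t size = ((((uint32_t)(memlimit & 0xfff0) << 16) | 0xfffff)
                         - base) + 1;

        /* prints: window 0xe0000000 + 0x08000000 (a 128 MiB window) */
        printf("window 0x%08x + 0x%08x\n", (unsigned)base, (unsigned)size);
        return 0;
    }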
-/*
- * Initialize the configuration space of the PCI-to-PCI bridge
- * associated with the given PCIe interface.
- */
-static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
-{
- struct mvebu_sw_pci_bridge *bridge = &port->bridge;
-
- memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge));
-
- bridge->class = PCI_CLASS_BRIDGE_PCI;
- bridge->vendor = PCI_VENDOR_ID_MARVELL;
- bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
- bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
- bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
- bridge->cache_line_size = 0x10;
-
- /* We support 32 bits I/O addressing */
- bridge->iobase = PCI_IO_RANGE_TYPE_32;
- bridge->iolimit = PCI_IO_RANGE_TYPE_32;
-
- /* Add capabilities */
- bridge->status = PCI_STATUS_CAP_LIST;
-}
-
-/*
- * Read the configuration space of the PCI-to-PCI bridge associated to
- * the given PCIe interface.
- */
-static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
- unsigned int where, int size, u32 *value)
+static pci_bridge_emul_read_status_t
+mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
{
- struct mvebu_sw_pci_bridge *bridge = &port->bridge;
-
- switch (where & ~3) {
- case PCI_VENDOR_ID:
- *value = bridge->device << 16 | bridge->vendor;
- break;
-
- case PCI_COMMAND:
- *value = bridge->command | bridge->status << 16;
- break;
-
- case PCI_CLASS_REVISION:
- *value = bridge->class << 16 | bridge->interface << 8 |
- bridge->revision;
- break;
-
- case PCI_CACHE_LINE_SIZE:
- *value = bridge->bist << 24 | bridge->header_type << 16 |
- bridge->latency_timer << 8 | bridge->cache_line_size;
- break;
-
- case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
- *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4];
- break;
-
- case PCI_PRIMARY_BUS:
- *value = (bridge->secondary_latency_timer << 24 |
- bridge->subordinate_bus << 16 |
- bridge->secondary_bus << 8 |
- bridge->primary_bus);
- break;
-
- case PCI_IO_BASE:
- if (!mvebu_has_ioport(port))
- *value = bridge->secondary_status << 16;
- else
- *value = (bridge->secondary_status << 16 |
- bridge->iolimit << 8 |
- bridge->iobase);
- break;
-
- case PCI_MEMORY_BASE:
- *value = (bridge->memlimit << 16 | bridge->membase);
- break;
-
- case PCI_PREF_MEMORY_BASE:
- *value = 0;
- break;
-
- case PCI_IO_BASE_UPPER16:
- *value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
- break;
-
- case PCI_CAPABILITY_LIST:
- *value = PCISWCAP;
- break;
-
- case PCI_ROM_ADDRESS1:
- *value = 0;
- break;
+ struct mvebu_pcie_port *port = bridge->data;
- case PCI_INTERRUPT_LINE:
- /* LINE PIN MIN_GNT MAX_LAT */
- *value = 0;
- break;
-
- case PCISWCAP_EXP_LIST_ID:
- /* Set PCIe v2, root port, slot support */
- *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
- PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP;
- break;
-
- case PCISWCAP_EXP_DEVCAP:
+ switch (reg) {
+ case PCI_EXP_DEVCAP:
*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
break;
- case PCISWCAP_EXP_DEVCTL:
+ case PCI_EXP_DEVCTL:
*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
- *value |= bridge->pcie_devctl;
break;
- case PCISWCAP_EXP_LNKCAP:
+ case PCI_EXP_LNKCAP:
/*
* PCIe requires the clock power management capability to be
* hard-wired to zero for downstream ports
@@ -600,176 +450,140 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
~PCI_EXP_LNKCAP_CLKPM;
break;
- case PCISWCAP_EXP_LNKCTL:
+ case PCI_EXP_LNKCTL:
*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
break;
- case PCISWCAP_EXP_SLTCAP:
- *value = bridge->pcie_sltcap;
- break;
-
- case PCISWCAP_EXP_SLTCTL:
+ case PCI_EXP_SLTCTL:
*value = PCI_EXP_SLTSTA_PDS << 16;
break;
- case PCISWCAP_EXP_RTCTL:
- *value = bridge->pcie_rtctl;
- break;
-
- case PCISWCAP_EXP_RTSTA:
+ case PCI_EXP_RTSTA:
*value = mvebu_readl(port, PCIE_RC_RTSTA);
break;
- /* PCIe requires the v2 fields to be hard-wired to zero */
- case PCISWCAP_EXP_DEVCAP2:
- case PCISWCAP_EXP_DEVCTL2:
- case PCISWCAP_EXP_LNKCAP2:
- case PCISWCAP_EXP_LNKCTL2:
- case PCISWCAP_EXP_SLTCAP2:
- case PCISWCAP_EXP_SLTCTL2:
default:
- /*
- * PCI defines configuration read accesses to reserved or
- * unimplemented registers to read as zero and complete
- * normally.
- */
- *value = 0;
- return PCIBIOS_SUCCESSFUL;
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
}
- if (size == 2)
- *value = (*value >> (8 * (where & 3))) & 0xffff;
- else if (size == 1)
- *value = (*value >> (8 * (where & 3))) & 0xff;
-
- return PCIBIOS_SUCCESSFUL;
+ return PCI_BRIDGE_EMUL_HANDLED;
}
-/* Write to the PCI-to-PCI bridge configuration space */
-static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
- unsigned int where, int size, u32 value)
+static void
+mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
{
- struct mvebu_sw_pci_bridge *bridge = &port->bridge;
- u32 mask, reg;
- int err;
-
- if (size == 4)
- mask = 0x0;
- else if (size == 2)
- mask = ~(0xffff << ((where & 3) * 8));
- else if (size == 1)
- mask = ~(0xff << ((where & 3) * 8));
- else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, &reg);
- if (err)
- return err;
+ struct mvebu_pcie_port *port = bridge->data;
+ struct pci_bridge_emul_conf *conf = &bridge->conf;
- value = (reg & mask) | value << ((where & 3) * 8);
-
- switch (where & ~3) {
+ switch (reg) {
case PCI_COMMAND:
{
- u32 old = bridge->command;
-
if (!mvebu_has_ioport(port))
- value &= ~PCI_COMMAND_IO;
+ conf->command &= ~PCI_COMMAND_IO;
- bridge->command = value & 0xffff;
- if ((old ^ bridge->command) & PCI_COMMAND_IO)
+ if ((old ^ new) & PCI_COMMAND_IO)
mvebu_pcie_handle_iobase_change(port);
- if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
+ if ((old ^ new) & PCI_COMMAND_MEMORY)
mvebu_pcie_handle_membase_change(port);
- break;
- }
- case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
- bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
break;
+ }
case PCI_IO_BASE:
/*
- * We also keep bit 1 set, it is a read-only bit that
+ * We keep bit 1 set, it is a read-only bit that
* indicates we support 32 bits addressing for the
* I/O
*/
- bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
- bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
+ conf->iobase |= PCI_IO_RANGE_TYPE_32;
+ conf->iolimit |= PCI_IO_RANGE_TYPE_32;
mvebu_pcie_handle_iobase_change(port);
break;
case PCI_MEMORY_BASE:
- bridge->membase = value & 0xffff;
- bridge->memlimit = value >> 16;
mvebu_pcie_handle_membase_change(port);
break;
case PCI_IO_BASE_UPPER16:
- bridge->iobaseupper = value & 0xffff;
- bridge->iolimitupper = value >> 16;
mvebu_pcie_handle_iobase_change(port);
break;
case PCI_PRIMARY_BUS:
- bridge->primary_bus = value & 0xff;
- bridge->secondary_bus = (value >> 8) & 0xff;
- bridge->subordinate_bus = (value >> 16) & 0xff;
- bridge->secondary_latency_timer = (value >> 24) & 0xff;
- mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
+ mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
+ break;
+
+ default:
break;
+ }
+}
+
+static void
+mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct mvebu_pcie_port *port = bridge->data;
- case PCISWCAP_EXP_DEVCTL:
+ switch (reg) {
+ case PCI_EXP_DEVCTL:
/*
* Armada370 data says these bits must always
* be zero when in root complex mode.
*/
- value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
-
- /*
- * If the mask is 0xffff0000, then we only want to write
- * the device control register, rather than clearing the
- * RW1C bits in the device status register. Mask out the
- * status register bits.
- */
- if (mask == 0xffff0000)
- value &= 0xffff;
+ new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
- mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
break;
- case PCISWCAP_EXP_LNKCTL:
+ case PCI_EXP_LNKCTL:
/*
* If we don't support CLKREQ, we must ensure that the
* CLKREQ enable bit always reads zero. Since we haven't
* had this capability, and it's dependent on board wiring,
* disable it for the time being.
*/
- value &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- /*
- * If the mask is 0xffff0000, then we only want to write
- * the link control register, rather than clearing the
- * RW1C bits in the link status register. Mask out the
- * RW1C status register bits.
- */
- if (mask == 0xffff0000)
- value &= ~((PCI_EXP_LNKSTA_LABS |
- PCI_EXP_LNKSTA_LBMS) << 16);
-
- mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
break;
- case PCISWCAP_EXP_RTSTA:
- mvebu_writel(port, value, PCIE_RC_RTSTA);
+ case PCI_EXP_RTSTA:
+ mvebu_writel(port, new, PCIE_RC_RTSTA);
break;
+ }
+}
- default:
- break;
+struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+ .write_base = mvebu_pci_bridge_emul_base_conf_write,
+ .read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
+ .write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
+};
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
+{
+ struct pci_bridge_emul *bridge = &port->bridge;
+
+ bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
+ bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
+ bridge->conf.class_revision =
+ mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
+
+ if (mvebu_has_ioport(port)) {
+ /* We support 32-bit I/O addressing */
+ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+ bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
}
- return PCIBIOS_SUCCESSFUL;
+ bridge->has_pcie = true;
+ bridge->data = port;
+ bridge->ops = &mvebu_pci_bridge_emul_ops;
+
+ pci_bridge_emul_init(bridge);
}
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
@@ -789,8 +603,8 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
if (bus->number == 0 && port->devfn == devfn)
return port;
if (bus->number != 0 &&
- bus->number >= port->bridge.secondary_bus &&
- bus->number <= port->bridge.subordinate_bus)
+ bus->number >= port->bridge.conf.secondary_bus &&
+ bus->number <= port->bridge.conf.subordinate_bus)
return port;
}
@@ -811,7 +625,8 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Access the emulated PCI-to-PCI bridge */
if (bus->number == 0)
- return mvebu_sw_pci_bridge_write(port, where, size, val);
+ return pci_bridge_emul_conf_write(&port->bridge, where,
+ size, val);
if (!mvebu_pcie_link_up(port))
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -839,7 +654,8 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
/* Access the emulated PCI-to-PCI bridge */
if (bus->number == 0)
- return mvebu_sw_pci_bridge_read(port, where, size, val);
+ return pci_bridge_emul_conf_read(&port->bridge, where,
+ size, val);
if (!mvebu_pcie_link_up(port)) {
*val = 0xffffffff;
@@ -1297,7 +1113,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
mvebu_pcie_setup_hw(port);
mvebu_pcie_set_local_dev_nr(port, 1);
- mvebu_sw_pci_bridge_init(port);
+ mvebu_pci_bridge_emul_init(port);
}
pcie->nports = i;
diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c
index 9e87dd7f9ac3..c3a088910f48 100644
--- a/drivers/pci/controller/pcie-cadence-ep.c
+++ b/drivers/pci/controller/pcie-cadence-ep.c
@@ -258,7 +258,6 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
u8 intx, bool is_asserted)
{
struct cdns_pcie *pcie = &ep->pcie;
- u32 r = ep->max_regions - 1;
u32 offset;
u16 status;
u8 msg_code;
@@ -268,8 +267,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
/* Set the outbound region if needed. */
if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
ep->irq_pci_fn != fn)) {
- /* Last region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r,
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0,
ep->irq_phys_addr);
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
ep->irq_pci_fn = fn;
@@ -347,8 +346,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
/* Set the outbound region if needed. */
if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
ep->irq_pci_fn != fn)) {
- /* Last region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1,
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region(pcie, fn, 0,
false,
ep->irq_phys_addr,
pci_addr & ~pci_addr_mask,
@@ -356,7 +355,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
ep->irq_pci_fn = fn;
}
- writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+ writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
return 0;
}
@@ -517,6 +516,8 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
goto free_epc_mem;
}
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
+ /* Reserve region 0 for IRQs */
+ set_bit(0, &ep->ob_region_map);
return 0;
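
Reserving bit 0 here pairs with an allocator that hands out outbound regions from the same bitmap; the usual kernel idiom looks like the sketch below (the actual allocation site is outside this hunk, so this is an assumed shape):

    static int alloc_ob_region(unsigned long *map, unsigned long nbits)
    {
        unsigned long r = find_first_zero_bit(map, nbits);

        if (r >= nbits)
            return -ENOMEM;
        set_bit(r, map);
        return r;
    }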
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c
index ec394f6a19c8..97e251090b4f 100644
--- a/drivers/pci/controller/pcie-cadence-host.c
+++ b/drivers/pci/controller/pcie-cadence-host.c
@@ -235,7 +235,6 @@ static int cdns_pcie_host_init(struct device *dev,
static int cdns_pcie_host_probe(struct platform_device *pdev)
{
- const char *type;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
@@ -268,12 +267,6 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
rc->device_id = 0xffff;
of_property_read_u16(np, "device-id", &rc->device_id);
- type = of_get_property(np, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
pcie->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->reg_base)) {
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c
index 975bcdd6b5c0..cd795f6fc1e2 100644
--- a/drivers/pci/controller/pcie-cadence.c
+++ b/drivers/pci/controller/pcie-cadence.c
@@ -190,14 +190,16 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
for (i = 0; i < phy_count; i++) {
of_property_read_string_index(np, "phy-names", i, &name);
- phy[i] = devm_phy_optional_get(dev, name);
- if (IS_ERR(phy))
- return PTR_ERR(phy);
-
+ phy[i] = devm_phy_get(dev, name);
+ if (IS_ERR(phy[i])) {
+ ret = PTR_ERR(phy[i]);
+ goto err_phy;
+ }
link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
if (!link[i]) {
+ devm_phy_put(dev, phy[i]);
ret = -EINVAL;
- goto err_link;
+ goto err_phy;
}
}
@@ -207,13 +209,15 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
ret = cdns_pcie_enable_phy(pcie);
if (ret)
- goto err_link;
+ goto err_phy;
return 0;
-err_link:
- while (--i >= 0)
+err_phy:
+ while (--i >= 0) {
device_link_del(link[i]);
+ devm_phy_put(dev, phy[i]);
+ }
return ret;
}
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 3160e9342a2f..c20fd6bd68fd 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -630,14 +630,6 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
return (pcie->base + offset);
}
- /*
- * PAXC is connected to an internally emulated EP within the SoC. It
- * allows only one device.
- */
- if (pcie->ep_is_internal)
- if (slot > 0)
- return NULL;
-
return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
}
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 861dda69f366..d069a76cbb95 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -15,6 +15,7 @@
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
@@ -162,6 +163,7 @@ struct mtk_pcie_soc {
* @phy: pointer to PHY control block
* @lane: lane count
* @slot: port slot
+ * @irq: GIC irq
* @irq_domain: legacy INTx IRQ domain
* @inner_domain: inner IRQ domain
* @msi_domain: MSI IRQ domain
@@ -182,6 +184,7 @@ struct mtk_pcie_port {
struct phy *phy;
u32 lane;
u32 slot;
+ int irq;
struct irq_domain *irq_domain;
struct irq_domain *inner_domain;
struct irq_domain *msi_domain;
@@ -225,10 +228,8 @@ static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
clk_disable_unprepare(pcie->free_ck);
- if (dev->pm_domain) {
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
- }
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
}
static void mtk_pcie_port_free(struct mtk_pcie_port *port)
@@ -337,6 +338,17 @@ static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
{
struct mtk_pcie *pcie = bus->sysdata;
struct mtk_pcie_port *port;
+ struct pci_dev *dev = NULL;
+
+ /*
+ * Walk the bus hierarchy to get the devfn value
+ * of the port in the root bus.
+ */
+ while (bus && bus->number) {
+ dev = bus->self;
+ bus = dev->bus;
+ devfn = dev->devfn;
+ }
list_for_each_entry(port, &pcie->ports, list)
if (port->slot == PCI_SLOT(devfn))
@@ -383,75 +395,6 @@ static struct pci_ops mtk_pcie_ops_v2 = {
.write = mtk_pcie_config_write,
};
-static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
-{
- struct mtk_pcie *pcie = port->pcie;
- struct resource *mem = &pcie->mem;
- const struct mtk_pcie_soc *soc = port->pcie->soc;
- u32 val;
- size_t size;
- int err;
-
- /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
- if (pcie->base) {
- val = readl(pcie->base + PCIE_SYS_CFG_V2);
- val |= PCIE_CSR_LTSSM_EN(port->slot) |
- PCIE_CSR_ASPM_L1_EN(port->slot);
- writel(val, pcie->base + PCIE_SYS_CFG_V2);
- }
-
- /* Assert all reset signals */
- writel(0, port->base + PCIE_RST_CTRL);
-
- /*
- * Enable PCIe link down reset, if link status changed from link up to
- * link down, this will reset MAC control registers and configuration
- * space.
- */
- writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
-
- /* De-assert PHY, PE, PIPE, MAC and configuration reset */
- val = readl(port->base + PCIE_RST_CTRL);
- val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
- PCIE_MAC_SRSTB | PCIE_CRSTB;
- writel(val, port->base + PCIE_RST_CTRL);
-
- /* Set up vendor ID and class code */
- if (soc->need_fix_class_id) {
- val = PCI_VENDOR_ID_MEDIATEK;
- writew(val, port->base + PCIE_CONF_VEND_ID);
-
- val = PCI_CLASS_BRIDGE_HOST;
- writew(val, port->base + PCIE_CONF_CLASS_ID);
- }
-
- /* 100ms timeout value should be enough for Gen1/2 training */
- err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
- !!(val & PCIE_PORT_LINKUP_V2), 20,
- 100 * USEC_PER_MSEC);
- if (err)
- return -ETIMEDOUT;
-
- /* Set INTx mask */
- val = readl(port->base + PCIE_INT_MASK);
- val &= ~INTX_MASK;
- writel(val, port->base + PCIE_INT_MASK);
-
- /* Set AHB to PCIe translation windows */
- size = mem->end - mem->start;
- val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
- writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
-
- val = upper_32_bits(mem->start);
- writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
-
- /* Set PCIe to AXI translation memory space.*/
- val = fls(0xffffffff) | WIN_ENABLE;
- writel(val, port->base + PCIE_AXI_WINDOW0);
-
- return 0;
-}
-
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -590,6 +533,27 @@ static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
writel(val, port->base + PCIE_INT_MASK);
}
+static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
+{
+ struct mtk_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+ if (port->irq_domain)
+ irq_domain_remove(port->irq_domain);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ if (port->msi_domain)
+ irq_domain_remove(port->msi_domain);
+ if (port->inner_domain)
+ irq_domain_remove(port->inner_domain);
+ }
+
+ irq_dispose_mapping(port->irq);
+ }
+}
+
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -628,8 +592,6 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
ret = mtk_pcie_allocate_msi_domains(port);
if (ret)
return ret;
-
- mtk_pcie_enable_msi(port);
}
return 0;
@@ -682,7 +644,7 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
struct mtk_pcie *pcie = port->pcie;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- int err, irq;
+ int err;
err = mtk_pcie_init_irq_domain(port, node);
if (err) {
@@ -690,8 +652,81 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
return err;
}
- irq = platform_get_irq(pdev, port->slot);
- irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+ port->irq = platform_get_irq(pdev, port->slot);
+ irq_set_chained_handler_and_data(port->irq,
+ mtk_pcie_intr_handler, port);
+
+ return 0;
+}
+
+static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct resource *mem = &pcie->mem;
+ const struct mtk_pcie_soc *soc = port->pcie->soc;
+ u32 val;
+ size_t size;
+ int err;
+
+ /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
+ if (pcie->base) {
+ val = readl(pcie->base + PCIE_SYS_CFG_V2);
+ val |= PCIE_CSR_LTSSM_EN(port->slot) |
+ PCIE_CSR_ASPM_L1_EN(port->slot);
+ writel(val, pcie->base + PCIE_SYS_CFG_V2);
+ }
+
+ /* Assert all reset signals */
+ writel(0, port->base + PCIE_RST_CTRL);
+
+ /*
+ * Enable PCIe link down reset. If the link status changes from link up
+ * to link down, this will reset the MAC control registers and configuration
+ * space.
+ */
+ writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+
+ /* De-assert PHY, PE, PIPE, MAC and configuration reset */
+ val = readl(port->base + PCIE_RST_CTRL);
+ val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
+ PCIE_MAC_SRSTB | PCIE_CRSTB;
+ writel(val, port->base + PCIE_RST_CTRL);
+
+ /* Set up vendor ID and class code */
+ if (soc->need_fix_class_id) {
+ val = PCI_VENDOR_ID_MEDIATEK;
+ writew(val, port->base + PCIE_CONF_VEND_ID);
+
+ val = PCI_CLASS_BRIDGE_PCI;
+ writew(val, port->base + PCIE_CONF_CLASS_ID);
+ }
+
+ /* 100ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
+ !!(val & PCIE_PORT_LINKUP_V2), 20,
+ 100 * USEC_PER_MSEC);
+ if (err)
+ return -ETIMEDOUT;
+
+ /* Set INTx mask */
+ val = readl(port->base + PCIE_INT_MASK);
+ val &= ~INTX_MASK;
+ writel(val, port->base + PCIE_INT_MASK);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ mtk_pcie_enable_msi(port);
+
+ /* Set AHB to PCIe translation windows */
+ size = mem->end - mem->start;
+ val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
+ writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
+
+ val = upper_32_bits(mem->start);
+ writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
+
+ /* Set PCIe to AXI translation memory space. */
+ val = fls(0xffffffff) | WIN_ENABLE;
+ writel(val, port->base + PCIE_AXI_WINDOW0);
return 0;
}
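
The AHB-to-PCIe window size is encoded as fls(size), the index of the highest set bit; since size here is mem->end - mem->start (the resource length minus one), a 256 MiB window yields 28. A standalone check using a userspace stand-in for the kernel's fls():

    #include <stdio.h>

    static int fls_demo(unsigned int x)
    {
        int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    int main(void)
    {
        /* prints: 28 32 */
        printf("%d %d\n", fls_demo(0x10000000 - 1), fls_demo(0xffffffff));
        return 0;
    }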
@@ -987,10 +1022,8 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
pcie->free_ck = NULL;
}
- if (dev->pm_domain) {
- pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
/* enable top level clock */
err = clk_prepare_enable(pcie->free_ck);
@@ -1002,10 +1035,8 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
return 0;
err_free_ck:
- if (dev->pm_domain) {
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
- }
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return err;
}
@@ -1109,36 +1140,10 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
if (err < 0)
return err;
- devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
-
- return 0;
-}
-
-static int mtk_pcie_register_host(struct pci_host_bridge *host)
-{
- struct mtk_pcie *pcie = pci_host_bridge_priv(host);
- struct pci_bus *child;
- int err;
-
- host->busnr = pcie->busn.start;
- host->dev.parent = pcie->dev;
- host->ops = pcie->soc->ops;
- host->map_irq = of_irq_parse_and_map_pci;
- host->swizzle_irq = pci_common_swizzle;
- host->sysdata = pcie;
-
- err = pci_scan_root_bus_bridge(host);
- if (err < 0)
+ err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
+ if (err)
return err;
- pci_bus_size_bridges(host->bus);
- pci_bus_assign_resources(host->bus);
-
- list_for_each_entry(child, &host->bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(host->bus);
-
return 0;
}
@@ -1168,7 +1173,14 @@ static int mtk_pcie_probe(struct platform_device *pdev)
if (err)
goto put_resources;
- err = mtk_pcie_register_host(host);
+ host->busnr = pcie->busn.start;
+ host->dev.parent = pcie->dev;
+ host->ops = pcie->soc->ops;
+ host->map_irq = of_irq_parse_and_map_pci;
+ host->swizzle_irq = pci_common_swizzle;
+ host->sysdata = pcie;
+
+ err = pci_host_probe(host);
if (err)
goto put_resources;
@@ -1181,6 +1193,80 @@ put_resources:
return err;
}
+
+static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct list_head *windows = &host->windows;
+
+ pci_free_resource_list(windows);
+}
+
+static int mtk_pcie_remove(struct platform_device *pdev)
+{
+ struct mtk_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+ pci_stop_root_bus(host->bus);
+ pci_remove_root_bus(host->bus);
+ mtk_pcie_free_resources(pcie);
+
+ mtk_pcie_irq_teardown(pcie);
+
+ mtk_pcie_put_resources(pcie);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+{
+ struct mtk_pcie *pcie = dev_get_drvdata(dev);
+ struct mtk_pcie_port *port;
+
+ if (list_empty(&pcie->ports))
+ return 0;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ clk_disable_unprepare(port->pipe_ck);
+ clk_disable_unprepare(port->obff_ck);
+ clk_disable_unprepare(port->axi_ck);
+ clk_disable_unprepare(port->aux_ck);
+ clk_disable_unprepare(port->ahb_ck);
+ clk_disable_unprepare(port->sys_ck);
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ }
+
+ clk_disable_unprepare(pcie->free_ck);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+{
+ struct mtk_pcie *pcie = dev_get_drvdata(dev);
+ struct mtk_pcie_port *port, *tmp;
+
+ if (list_empty(&pcie->ports))
+ return 0;
+
+ clk_prepare_enable(pcie->free_ck);
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ mtk_pcie_enable_port(port);
+
+ /* In case the EP was removed during system suspend. */
+ if (list_empty(&pcie->ports))
+ clk_disable_unprepare(pcie->free_ck);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
+};
+
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
.ops = &mtk_pcie_ops,
.startup = mtk_pcie_startup_port,
@@ -1209,10 +1295,13 @@ static const struct of_device_id mtk_pcie_ids[] = {
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
+ .remove = mtk_pcie_remove,
.driver = {
.name = "mtk-pcie",
.of_match_table = mtk_pcie_ids,
.suppress_bind_attrs = true,
+ .pm = &mtk_pcie_pm_ops,
},
};
-builtin_platform_driver(mtk_pcie_driver);
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
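
For reference, the suspend/resume handlers added above are wired into the
noirq phase of system sleep by SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(); under
CONFIG_PM_SLEEP the macro expands to roughly the following initializers (and
to nothing otherwise):

	.suspend_noirq  = mtk_pcie_suspend_noirq,
	.resume_noirq   = mtk_pcie_resume_noirq,
	.freeze_noirq   = mtk_pcie_suspend_noirq,
	.thaw_noirq     = mtk_pcie_resume_noirq,
	.poweroff_noirq = mtk_pcie_suspend_noirq,
	.restore_noirq  = mtk_pcie_resume_noirq,

The builtin_platform_driver() to module_platform_driver() conversion is also
why the MODULE_LICENSE() tag becomes necessary.
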
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index a939e8d31735..77052a0712d0 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -301,13 +301,6 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
struct platform_device *pdev = pcie->pdev;
struct device_node *node = dev->of_node;
struct resource *res;
- const char *type;
-
- type = of_get_property(node, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
/* map config resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index fb32840ce8e6..81538d77f790 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -777,16 +777,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
struct platform_device *pdev)
{
struct device *dev = pcie->dev;
- struct device_node *node = dev->of_node;
struct resource *res;
- const char *type;
-
- /* Check for device type */
- type = of_get_property(node, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
pcie->breg_base = devm_ioremap_resource(dev, res);
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 7b1389d8e2a5..9bd1a35cd5d8 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -574,15 +574,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct resource regs;
- const char *type;
int err;
- type = of_get_property(node, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
-
err = of_address_to_resource(node, 0, &regs);
if (err) {
dev_err(dev, "missing \"reg\" property\n");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index f31ed62d518c..e50b0b5815ff 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -809,12 +809,12 @@ static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
- vmd_detach_resources(vmd);
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_teardown_dma_ops(vmd);
+ vmd_detach_resources(vmd);
irq_domain_remove(vmd->irq_domain);
}
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
new file mode 100644
index 000000000000..a32070be5adf
--- /dev/null
+++ b/drivers/pci/hotplug/TODO
@@ -0,0 +1,74 @@
+Contributions are solicited in particular to remedy the following issues:
+
+cpcihp:
+
+* There are no implementations of the ->hardware_test, ->get_power and
+ ->set_power callbacks in struct cpci_hp_controller_ops. Why were they
+ introduced? Can they be removed from the struct?
+
+cpqphp:
+
+* The driver spawns a kthread cpqhp_event_thread() which is woken by the
+ hardirq handler cpqhp_ctrl_intr(). Convert this to threaded IRQ handling.
+ The kthread is also woken from the timer pushbutton_helper_thread();
+ convert it to call irq_wake_thread(). Use pciehp as a template (a sketch
+ follows at the end of this file).
+
+* A large portion of cpqphp_ctrl.c and cpqphp_pci.c concerns resource
+ management. Doesn't this duplicate functionality in the core?
+
+ibmphp:
+
+* Implementations of hotplug_slot_ops callbacks such as get_adapter_present()
+ in ibmphp_core.c create a copy of the struct slot on the stack, then perform
+ the actual operation on that copy. Determine if this overhead is necessary
+ and delete it if not. The functions also perform a NULL pointer check on
+ the struct hotplug_slot; this seems superfluous.
+
+* Several functions access the pci_slot member in struct hotplug_slot even
+ though pci_hotplug.h declares it private. See get_max_bus_speed() for an
+ example. Either the pci_slot member should no longer be declared private
+ or ibmphp should store a pointer to its bus in struct slot. Probably the
+ former.
+
+* The functions get_max_adapter_speed() and get_bus_name() are commented out.
+ Can they be deleted? There are also forward declarations at the top of
+ ibmphp_core.c as well as pointers in ibmphp_hotplug_slot_ops, likewise
+ commented out.
+
+* ibmphp_init_devno() takes a struct slot **; it could instead take a
+ struct slot *.
+
+* The return value of pci_hp_register() is not checked.
+
+* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
+ and once more in the error path of its caller ibmphp_access_ebda().
+
+* The various slot data structures are difficult to follow and need to be
+ simplified. A lot of functions are too large and too complex; they need
+ to be broken up into smaller, manageable pieces. Negative examples are
+ ebda_rsrc_controller() and configure_bridge().
+
+* A large portion of ibmphp_res.c and ibmphp_pci.c concerns resource
+ management. Doesn't this duplicate functionality in the core?
+
+sgi_hotplug:
+
+* Several functions access the pci_slot member in struct hotplug_slot even
+ though pci_hotplug.h declares it private. See sn_hp_destroy() for an
+ example. Either the pci_slot member should no longer be declared private
+ or sgi_hotplug should store a pointer to it in struct slot. Probably the
+ former.
+
+shpchp:
+
+* There is only a single implementation of struct hpc_ops. Can the struct be
+ removed and its functions invoked directly? This has already been done in
+ pciehp with commit 82a9e79ef132 ("PCI: pciehp: remove hpc_ops"). Clarify
+ if there was a specific reason not to apply the same change to shpchp.
+
+* The ->get_mode1_ECC_cap callback in shpchp_hpc_ops is never invoked.
+ Why was it introduced? Can it be removed?
+
+* The hardirq handler shpc_isr() queues events on a workqueue. It can be
+ simplified by converting it to threaded IRQ handling. Use pciehp as a
+ template.
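
For the threaded-IRQ conversions suggested for cpqphp and shpchp above, a
minimal sketch of the target pattern; the cpqhp_event_pending() and
cpqhp_process_events() helpers and the irq field name are assumptions for
illustration, not existing cpqphp symbols:

	#include <linux/interrupt.h>

	static irqreturn_t cpqhp_hardirq(int irq, void *data)
	{
		struct controller *ctrl = data;

		/* hypothetical helper: check and acknowledge the event source */
		if (!cpqhp_event_pending(ctrl))
			return IRQ_NONE;	/* not ours (shared line) */

		return IRQ_WAKE_THREAD;		/* defer to cpqhp_irq_thread() */
	}

	static irqreturn_t cpqhp_irq_thread(int irq, void *data)
	{
		struct controller *ctrl = data;

		/* hypothetical helper: body of the old cpqhp_event_thread() */
		cpqhp_process_events(ctrl);
		return IRQ_HANDLED;
	}

	/* at probe time, instead of spawning the kthread: */
	err = request_threaded_irq(ctrl->irq, cpqhp_hardirq, cpqhp_irq_thread,
				   IRQF_SHARED, "cpqphp", ctrl);

The timer callback pushbutton_helper_thread() would then call
irq_wake_thread(ctrl->irq, ctrl) rather than waking a kthread.
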
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index e438a2d734f2..cf3058404f41 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -33,15 +33,19 @@ struct acpiphp_slot;
* struct slot - slot information for each *physical* slot
*/
struct slot {
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct acpiphp_slot *acpi_slot;
- struct hotplug_slot_info info;
unsigned int sun; /* ACPI _SUN (Slot User Number) value */
};
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
+}
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
/*
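
This acpiphp conversion shows the pattern applied throughout the series:
struct hotplug_slot is embedded in the driver's slot structure instead of
being allocated separately, and the old ->private back-pointer is replaced by
container_of(). A self-contained sketch of the idiom with illustrative names:

	#include <linux/kernel.h>	/* container_of() */
	#include <linux/pci_hotplug.h>

	struct my_slot {
		struct hotplug_slot hotplug_slot;	/* embedded, not a pointer */
		unsigned int powered;			/* driver-private state */
	};

	static inline struct my_slot *to_my_slot(struct hotplug_slot *hs)
	{
		/* recover the container from the address of its member */
		return container_of(hs, struct my_slot, hotplug_slot);
	}

	static int get_power_status(struct hotplug_slot *hs, u8 *value)
	{
		struct my_slot *slot = to_my_slot(hs);	/* was hs->private */

		*value = slot->powered;
		return 0;
	}

One allocation and one kfree() then cover the whole slot, which is why the
error_hpslot-style unwind labels disappear in the hunks that follow.
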
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index ad32ffbc4b91..c9e2bd40c038 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -57,7 +57,7 @@ static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
+static const struct hotplug_slot_ops acpi_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
@@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(acpiphp_unregister_attention);
*/
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -135,7 +135,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
*/
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -179,7 +179,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
*/
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -225,7 +225,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -245,7 +245,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -266,39 +266,26 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
if (!slot)
goto error;
- slot->hotplug_slot = kzalloc(sizeof(*slot->hotplug_slot), GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
-
- slot->hotplug_slot->info = &slot->info;
-
- slot->hotplug_slot->private = slot;
- slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
+ slot->hotplug_slot.ops = &acpi_hotplug_slot_ops;
slot->acpi_slot = acpiphp_slot;
- slot->hotplug_slot->info->power_status = acpiphp_get_power_status(slot->acpi_slot);
- slot->hotplug_slot->info->attention_status = 0;
- slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot);
- slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
acpiphp_slot->slot = slot;
slot->sun = sun;
snprintf(name, SLOT_NAME_SIZE, "%u", sun);
- retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bus,
+ retval = pci_hp_register(&slot->hotplug_slot, acpiphp_slot->bus,
acpiphp_slot->device, name);
if (retval == -EBUSY)
- goto error_hpslot;
+ goto error_slot;
if (retval) {
pr_err("pci_hp_register failed with error %d\n", retval);
- goto error_hpslot;
+ goto error_slot;
}
pr_info("Slot [%s] registered\n", slot_name(slot));
return 0;
-error_hpslot:
- kfree(slot->hotplug_slot);
error_slot:
kfree(slot);
error:
@@ -312,8 +299,7 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
pr_info("Slot [%s] unregistered\n", slot_name(slot));
- pci_hp_deregister(slot->hotplug_slot);
- kfree(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
kfree(slot);
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 41713f16ff97..df48b3b03ab4 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -41,7 +41,7 @@ MODULE_VERSION(DRIVER_VERSION);
#define IBM_HARDWARE_ID1 "IBM37D0"
#define IBM_HARDWARE_ID2 "IBM37D4"
-#define hpslot_to_sun(A) (((struct slot *)((A)->private))->sun)
+#define hpslot_to_sun(A) (to_slot(A)->sun)
/* union apci_descriptor - allows access to the
* various device descriptors that are embedded in the
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 4658557be01a..f33ff2bca414 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -32,8 +32,10 @@ struct slot {
unsigned int devfn;
struct pci_bus *bus;
struct pci_dev *dev;
+ unsigned int latch_status:1;
+ unsigned int adapter_status:1;
unsigned int extracting;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct list_head slot_list;
};
@@ -58,7 +60,12 @@ struct cpci_hp_controller {
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
+}
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
int cpci_hp_register_controller(struct cpci_hp_controller *controller);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 52a339baf06c..603eadf3d965 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -57,7 +57,7 @@ static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
+static const struct hotplug_slot_ops cpci_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
@@ -68,29 +68,9 @@ static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
};
static int
-update_latch_status(struct hotplug_slot *hotplug_slot, u8 value)
-{
- struct hotplug_slot_info info;
-
- memcpy(&info, hotplug_slot->info, sizeof(struct hotplug_slot_info));
- info.latch_status = value;
- return pci_hp_change_slot_info(hotplug_slot, &info);
-}
-
-static int
-update_adapter_status(struct hotplug_slot *hotplug_slot, u8 value)
-{
- struct hotplug_slot_info info;
-
- memcpy(&info, hotplug_slot->info, sizeof(struct hotplug_slot_info));
- info.adapter_status = value;
- return pci_hp_change_slot_info(hotplug_slot, &info);
-}
-
-static int
enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int retval = 0;
dbg("%s - physical_slot = %s", __func__, slot_name(slot));
@@ -103,7 +83,7 @@ enable_slot(struct hotplug_slot *hotplug_slot)
static int
disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int retval = 0;
dbg("%s - physical_slot = %s", __func__, slot_name(slot));
@@ -135,8 +115,7 @@ disable_slot(struct hotplug_slot *hotplug_slot)
goto disable_error;
}
- if (update_adapter_status(slot->hotplug_slot, 0))
- warn("failure to update adapter file");
+ slot->adapter_status = 0;
if (slot->extracting) {
slot->extracting = 0;
@@ -160,7 +139,7 @@ cpci_get_power_status(struct slot *slot)
static int
get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
*value = cpci_get_power_status(slot);
return 0;
@@ -169,7 +148,7 @@ get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int
get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
*value = cpci_get_attention_status(slot);
return 0;
@@ -178,27 +157,29 @@ get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int
set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
- return cpci_set_attention_status(hotplug_slot->private, status);
+ return cpci_set_attention_status(to_slot(hotplug_slot), status);
}
static int
get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- *value = hotplug_slot->info->adapter_status;
+ struct slot *slot = to_slot(hotplug_slot);
+
+ *value = slot->adapter_status;
return 0;
}
static int
get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- *value = hotplug_slot->info->latch_status;
+ struct slot *slot = to_slot(hotplug_slot);
+
+ *value = slot->latch_status;
return 0;
}
static void release_slot(struct slot *slot)
{
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
pci_dev_put(slot->dev);
kfree(slot);
}
@@ -209,8 +190,6 @@ int
cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
{
struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
int status;
int i;
@@ -229,43 +208,19 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
goto error;
}
- hotplug_slot =
- kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot) {
- status = -ENOMEM;
- goto error_slot;
- }
- slot->hotplug_slot = hotplug_slot;
-
- info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
- if (!info) {
- status = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot->info = info;
-
slot->bus = bus;
slot->number = i;
slot->devfn = PCI_DEVFN(i, 0);
snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
- hotplug_slot->private = slot;
- hotplug_slot->ops = &cpci_hotplug_slot_ops;
-
- /*
- * Initialize the slot info structure with some known
- * good values.
- */
- dbg("initializing slot %s", name);
- info->power_status = cpci_get_power_status(slot);
- info->attention_status = cpci_get_attention_status(slot);
+ slot->hotplug_slot.ops = &cpci_hotplug_slot_ops;
dbg("registering slot %s", name);
- status = pci_hp_register(slot->hotplug_slot, bus, i, name);
+ status = pci_hp_register(&slot->hotplug_slot, bus, i, name);
if (status) {
err("pci_hp_register failed with error %d", status);
- goto error_info;
+ goto error_slot;
}
dbg("slot registered with name: %s", slot_name(slot));
@@ -276,10 +231,6 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
up_write(&list_rwsem);
}
return 0;
-error_info:
- kfree(info);
-error_hpslot:
- kfree(hotplug_slot);
error_slot:
kfree(slot);
error:
@@ -305,7 +256,7 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
slots--;
dbg("deregistering slot %s", slot_name(slot));
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
release_slot(slot);
}
}
@@ -359,10 +310,8 @@ init_slots(int clear_ins)
__func__, slot_name(slot));
dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
if (dev) {
- if (update_adapter_status(slot->hotplug_slot, 1))
- warn("failure to update adapter file");
- if (update_latch_status(slot->hotplug_slot, 1))
- warn("failure to update latch file");
+ slot->adapter_status = 1;
+ slot->latch_status = 1;
slot->dev = dev;
}
}
@@ -424,11 +373,8 @@ check_slots(void)
dbg("%s - slot %s HS_CSR (2) = %04x",
__func__, slot_name(slot), hs_csr);
- if (update_latch_status(slot->hotplug_slot, 1))
- warn("failure to update latch file");
-
- if (update_adapter_status(slot->hotplug_slot, 1))
- warn("failure to update adapter file");
+ slot->latch_status = 1;
+ slot->adapter_status = 1;
cpci_led_off(slot);
@@ -449,9 +395,7 @@ check_slots(void)
__func__, slot_name(slot), hs_csr);
if (!slot->extracting) {
- if (update_latch_status(slot->hotplug_slot, 0))
- warn("failure to update latch file");
-
+ slot->latch_status = 0;
slot->extracting = 1;
atomic_inc(&extracting);
}
@@ -465,8 +409,7 @@ check_slots(void)
*/
err("card in slot %s was improperly removed",
slot_name(slot));
- if (update_adapter_status(slot->hotplug_slot, 0))
- warn("failure to update adapter file");
+ slot->adapter_status = 0;
slot->extracting = 0;
atomic_dec(&extracting);
}
@@ -615,7 +558,7 @@ cleanup_slots(void)
goto cleanup_null;
list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
list_del(&slot->slot_list);
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
release_slot(slot);
}
cleanup_null:
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 389b8fb50cd9..2c16adb7f4ec 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -194,8 +194,7 @@ int cpci_led_on(struct slot *slot)
slot->devfn,
hs_cap + 2,
hs_csr)) {
- err("Could not set LOO for slot %s",
- hotplug_slot_name(slot->hotplug_slot));
+ err("Could not set LOO for slot %s", slot_name(slot));
return -ENODEV;
}
}
@@ -223,8 +222,7 @@ int cpci_led_off(struct slot *slot)
slot->devfn,
hs_cap + 2,
hs_csr)) {
- err("Could not clear LOO for slot %s",
- hotplug_slot_name(slot->hotplug_slot));
+ err("Could not clear LOO for slot %s", slot_name(slot));
return -ENODEV;
}
}
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index db78b394a075..77e4e0142fbc 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -260,7 +260,7 @@ struct slot {
u8 hp_slot;
struct controller *ctrl;
void __iomem *p_sm_slot;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
};
struct pci_resource {
@@ -445,7 +445,12 @@ extern u8 cpqhp_disk_irq;
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
+}
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
/*
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 5a06636e910a..16bbb183695a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -121,7 +121,6 @@ static int init_SERR(struct controller *ctrl)
{
u32 tempdword;
u32 number_of_slots;
- u8 physical_slot;
if (!ctrl)
return 1;
@@ -131,7 +130,6 @@ static int init_SERR(struct controller *ctrl)
number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
/* Loop through slots */
while (number_of_slots) {
- physical_slot = tempdword;
writeb(0, ctrl->hpc_reg + SLOT_SERR);
tempdword++;
number_of_slots--;
@@ -275,9 +273,7 @@ static int ctrl_slot_cleanup(struct controller *ctrl)
while (old_slot) {
next_slot = old_slot->next;
- pci_hp_deregister(old_slot->hotplug_slot);
- kfree(old_slot->hotplug_slot->info);
- kfree(old_slot->hotplug_slot);
+ pci_hp_deregister(&old_slot->hotplug_slot);
kfree(old_slot);
old_slot = next_slot;
}
@@ -419,7 +415,7 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
struct pci_func *slot_func;
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
u8 bus;
u8 devfn;
@@ -446,7 +442,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
static int process_SI(struct hotplug_slot *hotplug_slot)
{
struct pci_func *slot_func;
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
u8 bus;
u8 devfn;
@@ -478,7 +474,7 @@ static int process_SI(struct hotplug_slot *hotplug_slot)
static int process_SS(struct hotplug_slot *hotplug_slot)
{
struct pci_func *slot_func;
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
u8 bus;
u8 devfn;
@@ -505,7 +501,7 @@ static int process_SS(struct hotplug_slot *hotplug_slot)
static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -516,7 +512,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -527,7 +523,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -538,7 +534,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -550,7 +546,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -560,7 +556,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
+static const struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = process_SI,
.disable_slot = process_SS,
@@ -578,8 +574,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
void __iomem *smbios_table)
{
struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *hotplug_slot_info;
struct pci_bus *bus = ctrl->pci_bus;
u8 number_of_slots;
u8 slot_device;
@@ -605,22 +599,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
goto error;
}
- slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
- GFP_KERNEL);
- if (!slot->hotplug_slot) {
- result = -ENOMEM;
- goto error_slot;
- }
- hotplug_slot = slot->hotplug_slot;
-
- hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
- GFP_KERNEL);
- if (!hotplug_slot->info) {
- result = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot_info = hotplug_slot->info;
-
slot->ctrl = ctrl;
slot->bus = ctrl->bus;
slot->device = slot_device;
@@ -669,29 +647,20 @@ static int ctrl_slot_setup(struct controller *ctrl,
((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
/* register this slot with the hotplug pci core */
- hotplug_slot->private = slot;
snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
- hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
-
- hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
- hotplug_slot_info->attention_status =
- cpq_get_attention_status(ctrl, slot);
- hotplug_slot_info->latch_status =
- cpq_get_latch_status(ctrl, slot);
- hotplug_slot_info->adapter_status =
- get_presence_status(ctrl, slot);
+ slot->hotplug_slot.ops = &cpqphp_hotplug_slot_ops;
dbg("registering bus %d, dev %d, number %d, ctrl->slot_device_offset %d, slot %d\n",
slot->bus, slot->device,
slot->number, ctrl->slot_device_offset,
slot_number);
- result = pci_hp_register(hotplug_slot,
+ result = pci_hp_register(&slot->hotplug_slot,
ctrl->pci_dev->bus,
slot->device,
name);
if (result) {
err("pci_hp_register failed with error %d\n", result);
- goto error_info;
+ goto error_slot;
}
slot->next = ctrl->slot;
@@ -703,10 +672,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
}
return 0;
-error_info:
- kfree(hotplug_slot_info);
-error_hpslot:
- kfree(hotplug_slot);
error_slot:
kfree(slot);
error:
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 616df442520b..b7f4e1f099d9 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1130,9 +1130,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
for (slot = ctrl->slot; slot; slot = slot->next) {
if (slot->device == (hp_slot + ctrl->slot_device_offset))
continue;
- if (!slot->hotplug_slot || !slot->hotplug_slot->info)
- continue;
- if (slot->hotplug_slot->info->adapter_status == 0)
+ if (get_presence_status(ctrl, slot) == 0)
continue;
/* If another adapter is running on the same segment but at a
* lower speed/mode, we allow the new adapter to function at
@@ -1767,24 +1765,6 @@ void cpqhp_event_stop_thread(void)
}
-static int update_slot_info(struct controller *ctrl, struct slot *slot)
-{
- struct hotplug_slot_info *info;
- int result;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->power_status = get_slot_enabled(ctrl, slot);
- info->attention_status = cpq_get_attention_status(ctrl, slot);
- info->latch_status = cpq_get_latch_status(ctrl, slot);
- info->adapter_status = get_presence_status(ctrl, slot);
- result = pci_hp_change_slot_info(slot->hotplug_slot, info);
- kfree(info);
- return result;
-}
-
static void interrupt_event_handler(struct controller *ctrl)
{
int loop = 0;
@@ -1884,9 +1864,6 @@ static void interrupt_event_handler(struct controller *ctrl)
/***********POWER FAULT */
else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) {
dbg("power fault\n");
- } else {
- /* refresh notification */
- update_slot_info(ctrl, p_slot);
}
ctrl->event_queue[loop].event_type = 0;
@@ -2057,9 +2034,6 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
if (rc)
dbg("%s: rc = %d\n", __func__, rc);
- if (p_slot)
- update_slot_info(ctrl, p_slot);
-
return rc;
}
@@ -2125,9 +2099,6 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
rc = 1;
}
- if (p_slot)
- update_slot_info(ctrl, p_slot);
-
return rc;
}
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index fddb78606c74..b89f850c3a4e 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -698,7 +698,7 @@ struct slot {
u8 supported_bus_mode;
u8 flag; /* this is for disable slot and polling */
u8 ctlr_index;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct controller *ctrl;
struct pci_func *func;
u8 irq[4];
@@ -740,7 +740,12 @@ int ibmphp_do_disable_slot(struct slot *slot_cur);
 int ibmphp_update_slot_info(struct slot *); /* This function is called from HPC, so we need it to not be static */
int ibmphp_configure_card(struct pci_func *, u8);
int ibmphp_unconfigure_card(struct slot **, int);
-extern struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
+extern const struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
+}
#endif //__IBMPHP_H
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 4ea57e9019f1..08a58e911fc2 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -247,11 +247,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
break;
}
if (rc == 0) {
- pslot = hotplug_slot->private;
- if (pslot)
- rc = ibmphp_hpc_writeslot(pslot, cmd);
- else
- rc = -ENODEV;
+ pslot = to_slot(hotplug_slot);
+ rc = ibmphp_hpc_writeslot(pslot, cmd);
}
} else
rc = -ENODEV;
@@ -273,19 +270,15 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc)
- rc = ibmphp_hpc_readslot(pslot,
- READ_EXTSLOTSTATUS,
- &(myslot.ext_status));
- if (!rc)
- *value = SLOT_ATTN(myslot.status,
- myslot.ext_status);
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc)
+ rc = ibmphp_hpc_readslot(pslot, READ_EXTSLOTSTATUS,
+ &myslot.ext_status);
+ if (!rc)
+ *value = SLOT_ATTN(myslot.status, myslot.ext_status);
}
ibmphp_unlock_operations();
@@ -303,14 +296,12 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
(ulong) hotplug_slot, (ulong) value);
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc)
- *value = SLOT_LATCH(myslot.status);
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc)
+ *value = SLOT_LATCH(myslot.status);
}
ibmphp_unlock_operations();
@@ -330,14 +321,12 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
(ulong) hotplug_slot, (ulong) value);
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc)
- *value = SLOT_PWRGD(myslot.status);
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc)
+ *value = SLOT_PWRGD(myslot.status);
}
ibmphp_unlock_operations();
@@ -357,18 +346,16 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
(ulong) hotplug_slot, (ulong) value);
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc) {
- present = SLOT_PRESENT(myslot.status);
- if (present == HPC_SLOT_EMPTY)
- *value = 0;
- else
- *value = 1;
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc) {
+ present = SLOT_PRESENT(myslot.status);
+ if (present == HPC_SLOT_EMPTY)
+ *value = 0;
+ else
+ *value = 1;
}
}
@@ -382,7 +369,7 @@ static int get_max_bus_speed(struct slot *slot)
int rc = 0;
u8 mode = 0;
enum pci_bus_speed speed;
- struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
+ struct pci_bus *bus = slot->hotplug_slot.pci_slot->bus;
debug("%s - Entry slot[%p]\n", __func__, slot);
@@ -582,29 +569,10 @@ static int validate(struct slot *slot_cur, int opn)
****************************************************************************/
int ibmphp_update_slot_info(struct slot *slot_cur)
{
- struct hotplug_slot_info *info;
- struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus;
- int rc;
+ struct pci_bus *bus = slot_cur->hotplug_slot.pci_slot->bus;
u8 bus_speed;
u8 mode;
- info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->power_status = SLOT_PWRGD(slot_cur->status);
- info->attention_status = SLOT_ATTN(slot_cur->status,
- slot_cur->ext_status);
- info->latch_status = SLOT_LATCH(slot_cur->status);
- if (!SLOT_PRESENT(slot_cur->status)) {
- info->adapter_status = 0;
-/* info->max_adapter_speed_status = MAX_ADAPTER_NONE; */
- } else {
- info->adapter_status = 1;
-/* get_max_adapter_speed_1(slot_cur->hotplug_slot,
- &info->max_adapter_speed_status, 0); */
- }
-
bus_speed = slot_cur->bus_on->current_speed;
mode = slot_cur->bus_on->current_bus_mode;
@@ -630,9 +598,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
bus->cur_bus_speed = bus_speed;
// To do: bus_names
- rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
- kfree(info);
- return rc;
+ return 0;
}
@@ -673,7 +639,7 @@ static void free_slots(void)
list_for_each_entry_safe(slot_cur, next, &ibmphp_slot_head,
ibm_slot_list) {
- pci_hp_del(slot_cur->hotplug_slot);
+ pci_hp_del(&slot_cur->hotplug_slot);
slot_cur->ctrl = NULL;
slot_cur->bus_on = NULL;
@@ -683,9 +649,7 @@ static void free_slots(void)
*/
ibmphp_unconfigure_card(&slot_cur, -1);
- pci_hp_destroy(slot_cur->hotplug_slot);
- kfree(slot_cur->hotplug_slot->info);
- kfree(slot_cur->hotplug_slot);
+ pci_hp_destroy(&slot_cur->hotplug_slot);
kfree(slot_cur);
}
debug("%s -- exit\n", __func__);
@@ -1007,7 +971,7 @@ static int enable_slot(struct hotplug_slot *hs)
ibmphp_lock_operations();
debug("ENABLING SLOT........\n");
- slot_cur = hs->private;
+ slot_cur = to_slot(hs);
rc = validate(slot_cur, ENABLE);
if (rc) {
@@ -1095,8 +1059,7 @@ static int enable_slot(struct hotplug_slot *hs)
slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL);
if (!slot_cur->func) {
- /* We cannot do update_slot_info here, since no memory for
- * kmalloc n.e.ways, and update_slot_info allocates some */
+ /* do update_slot_info here? */
rc = -ENOMEM;
goto error_power;
}
@@ -1169,7 +1132,7 @@ error_power:
**************************************************************/
static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int rc;
ibmphp_lock_operations();
@@ -1259,7 +1222,7 @@ error:
goto exit;
}
-struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
+const struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = ibmphp_disable_slot,
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 6f8e90e3ec08..11a2661dc062 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -666,36 +666,8 @@ static int fillslotinfo(struct hotplug_slot *hotplug_slot)
struct slot *slot;
int rc = 0;
- if (!hotplug_slot || !hotplug_slot->private)
- return -EINVAL;
-
- slot = hotplug_slot->private;
+ slot = to_slot(hotplug_slot);
rc = ibmphp_hpc_readslot(slot, READ_ALLSTAT, NULL);
- if (rc)
- return rc;
-
- // power - enabled:1 not:0
- hotplug_slot->info->power_status = SLOT_POWER(slot->status);
-
- // attention - off:0, on:1, blinking:2
- hotplug_slot->info->attention_status = SLOT_ATTN(slot->status, slot->ext_status);
-
- // latch - open:1 closed:0
- hotplug_slot->info->latch_status = SLOT_LATCH(slot->status);
-
- // pci board - present:1 not:0
- if (SLOT_PRESENT(slot->status))
- hotplug_slot->info->adapter_status = 1;
- else
- hotplug_slot->info->adapter_status = 0;
-/*
- if (slot->bus_on->supported_bus_mode
- && (slot->bus_on->supported_speed == BUS_SPEED_66))
- hotplug_slot->info->max_bus_speed_status = BUS_SPEED_66PCIX;
- else
- hotplug_slot->info->max_bus_speed_status = slot->bus_on->supported_speed;
-*/
-
return rc;
}
@@ -712,7 +684,6 @@ static int __init ebda_rsrc_controller(void)
u8 ctlr_id, temp, bus_index;
u16 ctlr, slot, bus;
u16 slot_num, bus_num, index;
- struct hotplug_slot *hp_slot_ptr;
struct controller *hpc_ptr;
struct ebda_hpc_bus *bus_ptr;
struct ebda_hpc_slot *slot_ptr;
@@ -771,7 +742,7 @@ static int __init ebda_rsrc_controller(void)
bus_info_ptr1 = kzalloc(sizeof(struct bus_info), GFP_KERNEL);
if (!bus_info_ptr1) {
rc = -ENOMEM;
- goto error_no_hp_slot;
+ goto error_no_slot;
}
bus_info_ptr1->slot_min = slot_ptr->slot_num;
bus_info_ptr1->slot_max = slot_ptr->slot_num;
@@ -842,7 +813,7 @@ static int __init ebda_rsrc_controller(void)
(hpc_ptr->u.isa_ctlr.io_end - hpc_ptr->u.isa_ctlr.io_start + 1),
"ibmphp")) {
rc = -ENODEV;
- goto error_no_hp_slot;
+ goto error_no_slot;
}
hpc_ptr->irq = readb(io_mem + addr + 4);
addr += 5;
@@ -857,7 +828,7 @@ static int __init ebda_rsrc_controller(void)
break;
default:
rc = -ENODEV;
- goto error_no_hp_slot;
+ goto error_no_slot;
}
//reorganize chassis' linked list
@@ -870,19 +841,6 @@ static int __init ebda_rsrc_controller(void)
// register slots with hpc core as well as create linked list of ibm slot
for (index = 0; index < hpc_ptr->slot_count; index++) {
-
- hp_slot_ptr = kzalloc(sizeof(*hp_slot_ptr), GFP_KERNEL);
- if (!hp_slot_ptr) {
- rc = -ENOMEM;
- goto error_no_hp_slot;
- }
-
- hp_slot_ptr->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
- if (!hp_slot_ptr->info) {
- rc = -ENOMEM;
- goto error_no_hp_info;
- }
-
tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL);
if (!tmp_slot) {
rc = -ENOMEM;
@@ -909,7 +867,6 @@ static int __init ebda_rsrc_controller(void)
bus_info_ptr1 = ibmphp_find_same_bus_num(hpc_ptr->slots[index].slot_bus_num);
if (!bus_info_ptr1) {
- kfree(tmp_slot);
rc = -ENODEV;
goto error;
}
@@ -919,22 +876,19 @@ static int __init ebda_rsrc_controller(void)
tmp_slot->ctlr_index = hpc_ptr->slots[index].ctl_index;
tmp_slot->number = hpc_ptr->slots[index].slot_num;
- tmp_slot->hotplug_slot = hp_slot_ptr;
-
- hp_slot_ptr->private = tmp_slot;
- rc = fillslotinfo(hp_slot_ptr);
+ rc = fillslotinfo(&tmp_slot->hotplug_slot);
if (rc)
goto error;
- rc = ibmphp_init_devno((struct slot **) &hp_slot_ptr->private);
+ rc = ibmphp_init_devno(&tmp_slot);
if (rc)
goto error;
- hp_slot_ptr->ops = &ibmphp_hotplug_slot_ops;
+ tmp_slot->hotplug_slot.ops = &ibmphp_hotplug_slot_ops;
// end of registering ibm slot with hotplug core
- list_add(&((struct slot *)(hp_slot_ptr->private))->ibm_slot_list, &ibmphp_slot_head);
+ list_add(&tmp_slot->ibm_slot_list, &ibmphp_slot_head);
}
print_bus_info();
@@ -944,7 +898,7 @@ static int __init ebda_rsrc_controller(void)
list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
- pci_hp_register(tmp_slot->hotplug_slot,
+ pci_hp_register(&tmp_slot->hotplug_slot,
pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name);
}
@@ -953,12 +907,8 @@ static int __init ebda_rsrc_controller(void)
return 0;
error:
- kfree(hp_slot_ptr->private);
+ kfree(tmp_slot);
error_no_slot:
- kfree(hp_slot_ptr->info);
-error_no_hp_info:
- kfree(hp_slot_ptr);
-error_no_hp_slot:
free_ebda_hpc(hpc_ptr);
error_no_hpc:
iounmap(io_mem);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 90fde5f106d8..5ac31f683b85 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -49,15 +49,13 @@ static DEFINE_MUTEX(pci_hp_mutex);
#define GET_STATUS(name, type) \
static int get_##name(struct hotplug_slot *slot, type *value) \
{ \
- struct hotplug_slot_ops *ops = slot->ops; \
+ const struct hotplug_slot_ops *ops = slot->ops; \
int retval = 0; \
- if (!try_module_get(ops->owner)) \
+ if (!try_module_get(slot->owner)) \
return -ENODEV; \
if (ops->get_##name) \
retval = ops->get_##name(slot, value); \
- else \
- *value = slot->info->name; \
- module_put(ops->owner); \
+ module_put(slot->owner); \
return retval; \
}
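
Dropping the *value = slot->info->name fallback and moving owner/mod_name from
the ops to the slot is what allows every hotplug_slot_ops instance to become
const (and live in rodata): the module reference is now pinned through the
slot itself. A sketch of the resulting driver-side pattern, names illustrative:

	static const struct hotplug_slot_ops my_ops = {	/* can be rodata */
		.get_power_status	= get_power_status,
	};

	...
	slot->hotplug_slot.ops = &my_ops;
	/*
	 * pci_hp_register() passes THIS_MODULE/KBUILD_MODNAME down to
	 * __pci_hp_initialize(), which stores them in hotplug_slot.owner
	 * and .mod_name for the try_module_get() calls seen above.
	 */
	retval = pci_hp_register(&slot->hotplug_slot, bus, devnr, name);
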
@@ -90,7 +88,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
power = (u8)(lpower & 0xff);
dbg("power = %d\n", power);
- if (!try_module_get(slot->ops->owner)) {
+ if (!try_module_get(slot->owner)) {
retval = -ENODEV;
goto exit;
}
@@ -109,7 +107,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
err("Illegal value specified for power\n");
retval = -EINVAL;
}
- module_put(slot->ops->owner);
+ module_put(slot->owner);
exit:
if (retval)
@@ -138,7 +136,8 @@ static ssize_t attention_read_file(struct pci_slot *pci_slot, char *buf)
static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
size_t count)
{
- struct hotplug_slot_ops *ops = pci_slot->hotplug->ops;
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ const struct hotplug_slot_ops *ops = slot->ops;
unsigned long lattention;
u8 attention;
int retval = 0;
@@ -147,13 +146,13 @@ static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
attention = (u8)(lattention & 0xff);
dbg(" - attention = %d\n", attention);
- if (!try_module_get(ops->owner)) {
+ if (!try_module_get(slot->owner)) {
retval = -ENODEV;
goto exit;
}
if (ops->set_attention_status)
- retval = ops->set_attention_status(pci_slot->hotplug, attention);
- module_put(ops->owner);
+ retval = ops->set_attention_status(slot, attention);
+ module_put(slot->owner);
exit:
if (retval)
@@ -213,13 +212,13 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
test = (u32)(ltest & 0xffffffff);
dbg("test = %d\n", test);
- if (!try_module_get(slot->ops->owner)) {
+ if (!try_module_get(slot->owner)) {
retval = -ENODEV;
goto exit;
}
if (slot->ops->hardware_test)
retval = slot->ops->hardware_test(slot, test);
- module_put(slot->ops->owner);
+ module_put(slot->owner);
exit:
if (retval)
@@ -444,11 +443,11 @@ int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus,
if (slot == NULL)
return -ENODEV;
- if ((slot->info == NULL) || (slot->ops == NULL))
+ if (slot->ops == NULL)
return -EINVAL;
- slot->ops->owner = owner;
- slot->ops->mod_name = mod_name;
+ slot->owner = owner;
+ slot->mod_name = mod_name;
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
@@ -559,28 +558,6 @@ void pci_hp_destroy(struct hotplug_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_hp_destroy);
-/**
- * pci_hp_change_slot_info - changes the slot's information structure in the core
- * @slot: pointer to the slot whose info has changed
- * @info: pointer to the info copy into the slot's info structure
- *
- * @slot must have been registered with the pci
- * hotplug subsystem previously with a call to pci_hp_register().
- *
- * Returns 0 if successful, anything else for an error.
- */
-int pci_hp_change_slot_info(struct hotplug_slot *slot,
- struct hotplug_slot_info *info)
-{
- if (!slot || !info)
- return -ENODEV;
-
- memcpy(slot->info, info, sizeof(struct hotplug_slot_info));
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
-
static int __init pci_hotplug_init(void)
{
int result;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 811cf83f956d..506e1d923a1f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -19,7 +19,6 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/delay.h>
-#include <linux/sched/signal.h> /* signal_pending() */
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
@@ -60,71 +59,63 @@ do { \
#define SLOT_NAME_SIZE 10
/**
- * struct slot - PCIe hotplug slot
- * @state: current state machine position
- * @ctrl: pointer to the slot's controller structure
- * @hotplug_slot: pointer to the structure registered with the PCI hotplug core
- * @work: work item to turn the slot on or off after 5 seconds in response to
- * an Attention Button press
- * @lock: protects reads and writes of @state;
- * protects scheduling, execution and cancellation of @work
- */
-struct slot {
- u8 state;
- struct controller *ctrl;
- struct hotplug_slot *hotplug_slot;
- struct delayed_work work;
- struct mutex lock;
-};
-
-/**
* struct controller - PCIe hotplug controller
- * @ctrl_lock: serializes writes to the Slot Control register
* @pcie: pointer to the controller's PCIe port service device
- * @reset_lock: prevents access to the Data Link Layer Link Active bit in the
- * Link Status register and to the Presence Detect State bit in the Slot
- * Status register during a slot reset which may cause them to flap
- * @slot: pointer to the controller's slot structure
- * @queue: wait queue to wake up on reception of a Command Completed event,
- * used for synchronous writes to the Slot Control register
* @slot_cap: cached copy of the Slot Capabilities register
* @slot_ctrl: cached copy of the Slot Control register
- * @poll_thread: thread to poll for slot events if no IRQ is available,
- * enabled with pciehp_poll_mode module parameter
+ * @ctrl_lock: serializes writes to the Slot Control register
* @cmd_started: jiffies when the Slot Control register was last written;
* the next write is allowed 1 second later, absent a Command Completed
* interrupt (PCIe r4.0, sec 6.7.3.2)
* @cmd_busy: flag set on Slot Control register write, cleared by IRQ handler
* on reception of a Command Completed event
- * @link_active_reporting: cached copy of Data Link Layer Link Active Reporting
- * Capable bit in Link Capabilities register; if this bit is zero, the
- * Data Link Layer Link Active bit in the Link Status register will never
- * be set and the driver is thus confined to wait 1 second before assuming
- * the link to a hotplugged device is up and accessing it
+ * @queue: wait queue to wake up on reception of a Command Completed event,
+ * used for synchronous writes to the Slot Control register
+ * @pending_events: used by the IRQ handler to save events retrieved from the
+ * Slot Status register for later consumption by the IRQ thread
* @notification_enabled: whether the IRQ was requested successfully
* @power_fault_detected: whether a power fault was detected by the hardware
* that has not yet been cleared by the user
- * @pending_events: used by the IRQ handler to save events retrieved from the
- * Slot Status register for later consumption by the IRQ thread
+ * @poll_thread: thread to poll for slot events if no IRQ is available,
+ * enabled with pciehp_poll_mode module parameter
+ * @state: current state machine position
+ * @state_lock: protects reads and writes of @state;
+ * protects scheduling, execution and cancellation of @button_work
+ * @button_work: work item to turn the slot on or off after 5 seconds
+ * in response to an Attention Button press
+ * @hotplug_slot: structure registered with the PCI hotplug core
+ * @reset_lock: prevents access to the Data Link Layer Link Active bit in the
+ * Link Status register and to the Presence Detect State bit in the Slot
+ * Status register during a slot reset which may cause them to flap
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
* used for synchronous slot enable/disable request via sysfs
+ *
+ * PCIe hotplug has a 1:1 relationship between controller and slot; hence,
+ * unlike other drivers, the two aren't represented by separate structures.
*/
struct controller {
- struct mutex ctrl_lock;
struct pcie_device *pcie;
- struct rw_semaphore reset_lock;
- struct slot *slot;
- wait_queue_head_t queue;
- u32 slot_cap;
- u16 slot_ctrl;
- struct task_struct *poll_thread;
- unsigned long cmd_started; /* jiffies */
+
+ u32 slot_cap; /* capabilities and quirks */
+
+ u16 slot_ctrl; /* control register access */
+ struct mutex ctrl_lock;
+ unsigned long cmd_started;
unsigned int cmd_busy:1;
- unsigned int link_active_reporting:1;
+ wait_queue_head_t queue;
+
+ atomic_t pending_events; /* event handling */
unsigned int notification_enabled:1;
unsigned int power_fault_detected;
- atomic_t pending_events;
+ struct task_struct *poll_thread;
+
+ u8 state; /* state machine */
+ struct mutex state_lock;
+ struct delayed_work button_work;
+
+ struct hotplug_slot hotplug_slot; /* hotplug core interface */
+ struct rw_semaphore reset_lock;
int request_result;
wait_queue_head_t requester;
};
@@ -174,42 +165,50 @@ struct controller {
#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
#define PSN(ctrl) (((ctrl)->slot_cap & PCI_EXP_SLTCAP_PSN) >> 19)
-int pciehp_sysfs_enable_slot(struct slot *slot);
-int pciehp_sysfs_disable_slot(struct slot *slot);
void pciehp_request(struct controller *ctrl, int action);
-void pciehp_handle_button_press(struct slot *slot);
-void pciehp_handle_disable_request(struct slot *slot);
-void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events);
-int pciehp_configure_device(struct slot *p_slot);
-void pciehp_unconfigure_device(struct slot *p_slot);
+void pciehp_handle_button_press(struct controller *ctrl);
+void pciehp_handle_disable_request(struct controller *ctrl);
+void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events);
+int pciehp_configure_device(struct controller *ctrl);
+void pciehp_unconfigure_device(struct controller *ctrl, bool presence);
void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
void pcie_shutdown_notification(struct controller *ctrl);
void pcie_clear_hotplug_events(struct controller *ctrl);
-int pciehp_power_on_slot(struct slot *slot);
-void pciehp_power_off_slot(struct slot *slot);
-void pciehp_get_power_status(struct slot *slot, u8 *status);
-void pciehp_get_attention_status(struct slot *slot, u8 *status);
-
-void pciehp_set_attention_status(struct slot *slot, u8 status);
-void pciehp_get_latch_status(struct slot *slot, u8 *status);
-void pciehp_get_adapter_status(struct slot *slot, u8 *status);
-int pciehp_query_power_fault(struct slot *slot);
-void pciehp_green_led_on(struct slot *slot);
-void pciehp_green_led_off(struct slot *slot);
-void pciehp_green_led_blink(struct slot *slot);
+void pcie_enable_interrupt(struct controller *ctrl);
+void pcie_disable_interrupt(struct controller *ctrl);
+int pciehp_power_on_slot(struct controller *ctrl);
+void pciehp_power_off_slot(struct controller *ctrl);
+void pciehp_get_power_status(struct controller *ctrl, u8 *status);
+
+void pciehp_set_attention_status(struct controller *ctrl, u8 status);
+void pciehp_get_latch_status(struct controller *ctrl, u8 *status);
+int pciehp_query_power_fault(struct controller *ctrl);
+void pciehp_green_led_on(struct controller *ctrl);
+void pciehp_green_led_off(struct controller *ctrl);
+void pciehp_green_led_blink(struct controller *ctrl);
+bool pciehp_card_present(struct controller *ctrl);
+bool pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
bool pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
-int pciehp_reset_slot(struct slot *slot, int probe);
+int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
+int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot);
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe);
+int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status);
int pciehp_set_raw_indicator_status(struct hotplug_slot *h_slot, u8 status);
int pciehp_get_raw_indicator_status(struct hotplug_slot *h_slot, u8 *status);
-static inline const char *slot_name(struct slot *slot)
+static inline const char *slot_name(struct controller *ctrl)
+{
+ return hotplug_slot_name(&ctrl->hotplug_slot);
+}
+
+static inline struct controller *to_ctrl(struct hotplug_slot *hotplug_slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return container_of(hotplug_slot, struct controller, hotplug_slot);
}
#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index ec48c9433ae5..fc5366b50e95 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -23,8 +23,6 @@
#include <linux/types.h>
#include <linux/pci.h>
#include "pciehp.h"
-#include <linux/interrupt.h>
-#include <linux/time.h>
#include "../pci.h"
@@ -47,45 +45,30 @@ MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
#define PCIE_MODULE_NAME "pciehp"
static int set_attention_status(struct hotplug_slot *slot, u8 value);
-static int enable_slot(struct hotplug_slot *slot);
-static int disable_slot(struct hotplug_slot *slot);
static int get_power_status(struct hotplug_slot *slot, u8 *value);
-static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-static int reset_slot(struct hotplug_slot *slot, int probe);
static int init_slot(struct controller *ctrl)
{
- struct slot *slot = ctrl->slot;
- struct hotplug_slot *hotplug = NULL;
- struct hotplug_slot_info *info = NULL;
- struct hotplug_slot_ops *ops = NULL;
+ struct hotplug_slot_ops *ops;
char name[SLOT_NAME_SIZE];
- int retval = -ENOMEM;
-
- hotplug = kzalloc(sizeof(*hotplug), GFP_KERNEL);
- if (!hotplug)
- goto out;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto out;
+ int retval;
/* Setup hotplug slot ops */
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
- goto out;
+ return -ENOMEM;
- ops->enable_slot = enable_slot;
- ops->disable_slot = disable_slot;
+ ops->enable_slot = pciehp_sysfs_enable_slot;
+ ops->disable_slot = pciehp_sysfs_disable_slot;
ops->get_power_status = get_power_status;
ops->get_adapter_status = get_adapter_status;
- ops->reset_slot = reset_slot;
+ ops->reset_slot = pciehp_reset_slot;
if (MRL_SENS(ctrl))
ops->get_latch_status = get_latch_status;
if (ATTN_LED(ctrl)) {
- ops->get_attention_status = get_attention_status;
+ ops->get_attention_status = pciehp_get_attention_status;
ops->set_attention_status = set_attention_status;
} else if (ctrl->pcie->port->hotplug_user_indicators) {
ops->get_attention_status = pciehp_get_raw_indicator_status;
@@ -93,33 +76,24 @@ static int init_slot(struct controller *ctrl)
}
/* register this slot with the hotplug pci core */
- hotplug->info = info;
- hotplug->private = slot;
- hotplug->ops = ops;
- slot->hotplug_slot = hotplug;
+ ctrl->hotplug_slot.ops = ops;
snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
- retval = pci_hp_initialize(hotplug,
+ retval = pci_hp_initialize(&ctrl->hotplug_slot,
ctrl->pcie->port->subordinate, 0, name);
- if (retval)
- ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval);
-out:
if (retval) {
+ ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval);
kfree(ops);
- kfree(info);
- kfree(hotplug);
}
return retval;
}
static void cleanup_slot(struct controller *ctrl)
{
- struct hotplug_slot *hotplug_slot = ctrl->slot->hotplug_slot;
+ struct hotplug_slot *hotplug_slot = &ctrl->hotplug_slot;
pci_hp_destroy(hotplug_slot);
kfree(hotplug_slot->ops);
- kfree(hotplug_slot->info);
- kfree(hotplug_slot);
}
/*
@@ -127,79 +101,48 @@ static void cleanup_slot(struct controller *ctrl)
*/
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_set_attention_status(slot, status);
+ pciehp_set_attention_status(ctrl, status);
pci_config_pm_runtime_put(pdev);
return 0;
}
-
-static int enable_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- return pciehp_sysfs_enable_slot(slot);
-}
-
-
-static int disable_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- return pciehp_sysfs_disable_slot(slot);
-}
-
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_get_power_status(slot, value);
+ pciehp_get_power_status(ctrl, value);
pci_config_pm_runtime_put(pdev);
return 0;
}
-static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
-
- pciehp_get_attention_status(slot, value);
- return 0;
-}
-
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_get_latch_status(slot, value);
+ pciehp_get_latch_status(ctrl, value);
pci_config_pm_runtime_put(pdev);
return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_get_adapter_status(slot, value);
+ *value = pciehp_card_present_or_link_active(ctrl);
pci_config_pm_runtime_put(pdev);
return 0;
}
-static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
-{
- struct slot *slot = hotplug_slot->private;
-
- return pciehp_reset_slot(slot, probe);
-}
-
/**
* pciehp_check_presence() - synthesize event if presence has changed
*
@@ -212,20 +155,19 @@ static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
*/
static void pciehp_check_presence(struct controller *ctrl)
{
- struct slot *slot = ctrl->slot;
- u8 occupied;
+ bool occupied;
down_read(&ctrl->reset_lock);
- mutex_lock(&slot->lock);
+ mutex_lock(&ctrl->state_lock);
- pciehp_get_adapter_status(slot, &occupied);
- if ((occupied && (slot->state == OFF_STATE ||
- slot->state == BLINKINGON_STATE)) ||
- (!occupied && (slot->state == ON_STATE ||
- slot->state == BLINKINGOFF_STATE)))
+ occupied = pciehp_card_present_or_link_active(ctrl);
+ if ((occupied && (ctrl->state == OFF_STATE ||
+ ctrl->state == BLINKINGON_STATE)) ||
+ (!occupied && (ctrl->state == ON_STATE ||
+ ctrl->state == BLINKINGOFF_STATE)))
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
up_read(&ctrl->reset_lock);
}
@@ -233,7 +175,6 @@ static int pciehp_probe(struct pcie_device *dev)
{
int rc;
struct controller *ctrl;
- struct slot *slot;
/* If this is not a "hotplug" service, we have no business here. */
if (dev->service != PCIE_PORT_SERVICE_HP)
@@ -271,8 +212,7 @@ static int pciehp_probe(struct pcie_device *dev)
}
/* Publish to user space */
- slot = ctrl->slot;
- rc = pci_hp_add(slot->hotplug_slot);
+ rc = pci_hp_add(&ctrl->hotplug_slot);
if (rc) {
ctrl_err(ctrl, "Publication to user space failed (%d)\n", rc);
goto err_out_shutdown_notification;
@@ -295,29 +235,43 @@ static void pciehp_remove(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
- pci_hp_del(ctrl->slot->hotplug_slot);
+ pci_hp_del(&ctrl->hotplug_slot);
pcie_shutdown_notification(ctrl);
cleanup_slot(ctrl);
pciehp_release_ctrl(ctrl);
}
#ifdef CONFIG_PM
+static bool pme_is_native(struct pcie_device *dev)
+{
+ const struct pci_host_bridge *host;
+
+ host = pci_find_host_bridge(dev->port->bus);
+ return pcie_ports_native || host->native_pme;
+}
+
static int pciehp_suspend(struct pcie_device *dev)
{
+ /*
+ * Disable hotplug interrupt so that it does not trigger
+ * immediately when the downstream link goes down.
+ */
+ if (pme_is_native(dev))
+ pcie_disable_interrupt(get_service_data(dev));
+
return 0;
}
static int pciehp_resume_noirq(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
- struct slot *slot = ctrl->slot;
/* pci_restore_state() just wrote to the Slot Control register */
ctrl->cmd_started = jiffies;
ctrl->cmd_busy = true;
/* clear spurious events from rediscovery of inserted card */
- if (slot->state == ON_STATE || slot->state == BLINKINGOFF_STATE)
+ if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE)
pcie_clear_hotplug_events(ctrl);
return 0;
@@ -327,10 +281,29 @@ static int pciehp_resume(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
+ if (pme_is_native(dev))
+ pcie_enable_interrupt(ctrl);
+
pciehp_check_presence(ctrl);
return 0;
}
+
+static int pciehp_runtime_resume(struct pcie_device *dev)
+{
+ struct controller *ctrl = get_service_data(dev);
+
+ /* pci_restore_state() just wrote to the Slot Control register */
+ ctrl->cmd_started = jiffies;
+ ctrl->cmd_busy = true;
+
+ /* clear spurious events from rediscovery of inserted card */
+ if ((ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE) &&
+ pme_is_native(dev))
+ pcie_clear_hotplug_events(ctrl);
+
+ return pciehp_resume(dev);
+}
#endif /* PM */
static struct pcie_port_service_driver hpdriver_portdrv = {
@@ -345,10 +318,12 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
.suspend = pciehp_suspend,
.resume_noirq = pciehp_resume_noirq,
.resume = pciehp_resume,
+ .runtime_suspend = pciehp_suspend,
+ .runtime_resume = pciehp_runtime_resume,
#endif /* PM */
};
-static int __init pcied_init(void)
+int __init pcie_hp_init(void)
{
int retval = 0;
@@ -359,4 +334,3 @@ static int __init pcied_init(void)
return retval;
}
-device_initcall(pcied_init);
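
The predicate in pciehp_check_presence() requests a synthesized presence
event whenever occupancy disagrees with the recorded slot state. A condensed
restatement of that condition as a hypothetical helper (a sketch only,
assuming the same state constants; presence_matches_state is not in the
patch):

	static bool presence_matches_state(bool occupied, unsigned int state)
	{
		/* occupied slots should be on or blinking towards off,
		 * empty slots should be off or blinking towards on */
		if (occupied)
			return state == ON_STATE || state == BLINKINGOFF_STATE;
		return state == OFF_STATE || state == BLINKINGON_STATE;
	}

A mismatch is exactly what makes the function above fire
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC) so the IRQ thread can resolve it.
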
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index da7c72372ffc..3f3df4c29f6e 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -13,24 +13,24 @@
*
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
-#include "../pci.h"
#include "pciehp.h"
/* The following routines constitute the bulk of the
hotplug controller logic
*/
-static void set_slot_off(struct controller *ctrl, struct slot *pslot)
+#define SAFE_REMOVAL true
+#define SURPRISE_REMOVAL false
+
+static void set_slot_off(struct controller *ctrl)
{
/* turn off slot, turn on Amber LED, turn off Green LED if supported */
if (POWER_CTRL(ctrl)) {
- pciehp_power_off_slot(pslot);
+ pciehp_power_off_slot(ctrl);
/*
* After turning power off, we must wait for at least 1 second
@@ -40,31 +40,30 @@ static void set_slot_off(struct controller *ctrl, struct slot *pslot)
msleep(1000);
}
- pciehp_green_led_off(pslot);
- pciehp_set_attention_status(pslot, 1);
+ pciehp_green_led_off(ctrl);
+ pciehp_set_attention_status(ctrl, 1);
}
/**
* board_added - Called after a board has been added to the system.
- * @p_slot: &slot where board is added
+ * @ctrl: PCIe hotplug controller where board is added
*
* Turns power on for the board.
* Configures board.
*/
-static int board_added(struct slot *p_slot)
+static int board_added(struct controller *ctrl)
{
int retval = 0;
- struct controller *ctrl = p_slot->ctrl;
struct pci_bus *parent = ctrl->pcie->port->subordinate;
if (POWER_CTRL(ctrl)) {
/* Power on slot */
- retval = pciehp_power_on_slot(p_slot);
+ retval = pciehp_power_on_slot(ctrl);
if (retval)
return retval;
}
- pciehp_green_led_blink(p_slot);
+ pciehp_green_led_blink(ctrl);
/* Check link training status */
retval = pciehp_check_link_status(ctrl);
@@ -74,13 +73,13 @@ static int board_added(struct slot *p_slot)
}
/* Check for a power fault */
- if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
- ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(p_slot));
+ if (ctrl->power_fault_detected || pciehp_query_power_fault(ctrl)) {
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
retval = -EIO;
goto err_exit;
}
- retval = pciehp_configure_device(p_slot);
+ retval = pciehp_configure_device(ctrl);
if (retval) {
if (retval != -EEXIST) {
ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
@@ -89,27 +88,26 @@ static int board_added(struct slot *p_slot)
}
}
- pciehp_green_led_on(p_slot);
- pciehp_set_attention_status(p_slot, 0);
+ pciehp_green_led_on(ctrl);
+ pciehp_set_attention_status(ctrl, 0);
return 0;
err_exit:
- set_slot_off(ctrl, p_slot);
+ set_slot_off(ctrl);
return retval;
}
/**
* remove_board - Turns off slot and LEDs
- * @p_slot: slot where board is being removed
+ * @ctrl: PCIe hotplug controller where board is being removed
+ * @safe_removal: whether the board is safely removed (versus surprise removed)
*/
-static void remove_board(struct slot *p_slot)
+static void remove_board(struct controller *ctrl, bool safe_removal)
{
- struct controller *ctrl = p_slot->ctrl;
-
- pciehp_unconfigure_device(p_slot);
+ pciehp_unconfigure_device(ctrl, safe_removal);
if (POWER_CTRL(ctrl)) {
- pciehp_power_off_slot(p_slot);
+ pciehp_power_off_slot(ctrl);
/*
* After turning power off, we must wait for at least 1 second
@@ -120,11 +118,11 @@ static void remove_board(struct slot *p_slot)
}
/* turn off Green LED */
- pciehp_green_led_off(p_slot);
+ pciehp_green_led_off(ctrl);
}
-static int pciehp_enable_slot(struct slot *slot);
-static int pciehp_disable_slot(struct slot *slot);
+static int pciehp_enable_slot(struct controller *ctrl);
+static int pciehp_disable_slot(struct controller *ctrl, bool safe_removal);
void pciehp_request(struct controller *ctrl, int action)
{
@@ -135,11 +133,11 @@ void pciehp_request(struct controller *ctrl, int action)
void pciehp_queue_pushbutton_work(struct work_struct *work)
{
- struct slot *p_slot = container_of(work, struct slot, work.work);
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = container_of(work, struct controller,
+ button_work.work);
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGOFF_STATE:
pciehp_request(ctrl, DISABLE_SLOT);
break;
@@ -149,30 +147,28 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
default:
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
}
-void pciehp_handle_button_press(struct slot *p_slot)
+void pciehp_handle_button_press(struct controller *ctrl)
{
- struct controller *ctrl = p_slot->ctrl;
-
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case OFF_STATE:
case ON_STATE:
- if (p_slot->state == ON_STATE) {
- p_slot->state = BLINKINGOFF_STATE;
+ if (ctrl->state == ON_STATE) {
+ ctrl->state = BLINKINGOFF_STATE;
ctrl_info(ctrl, "Slot(%s): Powering off due to button press\n",
- slot_name(p_slot));
+ slot_name(ctrl));
} else {
- p_slot->state = BLINKINGON_STATE;
+ ctrl->state = BLINKINGON_STATE;
ctrl_info(ctrl, "Slot(%s) Powering on due to button press\n",
- slot_name(p_slot));
+ slot_name(ctrl));
}
/* blink green LED and turn off amber */
- pciehp_green_led_blink(p_slot);
- pciehp_set_attention_status(p_slot, 0);
- schedule_delayed_work(&p_slot->work, 5 * HZ);
+ pciehp_green_led_blink(ctrl);
+ pciehp_set_attention_status(ctrl, 0);
+ schedule_delayed_work(&ctrl->button_work, 5 * HZ);
break;
case BLINKINGOFF_STATE:
case BLINKINGON_STATE:
@@ -181,197 +177,184 @@ void pciehp_handle_button_press(struct slot *p_slot)
* press the attention button again before the 5 sec. limit
* expires to cancel hot-add or hot-remove
*/
- ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(p_slot));
- cancel_delayed_work(&p_slot->work);
- if (p_slot->state == BLINKINGOFF_STATE) {
- p_slot->state = ON_STATE;
- pciehp_green_led_on(p_slot);
+ ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(ctrl));
+ cancel_delayed_work(&ctrl->button_work);
+ if (ctrl->state == BLINKINGOFF_STATE) {
+ ctrl->state = ON_STATE;
+ pciehp_green_led_on(ctrl);
} else {
- p_slot->state = OFF_STATE;
- pciehp_green_led_off(p_slot);
+ ctrl->state = OFF_STATE;
+ pciehp_green_led_off(ctrl);
}
- pciehp_set_attention_status(p_slot, 0);
+ pciehp_set_attention_status(ctrl, 0);
ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
default:
ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ slot_name(ctrl), ctrl->state);
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
}
-void pciehp_handle_disable_request(struct slot *slot)
+void pciehp_handle_disable_request(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
- mutex_lock(&slot->lock);
- switch (slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGON_STATE:
case BLINKINGOFF_STATE:
- cancel_delayed_work(&slot->work);
+ cancel_delayed_work(&ctrl->button_work);
break;
}
- slot->state = POWEROFF_STATE;
- mutex_unlock(&slot->lock);
+ ctrl->state = POWEROFF_STATE;
+ mutex_unlock(&ctrl->state_lock);
- ctrl->request_result = pciehp_disable_slot(slot);
+ ctrl->request_result = pciehp_disable_slot(ctrl, SAFE_REMOVAL);
}
-void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events)
+void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
{
- struct controller *ctrl = slot->ctrl;
- bool link_active;
- u8 present;
+ bool present, link_active;
/*
* If the slot is on and presence or link has changed, turn it off.
* Even if it's occupied again, we cannot assume the card is the same.
*/
- mutex_lock(&slot->lock);
- switch (slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGOFF_STATE:
- cancel_delayed_work(&slot->work);
+ cancel_delayed_work(&ctrl->button_work);
/* fall through */
case ON_STATE:
- slot->state = POWEROFF_STATE;
- mutex_unlock(&slot->lock);
+ ctrl->state = POWEROFF_STATE;
+ mutex_unlock(&ctrl->state_lock);
if (events & PCI_EXP_SLTSTA_DLLSC)
ctrl_info(ctrl, "Slot(%s): Link Down\n",
- slot_name(slot));
+ slot_name(ctrl));
if (events & PCI_EXP_SLTSTA_PDC)
ctrl_info(ctrl, "Slot(%s): Card not present\n",
- slot_name(slot));
- pciehp_disable_slot(slot);
+ slot_name(ctrl));
+ pciehp_disable_slot(ctrl, SURPRISE_REMOVAL);
break;
default:
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
break;
}
/* Turn the slot on if it's occupied or link is up */
- mutex_lock(&slot->lock);
- pciehp_get_adapter_status(slot, &present);
+ mutex_lock(&ctrl->state_lock);
+ present = pciehp_card_present(ctrl);
link_active = pciehp_check_link_active(ctrl);
if (!present && !link_active) {
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
return;
}
- switch (slot->state) {
+ switch (ctrl->state) {
case BLINKINGON_STATE:
- cancel_delayed_work(&slot->work);
+ cancel_delayed_work(&ctrl->button_work);
/* fall through */
case OFF_STATE:
- slot->state = POWERON_STATE;
- mutex_unlock(&slot->lock);
+ ctrl->state = POWERON_STATE;
+ mutex_unlock(&ctrl->state_lock);
if (present)
ctrl_info(ctrl, "Slot(%s): Card present\n",
- slot_name(slot));
+ slot_name(ctrl));
if (link_active)
ctrl_info(ctrl, "Slot(%s): Link Up\n",
- slot_name(slot));
- ctrl->request_result = pciehp_enable_slot(slot);
+ slot_name(ctrl));
+ ctrl->request_result = pciehp_enable_slot(ctrl);
break;
default:
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
break;
}
}
-static int __pciehp_enable_slot(struct slot *p_slot)
+static int __pciehp_enable_slot(struct controller *ctrl)
{
u8 getstatus = 0;
- struct controller *ctrl = p_slot->ctrl;
- pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus) {
- ctrl_info(ctrl, "Slot(%s): No adapter\n", slot_name(p_slot));
- return -ENODEV;
- }
- if (MRL_SENS(p_slot->ctrl)) {
- pciehp_get_latch_status(p_slot, &getstatus);
+ if (MRL_SENS(ctrl)) {
+ pciehp_get_latch_status(ctrl, &getstatus);
if (getstatus) {
ctrl_info(ctrl, "Slot(%s): Latch open\n",
- slot_name(p_slot));
+ slot_name(ctrl));
return -ENODEV;
}
}
- if (POWER_CTRL(p_slot->ctrl)) {
- pciehp_get_power_status(p_slot, &getstatus);
+ if (POWER_CTRL(ctrl)) {
+ pciehp_get_power_status(ctrl, &getstatus);
if (getstatus) {
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
return 0;
}
}
- return board_added(p_slot);
+ return board_added(ctrl);
}
-static int pciehp_enable_slot(struct slot *slot)
+static int pciehp_enable_slot(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
int ret;
pm_runtime_get_sync(&ctrl->pcie->port->dev);
- ret = __pciehp_enable_slot(slot);
+ ret = __pciehp_enable_slot(ctrl);
if (ret && ATTN_BUTTN(ctrl))
- pciehp_green_led_off(slot); /* may be blinking */
+ pciehp_green_led_off(ctrl); /* may be blinking */
pm_runtime_put(&ctrl->pcie->port->dev);
- mutex_lock(&slot->lock);
- slot->state = ret ? OFF_STATE : ON_STATE;
- mutex_unlock(&slot->lock);
+ mutex_lock(&ctrl->state_lock);
+ ctrl->state = ret ? OFF_STATE : ON_STATE;
+ mutex_unlock(&ctrl->state_lock);
return ret;
}
-static int __pciehp_disable_slot(struct slot *p_slot)
+static int __pciehp_disable_slot(struct controller *ctrl, bool safe_removal)
{
u8 getstatus = 0;
- struct controller *ctrl = p_slot->ctrl;
- if (POWER_CTRL(p_slot->ctrl)) {
- pciehp_get_power_status(p_slot, &getstatus);
+ if (POWER_CTRL(ctrl)) {
+ pciehp_get_power_status(ctrl, &getstatus);
if (!getstatus) {
ctrl_info(ctrl, "Slot(%s): Already disabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
return -EINVAL;
}
}
- remove_board(p_slot);
+ remove_board(ctrl, safe_removal);
return 0;
}
-static int pciehp_disable_slot(struct slot *slot)
+static int pciehp_disable_slot(struct controller *ctrl, bool safe_removal)
{
- struct controller *ctrl = slot->ctrl;
int ret;
pm_runtime_get_sync(&ctrl->pcie->port->dev);
- ret = __pciehp_disable_slot(slot);
+ ret = __pciehp_disable_slot(ctrl, safe_removal);
pm_runtime_put(&ctrl->pcie->port->dev);
- mutex_lock(&slot->lock);
- slot->state = OFF_STATE;
- mutex_unlock(&slot->lock);
+ mutex_lock(&ctrl->state_lock);
+ ctrl->state = OFF_STATE;
+ mutex_unlock(&ctrl->state_lock);
return ret;
}
-int pciehp_sysfs_enable_slot(struct slot *p_slot)
+int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGON_STATE:
case OFF_STATE:
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
/*
* The IRQ thread becomes a no-op if the user pulls out the
* card before the thread wakes up, so initialize to -ENODEV.
@@ -383,53 +366,53 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
return ctrl->request_result;
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
case BLINKINGOFF_STATE:
case ON_STATE:
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
default:
ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ slot_name(ctrl), ctrl->state);
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
return -ENODEV;
}
-int pciehp_sysfs_disable_slot(struct slot *p_slot)
+int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGOFF_STATE:
case ON_STATE:
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
pciehp_request(ctrl, DISABLE_SLOT);
wait_event(ctrl->requester,
!atomic_read(&ctrl->pending_events));
return ctrl->request_result;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
case BLINKINGON_STATE:
case OFF_STATE:
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already disabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
default:
ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ slot_name(ctrl), ctrl->state);
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
return -ENODEV;
}
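
All of the switch statements above implement one small state machine over
ctrl->state, serialized by ctrl->state_lock. An informal summary of the
transitions they encode, written as a comment (derived from the code above,
not an authoritative diagram):

	/*
	 * OFF_STATE --button press--> BLINKINGON_STATE  --5 s timer--> POWERON_STATE  --> ON_STATE
	 * ON_STATE  --button press--> BLINKINGOFF_STATE --5 s timer--> POWEROFF_STATE --> OFF_STATE
	 *
	 * A second press while blinking cancels the delayed work and falls
	 * back to the previous stable state; presence/link events bypass
	 * the blinking phase and go straight to POWERON/POWEROFF_STATE.
	 */

The SAFE_REMOVAL/SURPRISE_REMOVAL booleans do not change these transitions;
they only select how pciehp_unconfigure_device() tears the devices down.
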
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index a938abdb41ce..7dd443aea5a5 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -13,15 +13,12 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/types.h>
-#include <linux/signal.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
-#include <linux/time.h>
#include <linux/slab.h>
#include "../pci.h"
@@ -43,7 +40,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
if (pciehp_poll_mode) {
ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
"pciehp_poll-%s",
- slot_name(ctrl->slot));
+ slot_name(ctrl));
return PTR_ERR_OR_ZERO(ctrl->poll_thread);
}
@@ -217,13 +214,6 @@ bool pciehp_check_link_active(struct controller *ctrl)
return ret;
}
-static void pcie_wait_link_active(struct controller *ctrl)
-{
- struct pci_dev *pdev = ctrl_dev(ctrl);
-
- pcie_wait_for_link(pdev, true);
-}
-
static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
{
u32 l;
@@ -256,18 +246,9 @@ int pciehp_check_link_status(struct controller *ctrl)
bool found;
u16 lnk_status;
- /*
- * Data Link Layer Link Active Reporting must be capable for
- * hot-plug capable downstream port. But old controller might
- * not implement it. In this case, we wait for 1000 ms.
- */
- if (ctrl->link_active_reporting)
- pcie_wait_link_active(ctrl);
- else
- msleep(1000);
+ if (!pcie_wait_for_link(pdev, true))
+ return -1;
- /* wait 100ms before read pci conf, and try in 1s */
- msleep(100);
found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
PCI_DEVFN(0, 0));
@@ -318,8 +299,8 @@ static int pciehp_link_enable(struct controller *ctrl)
int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
u8 *status)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
pci_config_pm_runtime_get(pdev);
@@ -329,9 +310,9 @@ int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-void pciehp_get_attention_status(struct slot *slot, u8 *status)
+int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
{
- struct controller *ctrl = slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
@@ -355,11 +336,12 @@ void pciehp_get_attention_status(struct slot *slot, u8 *status)
*status = 0xFF;
break;
}
+
+ return 0;
}
-void pciehp_get_power_status(struct slot *slot, u8 *status)
+void pciehp_get_power_status(struct controller *ctrl, u8 *status)
{
- struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
@@ -380,27 +362,41 @@ void pciehp_get_power_status(struct slot *slot, u8 *status)
}
}
-void pciehp_get_latch_status(struct slot *slot, u8 *status)
+void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
{
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
}
-void pciehp_get_adapter_status(struct slot *slot, u8 *status)
+bool pciehp_card_present(struct controller *ctrl)
{
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- *status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
+ return slot_status & PCI_EXP_SLTSTA_PDS;
}
-int pciehp_query_power_fault(struct slot *slot)
+/**
+ * pciehp_card_present_or_link_active() - whether given slot is occupied
+ * @ctrl: PCIe hotplug controller
+ *
+ * Unlike pciehp_card_present(), which determines presence solely from the
+ * Presence Detect State bit, this helper also returns true if the Link Active
+ * bit is set. This is a concession to broken hotplug ports which hardwire
+ * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
+ */
+bool pciehp_card_present_or_link_active(struct controller *ctrl)
+{
+ return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
+}
+
+int pciehp_query_power_fault(struct controller *ctrl)
{
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
@@ -410,8 +406,7 @@ int pciehp_query_power_fault(struct slot *slot)
int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
u8 status)
{
- struct slot *slot = hotplug_slot->private;
- struct controller *ctrl = slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
pci_config_pm_runtime_get(pdev);
@@ -421,9 +416,8 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-void pciehp_set_attention_status(struct slot *slot, u8 value)
+void pciehp_set_attention_status(struct controller *ctrl, u8 value)
{
- struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
if (!ATTN_LED(ctrl))
@@ -447,10 +441,8 @@ void pciehp_set_attention_status(struct slot *slot, u8 value)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
}
-void pciehp_green_led_on(struct slot *slot)
+void pciehp_green_led_on(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
if (!PWR_LED(ctrl))
return;
@@ -461,10 +453,8 @@ void pciehp_green_led_on(struct slot *slot)
PCI_EXP_SLTCTL_PWR_IND_ON);
}
-void pciehp_green_led_off(struct slot *slot)
+void pciehp_green_led_off(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
if (!PWR_LED(ctrl))
return;
@@ -475,10 +465,8 @@ void pciehp_green_led_off(struct slot *slot)
PCI_EXP_SLTCTL_PWR_IND_OFF);
}
-void pciehp_green_led_blink(struct slot *slot)
+void pciehp_green_led_blink(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
if (!PWR_LED(ctrl))
return;
@@ -489,9 +477,8 @@ void pciehp_green_led_blink(struct slot *slot)
PCI_EXP_SLTCTL_PWR_IND_BLINK);
}
-int pciehp_power_on_slot(struct slot *slot)
+int pciehp_power_on_slot(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
int retval;
@@ -515,10 +502,8 @@ int pciehp_power_on_slot(struct slot *slot)
return retval;
}
-void pciehp_power_off_slot(struct slot *slot)
+void pciehp_power_off_slot(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
@@ -533,9 +518,11 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
u16 status, events;
/*
- * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
+ * Interrupts only occur in D3hot or shallower and only if enabled
+ * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
*/
- if (pdev->current_state == PCI_D3cold)
+ if (pdev->current_state == PCI_D3cold ||
+ (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
return IRQ_NONE;
/*
@@ -616,7 +603,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
- struct slot *slot = ctrl->slot;
irqreturn_t ret;
u32 events;
@@ -642,16 +628,16 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
/* Check Attention Button Pressed */
if (events & PCI_EXP_SLTSTA_ABP) {
ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
- slot_name(slot));
- pciehp_handle_button_press(slot);
+ slot_name(ctrl));
+ pciehp_handle_button_press(ctrl);
}
/* Check Power Fault Detected */
if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
- ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
- pciehp_set_attention_status(slot, 1);
- pciehp_green_led_off(slot);
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
+ pciehp_set_attention_status(ctrl, 1);
+ pciehp_green_led_off(ctrl);
}
/*
@@ -660,9 +646,9 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
*/
down_read(&ctrl->reset_lock);
if (events & DISABLE_SLOT)
- pciehp_handle_disable_request(slot);
+ pciehp_handle_disable_request(ctrl);
else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
- pciehp_handle_presence_or_link_change(slot, events);
+ pciehp_handle_presence_or_link_change(ctrl, events);
up_read(&ctrl->reset_lock);
pci_config_pm_runtime_put(pdev);
@@ -748,6 +734,16 @@ void pcie_clear_hotplug_events(struct controller *ctrl)
PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
}
+void pcie_enable_interrupt(struct controller *ctrl)
+{
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_HPIE, PCI_EXP_SLTCTL_HPIE);
+}
+
+void pcie_disable_interrupt(struct controller *ctrl)
+{
+ pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_HPIE);
+}
+
/*
* pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
* bus reset of the bridge, but at the same time we want to ensure that it is
@@ -756,9 +752,9 @@ void pcie_clear_hotplug_events(struct controller *ctrl)
* momentarily, if we see that they could interfere. Also, clear any spurious
* events after.
*/
-int pciehp_reset_slot(struct slot *slot, int probe)
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
{
- struct controller *ctrl = slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 stat_mask = 0, ctrl_mask = 0;
int rc;
@@ -808,34 +804,6 @@ void pcie_shutdown_notification(struct controller *ctrl)
}
}
-static int pcie_init_slot(struct controller *ctrl)
-{
- struct pci_bus *subordinate = ctrl_dev(ctrl)->subordinate;
- struct slot *slot;
-
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- return -ENOMEM;
-
- down_read(&pci_bus_sem);
- slot->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
- up_read(&pci_bus_sem);
-
- slot->ctrl = ctrl;
- mutex_init(&slot->lock);
- INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
- ctrl->slot = slot;
- return 0;
-}
-
-static void pcie_cleanup_slot(struct controller *ctrl)
-{
- struct slot *slot = ctrl->slot;
-
- cancel_delayed_work_sync(&slot->work);
- kfree(slot);
-}
-
static inline void dbg_ctrl(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl->pcie->port;
@@ -857,12 +825,13 @@ struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
u32 slot_cap, link_cap;
- u8 occupied, poweron;
+ u8 poweron;
struct pci_dev *pdev = dev->port;
+ struct pci_bus *subordinate = pdev->subordinate;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
- goto abort;
+ return NULL;
ctrl->pcie = dev;
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
@@ -879,15 +848,19 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl->slot_cap = slot_cap;
mutex_init(&ctrl->ctrl_lock);
+ mutex_init(&ctrl->state_lock);
init_rwsem(&ctrl->reset_lock);
init_waitqueue_head(&ctrl->requester);
init_waitqueue_head(&ctrl->queue);
+ INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
dbg_ctrl(ctrl);
+ down_read(&pci_bus_sem);
+ ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
+ up_read(&pci_bus_sem);
+
/* Check if Data Link Layer Link Active Reporting is implemented */
pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
- if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
- ctrl->link_active_reporting = 1;
/* Clear all remaining event bits in Slot Status register. */
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -909,33 +882,24 @@ struct controller *pcie_init(struct pcie_device *dev)
FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
- if (pcie_init_slot(ctrl))
- goto abort_ctrl;
-
/*
* If empty slot's power status is on, turn power off. The IRQ isn't
* requested yet, so avoid triggering a notification with this command.
*/
if (POWER_CTRL(ctrl)) {
- pciehp_get_adapter_status(ctrl->slot, &occupied);
- pciehp_get_power_status(ctrl->slot, &poweron);
- if (!occupied && poweron) {
+ pciehp_get_power_status(ctrl, &poweron);
+ if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
pcie_disable_notification(ctrl);
- pciehp_power_off_slot(ctrl->slot);
+ pciehp_power_off_slot(ctrl);
}
}
return ctrl;
-
-abort_ctrl:
- kfree(ctrl);
-abort:
- return NULL;
}
void pciehp_release_ctrl(struct controller *ctrl)
{
- pcie_cleanup_slot(ctrl);
+ cancel_delayed_work_sync(&ctrl->button_work);
kfree(ctrl);
}
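
pciehp_card_present_or_link_active() boils down to two config space reads. A
self-contained sketch of the equivalent check, using the standard
pcie_capability_read_word() accessor (slot_occupied is an illustrative name;
error handling is omitted for brevity):

	static bool slot_occupied(struct pci_dev *pdev)
	{
		u16 slot_status, link_status;

		/* Presence Detect State in the Slot Status register */
		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
		if (slot_status & PCI_EXP_SLTSTA_PDS)
			return true;

		/* fall back to Data Link Layer Link Active for ports that
		 * hardwire Presence Detect State to zero */
		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
		return link_status & PCI_EXP_LNKSTA_DLLLA;
	}
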
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 5c58c22e0c08..b9c1396db6fe 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -13,20 +13,26 @@
*
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
-int pciehp_configure_device(struct slot *p_slot)
+/**
+ * pciehp_configure_device() - enumerate PCI devices below a hotplug bridge
+ * @ctrl: PCIe hotplug controller
+ *
+ * Enumerate PCI devices below a hotplug bridge and add them to the system.
+ * Return 0 on success, %-EEXIST if the devices are already enumerated or
+ * %-ENODEV if enumeration failed.
+ */
+int pciehp_configure_device(struct controller *ctrl)
{
struct pci_dev *dev;
- struct pci_dev *bridge = p_slot->ctrl->pcie->port;
+ struct pci_dev *bridge = ctrl->pcie->port;
struct pci_bus *parent = bridge->subordinate;
int num, ret = 0;
- struct controller *ctrl = p_slot->ctrl;
pci_lock_rescan_remove();
@@ -62,17 +68,28 @@ int pciehp_configure_device(struct slot *p_slot)
return ret;
}
-void pciehp_unconfigure_device(struct slot *p_slot)
+/**
+ * pciehp_unconfigure_device() - remove PCI devices below a hotplug bridge
+ * @ctrl: PCIe hotplug controller
+ * @presence: whether the card is still present in the slot;
+ * true for safe removal via sysfs or an Attention Button press,
+ * false for surprise removal
+ *
+ * Unbind PCI devices below a hotplug bridge from their drivers and remove
+ * them from the system. Safely removed devices are quiesced. Surprise
+ * removed devices are marked as such to prevent further accesses.
+ */
+void pciehp_unconfigure_device(struct controller *ctrl, bool presence)
{
- u8 presence = 0;
struct pci_dev *dev, *temp;
- struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
+ struct pci_bus *parent = ctrl->pcie->port->subordinate;
u16 command;
- struct controller *ctrl = p_slot->ctrl;
ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
__func__, pci_domain_nr(parent), parent->number);
- pciehp_get_adapter_status(p_slot, &presence);
+
+ if (!presence)
+ pci_walk_bus(parent, pci_dev_set_disconnected, NULL);
pci_lock_rescan_remove();
@@ -85,12 +102,6 @@ void pciehp_unconfigure_device(struct slot *p_slot)
list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
bus_list) {
pci_dev_get(dev);
- if (!presence) {
- pci_dev_set_disconnected(dev, NULL);
- if (pci_has_subordinate(dev))
- pci_walk_bus(dev->subordinate,
- pci_dev_set_disconnected, NULL);
- }
pci_stop_and_remove_bus_device(dev);
/*
* Ensure that no new Requests will be generated from
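
For surprise removal, the rewritten pciehp_unconfigure_device() marks the
entire subtree disconnected in a single pass before unbinding anything.
pci_walk_bus() invokes its callback once for every device on the given bus
and, recursively, on all subordinate buses, which is why the old per-device
pci_has_subordinate() walk could be dropped. A sketch of the same idiom with
a hypothetical counting callback:

	static int count_dev(struct pci_dev *dev, void *data)
	{
		(*(int *)data)++;	/* runs once per device in the subtree */
		return 0;		/* non-zero would stop the walk early */
	}

	static int count_devices_below(struct pci_bus *bus)
	{
		int n = 0;

		pci_walk_bus(bus, count_dev, &n);
		return n;
	}
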
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 3276a5e4c430..6758fd7c382e 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -275,14 +275,13 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
goto free_fdt1;
}
- fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL);
+ fdt = kmemdup(fdt1, fdt_totalsize(fdt1), GFP_KERNEL);
if (!fdt) {
ret = -ENOMEM;
goto free_fdt1;
}
/* Unflatten device tree blob */
- memcpy(fdt, fdt1, fdt_totalsize(fdt1));
dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
if (!dt) {
ret = -EINVAL;
@@ -328,10 +327,15 @@ out:
return ret;
}
+static inline struct pnv_php_slot *to_pnv_php_slot(struct hotplug_slot *slot)
+{
+ return container_of(slot, struct pnv_php_slot, slot);
+}
+
int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
uint8_t state)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
struct opal_msg msg;
int ret;
@@ -363,7 +367,7 @@ EXPORT_SYMBOL_GPL(pnv_php_set_slot_power_state);
static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
uint8_t power_state = OPAL_PCI_SLOT_POWER_ON;
int ret;
@@ -378,7 +382,6 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
ret);
} else {
*state = power_state;
- slot->info->power_status = power_state;
}
return 0;
@@ -386,7 +389,7 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
uint8_t presence = OPAL_PCI_SLOT_EMPTY;
int ret;
@@ -397,7 +400,6 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
if (ret >= 0) {
*state = presence;
- slot->info->adapter_status = presence;
ret = 0;
} else {
pci_warn(php_slot->pdev, "Error %d getting presence\n", ret);
@@ -406,10 +408,20 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
return ret;
}
+static int pnv_php_get_attention_state(struct hotplug_slot *slot, u8 *state)
+{
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+
+ *state = php_slot->attention_state;
+ return 0;
+}
+
static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
{
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+
/* FIXME: Make it real once firmware supports it */
- slot->info->attention_status = state;
+ php_slot->attention_state = state;
return 0;
}
@@ -501,15 +513,14 @@ scan:
static int pnv_php_enable_slot(struct hotplug_slot *slot)
{
- struct pnv_php_slot *php_slot = container_of(slot,
- struct pnv_php_slot, slot);
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
return pnv_php_enable(php_slot, true);
}
static int pnv_php_disable_slot(struct hotplug_slot *slot)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
int ret;
if (php_slot->state != PNV_PHP_STATE_POPULATED)
@@ -530,9 +541,10 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
return ret;
}
-static struct hotplug_slot_ops php_slot_ops = {
+static const struct hotplug_slot_ops php_slot_ops = {
.get_power_status = pnv_php_get_power_state,
.get_adapter_status = pnv_php_get_adapter_state,
+ .get_attention_status = pnv_php_get_attention_state,
.set_attention_status = pnv_php_set_attention_state,
.enable_slot = pnv_php_enable_slot,
.disable_slot = pnv_php_disable_slot,
@@ -594,8 +606,6 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
php_slot->id = id;
php_slot->power_state_check = false;
php_slot->slot.ops = &php_slot_ops;
- php_slot->slot.info = &php_slot->slot_info;
- php_slot->slot.private = php_slot;
INIT_LIST_HEAD(&php_slot->children);
INIT_LIST_HEAD(&php_slot->link);
@@ -736,7 +746,7 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
pe = edev ? edev->pe : NULL;
if (pe) {
eeh_serialize_lock(&flags);
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(pe);
eeh_serialize_unlock(flags);
eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
}
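
The kmemdup() conversion in pnv_php_add_devtree() is a pure simplification:
one call allocates and copies, where the old code paired kzalloc() with
memcpy() and wasted the zeroing because every byte was overwritten anyway.
The idiom in isolation (dup_fdt is an illustrative wrapper, not part of the
patch):

	static void *dup_fdt(const void *fdt1, size_t size)
	{
		/* kmemdup() = kmalloc() + memcpy(); returns NULL on failure */
		return kmemdup(fdt1, size, GFP_KERNEL);
	}
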
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index c8311724bd76..bdc954d70869 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -63,16 +63,22 @@ struct slot {
u32 index;
u32 type;
u32 power_domain;
+ u8 attention_status;
char *name;
struct device_node *dn;
struct pci_bus *bus;
struct list_head *pci_devs;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
};
-extern struct hotplug_slot_ops rpaphp_hotplug_slot_ops;
+extern const struct hotplug_slot_ops rpaphp_hotplug_slot_ops;
extern struct list_head rpaphp_slot_head;
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
+}
+
/* function prototypes */
/* rpaphp_pci.c */
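
Declaring rpaphp_hotplug_slot_ops const moves the ops table into read-only
data, which is possible now that struct hotplug_slot carries a const ops
pointer. A sketch of the resulting convention (demo_enable/demo_ops are
illustrative names):

	static int demo_enable(struct hotplug_slot *slot)
	{
		return 0;	/* a real driver powers up the slot here */
	}

	static const struct hotplug_slot_ops demo_ops = {
		.enable_slot = demo_enable,	/* unset callbacks stay NULL */
	};
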
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 857c358b727b..bcd5d357ca23 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -52,7 +52,7 @@ module_param_named(debug, rpaphp_debug, bool, 0644);
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
{
int rc;
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
switch (value) {
case 0:
@@ -66,7 +66,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
rc = rtas_set_indicator(DR_INDICATOR, slot->index, value);
if (!rc)
- hotplug_slot->info->attention_status = value;
+ slot->attention_status = value;
return rc;
}
@@ -79,7 +79,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int retval, level;
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
retval = rtas_get_power_level(slot->power_domain, &level);
if (!retval)
@@ -94,14 +94,14 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
- *value = slot->hotplug_slot->info->attention_status;
+ struct slot *slot = to_slot(hotplug_slot);
+ *value = slot->attention_status;
return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int rc, state;
rc = rpaphp_get_sensor_state(slot, &state);
@@ -409,7 +409,7 @@ static void __exit cleanup_slots(void)
list_for_each_entry_safe(slot, next, &rpaphp_slot_head,
rpaphp_slot_list) {
list_del(&slot->rpaphp_slot_list);
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
dealloc_slot_struct(slot);
}
return;
@@ -434,7 +434,7 @@ static void __exit rpaphp_exit(void)
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int state;
int retval;
@@ -464,7 +464,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
if (slot->state == NOT_CONFIGURED)
return -EINVAL;
@@ -477,7 +477,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
return 0;
}
-struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
+const struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 0aac33e15dab..beca61badeea 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -54,25 +54,21 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state)
* rpaphp_enable_slot - record slot state, config pci device
* @slot: target &slot
*
- * Initialize values in the slot, and the hotplug_slot info
- * structures to indicate if there is a pci card plugged into
- * the slot. If the slot is not empty, run the pcibios routine
+ * Initialize values in the slot structure to indicate if there is a PCI card
+ * plugged into the slot. If the slot is not empty, run the pcibios routine
 * to get the PCI configuration correctly set up.
*/
int rpaphp_enable_slot(struct slot *slot)
{
int rc, level, state;
struct pci_bus *bus;
- struct hotplug_slot_info *info = slot->hotplug_slot->info;
- info->adapter_status = NOT_VALID;
slot->state = EMPTY;
/* Find out if the power is turned on for the slot */
rc = rtas_get_power_level(slot->power_domain, &level);
if (rc)
return rc;
- info->power_status = level;
/* Figure out if there is an adapter in the slot */
rc = rpaphp_get_sensor_state(slot, &state);
@@ -85,13 +81,11 @@ int rpaphp_enable_slot(struct slot *slot)
return -EINVAL;
}
- info->adapter_status = EMPTY;
slot->bus = bus;
slot->pci_devs = &bus->devices;
/* if there's an adapter in the slot, go add the pci devices */
if (state == PRESENT) {
- info->adapter_status = NOT_CONFIGURED;
slot->state = NOT_CONFIGURED;
/* non-empty slot has to have a child */
@@ -105,7 +99,6 @@ int rpaphp_enable_slot(struct slot *slot)
pci_hp_add_devices(bus);
if (!list_empty(&bus->devices)) {
- info->adapter_status = CONFIGURED;
slot->state = CONFIGURED;
}
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index b916c8e4372d..5282aa3e33c5 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -21,9 +21,7 @@
/* free up the memory used by a slot */
void dealloc_slot_struct(struct slot *slot)
{
- kfree(slot->hotplug_slot->info);
kfree(slot->name);
- kfree(slot->hotplug_slot);
kfree(slot);
}
@@ -35,28 +33,16 @@ struct slot *alloc_slot_struct(struct device_node *dn,
slot = kzalloc(sizeof(struct slot), GFP_KERNEL);
if (!slot)
goto error_nomem;
- slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
- slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!slot->hotplug_slot->info)
- goto error_hpslot;
slot->name = kstrdup(drc_name, GFP_KERNEL);
if (!slot->name)
- goto error_info;
+ goto error_slot;
slot->dn = dn;
slot->index = drc_index;
slot->power_domain = power_domain;
- slot->hotplug_slot->private = slot;
- slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
+ slot->hotplug_slot.ops = &rpaphp_hotplug_slot_ops;
return (slot);
-error_info:
- kfree(slot->hotplug_slot->info);
-error_hpslot:
- kfree(slot->hotplug_slot);
error_slot:
kfree(slot);
error_nomem:
@@ -77,7 +63,7 @@ static int is_registered(struct slot *slot)
int rpaphp_deregister_slot(struct slot *slot)
{
int retval = 0;
- struct hotplug_slot *php_slot = slot->hotplug_slot;
+ struct hotplug_slot *php_slot = &slot->hotplug_slot;
dbg("%s - Entry: deregistering slot=%s\n",
__func__, slot->name);
@@ -93,7 +79,7 @@ EXPORT_SYMBOL_GPL(rpaphp_deregister_slot);
int rpaphp_register_slot(struct slot *slot)
{
- struct hotplug_slot *php_slot = slot->hotplug_slot;
+ struct hotplug_slot *php_slot = &slot->hotplug_slot;
struct device_node *child;
u32 my_index;
int retval;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 93b5341d282c..30ee72268790 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -32,10 +32,15 @@ static int zpci_fn_configured(enum zpci_state state)
*/
struct slot {
struct list_head slot_list;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct zpci_dev *zdev;
};
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
+}
+
static inline int slot_configure(struct slot *slot)
{
int ret = sclp_pci_configure(slot->zdev->fid);
@@ -60,7 +65,7 @@ static inline int slot_deconfigure(struct slot *slot)
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int rc;
if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
@@ -88,7 +93,7 @@ out_deconfigure:
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct pci_dev *pdev;
int rc;
@@ -110,7 +115,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
switch (slot->zdev->state) {
case ZPCI_FN_STATE_STANDBY:
@@ -130,7 +135,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static struct hotplug_slot_ops s390_hotplug_slot_ops = {
+static const struct hotplug_slot_ops s390_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
@@ -139,8 +144,6 @@ static struct hotplug_slot_ops s390_hotplug_slot_ops = {
int zpci_init_slot(struct zpci_dev *zdev)
{
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
struct slot *slot;
int rc;
@@ -152,26 +155,11 @@ int zpci_init_slot(struct zpci_dev *zdev)
if (!slot)
goto error;
- hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
- goto error_hp;
- hotplug_slot->private = slot;
-
- slot->hotplug_slot = hotplug_slot;
slot->zdev = zdev;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto error_info;
- hotplug_slot->info = info;
-
- hotplug_slot->ops = &s390_hotplug_slot_ops;
-
- get_power_status(hotplug_slot, &info->power_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
+ slot->hotplug_slot.ops = &s390_hotplug_slot_ops;
snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
- rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
+ rc = pci_hp_register(&slot->hotplug_slot, zdev->bus,
ZPCI_DEVFN, name);
if (rc)
goto error_reg;
@@ -180,10 +168,6 @@ int zpci_init_slot(struct zpci_dev *zdev)
return 0;
error_reg:
- kfree(info);
-error_info:
- kfree(hotplug_slot);
-error_hp:
kfree(slot);
error:
return -ENOMEM;
@@ -198,9 +182,7 @@ void zpci_exit_slot(struct zpci_dev *zdev)
if (slot->zdev != zdev)
continue;
list_del(&slot->slot_list);
- pci_hp_deregister(slot->hotplug_slot);
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
kfree(slot);
}
}
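
With hotplug_slot_info gone, zpci_init_slot() no longer pre-seeds cached
power/adapter status; the hotplug core now queries the driver lazily through
its ops table whenever user space reads the sysfs attributes. A simplified
sketch of that dispatch path, assuming the pci_hotplug core's usual shape
(power_read is illustrative, and real code also handles a NULL callback):

	static ssize_t power_read(struct pci_slot *pci_slot, char *buf)
	{
		u8 value;

		pci_slot->hotplug->ops->get_power_status(pci_slot->hotplug, &value);
		return sprintf(buf, "%d\n", value);
	}
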
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index babd23409f61..231f5bdd3d2d 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -56,7 +56,7 @@ struct slot {
int device_num;
struct pci_bus *pci_bus;
/* this struct is for the hotplug glue's internal use only */
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct list_head hp_list;
char physical_path[SN_SLOT_NAME_SIZE];
};
@@ -80,7 +80,7 @@ static int enable_slot(struct hotplug_slot *slot);
static int disable_slot(struct hotplug_slot *slot);
static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops sn_hotplug_slot_ops = {
+static const struct hotplug_slot_ops sn_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
@@ -88,10 +88,15 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = {
static DEFINE_MUTEX(sn_hotplug_mutex);
+static struct slot *to_slot(struct hotplug_slot *bss_hotplug_slot)
+{
+ return container_of(bss_hotplug_slot, struct slot, hotplug_slot);
+}
+
static ssize_t path_show(struct pci_slot *pci_slot, char *buf)
{
int retval = -ENOENT;
- struct slot *slot = pci_slot->hotplug->private;
+ struct slot *slot = to_slot(pci_slot->hotplug);
if (!slot)
return retval;
@@ -156,7 +161,7 @@ static int sn_pci_bus_valid(struct pci_bus *pci_bus)
return -EIO;
}
-static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
+static int sn_hp_slot_private_alloc(struct hotplug_slot **bss_hotplug_slot,
struct pci_bus *pci_bus, int device,
char *name)
{
@@ -168,7 +173,6 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
- bss_hotplug_slot->private = slot;
slot->device_num = device;
slot->pci_bus = pci_bus;
@@ -179,8 +183,8 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
sn_generate_path(pci_bus, slot->physical_path);
- slot->hotplug_slot = bss_hotplug_slot;
list_add(&slot->hp_list, &sn_hp_list);
+ *bss_hotplug_slot = &slot->hotplug_slot;
return 0;
}
@@ -192,10 +196,9 @@ static struct hotplug_slot *sn_hp_destroy(void)
struct hotplug_slot *bss_hotplug_slot = NULL;
list_for_each_entry(slot, &sn_hp_list, hp_list) {
- bss_hotplug_slot = slot->hotplug_slot;
+ bss_hotplug_slot = &slot->hotplug_slot;
pci_slot = bss_hotplug_slot->pci_slot;
- list_del(&((struct slot *)bss_hotplug_slot->private)->
- hp_list);
+ list_del(&slot->hp_list);
sysfs_remove_file(&pci_slot->kobj,
&sn_slot_path_attr.attr);
break;
@@ -227,7 +230,7 @@ static void sn_bus_free_data(struct pci_dev *dev)
static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
int device_num, char **ssdt)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pcibus_info *pcibus_info;
struct pcibr_slot_enable_resp resp;
int rc;
@@ -267,7 +270,7 @@ static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
int device_num, int action)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pcibus_info *pcibus_info;
struct pcibr_slot_disable_resp resp;
int rc;
@@ -323,7 +326,7 @@ static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
*/
static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pci_bus *new_bus = NULL;
struct pci_dev *dev;
int num_funcs;
@@ -469,7 +472,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pci_dev *dev, *temp;
int rc;
acpi_handle ssdt_hdl = NULL;
@@ -571,7 +574,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
u8 *value)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pcibus_info *pcibus_info;
u32 power;
@@ -585,9 +588,7 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
{
- kfree(bss_hotplug_slot->info);
- kfree(bss_hotplug_slot->private);
- kfree(bss_hotplug_slot);
+ kfree(to_slot(bss_hotplug_slot));
}
static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
@@ -607,22 +608,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
if (sn_pci_slot_valid(pci_bus, device) != 1)
continue;
- bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot),
- GFP_KERNEL);
- if (!bss_hotplug_slot) {
- rc = -ENOMEM;
- goto alloc_err;
- }
-
- bss_hotplug_slot->info =
- kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!bss_hotplug_slot->info) {
- rc = -ENOMEM;
- goto alloc_err;
- }
-
- if (sn_hp_slot_private_alloc(bss_hotplug_slot,
+ if (sn_hp_slot_private_alloc(&bss_hotplug_slot,
pci_bus, device, name)) {
rc = -ENOMEM;
goto alloc_err;
@@ -637,7 +623,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
rc = sysfs_create_file(&pci_slot->kobj,
&sn_slot_path_attr.attr);
if (rc)
- goto register_err;
+ goto alloc_err;
}
pci_dbg(pci_bus->self, "Registered bus with hotplug\n");
return rc;
@@ -646,14 +632,11 @@ register_err:
pci_dbg(pci_bus->self, "bus failed to register with err = %d\n",
rc);
-alloc_err:
- if (rc == -ENOMEM)
- pci_dbg(pci_bus->self, "Memory allocation error\n");
-
/* destroy THIS element */
- if (bss_hotplug_slot)
- sn_release_slot(bss_hotplug_slot);
+ sn_hp_destroy();
+ sn_release_slot(bss_hotplug_slot);
+alloc_err:
/* destroy anything else on the list */
while ((bss_hotplug_slot = sn_hp_destroy())) {
pci_hp_deregister(bss_hotplug_slot);
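The sgi_hotplug conversion follows the pattern used throughout this series: struct hotplug_slot stops being a separately allocated object and is embedded in the driver's private slot structure, with container_of() recovering the container. A minimal sketch of the pattern (my_slot and to_my_slot are hypothetical names, not from the patch):

struct my_slot {
	int device_num;
	struct hotplug_slot hotplug_slot;	/* embedded, no longer a pointer */
};

static struct my_slot *to_my_slot(struct hotplug_slot *hs)
{
	/* recover the containing driver structure from the embedded member */
	return container_of(hs, struct my_slot, hotplug_slot);
}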
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 516e4835019c..f7f13ee5d06e 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -67,11 +67,13 @@ struct slot {
u32 number;
u8 is_a_board;
u8 state;
+ u8 attention_save;
u8 presence_save;
+ u8 latch_save;
u8 pwr_save;
struct controller *ctrl;
const struct hpc_ops *hpc_ops;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct list_head slot_list;
struct delayed_work work; /* work for button event */
struct mutex lock;
@@ -169,7 +171,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev);
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
}
struct ctrl_reg {
@@ -207,7 +209,7 @@ enum ctrl_offsets {
static inline struct slot *get_slot(struct hotplug_slot *hotplug_slot)
{
- return hotplug_slot->private;
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
static inline struct slot *shpchp_find_slot(struct controller *ctrl, u8 device)
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 97cee23f3d51..81a918d47895 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -51,7 +51,7 @@ static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
+static const struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@@ -65,7 +65,6 @@ static int init_slots(struct controller *ctrl)
{
struct slot *slot;
struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
int retval;
int i;
@@ -77,19 +76,7 @@ static int init_slots(struct controller *ctrl)
goto error;
}
- hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot) {
- retval = -ENOMEM;
- goto error_slot;
- }
- slot->hotplug_slot = hotplug_slot;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- retval = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot->info = info;
+ hotplug_slot = &slot->hotplug_slot;
slot->hp_slot = i;
slot->ctrl = ctrl;
@@ -101,14 +88,13 @@ static int init_slots(struct controller *ctrl)
slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number);
if (!slot->wq) {
retval = -ENOMEM;
- goto error_info;
+ goto error_slot;
}
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
/* register this slot with the hotplug pci core */
- hotplug_slot->private = slot;
snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
@@ -116,7 +102,7 @@ static int init_slots(struct controller *ctrl)
pci_domain_nr(ctrl->pci_dev->subordinate),
slot->bus, slot->device, slot->hp_slot, slot->number,
ctrl->slot_device_offset);
- retval = pci_hp_register(slot->hotplug_slot,
+ retval = pci_hp_register(hotplug_slot,
ctrl->pci_dev->subordinate, slot->device, name);
if (retval) {
ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
@@ -124,10 +110,10 @@ static int init_slots(struct controller *ctrl)
goto error_slotwq;
}
- get_power_status(hotplug_slot, &info->power_status);
- get_attention_status(hotplug_slot, &info->attention_status);
- get_latch_status(hotplug_slot, &info->latch_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
+ get_power_status(hotplug_slot, &slot->pwr_save);
+ get_attention_status(hotplug_slot, &slot->attention_save);
+ get_latch_status(hotplug_slot, &slot->latch_save);
+ get_adapter_status(hotplug_slot, &slot->presence_save);
list_add(&slot->slot_list, &ctrl->slot_list);
}
@@ -135,10 +121,6 @@ static int init_slots(struct controller *ctrl)
return 0;
error_slotwq:
destroy_workqueue(slot->wq);
-error_info:
- kfree(info);
-error_hpslot:
- kfree(hotplug_slot);
error_slot:
kfree(slot);
error:
@@ -153,9 +135,7 @@ void cleanup_slots(struct controller *ctrl)
list_del(&slot->slot_list);
cancel_delayed_work(&slot->work);
destroy_workqueue(slot->wq);
- pci_hp_deregister(slot->hotplug_slot);
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
kfree(slot);
}
}
@@ -170,7 +150,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- hotplug_slot->info->attention_status = status;
+ slot->attention_save = status;
slot->hpc_ops->set_attention_status(slot, status);
return 0;
@@ -206,7 +186,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_power_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->power_status;
+ *value = slot->pwr_save;
return 0;
}
@@ -221,7 +201,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_attention_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->attention_status;
+ *value = slot->attention_save;
return 0;
}
@@ -236,7 +216,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_latch_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->latch_status;
+ *value = slot->latch_save;
return 0;
}
@@ -251,7 +231,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_adapter_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->adapter_status;
+ *value = slot->presence_save;
return 0;
}
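The shpchp changes combine three themes of the series: the ops table becomes const, the slot is registered from its embedded hotplug_slot, and the status values formerly held in hotplug_slot_info move into the driver's struct slot. A condensed sketch of the new registration flow, assuming the shpchp types above (hypothetical helper, error handling omitted):

static const struct hotplug_slot_ops my_hotplug_ops = {	/* const is now possible */
	.enable_slot  = enable_slot,
	.disable_slot = disable_slot,
};

static int my_register_slot(struct controller *ctrl, struct slot *slot,
			    const char *name)
{
	/* no separate hotplug_slot or hotplug_slot_info allocations */
	slot->hotplug_slot.ops = &my_hotplug_ops;
	return pci_hp_register(&slot->hotplug_slot,
			       ctrl->pci_dev->subordinate,
			       slot->device, name);
}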
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 1267dcc5a531..078003dcde5b 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -446,23 +446,12 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
mutex_unlock(&p_slot->lock);
}
-static int update_slot_info (struct slot *slot)
+static void update_slot_info(struct slot *slot)
{
- struct hotplug_slot_info *info;
- int result;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- slot->hpc_ops->get_power_status(slot, &(info->power_status));
- slot->hpc_ops->get_attention_status(slot, &(info->attention_status));
- slot->hpc_ops->get_latch_status(slot, &(info->latch_status));
- slot->hpc_ops->get_adapter_status(slot, &(info->adapter_status));
-
- result = pci_hp_change_slot_info(slot->hotplug_slot, info);
- kfree (info);
- return result;
+ slot->hpc_ops->get_power_status(slot, &slot->pwr_save);
+ slot->hpc_ops->get_attention_status(slot, &slot->attention_save);
+ slot->hpc_ops->get_latch_status(slot, &slot->latch_save);
+ slot->hpc_ops->get_adapter_status(slot, &slot->presence_save);
}
/*
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index c5f3cd4ed766..9616eca3182f 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -13,7 +13,6 @@
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/pci-ats.h>
#include "pci.h"
#define VIRTFN_ID_LEN 16
@@ -133,6 +132,8 @@ static void pci_read_vf_config_common(struct pci_dev *virtfn)
&physfn->sriov->subsystem_vendor);
pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
&physfn->sriov->subsystem_device);
+
+ physfn->sriov->cfg_size = pci_cfg_space_size(virtfn);
}
int pci_iov_add_virtfn(struct pci_dev *dev, int id)
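The added line caches the config space size probed from the first VF in the PF's struct pci_sriov, on the assumption that every VF of a PF reports the same size. Subsequent VFs can then be sized with a plain field read instead of a config-space probe; a hypothetical helper (struct pci_sriov is private to drivers/pci, so this only works inside the PCI core):

static int my_vf_cfg_size(struct pci_dev *virtfn)
{
	/* all VFs of a PF share the same config space size */
	return virtfn->physfn->sriov->cfg_size;
}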
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index f2ef896464b3..af24ed50a245 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -958,7 +958,6 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
}
}
}
- WARN_ON(!!dev->msix_enabled);
/* Check whether driver already requested for MSI irq */
if (dev->msi_enabled) {
@@ -1028,8 +1027,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (!pci_msi_supported(dev, minvec))
return -EINVAL;
- WARN_ON(!!dev->msi_enabled);
-
/* Check whether driver already requested MSI-X irqs */
if (dev->msix_enabled) {
pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
@@ -1039,6 +1036,9 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (maxvec < minvec)
return -ERANGE;
+ if (WARN_ON_ONCE(dev->msi_enabled))
+ return -EINVAL;
+
nvec = pci_msi_vec_count(dev);
if (nvec < 0)
return nvec;
@@ -1087,6 +1087,9 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
if (maxvec < minvec)
return -ERANGE;
+ if (WARN_ON_ONCE(dev->msix_enabled))
+ return -EINVAL;
+
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
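Moving the WARN_ON after the argument checks and converting it to WARN_ON_ONCE plus an -EINVAL return means a buggy double enable now fails cleanly instead of only warning. Nothing changes for well-behaved drivers; the usual entry point is still pci_alloc_irq_vectors(), e.g. (a sketch, not from the patch):

static int my_setup_irqs(struct pci_dev *pdev)
{
	int nvec;

	/* request 1..32 vectors, preferring MSI-X with MSI fallback */
	nvec = pci_alloc_irq_vectors(pdev, 1, 32,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;	/* a second enable now yields -EINVAL */

	return 0;
}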
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 1836b8ddf292..4c4217d0c3f1 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -355,107 +355,6 @@ failed:
EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
-/**
- * of_pci_map_rid - Translate a requester ID through a downstream mapping.
- * @np: root complex device node.
- * @rid: PCI requester ID to map.
- * @map_name: property name of the map to use.
- * @map_mask_name: optional property name of the mask to use.
- * @target: optional pointer to a target device node.
- * @id_out: optional pointer to receive the translated ID.
- *
- * Given a PCI requester ID, look up the appropriate implementation-defined
- * platform ID and/or the target device which receives transactions on that
- * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
- * @id_out may be NULL if only the other is required. If @target points to
- * a non-NULL device node pointer, only entries targeting that node will be
- * matched; if it points to a NULL value, it will receive the device node of
- * the first matching target phandle, with a reference held.
- *
- * Return: 0 on success or a standard error code on failure.
- */
-int of_pci_map_rid(struct device_node *np, u32 rid,
- const char *map_name, const char *map_mask_name,
- struct device_node **target, u32 *id_out)
-{
- u32 map_mask, masked_rid;
- int map_len;
- const __be32 *map = NULL;
-
- if (!np || !map_name || (!target && !id_out))
- return -EINVAL;
-
- map = of_get_property(np, map_name, &map_len);
- if (!map) {
- if (target)
- return -ENODEV;
- /* Otherwise, no map implies no translation */
- *id_out = rid;
- return 0;
- }
-
- if (!map_len || map_len % (4 * sizeof(*map))) {
- pr_err("%pOF: Error: Bad %s length: %d\n", np,
- map_name, map_len);
- return -EINVAL;
- }
-
- /* The default is to select all bits. */
- map_mask = 0xffffffff;
-
- /*
- * Can be overridden by "{iommu,msi}-map-mask" property.
- * If of_property_read_u32() fails, the default is used.
- */
- if (map_mask_name)
- of_property_read_u32(np, map_mask_name, &map_mask);
-
- masked_rid = map_mask & rid;
- for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
- struct device_node *phandle_node;
- u32 rid_base = be32_to_cpup(map + 0);
- u32 phandle = be32_to_cpup(map + 1);
- u32 out_base = be32_to_cpup(map + 2);
- u32 rid_len = be32_to_cpup(map + 3);
-
- if (rid_base & ~map_mask) {
- pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
- np, map_name, map_name,
- map_mask, rid_base);
- return -EFAULT;
- }
-
- if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
- continue;
-
- phandle_node = of_find_node_by_phandle(phandle);
- if (!phandle_node)
- return -ENODEV;
-
- if (target) {
- if (*target)
- of_node_put(phandle_node);
- else
- *target = phandle_node;
-
- if (*target != phandle_node)
- continue;
- }
-
- if (id_out)
- *id_out = masked_rid - rid_base + out_base;
-
- pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
- np, map_name, map_mask, rid_base, out_base,
- rid_len, rid, masked_rid - rid_base + out_base);
- return 0;
- }
-
- pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
- np, map_name, rid, target && *target ? *target : NULL);
- return -EFAULT;
-}
-
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
* of_irq_parse_pci - Resolve the interrupt for a PCI device
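of_pci_map_rid() is removed here without a replacement in this file; callers are presumably expected to move to the equivalent generic OF helper, of_map_rid(), which takes the same argument list. Translating a requester ID through an "msi-map" would then look like (hedged sketch):

	u32 msi_id;
	int err;

	/* same semantics as the removed of_pci_map_rid() */
	err = of_map_rid(np, rid, "msi-map", "msi-map-mask", NULL, &msi_id);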
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
new file mode 100644
index 000000000000..ae3c5b25dcc7
--- /dev/null
+++ b/drivers/pci/p2pdma.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Peer 2 Peer DMA support.
+ *
+ * Copyright (c) 2016-2018, Logan Gunthorpe
+ * Copyright (c) 2016-2017, Microsemi Corporation
+ * Copyright (c) 2017, Christoph Hellwig
+ * Copyright (c) 2018, Eideticom Inc.
+ */
+
+#define pr_fmt(fmt) "pci-p2pdma: " fmt
+#include <linux/ctype.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+#include <linux/memremap.h>
+#include <linux/percpu-refcount.h>
+#include <linux/random.h>
+#include <linux/seq_buf.h>
+
+struct pci_p2pdma {
+ struct percpu_ref devmap_ref;
+ struct completion devmap_ref_done;
+ struct gen_pool *pool;
+ bool p2pmem_published;
+};
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ size_t size = 0;
+
+ if (pdev->p2pdma->pool)
+ size = gen_pool_size(pdev->p2pdma->pool);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", size);
+}
+static DEVICE_ATTR_RO(size);
+
+static ssize_t available_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ size_t avail = 0;
+
+ if (pdev->p2pdma->pool)
+ avail = gen_pool_avail(pdev->p2pdma->pool);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
+}
+static DEVICE_ATTR_RO(available);
+
+static ssize_t published_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pdev->p2pdma->p2pmem_published);
+}
+static DEVICE_ATTR_RO(published);
+
+static struct attribute *p2pmem_attrs[] = {
+ &dev_attr_size.attr,
+ &dev_attr_available.attr,
+ &dev_attr_published.attr,
+ NULL,
+};
+
+static const struct attribute_group p2pmem_group = {
+ .attrs = p2pmem_attrs,
+ .name = "p2pmem",
+};
+
+static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
+{
+ struct pci_p2pdma *p2p =
+ container_of(ref, struct pci_p2pdma, devmap_ref);
+
+ complete_all(&p2p->devmap_ref_done);
+}
+
+static void pci_p2pdma_percpu_kill(void *data)
+{
+ struct percpu_ref *ref = data;
+
+ /*
+ * pci_p2pdma_add_resource() may be called multiple times
+ * by a driver and may register the percpu_kill devm action multiple
+ * times. We only want the first action to actually kill the
+ * percpu_ref.
+ */
+ if (percpu_ref_is_dying(ref))
+ return;
+
+ percpu_ref_kill(ref);
+}
+
+static void pci_p2pdma_release(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ if (!pdev->p2pdma)
+ return;
+
+ wait_for_completion(&pdev->p2pdma->devmap_ref_done);
+ percpu_ref_exit(&pdev->p2pdma->devmap_ref);
+
+ gen_pool_destroy(pdev->p2pdma->pool);
+ sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
+ pdev->p2pdma = NULL;
+}
+
+static int pci_p2pdma_setup(struct pci_dev *pdev)
+{
+ int error = -ENOMEM;
+ struct pci_p2pdma *p2p;
+
+ p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
+ if (!p2p)
+ return -ENOMEM;
+
+ p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
+ if (!p2p->pool)
+ goto out;
+
+ init_completion(&p2p->devmap_ref_done);
+ error = percpu_ref_init(&p2p->devmap_ref,
+ pci_p2pdma_percpu_release, 0, GFP_KERNEL);
+ if (error)
+ goto out_pool_destroy;
+
+ error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
+ if (error)
+ goto out_pool_destroy;
+
+ pdev->p2pdma = p2p;
+
+ error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
+ if (error)
+ goto out_pool_destroy;
+
+ return 0;
+
+out_pool_destroy:
+ pdev->p2pdma = NULL;
+ gen_pool_destroy(p2p->pool);
+out:
+ devm_kfree(&pdev->dev, p2p);
+ return error;
+}
+
+/**
+ * pci_p2pdma_add_resource - add memory for use as p2p memory
+ * @pdev: the device to add the memory to
+ * @bar: PCI BAR to add
+ * @size: size of the memory to add, may be zero to use the whole BAR
+ * @offset: offset into the PCI BAR
+ *
+ * The memory will be given ZONE_DEVICE struct pages so that it may
+ * be used with any DMA request.
+ */
+int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
+ u64 offset)
+{
+ struct dev_pagemap *pgmap;
+ void *addr;
+ int error;
+
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ return -EINVAL;
+
+ if (offset >= pci_resource_len(pdev, bar))
+ return -EINVAL;
+
+ if (!size)
+ size = pci_resource_len(pdev, bar) - offset;
+
+ if (size + offset > pci_resource_len(pdev, bar))
+ return -EINVAL;
+
+ if (!pdev->p2pdma) {
+ error = pci_p2pdma_setup(pdev);
+ if (error)
+ return error;
+ }
+
+ pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
+ if (!pgmap)
+ return -ENOMEM;
+
+ pgmap->res.start = pci_resource_start(pdev, bar) + offset;
+ pgmap->res.end = pgmap->res.start + size - 1;
+ pgmap->res.flags = pci_resource_flags(pdev, bar);
+ pgmap->ref = &pdev->p2pdma->devmap_ref;
+ pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
+ pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
+ pci_resource_start(pdev, bar);
+
+ addr = devm_memremap_pages(&pdev->dev, pgmap);
+ if (IS_ERR(addr)) {
+ error = PTR_ERR(addr);
+ goto pgmap_free;
+ }
+
+ error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr,
+ pci_bus_address(pdev, bar) + offset,
+ resource_size(&pgmap->res), dev_to_node(&pdev->dev));
+ if (error)
+ goto pgmap_free;
+
+ error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_percpu_kill,
+ &pdev->p2pdma->devmap_ref);
+ if (error)
+ goto pgmap_free;
+
+ pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
+ &pgmap->res);
+
+ return 0;
+
+pgmap_free:
+ devm_kfree(&pdev->dev, pgmap);
+ return error;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
+
+/*
+ * Note this function returns the parent PCI device with a
+ * reference taken. It is the caller's responsibility to drop
+ * the reference.
+ */
+static struct pci_dev *find_parent_pci_dev(struct device *dev)
+{
+ struct device *parent;
+
+ dev = get_device(dev);
+
+ while (dev) {
+ if (dev_is_pci(dev))
+ return to_pci_dev(dev);
+
+ parent = get_device(dev->parent);
+ put_device(dev);
+ dev = parent;
+ }
+
+ return NULL;
+}
+
+/*
+ * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
+ * TLPs upstream via ACS. Returns 1 if the packets will be redirected
+ * upstream, 0 otherwise.
+ */
+static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
+{
+ int pos;
+ u16 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return 0;
+
+ pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+
+ if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
+ return 1;
+
+ return 0;
+}
+
+static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
+{
+ if (!buf)
+ return;
+
+ seq_buf_printf(buf, "%s;", pci_name(pdev));
+}
+
+/*
+ * Find the distance through the nearest common upstream bridge between
+ * two PCI devices.
+ *
+ * If the two devices are the same device then 0 will be returned.
+ *
+ * If there are two virtual functions of the same device behind the same
+ * bridge port then 2 will be returned (one step down to the PCIe switch,
+ * then one step back to the same device).
+ *
+ * In the case where two devices are connected to the same PCIe switch, the
+ * value 4 will be returned. This corresponds to the following PCI tree:
+ *
+ * -+ Root Port
+ * \+ Switch Upstream Port
+ * +-+ Switch Downstream Port
+ * + \- Device A
+ * \-+ Switch Downstream Port
+ * \- Device B
+ *
+ * The distance is 4 because we traverse from Device A through the downstream
+ * port of the switch, to the common upstream port, back up to the second
+ * downstream port and then to Device B.
+ *
+ * Any two devices that don't have a common upstream bridge will return -1.
+ * In this way devices on separate PCIe root ports will be rejected, which
+ * is what we want for peer-to-peer, because each PCIe root port defines a
+ * separate hierarchy domain and there's no way to determine whether the root
+ * complex supports forwarding between them.
+ *
+ * In the case where two devices are connected to different PCIe switches,
+ * this function will still return a positive distance as long as both
+ * switches eventually have a common upstream bridge. Note this covers
+ * the case of using multiple PCIe switches to achieve a desired level of
+ * fan-out from a root port. The exact distance will be a function of the
+ * number of switches between Device A and Device B.
+ *
+ * If a bridge which has any ACS redirection bits set is in the path
+ * then this function will return -2. This is so we reject any
+ * cases where the TLPs are forwarded up into the root complex.
+ * In this case, a list of all infringing bridge addresses will be
+ * populated in acs_list (assuming it's non-null) for printk purposes.
+ */
+static int upstream_bridge_distance(struct pci_dev *a,
+ struct pci_dev *b,
+ struct seq_buf *acs_list)
+{
+ int dist_a = 0;
+ int dist_b = 0;
+ struct pci_dev *bb = NULL;
+ int acs_cnt = 0;
+
+ /*
+ * Note, we don't need to take references to devices returned by
+ * pci_upstream_bridge(), because we hold a reference to a child
+ * device which will already hold a reference to the upstream bridge.
+ */
+
+ while (a) {
+ dist_b = 0;
+
+ if (pci_bridge_has_acs_redir(a)) {
+ seq_buf_print_bus_devfn(acs_list, a);
+ acs_cnt++;
+ }
+
+ bb = b;
+
+ while (bb) {
+ if (a == bb)
+ goto check_b_path_acs;
+
+ bb = pci_upstream_bridge(bb);
+ dist_b++;
+ }
+
+ a = pci_upstream_bridge(a);
+ dist_a++;
+ }
+
+ return -1;
+
+check_b_path_acs:
+ bb = b;
+
+ while (bb) {
+ if (a == bb)
+ break;
+
+ if (pci_bridge_has_acs_redir(bb)) {
+ seq_buf_print_bus_devfn(acs_list, bb);
+ acs_cnt++;
+ }
+
+ bb = pci_upstream_bridge(bb);
+ }
+
+ if (acs_cnt)
+ return -2;
+
+ return dist_a + dist_b;
+}
+
+static int upstream_bridge_distance_warn(struct pci_dev *provider,
+ struct pci_dev *client)
+{
+ struct seq_buf acs_list;
+ int ret;
+
+ seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
+ if (!acs_list.buffer)
+ return -ENOMEM;
+
+ ret = upstream_bridge_distance(provider, client, &acs_list);
+ if (ret == -2) {
+ pci_warn(client, "cannot be used for peer-to-peer DMA as ACS redirect is set between the client and provider (%s)\n",
+ pci_name(provider));
+ /* Drop final semicolon */
+ acs_list.buffer[acs_list.len-1] = 0;
+ pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
+ acs_list.buffer);
+
+ } else if (ret < 0) {
+ pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge\n",
+ pci_name(provider));
+ }
+
+ kfree(acs_list.buffer);
+
+ return ret;
+}
+
+/**
+ * pci_p2pdma_distance_many - Determine the cumulative distance between
+ * a p2pdma provider and the clients in use.
+ * @provider: p2pdma provider to check against the client list
+ * @clients: array of devices to check (NULL-terminated)
+ * @num_clients: number of clients in the array
+ * @verbose: if true, print warnings for devices when we return -1
+ *
+ * Returns -1 if any of the clients are not compatible (i.e. not behind the
+ * same root port as the provider), otherwise returns a positive number where
+ * a lower number is the preferable choice. (If one client is the same device
+ * as the provider, it will return 0, which is the best choice.)
+ *
+ * For now, "compatible" means the provider and the clients are all behind
+ * the same PCI root port. This rules out cases that may work, but it is the
+ * safest approach for the user. Future work can expand this to whitelist root
+ * complexes that can safely forward between their ports.
+ */
+int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
+ int num_clients, bool verbose)
+{
+ bool not_supported = false;
+ struct pci_dev *pci_client;
+ int distance = 0;
+ int i, ret;
+
+ if (num_clients == 0)
+ return -1;
+
+ for (i = 0; i < num_clients; i++) {
+ pci_client = find_parent_pci_dev(clients[i]);
+ if (!pci_client) {
+ if (verbose)
+ dev_warn(clients[i],
+ "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
+ return -1;
+ }
+
+ if (verbose)
+ ret = upstream_bridge_distance_warn(provider,
+ pci_client);
+ else
+ ret = upstream_bridge_distance(provider, pci_client,
+ NULL);
+
+ pci_dev_put(pci_client);
+
+ if (ret < 0)
+ not_supported = true;
+
+ if (not_supported && !verbose)
+ break;
+
+ distance += ret;
+ }
+
+ if (not_supported)
+ return -1;
+
+ return distance;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
+
+/**
+ * pci_has_p2pmem - check if a given PCI device has published any p2pmem
+ * @pdev: PCI device to check
+ */
+bool pci_has_p2pmem(struct pci_dev *pdev)
+{
+ return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
+}
+EXPORT_SYMBOL_GPL(pci_has_p2pmem);
+
+/**
+ * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
+ * the specified list of clients and shortest distance (as determined
+ * by pci_p2pdma_distance_many())
+ * @clients: array of devices to check (NULL-terminated)
+ * @num_clients: number of client devices in the list
+ *
+ * If multiple devices are behind the same switch, the one "closest" to the
+ * client devices in use will be chosen first. (So if one of the providers is
+ * the same as one of the clients, that provider will be used ahead of any
+ * other providers that are unrelated). If multiple providers are an equal
+ * distance away, one will be chosen at random.
+ *
+ * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
+ * to return the reference) or NULL if no compatible device is found. The
+ * found provider will also be assigned to the client list.
+ */
+struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
+{
+ struct pci_dev *pdev = NULL;
+ int distance;
+ int closest_distance = INT_MAX;
+ struct pci_dev **closest_pdevs;
+ int dev_cnt = 0;
+ const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
+ int i;
+
+ closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!closest_pdevs)
+ return NULL;
+
+ while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+ if (!pci_has_p2pmem(pdev))
+ continue;
+
+ distance = pci_p2pdma_distance_many(pdev, clients,
+ num_clients, false);
+ if (distance < 0 || distance > closest_distance)
+ continue;
+
+ if (distance == closest_distance && dev_cnt >= max_devs)
+ continue;
+
+ if (distance < closest_distance) {
+ for (i = 0; i < dev_cnt; i++)
+ pci_dev_put(closest_pdevs[i]);
+
+ dev_cnt = 0;
+ closest_distance = distance;
+ }
+
+ closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
+ }
+
+ if (dev_cnt)
+ pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);
+
+ for (i = 0; i < dev_cnt; i++)
+ pci_dev_put(closest_pdevs[i]);
+
+ kfree(closest_pdevs);
+ return pdev;
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
+
+/**
+ * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
+ * @pdev: the device to allocate memory from
+ * @size: number of bytes to allocate
+ *
+ * Returns the allocated memory or NULL on error.
+ */
+void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
+{
+ void *ret;
+
+ if (unlikely(!pdev->p2pdma))
+ return NULL;
+
+ if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref)))
+ return NULL;
+
+ ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size);
+
+ if (unlikely(!ret))
+ percpu_ref_put(&pdev->p2pdma->devmap_ref);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
+
+/**
+ * pci_free_p2pmem - free peer-to-peer DMA memory
+ * @pdev: the device the memory was allocated from
+ * @addr: address of the memory that was allocated
+ * @size: number of bytes that was allocated
+ */
+void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
+{
+ gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size);
+ percpu_ref_put(&pdev->p2pdma->devmap_ref);
+}
+EXPORT_SYMBOL_GPL(pci_free_p2pmem);
+
+/**
+ * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
+ * address obtained with pci_alloc_p2pmem()
+ * @pdev: the device the memory was allocated from
+ * @addr: address of the memory that was allocated
+ */
+pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
+{
+ if (!addr)
+ return 0;
+ if (!pdev->p2pdma)
+ return 0;
+
+ /*
+ * Note: when we added the memory to the pool we used the PCI
+ * bus address as the physical address. So gen_pool_virt_to_phys()
+ * actually returns the bus address despite the misleading name.
+ */
+ return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
+
+/**
+ * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
+ * @pdev: the device to allocate memory from
+ * @nents: set to the number of SG entries in the allocated list
+ * @length: number of bytes to allocate
+ *
+ * Returns the allocated scatterlist or NULL on error
+ */
+struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length)
+{
+ struct scatterlist *sg;
+ void *addr;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return NULL;
+
+ sg_init_table(sg, 1);
+
+ addr = pci_alloc_p2pmem(pdev, length);
+ if (!addr)
+ goto out_free_sg;
+
+ sg_set_buf(sg, addr, length);
+ *nents = 1;
+ return sg;
+
+out_free_sg:
+ kfree(sg);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);
+
+/**
+ * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
+ * @pdev: the device the memory was allocated from
+ * @sgl: the allocated scatterlist
+ */
+void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
+{
+ struct scatterlist *sg;
+ int count;
+
+ for_each_sg(sgl, sg, INT_MAX, count) {
+ if (!sg)
+ break;
+
+ pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
+ }
+ kfree(sgl);
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);
+
+/**
+ * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
+ * other devices with pci_p2pmem_find()
+ * @pdev: the device with peer-to-peer DMA memory to publish
+ * @publish: set to true to publish the memory, false to unpublish it
+ *
+ * Published memory can be used by other PCI device drivers for
+ * peer-to-peer DMA operations. Non-published memory is reserved for
+ * exclusive use of the device driver that registers the peer-to-peer
+ * memory.
+ */
+void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
+{
+ if (pdev->p2pdma)
+ pdev->p2pdma->p2pmem_published = publish;
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
+
+/**
+ * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA
+ * @dev: device doing the DMA request
+ * @sg: scatter list to map
+ * @nents: elements in the scatterlist
+ * @dir: DMA direction
+ *
+ * Scatterlists mapped with this function should not be unmapped in any way.
+ *
+ * Returns the number of SG entries mapped or 0 on error.
+ */
+int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ struct dev_pagemap *pgmap;
+ struct scatterlist *s;
+ phys_addr_t paddr;
+ int i;
+
+ /*
+ * p2pdma mappings are not compatible with devices that use
+ * dma_virt_ops. If the upper layers do the right thing
+ * this should never happen because it will be prevented
+ * by the check in pci_p2pdma_add_client()
+ */
+ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
+ dev->dma_ops == &dma_virt_ops))
+ return 0;
+
+ for_each_sg(sg, s, nents, i) {
+ pgmap = sg_page(s)->pgmap;
+ paddr = sg_phys(s);
+
+ s->dma_address = paddr - pgmap->pci_p2pdma_bus_offset;
+ sg_dma_len(s) = s->length;
+ }
+
+ return nents;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg);
+
+/**
+ * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
+ * to enable p2pdma
+ * @page: contents of the value to be stored
+ * @p2p_dev: returns the PCI device that was selected to be used
+ * (if one was specified in the stored value)
+ * @use_p2pdma: returns whether to enable p2pdma or not
+ *
+ * Parses an attribute value to decide whether to enable p2pdma.
+ * The value can select a PCI device (using its full BDF device
+ * name) or a boolean (in any format strtobool() accepts). A false
+ * value disables p2pdma, a true value tells the caller
+ * to automatically find a compatible device, and specifying a PCI device
+ * tells the caller to use that specific provider.
+ *
+ * pci_p2pdma_enable_show() should be used as the show operation for
+ * the attribute.
+ *
+ * Returns 0 on success
+ */
+int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
+ bool *use_p2pdma)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
+ if (dev) {
+ *use_p2pdma = true;
+ *p2p_dev = to_pci_dev(dev);
+
+ if (!pci_has_p2pmem(*p2p_dev)) {
+ pci_err(*p2p_dev,
+ "PCI device has no peer-to-peer memory: %s\n",
+ page);
+ pci_dev_put(*p2p_dev);
+ return -ENODEV;
+ }
+
+ return 0;
+ } else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
+ /*
+ * If the user enters a PCI device that doesn't exist
+ * like "0000:01:00.1", we don't want strtobool to think
+ * it's a '0' when it's clearly not what the user wanted.
+ * So we require 0's and 1's to be exactly one character.
+ */
+ } else if (!strtobool(page, use_p2pdma)) {
+ return 0;
+ }
+
+ pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);
+
+/**
+ * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
+ * whether p2pdma is enabled
+ * @page: buffer to print the value into
+ * @p2p_dev: the selected p2p device (NULL if no device is selected)
+ * @use_p2pdma: whether p2pdma has been enabled
+ *
+ * Attributes that use pci_p2pdma_enable_store() should use this function
+ * to show the value of the attribute.
+ *
+ * Returns the number of bytes printed into @page
+ */
+ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
+ bool use_p2pdma)
+{
+ if (!use_p2pdma)
+ return sprintf(page, "0\n");
+
+ if (!p2p_dev)
+ return sprintf(page, "1\n");
+
+ return sprintf(page, "%s\n", pci_name(p2p_dev));
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
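Taken together, the new p2pdma API splits into a provider side and a client side. A condensed usage sketch built only from the functions added above (the BAR number and allocation size are illustrative):

/* provider: donate all of BAR 4 as p2p memory and publish it */
static int my_provider_setup(struct pci_dev *pdev)
{
	int err;

	err = pci_p2pdma_add_resource(pdev, 4, 0, 0);
	if (err)
		return err;

	pci_p2pmem_publish(pdev, true);
	return 0;
}

/* client: find the closest published provider and allocate from it */
static void *my_client_alloc(struct device **clients, int num_clients,
			     struct pci_dev **provider)
{
	*provider = pci_p2pmem_find_many(clients, num_clients);
	if (!*provider)
		return NULL;

	/* caller later frees with pci_free_p2pmem() and drops the
	 * provider reference with pci_dev_put() */
	return pci_alloc_p2pmem(*provider, SZ_4K);
}

The returned buffer can be wrapped in a scatterlist with pci_p2pmem_alloc_sgl() and handed to pci_p2pdma_map_sg() for the actual DMA mapping.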
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index c2ab57705043..2a4aa6468579 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -519,6 +519,46 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
return PCI_POWER_ERROR;
}
+static struct acpi_device *acpi_pci_find_companion(struct device *dev);
+
+static bool acpi_pci_bridge_d3(struct pci_dev *dev)
+{
+ const struct fwnode_handle *fwnode;
+ struct acpi_device *adev;
+ struct pci_dev *root;
+ u8 val;
+
+ if (!dev->is_hotplug_bridge)
+ return false;
+
+ /*
+ * Look for a special _DSD property for the root port and if it
+ * is set we know the hierarchy behind it supports D3 just fine.
+ */
+ root = pci_find_pcie_root_port(dev);
+ if (!root)
+ return false;
+
+ adev = ACPI_COMPANION(&root->dev);
+ if (root == dev) {
+ /*
+ * It is possible that the ACPI companion is not yet bound
+ * for the root port so look it up manually here.
+ */
+ if (!adev && !pci_dev_is_added(root))
+ adev = acpi_pci_find_companion(&root->dev);
+ }
+
+ if (!adev)
+ return false;
+
+ fwnode = acpi_fwnode_handle(adev);
+ if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
+ return false;
+
+ return val == 1;
+}
+
static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
@@ -548,6 +588,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
error = -EBUSY;
break;
}
+ /* Fall through */
case PCI_D0:
case PCI_D1:
case PCI_D2:
@@ -635,6 +676,7 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
}
static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
+ .bridge_d3 = acpi_pci_bridge_d3,
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.get_state = acpi_pci_get_power_state,
@@ -751,10 +793,15 @@ static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct acpi_device *adev = ACPI_COMPANION(dev);
+ int node;
if (!adev)
return;
+ node = acpi_get_node(adev->handle);
+ if (node != NUMA_NO_NODE)
+ set_dev_node(dev, node);
+
pci_acpi_optimize_delay(pci_dev, adev->handle);
pci_acpi_add_pm_notifier(adev, pci_dev);
@@ -762,19 +809,33 @@ static void pci_acpi_setup(struct device *dev)
return;
device_set_wakeup_capable(dev, true);
+ /*
+ * For bridges that can do D3 we enable wake automatically (as
+ * we do for the power management itself in that case). The
+ * reason is that the bridge may have additional methods such as
+ * _DSW that need to be called.
+ */
+ if (pci_dev->bridge_d3)
+ device_wakeup_enable(dev);
+
acpi_pci_wakeup(pci_dev, false);
}
static void pci_acpi_cleanup(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct pci_dev *pci_dev = to_pci_dev(dev);
if (!adev)
return;
pci_acpi_remove_pm_notifier(adev);
- if (adev->wakeup.flags.valid)
+ if (adev->wakeup.flags.valid) {
+ if (pci_dev->bridge_d3)
+ device_wakeup_disable(dev);
+
device_set_wakeup_capable(dev, false);
+ }
}
static bool pci_acpi_bus_match(struct device *dev)
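The core of acpi_pci_bridge_d3() is a single _DSD property test, restated below as a stand-alone predicate (a minimal sketch with a hypothetical name; _DSD device properties surface through the generic fwnode API):

static bool my_port_supports_d3(struct acpi_device *adev)
{
	u8 val;

	if (fwnode_property_read_u8(acpi_fwnode_handle(adev),
				    "HotPlugSupportInD3", &val))
		return false;

	/* firmware asserts the hierarchy below this port handles D3 */
	return val == 1;
}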
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
new file mode 100644
index 000000000000..129738362d90
--- /dev/null
+++ b/drivers/pci/pci-bridge-emul.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell
+ *
+ * Author: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+ *
+ * This file helps PCI controller drivers implement a fake root port
+ * PCI bridge when the HW doesn't provide such a root port PCI
+ * bridge.
+ *
+ * It emulates a PCI bridge by providing a fake PCI configuration
+ * space (and optionally a PCIe capability configuration space) in
+ * memory. By default the read/write operations simply read and update
+ * this fake configuration space in memory. However, PCI controller
+ * drivers can provide through the 'struct pci_bridge_emul_ops'
+ * structure a set of operations to override or complement this
+ * default behavior.
+ */
+
+#include <linux/pci.h>
+#include "pci-bridge-emul.h"
+
+#define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
+#define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
+#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
+
+/*
+ * Initialize a pci_bridge_emul structure to represent a fake PCI
+ * bridge configuration space. The caller needs to have initialized
+ * the PCI configuration space with whatever values make sense
+ * (typically at least vendor, device, revision), the ->ops pointer,
+ * and optionally ->data and ->has_pcie.
+ */
+void pci_bridge_emul_init(struct pci_bridge_emul *bridge)
+{
+ bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
+ bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
+ bridge->conf.cache_line_size = 0x10;
+ bridge->conf.status = PCI_STATUS_CAP_LIST;
+
+ if (bridge->has_pcie) {
+ bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
+ bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
+ /* Set PCIe v2, root port, slot support */
+ bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
+ PCI_EXP_FLAGS_SLOT;
+ }
+}
+
+struct pci_bridge_reg_behavior {
+ /* Read-only bits */
+ u32 ro;
+
+ /* Read-write bits */
+ u32 rw;
+
+ /* Write-1-to-clear bits */
+ u32 w1c;
+
+ /* Reserved bits (hardwired to 0) */
+ u32 rsvd;
+};
+
+static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+ [PCI_VENDOR_ID / 4] = { .ro = ~0 },
+ [PCI_COMMAND / 4] = {
+ .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
+ PCI_COMMAND_SERR),
+ .ro = ((PCI_COMMAND_SPECIAL | PCI_COMMAND_INVALIDATE |
+ PCI_COMMAND_VGA_PALETTE | PCI_COMMAND_WAIT |
+ PCI_COMMAND_FAST_BACK) |
+ (PCI_STATUS_CAP_LIST | PCI_STATUS_66MHZ |
+ PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16),
+ .rsvd = GENMASK(15, 10) | ((BIT(6) | GENMASK(3, 0)) << 16),
+ .w1c = (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY) << 16,
+ },
+ [PCI_CLASS_REVISION / 4] = { .ro = ~0 },
+
+ /*
+ * Cache Line Size register: implemented as read-only, as we do not
+ * pretend to implement "Memory Write and Invalidate"
+ * transactions
+ *
+ * Latency Timer Register: implemented as read-only, as "A
+ * bridge that is not capable of a burst transfer of more than
+ * two data phases on its primary interface is permitted to
+ * hardwire the Latency Timer to a value of 16 or less"
+ *
+ * Header Type: always read-only
+ *
+ * BIST register: implemented as read-only, as "A bridge that
+ * does not support BIST must implement this register as a
+ * read-only register that returns 0 when read"
+ */
+ [PCI_CACHE_LINE_SIZE / 4] = { .ro = ~0 },
+
+ /*
+ * Base Address registers not used must be implemented as
+ * read-only registers that return 0 when read.
+ */
+ [PCI_BASE_ADDRESS_0 / 4] = { .ro = ~0 },
+ [PCI_BASE_ADDRESS_1 / 4] = { .ro = ~0 },
+
+ [PCI_PRIMARY_BUS / 4] = {
+ /* Primary, secondary and subordinate bus are RW */
+ .rw = GENMASK(24, 0),
+ /* Secondary latency is read-only */
+ .ro = GENMASK(31, 24),
+ },
+
+ [PCI_IO_BASE / 4] = {
+ /* The high four bits of I/O base/limit are RW */
+ .rw = (GENMASK(15, 12) | GENMASK(7, 4)),
+
+ /* The low four bits of I/O base/limit are RO */
+ .ro = (((PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK |
+ PCI_STATUS_DEVSEL_MASK) << 16) |
+ GENMASK(11, 8) | GENMASK(3, 0)),
+
+ .w1c = (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY) << 16,
+
+ .rsvd = ((BIT(6) | GENMASK(4, 0)) << 16),
+ },
+
+ [PCI_MEMORY_BASE / 4] = {
+ /* The high 12-bits of mem base/limit are RW */
+ .rw = GENMASK(31, 20) | GENMASK(15, 4),
+
+ /* The low four bits of mem base/limit are RO */
+ .ro = GENMASK(19, 16) | GENMASK(3, 0),
+ },
+
+ [PCI_PREF_MEMORY_BASE / 4] = {
+ /* The high 12-bits of pref mem base/limit are RW */
+ .rw = GENMASK(31, 20) | GENMASK(15, 4),
+
+ /* The low four bits of pref mem base/limit are RO */
+ .ro = GENMASK(19, 16) | GENMASK(3, 0),
+ },
+
+ [PCI_PREF_BASE_UPPER32 / 4] = {
+ .rw = ~0,
+ },
+
+ [PCI_PREF_LIMIT_UPPER32 / 4] = {
+ .rw = ~0,
+ },
+
+ [PCI_IO_BASE_UPPER16 / 4] = {
+ .rw = ~0,
+ },
+
+ [PCI_CAPABILITY_LIST / 4] = {
+ .ro = GENMASK(7, 0),
+ .rsvd = GENMASK(31, 8),
+ },
+
+ [PCI_ROM_ADDRESS1 / 4] = {
+ .rw = GENMASK(31, 11) | BIT(0),
+ .rsvd = GENMASK(10, 1),
+ },
+
+ /*
+ * Interrupt line (bits 7:0) are RW, interrupt pin (bits 15:8)
+ * are RO, and bridge control (31:16) are a mix of RW, RO,
+ * reserved and W1C bits
+ */
+ [PCI_INTERRUPT_LINE / 4] = {
+ /* Interrupt line is RW */
+ .rw = (GENMASK(7, 0) |
+ ((PCI_BRIDGE_CTL_PARITY |
+ PCI_BRIDGE_CTL_SERR |
+ PCI_BRIDGE_CTL_ISA |
+ PCI_BRIDGE_CTL_VGA |
+ PCI_BRIDGE_CTL_MASTER_ABORT |
+ PCI_BRIDGE_CTL_BUS_RESET |
+ BIT(8) | BIT(9) | BIT(11)) << 16)),
+
+ /* Interrupt pin is RO */
+ .ro = (GENMASK(15, 8) | ((PCI_BRIDGE_CTL_FAST_BACK) << 16)),
+
+ .w1c = BIT(10) << 16,
+
+ .rsvd = (GENMASK(15, 12) | BIT(4)) << 16,
+ },
+};
+
+static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
+ [PCI_CAP_LIST_ID / 4] = {
+ /*
+ * Capability ID, Next Capability Pointer and
+ * Capabilities register are all read-only.
+ */
+ .ro = ~0,
+ },
+
+ [PCI_EXP_DEVCAP / 4] = {
+ .ro = ~0,
+ },
+
+ [PCI_EXP_DEVCTL / 4] = {
+ /* Device control register is RW */
+ .rw = GENMASK(15, 0),
+
+ /*
+ * Device status register has 4 bits W1C, then 2 bits
+ * RO, the rest is reserved
+ */
+ .w1c = GENMASK(19, 16),
+ .ro = GENMASK(21, 20),
+ .rsvd = GENMASK(31, 22),
+ },
+
+ [PCI_EXP_LNKCAP / 4] = {
+ /* All bits are RO, except bit 23 which is reserved */
+ .ro = lower_32_bits(~BIT(23)),
+ .rsvd = BIT(23),
+ },
+
+ [PCI_EXP_LNKCTL / 4] = {
+ /*
+ * Link control has bits [1:0] and [11:3] RW, the
+ * other bits are reserved.
+ * Link status has bits [13:0] RO, and bits [14:15]
+ * W1C.
+ */
+ .rw = GENMASK(11, 3) | GENMASK(1, 0),
+ .ro = GENMASK(13, 0) << 16,
+ .w1c = GENMASK(15, 14) << 16,
+ .rsvd = GENMASK(15, 12) | BIT(2),
+ },
+
+ [PCI_EXP_SLTCAP / 4] = {
+ .ro = ~0,
+ },
+
+ [PCI_EXP_SLTCTL / 4] = {
+ /*
+ * Slot control has bits [12:0] RW, the rest is
+ * reserved.
+ *
+ * Slot status has a mix of W1C and RO bits, as well
+ * as reserved bits.
+ */
+ .rw = GENMASK(12, 0),
+ .w1c = (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+ PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC) << 16,
+ .ro = (PCI_EXP_SLTSTA_MRLSS | PCI_EXP_SLTSTA_PDS |
+ PCI_EXP_SLTSTA_EIS) << 16,
+ .rsvd = GENMASK(15, 13) | (GENMASK(15, 9) << 16),
+ },
+
+ [PCI_EXP_RTCTL / 4] = {
+ /*
+ * Root control has bits [4:0] RW, the rest is
+ * reserved.
+ *
+ * Root capabilities (upper 16 bits) has bit 0 (CRS Software
+ * Visibility) RO, the rest is reserved.
+ */
+ .rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
+ PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
+ PCI_EXP_RTCTL_CRSSVE),
+ .ro = PCI_EXP_RTCAP_CRSVIS << 16,
+ .rsvd = GENMASK(15, 5) | (GENMASK(15, 1) << 16),
+ },
+
+ [PCI_EXP_RTSTA / 4] = {
+ .ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING,
+ .w1c = PCI_EXP_RTSTA_PME,
+ .rsvd = GENMASK(31, 18),
+ },
+};
+
+/*
+ * Should be called by the PCI controller driver when reading the PCI
+ * configuration space of the fake bridge. It will call back the
+ * ->ops->read_base or ->ops->read_pcie operations.
+ */
+int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
+ int size, u32 *value)
+{
+ int ret;
+ int reg = where & ~3;
+ pci_bridge_emul_read_status_t (*read_op)(struct pci_bridge_emul *bridge,
+ int reg, u32 *value);
+ u32 *cfgspace;
+ const struct pci_bridge_reg_behavior *behavior;
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (!bridge->has_pcie && reg >= PCI_BRIDGE_CONF_END) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
+ reg -= PCI_CAP_PCIE_START;
+ read_op = bridge->ops->read_pcie;
+ cfgspace = (u32 *) &bridge->pcie_conf;
+ behavior = pcie_cap_regs_behavior;
+ } else {
+ read_op = bridge->ops->read_base;
+ cfgspace = (u32 *) &bridge->conf;
+ behavior = pci_regs_behavior;
+ }
+
+ if (read_op)
+ ret = read_op(bridge, reg, value);
+ else
+ ret = PCI_BRIDGE_EMUL_NOT_HANDLED;
+
+ if (ret == PCI_BRIDGE_EMUL_NOT_HANDLED)
+ *value = cfgspace[reg / 4];
+
+ /*
+ * Make sure we never return any reserved bit with a value
+ * different from 0.
+ */
+ *value &= ~behavior[reg / 4].rsvd;
+
+ if (size == 1)
+ *value = (*value >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *value = (*value >> (8 * (where & 3))) & 0xffff;
+ else if (size != 4)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Should be called by the PCI controller driver when writing the PCI
+ * configuration space of the fake bridge. It will call back the
+ * ->ops->write_base or ->ops->write_pcie operations.
+ */
+int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
+ int size, u32 value)
+{
+ int reg = where & ~3;
+ int mask, ret, old, new, shift;
+ void (*write_op)(struct pci_bridge_emul *bridge, int reg,
+ u32 old, u32 new, u32 mask);
+ u32 *cfgspace;
+ const struct pci_bridge_reg_behavior *behavior;
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END)
+ return PCIBIOS_SUCCESSFUL;
+
+ if (!bridge->has_pcie && reg >= PCI_BRIDGE_CONF_END)
+ return PCIBIOS_SUCCESSFUL;
+
+ shift = (where & 0x3) * 8;
+
+ if (size == 4)
+ mask = 0xffffffff;
+ else if (size == 2)
+ mask = 0xffff << shift;
+ else if (size == 1)
+ mask = 0xff << shift;
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ ret = pci_bridge_emul_conf_read(bridge, reg, 4, &old);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
+ reg -= PCI_CAP_PCIE_START;
+ write_op = bridge->ops->write_pcie;
+ cfgspace = (u32 *) &bridge->pcie_conf;
+ behavior = pcie_cap_regs_behavior;
+ } else {
+ write_op = bridge->ops->write_base;
+ cfgspace = (u32 *) &bridge->conf;
+ behavior = pci_regs_behavior;
+ }
+
+ /* Keep all bits, except the RW bits */
+ new = old & (~mask | ~behavior[reg / 4].rw);
+
+ /* Update the value of the RW bits */
+ new |= (value << shift) & (behavior[reg / 4].rw & mask);
+
+ /* Clear the W1C bits */
+ new &= ~((value << shift) & (behavior[reg / 4].w1c & mask));
+
+ cfgspace[reg / 4] = new;
+
+ if (write_op)
+ write_op(bridge, reg, old, new, mask);
+
+ return PCIBIOS_SUCCESSFUL;
+}
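A controller driver consuming this helper routes config accesses on the root bus to the emulated bridge and everything else to real hardware. A hypothetical read accessor (my_pcie and its bridge member are illustrative names):

struct my_pcie {
	struct pci_bridge_emul bridge;
	/* ... real hardware state ... */
};

static int my_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 *val)
{
	struct my_pcie *pcie = bus->sysdata;

	/* the root bus contains only the emulated root port bridge */
	if (!bus->parent)
		return pci_bridge_emul_conf_read(&pcie->bridge, where,
						 size, val);

	/* accesses on subordinate buses would go to the hardware here */
	return PCIBIOS_DEVICE_NOT_FOUND;
}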
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
new file mode 100644
index 000000000000..9d510ccf738b
--- /dev/null
+++ b/drivers/pci/pci-bridge-emul.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PCI_BRIDGE_EMUL_H__
+#define __PCI_BRIDGE_EMUL_H__
+
+#include <linux/kernel.h>
+
+/* PCI configuration space of a PCI-to-PCI bridge. */
+struct pci_bridge_emul_conf {
+ u16 vendor;
+ u16 device;
+ u16 command;
+ u16 status;
+ u32 class_revision;
+ u8 cache_line_size;
+ u8 latency_timer;
+ u8 header_type;
+ u8 bist;
+ u32 bar[2];
+ u8 primary_bus;
+ u8 secondary_bus;
+ u8 subordinate_bus;
+ u8 secondary_latency_timer;
+ u8 iobase;
+ u8 iolimit;
+ u16 secondary_status;
+ u16 membase;
+ u16 memlimit;
+ u16 pref_mem_base;
+ u16 pref_mem_limit;
+ u32 prefbaseupper;
+ u32 preflimitupper;
+ u16 iobaseupper;
+ u16 iolimitupper;
+ u8 capabilities_pointer;
+ u8 reserve[3];
+ u32 romaddr;
+ u8 intline;
+ u8 intpin;
+ u16 bridgectrl;
+};
+
+/* PCI configuration space of the PCIe capabilities */
+struct pci_bridge_emul_pcie_conf {
+ u8 cap_id;
+ u8 next;
+ u16 cap;
+ u32 devcap;
+ u16 devctl;
+ u16 devsta;
+ u32 lnkcap;
+ u16 lnkctl;
+ u16 lnksta;
+ u32 slotcap;
+ u16 slotctl;
+ u16 slotsta;
+ u16 rootctl;
+ u16 rsvd;
+ u32 rootsta;
+ u32 devcap2;
+ u16 devctl2;
+ u16 devsta2;
+ u32 lnkcap2;
+ u16 lnkctl2;
+ u16 lnksta2;
+ u32 slotcap2;
+ u16 slotctl2;
+ u16 slotsta2;
+};
+
+struct pci_bridge_emul;
+
+typedef enum { PCI_BRIDGE_EMUL_HANDLED,
+ PCI_BRIDGE_EMUL_NOT_HANDLED } pci_bridge_emul_read_status_t;
+
+struct pci_bridge_emul_ops {
+ /*
+ * Called when reading from the regular PCI bridge
+ * configuration space. Return PCI_BRIDGE_EMUL_HANDLED when the
+ * callback has handled the read and filled in *value, or
+ * PCI_BRIDGE_EMUL_NOT_HANDLED when the read should
+ * be emulated by the common code by reading from the
+ * in-memory copy of the configuration space.
+ */
+ pci_bridge_emul_read_status_t (*read_base)(struct pci_bridge_emul *bridge,
+ int reg, u32 *value);
+
+ /*
+ * Same as ->read_base(), except it is for reading from the
+ * PCIe capability configuration space.
+ */
+ pci_bridge_emul_read_status_t (*read_pcie)(struct pci_bridge_emul *bridge,
+ int reg, u32 *value);
+ /*
+ * Called when writing to the regular PCI bridge configuration
+ * space. old is the current value, new is the new value being
+ * written, and mask indicates which parts of the value are
+ * being changed.
+ */
+ void (*write_base)(struct pci_bridge_emul *bridge, int reg,
+ u32 old, u32 new, u32 mask);
+
+ /*
+ * Same as ->write_base(), except it is for writing to the
+ * PCIe capability configuration space.
+ */
+ void (*write_pcie)(struct pci_bridge_emul *bridge, int reg,
+ u32 old, u32 new, u32 mask);
+};
+
+struct pci_bridge_emul {
+ struct pci_bridge_emul_conf conf;
+ struct pci_bridge_emul_pcie_conf pcie_conf;
+ struct pci_bridge_emul_ops *ops;
+ void *data;
+ bool has_pcie;
+};
+
+void pci_bridge_emul_init(struct pci_bridge_emul *bridge);
+int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
+ int size, u32 *value);
+int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
+ int size, u32 value);
+
+#endif /* __PCI_BRIDGE_EMUL_H__ */
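An ops implementation only has to intercept the registers it backs with live hardware state and return PCI_BRIDGE_EMUL_NOT_HANDLED for the rest, which the common code then serves from the in-memory copy. A hypothetical read_pcie hook (my_hw_read_lnkctl stands in for a real register read):

static u32 my_hw_read_lnkctl(void *data);	/* hypothetical HW accessor */

static pci_bridge_emul_read_status_t
my_read_pcie(struct pci_bridge_emul *bridge, int reg, u32 *value)
{
	switch (reg) {
	case PCI_EXP_LNKCTL:
		/* expose live link control/status from the hardware */
		*value = my_hw_read_lnkctl(bridge->data);
		return PCI_BRIDGE_EMUL_HANDLED;
	default:
		/* everything else is served from the in-memory copy */
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}
}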
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 51b6c81671c1..d068f11d08a7 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -35,6 +35,8 @@
#include <linux/aer.h>
#include "pci.h"
+DEFINE_MUTEX(pci_slot_mutex);
+
const char *pci_power_names[] = {
"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
@@ -196,7 +198,7 @@ EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
/**
* pci_dev_str_match_path - test if a path string matches a device
* @dev: the PCI device to test
- * @p: string to match the device against
+ * @path: string to match the device against
* @endptr: pointer to the string after the match
*
* Test if a string (typically from a kernel parameter) formatted as a
@@ -791,6 +793,11 @@ static inline bool platform_pci_need_resume(struct pci_dev *dev)
return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}
+static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
+{
+ return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
+}
+
/**
* pci_raw_set_power_state - Use PCI PM registers to set the power state of
* given PCI device
@@ -999,7 +1006,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
* because we have already delayed for the bridge.
*/
if (dev->runtime_d3cold) {
- if (dev->d3cold_delay)
+ if (dev->d3cold_delay && !dev->imm_ready)
msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
@@ -1284,6 +1291,7 @@ int pci_save_state(struct pci_dev *dev)
if (i != 0)
return i;
+ pci_save_dpc_state(dev);
return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
@@ -1389,6 +1397,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
pci_restore_rebar_state(dev);
+ pci_restore_dpc_state(dev);
pci_cleanup_aer_error_status_regs(dev);
@@ -2144,10 +2153,13 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
int ret = 0;
/*
- * Bridges can only signal wakeup on behalf of subordinate devices,
- * but that is set up elsewhere, so skip them.
+ * Bridges that are not power-manageable directly only signal
+ * wakeup on behalf of subordinate devices which is set up
+ * elsewhere, so skip them. However, bridges that are
+ * power-manageable may signal wakeup for themselves (for example,
+ * on a hotplug event) and they need to be covered here.
*/
- if (pci_has_subordinate(dev))
+ if (!pci_power_manageable(dev))
return 0;
/* Don't do the same thing twice in a row for one device. */
@@ -2522,6 +2534,10 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
if (bridge->is_thunderbolt)
return true;
+ /* Platform might know better if the bridge supports D3 */
+ if (platform_pci_bridge_d3(bridge))
+ return true;
+
/*
* Hotplug ports handled natively by the OS were not validated
* by vendors for runtime D3 at least until 2018 because there
@@ -2655,6 +2671,7 @@ EXPORT_SYMBOL_GPL(pci_d3cold_disable);
void pci_pm_init(struct pci_dev *dev)
{
int pm;
+ u16 status;
u16 pmc;
pm_runtime_forbid(&dev->dev);
@@ -2717,6 +2734,10 @@ void pci_pm_init(struct pci_dev *dev)
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
}
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+ if (status & PCI_STATUS_IMM_READY)
+ dev->imm_ready = 1;
}
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
@@ -4387,6 +4408,9 @@ int pcie_flr(struct pci_dev *dev)
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ if (dev->imm_ready)
+ return 0;
+
/*
* Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
* 100ms, but may silently discard requests while the FLR is in
@@ -4428,6 +4452,9 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
+ if (dev->imm_ready)
+ return 0;
+
/*
* Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
* updated 27 July 2006; a device must complete an FLR within
@@ -4496,21 +4523,42 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
bool ret;
u16 lnk_status;
+ /*
+ * Some controllers might not implement link active reporting. In this
+ * case, we wait for 1000 ms + 100 ms: the full link-up timeout plus the
+ * settle time required before issuing configuration requests.
+ */
+ if (!pdev->link_active_reporting) {
+ msleep(1100);
+ return true;
+ }
+
+ /*
+ * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
+ * 20ms, after which we should expect the link to become active if the
+ * reset was successful. If so, software must wait a minimum of 100ms
+ * before sending configuration requests to devices downstream of this
+ * port.
+ *
+ * If the link fails to activate, either the device was physically
+ * removed or the link has permanently failed.
+ */
+ if (active)
+ msleep(20);
for (;;) {
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
if (ret == active)
- return true;
+ break;
if (timeout <= 0)
break;
msleep(10);
timeout -= 10;
}
-
- pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
- active ? "set" : "cleared");
-
- return false;
+ if (active && ret)
+ msleep(100);
+ else if (ret != active)
+ pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+ return ret == active;
}
void pci_reset_secondary_bus(struct pci_dev *dev)
@@ -4582,13 +4630,13 @@ static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
{
int rc = -ENOTTY;
- if (!hotplug || !try_module_get(hotplug->ops->owner))
+ if (!hotplug || !try_module_get(hotplug->owner))
return rc;
if (hotplug->ops->reset_slot)
rc = hotplug->ops->reset_slot(hotplug, probe);
- module_put(hotplug->ops->owner);
+ module_put(hotplug->owner);
return rc;
}
@@ -5165,6 +5213,41 @@ static int pci_bus_reset(struct pci_bus *bus, int probe)
}
/**
+ * pci_bus_error_reset - reset the bridge's subordinate bus
+ * @bridge: The parent device that connects to the bus to reset
+ *
+ * This function first tries to reset the slots on this bus if that method is
+ * available. If slot reset fails or is not available, it falls back to a
+ * secondary bus reset.
+ */
+int pci_bus_error_reset(struct pci_dev *bridge)
+{
+ struct pci_bus *bus = bridge->subordinate;
+ struct pci_slot *slot;
+
+ if (!bus)
+ return -ENOTTY;
+
+ mutex_lock(&pci_slot_mutex);
+ if (list_empty(&bus->slots))
+ goto bus_reset;
+
+ list_for_each_entry(slot, &bus->slots, list)
+ if (pci_probe_reset_slot(slot))
+ goto bus_reset;
+
+ list_for_each_entry(slot, &bus->slots, list)
+ if (pci_slot_reset(slot, 0))
+ goto bus_reset;
+
+ mutex_unlock(&pci_slot_mutex);
+ return 0;
+bus_reset:
+ mutex_unlock(&pci_slot_mutex);
+ return pci_bus_reset(bridge->subordinate, 0);
+}
+
+/**
* pci_probe_reset_bus - probe whether a PCI bus can be reset
* @bus: PCI bus to probe
*
@@ -5701,8 +5784,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
{
if (!dev->dma_alias_mask)
- dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
- sizeof(long), GFP_KERNEL);
+ dev->dma_alias_mask = bitmap_zalloc(U8_MAX, GFP_KERNEL);
if (!dev->dma_alias_mask) {
pci_warn(dev, "Unable to allocate DMA alias mask\n");
return;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6e0d1528d471..662b7457db23 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -35,10 +35,13 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
int pci_probe_reset_function(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
+int pci_bus_error_reset(struct pci_dev *dev);
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
*
+ * @bridge_d3: Does the bridge allow entering D3
+ *
* @is_manageable: returns 'true' if given device is power manageable by the
* platform firmware
*
@@ -60,6 +63,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
* these callbacks are mandatory.
*/
struct pci_platform_pm_ops {
+ bool (*bridge_d3)(struct pci_dev *dev);
bool (*is_manageable)(struct pci_dev *dev);
int (*set_state)(struct pci_dev *dev, pci_power_t state);
pci_power_t (*get_state)(struct pci_dev *dev);
@@ -136,6 +140,7 @@ static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
/* Lock for read/write access to pci device and bus lists */
extern struct rw_semaphore pci_bus_sem;
+extern struct mutex pci_slot_mutex;
extern raw_spinlock_t pci_lock;
@@ -285,6 +290,7 @@ struct pci_sriov {
u16 driver_max_VFs; /* Max num VFs driver supports */
struct pci_dev *dev; /* Lowest numbered PF */
struct pci_dev *self; /* This PF */
+ u32 cfg_size; /* VF config space size */
u32 class; /* VF device */
u8 hdr_type; /* VF header type */
u16 subsystem_vendor; /* VF subsystem vendor */
@@ -293,21 +299,71 @@ struct pci_sriov {
bool drivers_autoprobe; /* Auto probing of VFs by driver */
};
-/* pci_dev priv_flags */
-#define PCI_DEV_DISCONNECTED 0
-#define PCI_DEV_ADDED 1
+/**
+ * pci_dev_set_io_state - Set the new error state if possible.
+ *
+ * @dev: PCI device whose error_state is to be set
+ * @new: the state we want @dev to be in
+ *
+ * Must be called with device_lock held.
+ *
+ * Returns true if the state has been changed to the requested state.
+ */
+static inline bool pci_dev_set_io_state(struct pci_dev *dev,
+ pci_channel_state_t new)
+{
+ bool changed = false;
+
+ device_lock_assert(&dev->dev);
+ switch (new) {
+ case pci_channel_io_perm_failure:
+ switch (dev->error_state) {
+ case pci_channel_io_frozen:
+ case pci_channel_io_normal:
+ case pci_channel_io_perm_failure:
+ changed = true;
+ break;
+ }
+ break;
+ case pci_channel_io_frozen:
+ switch (dev->error_state) {
+ case pci_channel_io_frozen:
+ case pci_channel_io_normal:
+ changed = true;
+ break;
+ }
+ break;
+ case pci_channel_io_normal:
+ switch (dev->error_state) {
+ case pci_channel_io_frozen:
+ case pci_channel_io_normal:
+ changed = true;
+ break;
+ }
+ break;
+ }
+ if (changed)
+ dev->error_state = new;
+ return changed;
+}
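Spelled out, the nested switches above encode this transition matrix (rows are the requested state, columns the current state); the summary is derived directly from the code:

	/*
	 *                     from normal  from frozen  from perm_failure
	 * to normal               yes          yes            no
	 * to frozen               yes          yes            no
	 * to perm_failure         yes          yes            yes
	 *
	 * pci_channel_io_perm_failure is absorbing: once a device is marked
	 * permanently failed, no transition back to normal or frozen is
	 * allowed.
	 */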
static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
{
- set_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
+ device_lock(&dev->dev);
+ pci_dev_set_io_state(dev, pci_channel_io_perm_failure);
+ device_unlock(&dev->dev);
+
return 0;
}
static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
{
- return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
+ return dev->error_state == pci_channel_io_perm_failure;
}
+/* pci_dev priv_flags */
+#define PCI_DEV_ADDED 0
+
static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
{
assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
@@ -346,6 +402,14 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
#endif /* CONFIG_PCIEAER */
+#ifdef CONFIG_PCIE_DPC
+void pci_save_dpc_state(struct pci_dev *dev);
+void pci_restore_dpc_state(struct pci_dev *dev);
+#else
+static inline void pci_save_dpc_state(struct pci_dev *dev) {}
+static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
+#endif
+
#ifdef CONFIG_PCI_ATS
void pci_restore_ats_state(struct pci_dev *dev);
#else
@@ -423,8 +487,8 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
#endif
/* PCI error reporting and recovery */
-void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
-void pcie_do_nonfatal_recovery(struct pci_dev *dev);
+void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
+ u32 service);
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 0a1e9d379bc5..44742b2e1126 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -36,7 +36,6 @@ config PCIEAER
config PCIEAER_INJECT
tristate "PCI Express error injection support"
depends on PCIEAER
- default n
help
This enables PCI Express Root Port Advanced Error Reporting
(AER) software error injector.
@@ -84,7 +83,6 @@ config PCIEASPM
config PCIEASPM_DEBUG
bool "Debug PCI Express ASPM"
depends on PCIEASPM
- default n
help
This enables PCI Express ASPM debug support. It will add per-device
interface to control ASPM.
@@ -129,7 +127,6 @@ config PCIE_PME
config PCIE_DPC
bool "PCI Express Downstream Port Containment support"
depends on PCIEPORTBUS && PCIEAER
- default n
help
This enables PCI Express Downstream Port Containment (DPC)
driver support. DPC events from Root and Downstream ports
@@ -139,7 +136,6 @@ config PCIE_DPC
config PCIE_PTM
bool "PCI Express Precision Time Measurement support"
- default n
depends on PCIEPORTBUS
help
This enables PCI Express Precision Time Measurement (PTM)
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 83180edd6ed4..a90a9194ac4a 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -30,7 +30,7 @@
#include "../pci.h"
#include "portdrv.h"
-#define AER_ERROR_SOURCES_MAX 100
+#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
#define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS*/
@@ -42,21 +42,7 @@ struct aer_err_source {
struct aer_rpc {
struct pci_dev *rpd; /* Root Port device */
- struct work_struct dpc_handler;
- struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
- struct aer_err_info e_info;
- unsigned short prod_idx; /* Error Producer Index */
- unsigned short cons_idx; /* Error Consumer Index */
- int isr;
- spinlock_t e_lock; /*
- * Lock access to Error Status/ID Regs
- * and error producer/consumer index
- */
- struct mutex rpc_mutex; /*
- * only one thread could do
- * recovery on the same
- * root port hierarchy
- */
+ DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};
/* AER stats for the device */
@@ -866,7 +852,7 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
- e_info->dev[e_info->error_dev_num] = dev;
+ e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
e_info->error_dev_num++;
return 0;
}
@@ -1010,9 +996,12 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
info->status);
pci_aer_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
- pcie_do_nonfatal_recovery(dev);
+ pcie_do_recovery(dev, pci_channel_io_normal,
+ PCIE_PORT_SERVICE_AER);
else if (info->severity == AER_FATAL)
- pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
+ pcie_do_recovery(dev, pci_channel_io_frozen,
+ PCIE_PORT_SERVICE_AER);
+ pci_dev_put(dev);
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -1047,9 +1036,11 @@ static void aer_recover_work_func(struct work_struct *work)
}
cper_print_aer(pdev, entry.severity, entry.regs);
if (entry.severity == AER_NONFATAL)
- pcie_do_nonfatal_recovery(pdev);
+ pcie_do_recovery(pdev, pci_channel_io_normal,
+ PCIE_PORT_SERVICE_AER);
else if (entry.severity == AER_FATAL)
- pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
+ pcie_do_recovery(pdev, pci_channel_io_frozen,
+ PCIE_PORT_SERVICE_AER);
pci_dev_put(pdev);
}
}
@@ -1065,7 +1056,6 @@ static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
int severity, struct aer_capability_regs *aer_regs)
{
- unsigned long flags;
struct aer_recover_entry entry = {
.bus = bus,
.devfn = devfn,
@@ -1074,13 +1064,12 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
.regs = aer_regs,
};
- spin_lock_irqsave(&aer_recover_ring_lock, flags);
- if (kfifo_put(&aer_recover_ring, entry))
+ if (kfifo_in_spinlocked(&aer_recover_ring, &entry, sizeof(entry),
+ &aer_recover_ring_lock))
schedule_work(&aer_recover_work);
else
pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
#endif
@@ -1115,8 +1104,9 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
&info->mask);
if (!(info->status & ~info->mask))
return 0;
- } else if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- info->severity == AER_NONFATAL) {
+ } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
+ info->severity == AER_NONFATAL) {
/* Link is still healthy for IO reads */
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
@@ -1170,7 +1160,7 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
struct aer_err_source *e_src)
{
struct pci_dev *pdev = rpc->rpd;
- struct aer_err_info *e_info = &rpc->e_info;
+ struct aer_err_info e_info;
pci_rootport_aer_stats_incr(pdev, e_src);
@@ -1179,83 +1169,57 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
* uncorrectable error being logged. Report correctable error first.
*/
if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
- e_info->id = ERR_COR_ID(e_src->id);
- e_info->severity = AER_CORRECTABLE;
+ e_info.id = ERR_COR_ID(e_src->id);
+ e_info.severity = AER_CORRECTABLE;
if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
- e_info->multi_error_valid = 1;
+ e_info.multi_error_valid = 1;
else
- e_info->multi_error_valid = 0;
- aer_print_port_info(pdev, e_info);
+ e_info.multi_error_valid = 0;
+ aer_print_port_info(pdev, &e_info);
- if (find_source_device(pdev, e_info))
- aer_process_err_devices(e_info);
+ if (find_source_device(pdev, &e_info))
+ aer_process_err_devices(&e_info);
}
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
- e_info->id = ERR_UNCOR_ID(e_src->id);
+ e_info.id = ERR_UNCOR_ID(e_src->id);
if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- e_info->severity = AER_FATAL;
+ e_info.severity = AER_FATAL;
else
- e_info->severity = AER_NONFATAL;
+ e_info.severity = AER_NONFATAL;
if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
- e_info->multi_error_valid = 1;
+ e_info.multi_error_valid = 1;
else
- e_info->multi_error_valid = 0;
+ e_info.multi_error_valid = 0;
- aer_print_port_info(pdev, e_info);
+ aer_print_port_info(pdev, &e_info);
- if (find_source_device(pdev, e_info))
- aer_process_err_devices(e_info);
+ if (find_source_device(pdev, &e_info))
+ aer_process_err_devices(&e_info);
}
}
/**
- * get_e_source - retrieve an error source
- * @rpc: pointer to the root port which holds an error
- * @e_src: pointer to store retrieved error source
- *
- * Return 1 if an error source is retrieved, otherwise 0.
- *
- * Invoked by DPC handler to consume an error.
- */
-static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
-{
- unsigned long flags;
-
- /* Lock access to Root error producer/consumer index */
- spin_lock_irqsave(&rpc->e_lock, flags);
- if (rpc->prod_idx == rpc->cons_idx) {
- spin_unlock_irqrestore(&rpc->e_lock, flags);
- return 0;
- }
-
- *e_src = rpc->e_sources[rpc->cons_idx];
- rpc->cons_idx++;
- if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
- rpc->cons_idx = 0;
- spin_unlock_irqrestore(&rpc->e_lock, flags);
-
- return 1;
-}
-
-/**
* aer_isr - consume errors detected by root port
- * @work: definition of this work item
+ * @irq: IRQ line
+ * @context: pointer to the Root Port's struct pcie_device
 *
- * Invoked, as DPC, when root port records new detected error
+ * Invoked from the threaded IRQ handler when the Root Port records a
+ * newly detected error.
*/
-static void aer_isr(struct work_struct *work)
+static irqreturn_t aer_isr(int irq, void *context)
{
- struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
+ struct pcie_device *dev = (struct pcie_device *)context;
+ struct aer_rpc *rpc = get_service_data(dev);
struct aer_err_source uninitialized_var(e_src);
- mutex_lock(&rpc->rpc_mutex);
- while (get_e_source(rpc, &e_src))
+ if (kfifo_is_empty(&rpc->aer_fifo))
+ return IRQ_NONE;
+
+ while (kfifo_get(&rpc->aer_fifo, &e_src))
aer_isr_one_error(rpc, &e_src);
- mutex_unlock(&rpc->rpc_mutex);
+ return IRQ_HANDLED;
}
/**
@@ -1265,56 +1229,26 @@ static void aer_isr(struct work_struct *work)
*
* Invoked when Root Port detects AER messages.
*/
-irqreturn_t aer_irq(int irq, void *context)
+static irqreturn_t aer_irq(int irq, void *context)
{
- unsigned int status, id;
struct pcie_device *pdev = (struct pcie_device *)context;
struct aer_rpc *rpc = get_service_data(pdev);
- int next_prod_idx;
- unsigned long flags;
- int pos;
-
- pos = pdev->port->aer_cap;
- /*
- * Must lock access to Root Error Status Reg, Root Error ID Reg,
- * and Root error producer/consumer index
- */
- spin_lock_irqsave(&rpc->e_lock, flags);
+ struct pci_dev *rp = rpc->rpd;
+ struct aer_err_source e_src = {};
+ int pos = rp->aer_cap;
- /* Read error status */
- pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status);
- if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) {
- spin_unlock_irqrestore(&rpc->e_lock, flags);
+ pci_read_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, &e_src.status);
+ if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
return IRQ_NONE;
- }
- /* Read error source and clear error status */
- pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id);
- pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status);
+ pci_read_config_dword(rp, pos + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
+ pci_write_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, e_src.status);
- /* Store error source for later DPC handler */
- next_prod_idx = rpc->prod_idx + 1;
- if (next_prod_idx == AER_ERROR_SOURCES_MAX)
- next_prod_idx = 0;
- if (next_prod_idx == rpc->cons_idx) {
- /*
- * Error Storm Condition - possibly the same error occurred.
- * Drop the error.
- */
- spin_unlock_irqrestore(&rpc->e_lock, flags);
+ if (!kfifo_put(&rpc->aer_fifo, e_src))
return IRQ_HANDLED;
- }
- rpc->e_sources[rpc->prod_idx].status = status;
- rpc->e_sources[rpc->prod_idx].id = id;
- rpc->prod_idx = next_prod_idx;
- spin_unlock_irqrestore(&rpc->e_lock, flags);
-
- /* Invoke DPC handler */
- schedule_work(&rpc->dpc_handler);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
-EXPORT_SYMBOL_GPL(aer_irq);
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
@@ -1423,33 +1357,6 @@ static void aer_disable_rootport(struct aer_rpc *rpc)
}
/**
- * aer_alloc_rpc - allocate Root Port data structure
- * @dev: pointer to the pcie_dev data structure
- *
- * Invoked when Root Port's AER service is loaded.
- */
-static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
-{
- struct aer_rpc *rpc;
-
- rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL);
- if (!rpc)
- return NULL;
-
- /* Initialize Root lock access, e_lock, to Root Error Status Reg */
- spin_lock_init(&rpc->e_lock);
-
- rpc->rpd = dev->port;
- INIT_WORK(&rpc->dpc_handler, aer_isr);
- mutex_init(&rpc->rpc_mutex);
-
- /* Use PCIe bus function to store rpc into PCIe device */
- set_service_data(dev, rpc);
-
- return rpc;
-}
-
-/**
* aer_remove - clean up resources
* @dev: pointer to the pcie_dev data structure
*
@@ -1459,16 +1366,7 @@ static void aer_remove(struct pcie_device *dev)
{
struct aer_rpc *rpc = get_service_data(dev);
- if (rpc) {
- /* If register interrupt service, it must be free. */
- if (rpc->isr)
- free_irq(dev->irq, dev);
-
- flush_work(&rpc->dpc_handler);
- aer_disable_rootport(rpc);
- kfree(rpc);
- set_service_data(dev, NULL);
- }
+ aer_disable_rootport(rpc);
}
/**
@@ -1481,27 +1379,24 @@ static int aer_probe(struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
- struct device *device = &dev->port->dev;
+ struct device *device = &dev->device;
- /* Alloc rpc data structure */
- rpc = aer_alloc_rpc(dev);
+ rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL);
if (!rpc) {
dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
- aer_remove(dev);
return -ENOMEM;
}
+ rpc->rpd = dev->port;
+ set_service_data(dev, rpc);
- /* Request IRQ ISR */
- status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
+ status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
+ IRQF_SHARED, "aerdrv", dev);
if (status) {
dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
dev->irq);
- aer_remove(dev);
return status;
}
- rpc->isr = 1;
-
aer_enable_rootport(rpc);
dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
return 0;
@@ -1526,7 +1421,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- rc = pci_bridge_secondary_bus_reset(dev);
+ rc = pci_bus_error_reset(dev);
pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
@@ -1541,18 +1436,6 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
-/**
- * aer_error_resume - clean up corresponding error status bits
- * @dev: pointer to Root Port's pci_dev data structure
- *
- * Invoked by Port Bus driver during nonfatal recovery.
- */
-static void aer_error_resume(struct pci_dev *dev)
-{
- pci_aer_clear_device_status(dev);
- pci_cleanup_aer_uncorrect_error_status(dev);
-}
-
static struct pcie_port_service_driver aerdriver = {
.name = "aer",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -1560,7 +1443,6 @@ static struct pcie_port_service_driver aerdriver = {
.probe = aer_probe,
.remove = aer_remove,
- .error_resume = aer_error_resume,
.reset_link = aer_root_reset,
};
@@ -1569,10 +1451,9 @@ static struct pcie_port_service_driver aerdriver = {
*
* Invoked when AER root service driver is loaded.
*/
-static int __init aer_service_init(void)
+int __init pcie_aer_init(void)
{
if (!pci_aer_available() || aer_acpi_firmware_first())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
-device_initcall(aer_service_init);
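Distilled, the rework above replaces the open-coded ring buffer and work item with the stock hard-IRQ/threaded-IRQ split: the hard handler snapshots the error source into a kfifo and returns IRQ_WAKE_THREAD, and the IRQ thread drains the fifo. A self-contained sketch of the pattern, with my_* names invented for illustration (requires <linux/kfifo.h> and <linux/interrupt.h>; INIT_KFIFO() must run before the IRQ is requested):

	struct my_rpc {
		DECLARE_KFIFO(fifo, struct aer_err_source, 128);
	};

	static irqreturn_t my_hard_irq(int irq, void *data)
	{
		struct my_rpc *rpc = data;
		struct aer_err_source e_src = {};

		/* ... read and clear hardware status into e_src ... */
		if (!kfifo_put(&rpc->fifo, e_src))
			return IRQ_HANDLED;	/* fifo full: drop the event */
		return IRQ_WAKE_THREAD;		/* defer processing to the thread */
	}

	static irqreturn_t my_thread_fn(int irq, void *data)
	{
		struct my_rpc *rpc = data;
		struct aer_err_source e_src;

		if (kfifo_is_empty(&rpc->fifo))
			return IRQ_NONE;
		while (kfifo_get(&rpc->fifo, &e_src))
			my_handle_one(&e_src);	/* hypothetical consumer */
		return IRQ_HANDLED;
	}

	/* Registration, e.g. in probe():
	 *	INIT_KFIFO(rpc->fifo);
	 *	devm_request_threaded_irq(dev, irq, my_hard_irq, my_thread_fn,
	 *				  IRQF_SHARED, "mydrv", rpc);
	 */

With one producer (the hard handler) and one consumer (the thread), the kfifo needs no lock, which is what lets the rework drop e_lock and rpc_mutex.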
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 0eb24346cad3..95d4759664b3 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -175,14 +176,48 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
return target;
}
+static int aer_inj_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ struct pci_ops *ops, *my_ops;
+ int rv;
+
+ ops = __find_pci_bus_ops(bus);
+ if (!ops)
+ return -1;
+
+ my_ops = bus->ops;
+ bus->ops = ops;
+ rv = ops->read(bus, devfn, where, size, val);
+ bus->ops = my_ops;
+
+ return rv;
+}
+
+static int aer_inj_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
+{
+ struct pci_ops *ops, *my_ops;
+ int rv;
+
+ ops = __find_pci_bus_ops(bus);
+ if (!ops)
+ return -1;
+
+ my_ops = bus->ops;
+ bus->ops = ops;
+ rv = ops->write(bus, devfn, where, size, val);
+ bus->ops = my_ops;
+
+ return rv;
+}
+
static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
u32 *sim;
struct aer_error *err;
unsigned long flags;
- struct pci_ops *ops;
- struct pci_ops *my_ops;
int domain;
int rv;
@@ -203,18 +238,7 @@ static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
return 0;
}
out:
- ops = __find_pci_bus_ops(bus);
- /*
- * pci_lock must already be held, so we can directly
- * manipulate bus->ops. Many config access functions,
- * including pci_generic_config_read() require the original
- * bus->ops be installed to function, so temporarily put them
- * back.
- */
- my_ops = bus->ops;
- bus->ops = ops;
- rv = ops->read(bus, devfn, where, size, val);
- bus->ops = my_ops;
+ rv = aer_inj_read(bus, devfn, where, size, val);
spin_unlock_irqrestore(&inject_lock, flags);
return rv;
}
@@ -226,8 +250,6 @@ static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
struct aer_error *err;
unsigned long flags;
int rw1cs;
- struct pci_ops *ops;
- struct pci_ops *my_ops;
int domain;
int rv;
@@ -251,18 +273,7 @@ static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
return 0;
}
out:
- ops = __find_pci_bus_ops(bus);
- /*
- * pci_lock must already be held, so we can directly
- * manipulate bus->ops. Many config access functions,
- * including pci_generic_config_write() require the original
- * bus->ops be installed to function, so temporarily put them
- * back.
- */
- my_ops = bus->ops;
- bus->ops = ops;
- rv = ops->write(bus, devfn, where, size, val);
- bus->ops = my_ops;
+ rv = aer_inj_write(bus, devfn, where, size, val);
spin_unlock_irqrestore(&inject_lock, flags);
return rv;
}
@@ -303,32 +314,13 @@ out:
return 0;
}
-static int find_aer_device_iter(struct device *device, void *data)
-{
- struct pcie_device **result = data;
- struct pcie_device *pcie_dev;
-
- if (device->bus == &pcie_port_bus_type) {
- pcie_dev = to_pcie_device(device);
- if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
- *result = pcie_dev;
- return 1;
- }
- }
- return 0;
-}
-
-static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
-{
- return device_for_each_child(&dev->dev, result, find_aer_device_iter);
-}
-
static int aer_inject(struct aer_error_inj *einj)
{
struct aer_error *err, *rperr;
struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
struct pci_dev *dev, *rpdev;
struct pcie_device *edev;
+ struct device *device;
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
@@ -464,7 +456,9 @@ static int aer_inject(struct aer_error_inj *einj)
if (ret)
goto out_put;
- if (find_aer_device(rpdev, &edev)) {
+ device = pcie_port_find_device(rpdev, PCIE_PORT_SERVICE_AER);
+ if (device) {
+ edev = to_pcie_device(device);
if (!get_service_data(edev)) {
dev_warn(&edev->device,
"aer_inject: AER service is not initialized\n");
@@ -474,7 +468,9 @@ static int aer_inject(struct aer_error_inj *einj)
dev_info(&edev->device,
"aer_inject: Injecting errors %08x/%08x into device %s\n",
einj->cor_status, einj->uncor_status, pci_name(dev));
- aer_irq(-1, edev);
+ local_irq_disable();
+ generic_handle_irq(edev->irq);
+ local_irq_enable();
} else {
pci_err(rpdev, "aer_inject: AER device not found\n");
ret = -ENODEV;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5326916715d2..dcb29cb76dc6 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -895,7 +895,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
struct pcie_link_state *link;
int blacklist = !!pcie_aspm_sanity_check(pdev);
- if (!aspm_support_enabled)
+ if (!aspm_support_enabled || aspm_disabled)
return;
if (pdev->link_state)
@@ -991,7 +991,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
 * All PCIe functions are in one slot; removing one function will remove
 * the whole slot, so just wait until we are the last function left.
*/
- if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
+ if (!list_empty(&parent->subordinate->devices))
goto out;
link = parent->link_state;
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index f03279fc87cd..e435d12e61a0 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -44,6 +44,58 @@ static const char * const rp_pio_error_string[] = {
"Memory Request Completion Timeout", /* Bit Position 18 */
};
+static struct dpc_dev *to_dpc_dev(struct pci_dev *dev)
+{
+ struct device *device;
+
+ device = pcie_port_find_device(dev, PCIE_PORT_SERVICE_DPC);
+ if (!device)
+ return NULL;
+ return get_service_data(to_pcie_device(device));
+}
+
+void pci_save_dpc_state(struct pci_dev *dev)
+{
+ struct dpc_dev *dpc;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ dpc = to_dpc_dev(dev);
+ if (!dpc)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
+ if (!save_state)
+ return;
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pci_read_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, cap);
+}
+
+void pci_restore_dpc_state(struct pci_dev *dev)
+{
+ struct dpc_dev *dpc;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ dpc = to_dpc_dev(dev);
+ if (!dpc)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
+ if (!save_state)
+ return;
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pci_write_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, *cap);
+}
+
static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
{
unsigned long timeout = jiffies + HZ;
@@ -67,18 +119,13 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
struct dpc_dev *dpc;
- struct pcie_device *pciedev;
- struct device *devdpc;
-
u16 cap;
/*
* DPC disables the Link automatically in hardware, so it has
* already been reset by the time we get here.
*/
- devdpc = pcie_port_find_device(pdev, PCIE_PORT_SERVICE_DPC);
- pciedev = to_pcie_device(devdpc);
- dpc = get_service_data(pciedev);
+ dpc = to_dpc_dev(pdev);
cap = dpc->cap_pos;
/*
@@ -93,10 +140,12 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
+ if (!pcie_wait_for_link(pdev, true))
+ return PCI_ERS_RESULT_DISCONNECT;
+
return PCI_ERS_RESULT_RECOVERED;
}
-
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
{
struct device *dev = &dpc->dev->device;
@@ -169,7 +218,7 @@ static irqreturn_t dpc_handler(int irq, void *context)
reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
- dev_warn(dev, "DPC %s detected, remove downstream devices\n",
+ dev_warn(dev, "DPC %s detected\n",
(reason == 0) ? "unmasked uncorrectable error" :
(reason == 1) ? "ERR_NONFATAL" :
(reason == 2) ? "ERR_FATAL" :
@@ -186,7 +235,7 @@ static irqreturn_t dpc_handler(int irq, void *context)
}
/* We configure DPC so it only triggers on ERR_FATAL */
- pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
+ pcie_do_recovery(pdev, pci_channel_io_frozen, PCIE_PORT_SERVICE_DPC);
return IRQ_HANDLED;
}
@@ -259,6 +308,8 @@ static int dpc_probe(struct pcie_device *dev)
FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size,
FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
+
+ pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
return status;
}
@@ -282,8 +333,7 @@ static struct pcie_port_service_driver dpcdriver = {
.reset_link = dpc_reset_link,
};
-static int __init dpc_service_init(void)
+int __init pcie_dpc_init(void)
{
return pcie_port_service_register(&dpcdriver);
}
-device_initcall(dpc_service_init);
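The save/restore pair above leans on the PCI core's saved-capability buffers; reconstructed from the calls used here, the lifecycle is roughly:

	/*
	 * probe:   pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC,
	 *          sizeof(u16)) allocates a per-capability scratch area once.
	 * save:    pci_save_dpc_state() finds that buffer via
	 *          pci_find_saved_ext_cap() and snapshots PCI_EXP_DPC_CTL
	 *          into save_state->cap.data[].
	 * restore: pci_restore_dpc_state() repeats the lookup and writes the
	 *          snapshot back after a reset or resume.
	 */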
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 708fd3a0d646..773197a12568 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -12,18 +12,12 @@
#include <linux/pci.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/aer.h>
#include "portdrv.h"
#include "../pci.h"
-struct aer_broadcast_data {
- enum pci_channel_state state;
- enum pci_ers_result result;
-};
-
static pci_ers_result_t merge_result(enum pci_ers_result orig,
enum pci_ers_result new)
{
@@ -49,66 +43,52 @@ static pci_ers_result_t merge_result(enum pci_ers_result orig,
return orig;
}
-static int report_error_detected(struct pci_dev *dev, void *data)
+static int report_error_detected(struct pci_dev *dev,
+ enum pci_channel_state state,
+ enum pci_ers_result *result)
{
pci_ers_result_t vote;
const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
-
- result_data = (struct aer_broadcast_data *) data;
device_lock(&dev->dev);
- dev->error_state = result_data->state;
-
- if (!dev->driver ||
+ if (!pci_dev_set_io_state(dev, state) ||
+ !dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->error_detected) {
- if (result_data->state == pci_channel_io_frozen &&
- dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
- /*
- * In case of fatal recovery, if one of down-
- * stream device has no driver. We might be
- * unable to recover because a later insmod
- * of a driver for this device is unaware of
- * its hw state.
- */
- pci_printk(KERN_DEBUG, dev, "device has %s\n",
- dev->driver ?
- "no AER-aware driver" : "no driver");
- }
-
/*
- * If there's any device in the subtree that does not
- * have an error_detected callback, returning
- * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
- * the subsequent mmio_enabled/slot_reset/resume
- * callbacks of "any" device in the subtree. All the
- * devices in the subtree are left in the error state
- * without recovery.
+ * If any device in the subtree does not have an error_detected
+ * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
+ * error callbacks for every device in the subtree, and the
+ * subtree is left in the disconnected error state.
*/
-
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
vote = PCI_ERS_RESULT_NO_AER_DRIVER;
else
vote = PCI_ERS_RESULT_NONE;
} else {
err_handler = dev->driver->err_handler;
- vote = err_handler->error_detected(dev, result_data->state);
- pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
+ vote = err_handler->error_detected(dev, state);
}
-
- result_data->result = merge_result(result_data->result, vote);
+ pci_uevent_ers(dev, vote);
+ *result = merge_result(*result, vote);
device_unlock(&dev->dev);
return 0;
}
+static int report_frozen_detected(struct pci_dev *dev, void *data)
+{
+ return report_error_detected(dev, pci_channel_io_frozen, data);
+}
+
+static int report_normal_detected(struct pci_dev *dev, void *data)
+{
+ return report_error_detected(dev, pci_channel_io_normal, data);
+}
+
static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
- pci_ers_result_t vote;
+ pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
-
- result_data = (struct aer_broadcast_data *) data;
device_lock(&dev->dev);
if (!dev->driver ||
@@ -118,7 +98,7 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
- result_data->result = merge_result(result_data->result, vote);
+ *result = merge_result(*result, vote);
out:
device_unlock(&dev->dev);
return 0;
@@ -126,11 +106,8 @@ out:
static int report_slot_reset(struct pci_dev *dev, void *data)
{
- pci_ers_result_t vote;
+ pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
-
- result_data = (struct aer_broadcast_data *) data;
device_lock(&dev->dev);
if (!dev->driver ||
@@ -140,7 +117,7 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
- result_data->result = merge_result(result_data->result, vote);
+ *result = merge_result(*result, vote);
out:
device_unlock(&dev->dev);
return 0;
@@ -151,17 +128,16 @@ static int report_resume(struct pci_dev *dev, void *data)
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
- dev->error_state = pci_channel_io_normal;
-
- if (!dev->driver ||
+ if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
+ !dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
goto out;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
- pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
out:
+ pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
device_unlock(&dev->dev);
return 0;
}
@@ -177,207 +153,86 @@ static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
int rc;
- rc = pci_bridge_secondary_bus_reset(dev);
+ rc = pci_bus_error_reset(dev);
pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
{
- struct pci_dev *udev;
pci_ers_result_t status;
struct pcie_port_service_driver *driver = NULL;
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /* Reset this port for all subordinates */
- udev = dev;
- } else {
- /* Reset the upstream component (likely downstream port) */
- udev = dev->bus->self;
- }
-
- /* Use the aer driver of the component firstly */
- driver = pcie_port_find_service(udev, service);
-
+ driver = pcie_port_find_service(dev, service);
if (driver && driver->reset_link) {
- status = driver->reset_link(udev);
- } else if (udev->has_secondary_link) {
- status = default_reset_link(udev);
+ status = driver->reset_link(dev);
+ } else if (dev->has_secondary_link) {
+ status = default_reset_link(dev);
} else {
pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
- pci_name(udev));
+ pci_name(dev));
return PCI_ERS_RESULT_DISCONNECT;
}
if (status != PCI_ERS_RESULT_RECOVERED) {
pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
- pci_name(udev));
+ pci_name(dev));
return PCI_ERS_RESULT_DISCONNECT;
}
return status;
}
-/**
- * broadcast_error_message - handle message broadcast to downstream drivers
- * @dev: pointer to from where in a hierarchy message is broadcasted down
- * @state: error state
- * @error_mesg: message to print
- * @cb: callback to be broadcasted
- *
- * Invoked during error recovery process. Once being invoked, the content
- * of error severity will be broadcasted to all downstream drivers in a
- * hierarchy in question.
- */
-static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
- enum pci_channel_state state,
- char *error_mesg,
- int (*cb)(struct pci_dev *, void *))
-{
- struct aer_broadcast_data result_data;
-
- pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
- result_data.state = state;
- if (cb == report_error_detected)
- result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
- else
- result_data.result = PCI_ERS_RESULT_RECOVERED;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /*
- * If the error is reported by a bridge, we think this error
- * is related to the downstream link of the bridge, so we
- * do error recovery on all subordinates of the bridge instead
- * of the bridge and clear the error status of the bridge.
- */
- if (cb == report_error_detected)
- dev->error_state = state;
- pci_walk_bus(dev->subordinate, cb, &result_data);
- if (cb == report_resume) {
- pci_aer_clear_device_status(dev);
- pci_cleanup_aer_uncorrect_error_status(dev);
- dev->error_state = pci_channel_io_normal;
- }
- } else {
- /*
- * If the error is reported by an end point, we think this
- * error is related to the upstream link of the end point.
- * The error is non fatal so the bus is ok; just invoke
- * the callback for the function that logged the error.
- */
- cb(dev, &result_data);
- }
-
- return result_data.result;
-}
-
-/**
- * pcie_do_fatal_recovery - handle fatal error recovery process
- * @dev: pointer to a pci_dev data structure of agent detecting an error
- *
- * Invoked when an error is fatal. Once being invoked, removes the devices
- * beneath this AER agent, followed by reset link e.g. secondary bus reset
- * followed by re-enumeration of devices.
- */
-void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
+void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
+ u32 service)
{
- struct pci_dev *udev;
- struct pci_bus *parent;
- struct pci_dev *pdev, *temp;
- pci_ers_result_t result;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- udev = dev;
+ pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
+ struct pci_bus *bus;
+
+ /*
+ * Error recovery runs on all subordinates of the first downstream port.
+ * If the downstream port detected the error, it is cleared at the end.
+ */
+ if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM))
+ dev = dev->bus->self;
+ bus = dev->subordinate;
+
+ pci_dbg(dev, "broadcast error_detected message\n");
+ if (state == pci_channel_io_frozen)
+ pci_walk_bus(bus, report_frozen_detected, &status);
else
- udev = dev->bus->self;
-
- parent = udev->subordinate;
- pci_lock_rescan_remove();
- pci_dev_get(dev);
- list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
- bus_list) {
- pci_dev_get(pdev);
- pci_dev_set_disconnected(pdev, NULL);
- if (pci_has_subordinate(pdev))
- pci_walk_bus(pdev->subordinate,
- pci_dev_set_disconnected, NULL);
- pci_stop_and_remove_bus_device(pdev);
- pci_dev_put(pdev);
- }
-
- result = reset_link(udev, service);
+ pci_walk_bus(bus, report_normal_detected, &status);
- if ((service == PCIE_PORT_SERVICE_AER) &&
- (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
- /*
- * If the error is reported by a bridge, we think this error
- * is related to the downstream link of the bridge, so we
- * do error recovery on all subordinates of the bridge instead
- * of the bridge and clear the error status of the bridge.
- */
- pci_aer_clear_fatal_status(dev);
- pci_aer_clear_device_status(dev);
- }
+ if (state == pci_channel_io_frozen &&
+ reset_link(dev, service) != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
- if (result == PCI_ERS_RESULT_RECOVERED) {
- if (pcie_wait_for_link(udev, true))
- pci_rescan_bus(udev->bus);
- pci_info(dev, "Device recovery from fatal error successful\n");
- } else {
- pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
- pci_info(dev, "Device recovery from fatal error failed\n");
+ if (status == PCI_ERS_RESULT_CAN_RECOVER) {
+ status = PCI_ERS_RESULT_RECOVERED;
+ pci_dbg(dev, "broadcast mmio_enabled message\n");
+ pci_walk_bus(bus, report_mmio_enabled, &status);
}
- pci_dev_put(dev);
- pci_unlock_rescan_remove();
-}
-
-/**
- * pcie_do_nonfatal_recovery - handle nonfatal error recovery process
- * @dev: pointer to a pci_dev data structure of agent detecting an error
- *
- * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast
- * error detected message to all downstream drivers within a hierarchy in
- * question and return the returned code.
- */
-void pcie_do_nonfatal_recovery(struct pci_dev *dev)
-{
- pci_ers_result_t status;
- enum pci_channel_state state;
-
- state = pci_channel_io_normal;
-
- status = broadcast_error_message(dev,
- state,
- "error_detected",
- report_error_detected);
-
- if (status == PCI_ERS_RESULT_CAN_RECOVER)
- status = broadcast_error_message(dev,
- state,
- "mmio_enabled",
- report_mmio_enabled);
-
if (status == PCI_ERS_RESULT_NEED_RESET) {
/*
* TODO: Should call platform-specific
* functions to reset slot before calling
* drivers' slot_reset callbacks?
*/
- status = broadcast_error_message(dev,
- state,
- "slot_reset",
- report_slot_reset);
+ status = PCI_ERS_RESULT_RECOVERED;
+ pci_dbg(dev, "broadcast slot_reset message\n");
+ pci_walk_bus(bus, report_slot_reset, &status);
}
if (status != PCI_ERS_RESULT_RECOVERED)
goto failed;
- broadcast_error_message(dev,
- state,
- "resume",
- report_resume);
+ pci_dbg(dev, "broadcast resume message\n");
+ pci_walk_bus(bus, report_resume, &status);
+ pci_aer_clear_device_status(dev);
+ pci_cleanup_aer_uncorrect_error_status(dev);
pci_info(dev, "AER: Device recovery successful\n");
return;
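For reference, the callbacks that pcie_do_recovery() walks are the ones drivers export through struct pci_error_handlers. A minimal driver-side sketch (my_* names are illustrative; the callback signatures are the stock ones from <linux/pci.h>):

	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
						  enum pci_channel_state state)
	{
		if (state == pci_channel_io_perm_failure)
			return PCI_ERS_RESULT_DISCONNECT;
		/* quiesce MMIO and DMA here */
		return PCI_ERS_RESULT_NEED_RESET;
	}

	static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
	{
		pci_restore_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	static void my_resume(struct pci_dev *pdev)
	{
		/* restart I/O */
	}

	static const struct pci_error_handlers my_err_handler = {
		.error_detected	= my_error_detected,
		.slot_reset	= my_slot_reset,
		.resume		= my_resume,
	};

Returning PCI_ERS_RESULT_NEED_RESET from error_detected() is what steers the walk above into the slot_reset broadcast.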
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 3ed67676ea2a..0dbcf429089f 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -432,6 +432,31 @@ static void pcie_pme_remove(struct pcie_device *srv)
kfree(get_service_data(srv));
}
+static int pcie_pme_runtime_suspend(struct pcie_device *srv)
+{
+ struct pcie_pme_service_data *data = get_service_data(srv);
+
+ spin_lock_irq(&data->lock);
+ pcie_pme_interrupt_enable(srv->port, false);
+ pcie_clear_root_pme_status(srv->port);
+ data->noirq = true;
+ spin_unlock_irq(&data->lock);
+
+ return 0;
+}
+
+static int pcie_pme_runtime_resume(struct pcie_device *srv)
+{
+ struct pcie_pme_service_data *data = get_service_data(srv);
+
+ spin_lock_irq(&data->lock);
+ pcie_pme_interrupt_enable(srv->port, true);
+ data->noirq = false;
+ spin_unlock_irq(&data->lock);
+
+ return 0;
+}
+
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -439,6 +464,8 @@ static struct pcie_port_service_driver pcie_pme_driver = {
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
+ .runtime_suspend = pcie_pme_runtime_suspend,
+ .runtime_resume = pcie_pme_runtime_resume,
.resume = pcie_pme_resume,
.remove = pcie_pme_remove,
};
@@ -446,8 +473,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
/**
* pcie_pme_service_init - Register the PCIe PME service driver.
*/
-static int __init pcie_pme_service_init(void)
+int __init pcie_pme_init(void)
{
return pcie_port_service_register(&pcie_pme_driver);
}
-device_initcall(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d59afa42fc14..e495f04394d0 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -23,6 +23,30 @@
#define PCIE_PORT_DEVICE_MAXSERVICES 4
+#ifdef CONFIG_PCIEAER
+int pcie_aer_init(void);
+#else
+static inline int pcie_aer_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+int pcie_hp_init(void);
+#else
+static inline int pcie_hp_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_PCIE_PME
+int pcie_pme_init(void);
+#else
+static inline int pcie_pme_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_PCIE_DPC
+int pcie_dpc_init(void);
+#else
+static inline int pcie_dpc_init(void) { return 0; }
+#endif
+
/* Port Type */
#define PCIE_ANY_PORT (~0)
@@ -52,6 +76,8 @@ struct pcie_port_service_driver {
int (*suspend) (struct pcie_device *dev);
int (*resume_noirq) (struct pcie_device *dev);
int (*resume) (struct pcie_device *dev);
+ int (*runtime_suspend) (struct pcie_device *dev);
+ int (*runtime_resume) (struct pcie_device *dev);
/* Device driver may resume normal operations */
void (*error_resume)(struct pci_dev *dev);
@@ -85,6 +111,8 @@ int pcie_port_device_register(struct pci_dev *dev);
int pcie_port_device_suspend(struct device *dev);
int pcie_port_device_resume_noirq(struct device *dev);
int pcie_port_device_resume(struct device *dev);
+int pcie_port_device_runtime_suspend(struct device *dev);
+int pcie_port_device_runtime_resume(struct device *dev);
#endif
void pcie_port_device_remove(struct pci_dev *dev);
int __must_check pcie_port_bus_register(void);
@@ -123,10 +151,6 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
}
#endif
-#ifdef CONFIG_PCIEAER
-irqreturn_t aer_irq(int irq, void *context);
-#endif
-
struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
u32 service);
struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 7c37d815229e..f458ac9cb70c 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -395,6 +395,26 @@ int pcie_port_device_resume(struct device *dev)
size_t off = offsetof(struct pcie_port_service_driver, resume);
return device_for_each_child(dev, &off, pm_iter);
}
+
+/**
+ * pcie_port_device_runtime_suspend - runtime suspend port services
+ * @dev: PCI Express port to handle
+ */
+int pcie_port_device_runtime_suspend(struct device *dev)
+{
+ size_t off = offsetof(struct pcie_port_service_driver, runtime_suspend);
+ return device_for_each_child(dev, &off, pm_iter);
+}
+
+/**
+ * pcie_port_device_runtime_resume - runtime resume port services
+ * @dev: PCI Express port to handle
+ */
+int pcie_port_device_runtime_resume(struct device *dev)
+{
+ size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
+ return device_for_each_child(dev, &off, pm_iter);
+}
#endif /* PM */
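pm_iter() itself is outside this hunk; judging from the offsetof() calls above, it reads the callback pointer stored at the given byte offset inside struct pcie_port_service_driver, so a single iterator can dispatch any of the PM hooks. An assumed reconstruction:

	static int pm_iter(struct device *dev, void *data)
	{
		struct pcie_port_service_driver *service_driver;
		size_t offset = *(size_t *)data;
		int (*cb)(struct pcie_device *);

		if (dev->bus == &pcie_port_bus_type && dev->driver) {
			service_driver = to_service_driver(dev->driver);
			/* fetch the callback at the requested offset */
			cb = *(int (**)(struct pcie_device *))
					((void *)service_driver + offset);
			if (cb)
				return cb(to_pcie_device(dev));
		}
		return 0;
	}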
static int remove_iter(struct device *dev, void *data)
@@ -466,6 +486,7 @@ struct device *pcie_port_find_device(struct pci_dev *dev,
device = pdrvs.dev;
return device;
}
+EXPORT_SYMBOL_GPL(pcie_port_find_device);
/**
* pcie_port_device_remove - unregister PCI Express port service devices
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index eef22dc29140..0acca3596807 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -45,12 +45,10 @@ __setup("pcie_ports=", pcie_port_setup);
#ifdef CONFIG_PM
static int pcie_port_runtime_suspend(struct device *dev)
{
- return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
-}
+ if (!to_pci_dev(dev)->bridge_d3)
+ return -EBUSY;
-static int pcie_port_runtime_resume(struct device *dev)
-{
- return 0;
+ return pcie_port_device_runtime_suspend(dev);
}
static int pcie_port_runtime_idle(struct device *dev)
@@ -73,7 +71,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.restore_noirq = pcie_port_device_resume_noirq,
.restore = pcie_port_device_resume,
.runtime_suspend = pcie_port_runtime_suspend,
- .runtime_resume = pcie_port_runtime_resume,
+ .runtime_resume = pcie_port_device_runtime_resume,
.runtime_idle = pcie_port_runtime_idle,
};
@@ -109,8 +107,8 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
- dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED);
+ dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NEVER_SKIP |
+ DPM_FLAG_SMART_SUSPEND);
if (pci_bridge_d3_possible(dev)) {
/*
@@ -146,6 +144,13 @@ static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
return PCI_ERS_RESULT_CAN_RECOVER;
}
+static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
+{
+ pci_restore_state(dev);
+ pci_save_state(dev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
{
return PCI_ERS_RESULT_RECOVERED;
@@ -185,6 +190,7 @@ static const struct pci_device_id port_pci_ids[] = { {
static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
+ .slot_reset = pcie_portdrv_slot_reset,
.mmio_enabled = pcie_portdrv_mmio_enabled,
.resume = pcie_portdrv_err_resume,
};
@@ -226,11 +232,20 @@ static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = {
{}
};
+static void __init pcie_init_services(void)
+{
+ pcie_aer_init();
+ pcie_pme_init();
+ pcie_dpc_init();
+ pcie_hp_init();
+}
+
static int __init pcie_portdrv_init(void)
{
if (pcie_ports_disabled)
return -EACCES;
+ pcie_init_services();
dmi_check_system(pcie_portdrv_dmi_table);
return pci_register_driver(&pcie_portdriver);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 201f9e5ff55c..b1c05b5054a0 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -713,6 +713,7 @@ static void pci_set_bus_speed(struct pci_bus *bus)
pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
+ bridge->link_active_reporting = !!(linkcap & PCI_EXP_LNKCAP_DLLLARC);
pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
pcie_update_link_speed(bus, linksta);
@@ -1438,12 +1439,29 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev)
return PCI_CFG_SPACE_EXP_SIZE;
}
+#ifdef CONFIG_PCI_IOV
+static bool is_vf0(struct pci_dev *dev)
+{
+ if (pci_iov_virtfn_devfn(dev->physfn, 0) == dev->devfn &&
+ pci_iov_virtfn_bus(dev->physfn, 0) == dev->bus->number)
+ return true;
+
+ return false;
+}
+#endif
+
int pci_cfg_space_size(struct pci_dev *dev)
{
int pos;
u32 status;
u16 class;
+#ifdef CONFIG_PCI_IOV
+ /* Read cached value for all VFs except for VF0 */
+ if (dev->is_virtfn && !is_vf0(dev))
+ return dev->physfn->sriov->cfg_size;
+#endif
+
if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
return PCI_CFG_SPACE_SIZE;
@@ -2143,7 +2161,7 @@ static void pci_release_dev(struct device *dev)
pcibios_release_device(pci_dev);
pci_bus_put(pci_dev->bus);
kfree(pci_dev->driver_override);
- kfree(pci_dev->dma_alias_mask);
+ bitmap_free(pci_dev->dma_alias_mask);
kfree(pci_dev);
}
@@ -2397,8 +2415,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
- pci_set_dma_max_seg_size(dev, 65536);
- pci_set_dma_seg_boundary(dev, 0xffffffff);
+ dma_set_max_seg_size(&dev->dev, 65536);
+ dma_set_seg_boundary(&dev->dev, 0xffffffff);
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6bc27b7fd452..4700d24e5d55 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3190,7 +3190,11 @@ static void disable_igfx_irq(struct pci_dev *dev)
pci_iounmap(dev, regs);
}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
@@ -4987,7 +4991,6 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
void __iomem *mmio;
struct ntb_info_regs __iomem *mmio_ntb;
struct ntb_ctrl_regs __iomem *mmio_ctrl;
- struct sys_info_regs __iomem *mmio_sys_info;
u64 partition_map;
u8 partition;
int pp;
@@ -5008,7 +5011,6 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;
- mmio_sys_info = mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
partition = ioread8(&mmio_ntb->partition_id);
@@ -5057,59 +5059,37 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
pci_iounmap(pdev, mmio);
pci_disable_device(pdev);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576,
- quirk_switchtec_ntb_dma_alias);
+#define SWITCHTEC_QUIRK(vid) \
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
+ PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
+
+SWITCHTEC_QUIRK(0x8531); /* PFX 24xG3 */
+SWITCHTEC_QUIRK(0x8532); /* PFX 32xG3 */
+SWITCHTEC_QUIRK(0x8533); /* PFX 48xG3 */
+SWITCHTEC_QUIRK(0x8534); /* PFX 64xG3 */
+SWITCHTEC_QUIRK(0x8535); /* PFX 80xG3 */
+SWITCHTEC_QUIRK(0x8536); /* PFX 96xG3 */
+SWITCHTEC_QUIRK(0x8541); /* PSX 24xG3 */
+SWITCHTEC_QUIRK(0x8542); /* PSX 32xG3 */
+SWITCHTEC_QUIRK(0x8543); /* PSX 48xG3 */
+SWITCHTEC_QUIRK(0x8544); /* PSX 64xG3 */
+SWITCHTEC_QUIRK(0x8545); /* PSX 80xG3 */
+SWITCHTEC_QUIRK(0x8546); /* PSX 96xG3 */
+SWITCHTEC_QUIRK(0x8551); /* PAX 24XG3 */
+SWITCHTEC_QUIRK(0x8552); /* PAX 32XG3 */
+SWITCHTEC_QUIRK(0x8553); /* PAX 48XG3 */
+SWITCHTEC_QUIRK(0x8554); /* PAX 64XG3 */
+SWITCHTEC_QUIRK(0x8555); /* PAX 80XG3 */
+SWITCHTEC_QUIRK(0x8556); /* PAX 96XG3 */
+SWITCHTEC_QUIRK(0x8561); /* PFXL 24XG3 */
+SWITCHTEC_QUIRK(0x8562); /* PFXL 32XG3 */
+SWITCHTEC_QUIRK(0x8563); /* PFXL 48XG3 */
+SWITCHTEC_QUIRK(0x8564); /* PFXL 64XG3 */
+SWITCHTEC_QUIRK(0x8565); /* PFXL 80XG3 */
+SWITCHTEC_QUIRK(0x8566); /* PFXL 96XG3 */
+SWITCHTEC_QUIRK(0x8571); /* PFXI 24XG3 */
+SWITCHTEC_QUIRK(0x8572); /* PFXI 32XG3 */
+SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
+SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
+SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
+SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 461e7fd2756f..e9c6b120cf45 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -25,9 +25,6 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_dev_assign_added(dev, false);
}
-
- if (dev->bus->self)
- pcie_aspm_exit_link_state(dev);
}
static void pci_destroy_dev(struct pci_dev *dev)
@@ -41,6 +38,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
+ pcie_aspm_exit_link_state(dev);
pci_bridge_d3_update(dev);
pci_free_resources(dev);
put_device(&dev->dev);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 79b1824e83b4..ed960436df5e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -811,6 +811,8 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus,
static resource_size_t calculate_iosize(resource_size_t size,
resource_size_t min_size,
resource_size_t size1,
+ resource_size_t add_size,
+ resource_size_t children_add_size,
resource_size_t old_size,
resource_size_t align)
{
@@ -823,15 +825,18 @@ static resource_size_t calculate_iosize(resource_size_t size,
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
- size = ALIGN(size + size1, align);
+ size = size + size1;
if (size < old_size)
size = old_size;
+
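+	/*
+	 * Grow the window to at least the optional add_size, add room
+	 * for the children's own optional allocations on top, then
+	 * align the result.
+	 */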
+ size = ALIGN(max(size, add_size) + children_add_size, align);
return size;
}
static resource_size_t calculate_memsize(resource_size_t size,
resource_size_t min_size,
- resource_size_t size1,
+ resource_size_t add_size,
+ resource_size_t children_add_size,
resource_size_t old_size,
resource_size_t align)
{
@@ -841,7 +846,8 @@ static resource_size_t calculate_memsize(resource_size_t size,
old_size = 0;
if (size < old_size)
size = old_size;
- size = ALIGN(size + size1, align);
+
+ size = ALIGN(max(size, add_size) + children_add_size, align);
return size;
}
@@ -930,12 +936,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
}
}
- size0 = calculate_iosize(size, min_size, size1,
+ size0 = calculate_iosize(size, min_size, size1, 0, 0,
resource_size(b_res), min_align);
- if (children_add_size > add_size)
- add_size = children_add_size;
- size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
- calculate_iosize(size, min_size, add_size + size1,
+ size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
+ calculate_iosize(size, min_size, size1, add_size, children_add_size,
resource_size(b_res), min_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
@@ -1079,12 +1083,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
min_align = calculate_mem_align(aligns, max_order);
min_align = max(min_align, window_alignment(bus, b_res->flags));
- size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
add_align = max(min_align, add_align);
- if (children_add_size > add_size)
- add_size = children_add_size;
- size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
- calculate_memsize(size, min_size, add_size,
+ size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
+ calculate_memsize(size, min_size, add_size, children_add_size,
resource_size(b_res), add_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index e634229ece89..c46d5e1ff536 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -14,7 +14,6 @@
struct kset *pci_slots_kset;
EXPORT_SYMBOL_GPL(pci_slots_kset);
-static DEFINE_MUTEX(pci_slot_mutex);
static ssize_t pci_slot_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -371,7 +370,7 @@ void pci_hp_create_module_link(struct pci_slot *pci_slot)
if (!slot || !slot->ops)
return;
- kobj = kset_find_obj(module_kset, slot->ops->mod_name);
+ kobj = kset_find_obj(module_kset, slot->mod_name);
if (!kobj)
return;
ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 9671ded549f0..b31abe35ed2c 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -230,7 +230,7 @@ static int electra_cf_probe(struct platform_device *ofdev)
if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
(__ioremap_at(io.start, cf->io_virt, cf->io_size,
- pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL)) {
+ pgprot_noncached(PAGE_KERNEL)) == NULL)) {
dev_err(device, "can't ioremap ranges\n");
status = -ENOMEM;
goto fail1;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index c89d3effd99d..60f949e2a684 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -43,6 +43,7 @@ config PHY_XGENE
source "drivers/phy/allwinner/Kconfig"
source "drivers/phy/amlogic/Kconfig"
source "drivers/phy/broadcom/Kconfig"
+source "drivers/phy/cadence/Kconfig"
source "drivers/phy/hisilicon/Kconfig"
source "drivers/phy/lantiq/Kconfig"
source "drivers/phy/marvell/Kconfig"
@@ -54,6 +55,7 @@ source "drivers/phy/ralink/Kconfig"
source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
+source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index ce8339ff0022..0301e25d07c1 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_ARCH_RENESAS) += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += broadcom/ \
+ cadence/ \
hisilicon/ \
marvell/ \
motorola/ \
@@ -22,5 +23,6 @@ obj-y += broadcom/ \
qualcomm/ \
ralink/ \
samsung/ \
+ socionext/ \
st/ \
ti/
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 8786a9674471..aa917a61071d 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -60,7 +60,8 @@ config PHY_NS2_USB_DRD
config PHY_BRCM_SATA
tristate "Broadcom SATA PHY driver"
- depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || COMPILE_TEST
+ depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || \
+ ARCH_BCM_63XX || COMPILE_TEST
depends on OF
select GENERIC_PHY
default ARCH_BCM_IPROC
diff --git a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
index 0f4ac5d63cff..b074682d9dd8 100644
--- a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
@@ -153,8 +153,8 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
struct cygnus_pcie_phy *p;
if (of_property_read_u32(child, "reg", &id)) {
- dev_err(dev, "missing reg property for %s\n",
- child->name);
+ dev_err(dev, "missing reg property for %pOFn\n",
+ child);
ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 8708ea3b4d6d..0f4a06ff7fd3 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -47,6 +47,7 @@ enum brcm_sata_phy_version {
BRCM_SATA_PHY_IPROC_NS2,
BRCM_SATA_PHY_IPROC_NSP,
BRCM_SATA_PHY_IPROC_SR,
+ BRCM_SATA_PHY_DSL_28NM,
};
enum brcm_sata_phy_rxaeq_mode {
@@ -96,7 +97,10 @@ enum sata_phy_regs {
PLLCONTROL_0_FREQ_DET_RESTART = BIT(13),
PLLCONTROL_0_FREQ_MONITOR = BIT(12),
PLLCONTROL_0_SEQ_START = BIT(15),
+ PLL_CAP_CHARGE_TIME = 0x83,
+ PLL_VCO_CAL_THRESH = 0x84,
PLL_CAP_CONTROL = 0x85,
+ PLL_FREQ_DET_TIME = 0x86,
PLL_ACTRL2 = 0x8b,
PLL_ACTRL2_SELDIV_MASK = 0x1f,
PLL_ACTRL2_SELDIV_SHIFT = 9,
@@ -106,6 +110,9 @@ enum sata_phy_regs {
PLL1_ACTRL2 = 0x82,
PLL1_ACTRL3 = 0x83,
PLL1_ACTRL4 = 0x84,
+ PLL1_ACTRL5 = 0x85,
+ PLL1_ACTRL6 = 0x86,
+ PLL1_ACTRL7 = 0x87,
TX_REG_BANK = 0x070,
TX_ACTRL0 = 0x80,
@@ -119,6 +126,8 @@ enum sata_phy_regs {
AEQ_FRC_EQ_FORCE = BIT(0),
AEQ_FRC_EQ_FORCE_VAL = BIT(1),
AEQRX_REG_BANK_1 = 0xe0,
+ AEQRX_SLCAL0_CTRL0 = 0x82,
+ AEQRX_SLCAL1_CTRL0 = 0x86,
OOB_REG_BANK = 0x150,
OOB1_REG_BANK = 0x160,
@@ -168,6 +177,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
switch (priv->version) {
case BRCM_SATA_PHY_STB_28NM:
case BRCM_SATA_PHY_IPROC_NS2:
+ case BRCM_SATA_PHY_DSL_28NM:
size = SATA_PCB_REG_28NM_SPACE_SIZE;
break;
case BRCM_SATA_PHY_STB_40NM:
@@ -482,6 +492,61 @@ static int brcm_sr_sata_init(struct brcm_sata_port *port)
return 0;
}
+static int brcm_dsl_sata_init(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ struct device *dev = port->phy_priv->dev;
+ unsigned int try;
+ u32 tmp;
+
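+	/*
+	 * Program the PLL1 analog controls, then toggle bit 0 of
+	 * PLLCONTROL_0 (0x3089 -> 0x3088). The magic values are
+	 * hardware-specific, presumably from the vendor init sequence.
+	 */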
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL7, 0, 0x873);
+
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0xc000);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ 0, 0x3089);
+ usleep_range(1000, 2000);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ 0, 0x3088);
+ usleep_range(1000, 2000);
+
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL0_CTRL0,
+ 0, 0x3000);
+
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL1_CTRL0,
+ 0, 0x3000);
+ usleep_range(1000, 2000);
+
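+	/*
+	 * Calibration timing: capacitor charge time, VCO calibration
+	 * threshold and frequency-detect time, per the register names.
+	 */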
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CHARGE_TIME, 0, 0x32);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_VCO_CAL_THRESH, 0, 0xa);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_FREQ_DET_TIME, 0, 0x64);
+ usleep_range(1000, 2000);
+
+ /* Acquire PLL lock */
+ try = 50;
+ while (try) {
+ tmp = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ BLOCK0_XGXSSTATUS);
+ if (tmp & BLOCK0_XGXSSTATUS_PLL_LOCK)
+ break;
+ msleep(20);
+ try--;
+	}
+
+ if (!try) {
+ /* PLL did not lock; give up */
+ dev_err(dev, "port%d PLL did not lock\n", port->portnum);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "port%d initialized\n", port->portnum);
+
+ return 0;
+}
+
static int brcm_sata_phy_init(struct phy *phy)
{
int rc;
@@ -501,6 +566,9 @@ static int brcm_sata_phy_init(struct phy *phy)
case BRCM_SATA_PHY_IPROC_SR:
rc = brcm_sr_sata_init(port);
break;
+ case BRCM_SATA_PHY_DSL_28NM:
+ rc = brcm_dsl_sata_init(port);
+ break;
default:
rc = -ENODEV;
}
@@ -552,6 +620,8 @@ static const struct of_device_id brcm_sata_phy_of_match[] = {
.data = (void *)BRCM_SATA_PHY_IPROC_NSP },
{ .compatible = "brcm,iproc-sr-sata-phy",
.data = (void *)BRCM_SATA_PHY_IPROC_SR },
+ { .compatible = "brcm,bcm63138-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_DSL_28NM },
{},
};
MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match);
@@ -600,8 +670,8 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
struct brcm_sata_port *port;
if (of_property_read_u32(child, "reg", &id)) {
- dev_err(dev, "missing reg property in node %s\n",
- child->name);
+ dev_err(dev, "missing reg property in node %pOFn\n",
+ child);
ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index d1dab36fa5b7..f59b1dc30399 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -372,10 +372,8 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
clk_disable(priv->usb_30_clk);
phy_provider = devm_of_phy_provider_register(dev, brcm_usb_phy_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
- return 0;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig
new file mode 100644
index 000000000000..57fff7de4031
--- /dev/null
+++ b/drivers/phy/cadence/Kconfig
@@ -0,0 +1,10 @@
+#
+# Phy driver for Cadence MHDP DisplayPort controller
+#
+config PHY_CADENCE_DP
+ tristate "Cadence MHDP DisplayPort PHY driver"
+ depends on OF
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Support for Cadence MHDP DisplayPort PHY.
diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile
new file mode 100644
index 000000000000..e5b0a11cf28a
--- /dev/null
+++ b/drivers/phy/cadence/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PHY_CADENCE_DP) += phy-cadence-dp.o
diff --git a/drivers/phy/cadence/phy-cadence-dp.c b/drivers/phy/cadence/phy-cadence-dp.c
new file mode 100644
index 000000000000..bc10cb264b7a
--- /dev/null
+++ b/drivers/phy/cadence/phy-cadence-dp.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cadence MHDP DisplayPort SD0801 PHY driver.
+ *
+ * Copyright 2018 Cadence Design Systems, Inc.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#define DEFAULT_NUM_LANES 2
+#define MAX_NUM_LANES 4
+#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
+
+#define POLL_TIMEOUT_US 2000
+#define LANE_MASK 0x7
+
+/*
+ * Register offsets from the DPTX PHY register block base (i.e. MHDP
+ * register base + 0x30a00)
+ */
+#define PHY_AUX_CONFIG 0x00
+#define PHY_AUX_CTRL 0x04
+#define PHY_RESET 0x20
+#define PHY_PMA_XCVR_PLLCLK_EN 0x24
+#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28
+#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c
+#define PHY_POWER_STATE_LN_0 0x0000
+#define PHY_POWER_STATE_LN_1 0x0008
+#define PHY_POWER_STATE_LN_2 0x0010
+#define PHY_POWER_STATE_LN_3 0x0018
+#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30
+#define PHY_PMA_CMN_READY 0x34
+#define PHY_PMA_XCVR_TX_VMARGIN 0x38
+#define PHY_PMA_XCVR_TX_DEEMPH 0x3c
+
+/*
+ * Register offsets from the SD0801 PHY register block base (i.e. MHDP
+ * register base + 0x500000)
+ */
+#define CMN_SSM_BANDGAP_TMR 0x00084
+#define CMN_SSM_BIAS_TMR 0x00088
+#define CMN_PLLSM0_PLLPRE_TMR 0x000a8
+#define CMN_PLLSM0_PLLLOCK_TMR 0x000b0
+#define CMN_PLLSM1_PLLPRE_TMR 0x000c8
+#define CMN_PLLSM1_PLLLOCK_TMR 0x000d0
+#define CMN_BGCAL_INIT_TMR 0x00190
+#define CMN_BGCAL_ITER_TMR 0x00194
+#define CMN_IBCAL_INIT_TMR 0x001d0
+#define CMN_PLL0_VCOCAL_INIT_TMR 0x00210
+#define CMN_PLL0_VCOCAL_ITER_TMR 0x00214
+#define CMN_PLL0_VCOCAL_REFTIM_START 0x00218
+#define CMN_PLL0_VCOCAL_PLLCNT_START 0x00220
+#define CMN_PLL0_INTDIV_M0 0x00240
+#define CMN_PLL0_FRACDIVL_M0 0x00244
+#define CMN_PLL0_FRACDIVH_M0 0x00248
+#define CMN_PLL0_HIGH_THR_M0 0x0024c
+#define CMN_PLL0_DSM_DIAG_M0 0x00250
+#define CMN_PLL0_LOCK_PLLCNT_START 0x00278
+#define CMN_PLL1_VCOCAL_INIT_TMR 0x00310
+#define CMN_PLL1_VCOCAL_ITER_TMR 0x00314
+#define CMN_PLL1_DSM_DIAG_M0 0x00350
+#define CMN_TXPUCAL_INIT_TMR 0x00410
+#define CMN_TXPUCAL_ITER_TMR 0x00414
+#define CMN_TXPDCAL_INIT_TMR 0x00430
+#define CMN_TXPDCAL_ITER_TMR 0x00434
+#define CMN_RXCAL_INIT_TMR 0x00450
+#define CMN_RXCAL_ITER_TMR 0x00454
+#define CMN_SD_CAL_INIT_TMR 0x00490
+#define CMN_SD_CAL_ITER_TMR 0x00494
+#define CMN_SD_CAL_REFTIM_START 0x00498
+#define CMN_SD_CAL_PLLCNT_START 0x004a0
+#define CMN_PDIAG_PLL0_CTRL_M0 0x00680
+#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x00684
+#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x00690
+#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x00694
+#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x00698
+#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x006d0
+#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x006d4
+#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x00704
+#define XCVR_DIAG_PLLDRC_CTRL 0x10394
+#define XCVR_DIAG_HSCLK_SEL 0x10398
+#define XCVR_DIAG_HSCLK_DIV 0x1039c
+#define TX_PSC_A0 0x10400
+#define TX_PSC_A1 0x10404
+#define TX_PSC_A2 0x10408
+#define TX_PSC_A3 0x1040c
+#define RX_PSC_A0 0x20000
+#define RX_PSC_A1 0x20004
+#define RX_PSC_A2 0x20008
+#define RX_PSC_A3 0x2000c
+#define PHY_PLL_CFG 0x30038
+
+struct cdns_dp_phy {
+ void __iomem *base; /* DPTX registers base */
+ void __iomem *sd_base; /* SD0801 registers base */
+ u32 num_lanes; /* Number of lanes to use */
+ u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
+ struct device *dev;
+};
+
+static int cdns_dp_phy_init(struct phy *phy);
+static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
+ unsigned int lane);
+static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
+ unsigned int offset,
+ unsigned char start_bit,
+ unsigned char num_bits,
+ unsigned int val);
+
+static const struct phy_ops cdns_dp_phy_ops = {
+ .init = cdns_dp_phy_init,
+ .owner = THIS_MODULE,
+};
+
+static int cdns_dp_phy_init(struct phy *phy)
+{
+ unsigned char lane_bits;
+
+ struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy);
+
+ writel(0x0003, cdns_phy->base + PHY_AUX_CTRL); /* enable AUX */
+
+ /* PHY PMA registers configuration function */
+ cdns_dp_phy_pma_cfg(cdns_phy);
+
+	/*
+	 * Set the lanes' power state to A0 and the lanes' PLL clock
+	 * enable to 0.
+	 */
+ cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_0, 6, 0x0000);
+
+ if (cdns_phy->num_lanes >= 2) {
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_1, 6, 0x0000);
+
+ if (cdns_phy->num_lanes == 4) {
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_2, 6, 0);
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_3, 6, 0);
+ }
+ }
+
+ cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
+ 0, 1, 0x0000);
+
+ if (cdns_phy->num_lanes >= 2) {
+ cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
+ 1, 1, 0x0000);
+ if (cdns_phy->num_lanes == 4) {
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_PLLCLK_EN,
+ 2, 1, 0x0000);
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_PLLCLK_EN,
+ 3, 1, 0x0000);
+ }
+ }
+
+ /*
+ * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
+ * used lanes
+ */
+ lane_bits = (1 << cdns_phy->num_lanes) - 1;
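+	/*
+	 * The low nibble releases reset on the active lanes; the nibble
+	 * above it keeps the unused lanes electrically idle.
+	 */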
+ writel(((0xF & ~lane_bits) << 4) | (0xF & lane_bits),
+ cdns_phy->base + PHY_RESET);
+
+ /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
+ writel(0x0001, cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN);
+
+ /* PHY PMA registers configuration functions */
+ cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy);
+ cdns_dp_phy_pma_cmn_rate(cdns_phy);
+
+ /* take out of reset */
+ cdns_dp_phy_write_field(cdns_phy, PHY_RESET, 8, 1, 1);
+ cdns_dp_phy_wait_pma_cmn_ready(cdns_phy);
+ cdns_dp_phy_run(cdns_phy);
+
+ return 0;
+}
+
+static void cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_CMN_READY, reg,
+ reg & 1, 0, 500);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for PMA common ready\n");
+}
+
+static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int i;
+
+ /* PMA common configuration */
+ cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy);
+
+ /* PMA lane configuration to deal with multi-link operation */
+ for (i = 0; i < cdns_phy->num_lanes; i++)
+ cdns_dp_phy_pma_lane_cfg(cdns_phy, i);
+}
+
+static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
+{
+ /* refclock registers - assumes 25 MHz refclock */
+ writel(0x0019, cdns_phy->sd_base + CMN_SSM_BIAS_TMR);
+ writel(0x0032, cdns_phy->sd_base + CMN_PLLSM0_PLLPRE_TMR);
+ writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM0_PLLLOCK_TMR);
+ writel(0x0032, cdns_phy->sd_base + CMN_PLLSM1_PLLPRE_TMR);
+ writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM1_PLLLOCK_TMR);
+ writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_INIT_TMR);
+ writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_ITER_TMR);
+ writel(0x0019, cdns_phy->sd_base + CMN_IBCAL_INIT_TMR);
+ writel(0x001E, cdns_phy->sd_base + CMN_TXPUCAL_INIT_TMR);
+ writel(0x0006, cdns_phy->sd_base + CMN_TXPUCAL_ITER_TMR);
+ writel(0x001E, cdns_phy->sd_base + CMN_TXPDCAL_INIT_TMR);
+ writel(0x0006, cdns_phy->sd_base + CMN_TXPDCAL_ITER_TMR);
+ writel(0x02EE, cdns_phy->sd_base + CMN_RXCAL_INIT_TMR);
+ writel(0x0006, cdns_phy->sd_base + CMN_RXCAL_ITER_TMR);
+ writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_INIT_TMR);
+ writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_ITER_TMR);
+ writel(0x000E, cdns_phy->sd_base + CMN_SD_CAL_REFTIM_START);
+ writel(0x012B, cdns_phy->sd_base + CMN_SD_CAL_PLLCNT_START);
+ /* PLL registers */
+ writel(0x0409, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_PADJ_M0);
+ writel(0x1001, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_IADJ_M0);
+ writel(0x0F08, cdns_phy->sd_base + CMN_PDIAG_PLL0_FILT_PADJ_M0);
+ writel(0x0004, cdns_phy->sd_base + CMN_PLL0_DSM_DIAG_M0);
+ writel(0x00FA, cdns_phy->sd_base + CMN_PLL0_VCOCAL_INIT_TMR);
+ writel(0x0004, cdns_phy->sd_base + CMN_PLL0_VCOCAL_ITER_TMR);
+ writel(0x00FA, cdns_phy->sd_base + CMN_PLL1_VCOCAL_INIT_TMR);
+ writel(0x0004, cdns_phy->sd_base + CMN_PLL1_VCOCAL_ITER_TMR);
+ writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_REFTIM_START);
+}
+
+static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
+{
+ /* Assumes 25 MHz refclock */
+ switch (cdns_phy->max_bit_rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ writel(0x01B0, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x0120, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 2430:
+ case 3240:
+ writel(0x0184, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0xCCCD, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x0104, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ writel(0x0159, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0x999A, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x00E7, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ writel(0x0144, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x00D8, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ }
+
+ writel(0x0002, cdns_phy->sd_base + CMN_PDIAG_PLL0_CTRL_M0);
+ writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_PLLCNT_START);
+}
+
+static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int clk_sel_val = 0;
+ unsigned int hsclk_div_val = 0;
+ unsigned int i;
+
+ /* 16'h0000 for single DP link configuration */
+ writel(0x0000, cdns_phy->sd_base + PHY_PLL_CFG);
+
+ switch (cdns_phy->max_bit_rate) {
+ case 1620:
+ clk_sel_val = 0x0f01;
+ hsclk_div_val = 2;
+ break;
+ case 2160:
+ case 2430:
+ case 2700:
+ clk_sel_val = 0x0701;
+ hsclk_div_val = 1;
+ break;
+ case 3240:
+ clk_sel_val = 0x0b00;
+ hsclk_div_val = 2;
+ break;
+ case 4320:
+ case 5400:
+ clk_sel_val = 0x0301;
+ hsclk_div_val = 0;
+ break;
+ case 8100:
+ clk_sel_val = 0x0200;
+ hsclk_div_val = 0;
+ break;
+ }
+
+ writel(clk_sel_val, cdns_phy->sd_base + CMN_PDIAG_PLL0_CLK_SEL_M0);
+
+ /* PMA lane configuration to deal with multi-link operation */
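+	/*
+	 * Per-lane register blocks are 0x800 bytes apart (the lane
+	 * index lands in address bits [12:11]), hence (i << 11).
+	 */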
+ for (i = 0; i < cdns_phy->num_lanes; i++) {
+ writel(hsclk_div_val,
+		       cdns_phy->sd_base + (XCVR_DIAG_HSCLK_DIV | (i << 11)));
+ }
+}
+
+static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
+ unsigned int lane)
+{
+ unsigned int lane_bits = (lane & LANE_MASK) << 11;
+
+ /* Writing Tx/Rx Power State Controllers registers */
+ writel(0x00FB, cdns_phy->sd_base + (TX_PSC_A0 | lane_bits));
+ writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A2 | lane_bits));
+ writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A3 | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (RX_PSC_A0 | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (RX_PSC_A2 | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (RX_PSC_A3 | lane_bits));
+
+ writel(0x0001, cdns_phy->sd_base + (XCVR_DIAG_PLLDRC_CTRL | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (XCVR_DIAG_HSCLK_SEL | lane_bits));
+}
+
+static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int read_val;
+ u32 write_val1 = 0;
+ u32 write_val2 = 0;
+ u32 mask = 0;
+ int ret;
+
+ /*
+	 * Wait for the ACK of pma_xcvr_pllclk_en_ln_*, only on the
+	 * master lane.
+ */
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN_ACK,
+ read_val, read_val & 1, 0, POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link PLL clock enable ack\n");
+
+ ndelay(100);
+
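+	/*
+	 * The power-state request/ack registers carry one byte per
+	 * lane, each with a 6-bit state field (hence the 0x3f masks).
+	 */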
+ switch (cdns_phy->num_lanes) {
+
+ case 1: /* lane 0 */
+ write_val1 = 0x00000004;
+ write_val2 = 0x00000001;
+ mask = 0x0000003f;
+ break;
+ case 2: /* lane 0-1 */
+ write_val1 = 0x00000404;
+ write_val2 = 0x00000101;
+ mask = 0x00003f3f;
+ break;
+ case 4: /* lane 0-3 */
+ write_val1 = 0x04040404;
+ write_val2 = 0x01010101;
+ mask = 0x3f3f3f3f;
+ break;
+ }
+
+ writel(write_val1, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK,
+ read_val, (read_val & mask) == write_val1, 0,
+ POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link power state ack\n");
+
+ writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+ ndelay(100);
+
+ writel(write_val2, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK,
+ read_val, (read_val & mask) == write_val2, 0,
+ POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link power state ack\n");
+
+ writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+ ndelay(100);
+}
+
+static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
+ unsigned int offset,
+ unsigned char start_bit,
+ unsigned char num_bits,
+ unsigned int val)
+{
+ unsigned int read_val;
+
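+	/*
+	 * Read-modify-write: clear the num_bits-wide field at
+	 * start_bit, then OR in the new value.
+	 */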
+ read_val = readl(cdns_phy->base + offset);
+ writel(((val << start_bit) | (read_val & ~(((1 << num_bits) - 1) <<
+ start_bit))), cdns_phy->base + offset);
+}
+
+static int cdns_dp_phy_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct cdns_dp_phy *cdns_phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+ int err;
+
+ cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL);
+ if (!cdns_phy)
+ return -ENOMEM;
+
+ cdns_phy->dev = &pdev->dev;
+
+ phy = devm_phy_create(dev, NULL, &cdns_dp_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create DisplayPort PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cdns_phy->base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(cdns_phy->base))
+ return PTR_ERR(cdns_phy->base);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(cdns_phy->sd_base))
+ return PTR_ERR(cdns_phy->sd_base);
+
+ err = device_property_read_u32(dev, "num_lanes",
+ &(cdns_phy->num_lanes));
+ if (err)
+ cdns_phy->num_lanes = DEFAULT_NUM_LANES;
+
+ switch (cdns_phy->num_lanes) {
+ case 1:
+ case 2:
+ case 4:
+ /* valid number of lanes */
+ break;
+ default:
+ dev_err(dev, "unsupported number of lanes: %d\n",
+ cdns_phy->num_lanes);
+ return -EINVAL;
+ }
+
+ err = device_property_read_u32(dev, "max_bit_rate",
+ &(cdns_phy->max_bit_rate));
+ if (err)
+ cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE;
+
+ switch (cdns_phy->max_bit_rate) {
+ case 2160:
+ case 2430:
+ case 2700:
+ case 3240:
+ case 4320:
+ case 5400:
+ case 8100:
+ /* valid bit rate */
+ break;
+ default:
+		dev_err(dev, "unsupported max bit rate: %d Mbps\n",
+ cdns_phy->max_bit_rate);
+ return -EINVAL;
+ }
+
+ phy_set_drvdata(phy, cdns_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n",
+ cdns_phy->num_lanes,
+ cdns_phy->max_bit_rate / 1000,
+ cdns_phy->max_bit_rate % 1000);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id cdns_dp_phy_of_match[] = {
+ {
+ .compatible = "cdns,dp-phy"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cdns_dp_phy_of_match);
+
+static struct platform_driver cdns_dp_phy_driver = {
+ .probe = cdns_dp_phy_probe,
+ .driver = {
+ .name = "cdns-dp-phy",
+ .of_match_table = cdns_dp_phy_of_match,
+ }
+};
+module_platform_driver(cdns_dp_phy_driver);
+
+MODULE_AUTHOR("Cadence Design Systems, Inc.");
+MODULE_DESCRIPTION("Cadence MHDP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
index 986224fca9e9..f9e0dd19ff26 100644
--- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
@@ -156,7 +156,6 @@ static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv,
{
struct device *dev = priv->dev;
const __be32 *offset;
- int ret;
priv->reg_bits = of_device_get_match_data(dev);
@@ -196,10 +195,8 @@ static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv,
}
priv->phy_reset = devm_reset_control_get_optional(dev, "phy");
- if (IS_ERR(priv->phy_reset))
- return PTR_ERR(priv->phy_reset);
- return 0;
+ return PTR_ERR_OR_ZERO(priv->phy_reset);
}
static int ltq_rcu_usb2_phy_probe(struct platform_device *pdev)
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 68e321225400..6fb4b56e4c14 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -59,3 +59,14 @@ config PHY_PXA_28NM_USB2
The PHY driver will be used by Marvell udc/ehci/otg driver.
To compile this driver as a module, choose M here.
+
+config PHY_PXA_USB
+ tristate "Marvell PXA USB PHY Driver"
+ depends on ARCH_PXA || ARCH_MMP
+ select GENERIC_PHY
+ help
+	  Enable this to support the Marvell PXA USB PHY driver for
+	  Marvell SoCs. This driver handles PHY initialization and
+	  shutdown and is used by the Marvell udc/ehci/otg drivers.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile
index 5c3ec5d10e0d..3975b144f8ec 100644
--- a/drivers/phy/marvell/Makefile
+++ b/drivers/phy/marvell/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PHY_MVEBU_CP110_COMPHY) += phy-mvebu-cp110-comphy.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o
obj-$(CONFIG_PHY_PXA_28NM_USB2) += phy-pxa-28nm-usb2.o
+obj-$(CONFIG_PHY_PXA_USB) += phy-pxa-usb.o
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index c1bb6725e48f..a91fc67fc4e0 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -231,14 +231,14 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
struct phy_berlin_desc *phy_desc;
if (of_property_read_u32(child, "reg", &phy_id)) {
- dev_err(dev, "missing reg property in node %s\n",
- child->name);
+ dev_err(dev, "missing reg property in node %pOFn\n",
+ child);
ret = -EINVAL;
goto put_child;
}
if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) {
- dev_err(dev, "invalid reg in node %s\n", child->name);
+ dev_err(dev, "invalid reg in node %pOFn\n", child);
ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/marvell/phy-pxa-usb.c b/drivers/phy/marvell/phy-pxa-usb.c
new file mode 100644
index 000000000000..87ff7550b912
--- /dev/null
+++ b/drivers/phy/marvell/phy-pxa-usb.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Copyright (C) 2018 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+/* phy regs */
+#define UTMI_REVISION 0x0
+#define UTMI_CTRL 0x4
+#define UTMI_PLL 0x8
+#define UTMI_TX 0xc
+#define UTMI_RX 0x10
+#define UTMI_IVREF 0x14
+#define UTMI_T0 0x18
+#define UTMI_T1 0x1c
+#define UTMI_T2 0x20
+#define UTMI_T3 0x24
+#define UTMI_T4 0x28
+#define UTMI_T5 0x2c
+#define UTMI_RESERVE 0x30
+#define UTMI_USB_INT 0x34
+#define UTMI_DBG_CTL 0x38
+#define UTMI_OTG_ADDON 0x3c
+
+/* For UTMICTRL Register */
+#define UTMI_CTRL_USB_CLK_EN (1 << 31)
+/* pxa168 */
+#define UTMI_CTRL_SUSPEND_SET1 (1 << 30)
+#define UTMI_CTRL_SUSPEND_SET2 (1 << 29)
+#define UTMI_CTRL_RXBUF_PDWN (1 << 24)
+#define UTMI_CTRL_TXBUF_PDWN (1 << 11)
+
+#define UTMI_CTRL_INPKT_DELAY_SHIFT 30
+#define UTMI_CTRL_INPKT_DELAY_SOF_SHIFT 28
+#define UTMI_CTRL_PU_REF_SHIFT 20
+#define UTMI_CTRL_ARC_PULLDN_SHIFT 12
+#define UTMI_CTRL_PLL_PWR_UP_SHIFT 1
+#define UTMI_CTRL_PWR_UP_SHIFT 0
+
+/* For UTMI_PLL Register */
+#define UTMI_PLL_PLLCALI12_SHIFT 29
+#define UTMI_PLL_PLLCALI12_MASK (0x3 << 29)
+
+#define UTMI_PLL_PLLVDD18_SHIFT 27
+#define UTMI_PLL_PLLVDD18_MASK (0x3 << 27)
+
+#define UTMI_PLL_PLLVDD12_SHIFT 25
+#define UTMI_PLL_PLLVDD12_MASK (0x3 << 25)
+
+#define UTMI_PLL_CLK_BLK_EN_SHIFT 24
+#define CLK_BLK_EN (0x1 << 24)
+#define PLL_READY (0x1 << 23)
+#define KVCO_EXT (0x1 << 22)
+#define VCOCAL_START (0x1 << 21)
+
+#define UTMI_PLL_KVCO_SHIFT 15
+#define UTMI_PLL_KVCO_MASK (0x7 << 15)
+
+#define UTMI_PLL_ICP_SHIFT 12
+#define UTMI_PLL_ICP_MASK (0x7 << 12)
+
+#define UTMI_PLL_FBDIV_SHIFT 4
+#define UTMI_PLL_FBDIV_MASK (0xFF << 4)
+
+#define UTMI_PLL_REFDIV_SHIFT 0
+#define UTMI_PLL_REFDIV_MASK (0xF << 0)
+
+/* For UTMI_TX Register */
+#define UTMI_TX_REG_EXT_FS_RCAL_SHIFT 27
+#define UTMI_TX_REG_EXT_FS_RCAL_MASK (0xf << 27)
+
+#define UTMI_TX_REG_EXT_FS_RCAL_EN_SHIFT 26
+#define UTMI_TX_REG_EXT_FS_RCAL_EN_MASK (0x1 << 26)
+
+#define UTMI_TX_TXVDD12_SHIFT 22
+#define UTMI_TX_TXVDD12_MASK (0x3 << 22)
+
+#define UTMI_TX_CK60_PHSEL_SHIFT 17
+#define UTMI_TX_CK60_PHSEL_MASK (0xf << 17)
+
+#define UTMI_TX_IMPCAL_VTH_SHIFT 14
+#define UTMI_TX_IMPCAL_VTH_MASK (0x7 << 14)
+
+#define REG_RCAL_START (0x1 << 12)
+
+#define UTMI_TX_LOW_VDD_EN_SHIFT 11
+
+#define UTMI_TX_AMP_SHIFT 0
+#define UTMI_TX_AMP_MASK (0x7 << 0)
+
+/* For UTMI_RX Register */
+#define UTMI_REG_SQ_LENGTH_SHIFT 15
+#define UTMI_REG_SQ_LENGTH_MASK (0x3 << 15)
+
+#define UTMI_RX_SQ_THRESH_SHIFT 4
+#define UTMI_RX_SQ_THRESH_MASK (0xf << 4)
+
+#define UTMI_OTG_ADDON_OTG_ON (1 << 0)
+
+enum pxa_usb_phy_version {
+ PXA_USB_PHY_MMP2,
+ PXA_USB_PHY_PXA910,
+ PXA_USB_PHY_PXA168,
+};
+
+struct pxa_usb_phy {
+ struct phy *phy;
+ void __iomem *base;
+ enum pxa_usb_phy_version version;
+};
+
+/*****************************************************************************
+ * The registers read/write routines
+ *****************************************************************************/
+
+static unsigned int u2o_get(void __iomem *base, unsigned int offset)
+{
+ return readl_relaxed(base + offset);
+}
+
+static void u2o_set(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg |= value;
+ writel_relaxed(reg, base + offset);
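+	/* read back to flush the posted write */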
+ readl_relaxed(base + offset);
+}
+
+static void u2o_clear(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg &= ~value;
+ writel_relaxed(reg, base + offset);
+ readl_relaxed(base + offset);
+}
+
+static void u2o_write(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ writel_relaxed(value, base + offset);
+ readl_relaxed(base + offset);
+}
+
+static int pxa_usb_phy_init(struct phy *phy)
+{
+ struct pxa_usb_phy *pxa_usb_phy = phy_get_drvdata(phy);
+ void __iomem *base = pxa_usb_phy->base;
+ int loops;
+
+	dev_info(&phy->dev, "initializing Marvell PXA USB PHY\n");
+
+ /* Initialize the USB PHY power */
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA910) {
+ u2o_set(base, UTMI_CTRL, (1<<UTMI_CTRL_INPKT_DELAY_SOF_SHIFT)
+ | (1<<UTMI_CTRL_PU_REF_SHIFT));
+ }
+
+ u2o_set(base, UTMI_CTRL, 1<<UTMI_CTRL_PLL_PWR_UP_SHIFT);
+ u2o_set(base, UTMI_CTRL, 1<<UTMI_CTRL_PWR_UP_SHIFT);
+
+ /* UTMI_PLL settings */
+ u2o_clear(base, UTMI_PLL, UTMI_PLL_PLLVDD18_MASK
+ | UTMI_PLL_PLLVDD12_MASK | UTMI_PLL_PLLCALI12_MASK
+ | UTMI_PLL_FBDIV_MASK | UTMI_PLL_REFDIV_MASK
+ | UTMI_PLL_ICP_MASK | UTMI_PLL_KVCO_MASK);
+
+ u2o_set(base, UTMI_PLL, 0xee<<UTMI_PLL_FBDIV_SHIFT
+ | 0xb<<UTMI_PLL_REFDIV_SHIFT | 3<<UTMI_PLL_PLLVDD18_SHIFT
+ | 3<<UTMI_PLL_PLLVDD12_SHIFT | 3<<UTMI_PLL_PLLCALI12_SHIFT
+ | 1<<UTMI_PLL_ICP_SHIFT | 3<<UTMI_PLL_KVCO_SHIFT);
+
+ /* UTMI_TX */
+ u2o_clear(base, UTMI_TX, UTMI_TX_REG_EXT_FS_RCAL_EN_MASK
+ | UTMI_TX_TXVDD12_MASK | UTMI_TX_CK60_PHSEL_MASK
+ | UTMI_TX_IMPCAL_VTH_MASK | UTMI_TX_REG_EXT_FS_RCAL_MASK
+ | UTMI_TX_AMP_MASK);
+ u2o_set(base, UTMI_TX, 3<<UTMI_TX_TXVDD12_SHIFT
+ | 4<<UTMI_TX_CK60_PHSEL_SHIFT | 4<<UTMI_TX_IMPCAL_VTH_SHIFT
+ | 8<<UTMI_TX_REG_EXT_FS_RCAL_SHIFT | 3<<UTMI_TX_AMP_SHIFT);
+
+ /* UTMI_RX */
+ u2o_clear(base, UTMI_RX, UTMI_RX_SQ_THRESH_MASK
+ | UTMI_REG_SQ_LENGTH_MASK);
+ u2o_set(base, UTMI_RX, 7<<UTMI_RX_SQ_THRESH_SHIFT
+ | 2<<UTMI_REG_SQ_LENGTH_SHIFT);
+
+ /* UTMI_IVREF */
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA168) {
+ /*
+		 * Fix an interface issue between the Microsoft Altair
+		 * board and a NEC hub: set UTMI_IVREF from 0x4a3 to 0x4bf.
+ */
+ u2o_write(base, UTMI_IVREF, 0x4bf);
+ }
+
+ /* toggle VCOCAL_START bit of UTMI_PLL */
+ udelay(200);
+ u2o_set(base, UTMI_PLL, VCOCAL_START);
+ udelay(40);
+ u2o_clear(base, UTMI_PLL, VCOCAL_START);
+
+ /* toggle REG_RCAL_START bit of UTMI_TX */
+ udelay(400);
+ u2o_set(base, UTMI_TX, REG_RCAL_START);
+ udelay(40);
+ u2o_clear(base, UTMI_TX, REG_RCAL_START);
+ udelay(400);
+
+ /* Make sure PHY PLL is ready */
+ loops = 0;
+ while ((u2o_get(base, UTMI_PLL) & PLL_READY) == 0) {
+ mdelay(1);
+ loops++;
+ if (loops > 100) {
+ dev_warn(&phy->dev, "calibrate timeout, UTMI_PLL %x\n",
+ u2o_get(base, UTMI_PLL));
+ break;
+ }
+ }
+
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA168) {
+ u2o_set(base, UTMI_RESERVE, 1 << 5);
+ /* Turn on UTMI PHY OTG extension */
+ u2o_write(base, UTMI_OTG_ADDON, 1);
+ }
+
+ return 0;
+}
+
+static int pxa_usb_phy_exit(struct phy *phy)
+{
+ struct pxa_usb_phy *pxa_usb_phy = phy_get_drvdata(phy);
+ void __iomem *base = pxa_usb_phy->base;
+
+	dev_info(&phy->dev, "deinitializing Marvell PXA USB PHY\n");
+
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA168)
+ u2o_clear(base, UTMI_OTG_ADDON, UTMI_OTG_ADDON_OTG_ON);
+
+ u2o_clear(base, UTMI_CTRL, UTMI_CTRL_RXBUF_PDWN);
+ u2o_clear(base, UTMI_CTRL, UTMI_CTRL_TXBUF_PDWN);
+ u2o_clear(base, UTMI_CTRL, UTMI_CTRL_USB_CLK_EN);
+ u2o_clear(base, UTMI_CTRL, 1<<UTMI_CTRL_PWR_UP_SHIFT);
+ u2o_clear(base, UTMI_CTRL, 1<<UTMI_CTRL_PLL_PWR_UP_SHIFT);
+
+ return 0;
+}
+
+static const struct phy_ops pxa_usb_phy_ops = {
+ .init = pxa_usb_phy_init,
+ .exit = pxa_usb_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id pxa_usb_phy_of_match[] = {
+ {
+ .compatible = "marvell,mmp2-usb-phy",
+ .data = (void *)PXA_USB_PHY_MMP2,
+ }, {
+ .compatible = "marvell,pxa910-usb-phy",
+ .data = (void *)PXA_USB_PHY_PXA910,
+ }, {
+ .compatible = "marvell,pxa168-usb-phy",
+ .data = (void *)PXA_USB_PHY_PXA168,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pxa_usb_phy_of_match);
+
+static int pxa_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *resource;
+ struct pxa_usb_phy *pxa_usb_phy;
+ struct phy_provider *provider;
+ const struct of_device_id *of_id;
+
+ pxa_usb_phy = devm_kzalloc(dev, sizeof(struct pxa_usb_phy), GFP_KERNEL);
+ if (!pxa_usb_phy)
+ return -ENOMEM;
+
+ of_id = of_match_node(pxa_usb_phy_of_match, dev->of_node);
+ if (of_id)
+ pxa_usb_phy->version = (enum pxa_usb_phy_version)of_id->data;
+ else
+ pxa_usb_phy->version = PXA_USB_PHY_MMP2;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pxa_usb_phy->base = devm_ioremap_resource(dev, resource);
+ if (IS_ERR(pxa_usb_phy->base)) {
+ dev_err(dev, "failed to remap PHY regs\n");
+ return PTR_ERR(pxa_usb_phy->base);
+ }
+
+ pxa_usb_phy->phy = devm_phy_create(dev, NULL, &pxa_usb_phy_ops);
+ if (IS_ERR(pxa_usb_phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(pxa_usb_phy->phy);
+ }
+
+ phy_set_drvdata(pxa_usb_phy->phy, pxa_usb_phy);
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "failed to register PHY provider\n");
+ return PTR_ERR(provider);
+ }
+
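+	/*
+	 * On legacy, non-DT platforms the consumer drivers find this
+	 * PHY through lookup tables rather than the OF provider.
+	 */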
+ if (!dev->of_node) {
+ phy_create_lookup(pxa_usb_phy->phy, "usb", "mv-udc");
+ phy_create_lookup(pxa_usb_phy->phy, "usb", "pxa-u2oehci");
+ phy_create_lookup(pxa_usb_phy->phy, "usb", "mv-otg");
+ }
+
+	dev_info(dev, "Marvell PXA USB PHY registered\n");
+ return 0;
+}
+
+static struct platform_driver pxa_usb_phy_driver = {
+ .probe = pxa_usb_phy_probe,
+ .driver = {
+ .name = "pxa-usb-phy",
+ .of_match_table = pxa_usb_phy_of_match,
+ },
+};
+module_platform_driver(pxa_usb_phy_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Marvell PXA USB PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 632a0e73ee10..32f7d34eb784 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -50,6 +50,23 @@ config PHY_QCOM_UFS
help
Support for UFS PHY on QCOM chipsets.
+if PHY_QCOM_UFS
+
+config PHY_QCOM_UFS_14NM
+ tristate
+ default PHY_QCOM_UFS
+ help
+	  Support for the 14nm UFS QMP PHY present on QCOM chipsets.
+
+config PHY_QCOM_UFS_20NM
+ tristate
+ default PHY_QCOM_UFS
+ depends on BROKEN
+ help
+	  Support for the 20nm UFS QMP PHY present on QCOM chipsets.
+
+endif
+
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index deb831f453ae..c56efd3af205 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o
-obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
-obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o
+obj-$(CONFIG_PHY_QCOM_UFS_14NM) += phy-qcom-ufs-qmp-14nm.o
+obj-$(CONFIG_PHY_QCOM_UFS_20NM) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 4c470104a0d6..a83332411026 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -156,6 +156,11 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
};
+static const unsigned int sdm845_ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x160,
+};
+
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
@@ -601,6 +606,83 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
};
+static const struct qmp_phy_init_tbl sdm845_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xda),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE1, 0xc1),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_TERM_BW, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_CTRL2, 0x6e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SYM_RESYNC_CTRL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_CTRL1, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_MIN_HIBERN8_TIME, 0x9a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MULTI_LANE_CTRL1, 0x02),
+};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
@@ -649,9 +731,14 @@ struct qmp_phy_cfg {
/* true, if PHY has a separate DP_COM control block */
bool has_phy_dp_com_ctrl;
+ /* true, if PHY has secondary tx/rx lanes to be configured */
+ bool is_dual_lane_phy;
/* Register offset of secondary tx/rx lanes for USB DP combo PHY */
unsigned int tx_b_lane_offset;
unsigned int rx_b_lane_offset;
+
+ /* true, if PCS block has no separate SW_RESET register */
+ bool no_pcs_sw_reset;
};
/**
@@ -748,6 +835,10 @@ static const char * const qmp_v3_phy_clk_l[] = {
"aux", "cfg_ahb", "ref", "com_aux",
};
+static const char * const sdm845_ufs_phy_clk_l[] = {
+ "ref", "ref_aux",
+};
+
/* list of resets */
static const char * const msm8996_pciephy_reset_l[] = {
"phy", "common", "cfg",
@@ -758,7 +849,7 @@ static const char * const msm8996_usb3phy_reset_l[] = {
};
/* list of regulators */
-static const char * const msm8996_phy_vreg_l[] = {
+static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
@@ -778,8 +869,8 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
.reset_list = msm8996_pciephy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = pciephy_regs_layout,
.start_ctrl = PCS_START | PLL_READY_GATE_EN,
@@ -809,8 +900,8 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = usb3phy_regs_layout,
.start_ctrl = SERDES_START | PCS_START,
@@ -870,8 +961,8 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
.start_ctrl = SERDES_START | PCS_START,
@@ -883,6 +974,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
.tx_b_lane_offset = 0x400,
.rx_b_lane_offset = 0x400,
};
@@ -903,8 +995,8 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
.start_ctrl = SERDES_START | PCS_START,
@@ -916,6 +1008,35 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
+static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sdm845_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdm845_ufsphy_serdes_tbl),
+ .tx_tbl = sdm845_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_tx_tbl),
+ .rx_tbl = sdm845_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_rx_tbl),
+ .pcs_tbl = sdm845_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdm845_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sdm845_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PCS_READY,
+
+ .is_dual_lane_phy = true,
+ .tx_b_lane_offset = 0x400,
+ .rx_b_lane_offset = 0x400,
+
+ .no_pcs_sw_reset = true,
+};
+
static void qcom_qmp_phy_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -935,10 +1056,12 @@ static void qcom_qmp_phy_configure(void __iomem *base,
}
}
-static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
+static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
{
+ struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qmp->cfg;
void __iomem *serdes = qmp->serdes;
+ void __iomem *pcs = qphy->pcs;
void __iomem *dp_com = qmp->dp_com;
int ret, i;
@@ -979,10 +1102,6 @@ static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
goto err_rst;
}
- if (cfg->has_phy_com_ctrl)
- qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
- SW_PWRDN);
-
if (cfg->has_phy_dp_com_ctrl) {
qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
SW_PWRDN);
@@ -1000,6 +1119,12 @@ static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
}
+ if (cfg->has_phy_com_ctrl)
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+ else
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
/* Serdes configuration */
qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
cfg->serdes_tbl_num);
@@ -1090,7 +1215,7 @@ static int qcom_qmp_phy_init(struct phy *phy)
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
- ret = qcom_qmp_phy_com_init(qmp);
+ ret = qcom_qmp_phy_com_init(qphy);
if (ret)
return ret;
@@ -1112,22 +1237,31 @@ static int qcom_qmp_phy_init(struct phy *phy)
/* Tx, Rx, and PCS configurations */
qcom_qmp_phy_configure(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num);
/* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->has_phy_dp_com_ctrl)
+ if (cfg->is_dual_lane_phy)
qcom_qmp_phy_configure(tx + cfg->tx_b_lane_offset, cfg->regs,
cfg->tx_tbl, cfg->tx_tbl_num);
qcom_qmp_phy_configure(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num);
- if (cfg->has_phy_dp_com_ctrl)
+ if (cfg->is_dual_lane_phy)
qcom_qmp_phy_configure(rx + cfg->rx_b_lane_offset, cfg->regs,
cfg->rx_tbl, cfg->rx_tbl_num);
qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
/*
+	 * A UFS PHY requires software reset to be deasserted before the
+	 * serdes is started. For UFS PHYs that have no software reset
+	 * control bits, defer starting the serdes until the power-on
+	 * callback.
+ */
+ if ((cfg->type == PHY_TYPE_UFS) && cfg->no_pcs_sw_reset)
+ goto out;
+
+ /*
* Pull out PHY from POWER DOWN state.
* This is active low enable signal to power-down PHY.
*/
- qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+	if (cfg->type == PHY_TYPE_PCIE)
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
if (cfg->has_pwrdn_delay)
usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
@@ -1151,6 +1285,7 @@ static int qcom_qmp_phy_init(struct phy *phy)
}
qmp->phy_initialized = true;
+out:
return ret;
err_pcs_ready:
@@ -1173,7 +1308,8 @@ static int qcom_qmp_phy_exit(struct phy *phy)
clk_disable_unprepare(qphy->pipe_clk);
/* PHY reset */
- qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ if (!cfg->no_pcs_sw_reset)
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
/* stop SerDes and Phy-Coding-Sublayer */
qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
@@ -1191,6 +1327,44 @@ static int qcom_qmp_phy_exit(struct phy *phy)
return 0;
}
+static int qcom_qmp_phy_poweron(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *status;
+ unsigned int mask, val;
+ int ret = 0;
+
+ if (cfg->type != PHY_TYPE_UFS)
+ return 0;
+
+ /*
+	 * For UFS PHYs that have no software reset control, the serdes
+	 * should only be started when the UFS driver explicitly calls
+	 * phy_power_on after it has deasserted the software reset.
+ */
+ if (cfg->no_pcs_sw_reset && !qmp->phy_initialized &&
+ (qmp->init_count != 0)) {
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = cfg->mask_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, !(val & mask), 1,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ return ret;
+ }
+ qmp->phy_initialized = true;
+ }
+
+ return ret;
+}
+
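For PHYs flagged with no_pcs_sw_reset, initialization is deliberately split in two: phy_init() stops short of starting the serdes, and the power_on callback above finishes the bring-up once the UFS host controller has deasserted the PHY reset. A minimal sketch of the consumer-side ordering this relies on, using only the generic PHY API from <linux/phy/phy.h> (the "ufsphy" lookup name and the reset-deassert step are illustrative assumptions, not taken from this patch):

	struct phy *phy = devm_phy_get(dev, "ufsphy");	/* name is illustrative */
	int ret;

	ret = phy_init(phy);		/* serdes start is deferred for UFS */
	if (ret)
		return ret;
	/* ... host controller deasserts the PHY software reset here ... */
	ret = phy_power_on(phy);	/* starts the serdes, polls PCS ready */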
static int qcom_qmp_phy_set_mode(struct phy *phy, enum phy_mode mode)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
@@ -1400,7 +1574,7 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
ret = of_property_read_string(np, "clock-output-names", &init.name);
if (ret) {
- dev_err(qmp->dev, "%s: No clock-output-names\n", np->name);
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
return ret;
}
@@ -1420,6 +1594,7 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
static const struct phy_ops qcom_qmp_phy_gen_ops = {
.init = qcom_qmp_phy_init,
.exit = qcom_qmp_phy_exit,
+ .power_on = qcom_qmp_phy_poweron,
.set_mode = qcom_qmp_phy_set_mode,
.owner = THIS_MODULE,
};
@@ -1522,6 +1697,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,sdm845-qmp-usb3-uni-phy",
.data = &qmp_v3_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
},
{ },
};
@@ -1586,7 +1764,9 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
ret = qcom_qmp_phy_vreg_init(dev);
if (ret) {
- dev_err(dev, "failed to get regulator supplies\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
return ret;
}
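Demoting the message on -EPROBE_DEFER is the usual idiom here: the regulators may simply not have registered yet, the probe will be retried later, and printing an error on every deferral would spam the console for what is a normal boot-time condition.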
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 5d78d43ba9fc..d201cc307151 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -184,6 +184,8 @@
#define QSERDES_V3_COM_VCO_TUNE2_MODE0 0x0f8
#define QSERDES_V3_COM_VCO_TUNE1_MODE1 0x0fc
#define QSERDES_V3_COM_VCO_TUNE2_MODE1 0x100
+#define QSERDES_V3_COM_VCO_TUNE_INITVAL1 0x104
+#define QSERDES_V3_COM_VCO_TUNE_INITVAL2 0x108
#define QSERDES_V3_COM_VCO_TUNE_TIMER1 0x11c
#define QSERDES_V3_COM_VCO_TUNE_TIMER2 0x120
#define QSERDES_V3_COM_CLK_SELECT 0x138
@@ -211,8 +213,13 @@
/* Only for QMP V3 PHY - RX registers */
#define QSERDES_V3_RX_UCDR_SO_GAIN_HALF 0x00c
#define QSERDES_V3_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF 0x024
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER 0x028
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN 0x02c
#define QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN 0x030
#define QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V3_RX_UCDR_PI_CONTROLS 0x044
#define QSERDES_V3_RX_RX_TERM_BW 0x07c
#define QSERDES_V3_RX_VGA_CAL_CNTRL1 0x0bc
#define QSERDES_V3_RX_VGA_CAL_CNTRL2 0x0c0
@@ -239,6 +246,8 @@
#define QPHY_V3_PCS_TXMGN_V3 0x018
#define QPHY_V3_PCS_TXMGN_V4 0x01c
#define QPHY_V3_PCS_TXMGN_LS 0x020
+#define QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL 0x02c
+#define QPHY_V3_PCS_TX_SMALL_AMP_DRV_LVL 0x034
#define QPHY_V3_PCS_TXDEEMPH_M6DB_V0 0x024
#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0 0x028
#define QPHY_V3_PCS_TXDEEMPH_M6DB_V1 0x02c
@@ -275,6 +284,12 @@
#define QPHY_V3_PCS_FLL_CNT_VAL_L 0x0cc
#define QPHY_V3_PCS_FLL_CNT_VAL_H_TOL 0x0d0
#define QPHY_V3_PCS_FLL_MAN_CODE 0x0d4
+#define QPHY_V3_PCS_RX_SYM_RESYNC_CTRL 0x134
+#define QPHY_V3_PCS_RX_MIN_HIBERN8_TIME 0x138
+#define QPHY_V3_PCS_RX_SIGDET_CTRL1 0x13c
+#define QPHY_V3_PCS_RX_SIGDET_CTRL2 0x140
+#define QPHY_V3_PCS_TX_MID_TERM_CTRL1 0x1bc
+#define QPHY_V3_PCS_MULTI_LANE_CTRL1 0x1c4
#define QPHY_V3_PCS_RX_SIGDET_LVL 0x1d8
#define QPHY_V3_PCS_REFGEN_REQ_CONFIG1 0x20c
#define QPHY_V3_PCS_REFGEN_REQ_CONFIG2 0x210
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index e70e425f26f5..9ce531194f8a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -800,7 +800,9 @@ static int qusb2_phy_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, num, qphy->vregs);
if (ret) {
- dev_err(dev, "failed to get regulator supplies\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
index 822c83b8efcd..681644e43248 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
@@ -17,9 +17,9 @@
#include <linux/module.h>
#include <linux/clk.h>
+#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <linux/phy/phy-qcom-ufs.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/delay.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index c5493ea51282..f2979ccad00a 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -431,56 +431,6 @@ static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
}
}
-#define UFS_REF_CLK_EN (1 << 5)
-
-static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
-{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
- if (phy->dev_ref_clk_ctrl_mmio &&
- (enable ^ phy->is_dev_ref_clk_enabled)) {
- u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);
-
- if (enable)
- temp |= UFS_REF_CLK_EN;
- else
- temp &= ~UFS_REF_CLK_EN;
-
- /*
- * If we are here to disable this clock immediately after
- * entering into hibern8, we need to make sure that device
- * ref_clk is active atleast 1us after the hibern8 enter.
- */
- if (!enable)
- udelay(1);
-
- writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
- /* ensure that ref_clk is enabled/disabled before we return */
- wmb();
- /*
- * If we call hibern8 exit after this, we need to make sure that
- * device ref_clk is stable for atleast 1us before the hibern8
- * exit command.
- */
- if (enable)
- udelay(1);
-
- phy->is_dev_ref_clk_enabled = enable;
- }
-}
-
-void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
-{
- ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
-
-void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
-{
- ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
-
/* Turn ON M-PHY RMMI interface clocks */
static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index 4bd390c79d21..e340a925bbb1 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Phy drivers for Renesas platforms
#
diff --git a/drivers/phy/renesas/Makefile b/drivers/phy/renesas/Makefile
index 4b76fc439ed6..b599ff8a4349 100644
--- a/drivers/phy/renesas/Makefile
+++ b/drivers/phy/renesas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_RCAR_GEN2) += phy-rcar-gen2.o
obj-$(CONFIG_PHY_RCAR_GEN3_PCIE) += phy-rcar-gen3-pcie.o
obj-$(CONFIG_PHY_RCAR_GEN3_USB2) += phy-rcar-gen3-usb2.o
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 97d4dd6ea924..72eeb066912d 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen2 PHY driver
*
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index fb8f05e39cf7..d0f412c25981 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen3 for USB2.0 PHY driver
*
@@ -6,10 +7,6 @@
* This is based on the phy-rcar-gen2 driver:
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/extcon-provider.h>
@@ -81,18 +78,29 @@
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
-#define RCAR_GEN3_PHY_HAS_DEDICATED_PINS 1
-
struct rcar_gen3_chan {
void __iomem *base;
struct extcon_dev *extcon;
struct phy *phy;
struct regulator *vbus;
struct work_struct work;
+ enum usb_dr_mode dr_mode;
bool extcon_host;
- bool has_otg_pins;
+ bool is_otg_channel;
+ bool uses_otg_pins;
};
+/*
+ * Possible combinations of is_otg_channel and uses_otg_pins:
+ *
+ * Parameters || Behaviors
+ * is_otg_channel | uses_otg_pins || irqs | role sysfs
+ * ---------------------+---------------++--------------+------------
+ * true | true || enabled | enabled
+ * true | false || disabled | enabled
+ * false | any || disabled | disabled
+ */
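For example, a board that does not wire up the dedicated ID/VBUS pins but still reports an OTG-capable dr_mode would set the new renesas,no-otg-pins property: the channel remains an OTG channel (the role sysfs file stays usable) while uses_otg_pins is false, so the OTG interrupts are never enabled and rcar_gen3_check_id() below falls back to a dr_mode-based default.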
+
static void rcar_gen3_phy_usb2_work(struct work_struct *work)
{
struct rcar_gen3_chan *ch = container_of(work, struct rcar_gen3_chan,
@@ -147,6 +155,18 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
writel(val, usb2_base + USB2_ADPCTRL);
}
+static void rcar_gen3_control_otg_irq(struct rcar_gen3_chan *ch, int enable)
+{
+ void __iomem *usb2_base = ch->base;
+ u32 val = readl(usb2_base + USB2_OBINTEN);
+
+ if (ch->uses_otg_pins && enable)
+ val |= USB2_OBINT_BITS;
+ else
+ val &= ~USB2_OBINT_BITS;
+ writel(val, usb2_base + USB2_OBINTEN);
+}
+
static void rcar_gen3_init_for_host(struct rcar_gen3_chan *ch)
{
rcar_gen3_set_linectrl(ch, 1, 1);
@@ -192,20 +212,19 @@ static void rcar_gen3_init_for_a_peri(struct rcar_gen3_chan *ch)
static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch)
{
- void __iomem *usb2_base = ch->base;
- u32 val;
+ rcar_gen3_control_otg_irq(ch, 0);
- val = readl(usb2_base + USB2_OBINTEN);
- writel(val & ~USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
-
- rcar_gen3_enable_vbus_ctrl(ch, 0);
+ rcar_gen3_enable_vbus_ctrl(ch, 1);
rcar_gen3_init_for_host(ch);
- writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+ rcar_gen3_control_otg_irq(ch, 1);
}
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
+ if (!ch->uses_otg_pins)
+ return ch->dr_mode != USB_DR_MODE_HOST;
+
return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
}
@@ -237,7 +256,7 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
bool is_b_device;
enum phy_mode cur_mode, new_mode;
- if (!ch->has_otg_pins || !ch->phy->init_count)
+ if (!ch->is_otg_channel || !ch->phy->init_count)
return -EIO;
if (!strncmp(buf, "host", strlen("host")))
@@ -275,7 +294,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
- if (!ch->has_otg_pins || !ch->phy->init_count)
+ if (!ch->is_otg_channel || !ch->phy->init_count)
return -EIO;
return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
@@ -291,8 +310,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
val = readl(usb2_base + USB2_VBCTRL);
writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
- val = readl(usb2_base + USB2_OBINTEN);
- writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+ rcar_gen3_control_otg_irq(ch, 1);
val = readl(usb2_base + USB2_ADPCTRL);
writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
val = readl(usb2_base + USB2_LINECTRL1);
@@ -314,7 +332,7 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
/* Initialize otg part */
- if (channel->has_otg_pins)
+ if (channel->is_otg_channel)
rcar_gen3_init_otg(channel);
return 0;
@@ -388,21 +406,10 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
}
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
- {
- .compatible = "renesas,usb2-phy-r8a7795",
- .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
- },
- {
- .compatible = "renesas,usb2-phy-r8a7796",
- .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
- },
- {
- .compatible = "renesas,usb2-phy-r8a77965",
- .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
- },
- {
- .compatible = "renesas,rcar-gen3-usb2-phy",
- },
+ { .compatible = "renesas,usb2-phy-r8a7795" },
+ { .compatible = "renesas,usb2-phy-r8a7796" },
+ { .compatible = "renesas,usb2-phy-r8a77965" },
+ { .compatible = "renesas,rcar-gen3-usb2-phy" },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb2_match_table);
@@ -445,10 +452,13 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
dev_err(dev, "No irq handler (%d)\n", irq);
}
- if (of_usb_get_dr_mode_by_phy(dev->of_node, 0) == USB_DR_MODE_OTG) {
+ channel->dr_mode = of_usb_get_dr_mode_by_phy(dev->of_node, 0);
+ if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
int ret;
- channel->has_otg_pins = (uintptr_t)of_device_get_match_data(dev);
+ channel->is_otg_channel = true;
+ channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
+ "renesas,no-otg-pins");
channel->extcon = devm_extcon_dev_allocate(dev,
rcar_gen3_phy_cable);
if (IS_ERR(channel->extcon))
@@ -490,7 +500,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
dev_err(dev, "Failed to register PHY provider\n");
ret = PTR_ERR(provider);
goto error;
- } else if (channel->has_otg_pins) {
+ } else if (channel->is_otg_channel) {
int ret;
ret = device_create_file(dev, &dev_attr_role);
@@ -510,7 +520,7 @@ static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
{
struct rcar_gen3_chan *channel = platform_get_drvdata(pdev);
- if (channel->has_otg_pins)
+ if (channel->is_otg_channel)
device_remove_file(&pdev->dev, &dev_attr_role);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb3.c b/drivers/phy/renesas/phy-rcar-gen3-usb3.c
index 88c83c9b8ff9..566b4cf4ff38 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb3.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb3.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen3 for USB3.0 PHY driver
*
* Copyright (C) 2017 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 0e15119ddfc6..990204a46eb6 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -15,6 +15,14 @@ config PHY_ROCKCHIP_EMMC
help
Enable this to support the Rockchip EMMC PHY.
+config PHY_ROCKCHIP_INNO_HDMI
+ tristate "Rockchip INNO HDMI PHY Driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip Innosilicon HDMI PHY.
+
config PHY_ROCKCHIP_INNO_USB2
tristate "Rockchip INNO USB2PHY Driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index 7f149d989046..fd21cbaf40dd 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
obj-$(CONFIG_PHY_ROCKCHIP_TYPEC) += phy-rockchip-typec.o
diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
index b237360f95f6..19bf84f0bc67 100644
--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
+++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
@@ -337,8 +337,8 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
return -ENOMEM;
if (of_property_read_u32(dev->of_node, "reg", &reg_offset)) {
- dev_err(dev, "missing reg property in node %s\n",
- dev->of_node->name);
+ dev_err(dev, "missing reg property in node %pOFn\n",
+ dev->of_node);
return -EINVAL;
}
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
new file mode 100644
index 000000000000..b10a84cab4a7
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -0,0 +1,1277 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Zheng Yang <zhengyang@rock-chips.com>
+ * Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/phy/phy.h>
+#include <linux/slab.h>
+
+#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
+
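UPDATE() shifts a field value into position and clips it to the field, so each register field below can be described by a single one-line macro. A worked instance using one of the definitions that follow (plain arithmetic, nothing assumed):

	/* RK3228_PRE_PLL_PCLK_DIV_B(2) = UPDATE(2, 6, 5)
	 *	= (2 << 5) & GENMASK(6, 5)
	 *	= 0x40 & 0x60 = 0x40	-> value 2 placed into bits [6:5]
	 */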
+/* REG: 0x00 */
+#define RK3228_PRE_PLL_REFCLK_SEL_PCLK BIT(0)
+/* REG: 0x01 */
+#define RK3228_BYPASS_RXSENSE_EN BIT(2)
+#define RK3228_BYPASS_PWRON_EN BIT(1)
+#define RK3228_BYPASS_PLLPD_EN BIT(0)
+/* REG: 0x02 */
+#define RK3228_BYPASS_PDATA_EN BIT(4)
+#define RK3228_PDATAEN_DISABLE BIT(0)
+/* REG: 0x03 */
+#define RK3228_BYPASS_AUTO_TERM_RES_CAL BIT(7)
+#define RK3228_AUTO_TERM_RES_CAL_SPEED_14_8(x) UPDATE(x, 6, 0)
+/* REG: 0x04 */
+#define RK3228_AUTO_TERM_RES_CAL_SPEED_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xaa */
+#define RK3228_POST_PLL_CTRL_MANUAL BIT(0)
+/* REG: 0xe0 */
+#define RK3228_POST_PLL_POWER_DOWN BIT(5)
+#define RK3228_PRE_PLL_POWER_DOWN BIT(4)
+#define RK3228_RXSENSE_CLK_CH_ENABLE BIT(3)
+#define RK3228_RXSENSE_DATA_CH2_ENABLE BIT(2)
+#define RK3228_RXSENSE_DATA_CH1_ENABLE BIT(1)
+#define RK3228_RXSENSE_DATA_CH0_ENABLE BIT(0)
+/* REG: 0xe1 */
+#define RK3228_BANDGAP_ENABLE BIT(4)
+#define RK3228_TMDS_DRIVER_ENABLE GENMASK(3, 0)
+/* REG: 0xe2 */
+#define RK3228_PRE_PLL_FB_DIV_8_MASK BIT(7)
+#define RK3228_PRE_PLL_FB_DIV_8(x) UPDATE((x) >> 8, 7, 7)
+#define RK3228_PCLK_VCO_DIV_5_MASK BIT(5)
+#define RK3228_PCLK_VCO_DIV_5(x) UPDATE(x, 5, 5)
+#define RK3228_PRE_PLL_PRE_DIV_MASK GENMASK(4, 0)
+#define RK3228_PRE_PLL_PRE_DIV(x) UPDATE(x, 4, 0)
+/* REG: 0xe3 */
+#define RK3228_PRE_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xe4 */
+#define RK3228_PRE_PLL_PCLK_DIV_B_MASK GENMASK(6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_B_SHIFT 5
+#define RK3228_PRE_PLL_PCLK_DIV_B(x) UPDATE(x, 6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_A_MASK GENMASK(4, 0)
+#define RK3228_PRE_PLL_PCLK_DIV_A(x) UPDATE(x, 4, 0)
+/* REG: 0xe5 */
+#define RK3228_PRE_PLL_PCLK_DIV_C_MASK GENMASK(6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_C(x) UPDATE(x, 6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_D_MASK GENMASK(4, 0)
+#define RK3228_PRE_PLL_PCLK_DIV_D(x) UPDATE(x, 4, 0)
+/* REG: 0xe6 */
+#define RK3228_PRE_PLL_TMDSCLK_DIV_C_MASK GENMASK(5, 4)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_C(x) UPDATE(x, 5, 4)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_A_MASK GENMASK(3, 2)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_A(x) UPDATE(x, 3, 2)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_B_MASK GENMASK(1, 0)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_B(x) UPDATE(x, 1, 0)
+/* REG: 0xe8 */
+#define RK3228_PRE_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xe9 */
+#define RK3228_POST_PLL_POST_DIV_ENABLE UPDATE(3, 7, 6)
+#define RK3228_POST_PLL_PRE_DIV_MASK GENMASK(4, 0)
+#define RK3228_POST_PLL_PRE_DIV(x) UPDATE(x, 4, 0)
+/* REG: 0xea */
+#define RK3228_POST_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xeb */
+#define RK3228_POST_PLL_FB_DIV_8_MASK BIT(7)
+#define RK3228_POST_PLL_FB_DIV_8(x) UPDATE((x) >> 8, 7, 7)
+#define RK3228_POST_PLL_POST_DIV_MASK GENMASK(5, 4)
+#define RK3228_POST_PLL_POST_DIV(x) UPDATE(x, 5, 4)
+#define RK3228_POST_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xee */
+#define RK3228_TMDS_CH_TA_ENABLE GENMASK(7, 4)
+/* REG: 0xef */
+#define RK3228_TMDS_CLK_CH_TA(x) UPDATE(x, 7, 6)
+#define RK3228_TMDS_DATA_CH2_TA(x) UPDATE(x, 5, 4)
+#define RK3228_TMDS_DATA_CH1_TA(x) UPDATE(x, 3, 2)
+#define RK3228_TMDS_DATA_CH0_TA(x) UPDATE(x, 1, 0)
+/* REG: 0xf0 */
+#define RK3228_TMDS_DATA_CH2_PRE_EMPHASIS_MASK GENMASK(5, 4)
+#define RK3228_TMDS_DATA_CH2_PRE_EMPHASIS(x) UPDATE(x, 5, 4)
+#define RK3228_TMDS_DATA_CH1_PRE_EMPHASIS_MASK GENMASK(3, 2)
+#define RK3228_TMDS_DATA_CH1_PRE_EMPHASIS(x) UPDATE(x, 3, 2)
+#define RK3228_TMDS_DATA_CH0_PRE_EMPHASIS_MASK GENMASK(1, 0)
+#define RK3228_TMDS_DATA_CH0_PRE_EMPHASIS(x) UPDATE(x, 1, 0)
+/* REG: 0xf1 */
+#define RK3228_TMDS_CLK_CH_OUTPUT_SWING(x) UPDATE(x, 7, 4)
+#define RK3228_TMDS_DATA_CH2_OUTPUT_SWING(x) UPDATE(x, 3, 0)
+/* REG: 0xf2 */
+#define RK3228_TMDS_DATA_CH1_OUTPUT_SWING(x) UPDATE(x, 7, 4)
+#define RK3228_TMDS_DATA_CH0_OUTPUT_SWING(x) UPDATE(x, 3, 0)
+
+/* REG: 0x01 */
+#define RK3328_BYPASS_RXSENSE_EN BIT(2)
+#define RK3328_BYPASS_POWERON_EN BIT(1)
+#define RK3328_BYPASS_PLLPD_EN BIT(0)
+/* REG: 0x02 */
+#define RK3328_INT_POL_HIGH BIT(7)
+#define RK3328_BYPASS_PDATA_EN BIT(4)
+#define RK3328_PDATA_EN BIT(0)
+/* REG:0x05 */
+#define RK3328_INT_TMDS_CLK(x) UPDATE(x, 7, 4)
+#define RK3328_INT_TMDS_D2(x) UPDATE(x, 3, 0)
+/* REG:0x07 */
+#define RK3328_INT_TMDS_D1(x) UPDATE(x, 7, 4)
+#define RK3328_INT_TMDS_D0(x) UPDATE(x, 3, 0)
+/* for all RK3328_INT_TMDS_*, ESD_DET as defined in 0xc8-0xcb */
+#define RK3328_INT_AGND_LOW_PULSE_LOCKED BIT(3)
+#define RK3328_INT_RXSENSE_LOW_PULSE_LOCKED BIT(2)
+#define RK3328_INT_VSS_AGND_ESD_DET BIT(1)
+#define RK3328_INT_AGND_VSS_ESD_DET BIT(0)
+/* REG: 0xa0 */
+#define RK3328_PCLK_VCO_DIV_5_MASK BIT(1)
+#define RK3328_PCLK_VCO_DIV_5(x) UPDATE(x, 1, 1)
+#define RK3328_PRE_PLL_POWER_DOWN BIT(0)
+/* REG: 0xa1 */
+#define RK3328_PRE_PLL_PRE_DIV_MASK GENMASK(5, 0)
+#define RK3328_PRE_PLL_PRE_DIV(x) UPDATE(x, 5, 0)
+/* REG: 0xa2 */
+/* unset means center spread */
+#define RK3328_SPREAD_SPECTRUM_MOD_DOWN BIT(7)
+#define RK3328_SPREAD_SPECTRUM_MOD_DISABLE BIT(6)
+#define RK3328_PRE_PLL_FRAC_DIV_DISABLE UPDATE(3, 5, 4)
+#define RK3328_PRE_PLL_FB_DIV_11_8_MASK GENMASK(3, 0)
+#define RK3328_PRE_PLL_FB_DIV_11_8(x) UPDATE((x) >> 8, 3, 0)
+/* REG: 0xa3 */
+#define RK3328_PRE_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xa4*/
+#define RK3328_PRE_PLL_TMDSCLK_DIV_C_MASK GENMASK(1, 0)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_C(x) UPDATE(x, 1, 0)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_B_MASK GENMASK(3, 2)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_B(x) UPDATE(x, 3, 2)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_A_MASK GENMASK(5, 4)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_A(x) UPDATE(x, 5, 4)
+/* REG: 0xa5 */
+#define RK3328_PRE_PLL_PCLK_DIV_B_SHIFT 5
+#define RK3328_PRE_PLL_PCLK_DIV_B_MASK GENMASK(6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_B(x) UPDATE(x, 6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_A_MASK GENMASK(4, 0)
+#define RK3328_PRE_PLL_PCLK_DIV_A(x) UPDATE(x, 4, 0)
+/* REG: 0xa6 */
+#define RK3328_PRE_PLL_PCLK_DIV_C_SHIFT 5
+#define RK3328_PRE_PLL_PCLK_DIV_C_MASK GENMASK(6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_C(x) UPDATE(x, 6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_D_MASK GENMASK(4, 0)
+#define RK3328_PRE_PLL_PCLK_DIV_D(x) UPDATE(x, 4, 0)
+/* REG: 0xa9 */
+#define RK3328_PRE_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xaa */
+#define RK3328_POST_PLL_POST_DIV_ENABLE GENMASK(3, 2)
+#define RK3328_POST_PLL_REFCLK_SEL_TMDS BIT(1)
+#define RK3328_POST_PLL_POWER_DOWN BIT(0)
+/* REG:0xab */
+#define RK3328_POST_PLL_FB_DIV_8(x) UPDATE((x) >> 8, 7, 7)
+#define RK3328_POST_PLL_PRE_DIV(x) UPDATE(x, 4, 0)
+/* REG: 0xac */
+#define RK3328_POST_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xad */
+#define RK3328_POST_PLL_POST_DIV_MASK GENMASK(1, 0)
+#define RK3328_POST_PLL_POST_DIV_2 0x0
+#define RK3328_POST_PLL_POST_DIV_4 0x1
+#define RK3328_POST_PLL_POST_DIV_8 0x3
+/* REG: 0xaf */
+#define RK3328_POST_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xb0 */
+#define RK3328_BANDGAP_ENABLE BIT(2)
+/* REG: 0xb2 */
+#define RK3328_TMDS_CLK_DRIVER_EN BIT(3)
+#define RK3328_TMDS_D2_DRIVER_EN BIT(2)
+#define RK3328_TMDS_D1_DRIVER_EN BIT(1)
+#define RK3328_TMDS_D0_DRIVER_EN BIT(0)
+#define RK3328_TMDS_DRIVER_ENABLE (RK3328_TMDS_CLK_DRIVER_EN | \
+ RK3328_TMDS_D2_DRIVER_EN | \
+ RK3328_TMDS_D1_DRIVER_EN | \
+ RK3328_TMDS_D0_DRIVER_EN)
+/* REG:0xc5 */
+#define RK3328_BYPASS_TERM_RESISTOR_CALIB BIT(7)
+#define RK3328_TERM_RESISTOR_CALIB_SPEED_14_8(x) UPDATE((x) >> 8, 6, 0)
+/* REG:0xc6 */
+#define RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(x) UPDATE(x, 7, 0)
+/* REG:0xc7 */
+#define RK3328_TERM_RESISTOR_50 UPDATE(0, 2, 1)
+#define RK3328_TERM_RESISTOR_62_5 UPDATE(1, 2, 1)
+#define RK3328_TERM_RESISTOR_75 UPDATE(2, 2, 1)
+#define RK3328_TERM_RESISTOR_100 UPDATE(3, 2, 1)
+/* REG 0xc8 - 0xcb */
+#define RK3328_ESD_DETECT_MASK GENMASK(7, 6)
+#define RK3328_ESD_DETECT_340MV (0x0 << 6)
+#define RK3328_ESD_DETECT_280MV (0x1 << 6)
+#define RK3328_ESD_DETECT_260MV (0x2 << 6)
+#define RK3328_ESD_DETECT_240MV (0x3 << 6)
+/* resistors can be used in parallel */
+#define RK3328_TMDS_TERM_RESIST_MASK GENMASK(5, 0)
+#define RK3328_TMDS_TERM_RESIST_75 BIT(5)
+#define RK3328_TMDS_TERM_RESIST_150 BIT(4)
+#define RK3328_TMDS_TERM_RESIST_300 BIT(3)
+#define RK3328_TMDS_TERM_RESIST_600 BIT(2)
+#define RK3328_TMDS_TERM_RESIST_1000 BIT(1)
+#define RK3328_TMDS_TERM_RESIST_2000 BIT(0)
+/* REG: 0xd1 */
+#define RK3328_PRE_PLL_FRAC_DIV_23_16(x) UPDATE((x) >> 16, 7, 0)
+/* REG: 0xd2 */
+#define RK3328_PRE_PLL_FRAC_DIV_15_8(x) UPDATE((x) >> 8, 7, 0)
+/* REG: 0xd3 */
+#define RK3328_PRE_PLL_FRAC_DIV_7_0(x) UPDATE(x, 7, 0)
+
+struct inno_hdmi_phy_drv_data;
+
+struct inno_hdmi_phy {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+
+ struct phy *phy;
+ struct clk *sysclk;
+ struct clk *refoclk;
+ struct clk *refpclk;
+
+ /* platform data */
+ const struct inno_hdmi_phy_drv_data *plat_data;
+ int chip_version;
+
+ /* clk provider */
+ struct clk_hw hw;
+ struct clk *phyclk;
+ unsigned long pixclock;
+};
+
+struct pre_pll_config {
+ unsigned long pixclock;
+ unsigned long tmdsclock;
+ u8 prediv;
+ u16 fbdiv;
+ u8 tmds_div_a;
+ u8 tmds_div_b;
+ u8 tmds_div_c;
+ u8 pclk_div_a;
+ u8 pclk_div_b;
+ u8 pclk_div_c;
+ u8 pclk_div_d;
+ u8 vco_div_5_en;
+ u32 fracdiv;
+};
+
+struct post_pll_config {
+ unsigned long tmdsclock;
+ u8 prediv;
+ u16 fbdiv;
+ u8 postdiv;
+ u8 version;
+};
+
+struct phy_config {
+ unsigned long tmdsclock;
+ u8 regs[14];
+};
+
+struct inno_hdmi_phy_ops {
+ int (*init)(struct inno_hdmi_phy *inno);
+ int (*power_on)(struct inno_hdmi_phy *inno,
+ const struct post_pll_config *cfg,
+ const struct phy_config *phy_cfg);
+ void (*power_off)(struct inno_hdmi_phy *inno);
+};
+
+struct inno_hdmi_phy_drv_data {
+ const struct inno_hdmi_phy_ops *ops;
+ const struct clk_ops *clk_ops;
+ const struct phy_config *phy_cfg_table;
+};
+
+static const struct pre_pll_config pre_pll_cfg_table[] = {
+ { 27000000, 27000000, 1, 90, 3, 2, 2, 10, 3, 3, 4, 0, 0},
+ { 27000000, 33750000, 1, 90, 1, 3, 3, 10, 3, 3, 4, 0, 0},
+ { 40000000, 40000000, 1, 80, 2, 2, 2, 12, 2, 2, 2, 0, 0},
+ { 59341000, 59341000, 1, 98, 3, 1, 2, 1, 3, 3, 4, 0, 0xE6AE6B},
+ { 59400000, 59400000, 1, 99, 3, 1, 1, 1, 3, 3, 4, 0, 0},
+ { 59341000, 74176250, 1, 98, 0, 3, 3, 1, 3, 3, 4, 0, 0xE6AE6B},
+ { 59400000, 74250000, 1, 99, 1, 2, 2, 1, 3, 3, 4, 0, 0},
+ { 74176000, 74176000, 1, 98, 1, 2, 2, 1, 2, 3, 4, 0, 0xE6AE6B},
+ { 74250000, 74250000, 1, 99, 1, 2, 2, 1, 2, 3, 4, 0, 0},
+ { 74176000, 92720000, 4, 494, 1, 2, 2, 1, 3, 3, 4, 0, 0x816817},
+ { 74250000, 92812500, 4, 495, 1, 2, 2, 1, 3, 3, 4, 0, 0},
+ {148352000, 148352000, 1, 98, 1, 1, 1, 1, 2, 2, 2, 0, 0xE6AE6B},
+ {148500000, 148500000, 1, 99, 1, 1, 1, 1, 2, 2, 2, 0, 0},
+ {148352000, 185440000, 4, 494, 0, 2, 2, 1, 3, 2, 2, 0, 0x816817},
+ {148500000, 185625000, 4, 495, 0, 2, 2, 1, 3, 2, 2, 0, 0},
+ {296703000, 296703000, 1, 98, 0, 1, 1, 1, 0, 2, 2, 0, 0xE6AE6B},
+ {297000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 2, 0, 0},
+ {296703000, 370878750, 4, 494, 1, 2, 0, 1, 3, 1, 1, 0, 0x816817},
+ {297000000, 371250000, 4, 495, 1, 2, 0, 1, 3, 1, 1, 0, 0},
+ {593407000, 296703500, 1, 98, 0, 1, 1, 1, 0, 2, 1, 0, 0xE6AE6B},
+ {594000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 1, 0, 0},
+ {593407000, 370879375, 4, 494, 1, 2, 0, 1, 3, 1, 1, 1, 0x816817},
+ {594000000, 371250000, 4, 495, 1, 2, 0, 1, 3, 1, 1, 1, 0},
+ {593407000, 593407000, 1, 98, 0, 2, 0, 1, 0, 1, 1, 0, 0xE6AE6B},
+ {594000000, 594000000, 1, 99, 0, 2, 0, 1, 0, 1, 1, 0, 0},
+ { /* sentinel */ }
+};
+
+static const struct post_pll_config post_pll_cfg_table[] = {
+ {33750000, 1, 40, 8, 1},
+ {33750000, 1, 80, 8, 2},
+ {74250000, 1, 40, 8, 1},
+ {74250000, 18, 80, 8, 2},
+ {148500000, 2, 40, 4, 3},
+ {297000000, 4, 40, 2, 3},
+ {594000000, 8, 40, 1, 3},
+ { /* sentinel */ }
+};
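The last column of post_pll_cfg_table is a chip-version bitmask that the power_on path matches bitwise against inno->chip_version (1 by default, or efuse value + 1 on rk3328), so the two 33.75/74.25 MHz pairs select revision-specific dividers while the version-3 entries apply to every revision. For instance, at a 74.25 MHz TMDS clock a chip reporting version 2 skips {74250000, 1, 40, 8, 1} (1 & 2 == 0) and uses {74250000, 18, 80, 8, 2}.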
+
+/* phy tuning values for an undocumented set of registers */
+static const struct phy_config rk3228_phy_cfg[] = {
+ { 165000000, {
+ 0xaa, 0x00, 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ }, {
+ 340000000, {
+ 0xaa, 0x15, 0x6a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ }, {
+ 594000000, {
+ 0xaa, 0x15, 0x7a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ }, { /* sentinel */ },
+};
+
+/* phy tuning values for an undocumented set of registers */
+static const struct phy_config rk3328_phy_cfg[] = {
+ { 165000000, {
+ 0x07, 0x0a, 0x0a, 0x0a, 0x00, 0x00, 0x08, 0x08, 0x08,
+ 0x00, 0xac, 0xcc, 0xcc, 0xcc,
+ },
+ }, {
+ 340000000, {
+ 0x0b, 0x0d, 0x0d, 0x0d, 0x07, 0x15, 0x08, 0x08, 0x08,
+ 0x3f, 0xac, 0xcc, 0xcd, 0xdd,
+ },
+ }, {
+ 594000000, {
+ 0x10, 0x1a, 0x1a, 0x1a, 0x07, 0x15, 0x08, 0x08, 0x08,
+ 0x00, 0xac, 0xcc, 0xcc, 0xcc,
+ },
+ }, { /* sentinel */ },
+};
+
+static inline struct inno_hdmi_phy *to_inno_hdmi_phy(struct clk_hw *hw)
+{
+ return container_of(hw, struct inno_hdmi_phy, hw);
+}
+
+/*
+ * The register description of the IP block does not use any distinct names;
+ * instead the databook simply numbers the registers in increments of one.
+ * As the registers are 32 bits wide, the inno_* helpers translate a databook
+ * register number into the actual register address, e.g. databook register
+ * 0xa1 lives at byte offset 0xa1 * 4 = 0x284.
+ */
+static inline void inno_write(struct inno_hdmi_phy *inno, u32 reg, u8 val)
+{
+ regmap_write(inno->regmap, reg * 4, val);
+}
+
+static inline u8 inno_read(struct inno_hdmi_phy *inno, u32 reg)
+{
+ u32 val;
+
+ regmap_read(inno->regmap, reg * 4, &val);
+
+ return val;
+}
+
+static inline void inno_update_bits(struct inno_hdmi_phy *inno, u8 reg,
+ u8 mask, u8 val)
+{
+ regmap_update_bits(inno->regmap, reg * 4, mask, val);
+}
+
+#define inno_poll(inno, reg, val, cond, sleep_us, timeout_us) \
+ regmap_read_poll_timeout((inno)->regmap, (reg) * 4, val, cond, \
+ sleep_us, timeout_us)
+
+static unsigned long inno_hdmi_phy_get_tmdsclk(struct inno_hdmi_phy *inno,
+ unsigned long rate)
+{
+ int bus_width = phy_get_bus_width(inno->phy);
+
+ switch (bus_width) {
+ case 4:
+ case 5:
+ case 6:
+ case 10:
+ case 12:
+ case 16:
+ return (u64)rate * bus_width / 8;
+ default:
+ return rate;
+ }
+}
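The bus width is expected to be set by the HDMI bridge driver to the bits per channel of the current deep-color mode (an assumption of this note; the probe below defaults it to 8), so the TMDS clock scales with the depth over the 8-bit baseline. For example, a 10-bit mode at a 148.5 MHz pixel clock gives 148500000 * 10 / 8 = 185625000 Hz, which is exactly the {148500000, 185625000, ...} row of pre_pll_cfg_table above.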
+
+static irqreturn_t inno_hdmi_phy_rk3328_hardirq(int irq, void *dev_id)
+{
+ struct inno_hdmi_phy *inno = dev_id;
+ int intr_stat1, intr_stat2, intr_stat3;
+
+ intr_stat1 = inno_read(inno, 0x04);
+ intr_stat2 = inno_read(inno, 0x06);
+ intr_stat3 = inno_read(inno, 0x08);
+
+ if (intr_stat1)
+ inno_write(inno, 0x04, intr_stat1);
+ if (intr_stat2)
+ inno_write(inno, 0x06, intr_stat2);
+ if (intr_stat3)
+ inno_write(inno, 0x08, intr_stat3);
+
+ if (intr_stat1 || intr_stat2 || intr_stat3)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t inno_hdmi_phy_rk3328_irq(int irq, void *dev_id)
+{
+ struct inno_hdmi_phy *inno = dev_id;
+
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, 0);
+ usleep_range(10, 20);
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, RK3328_PDATA_EN);
+
+ return IRQ_HANDLED;
+}
+
+static int inno_hdmi_phy_power_on(struct phy *phy)
+{
+ struct inno_hdmi_phy *inno = phy_get_drvdata(phy);
+ const struct post_pll_config *cfg = post_pll_cfg_table;
+ const struct phy_config *phy_cfg = inno->plat_data->phy_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno,
+ inno->pixclock);
+ int ret;
+
+ if (!tmdsclock) {
+ dev_err(inno->dev, "TMDS clock is zero!\n");
+ return -EINVAL;
+ }
+
+ if (!inno->plat_data->ops->power_on)
+ return -EINVAL;
+
+ for (; cfg->tmdsclock != 0; cfg++)
+ if (tmdsclock <= cfg->tmdsclock &&
+ cfg->version & inno->chip_version)
+ break;
+
+ for (; phy_cfg->tmdsclock != 0; phy_cfg++)
+ if (tmdsclock <= phy_cfg->tmdsclock)
+ break;
+
+ if (cfg->tmdsclock == 0 || phy_cfg->tmdsclock == 0)
+ return -EINVAL;
+
+ dev_dbg(inno->dev, "Inno HDMI PHY Power On\n");
+
+ ret = clk_prepare_enable(inno->phyclk);
+ if (ret)
+ return ret;
+
+ ret = inno->plat_data->ops->power_on(inno, cfg, phy_cfg);
+ if (ret) {
+ clk_disable_unprepare(inno->phyclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int inno_hdmi_phy_power_off(struct phy *phy)
+{
+ struct inno_hdmi_phy *inno = phy_get_drvdata(phy);
+
+ if (!inno->plat_data->ops->power_off)
+ return -EINVAL;
+
+ inno->plat_data->ops->power_off(inno);
+
+ clk_disable_unprepare(inno->phyclk);
+
+ dev_dbg(inno->dev, "Inno HDMI PHY Power Off\n");
+
+ return 0;
+}
+
+static const struct phy_ops inno_hdmi_phy_ops = {
+ .owner = THIS_MODULE,
+ .power_on = inno_hdmi_phy_power_on,
+ .power_off = inno_hdmi_phy_power_off,
+};
+
+static const
+struct pre_pll_config *inno_hdmi_phy_get_pre_pll_cfg(struct inno_hdmi_phy *inno,
+ unsigned long rate)
+{
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
+
+ for (; cfg->pixclock != 0; cfg++)
+ if (cfg->pixclock == rate && cfg->tmdsclock == tmdsclock)
+ break;
+
+ if (cfg->pixclock == 0)
+ return ERR_PTR(-EINVAL);
+
+ return cfg;
+}
+
+static int inno_hdmi_phy_rk3228_clk_is_prepared(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ u8 status;
+
+ status = inno_read(inno, 0xe0) & RK3228_PRE_PLL_POWER_DOWN;
+ return status ? 0 : 1;
+}
+
+static int inno_hdmi_phy_rk3228_clk_prepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN, 0);
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3228_clk_unprepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN,
+ RK3228_PRE_PLL_POWER_DOWN);
+}
+
+static
+unsigned long inno_hdmi_phy_rk3228_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ u8 nd, no_a, no_b, no_d;
+ u64 vco;
+ u16 nf;
+
+ nd = inno_read(inno, 0xe2) & RK3228_PRE_PLL_PRE_DIV_MASK;
+ nf = (inno_read(inno, 0xe2) & RK3228_PRE_PLL_FB_DIV_8_MASK) << 1;
+ nf |= inno_read(inno, 0xe3);
+ vco = parent_rate * nf;
+
+ if (inno_read(inno, 0xe2) & RK3228_PCLK_VCO_DIV_5_MASK) {
+ do_div(vco, nd * 5);
+ } else {
+ no_a = inno_read(inno, 0xe4) & RK3228_PRE_PLL_PCLK_DIV_A_MASK;
+ if (!no_a)
+ no_a = 1;
+ no_b = inno_read(inno, 0xe4) & RK3228_PRE_PLL_PCLK_DIV_B_MASK;
+ no_b >>= RK3228_PRE_PLL_PCLK_DIV_B_SHIFT;
+ no_b += 2;
+ no_d = inno_read(inno, 0xe5) & RK3228_PRE_PLL_PCLK_DIV_D_MASK;
+
+ do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
+ }
+
+ inno->pixclock = vco;
+
+ dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
+
+ return vco;
+}
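As a cross-check against the table, assume the customary 24 MHz reference clock on these SoCs: the 148.5 MHz row programs prediv 1, fbdiv 99 and pclk dividers a=1, b=2, d=2, so vco = 24000000 * 99 = 2376 MHz, no_a == 1 selects no_b = 2 + 2 = 4, and the divisor is nd * no_b * no_d * 2 = 1 * 4 * 2 * 2 = 16, giving 2376 MHz / 16 = 148.5 MHz back.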
+
+static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+
+ for (; cfg->pixclock != 0; cfg++)
+ if (cfg->pixclock == rate && !cfg->fracdiv)
+ break;
+
+ if (cfg->pixclock == 0)
+ return -EINVAL;
+
+ return cfg->pixclock;
+}
+
+static int inno_hdmi_phy_rk3228_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
+ u32 v;
+ int ret;
+
+ dev_dbg(inno->dev, "%s rate %lu tmdsclk %lu\n",
+ __func__, rate, tmdsclock);
+
+ cfg = inno_hdmi_phy_get_pre_pll_cfg(inno, rate);
+ if (IS_ERR(cfg))
+ return PTR_ERR(cfg);
+
+ /* Power down PRE-PLL */
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN,
+ RK3228_PRE_PLL_POWER_DOWN);
+
+ inno_update_bits(inno, 0xe2, RK3228_PRE_PLL_FB_DIV_8_MASK |
+ RK3228_PCLK_VCO_DIV_5_MASK |
+ RK3228_PRE_PLL_PRE_DIV_MASK,
+ RK3228_PRE_PLL_FB_DIV_8(cfg->fbdiv) |
+ RK3228_PCLK_VCO_DIV_5(cfg->vco_div_5_en) |
+ RK3228_PRE_PLL_PRE_DIV(cfg->prediv));
+ inno_write(inno, 0xe3, RK3228_PRE_PLL_FB_DIV_7_0(cfg->fbdiv));
+ inno_update_bits(inno, 0xe4, RK3228_PRE_PLL_PCLK_DIV_B_MASK |
+ RK3228_PRE_PLL_PCLK_DIV_A_MASK,
+ RK3228_PRE_PLL_PCLK_DIV_B(cfg->pclk_div_b) |
+ RK3228_PRE_PLL_PCLK_DIV_A(cfg->pclk_div_a));
+ inno_update_bits(inno, 0xe5, RK3228_PRE_PLL_PCLK_DIV_C_MASK |
+ RK3228_PRE_PLL_PCLK_DIV_D_MASK,
+ RK3228_PRE_PLL_PCLK_DIV_C(cfg->pclk_div_c) |
+ RK3228_PRE_PLL_PCLK_DIV_D(cfg->pclk_div_d));
+ inno_update_bits(inno, 0xe6, RK3228_PRE_PLL_TMDSCLK_DIV_C_MASK |
+ RK3228_PRE_PLL_TMDSCLK_DIV_A_MASK |
+ RK3228_PRE_PLL_TMDSCLK_DIV_B_MASK,
+ RK3228_PRE_PLL_TMDSCLK_DIV_C(cfg->tmds_div_c) |
+ RK3228_PRE_PLL_TMDSCLK_DIV_A(cfg->tmds_div_a) |
+ RK3228_PRE_PLL_TMDSCLK_DIV_B(cfg->tmds_div_b));
+
+ /* Power up PRE-PLL */
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN, 0);
+
+ /* Wait for Pre-PLL lock */
+ ret = inno_poll(inno, 0xe8, v, v & RK3228_PRE_PLL_LOCK_STATUS,
+ 100, 100000);
+ if (ret) {
+ dev_err(inno->dev, "Pre-PLL locking failed\n");
+ return ret;
+ }
+
+ inno->pixclock = rate;
+
+ return 0;
+}
+
+static const struct clk_ops inno_hdmi_phy_rk3228_clk_ops = {
+ .prepare = inno_hdmi_phy_rk3228_clk_prepare,
+ .unprepare = inno_hdmi_phy_rk3228_clk_unprepare,
+ .is_prepared = inno_hdmi_phy_rk3228_clk_is_prepared,
+ .recalc_rate = inno_hdmi_phy_rk3228_clk_recalc_rate,
+ .round_rate = inno_hdmi_phy_rk3228_clk_round_rate,
+ .set_rate = inno_hdmi_phy_rk3228_clk_set_rate,
+};
+
+static int inno_hdmi_phy_rk3328_clk_is_prepared(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ u8 status;
+
+ status = inno_read(inno, 0xa0) & RK3328_PRE_PLL_POWER_DOWN;
+ return status ? 0 : 1;
+}
+
+static int inno_hdmi_phy_rk3328_clk_prepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN, 0);
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3328_clk_unprepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN,
+ RK3328_PRE_PLL_POWER_DOWN);
+}
+
+static
+unsigned long inno_hdmi_phy_rk3328_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ unsigned long frac;
+ u8 nd, no_a, no_b, no_c, no_d;
+ u64 vco;
+ u16 nf;
+
+ nd = inno_read(inno, 0xa1) & RK3328_PRE_PLL_PRE_DIV_MASK;
+ nf = ((inno_read(inno, 0xa2) & RK3328_PRE_PLL_FB_DIV_11_8_MASK) << 8);
+ nf |= inno_read(inno, 0xa3);
+ vco = parent_rate * nf;
+
+ if (!(inno_read(inno, 0xa2) & RK3328_PRE_PLL_FRAC_DIV_DISABLE)) {
+ frac = inno_read(inno, 0xd3) |
+ (inno_read(inno, 0xd2) << 8) |
+ (inno_read(inno, 0xd1) << 16);
+ vco += DIV_ROUND_CLOSEST(parent_rate * frac, (1 << 24));
+ }
+
+ if (inno_read(inno, 0xa0) & RK3328_PCLK_VCO_DIV_5_MASK) {
+ do_div(vco, nd * 5);
+ } else {
+ no_a = inno_read(inno, 0xa5) & RK3328_PRE_PLL_PCLK_DIV_A_MASK;
+ no_b = inno_read(inno, 0xa5) & RK3328_PRE_PLL_PCLK_DIV_B_MASK;
+ no_b >>= RK3328_PRE_PLL_PCLK_DIV_B_SHIFT;
+ no_b += 2;
+ no_c = inno_read(inno, 0xa6) & RK3328_PRE_PLL_PCLK_DIV_C_MASK;
+ no_c >>= RK3328_PRE_PLL_PCLK_DIV_C_SHIFT;
+ no_c = 1 << no_c;
+ no_d = inno_read(inno, 0xa6) & RK3328_PRE_PLL_PCLK_DIV_D_MASK;
+
+ do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
+ }
+
+ inno->pixclock = vco;
+ dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
+
+ return vco;
+}
+
+static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+
+ for (; cfg->pixclock != 0; cfg++)
+ if (cfg->pixclock == rate)
+ break;
+
+ if (cfg->pixclock == 0)
+ return -EINVAL;
+
+ return cfg->pixclock;
+}
+
+static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
+ u32 val;
+ int ret;
+
+ dev_dbg(inno->dev, "%s rate %lu tmdsclk %lu\n",
+ __func__, rate, tmdsclock);
+
+ cfg = inno_hdmi_phy_get_pre_pll_cfg(inno, rate);
+ if (IS_ERR(cfg))
+ return PTR_ERR(cfg);
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN,
+ RK3328_PRE_PLL_POWER_DOWN);
+
+ /* Configure pre-pll */
+ inno_update_bits(inno, 0xa0, RK3328_PCLK_VCO_DIV_5_MASK,
+ RK3328_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
+ inno_write(inno, 0xa1, RK3328_PRE_PLL_PRE_DIV(cfg->prediv));
+
+ val = RK3328_SPREAD_SPECTRUM_MOD_DISABLE;
+ if (!cfg->fracdiv)
+ val |= RK3328_PRE_PLL_FRAC_DIV_DISABLE;
+ inno_write(inno, 0xa2, RK3328_PRE_PLL_FB_DIV_11_8(cfg->fbdiv) | val);
+ inno_write(inno, 0xa3, RK3328_PRE_PLL_FB_DIV_7_0(cfg->fbdiv));
+ inno_write(inno, 0xa5, RK3328_PRE_PLL_PCLK_DIV_A(cfg->pclk_div_a) |
+ RK3328_PRE_PLL_PCLK_DIV_B(cfg->pclk_div_b));
+ inno_write(inno, 0xa6, RK3328_PRE_PLL_PCLK_DIV_C(cfg->pclk_div_c) |
+ RK3328_PRE_PLL_PCLK_DIV_D(cfg->pclk_div_d));
+ inno_write(inno, 0xa4, RK3328_PRE_PLL_TMDSCLK_DIV_C(cfg->tmds_div_c) |
+ RK3328_PRE_PLL_TMDSCLK_DIV_A(cfg->tmds_div_a) |
+ RK3328_PRE_PLL_TMDSCLK_DIV_B(cfg->tmds_div_b));
+ inno_write(inno, 0xd3, RK3328_PRE_PLL_FRAC_DIV_7_0(cfg->fracdiv));
+ inno_write(inno, 0xd2, RK3328_PRE_PLL_FRAC_DIV_15_8(cfg->fracdiv));
+ inno_write(inno, 0xd1, RK3328_PRE_PLL_FRAC_DIV_23_16(cfg->fracdiv));
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN, 0);
+
+ /* Wait for Pre-PLL lock */
+ ret = inno_poll(inno, 0xa9, val, val & RK3328_PRE_PLL_LOCK_STATUS,
+ 1000, 10000);
+ if (ret) {
+ dev_err(inno->dev, "Pre-PLL locking failed\n");
+ return ret;
+ }
+
+ inno->pixclock = rate;
+
+ return 0;
+}
+
+static const struct clk_ops inno_hdmi_phy_rk3328_clk_ops = {
+ .prepare = inno_hdmi_phy_rk3328_clk_prepare,
+ .unprepare = inno_hdmi_phy_rk3328_clk_unprepare,
+ .is_prepared = inno_hdmi_phy_rk3328_clk_is_prepared,
+ .recalc_rate = inno_hdmi_phy_rk3328_clk_recalc_rate,
+ .round_rate = inno_hdmi_phy_rk3328_clk_round_rate,
+ .set_rate = inno_hdmi_phy_rk3328_clk_set_rate,
+};
+
+static int inno_hdmi_phy_clk_register(struct inno_hdmi_phy *inno)
+{
+ struct device *dev = inno->dev;
+ struct device_node *np = dev->of_node;
+ struct clk_init_data init;
+ const char *parent_name;
+ int ret;
+
+ parent_name = __clk_get_name(inno->refoclk);
+
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+ init.name = "pin_hd20_pclk";
+ init.ops = inno->plat_data->clk_ops;
+
+ /* optional override of the clock name */
+ of_property_read_string(np, "clock-output-names", &init.name);
+
+ inno->hw.init = &init;
+
+ inno->phyclk = devm_clk_register(dev, &inno->hw);
+ if (IS_ERR(inno->phyclk)) {
+ ret = PTR_ERR(inno->phyclk);
+ dev_err(dev, "failed to register clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get, inno->phyclk);
+ if (ret) {
+ dev_err(dev, "failed to register clock provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int inno_hdmi_phy_rk3228_init(struct inno_hdmi_phy *inno)
+{
+ /*
+ * Use the PHY-internal registers to control the
+ * rxsense/poweron/pllpd/pdataen signals.
+ */
+ inno_write(inno, 0x01, RK3228_BYPASS_RXSENSE_EN |
+ RK3228_BYPASS_PWRON_EN |
+ RK3228_BYPASS_PLLPD_EN);
+ inno_update_bits(inno, 0x02, RK3228_BYPASS_PDATA_EN,
+ RK3228_BYPASS_PDATA_EN);
+
+ /* manual power down post-PLL */
+ inno_update_bits(inno, 0xaa, RK3228_POST_PLL_CTRL_MANUAL,
+ RK3228_POST_PLL_CTRL_MANUAL);
+
+ inno->chip_version = 1;
+
+ return 0;
+}
+
+static int
+inno_hdmi_phy_rk3228_power_on(struct inno_hdmi_phy *inno,
+ const struct post_pll_config *cfg,
+ const struct phy_config *phy_cfg)
+{
+ int ret;
+ u32 v;
+
+ inno_update_bits(inno, 0x02, RK3228_PDATAEN_DISABLE,
+ RK3228_PDATAEN_DISABLE);
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN |
+ RK3228_POST_PLL_POWER_DOWN,
+ RK3228_PRE_PLL_POWER_DOWN |
+ RK3228_POST_PLL_POWER_DOWN);
+
+ /* Post-PLL update */
+ inno_update_bits(inno, 0xe9, RK3228_POST_PLL_PRE_DIV_MASK,
+ RK3228_POST_PLL_PRE_DIV(cfg->prediv));
+ inno_update_bits(inno, 0xeb, RK3228_POST_PLL_FB_DIV_8_MASK,
+ RK3228_POST_PLL_FB_DIV_8(cfg->fbdiv));
+ inno_write(inno, 0xea, RK3228_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
+
+ if (cfg->postdiv == 1) {
+ inno_update_bits(inno, 0xe9, RK3228_POST_PLL_POST_DIV_ENABLE,
+ 0);
+ } else {
+ int div = cfg->postdiv / 2 - 1;
+
+ inno_update_bits(inno, 0xe9, RK3228_POST_PLL_POST_DIV_ENABLE,
+ RK3228_POST_PLL_POST_DIV_ENABLE);
+ inno_update_bits(inno, 0xeb, RK3228_POST_PLL_POST_DIV_MASK,
+ RK3228_POST_PLL_POST_DIV(div));
+ }
+
+ for (v = 0; v < 4; v++)
+ inno_write(inno, 0xef + v, phy_cfg->regs[v]);
+
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN |
+ RK3228_POST_PLL_POWER_DOWN, 0);
+ inno_update_bits(inno, 0xe1, RK3228_BANDGAP_ENABLE,
+ RK3228_BANDGAP_ENABLE);
+ inno_update_bits(inno, 0xe1, RK3228_TMDS_DRIVER_ENABLE,
+ RK3228_TMDS_DRIVER_ENABLE);
+
+ /* Wait for post PLL lock */
+ ret = inno_poll(inno, 0xeb, v, v & RK3228_POST_PLL_LOCK_STATUS,
+ 100, 100000);
+ if (ret) {
+ dev_err(inno->dev, "Post-PLL locking failed\n");
+ return ret;
+ }
+
+ if (cfg->tmdsclock > 340000000)
+ msleep(100);
+
+ inno_update_bits(inno, 0x02, RK3228_PDATAEN_DISABLE, 0);
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3228_power_off(struct inno_hdmi_phy *inno)
+{
+ inno_update_bits(inno, 0xe1, RK3228_TMDS_DRIVER_ENABLE, 0);
+ inno_update_bits(inno, 0xe1, RK3228_BANDGAP_ENABLE, 0);
+ inno_update_bits(inno, 0xe0, RK3228_POST_PLL_POWER_DOWN,
+ RK3228_POST_PLL_POWER_DOWN);
+}
+
+static const struct inno_hdmi_phy_ops rk3228_hdmi_phy_ops = {
+ .init = inno_hdmi_phy_rk3228_init,
+ .power_on = inno_hdmi_phy_rk3228_power_on,
+ .power_off = inno_hdmi_phy_rk3228_power_off,
+};
+
+static int inno_hdmi_phy_rk3328_init(struct inno_hdmi_phy *inno)
+{
+ struct nvmem_cell *cell;
+ unsigned char *efuse_buf;
+ size_t len;
+
+ /*
+ * Use the PHY-internal registers to control the
+ * rxsense/poweron/pllpd/pdataen signals.
+ */
+ inno_write(inno, 0x01, RK3328_BYPASS_RXSENSE_EN |
+ RK3328_BYPASS_POWERON_EN |
+ RK3328_BYPASS_PLLPD_EN);
+ inno_write(inno, 0x02, RK3328_INT_POL_HIGH | RK3328_BYPASS_PDATA_EN |
+ RK3328_PDATA_EN);
+
+ /* Disable phy irq */
+ inno_write(inno, 0x05, 0);
+ inno_write(inno, 0x07, 0);
+
+ /* try to read the chip-version */
+ inno->chip_version = 1;
+ cell = nvmem_cell_get(inno->dev, "cpu-version");
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ return 0;
+ }
+
+ efuse_buf = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(efuse_buf))
+ return 0;
+ if (len == 1)
+ inno->chip_version = efuse_buf[0] + 1;
+ kfree(efuse_buf);
+
+ return 0;
+}
+
+static int
+inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
+ const struct post_pll_config *cfg,
+ const struct phy_config *phy_cfg)
+{
+ int ret;
+ u32 v;
+
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, 0);
+ inno_update_bits(inno, 0xaa, RK3328_POST_PLL_POWER_DOWN,
+ RK3328_POST_PLL_POWER_DOWN);
+
+ inno_write(inno, 0xac, RK3328_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
+ if (cfg->postdiv == 1) {
+ inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
+ RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ } else {
+ v = (cfg->postdiv / 2) - 1;
+ v &= RK3328_POST_PLL_POST_DIV_MASK;
+ inno_write(inno, 0xad, v);
+ inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
+ RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ inno_write(inno, 0xaa, RK3328_POST_PLL_POST_DIV_ENABLE |
+ RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ }
+
+ for (v = 0; v < 14; v++)
+ inno_write(inno, 0xb5 + v, phy_cfg->regs[v]);
+
+ /* set ESD detection threshold for TMDS CLK, D2, D1 and D0 */
+ for (v = 0; v < 4; v++)
+ inno_update_bits(inno, 0xc8 + v, RK3328_ESD_DETECT_MASK,
+ RK3328_ESD_DETECT_340MV);
+
+ if (phy_cfg->tmdsclock > 340000000) {
+ /* Set termination resistor to 100ohm */
+ v = clk_get_rate(inno->sysclk) / 100000;
+ inno_write(inno, 0xc5, RK3328_TERM_RESISTOR_CALIB_SPEED_14_8(v)
+ | RK3328_BYPASS_TERM_RESISTOR_CALIB);
+ inno_write(inno, 0xc6, RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(v));
+ inno_write(inno, 0xc7, RK3328_TERM_RESISTOR_100);
+ inno_update_bits(inno, 0xc5,
+ RK3328_BYPASS_TERM_RESISTOR_CALIB, 0);
+ } else {
+ inno_write(inno, 0xc5, RK3328_BYPASS_TERM_RESISTOR_CALIB);
+
+ /* clk termination resistor is 50ohm (parallel resistors) */
+ if (phy_cfg->tmdsclock > 165000000)
+ inno_update_bits(inno, 0xc8,
+ RK3328_TMDS_TERM_RESIST_MASK,
+ RK3328_TMDS_TERM_RESIST_75 |
+ RK3328_TMDS_TERM_RESIST_150);
+
+ /* data termination resistor for D2, D1 and D0 is 150ohm */
+ for (v = 0; v < 3; v++)
+ inno_update_bits(inno, 0xc9 + v,
+ RK3328_TMDS_TERM_RESIST_MASK,
+ RK3328_TMDS_TERM_RESIST_150);
+ }
+
+ inno_update_bits(inno, 0xaa, RK3328_POST_PLL_POWER_DOWN, 0);
+ inno_update_bits(inno, 0xb0, RK3328_BANDGAP_ENABLE,
+ RK3328_BANDGAP_ENABLE);
+ inno_update_bits(inno, 0xb2, RK3328_TMDS_DRIVER_ENABLE,
+ RK3328_TMDS_DRIVER_ENABLE);
+
+ /* Wait for post PLL lock */
+ ret = inno_poll(inno, 0xaf, v, v & RK3328_POST_PLL_LOCK_STATUS,
+ 1000, 10000);
+ if (ret) {
+ dev_err(inno->dev, "Post-PLL locking failed\n");
+ return ret;
+ }
+
+ if (phy_cfg->tmdsclock > 340000000)
+ msleep(100);
+
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, RK3328_PDATA_EN);
+
+ /* Enable PHY IRQ */
+ inno_write(inno, 0x05, RK3328_INT_TMDS_CLK(RK3328_INT_VSS_AGND_ESD_DET)
+ | RK3328_INT_TMDS_D2(RK3328_INT_VSS_AGND_ESD_DET));
+ inno_write(inno, 0x07, RK3328_INT_TMDS_D1(RK3328_INT_VSS_AGND_ESD_DET)
+ | RK3328_INT_TMDS_D0(RK3328_INT_VSS_AGND_ESD_DET));
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3328_power_off(struct inno_hdmi_phy *inno)
+{
+ inno_update_bits(inno, 0xb2, RK3328_TMDS_DRIVER_ENABLE, 0);
+ inno_update_bits(inno, 0xb0, RK3328_BANDGAP_ENABLE, 0);
+ inno_update_bits(inno, 0xaa, RK3328_POST_PLL_POWER_DOWN,
+ RK3328_POST_PLL_POWER_DOWN);
+
+ /* Disable PHY IRQ */
+ inno_write(inno, 0x05, 0);
+ inno_write(inno, 0x07, 0);
+}
+
+static const struct inno_hdmi_phy_ops rk3328_hdmi_phy_ops = {
+ .init = inno_hdmi_phy_rk3328_init,
+ .power_on = inno_hdmi_phy_rk3328_power_on,
+ .power_off = inno_hdmi_phy_rk3328_power_off,
+};
+
+static const struct inno_hdmi_phy_drv_data rk3228_hdmi_phy_drv_data = {
+ .ops = &rk3228_hdmi_phy_ops,
+ .clk_ops = &inno_hdmi_phy_rk3228_clk_ops,
+ .phy_cfg_table = rk3228_phy_cfg,
+};
+
+static const struct inno_hdmi_phy_drv_data rk3328_hdmi_phy_drv_data = {
+ .ops = &rk3328_hdmi_phy_ops,
+ .clk_ops = &inno_hdmi_phy_rk3328_clk_ops,
+ .phy_cfg_table = rk3328_phy_cfg,
+};
+
+static const struct regmap_config inno_hdmi_phy_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x400,
+};
+
+static void inno_hdmi_phy_action(void *data)
+{
+ struct inno_hdmi_phy *inno = data;
+
+ clk_disable_unprepare(inno->refpclk);
+ clk_disable_unprepare(inno->sysclk);
+}
+
+static int inno_hdmi_phy_probe(struct platform_device *pdev)
+{
+ struct inno_hdmi_phy *inno;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ inno = devm_kzalloc(&pdev->dev, sizeof(*inno), GFP_KERNEL);
+ if (!inno)
+ return -ENOMEM;
+
+ inno->dev = &pdev->dev;
+
+ inno->plat_data = of_device_get_match_data(inno->dev);
+ if (!inno->plat_data || !inno->plat_data->ops)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(inno->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ inno->sysclk = devm_clk_get(inno->dev, "sysclk");
+ if (IS_ERR(inno->sysclk)) {
+ ret = PTR_ERR(inno->sysclk);
+ dev_err(inno->dev, "failed to get sysclk: %d\n", ret);
+ return ret;
+ }
+
+ inno->refpclk = devm_clk_get(inno->dev, "refpclk");
+ if (IS_ERR(inno->refpclk)) {
+ ret = PTR_ERR(inno->refpclk);
+ dev_err(inno->dev, "failed to get ref clock: %d\n", ret);
+ return ret;
+ }
+
+ inno->refoclk = devm_clk_get(inno->dev, "refoclk");
+ if (IS_ERR(inno->refoclk)) {
+ ret = PTR_ERR(inno->refoclk);
+ dev_err(inno->dev, "failed to get oscillator-ref clock: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(inno->sysclk);
+ if (ret) {
+ dev_err(inno->dev, "Cannot enable inno phy sysclk: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The refpclk needs to stay enabled, at least on the rk3328,
+ * for reasons that are still unknown.
+ */
+ ret = clk_prepare_enable(inno->refpclk);
+ if (ret) {
+ dev_err(inno->dev, "failed to enable refpclk\n");
+ clk_disable_unprepare(inno->sysclk);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(inno->dev, inno_hdmi_phy_action,
+ inno);
+ if (ret)
+ return ret;
+
+ inno->regmap = devm_regmap_init_mmio(inno->dev, regs,
+ &inno_hdmi_phy_regmap_config);
+ if (IS_ERR(inno->regmap))
+ return PTR_ERR(inno->regmap);
+
+ /* only the newer rk3328 hdmiphy has an interrupt */
+ inno->irq = platform_get_irq(pdev, 0);
+ if (inno->irq > 0) {
+ ret = devm_request_threaded_irq(inno->dev, inno->irq,
+ inno_hdmi_phy_rk3328_hardirq,
+ inno_hdmi_phy_rk3328_irq,
+ IRQF_SHARED,
+ dev_name(inno->dev), inno);
+ if (ret)
+ return ret;
+ }
+
+ inno->phy = devm_phy_create(inno->dev, NULL, &inno_hdmi_phy_ops);
+ if (IS_ERR(inno->phy)) {
+ dev_err(inno->dev, "failed to create HDMI PHY\n");
+ return PTR_ERR(inno->phy);
+ }
+
+ phy_set_drvdata(inno->phy, inno);
+ phy_set_bus_width(inno->phy, 8);
+
+ if (inno->plat_data->ops->init) {
+ ret = inno->plat_data->ops->init(inno);
+ if (ret)
+ return ret;
+ }
+
+ ret = inno_hdmi_phy_clk_register(inno);
+ if (ret)
+ return ret;
+
+ phy_provider = devm_of_phy_provider_register(inno->dev,
+ of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static int inno_hdmi_phy_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id inno_hdmi_phy_of_match[] = {
+ {
+ .compatible = "rockchip,rk3228-hdmi-phy",
+ .data = &rk3228_hdmi_phy_drv_data
+ }, {
+ .compatible = "rockchip,rk3328-hdmi-phy",
+ .data = &rk3328_hdmi_phy_drv_data
+ }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, inno_hdmi_phy_of_match);
+
+static struct platform_driver inno_hdmi_phy_driver = {
+ .probe = inno_hdmi_phy_probe,
+ .remove = inno_hdmi_phy_remove,
+ .driver = {
+ .name = "inno-hdmi-phy",
+ .of_match_table = inno_hdmi_phy_of_match,
+ },
+};
+module_platform_driver(inno_hdmi_phy_driver);
+
+MODULE_AUTHOR("Zheng Yang <zhengyang@rock-chips.com>");
+MODULE_DESCRIPTION("Innosilion HDMI 2.0 Transmitter PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 5049dac79bd0..24bd2717abdb 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1116,8 +1116,8 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
}
if (of_property_read_u32(np, "reg", &reg)) {
- dev_err(dev, "the reg property is not assigned in %s node\n",
- np->name);
+ dev_err(dev, "the reg property is not assigned in %pOFn node\n",
+ np);
return -EINVAL;
}
@@ -1143,8 +1143,8 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
}
if (!rphy->phy_cfg) {
- dev_err(dev, "no phy-config can be matched with %s node\n",
- np->name);
+ dev_err(dev, "no phy-config can be matched with %pOFn node\n",
+ np);
return -EINVAL;
}
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 76a4b58ec771..c57e496f0b0c 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1145,8 +1145,8 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
}
if (!tcphy->port_cfgs) {
- dev_err(dev, "no phy-config can be matched with %s node\n",
- np->name);
+ dev_err(dev, "no phy-config can be matched with %pOFn node\n",
+ np);
return -EINVAL;
}
@@ -1186,8 +1186,8 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
continue;
if (IS_ERR(phy)) {
- dev_err(dev, "failed to create phy: %s\n",
- child_np->name);
+ dev_err(dev, "failed to create phy: %pOFn\n",
+ child_np);
pm_runtime_disable(dev);
return PTR_ERR(phy);
}
diff --git a/drivers/phy/rockchip/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c
index 3378eeb7a562..b2899c744ad9 100644
--- a/drivers/phy/rockchip/phy-rockchip-usb.c
+++ b/drivers/phy/rockchip/phy-rockchip-usb.c
@@ -36,7 +36,22 @@ static int enable_usb_uart;
#define HIWORD_UPDATE(val, mask) \
((val) | (mask) << 16)
-#define UOC_CON0_SIDDQ BIT(13)
+#define UOC_CON0 0x00
+#define UOC_CON0_SIDDQ BIT(13)
+#define UOC_CON0_DISABLE BIT(4)
+#define UOC_CON0_COMMON_ON_N BIT(0)
+
+#define UOC_CON2 0x08
+#define UOC_CON2_SOFT_CON_SEL BIT(2)
+
+#define UOC_CON3 0x0c
+/* bits present on rk3188 and rk3288 phys */
+#define UOC_CON3_UTMI_TERMSEL_FULLSPEED BIT(5)
+#define UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC (1 << 3)
+#define UOC_CON3_UTMI_XCVRSEELCT_MASK (3 << 3)
+#define UOC_CON3_UTMI_OPMODE_NODRIVING (1 << 1)
+#define UOC_CON3_UTMI_OPMODE_MASK (3 << 1)
+#define UOC_CON3_UTMI_SUSPENDN BIT(0)
struct rockchip_usb_phys {
int reg;
@@ -46,7 +61,8 @@ struct rockchip_usb_phys {
struct rockchip_usb_phy_base;
struct rockchip_usb_phy_pdata {
struct rockchip_usb_phys *phys;
- int (*init_usb_uart)(struct regmap *grf);
+ int (*init_usb_uart)(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata);
int usb_uart_phy;
};
@@ -208,8 +224,8 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
rk_phy->np = child;
if (of_property_read_u32(child, "reg", &reg_offset)) {
- dev_err(base->dev, "missing reg property in node %s\n",
- child->name);
+ dev_err(base->dev, "missing reg property in node %pOFn\n",
+ child);
return -EINVAL;
}
@@ -313,28 +329,88 @@ static const struct rockchip_usb_phy_pdata rk3066a_pdata = {
},
};
+static int __init rockchip_init_usb_uart_common(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata)
+{
+ int regoffs = pdata->phys[pdata->usb_uart_phy].reg;
+ int ret;
+ u32 val;
+
+ /*
+ * COMMON_ON and DISABLE settings are described in the TRM,
+ * but were not present in the original code.
+ * Also disable the analog phy components to save power.
+ */
+ val = HIWORD_UPDATE(UOC_CON0_COMMON_ON_N
+ | UOC_CON0_DISABLE
+ | UOC_CON0_SIDDQ,
+ UOC_CON0_COMMON_ON_N
+ | UOC_CON0_DISABLE
+ | UOC_CON0_SIDDQ);
+ ret = regmap_write(grf, regoffs + UOC_CON0, val);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(UOC_CON2_SOFT_CON_SEL,
+ UOC_CON2_SOFT_CON_SEL);
+ ret = regmap_write(grf, regoffs + UOC_CON2, val);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(UOC_CON3_UTMI_OPMODE_NODRIVING
+ | UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC
+ | UOC_CON3_UTMI_TERMSEL_FULLSPEED,
+ UOC_CON3_UTMI_SUSPENDN
+ | UOC_CON3_UTMI_OPMODE_MASK
+ | UOC_CON3_UTMI_XCVRSEELCT_MASK
+ | UOC_CON3_UTMI_TERMSEL_FULLSPEED);
+ ret = regmap_write(grf, regoffs + UOC_CON3, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#define RK3188_UOC0_CON0 0x10c
+#define RK3188_UOC0_CON0_BYPASSSEL BIT(9)
+#define RK3188_UOC0_CON0_BYPASSDMEN BIT(8)
+
+/*
+ * Enable the bypass of uart2 data through the otg usb phy.
+ * See description of rk3288-variant for details.
+ */
+static int __init rk3188_init_usb_uart(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata)
+{
+ u32 val;
+ int ret;
+
+ ret = rockchip_init_usb_uart_common(grf, pdata);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(RK3188_UOC0_CON0_BYPASSSEL
+ | RK3188_UOC0_CON0_BYPASSDMEN,
+ RK3188_UOC0_CON0_BYPASSSEL
+ | RK3188_UOC0_CON0_BYPASSDMEN);
+ ret = regmap_write(grf, RK3188_UOC0_CON0, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct rockchip_usb_phy_pdata rk3188_pdata = {
.phys = (struct rockchip_usb_phys[]){
{ .reg = 0x10c, .pll_name = "sclk_otgphy0_480m" },
{ .reg = 0x11c, .pll_name = "sclk_otgphy1_480m" },
{ /* sentinel */ }
},
+ .init_usb_uart = rk3188_init_usb_uart,
+ .usb_uart_phy = 0,
};
-#define RK3288_UOC0_CON0 0x320
-#define RK3288_UOC0_CON0_COMMON_ON_N BIT(0)
-#define RK3288_UOC0_CON0_DISABLE BIT(4)
-
-#define RK3288_UOC0_CON2 0x328
-#define RK3288_UOC0_CON2_SOFT_CON_SEL BIT(2)
-
#define RK3288_UOC0_CON3 0x32c
-#define RK3288_UOC0_CON3_UTMI_SUSPENDN BIT(0)
-#define RK3288_UOC0_CON3_UTMI_OPMODE_NODRIVING (1 << 1)
-#define RK3288_UOC0_CON3_UTMI_OPMODE_MASK (3 << 1)
-#define RK3288_UOC0_CON3_UTMI_XCVRSEELCT_FSTRANSC (1 << 3)
-#define RK3288_UOC0_CON3_UTMI_XCVRSEELCT_MASK (3 << 3)
-#define RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED BIT(5)
#define RK3288_UOC0_CON3_BYPASSDMEN BIT(6)
#define RK3288_UOC0_CON3_BYPASSSEL BIT(7)
@@ -353,40 +429,13 @@ static const struct rockchip_usb_phy_pdata rk3188_pdata = {
*
* The actual code in the vendor kernel does some things differently.
*/
-static int __init rk3288_init_usb_uart(struct regmap *grf)
+static int __init rk3288_init_usb_uart(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata)
{
u32 val;
int ret;
- /*
- * COMMON_ON and DISABLE settings are described in the TRM,
- * but were not present in the original code.
- * Also disable the analog phy components to save power.
- */
- val = HIWORD_UPDATE(RK3288_UOC0_CON0_COMMON_ON_N
- | RK3288_UOC0_CON0_DISABLE
- | UOC_CON0_SIDDQ,
- RK3288_UOC0_CON0_COMMON_ON_N
- | RK3288_UOC0_CON0_DISABLE
- | UOC_CON0_SIDDQ);
- ret = regmap_write(grf, RK3288_UOC0_CON0, val);
- if (ret)
- return ret;
-
- val = HIWORD_UPDATE(RK3288_UOC0_CON2_SOFT_CON_SEL,
- RK3288_UOC0_CON2_SOFT_CON_SEL);
- ret = regmap_write(grf, RK3288_UOC0_CON2, val);
- if (ret)
- return ret;
-
- val = HIWORD_UPDATE(RK3288_UOC0_CON3_UTMI_OPMODE_NODRIVING
- | RK3288_UOC0_CON3_UTMI_XCVRSEELCT_FSTRANSC
- | RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED,
- RK3288_UOC0_CON3_UTMI_SUSPENDN
- | RK3288_UOC0_CON3_UTMI_OPMODE_MASK
- | RK3288_UOC0_CON3_UTMI_XCVRSEELCT_MASK
- | RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED);
- ret = regmap_write(grf, RK3288_UOC0_CON3, val);
+ ret = rockchip_init_usb_uart_common(grf, pdata);
if (ret)
return ret;
@@ -516,7 +565,7 @@ static int __init rockchip_init_usb_uart(void)
return PTR_ERR(grf);
}
- ret = data->init_usb_uart(grf);
+ ret = data->init_usb_uart(grf, data);
if (ret) {
pr_err("%s: could not init usb_uart, %d\n", __func__, ret);
enable_usb_uart = 0;
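
For readers unfamiliar with the HIWORD_UPDATE() idiom used throughout the rockchip-usb changes above: Rockchip GRF registers carry a write-enable mask in their upper 16 bits, so (val) | (mask) << 16 updates only the masked bits of the lower half and leaves everything else untouched, with no read-modify-write cycle. A worked example (a sketch following the macro definition, not part of the patch):

	u32 val;

	/* set SIDDQ (bit 13) only; every other bit keeps its value */
	val = HIWORD_UPDATE(UOC_CON0_SIDDQ, UOC_CON0_SIDDQ);
	/*  = BIT(13) | BIT(13) << 16 == 0x20002000 */
	regmap_write(grf, regoffs + UOC_CON0, val);

	/* clear the same bit: value 0, write-enable mask still set */
	regmap_write(grf, regoffs + UOC_CON0, HIWORD_UPDATE(0, UOC_CON0_SIDDQ));
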
diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig
new file mode 100644
index 000000000000..467e8147972b
--- /dev/null
+++ b/drivers/phy/socionext/Kconfig
@@ -0,0 +1,34 @@
+#
+# PHY drivers for Socionext platforms.
+#
+
+config PHY_UNIPHIER_USB2
+ tristate "UniPhier USB2 PHY driver"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+	  Enable this to support the USB PHY implemented on the USB2
+	  controller on UniPhier SoCs. This driver provides an interface
+	  to interact with the USB 2.0 PHY that is part of the UniPhier SoC.
+	  On Pro4 it is necessary to specify this USB2 PHY instead of the
+	  USB3 HS-PHY.
+
+config PHY_UNIPHIER_USB3
+ tristate "UniPhier USB3 PHY driver"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ help
+	  Enable this to support the USB PHY implemented in the USB3
+	  controller on UniPhier SoCs. This controller supports USB 3.0
+	  and lower speeds.
+
+config PHY_UNIPHIER_PCIE
+ tristate "Uniphier PHY driver for PCIe controller"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ default PCIE_UNIPHIER
+ select GENERIC_PHY
+ help
+	  Enable this to support the PHY implemented in the PCIe controller
+	  on UniPhier SoCs. This driver supports the LD20 and PXs3 SoCs.
diff --git a/drivers/phy/socionext/Makefile b/drivers/phy/socionext/Makefile
new file mode 100644
index 000000000000..7dc9095b5bb7
--- /dev/null
+++ b/drivers/phy/socionext/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the phy drivers.
+#
+
+obj-$(CONFIG_PHY_UNIPHIER_USB2) += phy-uniphier-usb2.o
+obj-$(CONFIG_PHY_UNIPHIER_USB3) += phy-uniphier-usb3hs.o phy-uniphier-usb3ss.o
+obj-$(CONFIG_PHY_UNIPHIER_PCIE) += phy-uniphier-pcie.o
diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
new file mode 100644
index 000000000000..93ffbd2940fa
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-pcie.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-pcie.c - PHY driver for UniPhier PCIe controller
+ * Copyright 2018, Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+
+/* PHY */
+#define PCL_PHY_TEST_I 0x2000
+#define PCL_PHY_TEST_O 0x2004
+#define TESTI_DAT_MASK GENMASK(13, 6)
+#define TESTI_ADR_MASK GENMASK(5, 1)
+#define TESTI_WR_EN BIT(0)
+
+#define PCL_PHY_RESET 0x200c
+#define PCL_PHY_RESET_N_MNMODE BIT(8) /* =1:manual */
+#define PCL_PHY_RESET_N		BIT(0)	/* =1:deassert */
+
+/* SG */
+#define SG_USBPCIESEL 0x590
+#define SG_USBPCIESEL_PCIE BIT(0)
+
+#define PCL_PHY_R00 0
+#define RX_EQ_ADJ_EN BIT(3) /* enable for EQ adjustment */
+#define PCL_PHY_R06 6
+#define RX_EQ_ADJ GENMASK(5, 0) /* EQ adjustment value */
+#define RX_EQ_ADJ_VAL 0
+#define PCL_PHY_R26 26
+#define VCO_CTRL GENMASK(7, 4) /* Tx VCO adjustment value */
+#define VCO_CTRL_INIT_VAL 5
+
+struct uniphier_pciephy_priv {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ struct reset_control *rst;
+ const struct uniphier_pciephy_soc_data *data;
+};
+
+struct uniphier_pciephy_soc_data {
+ bool has_syscon;
+};
+
+static void uniphier_pciephy_testio_write(struct uniphier_pciephy_priv *priv,
+ u32 data)
+{
+ /* need to read TESTO twice after accessing TESTI */
+ writel(data, priv->base + PCL_PHY_TEST_I);
+ readl(priv->base + PCL_PHY_TEST_O);
+ readl(priv->base + PCL_PHY_TEST_O);
+}
+
+static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv,
+ u32 reg, u32 mask, u32 param)
+{
+ u32 val;
+
+ /* read previous data */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, reg);
+ uniphier_pciephy_testio_write(priv, val);
+ val = readl(priv->base + PCL_PHY_TEST_O);
+
+ /* update value */
+ val &= ~FIELD_PREP(TESTI_DAT_MASK, mask);
+ val = FIELD_PREP(TESTI_DAT_MASK, mask & param);
+ val |= FIELD_PREP(TESTI_ADR_MASK, reg);
+ uniphier_pciephy_testio_write(priv, val);
+ uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN);
+ uniphier_pciephy_testio_write(priv, val);
+
+ /* read current data as dummy */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, reg);
+ uniphier_pciephy_testio_write(priv, val);
+ readl(priv->base + PCL_PHY_TEST_O);
+}
+
+static void uniphier_pciephy_assert(struct uniphier_pciephy_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_PHY_RESET);
+ val &= ~PCL_PHY_RESET_N;
+ val |= PCL_PHY_RESET_N_MNMODE;
+ writel(val, priv->base + PCL_PHY_RESET);
+}
+
+static void uniphier_pciephy_deassert(struct uniphier_pciephy_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_PHY_RESET);
+ val |= PCL_PHY_RESET_N_MNMODE | PCL_PHY_RESET_N;
+ writel(val, priv->base + PCL_PHY_RESET);
+}
+
+static int uniphier_pciephy_init(struct phy *phy)
+{
+ struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ uniphier_pciephy_set_param(priv, PCL_PHY_R00,
+ RX_EQ_ADJ_EN, RX_EQ_ADJ_EN);
+ uniphier_pciephy_set_param(priv, PCL_PHY_R06, RX_EQ_ADJ,
+ FIELD_PREP(RX_EQ_ADJ, RX_EQ_ADJ_VAL));
+ uniphier_pciephy_set_param(priv, PCL_PHY_R26, VCO_CTRL,
+ FIELD_PREP(VCO_CTRL, VCO_CTRL_INIT_VAL));
+ usleep_range(1, 10);
+
+ uniphier_pciephy_deassert(priv);
+ usleep_range(1, 10);
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static int uniphier_pciephy_exit(struct phy *phy)
+{
+ struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
+
+ uniphier_pciephy_assert(priv);
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct phy_ops uniphier_pciephy_ops = {
+ .init = uniphier_pciephy_init,
+ .exit = uniphier_pciephy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_pciephy_probe(struct platform_device *pdev)
+{
+ struct uniphier_pciephy_priv *priv;
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ struct resource *res;
+ struct phy *phy;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data))
+ return -EINVAL;
+
+ priv->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ phy = devm_phy_create(dev, dev->of_node, &uniphier_pciephy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "socionext,syscon");
+ if (!IS_ERR(regmap) && priv->data->has_syscon)
+ regmap_update_bits(regmap, SG_USBPCIESEL,
+ SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct uniphier_pciephy_soc_data uniphier_ld20_data = {
+ .has_syscon = true,
+};
+
+static const struct uniphier_pciephy_soc_data uniphier_pxs3_data = {
+ .has_syscon = false,
+};
+
+static const struct of_device_id uniphier_pciephy_match[] = {
+ {
+ .compatible = "socionext,uniphier-ld20-pcie-phy",
+ .data = &uniphier_ld20_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-pcie-phy",
+ .data = &uniphier_pxs3_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, uniphier_pciephy_match);
+
+static struct platform_driver uniphier_pciephy_driver = {
+ .probe = uniphier_pciephy_probe,
+ .driver = {
+ .name = "uniphier-pcie-phy",
+ .of_match_table = uniphier_pciephy_match,
+ },
+};
+module_platform_driver(uniphier_pciephy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PHY driver for PCIe controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/socionext/phy-uniphier-usb2.c b/drivers/phy/socionext/phy-uniphier-usb2.c
new file mode 100644
index 000000000000..3f2086ed4fe4
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-usb2.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-usb2.c - PHY driver for UniPhier USB2 controller
+ * Copyright 2015-2018 Socionext Inc.
+ * Author:
+ * Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define SG_USBPHY1CTRL 0x500
+#define SG_USBPHY1CTRL2 0x504
+#define SG_USBPHY2CTRL 0x508
+#define SG_USBPHY2CTRL2 0x50c /* LD11 */
+#define SG_USBPHY12PLL 0x50c /* Pro4 */
+#define SG_USBPHY3CTRL 0x510
+#define SG_USBPHY3CTRL2 0x514
+#define SG_USBPHY4CTRL 0x518 /* Pro4 */
+#define SG_USBPHY4CTRL2 0x51c /* Pro4 */
+#define SG_USBPHY34PLL 0x51c /* Pro4 */
+
+struct uniphier_u2phy_param {
+ u32 offset;
+ u32 value;
+};
+
+struct uniphier_u2phy_soc_data {
+ struct uniphier_u2phy_param config0;
+ struct uniphier_u2phy_param config1;
+};
+
+struct uniphier_u2phy_priv {
+ struct regmap *regmap;
+ struct phy *phy;
+ struct regulator *vbus;
+ const struct uniphier_u2phy_soc_data *data;
+ struct uniphier_u2phy_priv *next;
+};
+
+static int uniphier_u2phy_power_on(struct phy *phy)
+{
+ struct uniphier_u2phy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->vbus)
+ ret = regulator_enable(priv->vbus);
+
+ return ret;
+}
+
+static int uniphier_u2phy_power_off(struct phy *phy)
+{
+ struct uniphier_u2phy_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
+ return 0;
+}
+
+static int uniphier_u2phy_init(struct phy *phy)
+{
+ struct uniphier_u2phy_priv *priv = phy_get_drvdata(phy);
+
+ if (!priv->data)
+ return 0;
+
+ regmap_write(priv->regmap, priv->data->config0.offset,
+ priv->data->config0.value);
+ regmap_write(priv->regmap, priv->data->config1.offset,
+ priv->data->config1.value);
+
+ return 0;
+}
+
+static struct phy *uniphier_u2phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct uniphier_u2phy_priv *priv = dev_get_drvdata(dev);
+
+ while (priv && args->np != priv->phy->dev.of_node)
+ priv = priv->next;
+
+ if (!priv) {
+ dev_err(dev, "Failed to find appropriate phy\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return priv->phy;
+}
+
+static const struct phy_ops uniphier_u2phy_ops = {
+ .init = uniphier_u2phy_init,
+ .power_on = uniphier_u2phy_power_on,
+ .power_off = uniphier_u2phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_u2phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent, *child;
+ struct uniphier_u2phy_priv *priv = NULL, *next = NULL;
+ struct phy_provider *phy_provider;
+ struct regmap *regmap;
+ const struct uniphier_u2phy_soc_data *data;
+ int ret, data_idx, ndatas;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+	/* count the SoC data entries, up to the zero-filled sentinel */
+ for (ndatas = 0; data[ndatas].config0.offset; ndatas++)
+ ;
+
+ parent = of_get_parent(dev->of_node);
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to get regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ for_each_child_of_node(dev->of_node, child) {
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_put_child;
+ }
+ priv->regmap = regmap;
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER) {
+ ret = PTR_ERR(priv->vbus);
+ goto out_put_child;
+ }
+ priv->vbus = NULL;
+ }
+
+ priv->phy = devm_phy_create(dev, child, &uniphier_u2phy_ops);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "Failed to create phy\n");
+ ret = PTR_ERR(priv->phy);
+ goto out_put_child;
+ }
+
+ ret = of_property_read_u32(child, "reg", &data_idx);
+ if (ret) {
+ dev_err(dev, "Failed to get reg property\n");
+ goto out_put_child;
+ }
+
+ if (data_idx < ndatas)
+ priv->data = &data[data_idx];
+ else
+ dev_warn(dev, "No phy configuration: %s\n",
+ child->full_name);
+
+ phy_set_drvdata(priv->phy, priv);
+ priv->next = next;
+ next = priv;
+ }
+
+ dev_set_drvdata(dev, priv);
+ phy_provider = devm_of_phy_provider_register(dev,
+ uniphier_u2phy_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+out_put_child:
+ of_node_put(child);
+
+ return ret;
+}
+
+static const struct uniphier_u2phy_soc_data uniphier_pro4_data[] = {
+ {
+ .config0 = { SG_USBPHY1CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY12PLL, 0x00010010 },
+ },
+ {
+ .config0 = { SG_USBPHY2CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY12PLL, 0x00010010 },
+ },
+ {
+ .config0 = { SG_USBPHY3CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY34PLL, 0x00010010 },
+ },
+ {
+ .config0 = { SG_USBPHY4CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY34PLL, 0x00010010 },
+ },
+ { /* sentinel */ }
+};
+
+static const struct uniphier_u2phy_soc_data uniphier_ld11_data[] = {
+ {
+ .config0 = { SG_USBPHY1CTRL, 0x82280000 },
+ .config1 = { SG_USBPHY1CTRL2, 0x00000106 },
+ },
+ {
+ .config0 = { SG_USBPHY2CTRL, 0x82280000 },
+ .config1 = { SG_USBPHY2CTRL2, 0x00000106 },
+ },
+ {
+ .config0 = { SG_USBPHY3CTRL, 0x82280000 },
+ .config1 = { SG_USBPHY3CTRL2, 0x00000106 },
+ },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id uniphier_u2phy_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro4-usb2-phy",
+ .data = &uniphier_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-usb2-phy",
+ .data = &uniphier_ld11_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_u2phy_match);
+
+static struct platform_driver uniphier_u2phy_driver = {
+ .probe = uniphier_u2phy_probe,
+ .driver = {
+ .name = "uniphier-usb2-phy",
+ .of_match_table = uniphier_u2phy_match,
+ },
+};
+module_platform_driver(uniphier_u2phy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PHY driver for USB2 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/socionext/phy-uniphier-usb3hs.c b/drivers/phy/socionext/phy-uniphier-usb3hs.c
new file mode 100644
index 000000000000..b1b048be6166
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-usb3hs.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-usb3hs.c - HS-PHY driver for Socionext UniPhier USB3 controller
+ * Copyright 2015-2018 Socionext Inc.
+ * Author:
+ * Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ * Contributors:
+ * Motoya Tanigawa <tanigawa.motoya@socionext.com>
+ * Masami Hiramatsu <masami.hiramatsu@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define HSPHY_CFG0 0x0
+#define HSPHY_CFG0_HS_I_MASK GENMASK(31, 28)
+#define HSPHY_CFG0_HSDISC_MASK GENMASK(27, 26)
+#define HSPHY_CFG0_SWING_MASK GENMASK(17, 16)
+#define HSPHY_CFG0_SEL_T_MASK GENMASK(15, 12)
+#define HSPHY_CFG0_RTERM_MASK GENMASK(7, 6)
+#define HSPHY_CFG0_TRIMMASK (HSPHY_CFG0_HS_I_MASK \
+ | HSPHY_CFG0_SEL_T_MASK \
+ | HSPHY_CFG0_RTERM_MASK)
+
+#define HSPHY_CFG1 0x4
+#define HSPHY_CFG1_DAT_EN BIT(29)
+#define HSPHY_CFG1_ADR_EN BIT(28)
+#define HSPHY_CFG1_ADR_MASK GENMASK(27, 16)
+#define HSPHY_CFG1_DAT_MASK GENMASK(23, 16)
+
+#define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) }
+
+#define LS_SLEW PHY_F(10, 6, 6) /* LS mode slew rate */
+#define FS_LS_DRV PHY_F(10, 5, 5) /* FS/LS slew rate */
+
+#define MAX_PHY_PARAMS 2
+
+struct uniphier_u3hsphy_param {
+ struct {
+ int reg_no;
+ int msb;
+ int lsb;
+ } field;
+ u8 value;
+};
+
+struct uniphier_u3hsphy_trim_param {
+ unsigned int rterm;
+ unsigned int sel_t;
+ unsigned int hs_i;
+};
+
+#define trim_param_is_valid(p) ((p)->rterm || (p)->sel_t || (p)->hs_i)
+
+struct uniphier_u3hsphy_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk, *clk_parent, *clk_ext;
+ struct reset_control *rst, *rst_parent;
+ struct regulator *vbus;
+ const struct uniphier_u3hsphy_soc_data *data;
+};
+
+struct uniphier_u3hsphy_soc_data {
+ int nparams;
+ const struct uniphier_u3hsphy_param param[MAX_PHY_PARAMS];
+ u32 config0;
+ u32 config1;
+ void (*trim_func)(struct uniphier_u3hsphy_priv *priv, u32 *pconfig,
+ struct uniphier_u3hsphy_trim_param *pt);
+};
+
+static void uniphier_u3hsphy_trim_ld20(struct uniphier_u3hsphy_priv *priv,
+ u32 *pconfig,
+ struct uniphier_u3hsphy_trim_param *pt)
+{
+ *pconfig &= ~HSPHY_CFG0_RTERM_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_RTERM_MASK, pt->rterm);
+
+ *pconfig &= ~HSPHY_CFG0_SEL_T_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_SEL_T_MASK, pt->sel_t);
+
+ *pconfig &= ~HSPHY_CFG0_HS_I_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_HS_I_MASK, pt->hs_i);
+}
+
+static int uniphier_u3hsphy_get_nvparam(struct uniphier_u3hsphy_priv *priv,
+ const char *name, unsigned int *val)
+{
+ struct nvmem_cell *cell;
+ u8 *buf;
+
+ cell = devm_nvmem_cell_get(priv->dev, name);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, NULL);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ *val = *buf;
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int uniphier_u3hsphy_get_nvparams(struct uniphier_u3hsphy_priv *priv,
+ struct uniphier_u3hsphy_trim_param *pt)
+{
+ int ret;
+
+ ret = uniphier_u3hsphy_get_nvparam(priv, "rterm", &pt->rterm);
+ if (ret)
+ return ret;
+
+ ret = uniphier_u3hsphy_get_nvparam(priv, "sel_t", &pt->sel_t);
+ if (ret)
+ return ret;
+
+ ret = uniphier_u3hsphy_get_nvparam(priv, "hs_i", &pt->hs_i);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int uniphier_u3hsphy_update_config(struct uniphier_u3hsphy_priv *priv,
+ u32 *pconfig)
+{
+ struct uniphier_u3hsphy_trim_param trim;
+ int ret, trimmed = 0;
+
+ if (priv->data->trim_func) {
+ ret = uniphier_u3hsphy_get_nvparams(priv, &trim);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /*
+		 * Call trim_func only when trimming parameters that are not
+		 * all-zero could be acquired; all-zero parameters mean that
+		 * nothing has been written to nvmem.
+ */
+ if (!ret && trim_param_is_valid(&trim)) {
+ priv->data->trim_func(priv, pconfig, &trim);
+ trimmed = 1;
+ } else {
+ dev_dbg(priv->dev, "can't get parameter from nvmem\n");
+ }
+ }
+
+ /* use default parameters without trimming values */
+ if (!trimmed) {
+ *pconfig &= ~HSPHY_CFG0_HSDISC_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_HSDISC_MASK, 3);
+ }
+
+ return 0;
+}
+
+static void uniphier_u3hsphy_set_param(struct uniphier_u3hsphy_priv *priv,
+ const struct uniphier_u3hsphy_param *p)
+{
+ u32 val;
+ u32 field_mask = GENMASK(p->field.msb, p->field.lsb);
+ u8 data;
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~HSPHY_CFG1_ADR_MASK;
+ val |= FIELD_PREP(HSPHY_CFG1_ADR_MASK, p->field.reg_no)
+ | HSPHY_CFG1_ADR_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~HSPHY_CFG1_ADR_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~FIELD_PREP(HSPHY_CFG1_DAT_MASK, field_mask);
+ data = field_mask & (p->value << p->field.lsb);
+ val |= FIELD_PREP(HSPHY_CFG1_DAT_MASK, data) | HSPHY_CFG1_DAT_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~HSPHY_CFG1_DAT_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+}
+
+static int uniphier_u3hsphy_power_on(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_ext);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_clk_ext_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ if (priv->vbus) {
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ goto out_rst_assert;
+ }
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+out_clk_ext_disable:
+ clk_disable_unprepare(priv->clk_ext);
+
+ return ret;
+}
+
+static int uniphier_u3hsphy_power_off(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_ext);
+
+ return 0;
+}
+
+static int uniphier_u3hsphy_init(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+ u32 config0, config1;
+ int i, ret;
+
+ ret = clk_prepare_enable(priv->clk_parent);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rst_parent);
+ if (ret)
+ goto out_clk_disable;
+
+ if (!priv->data->config0 && !priv->data->config1)
+ return 0;
+
+ config0 = priv->data->config0;
+ config1 = priv->data->config1;
+
+ ret = uniphier_u3hsphy_update_config(priv, &config0);
+ if (ret)
+ goto out_rst_assert;
+
+ writel(config0, priv->base + HSPHY_CFG0);
+ writel(config1, priv->base + HSPHY_CFG1);
+
+ for (i = 0; i < priv->data->nparams; i++)
+ uniphier_u3hsphy_set_param(priv, &priv->data->param[i]);
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst_parent);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk_parent);
+
+ return ret;
+}
+
+static int uniphier_u3hsphy_exit(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+
+ reset_control_assert(priv->rst_parent);
+ clk_disable_unprepare(priv->clk_parent);
+
+ return 0;
+}
+
+static const struct phy_ops uniphier_u3hsphy_ops = {
+ .init = uniphier_u3hsphy_init,
+ .exit = uniphier_u3hsphy_exit,
+ .power_on = uniphier_u3hsphy_power_on,
+ .power_off = uniphier_u3hsphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_u3hsphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_u3hsphy_priv *priv;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct phy *phy;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data ||
+ priv->data->nparams > MAX_PHY_PARAMS))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->clk_parent = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk_parent))
+ return PTR_ERR(priv->clk_parent);
+
+ priv->clk_ext = devm_clk_get(dev, "phy-ext");
+ if (IS_ERR(priv->clk_ext)) {
+ if (PTR_ERR(priv->clk_ext) == -ENOENT)
+ priv->clk_ext = NULL;
+ else
+ return PTR_ERR(priv->clk_ext);
+ }
+
+ priv->rst = devm_reset_control_get_shared(dev, "phy");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ priv->rst_parent = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst_parent))
+ return PTR_ERR(priv->rst_parent);
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER)
+ return PTR_ERR(priv->vbus);
+ priv->vbus = NULL;
+ }
+
+ phy = devm_phy_create(dev, dev->of_node, &uniphier_u3hsphy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct uniphier_u3hsphy_soc_data uniphier_pxs2_data = {
+ .nparams = 0,
+};
+
+static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = {
+ .nparams = 2,
+ .param = {
+ { LS_SLEW, 1 },
+ { FS_LS_DRV, 1 },
+ },
+ .trim_func = uniphier_u3hsphy_trim_ld20,
+ .config0 = 0x92316680,
+ .config1 = 0x00000106,
+};
+
+static const struct uniphier_u3hsphy_soc_data uniphier_pxs3_data = {
+ .nparams = 0,
+ .trim_func = uniphier_u3hsphy_trim_ld20,
+ .config0 = 0x92316680,
+ .config1 = 0x00000106,
+};
+
+static const struct of_device_id uniphier_u3hsphy_match[] = {
+ {
+ .compatible = "socionext,uniphier-pxs2-usb3-hsphy",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-usb3-hsphy",
+ .data = &uniphier_ld20_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-usb3-hsphy",
+ .data = &uniphier_pxs3_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_u3hsphy_match);
+
+static struct platform_driver uniphier_u3hsphy_driver = {
+ .probe = uniphier_u3hsphy_probe,
+ .driver = {
+ .name = "uniphier-usb3-hsphy",
+ .of_match_table = uniphier_u3hsphy_match,
+ },
+};
+
+module_platform_driver(uniphier_u3hsphy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier HS-PHY driver for USB3 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c
new file mode 100644
index 000000000000..4be95679c7d8
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-usb3ss.c - SS-PHY driver for Socionext UniPhier USB3 controller
+ * Copyright 2015-2018 Socionext Inc.
+ * Author:
+ * Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ * Contributors:
+ * Motoya Tanigawa <tanigawa.motoya@socionext.com>
+ * Masami Hiramatsu <masami.hiramatsu@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#define SSPHY_TESTI 0x0
+#define SSPHY_TESTO 0x4
+#define TESTI_DAT_MASK GENMASK(13, 6)
+#define TESTI_ADR_MASK GENMASK(5, 1)
+#define TESTI_WR_EN BIT(0)
+
+#define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) }
+
+#define CDR_CPD_TRIM PHY_F(7, 3, 0) /* RxPLL charge pump current */
+#define CDR_CPF_TRIM PHY_F(8, 3, 0) /* RxPLL charge pump current 2 */
+#define TX_PLL_TRIM PHY_F(9, 3, 0) /* TxPLL charge pump current */
+#define BGAP_TRIM PHY_F(11, 3, 0) /* Bandgap voltage */
+#define CDR_TRIM PHY_F(13, 6, 5) /* Clock Data Recovery setting */
+#define VCO_CTRL PHY_F(26, 7, 4) /* VCO control */
+#define VCOPLL_CTRL PHY_F(27, 2, 0) /* TxPLL VCO tuning */
+#define VCOPLL_CM PHY_F(28, 1, 0) /* TxPLL voltage */
+
+#define MAX_PHY_PARAMS 7
+
+struct uniphier_u3ssphy_param {
+ struct {
+ int reg_no;
+ int msb;
+ int lsb;
+ } field;
+ u8 value;
+};
+
+struct uniphier_u3ssphy_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk, *clk_ext, *clk_parent, *clk_parent_gio;
+ struct reset_control *rst, *rst_parent, *rst_parent_gio;
+ struct regulator *vbus;
+ const struct uniphier_u3ssphy_soc_data *data;
+};
+
+struct uniphier_u3ssphy_soc_data {
+ bool is_legacy;
+ int nparams;
+ const struct uniphier_u3ssphy_param param[MAX_PHY_PARAMS];
+};
+
+static void uniphier_u3ssphy_testio_write(struct uniphier_u3ssphy_priv *priv,
+ u32 data)
+{
+ /* need to read TESTO twice after accessing TESTI */
+ writel(data, priv->base + SSPHY_TESTI);
+ readl(priv->base + SSPHY_TESTO);
+ readl(priv->base + SSPHY_TESTO);
+}
+
+static void uniphier_u3ssphy_set_param(struct uniphier_u3ssphy_priv *priv,
+ const struct uniphier_u3ssphy_param *p)
+{
+ u32 val;
+ u8 field_mask = GENMASK(p->field.msb, p->field.lsb);
+ u8 data;
+
+ /* read previous data */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
+ uniphier_u3ssphy_testio_write(priv, val);
+ val = readl(priv->base + SSPHY_TESTO);
+
+ /* update value */
+ val &= ~FIELD_PREP(TESTI_DAT_MASK, field_mask);
+ data = field_mask & (p->value << p->field.lsb);
+	val |= FIELD_PREP(TESTI_DAT_MASK, data);
+ val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
+ uniphier_u3ssphy_testio_write(priv, val);
+ uniphier_u3ssphy_testio_write(priv, val | TESTI_WR_EN);
+ uniphier_u3ssphy_testio_write(priv, val);
+
+ /* read current data as dummy */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
+ uniphier_u3ssphy_testio_write(priv, val);
+ readl(priv->base + SSPHY_TESTO);
+}
+
+static int uniphier_u3ssphy_power_on(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_ext);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_clk_ext_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ if (priv->vbus) {
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ goto out_rst_assert;
+ }
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+out_clk_ext_disable:
+ clk_disable_unprepare(priv->clk_ext);
+
+ return ret;
+}
+
+static int uniphier_u3ssphy_power_off(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_ext);
+
+ return 0;
+}
+
+static int uniphier_u3ssphy_init(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+ int i, ret;
+
+ ret = clk_prepare_enable(priv->clk_parent);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk_parent_gio);
+ if (ret)
+ goto out_clk_disable;
+
+ ret = reset_control_deassert(priv->rst_parent);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_parent_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ if (priv->data->is_legacy)
+ return 0;
+
+ for (i = 0; i < priv->data->nparams; i++)
+ uniphier_u3ssphy_set_param(priv, &priv->data->param[i]);
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst_parent);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_parent_gio);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk_parent);
+
+ return ret;
+}
+
+static int uniphier_u3ssphy_exit(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+
+ reset_control_assert(priv->rst_parent_gio);
+ reset_control_assert(priv->rst_parent);
+ clk_disable_unprepare(priv->clk_parent_gio);
+ clk_disable_unprepare(priv->clk_parent);
+
+ return 0;
+}
+
+static const struct phy_ops uniphier_u3ssphy_ops = {
+ .init = uniphier_u3ssphy_init,
+ .exit = uniphier_u3ssphy_exit,
+ .power_on = uniphier_u3ssphy_power_on,
+ .power_off = uniphier_u3ssphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_u3ssphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_u3ssphy_priv *priv;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct phy *phy;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data ||
+ priv->data->nparams > MAX_PHY_PARAMS))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ if (!priv->data->is_legacy) {
+ priv->clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->clk_ext = devm_clk_get(dev, "phy-ext");
+ if (IS_ERR(priv->clk_ext)) {
+ if (PTR_ERR(priv->clk_ext) == -ENOENT)
+ priv->clk_ext = NULL;
+ else
+ return PTR_ERR(priv->clk_ext);
+ }
+
+ priv->rst = devm_reset_control_get_shared(dev, "phy");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+ } else {
+ priv->clk_parent_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_parent_gio))
+ return PTR_ERR(priv->clk_parent_gio);
+
+ priv->rst_parent_gio =
+ devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_parent_gio))
+ return PTR_ERR(priv->rst_parent_gio);
+ }
+
+ priv->clk_parent = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk_parent))
+ return PTR_ERR(priv->clk_parent);
+
+ priv->rst_parent = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst_parent))
+ return PTR_ERR(priv->rst_parent);
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER)
+ return PTR_ERR(priv->vbus);
+ priv->vbus = NULL;
+ }
+
+ phy = devm_phy_create(dev, dev->of_node, &uniphier_u3ssphy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct uniphier_u3ssphy_soc_data uniphier_pro4_data = {
+ .is_legacy = true,
+};
+
+static const struct uniphier_u3ssphy_soc_data uniphier_pxs2_data = {
+ .is_legacy = false,
+ .nparams = 7,
+ .param = {
+ { CDR_CPD_TRIM, 10 },
+ { CDR_CPF_TRIM, 3 },
+ { TX_PLL_TRIM, 5 },
+ { BGAP_TRIM, 9 },
+ { CDR_TRIM, 2 },
+ { VCOPLL_CTRL, 7 },
+ { VCOPLL_CM, 1 },
+ },
+};
+
+static const struct uniphier_u3ssphy_soc_data uniphier_ld20_data = {
+ .is_legacy = false,
+ .nparams = 3,
+ .param = {
+ { CDR_CPD_TRIM, 6 },
+ { CDR_TRIM, 2 },
+ { VCO_CTRL, 5 },
+ },
+};
+
+static const struct of_device_id uniphier_u3ssphy_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro4-usb3-ssphy",
+ .data = &uniphier_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-usb3-ssphy",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-usb3-ssphy",
+ .data = &uniphier_ld20_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-usb3-ssphy",
+ .data = &uniphier_ld20_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_u3ssphy_match);
+
+static struct platform_driver uniphier_u3ssphy_driver = {
+ .probe = uniphier_u3ssphy_probe,
+ .driver = {
+ .name = "uniphier-usb3-ssphy",
+ .of_match_table = uniphier_u3ssphy_match,
+ },
+};
+
+module_platform_driver(uniphier_u3ssphy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier SS-PHY driver for USB3 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index de1b4ebe4de2..5b3b8863363e 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -115,8 +115,8 @@ int tegra_xusb_lane_parse_dt(struct tegra_xusb_lane *lane,
err = match_string(lane->soc->funcs, lane->soc->num_funcs, function);
if (err < 0) {
- dev_err(dev, "invalid function \"%s\" for lane \"%s\"\n",
- function, np->name);
+ dev_err(dev, "invalid function \"%s\" for lane \"%pOFn\"\n",
+ function, np);
return err;
}
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
index a44680d64f9b..c267afb68f07 100644
--- a/drivers/phy/ti/phy-twl4030-usb.c
+++ b/drivers/phy/ti/phy-twl4030-usb.c
@@ -144,6 +144,7 @@
#define PMBR1 0x0D
#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
+static irqreturn_t twl4030_usb_irq(int irq, void *_twl);
/*
* If VBUS is valid or ID is ground, then we know a
* cable is present and we need to be runtime-enabled
@@ -395,6 +396,33 @@ static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
}
+static int __maybe_unused twl4030_usb_suspend(struct device *dev)
+{
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+ /*
+	 * We need runtime PM enabled when the IRQ is handled on resume,
+	 * so disable the IRQ here so that it cannot fire too early.
+	 * Note: wakeup on USB plug events works independently of this.
+ */
+ dev_dbg(twl->dev, "%s\n", __func__);
+ disable_irq(twl->irq);
+
+ return 0;
+}
+
+static int __maybe_unused twl4030_usb_resume(struct device *dev)
+{
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+ dev_dbg(twl->dev, "%s\n", __func__);
+ enable_irq(twl->irq);
+ /* check whether cable status changed */
+ twl4030_usb_irq(0, twl);
+
+ return 0;
+}
+
static int __maybe_unused twl4030_usb_runtime_suspend(struct device *dev)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
@@ -655,6 +683,7 @@ static const struct phy_ops ops = {
static const struct dev_pm_ops twl4030_usb_pm_ops = {
SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
twl4030_usb_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(twl4030_usb_suspend, twl4030_usb_resume)
};
static int twl4030_usb_probe(struct platform_device *pdev)
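
On the new sleep hooks: SET_SYSTEM_SLEEP_PM_OPS() expands to nothing when CONFIG_PM_SLEEP is disabled, which is why twl4030_usb_suspend/resume are marked __maybe_unused instead of being wrapped in #ifdef. The resulting shape, with hypothetical names (a sketch, not part of the patch):

	static int __maybe_unused example_suspend(struct device *dev)
	{
		return 0;	/* quiesce the hardware here */
	}

	static int __maybe_unused example_resume(struct device *dev)
	{
		return 0;	/* re-arm the hardware here */
	}

	static const struct dev_pm_ops example_pm_ops = {
		/* compiles away entirely without CONFIG_PM_SLEEP */
		SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	};
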
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 2da567540c2d..7c639006252e 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2013 Intel, Inc.
@@ -46,7 +47,6 @@
* exchange is properly mapped during a transfer.
*/
-
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
@@ -59,10 +59,11 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <linux/goldfish.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>
+#include <linux/bug.h>
+#include "goldfish_pipe_qemu.h"
/*
* Update this when something changes in the driver's behavior so the host
@@ -73,71 +74,6 @@ enum {
PIPE_CURRENT_DEVICE_VERSION = 2
};
-/*
- * IMPORTANT: The following constants must match the ones used and defined
- * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
- */
-
-/* List of bitflags returned in status of CMD_POLL command */
-enum PipePollFlags {
- PIPE_POLL_IN = 1 << 0,
- PIPE_POLL_OUT = 1 << 1,
- PIPE_POLL_HUP = 1 << 2
-};
-
-/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
-enum PipeErrors {
- PIPE_ERROR_INVAL = -1,
- PIPE_ERROR_AGAIN = -2,
- PIPE_ERROR_NOMEM = -3,
- PIPE_ERROR_IO = -4
-};
-
-/* Bit-flags used to signal events from the emulator */
-enum PipeWakeFlags {
- PIPE_WAKE_CLOSED = 1 << 0, /* emulator closed pipe */
- PIPE_WAKE_READ = 1 << 1, /* pipe can now be read from */
- PIPE_WAKE_WRITE = 1 << 2 /* pipe can now be written to */
-};
-
-/* Bit flags for the 'flags' field */
-enum PipeFlagsBits {
- BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
- BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
- BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
-};
-
-enum PipeRegs {
- PIPE_REG_CMD = 0,
-
- PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
- PIPE_REG_SIGNAL_BUFFER = 8,
- PIPE_REG_SIGNAL_BUFFER_COUNT = 12,
-
- PIPE_REG_OPEN_BUFFER_HIGH = 20,
- PIPE_REG_OPEN_BUFFER = 24,
-
- PIPE_REG_VERSION = 36,
-
- PIPE_REG_GET_SIGNALLED = 48,
-};
-
-enum PipeCmdCode {
- PIPE_CMD_OPEN = 1, /* to be used by the pipe device itself */
- PIPE_CMD_CLOSE,
- PIPE_CMD_POLL,
- PIPE_CMD_WRITE,
- PIPE_CMD_WAKE_ON_WRITE,
- PIPE_CMD_READ,
- PIPE_CMD_WAKE_ON_READ,
-
- /*
- * TODO(zyy): implement a deferred read/write execution to allow
- * parallel processing of pipe operations on the host.
- */
- PIPE_CMD_WAKE_ON_DONE_IO,
-};
-
enum {
MAX_BUFFERS_PER_COMMAND = 336,
MAX_SIGNALLED_PIPES = 64,
@@ -145,14 +81,12 @@ enum {
};
struct goldfish_pipe_dev;
-struct goldfish_pipe;
-struct goldfish_pipe_command;
/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
- s32 cmd; /* PipeCmdCode, guest -> host */
- s32 id; /* pipe id, guest -> host */
- s32 status; /* command execution status, host -> guest */
+ s32 cmd; /* PipeCmdCode, guest -> host */
+ s32 id; /* pipe id, guest -> host */
+ s32 status; /* command execution status, host -> guest */
s32 reserved; /* to pad to 64-bit boundary */
union {
/* Parameters for PIPE_CMD_{READ,WRITE} */
@@ -184,19 +118,21 @@ struct open_command_param {
/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
struct open_command_param open_command_params;
- struct signalled_pipe_buffer signalled_pipe_buffers[
- MAX_SIGNALLED_PIPES];
+ struct signalled_pipe_buffer
+ signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};
/* This data type models a given pipe instance */
struct goldfish_pipe {
/* pipe ID - index into goldfish_pipe_dev::pipes array */
u32 id;
+
/* The wake flags pipe is waiting for
* Note: not protected with any lock, uses atomic operations
* and barriers to make it thread-safe.
*/
unsigned long flags;
+
/* wake flags host have signalled,
* - protected by goldfish_pipe_dev::lock
*/
@@ -220,8 +156,12 @@ struct goldfish_pipe {
/* A wake queue for sleeping until host signals an event */
wait_queue_head_t wake_queue;
+
/* Pointer to the parent goldfish_pipe_dev instance */
struct goldfish_pipe_dev *dev;
+
+ /* A buffer of pages, too large to fit into a stack frame */
+ struct page *pages[MAX_BUFFERS_PER_COMMAND];
};
/* The global driver data. Holds a reference to the i/o page used to
@@ -229,6 +169,9 @@ struct goldfish_pipe {
* waiting to be awoken.
*/
struct goldfish_pipe_dev {
+	/* A magic pointer value used to check that this is an instance of this struct */
+ void *magic;
+
/*
* Global device spinlock. Protects the following members:
* - pipes, pipes_capacity
@@ -261,15 +204,22 @@ struct goldfish_pipe_dev {
/* Head of a doubly linked list of signalled pipes */
struct goldfish_pipe *first_signalled_pipe;
+ /* ptr to platform device's device struct */
+ struct device *pdev_dev;
+
/* Some device-specific data */
int irq;
int version;
unsigned char __iomem *base;
-};
-static struct goldfish_pipe_dev pipe_dev[1] = {};
+ /* an irq tasklet to run goldfish_interrupt_task */
+ struct tasklet_struct irq_tasklet;
-static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+ struct miscdevice miscdev;
+};
+
+static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
+ enum PipeCmdCode cmd)
{
pipe->command_buffer->cmd = cmd;
/* failure by default */
@@ -278,13 +228,13 @@ static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
return pipe->command_buffer->status;
}
-static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
int status;
if (mutex_lock_interruptible(&pipe->lock))
return PIPE_ERROR_IO;
- status = goldfish_cmd_locked(pipe, cmd);
+ status = goldfish_pipe_cmd_locked(pipe, cmd);
mutex_unlock(&pipe->lock);
return status;
}
@@ -307,10 +257,12 @@ static int goldfish_pipe_error_convert(int status)
}
}
-static int pin_user_pages(unsigned long first_page, unsigned long last_page,
- unsigned int last_page_size, int is_write,
- struct page *pages[MAX_BUFFERS_PER_COMMAND],
- unsigned int *iter_last_page_size)
+static int pin_user_pages(unsigned long first_page,
+ unsigned long last_page,
+ unsigned int last_page_size,
+ int is_write,
+ struct page *pages[MAX_BUFFERS_PER_COMMAND],
+ unsigned int *iter_last_page_size)
{
int ret;
int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
@@ -322,18 +274,18 @@ static int pin_user_pages(unsigned long first_page, unsigned long last_page,
*iter_last_page_size = last_page_size;
}
- ret = get_user_pages_fast(
- first_page, requested_pages, !is_write, pages);
+ ret = get_user_pages_fast(first_page, requested_pages, !is_write,
+ pages);
if (ret <= 0)
return -EFAULT;
if (ret < requested_pages)
*iter_last_page_size = PAGE_SIZE;
- return ret;
+ return ret;
}
static void release_user_pages(struct page **pages, int pages_count,
- int is_write, s32 consumed_size)
+ int is_write, s32 consumed_size)
{
int i;
@@ -345,12 +297,15 @@ static void release_user_pages(struct page **pages, int pages_count,
}
/* Populate the call parameters, merging adjacent pages together */
-static void populate_rw_params(
- struct page **pages, int pages_count,
- unsigned long address, unsigned long address_end,
- unsigned long first_page, unsigned long last_page,
- unsigned int iter_last_page_size, int is_write,
- struct goldfish_pipe_command *command)
+static void populate_rw_params(struct page **pages,
+ int pages_count,
+ unsigned long address,
+ unsigned long address_end,
+ unsigned long first_page,
+ unsigned long last_page,
+ unsigned int iter_last_page_size,
+ int is_write,
+ struct goldfish_pipe_command *command)
{
/*
* Process the first page separately - it's the only page that
@@ -382,55 +337,59 @@ static void populate_rw_params(
}
static int transfer_max_buffers(struct goldfish_pipe *pipe,
- unsigned long address, unsigned long address_end, int is_write,
- unsigned long last_page, unsigned int last_page_size,
- s32 *consumed_size, int *status)
+ unsigned long address,
+ unsigned long address_end,
+ int is_write,
+ unsigned long last_page,
+ unsigned int last_page_size,
+ s32 *consumed_size,
+ int *status)
{
- static struct page *pages[MAX_BUFFERS_PER_COMMAND];
unsigned long first_page = address & PAGE_MASK;
unsigned int iter_last_page_size;
- int pages_count = pin_user_pages(first_page, last_page,
- last_page_size, is_write,
- pages, &iter_last_page_size);
-
- if (pages_count < 0)
- return pages_count;
+ int pages_count;
/* Serialize access to the pipe command buffers */
if (mutex_lock_interruptible(&pipe->lock))
return -ERESTARTSYS;
- populate_rw_params(pages, pages_count, address, address_end,
- first_page, last_page, iter_last_page_size, is_write,
- pipe->command_buffer);
+ pages_count = pin_user_pages(first_page, last_page,
+ last_page_size, is_write,
+ pipe->pages, &iter_last_page_size);
+ if (pages_count < 0) {
+ mutex_unlock(&pipe->lock);
+ return pages_count;
+ }
+
+ populate_rw_params(pipe->pages, pages_count, address, address_end,
+ first_page, last_page, iter_last_page_size, is_write,
+ pipe->command_buffer);
/* Transfer the data */
- *status = goldfish_cmd_locked(pipe,
+ *status = goldfish_pipe_cmd_locked(pipe,
is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
*consumed_size = pipe->command_buffer->rw_params.consumed_size;
- release_user_pages(pages, pages_count, is_write, *consumed_size);
+ release_user_pages(pipe->pages, pages_count, is_write, *consumed_size);
mutex_unlock(&pipe->lock);
-
return 0;
}
static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
- u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
+ u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
- set_bit(wakeBit, &pipe->flags);
+ set_bit(wake_bit, &pipe->flags);
/* Tell the emulator we're going to wait for a wake event */
- (void)goldfish_cmd(pipe,
+ goldfish_pipe_cmd(pipe,
is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
- while (test_bit(wakeBit, &pipe->flags)) {
- if (wait_event_interruptible(
- pipe->wake_queue,
- !test_bit(wakeBit, &pipe->flags)))
+ while (test_bit(wake_bit, &pipe->flags)) {
+ if (wait_event_interruptible(pipe->wake_queue,
+ !test_bit(wake_bit, &pipe->flags)))
return -ERESTARTSYS;
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
@@ -441,7 +400,9 @@ static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
}
static ssize_t goldfish_pipe_read_write(struct file *filp,
- char __user *buffer, size_t bufflen, int is_write)
+ char __user *buffer,
+ size_t bufflen,
+ int is_write)
{
struct goldfish_pipe *pipe = filp->private_data;
int count = 0, ret = -EINVAL;
@@ -456,7 +417,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
return 0;
/* Check the buffer range for access */
if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
- buffer, bufflen)))
+ buffer, bufflen)))
return -EFAULT;
address = (unsigned long)buffer;
@@ -469,8 +430,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
int status;
ret = transfer_max_buffers(pipe, address, address_end, is_write,
- last_page, last_page_size, &consumed_size,
- &status);
+ last_page, last_page_size,
+ &consumed_size, &status);
if (ret < 0)
break;
@@ -496,7 +457,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
* err.
*/
if (status != PIPE_ERROR_AGAIN)
- pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n",
+ dev_err_ratelimited(pipe->dev->pdev_dev,
+ "backend error %d on %s\n",
status, is_write ? "write" : "read");
break;
}
@@ -522,19 +484,21 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
}
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
- size_t bufflen, loff_t *ppos)
+ size_t bufflen, loff_t *ppos)
{
return goldfish_pipe_read_write(filp, buffer, bufflen,
- /* is_write */ 0);
+ /* is_write */ 0);
}
static ssize_t goldfish_pipe_write(struct file *filp,
- const char __user *buffer, size_t bufflen,
- loff_t *ppos)
+ const char __user *buffer, size_t bufflen,
+ loff_t *ppos)
{
- return goldfish_pipe_read_write(filp,
- /* cast away the const */(char __user *)buffer, bufflen,
- /* is_write */ 1);
+ /* cast away the const */
+ char __user *no_const_buffer = (char __user *)buffer;
+
+ return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
+ /* is_write */ 1);
}
static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
@@ -545,7 +509,7 @@ static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &pipe->wake_queue, wait);
- status = goldfish_cmd(pipe, PIPE_CMD_POLL);
+ status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
if (status < 0)
return -ERESTARTSYS;
@@ -562,7 +526,7 @@ static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
}
static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
- u32 id, u32 flags)
+ u32 id, u32 flags)
{
struct goldfish_pipe *pipe;
@@ -574,8 +538,8 @@ static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
return;
pipe->signalled_flags |= flags;
- if (pipe->prev_signalled || pipe->next_signalled
- || dev->first_signalled_pipe == pipe)
+ if (pipe->prev_signalled || pipe->next_signalled ||
+ dev->first_signalled_pipe == pipe)
return; /* already in the list */
pipe->next_signalled = dev->first_signalled_pipe;
if (dev->first_signalled_pipe)
@@ -584,7 +548,8 @@ static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
}
static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
- struct goldfish_pipe *pipe) {
+ struct goldfish_pipe *pipe)
+{
if (pipe->prev_signalled)
pipe->prev_signalled->next_signalled = pipe->next_signalled;
if (pipe->next_signalled)
@@ -623,10 +588,10 @@ static struct goldfish_pipe *signalled_pipes_pop_front(
return pipe;
}
-static void goldfish_interrupt_task(unsigned long unused)
+static void goldfish_interrupt_task(unsigned long dev_addr)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
/* Iterate over the signalled pipes and wake them one by one */
+ struct goldfish_pipe_dev *dev = (struct goldfish_pipe_dev *)dev_addr;
struct goldfish_pipe *pipe;
int wakes;
@@ -646,7 +611,9 @@ static void goldfish_interrupt_task(unsigned long unused)
wake_up_interruptible(&pipe->wake_queue);
}
}
-static DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);
+
+static void goldfish_pipe_device_deinit(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev);
/*
* The general idea of the interrupt handling:
@@ -668,7 +635,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
unsigned long flags;
struct goldfish_pipe_dev *dev = dev_id;
- if (dev != pipe_dev)
+ if (dev->magic != &goldfish_pipe_device_deinit)
return IRQ_NONE;
/* Request the signalled pipes from the device */
@@ -689,7 +656,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&dev->lock, flags);
- tasklet_schedule(&goldfish_interrupt_tasklet);
+ tasklet_schedule(&dev->irq_tasklet);
return IRQ_HANDLED;
}
@@ -702,7 +669,10 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
return id;
{
- /* Reallocate the array */
+ /* Reallocate the array.
+ * Since get_free_pipe_id_locked runs with interrupts disabled,
+ * we don't want to make calls that could lead to sleep.
+ */
u32 new_capacity = 2 * dev->pipes_capacity;
struct goldfish_pipe **pipes =
kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
@@ -717,6 +687,14 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
return id;
}
+/* A helper function to get the instance of goldfish_pipe_dev from file */
+static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
+}
+
/**
* goldfish_pipe_open - open a channel to the AVD
* @inode: inode of device
@@ -730,14 +708,15 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
*/
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
unsigned long flags;
int id;
int status;
/* Allocate new pipe kernel object */
struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
- if (pipe == NULL)
+
+ if (!pipe)
return -ENOMEM;
pipe->dev = dev;
@@ -748,6 +727,7 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
* Command buffer needs to be allocated on its own page to make sure
* it is physically contiguous in host's address space.
*/
+ BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
pipe->command_buffer =
(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
if (!pipe->command_buffer) {
@@ -772,7 +752,7 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
MAX_BUFFERS_PER_COMMAND;
dev->buffers->open_command_params.command_buffer_ptr =
(u64)(unsigned long)__pa(pipe->command_buffer);
- status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
+ status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
spin_unlock_irqrestore(&dev->lock, flags);
if (status < 0)
goto err_cmd;
@@ -798,7 +778,7 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp)
struct goldfish_pipe_dev *dev = pipe->dev;
/* The guest is closing the channel, so tell the emulator right now */
- (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);
+ goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
spin_lock_irqsave(&dev->lock, flags);
dev->pipes[pipe->id] = NULL;
@@ -820,36 +800,55 @@ static const struct file_operations goldfish_pipe_fops = {
.release = goldfish_pipe_release,
};
-static struct miscdevice goldfish_pipe_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "goldfish_pipe",
- .fops = &goldfish_pipe_fops,
-};
+static void init_miscdevice(struct miscdevice *miscdev)
+{
+ memset(miscdev, 0, sizeof(*miscdev));
+
+ miscdev->minor = MISC_DYNAMIC_MINOR;
+ miscdev->name = "goldfish_pipe";
+ miscdev->fops = &goldfish_pipe_fops;
+}
+
+static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
+{
+ const unsigned long paddr = __pa(addr);
+
+ writel(upper_32_bits(paddr), porth);
+ writel(lower_32_bits(paddr), portl);
+}
-static int goldfish_pipe_device_init(struct platform_device *pdev)
+static int goldfish_pipe_device_init(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev)
{
- char *page;
- struct goldfish_pipe_dev *dev = pipe_dev;
- int err = devm_request_irq(&pdev->dev, dev->irq,
- goldfish_pipe_interrupt,
- IRQF_SHARED, "goldfish_pipe", dev);
+ int err;
+
+ tasklet_init(&dev->irq_tasklet, &goldfish_interrupt_task,
+ (unsigned long)dev);
+
+ err = devm_request_irq(&pdev->dev, dev->irq,
+ goldfish_pipe_interrupt,
+ IRQF_SHARED, "goldfish_pipe", dev);
if (err) {
dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
return err;
}
- err = misc_register(&goldfish_pipe_dev);
+ init_miscdevice(&dev->miscdev);
+ err = misc_register(&dev->miscdev);
if (err) {
dev_err(&pdev->dev, "unable to register v2 device\n");
return err;
}
+ dev->pdev_dev = &pdev->dev;
dev->first_signalled_pipe = NULL;
dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
- GFP_KERNEL);
- if (!dev->pipes)
+ GFP_KERNEL);
+ if (!dev->pipes) {
+ misc_deregister(&dev->miscdev);
return -ENOMEM;
+ }
/*
* We're going to pass two buffers, open_command_params and
@@ -857,75 +856,67 @@ static int goldfish_pipe_device_init(struct platform_device *pdev)
* needs to be contained in a single physical page. The easiest choice
* is to just allocate a page and place the buffers in it.
*/
- if (WARN_ON(sizeof(*dev->buffers) > PAGE_SIZE))
- return -ENOMEM;
-
- page = (char *)__get_free_page(GFP_KERNEL);
- if (!page) {
+ BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
+ dev->buffers = (struct goldfish_pipe_dev_buffers *)
+ __get_free_page(GFP_KERNEL);
+ if (!dev->buffers) {
kfree(dev->pipes);
+ misc_deregister(&dev->miscdev);
return -ENOMEM;
}
- dev->buffers = (struct goldfish_pipe_dev_buffers *)page;
/* Send the buffer addresses to the host */
- {
- u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);
-
- writel((u32)(unsigned long)(paddr >> 32),
- dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
- writel((u32)(unsigned long)paddr,
- dev->base + PIPE_REG_SIGNAL_BUFFER);
- writel((u32)MAX_SIGNALLED_PIPES,
- dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
-
- paddr = __pa(&dev->buffers->open_command_params);
- writel((u32)(unsigned long)(paddr >> 32),
- dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
- writel((u32)(unsigned long)paddr,
- dev->base + PIPE_REG_OPEN_BUFFER);
- }
+ write_pa_addr(&dev->buffers->signalled_pipe_buffers,
+ dev->base + PIPE_REG_SIGNAL_BUFFER,
+ dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
+
+ writel(MAX_SIGNALLED_PIPES,
+ dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
+
+ write_pa_addr(&dev->buffers->open_command_params,
+ dev->base + PIPE_REG_OPEN_BUFFER,
+ dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
+
+ platform_set_drvdata(pdev, dev);
return 0;
}
-static void goldfish_pipe_device_deinit(struct platform_device *pdev)
+static void goldfish_pipe_device_deinit(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
-
- misc_deregister(&goldfish_pipe_dev);
+ misc_deregister(&dev->miscdev);
+ tasklet_kill(&dev->irq_tasklet);
kfree(dev->pipes);
free_page((unsigned long)dev->buffers);
}
static int goldfish_pipe_probe(struct platform_device *pdev)
{
- int err;
struct resource *r;
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev;
- if (WARN_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE))
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
return -ENOMEM;
- /* not thread safe, but this should not happen */
- WARN_ON(dev->base != NULL);
-
+ dev->magic = &goldfish_pipe_device_deinit;
spin_lock_init(&dev->lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL || resource_size(r) < PAGE_SIZE) {
+ if (!r || resource_size(r) < PAGE_SIZE) {
dev_err(&pdev->dev, "can't allocate i/o page\n");
return -EINVAL;
}
dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
- if (dev->base == NULL) {
+ if (!dev->base) {
dev_err(&pdev->dev, "ioremap failed\n");
return -EINVAL;
}
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (r == NULL) {
- err = -EINVAL;
- goto error;
- }
+ if (!r)
+ return -EINVAL;
+
dev->irq = r->start;
/*
@@ -935,25 +926,19 @@ static int goldfish_pipe_probe(struct platform_device *pdev)
* reading device version back: this allows the host implementation to
* detect the old driver (if there was no version write before read).
*/
- writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
+ writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
dev->version = readl(dev->base + PIPE_REG_VERSION);
if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
return -EINVAL;
- err = goldfish_pipe_device_init(pdev);
- if (!err)
- return 0;
-
-error:
- dev->base = NULL;
- return err;
+ return goldfish_pipe_device_init(pdev, dev);
}
static int goldfish_pipe_remove(struct platform_device *pdev)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
- goldfish_pipe_device_deinit(pdev);
- dev->base = NULL;
+ struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);
+
+ goldfish_pipe_device_deinit(pdev, dev);
return 0;
}
@@ -981,4 +966,4 @@ static struct platform_driver goldfish_pipe_driver = {
module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/goldfish/goldfish_pipe_qemu.h b/drivers/platform/goldfish/goldfish_pipe_qemu.h
new file mode 100644
index 000000000000..b4d78c108afd
--- /dev/null
+++ b/drivers/platform/goldfish/goldfish_pipe_qemu.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMPORTANT: The following constants must match the ones used and defined in
+ * external/qemu/include/hw/misc/goldfish_pipe.h
+ */
+
+#ifndef GOLDFISH_PIPE_QEMU_H
+#define GOLDFISH_PIPE_QEMU_H
+
+/* List of bitflags returned in status of CMD_POLL command */
+enum PipePollFlags {
+ PIPE_POLL_IN = 1 << 0,
+ PIPE_POLL_OUT = 1 << 1,
+ PIPE_POLL_HUP = 1 << 2
+};
+
+/* Possible status values used to signal errors */
+enum PipeErrors {
+ PIPE_ERROR_INVAL = -1,
+ PIPE_ERROR_AGAIN = -2,
+ PIPE_ERROR_NOMEM = -3,
+ PIPE_ERROR_IO = -4
+};
+
+/* Bit-flags used to signal events from the emulator */
+enum PipeWakeFlags {
+ /* emulator closed pipe */
+ PIPE_WAKE_CLOSED = 1 << 0,
+
+ /* pipe can now be read from */
+ PIPE_WAKE_READ = 1 << 1,
+
+ /* pipe can now be written to */
+ PIPE_WAKE_WRITE = 1 << 2,
+
+ /* unlock this pipe's DMA buffer */
+ PIPE_WAKE_UNLOCK_DMA = 1 << 3,
+
+ /* unlock DMA buffer of the pipe shared to this pipe */
+ PIPE_WAKE_UNLOCK_DMA_SHARED = 1 << 4,
+};
+
+/* Possible pipe closing reasons */
+enum PipeCloseReason {
+ /* guest sent a close command */
+ PIPE_CLOSE_GRACEFUL = 0,
+
+ /* guest rebooted, we're closing the pipes */
+ PIPE_CLOSE_REBOOT = 1,
+
+ /* close old pipes on snapshot load */
+ PIPE_CLOSE_LOAD_SNAPSHOT = 2,
+
+ /* some unrecoverable error on the pipe */
+ PIPE_CLOSE_ERROR = 3,
+};
+
+/* Bit flags for the 'flags' field */
+enum PipeFlagsBits {
+ BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
+ BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
+ BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
+};
+
+enum PipeRegs {
+ PIPE_REG_CMD = 0,
+
+ PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
+ PIPE_REG_SIGNAL_BUFFER = 8,
+ PIPE_REG_SIGNAL_BUFFER_COUNT = 12,
+
+ PIPE_REG_OPEN_BUFFER_HIGH = 20,
+ PIPE_REG_OPEN_BUFFER = 24,
+
+ PIPE_REG_VERSION = 36,
+
+ PIPE_REG_GET_SIGNALLED = 48,
+};
+
+enum PipeCmdCode {
+ /* to be used by the pipe device itself */
+ PIPE_CMD_OPEN = 1,
+
+ PIPE_CMD_CLOSE,
+ PIPE_CMD_POLL,
+ PIPE_CMD_WRITE,
+ PIPE_CMD_WAKE_ON_WRITE,
+ PIPE_CMD_READ,
+ PIPE_CMD_WAKE_ON_READ,
+
+ /*
+ * TODO(zyy): implement a deferred read/write execution to allow
+ * parallel processing of pipe operations on the host.
+ */
+ PIPE_CMD_WAKE_ON_DONE_IO,
+};
+
+#endif /* GOLDFISH_PIPE_QEMU_H */
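
Guest-side use of this register map is a plain doorbell store; a hedged sketch (hypothetical helper; in this device model the host is assumed to process the command while the guest's register write traps):

	#include <linux/io.h>

	static void example_kick_cmd(void __iomem *base, enum PipeCmdCode cmd)
	{
		/* Assumption: the host consumes PIPE_REG_CMD on the write itself. */
		writel(cmd, base + PIPE_REG_CMD);
	}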
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0c1aa6c314f5..bdac939de223 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -867,6 +867,8 @@ config INTEL_CHT_INT33FE
tristate "Intel Cherry Trail ACPI INT33FE Driver"
depends on X86 && ACPI && I2C && REGULATOR
depends on CHARGER_BQ24190=y || (CHARGER_BQ24190=m && m)
+ depends on USB_ROLES_INTEL_XHCI=y || (USB_ROLES_INTEL_XHCI=m && m)
+ depends on TYPEC_MUX_PI3USB30532=y || (TYPEC_MUX_PI3USB30532=m && m)
---help---
This driver adds support for the INT33FE ACPI device found on
some Intel Cherry Trail devices.
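
(The X=y || (X=m && m) pattern in the added dependencies is the usual Kconfig idiom here: if either helper driver is modular, INT33FE itself can only be m, so a built-in INT33FE can never end up referencing symbols that live in a module.)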
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 2d6e272315a8..93ee2d5466f8 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -254,7 +254,7 @@ struct asus_wmi {
int asus_hwmon_num_fans;
int asus_hwmon_pwm;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
struct mutex wmi_lock;
struct workqueue_struct *hotplug_workqueue;
@@ -753,7 +753,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
if (asus->wlan.rfkill)
rfkill_set_sw_state(asus->wlan.rfkill, blocked);
- if (asus->hotplug_slot) {
+ if (asus->hotplug_slot.ops) {
bus = pci_find_bus(0, 1);
if (!bus) {
pr_warn("Unable to find PCI bus 1?\n");
@@ -858,7 +858,8 @@ static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node)
static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
u8 *value)
{
- struct asus_wmi *asus = hotplug_slot->private;
+ struct asus_wmi *asus = container_of(hotplug_slot,
+ struct asus_wmi, hotplug_slot);
int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
if (result < 0)
@@ -868,8 +869,7 @@ static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static struct hotplug_slot_ops asus_hotplug_slot_ops = {
- .owner = THIS_MODULE,
+static const struct hotplug_slot_ops asus_hotplug_slot_ops = {
.get_adapter_status = asus_get_adapter_status,
.get_power_status = asus_get_adapter_status,
};
@@ -899,21 +899,9 @@ static int asus_setup_pci_hotplug(struct asus_wmi *asus)
INIT_WORK(&asus->hotplug_work, asus_hotplug_work);
- asus->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!asus->hotplug_slot)
- goto error_slot;
-
- asus->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!asus->hotplug_slot->info)
- goto error_info;
+ asus->hotplug_slot.ops = &asus_hotplug_slot_ops;
- asus->hotplug_slot->private = asus;
- asus->hotplug_slot->ops = &asus_hotplug_slot_ops;
- asus_get_adapter_status(asus->hotplug_slot,
- &asus->hotplug_slot->info->adapter_status);
-
- ret = pci_hp_register(asus->hotplug_slot, bus, 0, "asus-wifi");
+ ret = pci_hp_register(&asus->hotplug_slot, bus, 0, "asus-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
@@ -922,11 +910,7 @@ static int asus_setup_pci_hotplug(struct asus_wmi *asus)
return 0;
error_register:
- kfree(asus->hotplug_slot->info);
-error_info:
- kfree(asus->hotplug_slot);
- asus->hotplug_slot = NULL;
-error_slot:
+ asus->hotplug_slot.ops = NULL;
destroy_workqueue(asus->hotplug_workqueue);
error_workqueue:
return ret;
@@ -1054,11 +1038,8 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
* asus_unregister_rfkill_notifier()
*/
asus_rfkill_hotplug(asus);
- if (asus->hotplug_slot) {
- pci_hp_deregister(asus->hotplug_slot);
- kfree(asus->hotplug_slot->info);
- kfree(asus->hotplug_slot);
- }
+ if (asus->hotplug_slot.ops)
+ pci_hp_deregister(&asus->hotplug_slot);
if (asus->hotplug_workqueue)
destroy_workqueue(asus->hotplug_workqueue);
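
This hunk and the eeepc-laptop conversion below share one pattern: struct hotplug_slot is embedded in the driver state, the ->ops pointer doubles as the "slot registered" flag, and callbacks recover their context with container_of() instead of the removed ->private field. A sketch of the shape (hypothetical names):

	#include <linux/pci_hotplug.h>

	struct example_wifi {
		struct hotplug_slot hotplug_slot;	/* embedded; no separate kzalloc */
		u8 adapter_status;
	};

	static int example_get_adapter_status(struct hotplug_slot *slot, u8 *value)
	{
		struct example_wifi *wifi =
			container_of(slot, struct example_wifi, hotplug_slot);

		*value = wifi->adapter_status;
		return 0;
	}

With the slot's lifetime tied to the containing structure, the error paths reduce to clearing ->ops, and the separately allocated hotplug_slot_info drops out entirely.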
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index a4bbf6ecd1f0..e6946a9beb5a 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -177,7 +177,7 @@ struct eeepc_laptop {
struct rfkill *wwan3g_rfkill;
struct rfkill *wimax_rfkill;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
struct led_classdev tpd_led;
@@ -582,7 +582,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
mutex_lock(&eeepc->hotplug_lock);
pci_lock_rescan_remove();
- if (!eeepc->hotplug_slot)
+ if (!eeepc->hotplug_slot.ops)
goto out_unlock;
port = acpi_get_pci_dev(handle);
@@ -715,8 +715,11 @@ static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
u8 *value)
{
- struct eeepc_laptop *eeepc = hotplug_slot->private;
- int val = get_acpi(eeepc, CM_ASL_WLAN);
+ struct eeepc_laptop *eeepc;
+ int val;
+
+ eeepc = container_of(hotplug_slot, struct eeepc_laptop, hotplug_slot);
+ val = get_acpi(eeepc, CM_ASL_WLAN);
if (val == 1 || val == 0)
*value = val;
@@ -726,8 +729,7 @@ static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
- .owner = THIS_MODULE,
+static const struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
.get_adapter_status = eeepc_get_adapter_status,
.get_power_status = eeepc_get_adapter_status,
};
@@ -742,21 +744,9 @@ static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
return -ENODEV;
}
- eeepc->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!eeepc->hotplug_slot)
- goto error_slot;
+ eeepc->hotplug_slot.ops = &eeepc_hotplug_slot_ops;
- eeepc->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!eeepc->hotplug_slot->info)
- goto error_info;
-
- eeepc->hotplug_slot->private = eeepc;
- eeepc->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
- eeepc_get_adapter_status(eeepc->hotplug_slot,
- &eeepc->hotplug_slot->info->adapter_status);
-
- ret = pci_hp_register(eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
+ ret = pci_hp_register(&eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
@@ -765,11 +755,7 @@ static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
return 0;
error_register:
- kfree(eeepc->hotplug_slot->info);
-error_info:
- kfree(eeepc->hotplug_slot);
- eeepc->hotplug_slot = NULL;
-error_slot:
+ eeepc->hotplug_slot.ops = NULL;
return ret;
}
@@ -830,11 +816,8 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
eeepc->wlan_rfkill = NULL;
}
- if (eeepc->hotplug_slot) {
- pci_hp_deregister(eeepc->hotplug_slot);
- kfree(eeepc->hotplug_slot->info);
- kfree(eeepc->hotplug_slot);
- }
+ if (eeepc->hotplug_slot.ops)
+ pci_hp_deregister(&eeepc->hotplug_slot);
if (eeepc->bluetooth_rfkill) {
rfkill_unregister(eeepc->bluetooth_rfkill);
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index 7166f1cf8a1d..f40b1c192106 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -35,7 +35,7 @@ struct cht_int33fe_data {
struct i2c_client *fusb302;
struct i2c_client *pi3usb30532;
/* Contains a list-head, so this must be per device */
- struct device_connection connections[3];
+ struct device_connection connections[5];
};
/*
@@ -175,19 +175,20 @@ static int cht_int33fe_probe(struct platform_device *pdev)
return -EPROBE_DEFER; /* Wait for i2c-adapter to load */
}
- data->connections[0].endpoint[0] = "i2c-fusb302";
+ data->connections[0].endpoint[0] = "port0";
data->connections[0].endpoint[1] = "i2c-pi3usb30532";
data->connections[0].id = "typec-switch";
- data->connections[1].endpoint[0] = "i2c-fusb302";
+ data->connections[1].endpoint[0] = "port0";
data->connections[1].endpoint[1] = "i2c-pi3usb30532";
data->connections[1].id = "typec-mux";
- data->connections[2].endpoint[0] = "i2c-fusb302";
- data->connections[2].endpoint[1] = "intel_xhci_usb_sw-role-switch";
- data->connections[2].id = "usb-role-switch";
+ data->connections[2].endpoint[0] = "port0";
+ data->connections[2].endpoint[1] = "i2c-pi3usb30532";
+ data->connections[2].id = "idff01m01";
+ data->connections[3].endpoint[0] = "i2c-fusb302";
+ data->connections[3].endpoint[1] = "intel_xhci_usb_sw-role-switch";
+ data->connections[3].id = "usb-role-switch";
- device_connection_add(&data->connections[0]);
- device_connection_add(&data->connections[1]);
- device_connection_add(&data->connections[2]);
+ device_connections_add(data->connections);
memset(&board_info, 0, sizeof(board_info));
strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
@@ -218,9 +219,7 @@ out_unregister_max17047:
if (data->max17047)
i2c_unregister_device(data->max17047);
- device_connection_remove(&data->connections[2]);
- device_connection_remove(&data->connections[1]);
- device_connection_remove(&data->connections[0]);
+ device_connections_remove(data->connections);
return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
}
@@ -234,9 +233,7 @@ static int cht_int33fe_remove(struct platform_device *pdev)
if (data->max17047)
i2c_unregister_device(data->max17047);
- device_connection_remove(&data->connections[2]);
- device_connection_remove(&data->connections[1]);
- device_connection_remove(&data->connections[0]);
+ device_connections_remove(data->connections);
return 0;
}
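
device_connections_add() and device_connections_remove() walk the array until they hit an entry whose endpoint[0] is NULL, which is why connections[] grows to five slots for four populated entries: the zeroed tail element is the terminator. A sketch of that contract (illustrative of what the plural helper does, not a copy of it):

	#include <linux/device.h>

	static void example_connections_add(struct device_connection *cons)
	{
		struct device_connection *c;

		/* Stop at the zero-filled sentinel entry. */
		for (c = cons; c->endpoint[0]; c++)
			device_connection_add(c);
	}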
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 0206cce328b3..2b686c55b717 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -19,6 +19,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk/at91_pmc.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -69,7 +70,10 @@ struct shdwc_config {
struct shdwc {
const struct shdwc_config *cfg;
- void __iomem *at91_shdwc_base;
+ struct clk *sclk;
+ void __iomem *shdwc_base;
+ void __iomem *mpddrc_base;
+ void __iomem *pmc_base;
};
/*
@@ -77,8 +81,6 @@ struct shdwc {
* since pm_power_off itself is global.
*/
static struct shdwc *at91_shdwc;
-static struct clk *sclk;
-static void __iomem *mpddrc_base;
static const unsigned long long sdwc_dbc_period[] = {
0, 3, 32, 512, 4096, 32768,
@@ -90,7 +92,7 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
u32 reg;
char *reason = "unknown";
- reg = readl(shdw->at91_shdwc_base + AT91_SHDW_SR);
+ reg = readl(shdw->shdwc_base + AT91_SHDW_SR);
dev_dbg(&pdev->dev, "%s: status = %#x\n", __func__, reg);
@@ -108,12 +110,6 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
static void at91_poweroff(void)
{
- writel(AT91_SHDW_KEY | AT91_SHDW_SHDW,
- at91_shdwc->at91_shdwc_base + AT91_SHDW_CR);
-}
-
-static void at91_lpddr_poweroff(void)
-{
asm volatile(
/* Align to cache lines */
".balign 32\n\t"
@@ -122,16 +118,29 @@ static void at91_lpddr_poweroff(void)
" ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
/* Power down SDRAM0 */
+ " tst %0, #0\n\t"
+ " beq 1f\n\t"
" str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+
+ /* Switch the master clock source to slow clock. */
+ "1: ldr r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+ " bic r6, r6, #" __stringify(AT91_PMC_CSS) "\n\t"
+ " str r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+ /* Wait for clock switch. */
+ "2: ldr r6, [%4, #" __stringify(AT91_PMC_SR) "]\n\t"
+ " tst r6, #" __stringify(AT91_PMC_MCKRDY) "\n\t"
+ " beq 2b\n\t"
+
/* Shutdown CPU */
" str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
" b .\n\t"
:
- : "r" (mpddrc_base),
+ : "r" (at91_shdwc->mpddrc_base),
"r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
- "r" (at91_shdwc->at91_shdwc_base),
- "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
+ "r" (at91_shdwc->shdwc_base),
+ "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW),
+ "r" (at91_shdwc->pmc_base)
: "r6");
}
@@ -213,10 +222,10 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
mode |= SHDW_RTCWKEN(shdw->cfg);
dev_dbg(&pdev->dev, "%s: mode = %#x\n", __func__, mode);
- writel(mode, shdw->at91_shdwc_base + AT91_SHDW_MR);
+ writel(mode, shdw->shdwc_base + AT91_SHDW_MR);
input = at91_shdwc_get_wakeup_input(pdev, np);
- writel(input, shdw->at91_shdwc_base + AT91_SHDW_WUIR);
+ writel(input, shdw->shdwc_base + AT91_SHDW_WUIR);
}
static const struct shdwc_config sama5d2_shdwc_config = {
@@ -246,6 +255,9 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
if (!pdev->dev.of_node)
return -ENODEV;
+ if (at91_shdwc)
+ return -EBUSY;
+
at91_shdwc = devm_kzalloc(&pdev->dev, sizeof(*at91_shdwc), GFP_KERNEL);
if (!at91_shdwc)
return -ENOMEM;
@@ -253,20 +265,20 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, at91_shdwc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- at91_shdwc->at91_shdwc_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(at91_shdwc->at91_shdwc_base)) {
+ at91_shdwc->shdwc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(at91_shdwc->shdwc_base)) {
dev_err(&pdev->dev, "Could not map reset controller address\n");
- return PTR_ERR(at91_shdwc->at91_shdwc_base);
+ return PTR_ERR(at91_shdwc->shdwc_base);
}
match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
at91_shdwc->cfg = match->data;
- sclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sclk))
- return PTR_ERR(sclk);
+ at91_shdwc->sclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(at91_shdwc->sclk))
+ return PTR_ERR(at91_shdwc->sclk);
- ret = clk_prepare_enable(sclk);
+ ret = clk_prepare_enable(at91_shdwc->sclk);
if (ret) {
dev_err(&pdev->dev, "Could not enable slow clock\n");
return ret;
@@ -276,41 +288,70 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
at91_shdwc_dt_configure(pdev);
- pm_power_off = at91_poweroff;
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-pmc");
+ if (!np) {
+ ret = -ENODEV;
+ goto clk_disable;
+ }
+
+ at91_shdwc->pmc_base = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!at91_shdwc->pmc_base) {
+ ret = -ENOMEM;
+ goto clk_disable;
+ }
np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
- if (!np)
- return 0;
+ if (!np) {
+ ret = -ENODEV;
+ goto unmap;
+ }
- mpddrc_base = of_iomap(np, 0);
+ at91_shdwc->mpddrc_base = of_iomap(np, 0);
of_node_put(np);
- if (!mpddrc_base)
- return 0;
+ if (!at91_shdwc->mpddrc_base) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
- ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
- if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
- (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
- pm_power_off = at91_lpddr_poweroff;
- else
- iounmap(mpddrc_base);
+ pm_power_off = at91_poweroff;
+
+ ddr_type = readl(at91_shdwc->mpddrc_base + AT91_DDRSDRC_MDR) &
+ AT91_DDRSDRC_MD;
+ if (ddr_type != AT91_DDRSDRC_MD_LPDDR2 &&
+ ddr_type != AT91_DDRSDRC_MD_LPDDR3) {
+ iounmap(at91_shdwc->mpddrc_base);
+ at91_shdwc->mpddrc_base = NULL;
+ }
return 0;
+
+unmap:
+ iounmap(at91_shdwc->pmc_base);
+clk_disable:
+ clk_disable_unprepare(at91_shdwc->sclk);
+
+ return ret;
}
static int __exit at91_shdwc_remove(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
- if (pm_power_off == at91_poweroff ||
- pm_power_off == at91_lpddr_poweroff)
+ if (pm_power_off == at91_poweroff)
pm_power_off = NULL;
/* Reset values to disable wake-up features */
- writel(0, shdw->at91_shdwc_base + AT91_SHDW_MR);
- writel(0, shdw->at91_shdwc_base + AT91_SHDW_WUIR);
+ writel(0, shdw->shdwc_base + AT91_SHDW_MR);
+ writel(0, shdw->shdwc_base + AT91_SHDW_WUIR);
+
+ if (shdw->mpddrc_base)
+ iounmap(shdw->mpddrc_base);
+ iounmap(shdw->pmc_base);
- clk_disable_unprepare(sclk);
+ clk_disable_unprepare(shdw->sclk);
return 0;
}
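
The probe now maps two additional register windows by compatible string, always with the same shape: of_find_compatible_node() returns the node with a reference held, of_node_put() drops it straight after of_iomap(), and the caller owns the mapping until iounmap(). A sketch (hypothetical helper):

	#include <linux/of.h>
	#include <linux/of_address.h>

	static void __iomem *example_map_by_compatible(const char *compat)
	{
		struct device_node *np;
		void __iomem *base;

		np = of_find_compatible_node(NULL, NULL, compat);
		if (!np)
			return NULL;		/* no such node */

		base = of_iomap(np, 0);		/* NULL if the mapping fails */
		of_node_put(np);		/* balance of_find_compatible_node() */

		return base;
	}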
diff --git a/drivers/power/reset/qcom-pon.c b/drivers/power/reset/qcom-pon.c
index 0c4caaa7e88f..3fa1642d4c54 100644
--- a/drivers/power/reset/qcom-pon.c
+++ b/drivers/power/reset/qcom-pon.c
@@ -74,6 +74,7 @@ static int pm8916_pon_probe(struct platform_device *pdev)
static const struct of_device_id pm8916_pon_id_table[] = {
{ .compatible = "qcom,pm8916-pon" },
+ { .compatible = "qcom,pms405-pon" },
{ }
};
MODULE_DEVICE_TABLE(of, pm8916_pon_id_table);
diff --git a/drivers/power/reset/rmobile-reset.c b/drivers/power/reset/rmobile-reset.c
index e6569df76941..bd3b396558e0 100644
--- a/drivers/power/reset/rmobile-reset.c
+++ b/drivers/power/reset/rmobile-reset.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Mobile Reset Driver
*
* Copyright (C) 2014 Glider bvba
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/io.h>
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index ff6dab0bf0dd..f27cf0709500 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -645,4 +645,11 @@ config CHARGER_CROS_USBPD
what is connected to USB PD ports from the EC and converts
that into power_supply properties.
+config CHARGER_SC2731
+ tristate "Spreadtrum SC2731 charger driver"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ Say Y here to enable support for battery charging with SC2731
+ PMIC chips.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index a26b402c45d9..767105b88d00 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -85,3 +85,4 @@ obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
+obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 02356f9b5f22..776102c31305 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2433,17 +2433,14 @@ static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
size_t count)
{
unsigned long charge_full;
- ssize_t ret;
+ int ret;
ret = kstrtoul(buf, 10, &charge_full);
+ if (ret)
+ return ret;
- dev_dbg(di->dev, "Ret %zd charge_full %lu", ret, charge_full);
-
- if (!ret) {
- di->bat_cap.max_mah = (int) charge_full;
- ret = count;
- }
- return ret;
+ di->bat_cap.max_mah = (int) charge_full;
+ return count;
}
static ssize_t charge_now_show(struct ab8500_fg *di, char *buf)
@@ -2455,20 +2452,16 @@ static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf,
size_t count)
{
unsigned long charge_now;
- ssize_t ret;
+ int ret;
ret = kstrtoul(buf, 10, &charge_now);
+ if (ret)
+ return ret;
- dev_dbg(di->dev, "Ret %zd charge_now %lu was %d",
- ret, charge_now, di->bat_cap.prev_mah);
-
- if (!ret) {
- di->bat_cap.user_mah = (int) charge_now;
- di->flags.user_cap = true;
- ret = count;
- queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
- }
- return ret;
+ di->bat_cap.user_mah = (int) charge_now;
+ di->flags.user_cap = true;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ return count;
}
static struct ab8500_fg_sysfs_entry charge_full_attr =
@@ -2582,11 +2575,12 @@ static ssize_t ab8505_powercut_flagtime_write(struct device *dev,
const char *buf, size_t count)
{
int ret;
- long unsigned reg_value;
+ int reg_value;
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
if (reg_value > 0x7F) {
dev_err(dev, "Incorrect parameter, echo 0 (1.98s) - 127 (15.625ms) for flagtime\n");
@@ -2636,7 +2630,9 @@ static ssize_t ab8505_powercut_maxtime_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0x7F) {
dev_err(dev, "Incorrect parameter, echo 0 (0.0s) - 127 (1.98s) for maxtime\n");
goto fail;
@@ -2684,7 +2680,9 @@ static ssize_t ab8505_powercut_restart_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0xF) {
dev_err(dev, "Incorrect parameter, echo 0 - 15 for number of restart\n");
goto fail;
@@ -2777,7 +2775,9 @@ static ssize_t ab8505_powercut_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0x1) {
dev_err(dev, "Incorrect parameter, echo 0/1 to disable/enable Pcut feature\n");
goto fail;
@@ -2849,7 +2849,9 @@ static ssize_t ab8505_powercut_debounce_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0x7) {
dev_err(dev, "Incorrect parameter, echo 0 to 7 for debounce setting\n");
goto fail;
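
All of the store callbacks in this file now converge on the same kstrto*() shape: parse, propagate the parser's error, apply, return count. The generic form as a sketch (hypothetical attribute; this driver's own entries take an ab8500_fg pointer rather than a device):

	#include <linux/device.h>
	#include <linux/kernel.h>

	static ssize_t example_store(struct device *dev, struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		unsigned long val;
		int ret;

		ret = kstrtoul(buf, 10, &val);
		if (ret)
			return ret;	/* -EINVAL or -ERANGE from the parser */

		/* ...apply val to the device state here... */

		return count;		/* the whole write was consumed */
	}

Unlike the removed simple_strtoul(), the kstrto*() helpers reject trailing garbage and out-of-range input, so the early return above is not optional.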
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 8e2c41ded171..70b90db5ae38 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -32,6 +32,7 @@
#define BQ25890_IRQ_PIN "bq25890_irq"
#define BQ25890_ID 3
+#define BQ25896_ID 0
enum bq25890_fields {
F_EN_HIZ, F_EN_ILIM, F_IILIM, /* Reg00 */
@@ -153,8 +154,8 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_CONV_RATE] = REG_FIELD(0x02, 6, 6),
[F_BOOSTF] = REG_FIELD(0x02, 5, 5),
[F_ICO_EN] = REG_FIELD(0x02, 4, 4),
- [F_HVDCP_EN] = REG_FIELD(0x02, 3, 3),
- [F_MAXC_EN] = REG_FIELD(0x02, 2, 2),
+ [F_HVDCP_EN] = REG_FIELD(0x02, 3, 3), // reserved on BQ25896
+ [F_MAXC_EN] = REG_FIELD(0x02, 2, 2), // reserved on BQ25896
[F_FORCE_DPM] = REG_FIELD(0x02, 1, 1),
[F_AUTO_DPDM_EN] = REG_FIELD(0x02, 0, 0),
/* REG03 */
@@ -163,6 +164,7 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_OTG_CFG] = REG_FIELD(0x03, 5, 5),
[F_CHG_CFG] = REG_FIELD(0x03, 4, 4),
[F_SYSVMIN] = REG_FIELD(0x03, 1, 3),
+ /* MIN_VBAT_SEL on BQ25896 */
/* REG04 */
[F_PUMPX_EN] = REG_FIELD(0x04, 7, 7),
[F_ICHG] = REG_FIELD(0x04, 0, 6),
@@ -181,7 +183,7 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_CHG_TMR] = REG_FIELD(0x07, 1, 2),
[F_JEITA_ISET] = REG_FIELD(0x07, 0, 0),
/* REG08 */
- [F_BATCMP] = REG_FIELD(0x08, 6, 7),
+ [F_BATCMP] = REG_FIELD(0x08, 6, 7), // 5-7 on BQ25896
[F_VCLAMP] = REG_FIELD(0x08, 2, 4),
[F_TREG] = REG_FIELD(0x08, 0, 1),
/* REG09 */
@@ -195,12 +197,13 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_PUMPX_DN] = REG_FIELD(0x09, 0, 0),
/* REG0A */
[F_BOOSTV] = REG_FIELD(0x0A, 4, 7),
+ /* PFM_OTG_DIS 3 on BQ25896 */
[F_BOOSTI] = REG_FIELD(0x0A, 0, 2),
/* REG0B */
[F_VBUS_STAT] = REG_FIELD(0x0B, 5, 7),
[F_CHG_STAT] = REG_FIELD(0x0B, 3, 4),
[F_PG_STAT] = REG_FIELD(0x0B, 2, 2),
- [F_SDP_STAT] = REG_FIELD(0x0B, 1, 1),
+ [F_SDP_STAT] = REG_FIELD(0x0B, 1, 1), // reserved on BQ25896
[F_VSYS_STAT] = REG_FIELD(0x0B, 0, 0),
/* REG0C */
[F_WD_FAULT] = REG_FIELD(0x0C, 7, 7),
@@ -244,10 +247,7 @@ enum bq25890_table_ids {
/* range tables */
TBL_ICHG,
TBL_ITERM,
- TBL_IPRECHG,
TBL_VREG,
- TBL_BATCMP,
- TBL_VCLAMP,
TBL_BOOSTV,
TBL_SYSVMIN,
@@ -287,8 +287,6 @@ static const union {
[TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */
[TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */
[TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */
- [TBL_BATCMP] = { .rt = {0, 140, 20} }, /* mOhm */
- [TBL_VCLAMP] = { .rt = {0, 224000, 32000} }, /* uV */
[TBL_BOOSTV] = { .rt = {4550000, 5510000, 64000} }, /* uV */
[TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} }, /* uV */
@@ -401,6 +399,16 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
val->strval = BQ25890_MANUFACTURER;
break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ if (bq->chip_id == BQ25890_ID)
+ val->strval = "BQ25890";
+ else if (bq->chip_id == BQ25896_ID)
+ val->strval = "BQ25896";
+ else
+ val->strval = "UNKNOWN";
+
+ break;
+
case POWER_SUPPLY_PROP_ONLINE:
val->intval = state.online;
break;
@@ -453,6 +461,15 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
val->intval = bq25890_find_val(bq->init_data.iterm, TBL_ITERM);
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = bq25890_field_read(bq, F_SYSV); /* read measured value */
+ if (ret < 0)
+ return ret;
+
+ /* converted_val = 2.304V + ADC_val * 20mV (table 10.3.15) */
+ val->intval = 2304000 + ret * 20000;
+ break;
+
default:
return -EINVAL;
}
@@ -608,30 +625,40 @@ static int bq25890_hw_init(struct bq25890_device *bq)
};
ret = bq25890_chip_reset(bq);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Reset failed %d\n", ret);
return ret;
+ }
/* disable watchdog */
ret = bq25890_field_write(bq, F_WD, 0);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Disabling watchdog failed %d\n", ret);
return ret;
+ }
/* initialize currents/voltages and other parameters */
for (i = 0; i < ARRAY_SIZE(init_data); i++) {
ret = bq25890_field_write(bq, init_data[i].id,
init_data[i].value);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Writing init data failed %d\n", ret);
return ret;
+ }
}
/* Configure ADC for continuous conversions. This does not enable it. */
ret = bq25890_field_write(bq, F_CONV_RATE, 1);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Config ADC failed %d\n", ret);
return ret;
+ }
ret = bq25890_get_chip_state(bq, &state);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Get state failed %d\n", ret);
return ret;
+ }
mutex_lock(&bq->lock);
bq->state = state;
@@ -642,6 +669,7 @@ static int bq25890_hw_init(struct bq25890_device *bq)
static enum power_supply_property bq25890_power_supply_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_HEALTH,
@@ -650,6 +678,7 @@ static enum power_supply_property bq25890_power_supply_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
};
static char *bq25890_charger_supplied_to[] = {
@@ -767,6 +796,9 @@ static int bq25890_fw_read_u32_props(struct bq25890_device *bq)
if (props[i].optional)
continue;
+ dev_err(bq->dev, "Unable to read property %d %s\n", ret,
+ props[i].name);
+
return ret;
}
@@ -840,7 +872,7 @@ static int bq25890_probe(struct i2c_client *client,
return bq->chip_id;
}
- if (bq->chip_id != BQ25890_ID) {
+ if ((bq->chip_id != BQ25890_ID) && (bq->chip_id != BQ25896_ID)) {
dev_err(dev, "Chip with ID=%d, not supported!\n", bq->chip_id);
return -ENODEV;
}
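
The added VOLTAGE_NOW property converts the raw F_SYSV reading exactly as its in-line comment says: a 2.304 V offset plus 20 mV per ADC step, reported in microvolts as the power-supply core expects. A worked form of the conversion (assuming F_SYSV is a 7-bit field; its REG_FIELD definition is not shown in this hunk):

	/* F_SYSV codes 0..127 would map to 2304000..4844000 uV. */
	static int example_sysv_code_to_uv(int code)
	{
		return 2304000 + code * 20000;
	}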
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index f022e1b550df..6dbbe95844a3 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -432,6 +432,7 @@ static u8
[BQ27XXX_REG_AP] = 0x18,
BQ27XXX_DM_REG_ROWS,
};
+#define bq27411_regs bq27421_regs
#define bq27425_regs bq27421_regs
#define bq27426_regs bq27421_regs
#define bq27441_regs bq27421_regs
@@ -665,6 +666,7 @@ static enum power_supply_property bq27421_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_MANUFACTURER,
};
+#define bq27411_props bq27421_props
#define bq27425_props bq27421_props
#define bq27426_props bq27421_props
#define bq27441_props bq27421_props
@@ -725,6 +727,12 @@ static struct bq27xxx_dm_reg bq27545_dm_regs[] = {
#define bq27545_dm_regs 0
#endif
+static struct bq27xxx_dm_reg bq27411_dm_regs[] = {
+ [BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 10, 2, 0, 32767 },
+ [BQ27XXX_DM_DESIGN_ENERGY] = { 82, 12, 2, 0, 32767 },
+ [BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 16, 2, 2800, 3700 },
+};
+
static struct bq27xxx_dm_reg bq27421_dm_regs[] = {
[BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 10, 2, 0, 8000 },
[BQ27XXX_DM_DESIGN_ENERGY] = { 82, 12, 2, 0, 32767 },
@@ -802,6 +810,7 @@ static struct {
[BQ27546] = BQ27XXX_DATA(bq27546, 0 , BQ27XXX_O_OTDC),
[BQ27742] = BQ27XXX_DATA(bq27742, 0 , BQ27XXX_O_OTDC),
[BQ27545] = BQ27XXX_DATA(bq27545, 0x04143672, BQ27XXX_O_OTDC),
+ [BQ27411] = BQ27XXX_DATA(bq27411, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
[BQ27421] = BQ27XXX_DATA(bq27421, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
[BQ27425] = BQ27XXX_DATA(bq27425, 0x04143672, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP),
[BQ27426] = BQ27XXX_DATA(bq27426, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
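
Each bq27xxx_dm_reg initializer above packs five values; read positionally they are (a sketch of the layout, field names taken as assumptions from the initializer order):

	struct example_dm_reg {
		u8 subclass_id;	/* 82: the data-memory subclass holding these fields */
		u8 offset;	/* byte offset inside that subclass block */
		u8 bytes;	/* field width; 2 bytes for all three entries */
		u16 min, max;	/* accepted range, e.g. 2800..3700 for terminate voltage */
	};

Beyond data memory, the bq27411 shares the bq27421 register map and property list outright, which is what the two #define aliases express.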
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 40069128ad44..2677c38a8a42 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -247,6 +247,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27546", BQ27546 },
{ "bq27742", BQ27742 },
{ "bq27545", BQ27545 },
+ { "bq27411", BQ27411 },
{ "bq27421", BQ27421 },
{ "bq27425", BQ27425 },
{ "bq27426", BQ27426 },
@@ -279,6 +280,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
{ .compatible = "ti,bq27546" },
{ .compatible = "ti,bq27742" },
{ .compatible = "ti,bq27545" },
+ { .compatible = "ti,bq27411" },
{ .compatible = "ti,bq27421" },
{ .compatible = "ti,bq27425" },
{ .compatible = "ti,bq27426" },
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 688a16bacfbb..7e9c3984ef6a 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -12,8 +12,12 @@
#include <linux/power_supply.h>
#include <linux/slab.h>
-#define CHARGER_DIR_NAME "CROS_USBPD_CHARGER%d"
-#define CHARGER_DIR_NAME_LENGTH sizeof(CHARGER_DIR_NAME)
+#define CHARGER_USBPD_DIR_NAME "CROS_USBPD_CHARGER%d"
+#define CHARGER_DEDICATED_DIR_NAME "CROS_DEDICATED_CHARGER"
+#define CHARGER_DIR_NAME_LENGTH (sizeof(CHARGER_USBPD_DIR_NAME) >= \
+ sizeof(CHARGER_DEDICATED_DIR_NAME) ? \
+ sizeof(CHARGER_USBPD_DIR_NAME) : \
+ sizeof(CHARGER_DEDICATED_DIR_NAME))
#define CHARGER_CACHE_UPDATE_DELAY msecs_to_jiffies(500)
#define CHARGER_MANUFACTURER_MODEL_LENGTH 32
@@ -42,6 +46,7 @@ struct charger_data {
struct cros_ec_dev *ec_dev;
struct cros_ec_device *ec_device;
int num_charger_ports;
+ int num_usbpd_ports;
int num_registered_psy;
struct port_data *ports[EC_USB_PD_MAX_PORTS];
struct notifier_block notifier;
@@ -58,6 +63,12 @@ static enum power_supply_property cros_usbpd_charger_props[] = {
POWER_SUPPLY_PROP_USB_TYPE
};
+static enum power_supply_property cros_usbpd_dedicated_charger_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
static enum power_supply_usb_type cros_usbpd_charger_usb_types[] = {
POWER_SUPPLY_USB_TYPE_UNKNOWN,
POWER_SUPPLY_USB_TYPE_SDP,
@@ -69,6 +80,11 @@ static enum power_supply_usb_type cros_usbpd_charger_usb_types[] = {
POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID
};
+static bool cros_usbpd_charger_port_is_dedicated(struct port_data *port)
+{
+ return port->port_number >= port->charger->num_usbpd_ports;
+}
+
static int cros_usbpd_charger_ec_command(struct charger_data *charger,
unsigned int version,
unsigned int command,
@@ -103,6 +119,23 @@ static int cros_usbpd_charger_ec_command(struct charger_data *charger,
static int cros_usbpd_charger_get_num_ports(struct charger_data *charger)
{
+ struct ec_response_charge_port_count resp;
+ int ret;
+
+ ret = cros_usbpd_charger_ec_command(charger, 0,
+ EC_CMD_CHARGE_PORT_COUNT,
+ NULL, 0, &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_err(charger->dev,
+ "Unable to get the number of ports (err:0x%x)\n", ret);
+ return ret;
+ }
+
+ return resp.port_count;
+}
+
+static int cros_usbpd_charger_get_usbpd_num_ports(struct charger_data *charger)
+{
struct ec_response_usb_pd_ports resp;
int ret;
@@ -246,7 +279,10 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port)
port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
}
- port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
+ if (cros_usbpd_charger_port_is_dedicated(port))
+ port->psy_desc.type = POWER_SUPPLY_TYPE_MAINS;
+ else
+ port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
dev_dbg(dev,
"Port %d: type=%d vmax=%d vnow=%d cmax=%d clim=%d pmax=%d\n",
@@ -281,7 +317,8 @@ static int cros_usbpd_charger_get_port_status(struct port_data *port,
if (ret < 0)
return ret;
- ret = cros_usbpd_charger_get_discovery_info(port);
+ if (!cros_usbpd_charger_port_is_dedicated(port))
+ ret = cros_usbpd_charger_get_discovery_info(port);
port->last_update = jiffies;
return ret;
@@ -378,12 +415,10 @@ static int cros_usbpd_charger_ec_event(struct notifier_block *nb,
{
struct cros_ec_device *ec_device;
struct charger_data *charger;
- struct device *dev;
u32 host_event;
charger = container_of(nb, struct charger_data, notifier);
ec_device = charger->ec_device;
- dev = charger->dev;
host_event = cros_ec_get_host_event(ec_device);
if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) {
@@ -426,17 +461,56 @@ static int cros_usbpd_charger_probe(struct platform_device *pd)
platform_set_drvdata(pd, charger);
+ /*
+ * We need to know the number of USB PD ports in order to know whether
+ * there is a dedicated port. The dedicated port will always be
+ * after the USB PD ports, and there should be only one.
+ */
+ charger->num_usbpd_ports =
+ cros_usbpd_charger_get_usbpd_num_ports(charger);
+ if (charger->num_usbpd_ports <= 0) {
+ /*
+ * This can happen on a system that doesn't support USB PD.
+ * Log a message, but no need to warn.
+ */
+ dev_info(dev, "No USB PD charging ports found\n");
+ }
+
charger->num_charger_ports = cros_usbpd_charger_get_num_ports(charger);
- if (charger->num_charger_ports <= 0) {
+ if (charger->num_charger_ports < 0) {
/*
* This can happen on a system that doesn't support USB PD.
* Log a message, but no need to warn.
+ * Older ECs do not support the above command; in that case,
+ * fall back to using the number of USB PD ports as the
+ * charger port count.
+ */
+ dev_info(dev, "Could not get charger port count\n");
+ charger->num_charger_ports = charger->num_usbpd_ports;
+ }
+
+ if (charger->num_charger_ports <= 0) {
+ /*
+ * This can happen on a system that doesn't support USB PD and
+ * doesn't have a dedicated port.
+ * Log a message, but no need to warn.
*/
dev_info(dev, "No charging ports found\n");
ret = -ENODEV;
goto fail_nowarn;
}
+ /*
+ * Sanity checks on the number of ports:
+ * there should be at most 1 dedicated port
+ */
+ if (charger->num_charger_ports < charger->num_usbpd_ports ||
+ charger->num_charger_ports > (charger->num_usbpd_ports + 1)) {
+ dev_err(dev, "Unexpected number of charge port count\n");
+ ret = -EPROTO;
+ goto fail_nowarn;
+ }
+
for (i = 0; i < charger->num_charger_ports; i++) {
struct power_supply_config psy_cfg = {};
@@ -448,22 +522,33 @@ static int cros_usbpd_charger_probe(struct platform_device *pd)
port->charger = charger;
port->port_number = i;
- sprintf(port->name, CHARGER_DIR_NAME, i);
psy_desc = &port->psy_desc;
- psy_desc->name = port->name;
- psy_desc->type = POWER_SUPPLY_TYPE_USB;
psy_desc->get_property = cros_usbpd_charger_get_prop;
psy_desc->external_power_changed =
cros_usbpd_charger_power_changed;
- psy_desc->properties = cros_usbpd_charger_props;
- psy_desc->num_properties =
- ARRAY_SIZE(cros_usbpd_charger_props);
- psy_desc->usb_types = cros_usbpd_charger_usb_types;
- psy_desc->num_usb_types =
- ARRAY_SIZE(cros_usbpd_charger_usb_types);
psy_cfg.drv_data = port;
+ if (cros_usbpd_charger_port_is_dedicated(port)) {
+ sprintf(port->name, CHARGER_DEDICATED_DIR_NAME);
+ psy_desc->type = POWER_SUPPLY_TYPE_MAINS;
+ psy_desc->properties =
+ cros_usbpd_dedicated_charger_props;
+ psy_desc->num_properties =
+ ARRAY_SIZE(cros_usbpd_dedicated_charger_props);
+ } else {
+ sprintf(port->name, CHARGER_USBPD_DIR_NAME, i);
+ psy_desc->type = POWER_SUPPLY_TYPE_USB;
+ psy_desc->properties = cros_usbpd_charger_props;
+ psy_desc->num_properties =
+ ARRAY_SIZE(cros_usbpd_charger_props);
+ psy_desc->usb_types = cros_usbpd_charger_usb_types;
+ psy_desc->num_usb_types =
+ ARRAY_SIZE(cros_usbpd_charger_usb_types);
+ }
+
+ psy_desc->name = port->name;
+
psy = devm_power_supply_register_no_ws(dev, psy_desc,
&psy_cfg);
if (IS_ERR(psy)) {
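
The numbering invariant behind all of the above is small: ports [0, num_usbpd_ports) are USB PD ports, and at most one dedicated port may follow. Classification is then a single comparison, as in this sketch mirroring cros_usbpd_charger_port_is_dedicated():

	#include <linux/types.h>

	static bool example_port_is_dedicated(int port_number, int num_usbpd_ports)
	{
		/* A dedicated port, if present, is numbered after all PD ports. */
		return port_number >= num_usbpd_ports;
	}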
diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index 370e9109342b..cad14ba1b648 100644
--- a/drivers/power/supply/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -829,5 +829,5 @@ module_platform_driver(ds2780_battery_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
-MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC driver");
MODULE_ALIAS("platform:ds2780-battery");
diff --git a/drivers/power/supply/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c
index d1b5a19aae7c..5e794607f732 100644
--- a/drivers/power/supply/ds2781_battery.c
+++ b/drivers/power/supply/ds2781_battery.c
@@ -829,6 +829,6 @@ module_platform_driver(ds2781_battery_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>");
-MODULE_DESCRIPTION("Maxim/Dallas DS2781 Stand-Alone Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim/Dallas DS2781 Stand-Alone Fuel Gauge IC driver");
MODULE_ALIAS("platform:ds2781-battery");
diff --git a/drivers/power/supply/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c
index a1b7e0592245..019c58493e3d 100644
--- a/drivers/power/supply/ds2782_battery.c
+++ b/drivers/power/supply/ds2782_battery.c
@@ -471,5 +471,5 @@ static struct i2c_driver ds278x_battery_driver = {
module_i2c_driver(ds278x_battery_driver);
MODULE_AUTHOR("Ryan Mallon");
-MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c
index 449fc56f09eb..8a59feac6468 100644
--- a/drivers/power/supply/max14577_charger.c
+++ b/drivers/power/supply/max14577_charger.c
@@ -1,19 +1,9 @@
-/*
- * max14577_charger.c - Battery charger driver for the Maxim 14577/77836
- *
- * Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max14577_charger.c - Battery charger driver for the Maxim 14577/77836
+//
+// Copyright (C) 2013,2014 Samsung Electronics
+// Krzysztof Kozlowski <krzk@kernel.org>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 33c40f79d23d..91cafc7bed30 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -1,14 +1,10 @@
-/*
- * max17040_battery.c
- * fuel-gauge systems for lithium-ion (Li+) batteries
- *
- * Copyright (C) 2009 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// max17040_battery.c
+// fuel-gauge systems for lithium-ion (Li+) batteries
+//
+// Copyright (C) 2009 Samsung Electronics
+// Minkyu Kang <mk7.kang@samsung.com>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 1a568df383db..2a8d75e5e930 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -1,26 +1,12 @@
-/*
- * Fuel gauge driver for Maxim 17042 / 8966 / 8997
- * Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max17040_battery.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Fuel gauge driver for Maxim 17042 / 8966 / 8997
+// Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max17040_battery.c
#include <linux/acpi.h>
#include <linux/init.h>
diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c
index 749c7926e3c9..a2c5c9858639 100644
--- a/drivers/power/supply/max77693_charger.c
+++ b/drivers/power/supply/max77693_charger.c
@@ -1,19 +1,9 @@
-/*
- * max77693_charger.c - Battery charger driver for the Maxim 77693
- *
- * Copyright (C) 2014 Samsung Electronics
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77693_charger.c - Battery charger driver for the Maxim 77693
+//
+// Copyright (C) 2014 Samsung Electronics
+// Krzysztof Kozlowski <krzk@kernel.org>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/power/supply/max8925_power.c b/drivers/power/supply/max8925_power.c
index 3b94620ce5c1..39b4d5b6ac39 100644
--- a/drivers/power/supply/max8925_power.c
+++ b/drivers/power/supply/max8925_power.c
@@ -124,6 +124,7 @@ static irqreturn_t max8925_charger_handler(int irq, void *data)
case MAX8925_IRQ_VCHG_THM_OK_F:
/* Battery is not ready yet */
dev_dbg(chip->dev, "Battery temperature is out of range\n");
+ /* Fall through */
case MAX8925_IRQ_VCHG_DC_OVP:
dev_dbg(chip->dev, "Error detection\n");
__set_charger(info, 0);
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index c73fb4221695..f5e84cd47924 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -1,23 +1,9 @@
-/*
- * max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/power/supply/max8998_charger.c b/drivers/power/supply/max8998_charger.c
index cad7d1a8feec..9a926c7c0f22 100644
--- a/drivers/power/supply/max8998_charger.c
+++ b/drivers/power/supply/max8998_charger.c
@@ -1,23 +1,9 @@
-/*
- * max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
- *
- * Copyright (C) 2009-2010 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
+//
+// Copyright (C) 2009-2010 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
#include <linux/err.h>
#include <linux/module.h>
@@ -86,7 +72,7 @@ static const struct power_supply_desc max8998_battery_desc = {
static int max8998_battery_probe(struct platform_device *pdev)
{
struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8998_platform_data *pdata = iodev->pdata;
struct power_supply_config psy_cfg = {};
struct max8998_battery_data *max8998;
struct i2c_client *i2c;
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 6170ed8b6854..dce24f596160 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -131,7 +131,8 @@ static ssize_t power_supply_show_property(struct device *dev,
dev_dbg(dev, "driver has no data for `%s' property\n",
attr->attr.name);
else if (ret != -ENODEV && ret != -EAGAIN)
- dev_err(dev, "driver failed to report `%s' property: %zd\n",
+ dev_err_ratelimited(dev,
+ "driver failed to report `%s' property: %zd\n",
attr->attr.name, ret);
return ret;
}
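
Switching to dev_err_ratelimited() matters because a sysfs show() callback can be polled by user space at arbitrary frequency; with plain dev_err(), a persistently failing driver writes one log line per poll. A hedged sketch of the same pattern in isolation, where example_read_hw() is a hypothetical helper:

#include <linux/device.h>
#include <linux/sysfs.h>

/* hypothetical hardware-read helper assumed for this sketch */
extern int example_read_hw(struct device *dev);

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int ret = example_read_hw(dev);

	if (ret < 0) {
		/* logged a bounded number of times per interval, not per poll */
		dev_err_ratelimited(dev, "read failed: %d\n", ret);
		return ret;
	}

	return sprintf(buf, "%d\n", ret);
}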
diff --git a/drivers/power/supply/sc2731_charger.c b/drivers/power/supply/sc2731_charger.c
new file mode 100644
index 000000000000..525a820537bf
--- /dev/null
+++ b/drivers/power/supply/sc2731_charger.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Spreadtrum Communications Inc.
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/usb/phy.h>
+#include <linux/regmap.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+
+/* PMIC global registers definition */
+#define SC2731_CHARGE_STATUS 0xedc
+#define SC2731_CHARGE_FULL BIT(4)
+#define SC2731_MODULE_EN1 0xc0c
+#define SC2731_CHARGE_EN BIT(5)
+
+/* SC2731 switch charger registers definition */
+#define SC2731_CHG_CFG0 0x0
+#define SC2731_CHG_CFG1 0x4
+#define SC2731_CHG_CFG2 0x8
+#define SC2731_CHG_CFG3 0xc
+#define SC2731_CHG_CFG4 0x10
+#define SC2731_CHG_CFG5 0x28
+
+/* SC2731_CHG_CFG0 register definition */
+#define SC2731_PRECHG_RNG_SHIFT 11
+#define SC2731_PRECHG_RNG_MASK GENMASK(12, 11)
+
+#define SC2731_TERMINATION_VOL_MASK GENMASK(2, 1)
+#define SC2731_TERMINATION_VOL_SHIFT 1
+#define SC2731_TERMINATION_VOL_CAL_MASK GENMASK(8, 3)
+#define SC2731_TERMINATION_VOL_CAL_SHIFT 3
+#define SC2731_TERMINATION_CUR_MASK GENMASK(2, 0)
+
+#define SC2731_CC_EN BIT(13)
+#define SC2731_CHARGER_PD BIT(0)
+
+/* SC2731_CHG_CFG1 register definition */
+#define SC2731_CUR_MASK GENMASK(5, 0)
+
+/* SC2731_CHG_CFG5 register definition */
+#define SC2731_CUR_LIMIT_SHIFT 8
+#define SC2731_CUR_LIMIT_MASK GENMASK(9, 8)
+
+/* Default current definition (unit is mA) */
+#define SC2731_CURRENT_LIMIT_100 100
+#define SC2731_CURRENT_LIMIT_500 500
+#define SC2731_CURRENT_LIMIT_900 900
+#define SC2731_CURRENT_LIMIT_2000 2000
+#define SC2731_CURRENT_PRECHG 450
+#define SC2731_CURRENT_STEP 50
+
+struct sc2731_charger_info {
+ struct device *dev;
+ struct regmap *regmap;
+ struct usb_phy *usb_phy;
+ struct notifier_block usb_notify;
+ struct power_supply *psy_usb;
+ struct mutex lock;
+ bool charging;
+ u32 base;
+};
+
+static void sc2731_charger_stop_charge(struct sc2731_charger_info *info)
+{
+ regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CC_EN, 0);
+
+ regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CHARGER_PD, SC2731_CHARGER_PD);
+}
+
+static int sc2731_charger_start_charge(struct sc2731_charger_info *info)
+{
+ int ret;
+
+ /* Enable charger constant current mode */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CC_EN, SC2731_CC_EN);
+ if (ret)
+ return ret;
+
+ /* Start charging */
+ return regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CHARGER_PD, 0);
+}
+
+static int sc2731_charger_set_current_limit(struct sc2731_charger_info *info,
+ u32 limit)
+{
+ u32 val;
+
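+	/* encoding is not monotonic: 0 = 100 mA, 3 = 500 mA, 2 = 900 mA, 1 = 2000 mA */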
+ if (limit <= SC2731_CURRENT_LIMIT_100)
+ val = 0;
+ else if (limit <= SC2731_CURRENT_LIMIT_500)
+ val = 3;
+ else if (limit <= SC2731_CURRENT_LIMIT_900)
+ val = 2;
+ else
+ val = 1;
+
+ return regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG5,
+ SC2731_CUR_LIMIT_MASK,
+ val << SC2731_CUR_LIMIT_SHIFT);
+}
+
+static int sc2731_charger_set_current(struct sc2731_charger_info *info, u32 cur)
+{
+ u32 val;
+ int ret;
+
+ if (cur > SC2731_CURRENT_LIMIT_2000)
+ cur = SC2731_CURRENT_LIMIT_2000;
+ else if (cur < SC2731_CURRENT_PRECHG)
+ cur = SC2731_CURRENT_PRECHG;
+
+ /* Calculate the step value, each step is 50 mA */
+ val = (cur - SC2731_CURRENT_PRECHG) / SC2731_CURRENT_STEP;
+
+ /* Set pre-charge current as 450 mA */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_PRECHG_RNG_MASK,
+ 0x3 << SC2731_PRECHG_RNG_SHIFT);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG1,
+ SC2731_CUR_MASK, val);
+}
+
+static int sc2731_charger_get_status(struct sc2731_charger_info *info)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(info->regmap, SC2731_CHARGE_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (val & SC2731_CHARGE_FULL)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ return POWER_SUPPLY_STATUS_CHARGING;
+}
+
+static int sc2731_charger_get_current(struct sc2731_charger_info *info,
+ u32 *cur)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(info->regmap, info->base + SC2731_CHG_CFG1, &val);
+ if (ret)
+ return ret;
+
+ val &= SC2731_CUR_MASK;
+ *cur = val * SC2731_CURRENT_STEP + SC2731_CURRENT_PRECHG;
+
+ return 0;
+}
+
+static int sc2731_charger_get_current_limit(struct sc2731_charger_info *info,
+ u32 *cur)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(info->regmap, info->base + SC2731_CHG_CFG5, &val);
+ if (ret)
+ return ret;
+
+ val = (val & SC2731_CUR_LIMIT_MASK) >> SC2731_CUR_LIMIT_SHIFT;
+
+ switch (val) {
+ case 0:
+ *cur = SC2731_CURRENT_LIMIT_100;
+ break;
+
+ case 1:
+ *cur = SC2731_CURRENT_LIMIT_2000;
+ break;
+
+ case 2:
+ *cur = SC2731_CURRENT_LIMIT_900;
+ break;
+
+ case 3:
+ *cur = SC2731_CURRENT_LIMIT_500;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+sc2731_charger_usb_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct sc2731_charger_info *info = power_supply_get_drvdata(psy);
+ int ret;
+
+ mutex_lock(&info->lock);
+
+ if (!info->charging) {
+ mutex_unlock(&info->lock);
+ return -ENODEV;
+ }
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = sc2731_charger_set_current(info, val->intval / 1000);
+ if (ret < 0)
+ dev_err(info->dev, "set charge current failed\n");
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = sc2731_charger_set_current_limit(info,
+ val->intval / 1000);
+ if (ret < 0)
+ dev_err(info->dev, "set input current limit failed\n");
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int sc2731_charger_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct sc2731_charger_info *info = power_supply_get_drvdata(psy);
+ int ret = 0;
+ u32 cur;
+
+ mutex_lock(&info->lock);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (info->charging)
+ val->intval = sc2731_charger_get_status(info);
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ if (!info->charging) {
+ val->intval = 0;
+ } else {
+ ret = sc2731_charger_get_current(info, &cur);
+ if (ret)
+ goto out;
+
+ val->intval = cur * 1000;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ if (!info->charging) {
+ val->intval = 0;
+ } else {
+ ret = sc2731_charger_get_current_limit(info, &cur);
+ if (ret)
+ goto out;
+
+ val->intval = cur * 1000;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int sc2731_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = 1;
+ break;
+
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property sc2731_usb_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+};
+
+static const struct power_supply_desc sc2731_charger_desc = {
+ .name = "sc2731_charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = sc2731_usb_props,
+ .num_properties = ARRAY_SIZE(sc2731_usb_props),
+ .get_property = sc2731_charger_usb_get_property,
+ .set_property = sc2731_charger_usb_set_property,
+ .property_is_writeable = sc2731_charger_property_is_writeable,
+};
+
+static int sc2731_charger_usb_change(struct notifier_block *nb,
+ unsigned long limit, void *data)
+{
+ struct sc2731_charger_info *info =
+ container_of(nb, struct sc2731_charger_info, usb_notify);
+ int ret = 0;
+
+ mutex_lock(&info->lock);
+
+ if (limit > 0) {
+ /* set current limitation and start to charge */
+ ret = sc2731_charger_set_current_limit(info, limit);
+ if (ret)
+ goto out;
+
+ ret = sc2731_charger_set_current(info, limit);
+ if (ret)
+ goto out;
+
+ ret = sc2731_charger_start_charge(info);
+ if (ret)
+ goto out;
+
+ info->charging = true;
+ } else {
+ /* Stop charging */
+ info->charging = false;
+ sc2731_charger_stop_charge(info);
+ }
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int sc2731_charger_hw_init(struct sc2731_charger_info *info)
+{
+ struct power_supply_battery_info bat_info = { };
+	u32 term_current, term_voltage, cur_val, vol_val;
+ int ret;
+
+ /* Enable charger module */
+ ret = regmap_update_bits(info->regmap, SC2731_MODULE_EN1,
+ SC2731_CHARGE_EN, SC2731_CHARGE_EN);
+ if (ret)
+ return ret;
+
+ ret = power_supply_get_battery_info(info->psy_usb, &bat_info);
+ if (ret) {
+ dev_warn(info->dev, "no battery information is supplied\n");
+
+ /*
+ * If no battery information is supplied, we should set
+ * default charge termination current to 120 mA, and default
+ * charge termination voltage to 4.35V.
+ */
+ cur_val = 0x2;
+ vol_val = 0x1;
+ } else {
+		term_current = bat_info.charge_term_current_ua / 1000;
+
+		if (term_current <= 90)
+			cur_val = 0;
+		else if (term_current >= 265)
+			cur_val = 0x7;
+		else
+			cur_val = ((term_current - 90) / 25) + 1;
+
+ term_voltage = bat_info.constant_charge_voltage_max_uv / 1000;
+
+ if (term_voltage > 4500)
+ term_voltage = 4500;
+
+ if (term_voltage > 4200)
+ vol_val = (term_voltage - 4200) / 100;
+ else
+ vol_val = 0;
+ }
+
+ /* Set charge termination current */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG2,
+ SC2731_TERMINATION_CUR_MASK, cur_val);
+ if (ret)
+ goto error;
+
+ /* Set charge termination voltage */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_TERMINATION_VOL_MASK |
+ SC2731_TERMINATION_VOL_CAL_MASK,
+ (vol_val << SC2731_TERMINATION_VOL_SHIFT) |
+ (0x6 << SC2731_TERMINATION_VOL_CAL_SHIFT));
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ regmap_update_bits(info->regmap, SC2731_MODULE_EN1, SC2731_CHARGE_EN, 0);
+ return ret;
+}
+
+static int sc2731_charger_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sc2731_charger_info *info;
+ struct power_supply_config charger_cfg = { };
+ int ret;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ info->dev = &pdev->dev;
+
+ info->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!info->regmap) {
+ dev_err(&pdev->dev, "failed to get charger regmap\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "reg", &info->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get register address\n");
+ return -ENODEV;
+ }
+
+ charger_cfg.drv_data = info;
+ charger_cfg.of_node = np;
+ info->psy_usb = devm_power_supply_register(&pdev->dev,
+ &sc2731_charger_desc,
+ &charger_cfg);
+ if (IS_ERR(info->psy_usb)) {
+ dev_err(&pdev->dev, "failed to register power supply\n");
+ return PTR_ERR(info->psy_usb);
+ }
+
+ ret = sc2731_charger_hw_init(info);
+ if (ret)
+ return ret;
+
+ info->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "phys", 0);
+ if (IS_ERR(info->usb_phy)) {
+ dev_err(&pdev->dev, "failed to find USB phy\n");
+ return PTR_ERR(info->usb_phy);
+ }
+
+ info->usb_notify.notifier_call = sc2731_charger_usb_change;
+ ret = usb_register_notifier(info->usb_phy, &info->usb_notify);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register notifier: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sc2731_charger_remove(struct platform_device *pdev)
+{
+ struct sc2731_charger_info *info = platform_get_drvdata(pdev);
+
+ usb_unregister_notifier(info->usb_phy, &info->usb_notify);
+
+ return 0;
+}
+
+static const struct of_device_id sc2731_charger_of_match[] = {
+ { .compatible = "sprd,sc2731-charger", },
+ { }
+};
+
+static struct platform_driver sc2731_charger_driver = {
+ .driver = {
+ .name = "sc2731-charger",
+ .of_match_table = sc2731_charger_of_match,
+ },
+ .probe = sc2731_charger_probe,
+ .remove = sc2731_charger_remove,
+};
+
+module_platform_driver(sc2731_charger_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC2731 Charger Driver");
+MODULE_LICENSE("GPL v2");
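
Since the descriptor above registers a POWER_SUPPLY_TYPE_USB supply named "sc2731_charger" with writable current properties, the limits can be exercised from user space through the power-supply class, which expresses currents in microamps. A hypothetical user-space sketch; the sysfs path is an assumption derived from the supply name, not something this patch defines:

#include <stdio.h>

int main(void)
{
	/* assumed path; power-supply class properties are in microamps */
	FILE *f = fopen("/sys/class/power_supply/sc2731_charger/input_current_limit", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 500000);	/* request a 500 mA input limit */
	return fclose(f) ? 1 : 0;
}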
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index bbcaee56db9d..0e202d4273fb 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -420,7 +420,8 @@ static void twl4030_current_worker(struct work_struct *data)
if (v < USB_MIN_VOLT) {
/* Back up and stop adjusting. */
- bci->usb_cur -= USB_CUR_STEP;
+ if (bci->usb_cur >= USB_CUR_STEP)
+ bci->usb_cur -= USB_CUR_STEP;
bci->usb_cur_target = bci->usb_cur;
} else if (bci->usb_cur >= bci->usb_cur_target ||
bci->usb_cur + USB_CUR_STEP > USB_MAX_CURRENT) {
@@ -439,6 +440,7 @@ static void twl4030_current_worker(struct work_struct *data)
static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
{
int ret;
+ u32 reg;
if (bci->usb_mode == CHARGE_OFF)
enable = false;
@@ -452,14 +454,38 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
bci->usb_enabled = 1;
}
- if (bci->usb_mode == CHARGE_AUTO)
+ if (bci->usb_mode == CHARGE_AUTO) {
+ /* Enable interrupts now. */
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC |
+ TWL4030_TBATOR2 | TWL4030_TBATOR1 |
+ TWL4030_BATSTS);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ if (ret < 0) {
+ dev_err(bci->dev,
+ "failed to unmask interrupts: %d\n",
+ ret);
+ return ret;
+ }
/* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
+ }
/* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */
ret = twl4030_clear_set(TWL_MODULE_MAIN_CHARGE, 0,
TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
if (bci->usb_mode == CHARGE_LINEAR) {
+ /* Enable interrupts now. */
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_TBATOR2 |
+ TWL4030_TBATOR1 | TWL4030_BATSTS);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ if (ret < 0) {
+ dev_err(bci->dev,
+ "failed to unmask interrupts: %d\n",
+ ret);
+ return ret;
+ }
twl4030_clear_set_boot_bci(TWL4030_BCIAUTOAC|TWL4030_CVENAC, 0);
/* Watch dog key: WOVF acknowledge */
ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x33,
@@ -996,12 +1022,13 @@ static int twl4030_bci_probe(struct platform_device *pdev)
if (bci->dev->of_node) {
struct device_node *phynode;
- phynode = of_find_compatible_node(bci->dev->of_node->parent,
- NULL, "ti,twl4030-usb");
+ phynode = of_get_compatible_child(bci->dev->of_node->parent,
+ "ti,twl4030-usb");
if (phynode) {
bci->usb_nb.notifier_call = twl4030_bci_usb_ncb;
bci->transceiver = devm_usb_get_phy_by_node(
bci->dev, phynode, &bci->usb_nb);
+ of_node_put(phynode);
if (IS_ERR(bci->transceiver)) {
ret = PTR_ERR(bci->transceiver);
if (ret == -EPROBE_DEFER)
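
The switch to of_get_compatible_child() does two things at once: it restricts the search to direct children of the parent node (of_find_compatible_node() would keep walking the flat tree past them), and it makes the reference counting explicit, since the returned node must be released with of_node_put(), which is why the hunk also adds that call. A hedged sketch of the pattern, with a hypothetical compatible string:

#include <linux/of.h>

static int example_find_child(struct device_node *parent)
{
	struct device_node *child;

	/* "vendor,example-phy" is a hypothetical compatible string */
	child = of_get_compatible_child(parent, "vendor,example-phy");
	if (!child)
		return -ENODEV;

	/* ... use the node while the reference is held ... */

	of_node_put(child);	/* balance the reference from the lookup */
	return 0;
}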
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 97d9f08271c5..77911fa8f31d 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -67,6 +67,7 @@ static const struct imx7_src_signal imx7_src_signals[IMX7_RESET_NUM] = {
[IMX7_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, BIT(2) | BIT(1) },
[IMX7_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
[IMX7_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
+ [IMX7_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
[IMX7_RESET_DDRC_PRST] = { SRC_DDRC_RCR, BIT(0) },
[IMX7_RESET_DDRC_CORE_RST] = { SRC_DDRC_RCR, BIT(1) },
};
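
The new table entry exposes bit 11 of SRC_PCIEPHY_RCR as an ordinary reset line, so a consumer (typically the i.MX PCIe host driver) can assert it to trigger the PME_Turn_Off handshake. A hedged consumer-side sketch; the "turnoff" lookup name is an assumption for illustration, not defined by this hunk:

#include <linux/reset.h>

static int example_pcie_turnoff(struct device *dev)
{
	struct reset_control *turnoff;
	int ret;

	/* "turnoff" is a hypothetical reset name for this sketch */
	turnoff = devm_reset_control_get_exclusive(dev, "turnoff");
	if (IS_ERR(turnoff))
		return PTR_ERR(turnoff);

	ret = reset_control_assert(turnoff);
	if (ret)
		return ret;

	/* the handshake runs while the line is asserted */
	return reset_control_deassert(turnoff);
}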
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index fd5e215c66b7..6ccd93d0b1cb 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -15,3 +15,7 @@ obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
# pkey kernel module
pkey-objs := pkey_api.o
obj-$(CONFIG_PKEY) += pkey.o
+
+# adjunct processor matrix
+vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
+obj-$(CONFIG_VFIO_AP) += vfio_ap.o
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
new file mode 100644
index 000000000000..7667b38728f0
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VFIO based AP device driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "vfio_ap_private.h"
+
+#define VFIO_AP_ROOT_NAME "vfio_ap"
+#define VFIO_AP_DEV_TYPE_NAME "ap_matrix"
+#define VFIO_AP_DEV_NAME "matrix"
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
+MODULE_LICENSE("GPL v2");
+
+static struct ap_driver vfio_ap_drv;
+
+static struct device_type vfio_ap_dev_type = {
+ .name = VFIO_AP_DEV_TYPE_NAME,
+};
+
+struct ap_matrix_dev *matrix_dev;
+
+/*
+ * Only type 10 adapters (CEX4 and later) are supported
+ * by the AP matrix device driver.
+ */
+static struct ap_device_id ap_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX5,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
+
+static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
+{
+ return 0;
+}
+
+static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
+{
+ /* Nothing to do yet */
+}
+
+static void vfio_ap_matrix_dev_release(struct device *dev)
+{
+	/* no drvdata is ever set on this device; derive the container instead */
+	struct ap_matrix_dev *matrix_dev =
+		container_of(dev, struct ap_matrix_dev, device);
+
+	kfree(matrix_dev);
+}
+
+static int vfio_ap_matrix_dev_create(void)
+{
+ int ret;
+ struct device *root_device;
+
+ root_device = root_device_register(VFIO_AP_ROOT_NAME);
+ if (IS_ERR(root_device))
+ return PTR_ERR(root_device);
+
+ matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
+ if (!matrix_dev) {
+ ret = -ENOMEM;
+ goto matrix_alloc_err;
+ }
+
+	/* Fill in config info via PQAP(QCI), if available */
+	if (test_facility(12)) {
+		ret = ap_qci(&matrix_dev->info);
+		if (ret) {
+			kfree(matrix_dev);	/* not yet owned by the device core */
+			goto matrix_alloc_err;
+		}
+	}
+
+ mutex_init(&matrix_dev->lock);
+ INIT_LIST_HEAD(&matrix_dev->mdev_list);
+
+ matrix_dev->device.type = &vfio_ap_dev_type;
+ dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
+ matrix_dev->device.parent = root_device;
+ matrix_dev->device.release = vfio_ap_matrix_dev_release;
+ matrix_dev->device.driver = &vfio_ap_drv.driver;
+
+ ret = device_register(&matrix_dev->device);
+ if (ret)
+ goto matrix_reg_err;
+
+ return 0;
+
+matrix_reg_err:
+ put_device(&matrix_dev->device);
+matrix_alloc_err:
+ root_device_unregister(root_device);
+
+ return ret;
+}
+
+static void vfio_ap_matrix_dev_destroy(void)
+{
+ device_unregister(&matrix_dev->device);
+ root_device_unregister(matrix_dev->device.parent);
+}
+
+static int __init vfio_ap_init(void)
+{
+ int ret;
+
+ /* If there are no AP instructions, there is nothing to pass through. */
+ if (!ap_instructions_available())
+ return -ENODEV;
+
+ ret = vfio_ap_matrix_dev_create();
+ if (ret)
+ return ret;
+
+ memset(&vfio_ap_drv, 0, sizeof(vfio_ap_drv));
+ vfio_ap_drv.probe = vfio_ap_queue_dev_probe;
+ vfio_ap_drv.remove = vfio_ap_queue_dev_remove;
+ vfio_ap_drv.ids = ap_queue_ids;
+
+ ret = ap_driver_register(&vfio_ap_drv, THIS_MODULE, VFIO_AP_DRV_NAME);
+ if (ret) {
+ vfio_ap_matrix_dev_destroy();
+ return ret;
+ }
+
+ ret = vfio_ap_mdev_register();
+ if (ret) {
+ ap_driver_unregister(&vfio_ap_drv);
+ vfio_ap_matrix_dev_destroy();
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit vfio_ap_exit(void)
+{
+ vfio_ap_mdev_unregister();
+ ap_driver_unregister(&vfio_ap_drv);
+ vfio_ap_matrix_dev_destroy();
+}
+
+module_init(vfio_ap_init);
+module_exit(vfio_ap_exit);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
new file mode 100644
index 000000000000..272ef427dcc0
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -0,0 +1,939 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Adjunct processor matrix VFIO device driver callbacks.
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Halil Pasic <pasic@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
+ */
+#include <linux/string.h>
+#include <linux/vfio.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <asm/kvm.h>
+#include <asm/zcrypt.h>
+
+#include "vfio_ap_private.h"
+
+#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
+#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
+
+static void vfio_ap_matrix_init(struct ap_config_info *info,
+ struct ap_matrix *matrix)
+{
+ matrix->apm_max = info->apxa ? info->Na : 63;
+ matrix->aqm_max = info->apxa ? info->Nd : 15;
+ matrix->adm_max = info->apxa ? info->Nd : 15;
+}
+
+static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+	if (atomic_dec_if_positive(&matrix_dev->available_instances) < 0)
+ return -EPERM;
+
+ matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
+ if (!matrix_mdev) {
+ atomic_inc(&matrix_dev->available_instances);
+ return -ENOMEM;
+ }
+
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
+ mdev_set_drvdata(mdev, matrix_mdev);
+ mutex_lock(&matrix_dev->lock);
+ list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
+ mutex_unlock(&matrix_dev->lock);
+
+ return 0;
+}
+
+static int vfio_ap_mdev_remove(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ mutex_lock(&matrix_dev->lock);
+ list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->lock);
+
+ kfree(matrix_mdev);
+ mdev_set_drvdata(mdev, NULL);
+ atomic_inc(&matrix_dev->available_instances);
+
+ return 0;
+}
+
+static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
+}
+
+static MDEV_TYPE_ATTR_RO(name);
+
+static ssize_t available_instances_show(struct kobject *kobj,
+ struct device *dev, char *buf)
+{
+ return sprintf(buf, "%d\n",
+ atomic_read(&matrix_dev->available_instances));
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
+}
+
+static MDEV_TYPE_ATTR_RO(device_api);
+
+static struct attribute *vfio_ap_mdev_type_attrs[] = {
+ &mdev_type_attr_name.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_available_instances.attr,
+ NULL,
+};
+
+static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
+ .name = VFIO_AP_MDEV_TYPE_HWVIRT,
+ .attrs = vfio_ap_mdev_type_attrs,
+};
+
+static struct attribute_group *vfio_ap_mdev_type_groups[] = {
+ &vfio_ap_mdev_hwvirt_type_group,
+ NULL,
+};
+
+struct vfio_ap_queue_reserved {
+ unsigned long *apid;
+ unsigned long *apqi;
+ bool reserved;
+};
+
+/**
+ * vfio_ap_has_queue
+ *
+ * @dev: an AP queue device
+ * @data: a struct vfio_ap_queue_reserved reference
+ *
+ * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
+ * apid or apqi specified in @data:
+ *
+ * - If @data contains both an apid and apqi value, then @data will be flagged
+ * as reserved if the APID and APQI fields for the AP queue device matches
+ *
+ * - If @data contains only an apid value, @data will be flagged as
+ * reserved if the APID field in the AP queue device matches
+ *
+ * - If @data contains only an apqi value, @data will be flagged as
+ * reserved if the APQI field in the AP queue device matches
+ *
+ * Returns 0 if the function completed successfully; returns -EINVAL if
+ * @data contains neither an apid nor an apqi.
+ */
+static int vfio_ap_has_queue(struct device *dev, void *data)
+{
+ struct vfio_ap_queue_reserved *qres = data;
+ struct ap_queue *ap_queue = to_ap_queue(dev);
+ ap_qid_t qid;
+ unsigned long id;
+
+ if (qres->apid && qres->apqi) {
+ qid = AP_MKQID(*qres->apid, *qres->apqi);
+ if (qid == ap_queue->qid)
+ qres->reserved = true;
+ } else if (qres->apid && !qres->apqi) {
+ id = AP_QID_CARD(ap_queue->qid);
+ if (id == *qres->apid)
+ qres->reserved = true;
+ } else if (!qres->apid && qres->apqi) {
+ id = AP_QID_QUEUE(ap_queue->qid);
+ if (id == *qres->apqi)
+ qres->reserved = true;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vfio_ap_verify_queue_reserved
+ *
+ * @matrix_dev: a mediated matrix device
+ * @apid: an AP adapter ID
+ * @apqi: an AP queue index
+ *
+ * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
+ * driver according to the following rules:
+ *
+ * - If both @apid and @apqi are not NULL, then there must be an AP queue
+ * device bound to the vfio_ap driver with the APQN identified by @apid and
+ * @apqi
+ *
+ * - If only @apid is not NULL, then there must be an AP queue device bound
+ * to the vfio_ap driver with an APQN containing @apid
+ *
+ * - If only @apqi is not NULL, then there must be an AP queue device bound
+ * to the vfio_ap driver with an APQN containing @apqi
+ *
+ * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
+ */
+static int vfio_ap_verify_queue_reserved(unsigned long *apid,
+ unsigned long *apqi)
+{
+ int ret;
+ struct vfio_ap_queue_reserved qres;
+
+ qres.apid = apid;
+ qres.apqi = apqi;
+ qres.reserved = false;
+
+ ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres,
+ vfio_ap_has_queue);
+ if (ret)
+ return ret;
+
+ if (qres.reserved)
+ return 0;
+
+ return -EADDRNOTAVAIL;
+}
+
+static int
+vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ int ret;
+ unsigned long apqi;
+ unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
+
+ if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
+ return vfio_ap_verify_queue_reserved(&apid, NULL);
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
+ ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vfio_ap_mdev_verify_no_sharing
+ *
+ * Verifies that the APQNs derived from the cross product of the AP adapter IDs
+ * and AP queue indexes comprising the AP matrix are not configured for another
+ * mediated device. AP queue sharing is not allowed.
+ *
+ * @matrix_mdev: the mediated matrix device
+ *
+ * Returns 0 if the APQNs are not shared; otherwise, returns -EADDRINUSE.
+ */
+static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
+{
+ struct ap_matrix_mdev *lstdev;
+ DECLARE_BITMAP(apm, AP_DEVICES);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
+
+ list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
+ if (matrix_mdev == lstdev)
+ continue;
+
+ memset(apm, 0, sizeof(apm));
+ memset(aqm, 0, sizeof(aqm));
+
+ /*
+ * We work on full longs, as we can only exclude the leftover
+ * bits in non-inverse order. The leftover is all zeros.
+ */
+ if (!bitmap_and(apm, matrix_mdev->matrix.apm,
+ lstdev->matrix.apm, AP_DEVICES))
+ continue;
+
+ if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
+ lstdev->matrix.aqm, AP_DOMAINS))
+ continue;
+
+ return -EADDRINUSE;
+ }
+
+ return 0;
+}
+
+/**
+ * assign_adapter_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_adapter attribute
+ * @buf: a buffer containing the AP adapter number (APID) to
+ * be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APID from @buf and sets the corresponding bit in the mediated
+ * matrix device's APM.
+ *
+ * Returns the number of bytes processed if the APID is valid; otherwise,
+ * returns one of the following errors:
+ *
+ * 1. -EINVAL
+ * The APID is not a valid number
+ *
+ * 2. -ENODEV
+ * The APID exceeds the maximum value configured for the system
+ *
+ * 3. -EADDRNOTAVAIL
+ * An APQN derived from the cross product of the APID being assigned
+ * and the APQIs previously assigned is not bound to the vfio_ap device
+ * driver; or, if no APQIs have yet been assigned, the APID is not
+ * contained in an APQN bound to the vfio_ap device driver.
+ *
+ * 4. -EADDRINUSE
+ * An APQN derived from the cross product of the APID being assigned
+ * and the APQIs previously assigned is being used by another mediated
+ * matrix device
+ */
+static ssize_t assign_adapter_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow assignment of adapter */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apid);
+ if (ret)
+ return ret;
+
+ if (apid > matrix_mdev->matrix.apm_max)
+ return -ENODEV;
+
+ /*
+ * Set the bit in the AP mask (APM) corresponding to the AP adapter
+ * number (APID). The bits in the mask, from most significant to least
+ * significant bit, correspond to APIDs 0-255.
+ */
+ mutex_lock(&matrix_dev->lock);
+
+ ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
+ if (ret)
+ goto done;
+
+ set_bit_inv(apid, matrix_mdev->matrix.apm);
+
+ ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
+ if (ret)
+ goto share_err;
+
+ ret = count;
+ goto done;
+
+share_err:
+ clear_bit_inv(apid, matrix_mdev->matrix.apm);
+done:
+ mutex_unlock(&matrix_dev->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(assign_adapter);
+
+/**
+ * unassign_adapter_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_adapter attribute
+ * @buf: a buffer containing the adapter number (APID) to be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APID from @buf and clears the corresponding bit in the mediated
+ * matrix device's APM.
+ *
+ * Returns the number of bytes processed if the APID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the APID is not a number
+ *	-ENODEV if the APID exceeds the maximum value configured for the
+ * system
+ */
+static ssize_t unassign_adapter_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow un-assignment of adapter */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apid);
+ if (ret)
+ return ret;
+
+ if (apid > matrix_mdev->matrix.apm_max)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_adapter);
+
+static int
+vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+{
+ int ret;
+ unsigned long apid;
+ unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
+
+ if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
+ return vfio_ap_verify_queue_reserved(NULL, &apqi);
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
+ ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * assign_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_domain attribute
+ * @buf: a buffer containing the AP queue index (APQI) of the domain to
+ * be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APQI from @buf and sets the corresponding bit in the mediated
+ * matrix device's AQM.
+ *
+ * Returns the number of bytes processed if the APQI is valid; otherwise returns
+ * one of the following errors:
+ *
+ * 1. -EINVAL
+ * The APQI is not a valid number
+ *
+ * 2. -ENODEV
+ * The APQI exceeds the maximum value configured for the system
+ *
+ * 3. -EADDRNOTAVAIL
+ * An APQN derived from the cross product of the APQI being assigned
+ * and the APIDs previously assigned is not bound to the vfio_ap device
+ * driver; or, if no APIDs have yet been assigned, the APQI is not
+ * contained in an APQN bound to the vfio_ap device driver.
+ *
+ * 4. -EADDRINUSE
+ * An APQN derived from the cross product of the APQI being assigned
+ * and the APIDs previously assigned is being used by another mediated
+ * matrix device
+ */
+static ssize_t assign_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apqi;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
+
+ /* If the guest is running, disallow assignment of domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apqi);
+ if (ret)
+ return ret;
+ if (apqi > max_apqi)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+
+ ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
+ if (ret)
+ goto done;
+
+ set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+
+ ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
+ if (ret)
+ goto share_err;
+
+ ret = count;
+ goto done;
+
+share_err:
+ clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
+done:
+ mutex_unlock(&matrix_dev->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(assign_domain);
+
+/**
+ * unassign_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_domain attribute
+ * @buf: a buffer containing the AP queue index (APQI) of the domain to
+ * be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APQI from @buf and clears the corresponding bit in the
+ * mediated matrix device's AQM.
+ *
+ * Returns the number of bytes processed if the APQI is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the APQI is not a number
+ * -ENODEV if the APQI exceeds the maximum value configured for the system
+ */
+static ssize_t unassign_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apqi;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow un-assignment of domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apqi);
+ if (ret)
+ return ret;
+
+ if (apqi > matrix_mdev->matrix.aqm_max)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_domain);
+
+/**
+ * assign_control_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_control_domain attribute
+ * @buf: a buffer containing the domain ID to be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the domain ID from @buf and sets the corresponding bit in the mediated
+ * matrix device's ADM.
+ *
+ * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the ID is not a number
+ * -ENODEV if the ID exceeds the maximum value configured for the system
+ */
+static ssize_t assign_control_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long id;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow assignment of control domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &id);
+ if (ret)
+ return ret;
+
+ if (id > matrix_mdev->matrix.adm_max)
+ return -ENODEV;
+
+ /* Set the bit in the ADM (bitmask) corresponding to the AP control
+ * domain number (id). The bits in the mask, from most significant to
+ * least significant, correspond to IDs 0 up to the one less than the
+ * number of control domains that can be assigned.
+ */
+ mutex_lock(&matrix_dev->lock);
+ set_bit_inv(id, matrix_mdev->matrix.adm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(assign_control_domain);
+
+/**
+ * unassign_control_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_control_domain attribute
+ * @buf: a buffer containing the domain ID to be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the domain ID from @buf and clears the corresponding bit in the
+ * mediated matrix device's ADM.
+ *
+ * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the ID is not a number
+ * -ENODEV if the ID exceeds the maximum value configured for the system
+ */
+static ssize_t unassign_control_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long domid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_domid = matrix_mdev->matrix.adm_max;
+
+ /* If the guest is running, disallow un-assignment of control domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &domid);
+ if (ret)
+ return ret;
+ if (domid > max_domid)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv(domid, matrix_mdev->matrix.adm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_control_domain);
+
+static ssize_t control_domains_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ unsigned long id;
+ int nchars = 0;
+ int n;
+ char *bufpos = buf;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_domid = matrix_mdev->matrix.adm_max;
+
+ mutex_lock(&matrix_dev->lock);
+ for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
+ n = sprintf(bufpos, "%04lx\n", id);
+ bufpos += n;
+ nchars += n;
+ }
+ mutex_unlock(&matrix_dev->lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(control_domains);
+
+static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ char *bufpos = buf;
+ unsigned long apid;
+ unsigned long apqi;
+ unsigned long apid1;
+ unsigned long apqi1;
+ unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
+ unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
+ int nchars = 0;
+ int n;
+
+ apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
+ apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
+
+ mutex_lock(&matrix_dev->lock);
+
+ if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ naqm_bits) {
+ n = sprintf(bufpos, "%02lx.%04lx\n", apid,
+ apqi);
+ bufpos += n;
+ nchars += n;
+ }
+ }
+ } else if (apid1 < napm_bits) {
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ n = sprintf(bufpos, "%02lx.\n", apid);
+ bufpos += n;
+ nchars += n;
+ }
+ } else if (apqi1 < naqm_bits) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
+ n = sprintf(bufpos, ".%04lx\n", apqi);
+ bufpos += n;
+ nchars += n;
+ }
+ }
+
+ mutex_unlock(&matrix_dev->lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(matrix);
+
+static struct attribute *vfio_ap_mdev_attrs[] = {
+ &dev_attr_assign_adapter.attr,
+ &dev_attr_unassign_adapter.attr,
+ &dev_attr_assign_domain.attr,
+ &dev_attr_unassign_domain.attr,
+ &dev_attr_assign_control_domain.attr,
+ &dev_attr_unassign_control_domain.attr,
+ &dev_attr_control_domains.attr,
+ &dev_attr_matrix.attr,
+ NULL,
+};
+
+static struct attribute_group vfio_ap_mdev_attr_group = {
+ .attrs = vfio_ap_mdev_attrs
+};
+
+static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
+ &vfio_ap_mdev_attr_group,
+ NULL
+};
+
+/**
+ * vfio_ap_mdev_set_kvm
+ *
+ * @matrix_mdev: a mediated matrix device
+ * @kvm: reference to KVM instance
+ *
+ * Verifies no other mediated matrix device has @kvm and sets a reference to
+ * it in @matrix_mdev->kvm.
+ *
+ * Returns 0 if no other mediated matrix device has a reference to @kvm;
+ * otherwise, returns -EPERM.
+ */
+static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
+ struct kvm *kvm)
+{
+ struct ap_matrix_mdev *m;
+
+ mutex_lock(&matrix_dev->lock);
+
+ list_for_each_entry(m, &matrix_dev->mdev_list, node) {
+ if ((m != matrix_mdev) && (m->kvm == kvm)) {
+ mutex_unlock(&matrix_dev->lock);
+ return -EPERM;
+ }
+ }
+
+ matrix_mdev->kvm = kvm;
+ mutex_unlock(&matrix_dev->lock);
+
+ return 0;
+}
+
+static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int ret;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if (action != VFIO_GROUP_NOTIFY_SET_KVM)
+ return NOTIFY_OK;
+
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+
+ if (!data) {
+ matrix_mdev->kvm = NULL;
+ return NOTIFY_OK;
+ }
+
+ ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
+ if (ret)
+ return NOTIFY_DONE;
+
+ /* If there is no CRYCB pointer, then we can't copy the masks */
+ if (!matrix_mdev->kvm->arch.crypto.crycbd)
+ return NOTIFY_DONE;
+
+ kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm,
+ matrix_mdev->matrix.adm);
+
+ return NOTIFY_OK;
+}
+
+static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry)
+{
+ struct ap_queue_status status;
+
+ do {
+ status = ap_zapq(AP_MKQID(apid, apqi));
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ default:
+ /* things are really broken, give up */
+ return -EIO;
+ }
+ } while (retry--);
+
+ return -EBUSY;
+}
+
+static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
+{
+ int ret;
+ int rc = 0;
+ unsigned long apid, apqi;
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.apm_max + 1) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ matrix_mdev->matrix.aqm_max + 1) {
+ ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ /*
+			 * Regardless of whether a queue turns out to be busy
+			 * or not operational, we need to continue resetting
+			 * the remaining queues.
+ */
+ if (ret)
+ rc = ret;
+ }
+ }
+
+ return rc;
+}
+
+static int vfio_ap_mdev_open(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long events;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
+ events = VFIO_GROUP_NOTIFY_SET_KVM;
+
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &events, &matrix_mdev->group_notifier);
+ if (ret) {
+ module_put(THIS_MODULE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vfio_ap_mdev_release(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ if (matrix_mdev->kvm)
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+
+ vfio_ap_mdev_reset_queues(mdev);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &matrix_mdev->group_notifier);
+ matrix_mdev->kvm = NULL;
+ module_put(THIS_MODULE);
+}
+
+static int vfio_ap_mdev_get_device_info(unsigned long arg)
+{
+ unsigned long minsz;
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
+ info.num_regions = 0;
+ info.num_irqs = 0;
+
+	if (copy_to_user((void __user *)arg, &info, minsz))
+		return -EFAULT;
+
+	return 0;
+}
+
+static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ ret = vfio_ap_mdev_get_device_info(arg);
+ break;
+ case VFIO_DEVICE_RESET:
+ ret = vfio_ap_mdev_reset_queues(mdev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct mdev_parent_ops vfio_ap_matrix_ops = {
+ .owner = THIS_MODULE,
+ .supported_type_groups = vfio_ap_mdev_type_groups,
+ .mdev_attr_groups = vfio_ap_mdev_attr_groups,
+ .create = vfio_ap_mdev_create,
+ .remove = vfio_ap_mdev_remove,
+ .open = vfio_ap_mdev_open,
+ .release = vfio_ap_mdev_release,
+ .ioctl = vfio_ap_mdev_ioctl,
+};
+
+int vfio_ap_mdev_register(void)
+{
+ atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);
+
+ return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
+}
+
+void vfio_ap_mdev_unregister(void)
+{
+ mdev_unregister_device(&matrix_dev->device);
+}
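
Every mask in this file is manipulated through the s390 inverse-order bit helpers (set_bit_inv(), clear_bit_inv(), for_each_set_bit_inv()), where bit number 0 names the most significant bit of the first word, matching the layout the hardware expects for the APM/AQM/ADM masks. A hedged illustration of that numbering:

#include <linux/bitmap.h>
#include <linux/bitops.h>	/* pulls in the s390 *_inv helpers */

static void example_inv_numbering(void)
{
	DECLARE_BITMAP(apm, 256);

	bitmap_zero(apm, 256);
	set_bit_inv(0, apm);	/* APID 0: most significant bit of apm[0] */
	set_bit_inv(1, apm);	/* APID 1: the bit just below it */
	/* apm[0] now reads 0xc000000000000000UL on 64-bit s390 */
}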
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
new file mode 100644
index 000000000000..5675492233c7
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Private data and functions for adjunct processor VFIO matrix driver.
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Halil Pasic <pasic@linux.ibm.com>
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef _VFIO_AP_PRIVATE_H_
+#define _VFIO_AP_PRIVATE_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mdev.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+
+#include "ap_bus.h"
+
+#define VFIO_AP_MODULE_NAME "vfio_ap"
+#define VFIO_AP_DRV_NAME "vfio_ap"
+
+/**
+ * ap_matrix_dev - the AP matrix device structure
+ * @device: generic device structure associated with the AP matrix device
+ * @available_instances: number of mediated matrix devices that can be created
+ * @info: the struct containing the output from the PQAP(QCI) instruction
+ * @mdev_list:	the list of mediated matrix devices created
+ * @lock:	mutex for locking the AP matrix device. This lock will be
+ * taken every time we fiddle with state managed by the vfio_ap
+ * driver, be it using @mdev_list or writing the state of a
+ * single ap_matrix_mdev device. It's quite coarse but we don't
+ * expect much contention.
+ */
+struct ap_matrix_dev {
+ struct device device;
+ atomic_t available_instances;
+ struct ap_config_info info;
+ struct list_head mdev_list;
+ struct mutex lock;
+};
+
+extern struct ap_matrix_dev *matrix_dev;
+
+/**
+ * The AP matrix comprises three bit masks identifying the adapters,
+ * queues (domains) and control domains that belong to an AP matrix. The bits in
+ * each mask, from most significant to least significant bit, correspond to IDs
+ * 0 to 255. When a bit is set, the corresponding ID belongs to the matrix.
+ *
+ * @apm_max: max adapter number in @apm
+ * @apm identifies the AP adapters in the matrix
+ * @aqm_max: max domain number in @aqm
+ * @aqm identifies the AP queues (domains) in the matrix
+ * @adm_max: max domain number in @adm
+ * @adm identifies the AP control domains in the matrix
+ */
+struct ap_matrix {
+ unsigned long apm_max;
+ DECLARE_BITMAP(apm, 256);
+ unsigned long aqm_max;
+ DECLARE_BITMAP(aqm, 256);
+ unsigned long adm_max;
+ DECLARE_BITMAP(adm, 256);
+};
+
+/**
+ * struct ap_matrix_mdev - the mediated matrix device structure
+ * @list: allows the ap_matrix_mdev struct to be added to a list
+ * @matrix: the adapters, usage domains and control domains assigned to the
+ * mediated matrix device.
+ * @group_notifier: notifier block used for specifying callback function for
+ * handling the VFIO_GROUP_NOTIFY_SET_KVM event
+ * @kvm: the struct holding guest's state
+ */
+struct ap_matrix_mdev {
+ struct list_head node;
+ struct ap_matrix matrix;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+};
+
+extern int vfio_ap_mdev_register(void);
+extern void vfio_ap_mdev_unregister(void);
+
+#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index c0631895154e..f96ec68af2e5 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -515,8 +515,8 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_unmap;
- pci_set_dma_seg_boundary(pdev, SZ_1M - 1);
- pci_set_dma_max_seg_size(pdev, SZ_1M);
+ dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
+ dma_set_max_seg_size(&pdev->dev, SZ_1M);
pci_set_master(pdev);
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
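
The pci_set_dma_seg_boundary()/pci_set_dma_max_seg_size() wrappers were dropped from the tree, so drivers call the generic DMA API on the underlying struct device instead. A hedged distillation of the replacement pattern:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/sizes.h>

static void example_constrain_sg(struct pci_dev *pdev)
{
	/* keep scatter-gather segments within 1 MiB and 1 MiB-aligned */
	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
}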
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 27521fc3ef5a..05293babb031 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -518,7 +518,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
- cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
@@ -526,7 +527,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
- pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+ cpu_addr, dma_handle);
goto out;
}
@@ -1027,16 +1029,16 @@ out:
static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
if (tw_dev->command_packet_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
- sizeof(TW_Command_Full)*TW_Q_LENGTH,
- tw_dev->command_packet_virt[0],
- tw_dev->command_packet_phys[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Command_Full) * TW_Q_LENGTH,
+ tw_dev->command_packet_virt[0],
+ tw_dev->command_packet_phys[0]);
if (tw_dev->generic_buffer_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
- TW_SECTOR_SIZE*TW_Q_LENGTH,
- tw_dev->generic_buffer_virt[0],
- tw_dev->generic_buffer_phys[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ TW_SECTOR_SIZE * TW_Q_LENGTH,
+ tw_dev->generic_buffer_virt[0],
+ tw_dev->generic_buffer_phys[0]);
kfree(tw_dev->event_queue[0]);
} /* End twa_free_device_extension() */
@@ -2015,14 +2017,12 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
if (!host) {
@@ -2237,14 +2237,12 @@ static int twa_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
/* Initialize the card */
if (twa_reset_sequence(tw_dev, 0)) {
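
All of these hunks converge on the same idiom: dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, replacing the paired pci_set_dma_mask()/pci_set_consistent_dma_mask() sequence, and the second call retries with a 32-bit mask on platforms that reject 64-bit addressing. A hedged distillation:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
	/* prefer 64-bit DMA, fall back to 32-bit if the platform refuses */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	return 0;
}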
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 40c1e6e64f58..266bdac75304 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -644,8 +644,8 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
- cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH,
- &dma_handle);
+ cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
@@ -899,19 +899,19 @@ out:
static void twl_free_device_extension(TW_Device_Extension *tw_dev)
{
if (tw_dev->command_packet_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Command_Full)*TW_Q_LENGTH,
tw_dev->command_packet_virt[0],
tw_dev->command_packet_phys[0]);
if (tw_dev->generic_buffer_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
TW_SECTOR_SIZE*TW_Q_LENGTH,
tw_dev->generic_buffer_virt[0],
tw_dev->generic_buffer_phys[0]);
if (tw_dev->sense_buffer_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Command_Apache_Header)*
TW_Q_LENGTH,
tw_dev->sense_buffer_virt[0],
@@ -1571,14 +1571,12 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
if (!host) {
@@ -1805,14 +1803,12 @@ static int twl_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
/* Initialize the card */
if (twl_reset_sequence(tw_dev, 0)) {
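For reference, the removed wrappers were thin aliases for the generic API: pci_alloc_consistent(pdev, size, &handle) expanded to dma_alloc_coherent(&pdev->dev, size, &handle, GFP_ATOMIC), and dma_zalloc_coherent() additionally returned zeroed memory. Spelling GFP_KERNEL explicitly at these probe-time call sites is therefore a mild relaxation that is safe in process context. A minimal sketch of the converted allocate/free pairing (length and names illustrative):

	void *cpu_addr;
	dma_addr_t dma_handle;

	cpu_addr = dma_alloc_coherent(&pdev->dev, len, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;
	/* ... buffer is coherently visible to both CPU and device ... */
	dma_free_coherent(&pdev->dev, len, cpu_addr, dma_handle);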
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 471366945bd4..a58257645e94 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -834,15 +834,17 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
dprintk(KERN_NOTICE "3w-xxxx: tw_allocate_memory()\n");
- cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (cpu_addr == NULL) {
- printk(KERN_WARNING "3w-xxxx: pci_alloc_consistent() failed.\n");
+ printk(KERN_WARNING "3w-xxxx: dma_alloc_coherent() failed.\n");
return 1;
}
if ((unsigned long)cpu_addr % (tw_dev->tw_pci_dev->device == TW_DEVICE_ID ? TW_ALIGNMENT_6000 : TW_ALIGNMENT_7000)) {
printk(KERN_WARNING "3w-xxxx: Couldn't allocate correctly aligned memory.\n");
- pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+ cpu_addr, dma_handle);
return 1;
}
@@ -1062,10 +1064,16 @@ static void tw_free_device_extension(TW_Device_Extension *tw_dev)
/* Free command packet and generic buffer memory */
if (tw_dev->command_packet_virtual_address[0])
- pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Command)*TW_Q_LENGTH, tw_dev->command_packet_virtual_address[0], tw_dev->command_packet_physical_address[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Command) * TW_Q_LENGTH,
+ tw_dev->command_packet_virtual_address[0],
+ tw_dev->command_packet_physical_address[0]);
if (tw_dev->alignment_virtual_address[0])
- pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Sector)*TW_Q_LENGTH, tw_dev->alignment_virtual_address[0], tw_dev->alignment_physical_address[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Sector) * TW_Q_LENGTH,
+ tw_dev->alignment_virtual_address[0],
+ tw_dev->alignment_physical_address[0]);
} /* End tw_free_device_extension() */
/* This function will send an initconnection command to controller */
@@ -2260,7 +2268,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
- retval = pci_set_dma_mask(pdev, TW_DMA_MASK);
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
printk(KERN_WARNING "3w-xxxx: Failed to set dma mask.");
goto out_disable_device;
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 69e80c1ed1ca..bd87fbacfbc7 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -230,7 +230,6 @@ static unsigned char tw_sense_table[][4] =
#define TW_IOCTL_TIMEOUT 25 /* 25 seconds */
#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
#define TW_IOCTL_CHRDEV_FREE -1
-#define TW_DMA_MASK DMA_BIT_MASK(32)
#define TW_MAX_CDB_LEN 16
/* Bitmask macros to eliminate bitfields */
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 0c9a100af667..05fe439b66af 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -90,7 +90,7 @@ struct NCR_700_Device_Parameters {
/* The SYNC negotiation sequence looks like:
*
* If DEV_NEGOTIATED_SYNC not set, tack an SDTR message on to the
- * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
+ * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTIATION
* If we get an SDTR reply, work out the SXFER parameters, squirrel
* them away here, clear DEV_BEGIN_SYNC_NEGOTIATION and set
* DEV_NEGOTIATED_SYNC. If we get a REJECT msg, squirrel
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 0d4ffe0ae306..9cee941f97d6 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -201,8 +201,8 @@ static bool __init blogic_create_initccbs(struct blogic_adapter *adapter)
dma_addr_t blkp;
while (adapter->alloc_ccbs < adapter->initccbs) {
- blk_pointer = pci_alloc_consistent(adapter->pci_device,
- blk_size, &blkp);
+ blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+ blk_size, &blkp, GFP_KERNEL);
if (blk_pointer == NULL) {
blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
adapter);
@@ -227,15 +227,16 @@ static void blogic_destroy_ccbs(struct blogic_adapter *adapter)
next_ccb = ccb->next_all;
if (ccb->allocgrp_head) {
if (lastccb)
- pci_free_consistent(adapter->pci_device,
+ dma_free_coherent(&adapter->pci_device->dev,
lastccb->allocgrp_size, lastccb,
lastccb->allocgrp_head);
lastccb = ccb;
}
}
if (lastccb)
- pci_free_consistent(adapter->pci_device, lastccb->allocgrp_size,
- lastccb, lastccb->allocgrp_head);
+ dma_free_coherent(&adapter->pci_device->dev,
+ lastccb->allocgrp_size, lastccb,
+ lastccb->allocgrp_head);
}
@@ -256,8 +257,8 @@ static void blogic_create_addlccbs(struct blogic_adapter *adapter,
if (addl_ccbs <= 0)
return;
while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) {
- blk_pointer = pci_alloc_consistent(adapter->pci_device,
- blk_size, &blkp);
+ blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+ blk_size, &blkp, GFP_KERNEL);
if (blk_pointer == NULL)
break;
blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
@@ -318,8 +319,8 @@ static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
if (ccb->command != NULL)
scsi_dma_unmap(ccb->command);
if (dma_unmap)
- pci_unmap_single(adapter->pci_device, ccb->sensedata,
- ccb->sense_datalen, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata,
+ ccb->sense_datalen, DMA_FROM_DEVICE);
ccb->command = NULL;
ccb->status = BLOGIC_CCB_FREE;
@@ -712,7 +713,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
- if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
@@ -895,7 +896,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
- if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
@@ -952,7 +953,7 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
- if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
@@ -2040,7 +2041,7 @@ static void blogic_relres(struct blogic_adapter *adapter)
Release any allocated memory structs not released elsewhere
*/
if (adapter->mbox_space)
- pci_free_consistent(adapter->pci_device, adapter->mbox_sz,
+ dma_free_coherent(&adapter->pci_device->dev, adapter->mbox_sz,
adapter->mbox_space, adapter->mbox_space_handle);
pci_dev_put(adapter->pci_device);
adapter->mbox_space = NULL;
@@ -2092,8 +2093,9 @@ static bool blogic_initadapter(struct blogic_adapter *adapter)
Initialize the Outgoing and Incoming Mailbox pointers.
*/
adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox));
- adapter->mbox_space = pci_alloc_consistent(adapter->pci_device,
- adapter->mbox_sz, &adapter->mbox_space_handle);
+ adapter->mbox_space = dma_alloc_coherent(&adapter->pci_device->dev,
+ adapter->mbox_sz, &adapter->mbox_space_handle,
+ GFP_KERNEL);
if (adapter->mbox_space == NULL)
return blogic_failure(adapter, "MAILBOX ALLOCATION");
adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space;
@@ -3183,9 +3185,9 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
memcpy(ccb->cdb, cdb, cdblen);
ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
ccb->command = command;
- sense_buf = pci_map_single(adapter->pci_device,
+ sense_buf = dma_map_single(&adapter->pci_device->dev,
command->sense_buffer, ccb->sense_datalen,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
blogic_err("DMA mapping for sense data buffer failed\n",
adapter);
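The BusLogic sense-buffer hunks above follow the standard streaming-DMA sequence: map the buffer before handing the CCB to the adapter, validate the cookie with dma_mapping_error(), and unmap on completion with the same size and direction (DMA_FROM_DEVICE is numerically identical to the old PCI_DMA_FROMDEVICE). Condensed, using the driver's field names (the failure return is a placeholder):

	sense_buf = dma_map_single(&adapter->pci_device->dev,
				   command->sense_buffer, ccb->sense_datalen,
				   DMA_FROM_DEVICE);
	if (dma_mapping_error(&adapter->pci_device->dev, sense_buf))
		/* never hand an unmapped cookie to the hardware */
		return failure;
	...
	dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata,
			 ccb->sense_datalen, DMA_FROM_DEVICE);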
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 867b864f5047..0f17bd51088a 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -2944,7 +2944,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
}
if (currSCCB->Lun == 0x00) {
- if ((currSCCB->Sccb_scsistat == SELECT_SN_ST)) {
+ if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
currTar_Info->TarStatus |=
(unsigned char)SYNC_SUPPORTED;
@@ -2953,8 +2953,8 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
~EE_SYNC_MASK;
}
- else if ((currSCCB->Sccb_scsistat ==
- SELECT_WN_ST)) {
+ else if (currSCCB->Sccb_scsistat ==
+ SELECT_WN_ST) {
currTar_Info->TarStatus =
(currTar_Info->
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7c097006c54d..70988c381268 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -42,6 +42,9 @@ config SCSI_DMA
bool
default n
+config SCSI_ESP_PIO
+ bool
+
config SCSI_NETLINK
bool
default n
@@ -557,6 +560,36 @@ config SCSI_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may not
wish to include it.
+config SCSI_MYRB
+ tristate "Mylex DAC960/DAC1100 PCI RAID Controller (Block Interface)"
+ depends on PCI
+ select RAID_ATTRS
+ help
+ This driver adds support for the Mylex DAC960, AcceleRAID, and
+ eXtremeRAID PCI RAID controllers. This driver supports the
+ older, block-based interface.
+ This driver is a reimplementation of the original DAC960
+ driver. If you have used the DAC960 driver, you should enable
+ this module.
+
+ To compile this driver as a module, choose M here: the
+ module will be called myrb.
+
+config SCSI_MYRS
+ tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
+ depends on PCI
+ select RAID_ATTRS
+ help
+ This driver adds support for the Mylex DAC960, AcceleRAID, and
+ eXtremeRAID PCI RAID controllers. This driver supports the
+ newer, SCSI-based interface only.
+ This driver is a reimplementation of the original DAC960
+ driver. If you have used the DAC960 driver, you should enable
+ this module.
+
+ To compile this driver as a module, choose M here: the
+ module will be called myrs.
+
config VMWARE_PVSCSI
tristate "VMware PVSCSI driver support"
depends on PCI && SCSI && X86
@@ -1332,6 +1365,7 @@ config SCSI_ZORRO_ESP
tristate "Zorro ESP SCSI support"
depends on ZORRO && SCSI
select SCSI_SPI_ATTRS
+ select SCSI_ESP_PIO
help
Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
expansion boards for the Amiga.
@@ -1374,6 +1408,7 @@ config SCSI_MAC_ESP
tristate "Macintosh NCR53c9[46] SCSI"
depends on MAC && SCSI
select SCSI_SPI_ATTRS
+ select SCSI_ESP_PIO
help
This is the NCR 53c9x SCSI controller found on most of the 68040
based Macintoshes.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6d71b2a9592b..fcb41ae329c4 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -106,6 +106,8 @@ obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
obj-$(CONFIG_SCSI_MESH) += mesh.o
obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
+obj-$(CONFIG_SCSI_MYRB) += myrb.o
+obj-$(CONFIG_SCSI_MYRS) += myrs.o
obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 90ea0f5d9bdb..8429c855701f 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -131,6 +131,7 @@
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
+static void bus_reset_cleanup(struct Scsi_Host *);
/**
* initialize_SCp - init the scsi pointer field
@@ -513,16 +514,15 @@ static void complete_cmd(struct Scsi_Host *instance,
if (hostdata->sensing == cmd) {
/* Autosense processing ends here */
- if ((cmd->result & 0xff) != SAM_STAT_GOOD) {
+ if (status_byte(cmd->result) != GOOD) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- set_host_byte(cmd, DID_ERROR);
- } else
+ } else {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ }
hostdata->sensing = NULL;
}
- hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
-
cmd->scsi_done(cmd);
}
@@ -884,7 +884,14 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
/* Probably Bus Reset */
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+ if (sr & SR_RST) {
+ /* Certainly Bus Reset */
+ shost_printk(KERN_WARNING, instance,
+ "bus reset interrupt\n");
+ bus_reset_cleanup(instance);
+ } else {
+ dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+ }
#ifdef SUN3_SCSI_VME
dregs->csr |= CSR_DMA_ENABLE;
#endif
@@ -902,20 +909,16 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-/*
- * Function : int NCR5380_select(struct Scsi_Host *instance,
- * struct scsi_cmnd *cmd)
- *
- * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
- * including ARBITRATION, SELECTION, and initial message out for
- * IDENTIFY and queue messages.
+/**
+ * NCR5380_select - attempt arbitration and selection for a given command
+ * @instance: the Scsi_Host instance
+ * @cmd: the scsi_cmnd to execute
*
- * Inputs : instance - instantiation of the 5380 driver on which this
- * target lives, cmd - SCSI command to execute.
+ * This routine establishes an I_T_L nexus for a SCSI command. This involves
+ * ARBITRATION, SELECTION and MESSAGE OUT phases and an IDENTIFY message.
*
- * Returns cmd if selection failed but should be retried,
- * NULL if selection failed and should not be retried, or
- * NULL if selection succeeded (hostdata->connected == cmd).
+ * Returns true if the operation should be retried.
+ * Returns false if it should not be retried.
*
* Side effects :
* If bus busy, arbitration failed, etc, NCR5380_select() will exit
@@ -923,16 +926,15 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
* SELECT_ENABLE will be set appropriately, the NCR5380
* will cease to drive any SCSI bus signals.
*
- * If successful : I_T_L or I_T_L_Q nexus will be established,
- * instance->connected will be set to cmd.
+ * If successful : the I_T_L nexus will be established, and
+ * hostdata->connected will be set to cmd.
* SELECT interrupt will be disabled.
*
* If failed (no target) : cmd->scsi_done() will be called, and the
* cmd->result host byte set to DID_BAD_TARGET.
*/
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
- struct scsi_cmnd *cmd)
+static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
__releases(&hostdata->lock) __acquires(&hostdata->lock)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -940,6 +942,9 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
unsigned char *data;
int len;
int err;
+ bool ret = true;
+ bool can_disconnect = instance->irq != NO_IRQ &&
+ cmd->cmnd[0] != REQUEST_SENSE;
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -948,7 +953,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/*
* Arbitration and selection phases are slow and involve dropping the
* lock, so we have to watch out for EH. An exception handler may
- * change 'selecting' to NULL. This function will then return NULL
+ * change 'selecting' to NULL. This function will then return false
* so that the caller will forget about 'cmd'. (During information
* transfer phases, EH may change 'connected' to NULL.)
*/
@@ -984,7 +989,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
if (!hostdata->selecting) {
/* Command was aborted */
NCR5380_write(MODE_REG, MR_BASE);
- goto out;
+ return false;
}
if (err < 0) {
NCR5380_write(MODE_REG, MR_BASE);
@@ -1033,7 +1038,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
if (!hostdata->selecting) {
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- goto out;
+ return false;
}
dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
@@ -1116,13 +1121,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
spin_lock_irq(&hostdata->lock);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
/* Can't touch cmd if it has been reclaimed by the scsi ML */
- if (hostdata->selecting) {
- cmd->result = DID_BAD_TARGET << 16;
- complete_cmd(instance, cmd);
- dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
- cmd = NULL;
- }
+ if (!hostdata->selecting)
+ return false;
+
+ cmd->result = DID_BAD_TARGET << 16;
+ complete_cmd(instance, cmd);
+ dsprintk(NDEBUG_SELECTION, instance,
+ "target did not respond within 250ms\n");
+ ret = false;
goto out;
}
@@ -1155,12 +1163,12 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
}
if (!hostdata->selecting) {
do_abort(instance);
- goto out;
+ return false;
}
dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
scmd_id(cmd));
- tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
+ tmp[0] = IDENTIFY(can_disconnect, cmd->device->lun);
len = 1;
data = tmp;
@@ -1171,7 +1179,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n");
- cmd = NULL;
+ ret = false;
goto out;
}
@@ -1186,13 +1194,13 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
initialize_SCp(cmd);
- cmd = NULL;
+ ret = false;
out:
if (!hostdata->selecting)
return false;
hostdata->selecting = NULL;
- return cmd;
+ return ret;
}
/*
@@ -1711,6 +1719,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
return;
#endif
case PHASE_DATAIN:
@@ -1793,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd, scmd_id(cmd), cmd->device->lun);
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result &= ~0xffff;
cmd->result |= cmd->SCp.Status;
@@ -1951,6 +1961,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_transfer_pio(instance, &phase, &len, &data);
if (msgout == ABORT) {
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
maybe_release_dma_irq(instance);
@@ -2014,8 +2025,11 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(MODE_REG, MR_BASE);
target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-
- dsprintk(NDEBUG_RESELECTION, instance, "reselect\n");
+ if (!target_mask || target_mask & (target_mask - 1)) {
+ shost_printk(KERN_WARNING, instance,
+ "reselect: bad target_mask 0x%02x\n", target_mask);
+ return;
+ }
/*
* At this point, we have detected that our SCSI ID is on the bus,
@@ -2029,6 +2043,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
+ shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n");
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
return;
}
@@ -2040,6 +2055,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
+ if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0)
+ /* BUS FREE phase */
+ return;
+ shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n");
do_abort(instance);
return;
}
@@ -2101,13 +2120,16 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
"reselect: removed %p from disconnected queue\n", tmp);
} else {
+ int target = ffs(target_mask) - 1;
+
shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
target_mask, lun);
/*
* Since we have an established nexus that we can't do anything
* with, we must abort it.
*/
- do_abort(instance);
+ if (do_abort(instance) == 0)
+ hostdata->busy[target] &= ~(1 << lun);
return;
}
@@ -2272,15 +2294,16 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (list_del_cmd(&hostdata->autosense, cmd)) {
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
}
out:
if (result == FAILED)
dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
- else
+ else {
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
+ }
queue_work(hostdata->work_q, &hostdata->main_task);
maybe_release_dma_irq(instance);
@@ -2290,31 +2313,12 @@ out:
}
-/**
- * NCR5380_host_reset - reset the SCSI host
- * @cmd: SCSI command undergoing EH
- *
- * Returns SUCCESS
- */
-
-static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+static void bus_reset_cleanup(struct Scsi_Host *instance)
{
- struct Scsi_Host *instance = cmd->device->host;
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int i;
- unsigned long flags;
struct NCR5380_cmd *ncmd;
- spin_lock_irqsave(&hostdata->lock, flags);
-
-#if (NDEBUG & NDEBUG_ANY)
- scmd_printk(KERN_INFO, cmd, __func__);
-#endif
- NCR5380_dprint(NDEBUG_ANY, instance);
- NCR5380_dprint_phase(NDEBUG_ANY, instance);
-
- do_reset(instance);
-
/* reset NCR registers */
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(TARGET_COMMAND_REG, 0);
@@ -2326,11 +2330,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
* commands!
*/
- if (list_del_cmd(&hostdata->unissued, cmd)) {
- cmd->result = DID_RESET << 16;
- cmd->scsi_done(cmd);
- }
-
if (hostdata->selecting) {
hostdata->selecting->result = DID_RESET << 16;
complete_cmd(instance, hostdata->selecting);
@@ -2348,7 +2347,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
- set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
INIT_LIST_HEAD(&hostdata->autosense);
@@ -2365,6 +2363,41 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
queue_work(hostdata->work_q, &hostdata->main_task);
maybe_release_dma_irq(instance);
+}
+
+/**
+ * NCR5380_host_reset - reset the SCSI host
+ * @cmd: SCSI command undergoing EH
+ *
+ * Returns SUCCESS
+ */
+
+static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ unsigned long flags;
+ struct NCR5380_cmd *ncmd;
+
+ spin_lock_irqsave(&hostdata->lock, flags);
+
+#if (NDEBUG & NDEBUG_ANY)
+ shost_printk(KERN_INFO, instance, "%s\n", __func__);
+#endif
+ NCR5380_dprint(NDEBUG_ANY, instance);
+ NCR5380_dprint_phase(NDEBUG_ANY, instance);
+
+ list_for_each_entry(ncmd, &hostdata->unissued, list) {
+ struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd);
+
+ scmd->result = DID_RESET << 16;
+ scmd->scsi_done(scmd);
+ }
+ INIT_LIST_HEAD(&hostdata->unissued);
+
+ do_reset(instance);
+ bus_reset_cleanup(instance);
+
spin_unlock_irqrestore(&hostdata->lock, flags);
return SUCCESS;
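The reshuffled reset path is now layered: NCR5380_host_reset() first fails every unissued command with DID_RESET (those never reached the bus, so they need no bus cleanup), then asserts RST via do_reset(), and delegates chip and queue cleanup to bus_reset_cleanup(), which the interrupt handler can also invoke when it observes SR_RST, i.e. a reset raised by another initiator. In outline (the loop over the unissued list is elided behind a hypothetical helper):

	spin_lock_irqsave(&hostdata->lock, flags);
	fail_unissued_commands(hostdata);	/* DID_RESET, per the loop above */
	do_reset(instance);			/* assert RST on the SCSI bus */
	bus_reset_cleanup(instance);		/* quiesce chip, flush queues */
	spin_unlock_irqrestore(&hostdata->lock, flags);
	return SUCCESS;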
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 31096a0b0fdd..efca509b92b0 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -275,7 +275,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
static void NCR5380_main(struct work_struct *work);
static const char *NCR5380_info(struct Scsi_Host *instance);
static void NCR5380_reselect(struct Scsi_Host *instance);
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
+static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
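NCR5380_select() now tells its caller one thing only: whether the command should be retried. On a false return the command has either been completed or is owned by hostdata->connected, so the caller must forget it. A hypothetical call site (the requeue helper is a placeholder, not the driver's actual main loop):

	if (NCR5380_select(instance, cmd)) {
		/* lost arbitration/selection: queue cmd for another attempt */
		requeue_cmd(hostdata, cmd);
	}
	/* on false: do not touch cmd again */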
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 23b17621b6d2..00072ed9540b 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1094,7 +1094,7 @@ static int inia100_probe_one(struct pci_dev *pdev,
if (pci_enable_device(pdev))
goto out;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "Unable to set 32bit DMA "
"on inia100 adapter, ignoring.\n");
goto out_disable_device;
@@ -1124,7 +1124,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for SCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
- host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys);
+ host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
+ GFP_KERNEL);
if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
@@ -1132,7 +1133,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for ESCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
- host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys);
+ host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
+ GFP_KERNEL);
if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
@@ -1177,10 +1179,12 @@ static int inia100_probe_one(struct pci_dev *pdev,
out_free_irq:
free_irq(shost->irq, shost);
out_free_escb_array:
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
host->escb_virt, host->escb_phys);
out_free_scb_array:
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_scb),
host->scb_virt, host->scb_phys);
out_host_put:
scsi_host_put(shost);
@@ -1200,9 +1204,11 @@ static void inia100_remove_one(struct pci_dev *pdev)
scsi_remove_host(shost);
free_irq(shost->irq, shost);
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
host->escb_virt, host->escb_phys);
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_scb),
host->scb_virt, host->scb_phys);
release_region(shost->io_port, 256);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 6e356325d8d9..bd7f352c28f3 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3480,7 +3480,6 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
static void aac_srb_callback(void *context, struct fib * fibptr)
{
- struct aac_dev *dev;
struct aac_srb_reply *srbreply;
struct scsi_cmnd *scsicmd;
@@ -3491,8 +3490,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
BUG_ON(fibptr == NULL);
- dev = fibptr->dev;
-
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
@@ -3921,13 +3918,11 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
- struct aac_dev *dev;
unsigned long byte_count = 0;
int nseg;
struct scatterlist *sg;
int i;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
psg->count = 0;
psg->sg[0].addr = 0;
@@ -3963,14 +3958,12 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
{
- struct aac_dev *dev;
unsigned long byte_count = 0;
u64 addr;
int nseg;
struct scatterlist *sg;
int i;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
psg->count = 0;
psg->sg[0].addr[0] = 0;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6e1b022a823d..1e77d96a18f2 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2586,9 +2586,7 @@ int aac_acquire_irq(struct aac_dev *dev)
void aac_free_irq(struct aac_dev *dev)
{
int i;
- int cpu;
- cpu = cpumask_first(cpu_online_mask);
if (aac_is_src(dev)) {
if (dev->max_msix > 1) {
for (i = 0; i < dev->max_msix; i++)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 04443577d48b..2d4e4ddc5ace 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1747,7 +1747,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_sectors = (shost->sg_tablesize * 8) + 112;
}
- error = pci_set_dma_max_seg_size(pdev,
+ error = dma_set_max_seg_size(&pdev->dev,
(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
(shost->max_sectors << 9) : 65536);
if (error)
@@ -2055,8 +2055,6 @@ static void aac_pci_resume(struct pci_dev *pdev)
struct scsi_device *sdev = NULL;
struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
if (aac_adapter_ioremap(aac, aac->base_size)) {
dev_err(&pdev->dev, "aacraid: ioremap failed\n");
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 713f69033f20..223ef6f4e258 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -5949,7 +5949,6 @@ static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
{
struct asc_board *boardp = adv_dvc_varp->drv_ptr;
- u32 srb_tag;
adv_req_t *reqp;
adv_sgblk_t *sgblkp;
struct scsi_cmnd *scp;
@@ -5965,7 +5964,6 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
* completed. The adv_req_t structure actually contains the
* completed ADV_SCSI_REQ_Q structure.
*/
- srb_tag = le32_to_cpu(scsiqp->srb_tag);
scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag);
ASC_DBG(1, "scp 0x%p\n", scp);
@@ -6448,7 +6446,7 @@ static void AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
sdtr_data =
AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
ext_msg.req_ack_offset);
- if ((sdtr_data == 0xFF)) {
+ if (sdtr_data == 0xFF) {
q_cntl |= QC_MSG_OUT;
asc_dvc->init_sdtr &= ~target_id;
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c
index 5000bd69c13f..176704b24e6a 100644
--- a/drivers/scsi/aic7xxx/aic7770.c
+++ b/drivers/scsi/aic7xxx/aic7770.c
@@ -42,15 +42,9 @@
* $FreeBSD$
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
#define ID_AIC7770 0x04907770
#define ID_AHA_274x 0x04907771
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 31f2bb9d7146..9a515551641c 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -607,9 +607,6 @@ struct scb {
ahd_io_ctx_t io_ctx;
struct ahd_softc *ahd_softc;
scb_flag flags;
-#ifndef __linux__
- bus_dmamap_t dmamap;
-#endif
struct scb_platform_data *platform_data;
struct map_node *hscb_map;
struct map_node *sg_map;
@@ -1056,9 +1053,6 @@ struct ahd_completion
struct ahd_softc {
bus_space_tag_t tags[2];
bus_space_handle_t bshs[2];
-#ifndef __linux__
- bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
-#endif
struct scb_data scb_data;
struct hardware_scb *next_queued_hscb;
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 2d82ec85753e..9ee75c9a9aa1 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -40,16 +40,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
*/
-#ifdef __linux__
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
#include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
-
/***************************** Lookup Tables **********************************/
static const char *const ahd_chip_names[] =
@@ -59,7 +52,6 @@ static const char *const ahd_chip_names[] =
"aic7902",
"aic7901A"
};
-static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
/*
* Hardware error codes.
@@ -6172,17 +6164,11 @@ ahd_free(struct ahd_softc *ahd)
case 2:
ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
case 1:
-#ifndef __linux__
- ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
-#endif
break;
case 0:
break;
}
-#ifndef __linux__
- ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
-#endif
ahd_platform_free(ahd);
ahd_fini_scbdata(ahd);
for (i = 0; i < AHD_NUM_TARGETS; i++) {
@@ -6934,9 +6920,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
for (i = 0; i < newcount; i++) {
struct scb_platform_data *pdata;
u_int col_tag;
-#ifndef __linux__
- int error;
-#endif
next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC);
if (next_scb == NULL)
@@ -6970,15 +6953,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
next_scb->ahd_softc = ahd;
next_scb->flags = SCB_FLAG_NONE;
-#ifndef __linux__
- error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
- &next_scb->dmamap);
- if (error != 0) {
- kfree(next_scb);
- kfree(pdata);
- break;
- }
-#endif
next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
col_tag = scb_data->numscbs ^ 0x100;
next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
@@ -7091,24 +7065,6 @@ ahd_init(struct ahd_softc *ahd)
if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
ahd->features &= ~AHD_TARGETMODE;
-#ifndef __linux__
- /* DMA tag for mapping buffers into device visible space. */
- if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
- /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
- /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
- ? (dma_addr_t)0x7FFFFFFFFFULL
- : BUS_SPACE_MAXADDR_32BIT,
- /*highaddr*/BUS_SPACE_MAXADDR,
- /*filter*/NULL, /*filterarg*/NULL,
- /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
- /*nsegments*/AHD_NSEG,
- /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
- /*flags*/BUS_DMA_ALLOCNOW,
- &ahd->buffer_dmat) != 0) {
- return (ENOMEM);
- }
-#endif
-
ahd->init_level++;
/*
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index cc9bd26f5d1a..8397ae93f7dd 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -41,14 +41,8 @@
* $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $
*/
-#ifdef __linux__
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#endif
-
#include "aic79xx_pci.h"
static inline uint64_t
@@ -294,13 +288,11 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
int
ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
{
- struct scb_data *shared_scb_data;
u_int command;
uint32_t devconfig;
uint16_t subvendor;
int error;
- shared_scb_data = NULL;
ahd->description = entry->name;
/*
* Record if this is an HP board.
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 4ce4e903a759..5614921b4041 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -568,9 +568,6 @@ struct scb {
ahc_io_ctx_t io_ctx;
struct ahc_softc *ahc_softc;
scb_flag flags;
-#ifndef __linux__
- bus_dmamap_t dmamap;
-#endif
struct scb_platform_data *platform_data;
struct sg_map_node *sg_map;
struct ahc_dma_seg *sg_list;
@@ -906,9 +903,6 @@ typedef void ahc_callback_t (void *);
struct ahc_softc {
bus_space_tag_t tag;
bus_space_handle_t bsh;
-#ifndef __linux__
- bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
-#endif
struct scb_data *scb_data;
struct scb *next_queued_scb;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 9e85a7ef9c8e..cc9e41967ce4 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -64,15 +64,9 @@
* bit to be sent from the chip.
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
/*
* Right now, we only have to read the SEEPROM. But we make it easier to
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 915a34f141e4..f3362f4ab16e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -40,15 +40,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
/***************************** Lookup Tables **********************************/
static const char *const ahc_chip_names[] = {
@@ -67,7 +61,6 @@ static const char *const ahc_chip_names[] = {
"aic7892",
"aic7899"
};
-static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
/*
* Hardware error codes.
@@ -4509,17 +4502,11 @@ ahc_free(struct ahc_softc *ahc)
case 2:
ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
case 1:
-#ifndef __linux__
- ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
-#endif
break;
case 0:
break;
}
-#ifndef __linux__
- ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
-#endif
ahc_platform_free(ahc);
ahc_fini_scbdata(ahc);
for (i = 0; i < AHC_NUM_TARGETS; i++) {
@@ -5005,9 +4992,7 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
for (i = 0; i < newcount; i++) {
struct scb_platform_data *pdata;
-#ifndef __linux__
- int error;
-#endif
+
pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
if (pdata == NULL)
break;
@@ -5021,12 +5006,6 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
next_scb->ahc_softc = ahc;
next_scb->flags = SCB_FREE;
-#ifndef __linux__
- error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
- &next_scb->dmamap);
- if (error != 0)
- break;
-#endif
next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
next_scb->hscb->tag = ahc->scb_data->numscbs;
SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
@@ -5325,24 +5304,6 @@ ahc_init(struct ahc_softc *ahc)
if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
ahc->features &= ~AHC_TARGETMODE;
-#ifndef __linux__
- /* DMA tag for mapping buffers into device visible space. */
- if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
- /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
- /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
- ? (dma_addr_t)0x7FFFFFFFFFULL
- : BUS_SPACE_MAXADDR_32BIT,
- /*highaddr*/BUS_SPACE_MAXADDR,
- /*filter*/NULL, /*filterarg*/NULL,
- /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
- /*nsegments*/AHC_NSEG,
- /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
- /*flags*/BUS_DMA_ALLOCNOW,
- &ahc->buffer_dmat) != 0) {
- return (ENOMEM);
- }
-#endif
-
ahc->init_level++;
/*
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 673e826d7adb..656f680c7802 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -42,16 +42,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
-
#include "aic7xxx_pci.h"
static inline uint64_t
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.h b/drivers/scsi/aic7xxx/aicasm/aicasm.h
index 51678dd46ff7..716a2aefc925 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.h
@@ -42,11 +42,7 @@
* $FreeBSD$
*/
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#ifndef TRUE
#define TRUE 1
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index f1586a437906..924d55a8acbf 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -52,11 +52,7 @@
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
index 708326df0766..8c0479865f04 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -52,11 +52,7 @@
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
index c0457b8c3b77..98e9959c6907 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
@@ -51,11 +51,7 @@
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 93c8667cd704..c78d4f68eea5 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -51,11 +51,7 @@
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index 232aff1fe784..975fcfcc0d8f 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -44,11 +44,7 @@
#include <sys/types.h>
-#ifdef __linux__
#include "aicdb.h"
-#else
-#include <db.h>
-#endif
#include <fcntl.h>
#include <inttypes.h>
#include <regex.h>
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 34bbcad7f83f..7bf7fd5953ac 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -42,11 +42,7 @@
* $FreeBSD$
*/
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
typedef enum {
UNINITIALIZED,
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 1391e5f35918..41c4d8abdd4a 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -771,13 +771,8 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto Err_remove;
err = -ENODEV;
- if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))
- && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)))
- ;
- else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)))
- ;
- else {
+ if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
goto Err_remove;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 22873ce8bbfa..91ea87dfb700 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -724,9 +724,11 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask &= ~SAS_SPEED_60_DIS;
+ /* fall through */
default:
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SAS_SPEED_30_DIS;
+ /* fall through */
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SAS_SPEED_15_DIS;
}
@@ -734,6 +736,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->min_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask |= SAS_SPEED_30_DIS;
+ /* fall through */
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask |= SAS_SPEED_15_DIS;
default:
@@ -745,6 +748,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sata_lrate) {
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SATA_SPEED_30_DIS;
+ /* fall through */
default:
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SATA_SPEED_15_DIS;
@@ -803,6 +807,7 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
/* link reset retries, this should be nominal */
control_phy->link_reset_retries = 10;
+ /* fall through */
case RELEASE_SPINUP_HOLD: /* 0x02 */
/* decide the func_mask */
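These /* fall through */ annotations mark each case cascade as intentional so that builds with -Wimplicit-fallthrough flag only the unannotated ones. The shape being annotated is a cumulative mask update; schematically (identifiers illustrative):

	switch (max_rate) {
	case RATE_6_0:
		mask &= ~DIS_60;
		/* fall through */
	case RATE_3_0:
		mask &= ~DIS_30;
		/* fall through */
	case RATE_1_5:
		mask &= ~DIS_15;
	}

Selecting a higher link rate thus also clears the disable bit of every lower rate.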
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index cdd4ab683be9..7fea344531f6 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -42,13 +42,13 @@ static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
}
-/* PCI_DMA_... to our direction translation.
+/* DMA_... to our direction translation.
*/
static const u8 data_dir_flags[] = {
- [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
- [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */
- [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */
- [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
+ [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
+ [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
};
static int asd_map_scatterlist(struct sas_task *task,
@@ -60,12 +60,12 @@ static int asd_map_scatterlist(struct sas_task *task,
struct scatterlist *sc;
int num_sg, res;
- if (task->data_dir == PCI_DMA_NONE)
+ if (task->data_dir == DMA_NONE)
return 0;
if (task->num_scatter == 0) {
void *p = task->scatter;
- dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
+ dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
task->total_xfer_len,
task->data_dir);
sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
@@ -79,7 +79,7 @@ static int asd_map_scatterlist(struct sas_task *task,
if (sas_protocol_ata(task->task_proto))
num_sg = task->num_scatter;
else
- num_sg = pci_map_sg(asd_ha->pcidev, task->scatter,
+ num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
task->num_scatter, task->data_dir);
if (num_sg == 0)
return -ENOMEM;
@@ -126,8 +126,8 @@ static int asd_map_scatterlist(struct sas_task *task,
return 0;
err_unmap:
if (sas_protocol_ata(task->task_proto))
- pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
+ task->num_scatter, task->data_dir);
return res;
}
@@ -136,21 +136,21 @@ static void asd_unmap_scatterlist(struct asd_ascb *ascb)
struct asd_ha_struct *asd_ha = ascb->ha;
struct sas_task *task = ascb->uldd_task;
- if (task->data_dir == PCI_DMA_NONE)
+ if (task->data_dir == DMA_NONE)
return;
if (task->num_scatter == 0) {
dma_addr_t dma = (dma_addr_t)
le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
- pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len,
- task->data_dir);
+ dma_unmap_single(&ascb->ha->pcidev->dev, dma,
+ task->total_xfer_len, task->data_dir);
return;
}
asd_free_coherent(asd_ha, ascb->sg_arr);
if (task->task_proto != SAS_PROTOCOL_STP)
- pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
+ task->num_scatter, task->data_dir);
}
/* ---------- Task complete tasklet ---------- */
@@ -436,10 +436,10 @@ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
struct domain_device *dev = task->dev;
struct scb *scb;
- pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
- pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
scb = ascb->scb;
@@ -471,10 +471,10 @@ static void asd_unbuild_smp_ascb(struct asd_ascb *a)
struct sas_task *task = a->uldd_task;
BUG_ON(!task);
- pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
- pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
}
/* ---------- SSP ---------- */
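The retyped data_dir_flags[] table works because enum dma_data_direction is a fixed index set in linux/dma-direction.h: DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3, matching the old PCI_DMA_* constants value for value. The lookup therefore stays a plain array index:

	u8 flags = data_dir_flags[task->data_dir];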
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index d81ca66e24d6..27c0a4a937d9 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -96,9 +96,7 @@ static void pci_esp_dma_drain(struct esp *esp);
static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
{
- struct pci_dev *pdev = esp->dev;
-
- return pci_get_drvdata(pdev);
+ return dev_get_drvdata(esp->dev);
}
static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
@@ -116,30 +114,6 @@ static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
return iowrite32(val, esp->regs + (reg * 4UL));
}
-static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return pci_map_single(esp->dev, buf, sz, dir);
-}
-
-static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return pci_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- pci_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- pci_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int pci_esp_irq_pending(struct esp *esp)
{
struct pci_esp_priv *pep = pci_esp_get_priv(esp);
@@ -295,10 +269,6 @@ static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
static const struct esp_driver_ops pci_esp_ops = {
.esp_write8 = pci_esp_write8,
.esp_read8 = pci_esp_read8,
- .map_single = pci_esp_map_single,
- .map_sg = pci_esp_map_sg,
- .unmap_single = pci_esp_unmap_single,
- .unmap_sg = pci_esp_unmap_sg,
.irq_pending = pci_esp_irq_pending,
.reset_dma = pci_esp_reset_dma,
.dma_drain = pci_esp_dma_drain,
@@ -375,18 +345,18 @@ static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
static void dc390_check_eeprom(struct esp *esp)
{
+ struct pci_dev *pdev = to_pci_dev(esp->dev);
u8 EEbuf[128];
u16 *ptr = (u16 *)EEbuf, wval = 0;
int i;
- dc390_read_eeprom((struct pci_dev *)esp->dev, ptr);
+ dc390_read_eeprom(pdev, ptr);
for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
wval += *ptr;
/* no Tekram EEprom found */
if (wval != 0x1234) {
- struct pci_dev *pdev = esp->dev;
dev_printk(KERN_INFO, &pdev->dev,
"No valid Tekram EEprom found\n");
return;
@@ -411,7 +381,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
return -ENODEV;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
dev_printk(KERN_INFO, &pdev->dev,
"failed to set 32bit DMA mask\n");
goto fail_disable_device;
@@ -435,7 +405,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
esp = shost_priv(shost);
esp->host = shost;
- esp->dev = pdev;
+ esp->dev = &pdev->dev;
esp->ops = &pci_esp_ops;
/*
* The am53c974 HBA has a design flaw of generating
@@ -467,8 +437,8 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
pci_set_master(pdev);
- esp->command_block = pci_alloc_consistent(pdev, 16,
- &esp->command_block_dma);
+ esp->command_block = dma_alloc_coherent(&pdev->dev, 16,
+ &esp->command_block_dma, GFP_KERNEL);
if (!esp->command_block) {
dev_printk(KERN_ERR, &pdev->dev,
"failed to allocate command block\n");
@@ -498,7 +468,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
/* Assume 40MHz clock */
esp->cfreq = 40000000;
- err = scsi_esp_register(esp, &pdev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
@@ -508,8 +478,8 @@ fail_free_irq:
free_irq(pdev->irq, esp);
fail_unmap_command_block:
pci_set_drvdata(pdev, NULL);
- pci_free_consistent(pdev, 16, esp->command_block,
- esp->command_block_dma);
+ dma_free_coherent(&pdev->dev, 16, esp->command_block,
+ esp->command_block_dma);
fail_unmap_regs:
pci_iounmap(pdev, esp->regs);
fail_release_regions:
@@ -532,8 +502,8 @@ static void pci_esp_remove_one(struct pci_dev *pdev)
scsi_esp_unregister(esp);
free_irq(pdev->irq, esp);
pci_set_drvdata(pdev, NULL);
- pci_free_consistent(pdev, 16, esp->command_block,
- esp->command_block_dma);
+ dma_free_coherent(&pdev->dev, 16, esp->command_block,
+ esp->command_block_dma);
pci_iounmap(pdev, esp->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
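Genericizing esp->dev to a struct device * keeps the esp_scsi core bus-agnostic; the PCI glue recovers its pci_dev only where PCI-specific services are required. The whole pattern is the pair of accessors seen above:

	struct pci_dev *pdev = to_pci_dev(esp->dev);	/* PCI-only paths */
	struct pci_esp_priv *pep = dev_get_drvdata(esp->dev);

and scsi_esp_register() can now derive the device from esp->dev instead of taking it as a separate argument.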
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 12316ef4c893..d4404eea24fb 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1317,13 +1317,10 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
- int id, lun;
if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
struct scsi_cmnd *abortcmd = pCCB->pcmd;
if (abortcmd) {
- id = abortcmd->device->id;
- lun = abortcmd->device->lun;
abortcmd->result |= DID_ABORT << 16;
arcmsr_ccb_complete(pCCB);
printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
@@ -1798,7 +1795,7 @@ static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, acb->host->host_no);
}
}
@@ -1811,7 +1808,7 @@ static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, acb->host->host_no);
}
}
@@ -1824,7 +1821,7 @@ static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, pACB->host->host_no);
}
return;
@@ -1837,7 +1834,7 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
if (!arcmsr_hbaD_wait_msgint_ready(pACB))
- pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+ pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
"timeout\n", pACB->host->host_no);
}
@@ -1850,7 +1847,7 @@ static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
writel(pACB->out_doorbell, &reg->iobound_doorbell);
if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
- pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+ pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
"timeout\n", pACB->host->host_no);
}
}
@@ -3927,7 +3924,7 @@ static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
- rebulid' timeout \n", acb->host->host_no);
+ rebuild' timeout \n", acb->host->host_no);
}
}
@@ -3938,7 +3935,7 @@ static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
- rebulid' timeout \n",acb->host->host_no);
+ rebuild' timeout \n",acb->host->host_no);
}
}
@@ -3950,7 +3947,7 @@ static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
- rebulid' timeout \n", pACB->host->host_no);
+ rebuild' timeout \n", pACB->host->host_no);
}
return;
}
@@ -3963,7 +3960,7 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
pr_notice("arcmsr%d: wait 'start adapter "
- "background rebulid' timeout\n", pACB->host->host_no);
+ "background rebuild' timeout\n", pACB->host->host_no);
}
}
@@ -3977,7 +3974,7 @@ static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
writel(pACB->out_doorbell, &pmu->iobound_doorbell);
if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
pr_notice("arcmsr%d: wait 'start adapter "
- "background rebulid' timeout \n", pACB->host->host_no);
+ "background rebuild' timeout \n", pACB->host->host_no);
}
}
@@ -4135,9 +4132,9 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
pci_read_config_byte(acb->pdev, i, &value[i]);
}
/* hardware reset signal */
- if ((acb->dev_id == 0x1680)) {
+ if (acb->dev_id == 0x1680) {
writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
- } else if ((acb->dev_id == 0x1880)) {
+ } else if (acb->dev_id == 0x1880) {
do {
count++;
writel(0xF, &pmuC->write_sequence);
@@ -4161,7 +4158,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
} while (((readl(&pmuE->host_diagnostic_3xxx) &
ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
- } else if ((acb->dev_id == 0x1214)) {
+ } else if (acb->dev_id == 0x1214) {
writel(0x20, pmuD->reset_request);
} else {
pci_write_config_byte(acb->pdev, 0x84, 0x20);
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 8996d2329e11..802d15018ec0 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1193,7 +1193,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
for (k = 0; k < 16; k++) {
if (!atp_dev->id[j][k].prd_table)
continue;
- pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
+ dma_free_coherent(&atp_dev->pdev->dev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
atp_dev->id[j][k].prd_table = NULL;
}
}
@@ -1205,7 +1205,7 @@ static int atp870u_init_tables(struct Scsi_Host *host)
int c,k;
for(c=0;c < 2;c++) {
for(k=0;k<16;k++) {
- atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
+ atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL);
if (!atp_dev->id[c][k].prd_table) {
printk("atp870u_init_tables fail\n");
atp870u_free_tables(host);
@@ -1509,7 +1509,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto fail;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
err = -EIO;
goto disable_device;
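
The atp870u hunks above show the conversion that recurs throughout this series: the PCI-specific DMA wrappers (pci_alloc_consistent() and friends) are replaced by the generic DMA API operating on &pdev->dev. One behavioural detail: pci_alloc_consistent() always allocated with GFP_ATOMIC, whereas the converted call sites pass GFP_KERNEL, which is fine here because they run in process context. A minimal sketch of the pattern, with illustrative function names not taken from the driver:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical helpers showing the old->new mapping for coherent memory. */
static void *example_alloc_desc(struct pci_dev *pdev, size_t len,
                                dma_addr_t *bus)
{
        /* was: pci_alloc_consistent(pdev, len, bus) -- implied GFP_ATOMIC */
        return dma_alloc_coherent(&pdev->dev, len, bus, GFP_KERNEL);
}

static void example_free_desc(struct pci_dev *pdev, size_t len, void *va,
                              dma_addr_t bus)
{
        /* was: pci_free_consistent(pdev, len, va, bus) */
        dma_free_coherent(&pdev->dev, len, va, bus);
}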
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index c10aac4dbc5e..0a6972ee94d7 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -520,7 +520,7 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
**/
tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
if (tag_mem->size) {
- pci_free_consistent(ctrl->pdev, tag_mem->size,
+ dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
tag_mem->va, tag_mem->dma);
tag_mem->size = 0;
}
@@ -1269,12 +1269,12 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
struct be_sge *sge = nonembedded_sgl(wrb);
int status = 0;
- nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+ nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
sizeof(struct be_mgmt_controller_attributes),
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : pci_alloc_consistent failed in %s\n",
+ "BG_%d : dma_alloc_coherent failed in %s\n",
__func__);
return -ENOMEM;
}
@@ -1314,7 +1314,7 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
"BG_%d : Failed in beiscsi_check_supported_fw\n");
mutex_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
- pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+ dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return status;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index c8f0a2144b44..96b96e2ab91a 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -771,7 +771,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
status = beiscsi_get_initiator_name(phba, buf, false);
if (status < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Retreiving Initiator Name Failed\n");
+ "BS_%d : Retrieving Initiator Name Failed\n");
status = 0;
}
}
@@ -1071,9 +1071,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
else
req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
- nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
req_memsize,
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -1091,7 +1091,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : mgmt_open_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
beiscsi_free_ep(beiscsi_ep);
return -EAGAIN;
@@ -1104,8 +1104,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : mgmt_open_connection Failed");
if (ret != -EBUSY)
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
+ dma_free_coherent(&phba->ctrl.pdev->dev,
+ nonemb_cmd.size, nonemb_cmd.va,
+ nonemb_cmd.dma);
beiscsi_free_ep(beiscsi_ep);
return ret;
@@ -1118,7 +1119,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_open_connection Success\n");
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return 0;
}
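
Note the asymmetric cleanup in beiscsi_open_conn() above: the non-embedded command buffer is freed on every path except when the completion wait returns -EBUSY. In that case the firmware command is still outstanding and the buffer is owned by the tag state (tag_mem_state), which beiscsi_process_mcc_compl() frees later, as the be_cmds.c hunk shows; freeing it here as well would risk a double free. A sketch of that convention, with hypothetical names:

/* Sketch only: "-EBUSY means the completion path owns and frees the
 * buffer". example_wait_for_compl() stands in for the driver's MCC
 * completion-wait helper.
 */
static int example_issue_nonemb(struct example_ctrl *ctrl,
                                struct be_dma_mem *cmd)
{
        int rc = example_wait_for_compl(ctrl, cmd);

        if (rc != -EBUSY)
                dma_free_coherent(&ctrl->pdev->dev, cmd->size,
                                  cmd->va, cmd->dma);
        /* on -EBUSY the tag-state completion handler frees cmd later */
        return rc;
}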
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 3660059784f7..effb6fc95af4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -511,18 +511,9 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
}
pci_set_master(pcidev);
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
if (ret) {
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
- goto pci_region_release;
- } else {
- ret = pci_set_consistent_dma_mask(pcidev,
- DMA_BIT_MASK(32));
- }
- } else {
- ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
goto pci_region_release;
@@ -550,9 +541,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
if (status)
return status;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = pci_alloc_consistent(pdev,
- mbox_mem_alloc->size,
- &mbox_mem_alloc->dma);
+ mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
+ mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
if (!mbox_mem_alloc->va) {
beiscsi_unmap_pci_function(phba);
return -ENOMEM;
@@ -1866,7 +1856,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{
struct be_queue_info *cq;
struct sol_cqe *sol;
- struct dmsg_cqe *dmsg;
unsigned int total = 0;
unsigned int num_processed = 0;
unsigned short code = 0, cid = 0;
@@ -1939,7 +1928,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
"BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
- dmsg = (struct dmsg_cqe *)sol;
hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
break;
case UNSOL_HDR_NOTIFY:
@@ -2304,11 +2292,11 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
/* Map addr only if there is data_count */
if (dsp_value) {
- io_task->mtask_addr = pci_map_single(phba->pcidev,
+ io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
task->data,
task->data_count,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(phba->pcidev,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&phba->pcidev->dev,
io_task->mtask_addr))
return -ENOMEM;
io_task->mtask_data_count = task->data_count;
@@ -2519,10 +2507,9 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
BEISCSI_MAX_FRAGS_INIT);
curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
do {
- mem_arr->virtual_address = pci_alloc_consistent(
- phba->pcidev,
- curr_alloc_size,
- &bus_add);
+ mem_arr->virtual_address =
+ dma_alloc_coherent(&phba->pcidev->dev,
+ curr_alloc_size, &bus_add, GFP_KERNEL);
if (!mem_arr->virtual_address) {
if (curr_alloc_size <= BE_MIN_MEM_SIZE)
goto free_mem;
@@ -2560,7 +2547,7 @@ free_mem:
mem_descr->num_elements = j;
while ((i) || (j)) {
for (j = mem_descr->num_elements; j > 0; j--) {
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
mem_descr->mem_array[j - 1].size,
mem_descr->mem_array[j - 1].
virtual_address,
@@ -3031,9 +3018,9 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem;
phwi_context->be_eq[i].phba = phba;
- eq_vaddress = pci_alloc_consistent(phba->pcidev,
+ eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
num_eq_pages * PAGE_SIZE,
- &paddr);
+ &paddr, GFP_KERNEL);
if (!eq_vaddress) {
ret = -ENOMEM;
goto create_eq_error;
@@ -3069,7 +3056,7 @@ create_eq_error:
eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem;
if (mem->va)
- pci_free_consistent(phba->pcidev, num_eq_pages
+ dma_free_coherent(&phba->pcidev->dev, num_eq_pages
* PAGE_SIZE,
mem->va, mem->dma);
}
@@ -3097,9 +3084,9 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
pbe_eq->cq = cq;
pbe_eq->phba = phba;
mem = &cq->dma_mem;
- cq_vaddress = pci_alloc_consistent(phba->pcidev,
+ cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
num_cq_pages * PAGE_SIZE,
- &paddr);
+ &paddr, GFP_KERNEL);
if (!cq_vaddress) {
ret = -ENOMEM;
goto create_cq_error;
@@ -3134,7 +3121,7 @@ create_cq_error:
cq = &phwi_context->be_cq[i];
mem = &cq->dma_mem;
if (mem->va)
- pci_free_consistent(phba->pcidev, num_cq_pages
+ dma_free_coherent(&phba->pcidev->dev, num_cq_pages
* PAGE_SIZE,
mem->va, mem->dma);
}
@@ -3326,7 +3313,7 @@ static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
if (mem->va) {
- pci_free_consistent(phba->pcidev, mem->size,
+ dma_free_coherent(&phba->pcidev->dev, mem->size,
mem->va, mem->dma);
mem->va = NULL;
}
@@ -3341,7 +3328,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
+ mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
@@ -3479,7 +3467,7 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
&ctrl->ptag_state[tag].tag_state)) {
ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
if (ptag_mem->size) {
- pci_free_consistent(ctrl->pdev,
+ dma_free_coherent(&ctrl->pdev->dev,
ptag_mem->size,
ptag_mem->va,
ptag_mem->dma);
@@ -3880,7 +3868,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
j = 0;
for (i = 0; i < SE_MEM_MAX; i++) {
for (j = mem_descr->num_elements; j > 0; j--) {
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
mem_descr->mem_array[j - 1].size,
mem_descr->mem_array[j - 1].virtual_address,
(unsigned long)mem_descr->mem_array[j - 1].
@@ -4255,10 +4243,10 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
}
if (io_task->mtask_addr) {
- pci_unmap_single(phba->pcidev,
+ dma_unmap_single(&phba->pcidev->dev,
io_task->mtask_addr,
io_task->mtask_data_count,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
io_task->mtask_addr = 0;
}
}
@@ -4852,9 +4840,9 @@ static int beiscsi_bsg_request(struct bsg_job *job)
switch (bsg_req->msgcode) {
case ISCSI_BSG_HST_VENDOR:
- nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
job->request_payload.payload_len,
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : Failed to allocate memory for "
@@ -4867,7 +4855,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : MBX Tag Allocation Failed\n");
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN;
}
@@ -4881,7 +4869,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
clear_bit(MCC_TAG_STATE_RUNNING,
&phba->ctrl.ptag_state[tag].tag_state);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EIO;
}
@@ -4898,7 +4886,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
bsg_reply->result = status;
bsg_job_done(job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
if (status || extd_status) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -5529,7 +5517,6 @@ static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -5755,7 +5742,7 @@ free_twq:
beiscsi_cleanup_port(phba);
beiscsi_free_mem(phba);
free_port:
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
@@ -5799,7 +5786,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
/* ctrl uninit */
beiscsi_unmap_pci_function(phba);
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
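
Besides the coherent allocations, be_main.c also converts streaming mappings (hwi_write_buffer() and beiscsi_free_mgmt_task_handles()). The shape of that conversion, sketched with generic names:

/* Streaming-DMA sketch: map a buffer for one device-bound transfer,
 * check for mapping failure, unmap once the hardware is done with it.
 */
static int example_map_payload(struct device *dev, void *buf, size_t len,
                               dma_addr_t *out)
{
        /* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* was: pci_dma_mapping_error(pdev, addr) */
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        *out = addr;
        return 0;
}

static void example_unmap_payload(struct device *dev, dma_addr_t addr,
                                  size_t len)
{
        dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}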
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 8fdc07b6c686..ca7b7bbc8371 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -284,7 +284,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
return rc;
free_cmd:
- pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
+ dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma);
return rc;
}
@@ -293,7 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
struct be_dma_mem *cmd,
u8 subsystem, u8 opcode, u32 size)
{
- cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
+ cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
+ GFP_KERNEL);
if (!cmd->va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to allocate memory for if info\n");
@@ -315,7 +316,7 @@ static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
if (tag_mem->size) {
- pci_free_consistent(phba->pcidev, tag_mem->size,
+ dma_free_coherent(&phba->pcidev->dev, tag_mem->size,
tag_mem->va, tag_mem->dma);
tag_mem->size = 0;
}
@@ -761,7 +762,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
"BG_%d : Memory Allocation Failure\n");
/* Free the DMA memory for the IOCTL issuing */
- pci_free_consistent(phba->ctrl.pdev,
+ dma_free_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd.size,
nonemb_cmd.va,
nonemb_cmd.dma);
@@ -780,7 +781,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
ioctl_size += sizeof(struct be_cmd_req_hdr);
/* Free the previous allocated DMA memory */
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va,
nonemb_cmd.dma);
@@ -869,7 +870,7 @@ static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
status);
boot_work = 0;
}
- pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, bs->nonemb_cmd.size,
bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
bs->nonemb_cmd.va = NULL;
break;
@@ -1012,9 +1013,10 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
nonemb_cmd = &phba->boot_struct.nonemb_cmd;
nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp);
- nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd->size,
- &nonemb_cmd->dma);
+ &nonemb_cmd->dma,
+ GFP_KERNEL);
if (!nonemb_cmd->va) {
mutex_unlock(&ctrl->mbox_lock);
return 0;
@@ -1508,9 +1510,10 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
return -EINVAL;
nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
- nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd.size,
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma,
+ GFP_KERNEL);
if (!nonemb_cmd.va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
"BM_%d : invldt_cmds_params alloc failed\n");
@@ -1521,7 +1524,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
wrb = alloc_mcc_wrb(phba, &tag);
if (!wrb) {
mutex_unlock(&ctrl->mbox_lock);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -ENOMEM;
}
@@ -1548,7 +1551,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (rc != -EBUSY)
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return rc;
}
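
beiscsi_prep_nemb_cmd() above uses dma_zalloc_coherent(), which at this point in the API's history was a thin wrapper that ORed __GFP_ZERO into the allocation flags, so the replacement for pci_zalloc_consistent() stays a one-liner. Roughly (names illustrative):

static void *example_zalloc(struct device *dev, size_t size,
                            dma_addr_t *handle)
{
        /* was: pci_zalloc_consistent(pdev, size, handle); the zeroing
         * variant behaves like dma_alloc_coherent() with __GFP_ZERO added
         */
        return dma_zalloc_coherent(dev, size, handle, GFP_KERNEL);
}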
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 3d0c96a5c873..c19c26e0e405 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1453,7 +1453,7 @@ union bfa_aen_data_u {
struct bfa_aen_entry_s {
struct list_head qe;
enum bfa_aen_category aen_category;
- u32 aen_type;
+ int aen_type;
union bfa_aen_data_u aen_data;
u64 aen_tv_sec;
u64 aen_tv_usec;
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index d3b00a475aeb..2de5d514e99c 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -190,27 +190,6 @@ fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
fchs->ox_id = ox_id;
}
-enum fc_parse_status
-fc_els_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
- struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
-
- len = len;
-
- switch (els_cmd->els_code) {
- case FC_ELS_LS_RJT:
- if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
- return FC_PARSE_BUSY;
- else
- return FC_PARSE_FAILURE;
-
- case FC_ELS_ACC:
- return FC_PARSE_OK;
- }
- return FC_PARSE_OK;
-}
-
static void
fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
{
@@ -831,18 +810,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
}
u16
-fc_logo_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-
- len = len;
- if (els_cmd->els_code != FC_ELS_ACC)
- return FC_PARSE_FAILURE;
-
- return FC_PARSE_OK;
-}
-
-u16
fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
wwn_t port_name, wwn_t node_name, u16 pdu_size)
{
@@ -908,40 +875,6 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
}
u16
-fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
- int num_pages = 0;
- int page = 0;
-
- len = len;
-
- if (prlo->command != FC_ELS_ACC)
- return FC_PARSE_FAILURE;
-
- num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
-
- for (page = 0; page < num_pages; page++) {
- if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].opa_valid != 0)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].rpa_valid != 0)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].orig_process_assc != 0)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].resp_process_assc != 0)
- return FC_PARSE_FAILURE;
- }
- return FC_PARSE_OK;
-
-}
-
-u16
fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
{
@@ -972,47 +905,6 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
}
u16
-fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1);
- int num_pages = 0;
- int page = 0;
-
- len = len;
-
- if (tprlo->command != FC_ELS_ACC)
- return FC_PARSE_ACC_INVAL;
-
- num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
-
- for (page = 0; page < num_pages; page++) {
- if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
- return FC_PARSE_NOT_FCP;
- if (tprlo->tprlo_acc_params[page].opa_valid != 0)
- return FC_PARSE_OPAFLAG_INVAL;
- if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
- return FC_PARSE_RPAFLAG_INVAL;
- if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
- return FC_PARSE_OPA_INVAL;
- if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
- return FC_PARSE_RPA_INVAL;
- }
- return FC_PARSE_OK;
-}
-
-enum fc_parse_status
-fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-
- len = len;
- if (els_cmd->els_code != FC_ELS_ACC)
- return FC_PARSE_FAILURE;
-
- return FC_PARSE_OK;
-}
-
-u16
fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
u32 reason_code, u32 reason_expl)
{
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index b109a8813401..ac08d0b5b89a 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -163,7 +163,6 @@ enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
u32 s_id, u16 ox_id, u16 rrq_oxid);
-enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, u8 *name);
@@ -276,8 +275,6 @@ void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
__be16 ox_id);
-enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
-
enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
wwn_t port_name);
@@ -297,8 +294,6 @@ u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
-u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
-
u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size);
@@ -308,14 +303,10 @@ u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages);
-u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
-
u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
u32 tpr_id);
-u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
-
u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
__be16 ox_id, u32 reason_code, u32 reason_expl);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index bd7e6a6fc1f1..911efc98d1fd 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1569,8 +1569,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
goto out_disable_device;
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
if (restart_bfa(bfad) == -1)
goto out_disable_device;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index e61ed8dad0b4..bd4ac187fd8e 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -143,7 +143,7 @@ struct bfad_im_s {
static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
struct bfad_s *drv, int cnt,
enum bfa_aen_category cat,
- enum bfa_ioc_aen_event evt)
+ int evt)
{
struct timespec64 ts;
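
The bfa changes look cosmetic -- aen_type goes from u32 to int in bfa_defs_svc.h and the evt parameter from enum bfa_ioc_aen_event to int in bfad_im.h -- but the likely motivation is implicit enum-conversion warnings: bfad_im_post_vendor_event() is called with values from several distinct bfa_*_aen_event enums, and newer compilers (clang's -Wenum-conversion in particular) complain when one enum type is implicitly converted to another. A minimal sketch of the warning class, with made-up names:

enum cat_a_event { CAT_A_ONLINE = 1 };
enum cat_b_event { CAT_B_FAULT = 3 };

/* was: static void post_event(enum cat_a_event evt); */
static void post_event(int evt) { (void)evt; }

static void report(void)
{
        post_event(CAT_A_ONLINE);
        post_event(CAT_B_FAULT);        /* clean now; warned while the
                                         * parameter was enum cat_a_event */
}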
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 27c8d6ba05bb..cd160f2ec75d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -432,7 +432,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
struct sk_buff *tmp_skb;
- unsigned short oxid;
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
@@ -466,8 +465,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
fh = (struct fc_frame_header *) skb_transport_header(skb);
- oxid = ntohs(fh->fh_ox_id);
-
fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index ed2dae657964..1a458ce08210 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -210,11 +210,8 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- } else {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto err_release_regions;
}
@@ -1102,7 +1099,6 @@ csio_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
/* Bring HW s/m to ready state.
* but don't resume IOs.
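
csio_pci_init() above collapses the old four-branch mask setup into one condition built from dma_set_mask_and_coherent(), which sets the streaming and coherent masks together. Because the function returns 0 on success, short-circuited one-liners are easy to get backwards; the sequential form below is the unambiguous way to spell "prefer 64-bit DMA, fall back to 32-bit" (sketch, generic names):

static int example_set_dma_masks(struct pci_dev *pdev)
{
        int rv;

        rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rv)         /* 64-bit rejected, try the 32-bit fallback */
                rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (rv)
                dev_err(&pdev->dev, "no suitable DMA mask available\n");
        return rv;
}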
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index cc5611efc7a9..66e58f0a75dc 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1845,8 +1845,8 @@ csio_ln_fdmi_init(struct csio_lnode *ln)
/* Allocate Dma buffers for FDMI response Payload */
dma_buf = &ln->mgmt_req->dma_buf;
dma_buf->len = 2048;
- dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
- &dma_buf->paddr);
+ dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
+ &dma_buf->paddr, GFP_KERNEL);
if (!dma_buf->vaddr) {
csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
kfree(ln->mgmt_req);
@@ -1873,7 +1873,7 @@ csio_ln_fdmi_exit(struct csio_lnode *ln)
dma_buf = &ln->mgmt_req->dma_buf;
if (dma_buf->vaddr)
- pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
+ dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
dma_buf->paddr);
kfree(ln->mgmt_req);
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index dab0d3f9bee1..8c15b7acb4b7 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2349,8 +2349,8 @@ csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
}
/* Allocate Dma buffers for DDP */
- ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
- &ddp_desc->paddr);
+ ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
+ &ddp_desc->paddr, GFP_KERNEL);
if (!ddp_desc->vaddr) {
csio_err(hw,
"SCSI response DMA buffer (ddp) allocation"
@@ -2372,8 +2372,8 @@ no_mem:
list_for_each(tmp, &scm->ddp_freelist) {
ddp_desc = (struct csio_dma_buf *) tmp;
tmp = csio_list_prev(tmp);
- pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
- ddp_desc->paddr);
+ dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
+ ddp_desc->vaddr, ddp_desc->paddr);
list_del_init(&ddp_desc->list);
kfree(ddp_desc);
}
@@ -2399,8 +2399,8 @@ csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
list_for_each(tmp, &scm->ddp_freelist) {
ddp_desc = (struct csio_dma_buf *) tmp;
tmp = csio_list_prev(tmp);
- pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
- ddp_desc->paddr);
+ dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
+ ddp_desc->vaddr, ddp_desc->paddr);
list_del_init(&ddp_desc->list);
kfree(ddp_desc);
}
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 5022e82ccc4f..dc12933533d5 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -124,8 +124,8 @@ csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
while (n--) {
buf->len = sge->sge_fl_buf_size[sreg];
- buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
- &buf->paddr);
+ buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
+ &buf->paddr, GFP_KERNEL);
if (!buf->vaddr) {
csio_err(hw, "Could only fill %d buffers!\n", n + 1);
return -ENOMEM;
@@ -233,7 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
q = wrm->q_arr[free_idx];
- q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart);
+ q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
+ GFP_KERNEL);
if (!q->vstart) {
csio_err(hw,
"Failed to allocate DMA memory for "
@@ -1703,14 +1704,14 @@ csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
buf = &q->un.fl.bufs[j];
if (!buf->vaddr)
continue;
- pci_free_consistent(hw->pdev, buf->len,
- buf->vaddr,
- buf->paddr);
+ dma_free_coherent(&hw->pdev->dev,
+ buf->len, buf->vaddr,
+ buf->paddr);
}
kfree(q->un.fl.bufs);
}
- pci_free_consistent(hw->pdev, q->size,
- q->vstart, q->pstart);
+ dma_free_coherent(&hw->pdev->dev, q->size,
+ q->vstart, q->pstart);
}
kfree(q);
}
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 211da1d5a869..064ef5735182 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -35,6 +35,11 @@ static unsigned int dbg_level;
#include "../libcxgbi.h"
+#ifdef CONFIG_CHELSIO_T4_DCB
+#include <net/dcbevent.h>
+#include "cxgb4_dcb.h"
+#endif
+
#define DRV_MODULE_NAME "cxgb4i"
#define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION "0.9.5-ko"
@@ -155,6 +160,15 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.session_recovery_timedout = iscsi_session_recovery_timedout,
};
+#ifdef CONFIG_CHELSIO_T4_DCB
+static int
+cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);
+
+static struct notifier_block cxgb4_dcb_change = {
+ .notifier_call = cxgb4_dcb_change_notify,
+};
+#endif
+
static struct scsi_transport_template *cxgb4i_stt;
/*
@@ -574,6 +588,9 @@ static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
int nparams, flowclen16, flowclen;
nparams = FLOWC_WR_NPARAMS_MIN;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ nparams++;
+#endif
flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
flowclen16 = DIV_ROUND_UP(flowclen, 16);
flowclen = flowclen16 * 16;
@@ -595,6 +612,9 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
struct fw_flowc_wr *flowc;
int nparams, flowclen16, flowclen;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
+#endif
flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
flowc = (struct fw_flowc_wr *)skb->head;
@@ -622,6 +642,17 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
flowc->mnemval[8].val = 0;
flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
flowc->mnemval[8].val = 16384;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
+ if (vlan == CPL_L2T_VLAN_NONE) {
+ pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
+ csk->tid);
+ flowc->mnemval[9].val = cpu_to_be32(0);
+ } else {
+ flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT);
+ }
+#endif
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
@@ -1600,6 +1631,46 @@ static void release_offload_resources(struct cxgbi_sock *csk)
csk->dst = NULL;
}
+#ifdef CONFIG_CHELSIO_T4_DCB
+static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
+{
+ return ndev->dcbnl_ops->getstate(ndev);
+}
+
+static int select_priority(int pri_mask)
+{
+ if (!pri_mask)
+ return 0;
+ return (ffs(pri_mask) - 1);
+}
+
+static u8 get_iscsi_dcb_priority(struct net_device *ndev)
+{
+ int rv;
+ u8 caps;
+
+ struct dcb_app iscsi_dcb_app = {
+ .protocol = 3260
+ };
+
+ rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
+ if (rv)
+ return 0;
+
+ if (caps & DCB_CAP_DCBX_VER_IEEE) {
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+ rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+ } else if (caps & DCB_CAP_DCBX_VER_CEE) {
+ iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
+ rv = dcb_getapp(ndev, &iscsi_dcb_app);
+ }
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "iSCSI priority is set to %u\n", select_priority(rv));
+ return select_priority(rv);
+}
+#endif
+
static int init_act_open(struct cxgbi_sock *csk)
{
struct cxgbi_device *cdev = csk->cdev;
@@ -1613,7 +1684,9 @@ static int init_act_open(struct cxgbi_sock *csk)
unsigned int size, size6;
unsigned int linkspeed;
unsigned int rcv_winf, snd_winf;
-
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 priority = 0;
+#endif
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx,%u.\n",
csk, csk->state, csk->flags, csk->tid);
@@ -1647,7 +1720,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ if (get_iscsi_dcb_state(ndev))
+ priority = get_iscsi_dcb_priority(ndev);
+
+ csk->dcb_priority = priority;
+ csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
+#else
csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
+#endif
if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name);
goto rel_resource_without_clip;
@@ -2146,6 +2227,70 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
return 0;
}
+#ifdef CONFIG_CHELSIO_T4_DCB
+static int
+cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ int i, port = 0xFF;
+ struct net_device *ndev;
+ struct cxgbi_device *cdev = NULL;
+ struct dcb_app_type *iscsi_app = data;
+ struct cxgbi_ports_map *pmap;
+ u8 priority;
+
+ if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
+ return NOTIFY_DONE;
+
+ priority = iscsi_app->app.priority;
+ } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
+ if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
+ return NOTIFY_DONE;
+
+ if (!iscsi_app->app.priority)
+ return NOTIFY_DONE;
+
+ priority = ffs(iscsi_app->app.priority) - 1;
+ } else {
+ return NOTIFY_DONE;
+ }
+
+ if (iscsi_app->app.protocol != 3260)
+ return NOTIFY_DONE;
+
+ log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
+ iscsi_app->ifindex, priority);
+
+ ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
+
+ dev_put(ndev);
+ if (!cdev)
+ return NOTIFY_DONE;
+
+ pmap = &cdev->pmap;
+
+ for (i = 0; i < pmap->used; i++) {
+ if (pmap->port_csk[i]) {
+ struct cxgbi_sock *csk = pmap->port_csk[i];
+
+ if (csk->dcb_priority != priority) {
+ iscsi_conn_failure(csk->user_data,
+ ISCSI_ERR_CONN_FAILED);
+ pr_info("Restarting iSCSI connection %p with "
+ "priority %u->%u.\n", csk,
+ csk->dcb_priority, priority);
+ }
+ }
+ }
+ return NOTIFY_OK;
+}
+#endif
+
static int __init cxgb4i_init_module(void)
{
int rc;
@@ -2157,11 +2302,18 @@ static int __init cxgb4i_init_module(void)
return rc;
cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
+ register_dcbevent_notifier(&cxgb4_dcb_change);
+#endif
return 0;
}
static void __exit cxgb4i_exit_module(void)
{
+#ifdef CONFIG_CHELSIO_T4_DCB
+ unregister_dcbevent_notifier(&cxgb4_dcb_change);
+#endif
cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
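
The cxgb4i additions hook iSCSI connections into Data Center Bridging: init_act_open() now requests the L2T entry with the current iSCSI DCB priority, and the new dcbevent notifier restarts any connection whose priority no longer matches, so it re-establishes with the updated value. Notifier convention: return NOTIFY_DONE for events you ignore, NOTIFY_OK for ones you acted on. A stripped-down sketch of such a notifier (handler body is hypothetical):

#include <linux/notifier.h>
#include <net/dcbevent.h>
#include <net/dcbnl.h>

static int example_dcb_notify(struct notifier_block *nb,
                              unsigned long val, void *data)
{
        struct dcb_app_type *app = data;

        /* only the iSCSI well-known TCP port is interesting */
        if (app->app.protocol != 3260)
                return NOTIFY_DONE;

        /* ... look up the netdev via app->ifindex, compare the new
         * priority against live connections, restart stale ones ...
         */
        return NOTIFY_OK;
}

static struct notifier_block example_dcb_nb = {
        .notifier_call = example_dcb_notify,
};

/* paired in module init/exit:
 *      register_dcbevent_notifier(&example_dcb_nb);
 *      unregister_dcbevent_notifier(&example_dcb_nb);
 */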
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index dcb190e75343..5d5d8b50d842 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -120,6 +120,9 @@ struct cxgbi_sock {
int wr_max_cred;
int wr_cred;
int wr_una_cred;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 dcb_priority;
+#endif
unsigned char hcrc_len;
unsigned char dcrc_len;
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 1ed2cd82129d..8c55ec6e1827 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -753,105 +753,6 @@ static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
return NULL;
}
-
-static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
-{
- struct list_head *head = &acb->srb_free_list;
- struct ScsiReqBlk *srb = NULL;
-
- if (!list_empty(head)) {
- srb = list_entry(head->next, struct ScsiReqBlk, list);
- list_del(head->next);
- dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
- }
- return srb;
-}
-
-
-static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
- list_add_tail(&srb->list, &acb->srb_free_list);
-}
-
-
-static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_add(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_waiting_append(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_add_tail(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_add_tail(&srb->list, &dcb->srb_going_list);
-}
-
-
-static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
- struct ScsiReqBlk *i;
- struct ScsiReqBlk *tmp;
- dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
-
- list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
- if (i == srb) {
- list_del(&srb->list);
- break;
- }
-}
-
-
-static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- struct ScsiReqBlk *i;
- struct ScsiReqBlk *tmp;
- dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
-
- list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
- if (i == srb) {
- list_del(&srb->list);
- break;
- }
-}
-
-
-static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0,
- "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_move(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0,
- "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_move(&srb->list, &dcb->srb_going_list);
-}
-
-
/* Sets the timer to wake us up */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
@@ -923,7 +824,7 @@ static void waiting_process_next(struct AdapterCtlBlk *acb)
/* Try to send to the bus */
if (!start_scsi(acb, pos, srb))
- srb_waiting_to_going_move(pos, srb);
+ list_move(&srb->list, &pos->srb_going_list);
else
waiting_set_timer(acb, HZ/50);
break;
@@ -960,15 +861,15 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
acb->active_dcb ||
(acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
- srb_waiting_append(dcb, srb);
+ list_add_tail(&srb->list, &dcb->srb_waiting_list);
waiting_process_next(acb);
return;
}
- if (!start_scsi(acb, dcb, srb))
- srb_going_append(dcb, srb);
- else {
- srb_waiting_insert(dcb, srb);
+ if (!start_scsi(acb, dcb, srb)) {
+ list_add_tail(&srb->list, &dcb->srb_going_list);
+ } else {
+ list_add(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 50);
}
}
@@ -1045,10 +946,8 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
sgp->length++;
}
- srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
- srb->segment_x,
- SEGMENTX_LEN,
- PCI_DMA_TODEVICE);
+ srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
+ srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
@@ -1116,9 +1015,9 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
cmd->scsi_done = done;
cmd->result = 0;
- srb = srb_get_free(acb);
- if (!srb)
- {
+ srb = list_first_entry_or_null(&acb->srb_free_list,
+ struct ScsiReqBlk, list);
+ if (!srb) {
/*
* Return 1 since we are unable to queue this command at this
* point in time.
@@ -1126,12 +1025,13 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
dprintkdbg(DBG_0, "queue_command: No free srb's\n");
return 1;
}
+ list_del(&srb->list);
build_srb(cmd, dcb, srb);
if (!list_empty(&dcb->srb_waiting_list)) {
/* append to waiting queue */
- srb_waiting_append(dcb, srb);
+ list_add_tail(&srb->list, &dcb->srb_waiting_list);
waiting_process_next(acb);
} else {
/* process immediately */
@@ -1376,11 +1276,11 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
srb = find_cmd(cmd, &dcb->srb_waiting_list);
if (srb) {
- srb_waiting_remove(dcb, srb);
+ list_del(&srb->list);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
free_tag(dcb, srb);
- srb_free_insert(acb, srb);
+ list_add_tail(&srb->list, &acb->srb_free_list);
dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
cmd->result = DID_ABORT << 16;
return SUCCESS;
@@ -1969,14 +1869,15 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
xferred -= psge->length;
} else {
/* Partial SG entry done */
+ dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
+ srb->sg_bus_addr, SEGMENTX_LEN,
+ DMA_TO_DEVICE);
psge->length -= xferred;
psge->address += xferred;
srb->sg_index = idx;
- pci_dma_sync_single_for_device(srb->dcb->
- acb->dev,
- srb->sg_bus_addr,
- SEGMENTX_LEN,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
+ srb->sg_bus_addr, SEGMENTX_LEN,
+ DMA_TO_DEVICE);
break;
}
psge++;
@@ -3083,7 +2984,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
goto disc1;
}
free_tag(dcb, srb);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
dprintkdbg(DBG_KG,
"disconnect: (0x%p) Retry\n",
srb->cmd);
@@ -3148,7 +3049,7 @@ static void reselect(struct AdapterCtlBlk *acb)
srb->state = SRB_READY;
free_tag(dcb, srb);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 20);
/* return; */
@@ -3271,9 +3172,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
- pci_unmap_single(acb->dev, srb->sg_bus_addr,
- SEGMENTX_LEN,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
+ DMA_TO_DEVICE);
dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
scsi_sg_count(cmd), scsi_bufflen(cmd));
/* unmap the sg segments */
@@ -3291,8 +3191,8 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
/* Unmap sense buffer */
dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
srb->segment_x[0].address);
- pci_unmap_single(acb->dev, srb->segment_x[0].address,
- srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
+ srb->segment_x[0].length, DMA_FROM_DEVICE);
/* Restore SG stuff */
srb->total_xfer_length = srb->xferred;
srb->segment_x[0].address =
@@ -3411,7 +3311,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
tempcnt--;
dcb->max_command = tempcnt;
free_tag(dcb, srb);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 20);
srb->adapter_status = 0;
srb->target_status = 0;
@@ -3447,14 +3347,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
}
}
- if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
- pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
- scsi_sg_count(cmd), dir);
-
ckc_only = 0;
/* Check Error Conditions */
ckc_e:
+ pci_unmap_srb(acb, srb);
+
if (cmd->cmnd[0] == INQUIRY) {
unsigned char *base = NULL;
struct ScsiInqData *ptr;
@@ -3498,16 +3396,14 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
cmd->cmnd[0], srb->total_xfer_length);
}
- srb_going_remove(dcb, srb);
- /* Add to free list */
- if (srb == acb->tmp_srb)
- dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
- else {
+ if (srb != acb->tmp_srb) {
+ /* Add to free list */
dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
cmd, cmd->result);
- srb_free_insert(acb, srb);
+ list_move_tail(&srb->list, &acb->srb_free_list);
+ } else {
+ dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
}
- pci_unmap_srb(acb, srb);
cmd->scsi_done(cmd);
waiting_process_next(acb);
@@ -3535,9 +3431,9 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
result = MK_RES(0, did_flag, 0, 0);
printk("G:%p(%02i-%i) ", p,
p->device->id, (u8)p->device->lun);
- srb_going_remove(dcb, srb);
+ list_del(&srb->list);
free_tag(dcb, srb);
- srb_free_insert(acb, srb);
+ list_add_tail(&srb->list, &acb->srb_free_list);
p->result = result;
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
@@ -3565,8 +3461,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
result = MK_RES(0, did_flag, 0, 0);
printk("W:%p<%02i-%i>", p, p->device->id,
(u8)p->device->lun);
- srb_waiting_remove(dcb, srb);
- srb_free_insert(acb, srb);
+ list_move_tail(&srb->list, &acb->srb_free_list);
p->result = result;
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
@@ -3692,9 +3587,9 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
/* Map sense buffer */
- srb->segment_x[0].address =
- pci_map_single(acb->dev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+ srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
+ cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+ DMA_FROM_DEVICE);
dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
cmd->sense_buffer, srb->segment_x[0].address,
SCSI_SENSE_BUFFERSIZE);
@@ -3705,7 +3600,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
dprintkl(KERN_DEBUG,
"request_sense: (0x%p) failed <%02i-%i>\n",
srb->cmd, dcb->target_id, dcb->target_lun);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 100);
}
}
@@ -4392,7 +4287,7 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
/* link static array of srbs into the srb free list */
for (i = 0; i < acb->srb_count - 1; i++)
- srb_free_insert(acb, &acb->srb_array[i]);
+ list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
}
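
The dc395x rework deletes nine single-use wrappers around generic list operations and open-codes them at the call sites; everything they did is a one-liner from <linux/list.h>, the only two-step case being the free-list pop. A sketch of the replacements, reusing the driver's type names for illustration:

#include <linux/list.h>

/* was srb_get_free(): pop the head of the free list, or NULL if empty */
static struct ScsiReqBlk *example_pop_free(struct AdapterCtlBlk *acb)
{
        struct ScsiReqBlk *srb =
                list_first_entry_or_null(&acb->srb_free_list,
                                         struct ScsiReqBlk, list);

        if (srb)
                list_del(&srb->list);
        return srb;
}

/* was srb_going_to_waiting_move() and friends: unlink + insert in one
 * call, e.g.
 *      list_move(&srb->list, &dcb->srb_waiting_list);
 * and was srb_going_remove() + srb_free_insert():
 *      list_move_tail(&srb->list, &acb->srb_free_list);
 */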
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index c3fc34b9964d..ac7da9db7317 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -369,19 +369,28 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
struct scatterlist *sg = scsi_sglist(cmd);
- int dir = cmd->sc_data_direction;
- int total, i;
+ int total = 0, i;
- if (dir == DMA_NONE)
+ if (cmd->sc_data_direction == DMA_NONE)
return;
- spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
+ if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+ /*
+ * For pseudo DMA and PIO we need the virtual address instead of
+ * a dma address, so perform an identity mapping.
+ */
+ spriv->num_sg = scsi_sg_count(cmd);
+ for (i = 0; i < spriv->num_sg; i++) {
+ sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
+ total += sg_dma_len(&sg[i]);
+ }
+ } else {
+ spriv->num_sg = scsi_dma_map(cmd);
+ for (i = 0; i < spriv->num_sg; i++)
+ total += sg_dma_len(&sg[i]);
+ }
spriv->cur_residue = sg_dma_len(sg);
spriv->cur_sg = sg;
-
- total = 0;
- for (i = 0; i < spriv->u.num_sg; i++)
- total += sg_dma_len(&sg[i]);
spriv->tot_residue = total;
}
@@ -441,13 +450,8 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
- struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
- int dir = cmd->sc_data_direction;
-
- if (dir == DMA_NONE)
- return;
-
- esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
+ if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+ scsi_dma_unmap(cmd);
}
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
@@ -478,17 +482,6 @@ static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
spriv->tot_residue = ent->saved_tot_residue;
}
-static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
-{
- if (cmd->cmd_len == 6 ||
- cmd->cmd_len == 10 ||
- cmd->cmd_len == 12) {
- esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
- } else {
- esp->flags |= ESP_FLAG_DOING_SLOWCMD;
- }
-}
-
static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
if (esp->rev > ESP100A) {
@@ -624,6 +617,26 @@ static void esp_free_lun_tag(struct esp_cmd_entry *ent,
}
}
+static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ ent->sense_ptr = ent->cmd->sense_buffer;
+ if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+ ent->sense_dma = (uintptr_t)ent->sense_ptr;
+ return;
+ }
+
+ ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+}
+
+static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+ dma_unmap_single(esp->dev, ent->sense_dma,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ ent->sense_ptr = NULL;
+}
+
/* When a contingent allegiance conditon is created, we force feed a
* REQUEST_SENSE command to the device to fetch the sense data. I
* tried many other schemes, relying on the scsi error handling layer
@@ -645,12 +658,7 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
if (!ent->sense_ptr) {
esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
tgt, lun);
-
- ent->sense_ptr = cmd->sense_buffer;
- ent->sense_dma = esp->ops->map_single(esp,
- ent->sense_ptr,
- SCSI_SENSE_BUFFERSIZE,
- DMA_FROM_DEVICE);
+ esp_map_sense(esp, ent);
}
ent->saved_sense_ptr = ent->sense_ptr;
@@ -717,10 +725,10 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
static void esp_maybe_execute_command(struct esp *esp)
{
struct esp_target_data *tp;
- struct esp_lun_data *lp;
struct scsi_device *dev;
struct scsi_cmnd *cmd;
struct esp_cmd_entry *ent;
+ bool select_and_stop = false;
int tgt, lun, i;
u32 val, start_cmd;
u8 *p;
@@ -743,7 +751,6 @@ static void esp_maybe_execute_command(struct esp *esp)
tgt = dev->id;
lun = dev->lun;
tp = &esp->target[tgt];
- lp = dev->hostdata;
list_move(&ent->list, &esp->active_cmds);
@@ -752,7 +759,8 @@ static void esp_maybe_execute_command(struct esp *esp)
esp_map_dma(esp, cmd);
esp_save_pointers(esp, ent);
- esp_check_command_len(esp, cmd);
+ if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
+ select_and_stop = true;
p = esp->command_block;
@@ -793,42 +801,22 @@ static void esp_maybe_execute_command(struct esp *esp)
tp->flags &= ~ESP_TGT_CHECK_NEGO;
}
- /* Process it like a slow command. */
- if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
- esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+ /* If there are multiple message bytes, use Select and Stop */
+ if (esp->msg_out_len)
+ select_and_stop = true;
}
build_identify:
- /* If we don't have a lun-data struct yet, we're probing
- * so do not disconnect. Also, do not disconnect unless
- * we have a tag on this command.
- */
- if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
- *p++ = IDENTIFY(1, lun);
- else
- *p++ = IDENTIFY(0, lun);
+ *p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);
if (ent->tag[0] && esp->rev == ESP100) {
/* ESP100 lacks select w/atn3 command, use select
* and stop instead.
*/
- esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+ select_and_stop = true;
}
- if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
- start_cmd = ESP_CMD_SELA;
- if (ent->tag[0]) {
- *p++ = ent->tag[0];
- *p++ = ent->tag[1];
-
- start_cmd = ESP_CMD_SA3;
- }
-
- for (i = 0; i < cmd->cmd_len; i++)
- *p++ = cmd->cmnd[i];
-
- esp->select_state = ESP_SELECT_BASIC;
- } else {
+ if (select_and_stop) {
esp->cmd_bytes_left = cmd->cmd_len;
esp->cmd_bytes_ptr = &cmd->cmnd[0];
@@ -843,6 +831,19 @@ build_identify:
start_cmd = ESP_CMD_SELAS;
esp->select_state = ESP_SELECT_MSGOUT;
+ } else {
+ start_cmd = ESP_CMD_SELA;
+ if (ent->tag[0]) {
+ *p++ = ent->tag[0];
+ *p++ = ent->tag[1];
+
+ start_cmd = ESP_CMD_SA3;
+ }
+
+ for (i = 0; i < cmd->cmd_len; i++)
+ *p++ = cmd->cmnd[i];
+
+ esp->select_state = ESP_SELECT_BASIC;
}
val = tgt;
if (esp->rev == FASHME)
@@ -902,9 +903,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
}
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
- esp->ops->unmap_single(esp, ent->sense_dma,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- ent->sense_ptr = NULL;
+ esp_unmap_sense(esp, ent);
/* Restore the message/status bytes to what we actually
* saw originally. Also, report that we are providing
@@ -965,7 +964,7 @@ static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_
cmd->scsi_done = done;
spriv = ESP_CMD_PRIV(cmd);
- spriv->u.dma_addr = ~(dma_addr_t)0x0;
+ spriv->num_sg = 0;
list_add_tail(&ent->list, &esp->queued_cmds);
@@ -1252,14 +1251,10 @@ static int esp_finish_select(struct esp *esp)
esp_unmap_dma(esp, cmd);
esp_free_lun_tag(ent, cmd->device->hostdata);
tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
- esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
esp->cmd_bytes_ptr = NULL;
esp->cmd_bytes_left = 0;
} else {
- esp->ops->unmap_single(esp, ent->sense_dma,
- SCSI_SENSE_BUFFERSIZE,
- DMA_FROM_DEVICE);
- ent->sense_ptr = NULL;
+ esp_unmap_sense(esp, ent);
}
/* Now that the state is unwound properly, put back onto
@@ -1303,9 +1298,8 @@ static int esp_finish_select(struct esp *esp)
esp_flush_fifo(esp);
}
- /* If we are doing a slow command, negotiation, etc.
- * we'll do the right thing as we transition to the
- * next phase.
+ /* If we are doing a Select And Stop command, negotiation, etc.
+ * we'll do the right thing as we transition to the next phase.
*/
esp_event(esp, ESP_EVENT_CHECK_PHASE);
return 0;
@@ -1338,6 +1332,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
bytes_sent = esp->data_dma_len;
bytes_sent -= ecount;
+ bytes_sent -= esp->send_cmd_residual;
/*
* The am53c974 has a DMA 'pecularity'. The doc states:
@@ -1358,7 +1353,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
u8 *ptr;
- ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
+ ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
&offset, &count);
if (likely(ptr)) {
*(ptr + offset) = bval;
@@ -2039,11 +2034,8 @@ static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
esp_free_lun_tag(ent, cmd->device->hostdata);
cmd->result = DID_RESET << 16;
- if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
- esp->ops->unmap_single(esp, ent->sense_dma,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- ent->sense_ptr = NULL;
- }
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
+ esp_unmap_sense(esp, ent);
cmd->scsi_done(cmd);
list_del(&ent->list);
@@ -2382,7 +2374,7 @@ static const char *esp_chip_names[] = {
static struct scsi_transport_template *esp_transport_template;
-int scsi_esp_register(struct esp *esp, struct device *dev)
+int scsi_esp_register(struct esp *esp)
{
static int instance;
int err;
@@ -2402,10 +2394,10 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
esp_bootup_reset(esp);
- dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
+ dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
esp->host->unique_id, esp->regs, esp->dma_regs,
esp->host->irq);
- dev_printk(KERN_INFO, dev,
+ dev_printk(KERN_INFO, esp->dev,
"esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
esp->host->unique_id, esp_chip_names[esp->rev],
esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
@@ -2413,7 +2405,7 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
/* Let the SCSI bus reset settle. */
ssleep(esp_bus_reset_settle);
- err = scsi_add_host(esp->host, dev);
+ err = scsi_add_host(esp->host, esp->dev);
if (err)
return err;
@@ -2790,3 +2782,131 @@ MODULE_PARM_DESC(esp_debug,
module_init(esp_init);
module_exit(esp_exit);
+
+#ifdef CONFIG_SCSI_ESP_PIO
+static inline unsigned int esp_wait_for_fifo(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+ if (fbytes)
+ return fbytes;
+
+ udelay(1);
+ } while (--i);
+
+ shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
+ esp_read8(ESP_STATUS));
+ return 0;
+}
+
+static inline int esp_wait_for_intr(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ esp->sreg = esp_read8(ESP_STATUS);
+ if (esp->sreg & ESP_STAT_INTR)
+ return 0;
+
+ udelay(1);
+ } while (--i);
+
+ shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
+ esp->sreg);
+ return 1;
+}
+
+#define ESP_FIFO_SIZE 16
+
+void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+ cmd &= ~ESP_CMD_DMA;
+ esp->send_cmd_error = 0;
+
+ if (write) {
+ u8 *dst = (u8 *)addr;
+ u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
+
+ scsi_esp_cmd(esp, cmd);
+
+ while (1) {
+ if (!esp_wait_for_fifo(esp))
+ break;
+
+ *dst++ = readb(esp->fifo_reg);
+ --esp_count;
+
+ if (!esp_count)
+ break;
+
+ if (esp_wait_for_intr(esp)) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ if ((esp->sreg & ESP_STAT_PMASK) != phase)
+ break;
+
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & mask) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ if (phase == ESP_MIP)
+ esp_write8(ESP_CMD_MOK, ESP_CMD);
+
+ esp_write8(ESP_CMD_TI, ESP_CMD);
+ }
+ } else {
+ unsigned int n = ESP_FIFO_SIZE;
+ u8 *src = (u8 *)addr;
+
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ if (n > esp_count)
+ n = esp_count;
+ writesb(esp->fifo_reg, src, n);
+ src += n;
+ esp_count -= n;
+
+ scsi_esp_cmd(esp, cmd);
+
+ while (esp_count) {
+ if (esp_wait_for_intr(esp)) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ if ((esp->sreg & ESP_STAT_PMASK) != phase)
+ break;
+
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & ~ESP_INTR_BSERV) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ n = ESP_FIFO_SIZE -
+ (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
+
+ if (n > esp_count)
+ n = esp_count;
+ writesb(esp->fifo_reg, src, n);
+ src += n;
+ esp_count -= n;
+
+ esp_write8(ESP_CMD_TI, ESP_CMD);
+ }
+ }
+
+ esp->send_cmd_residual = esp_count;
+}
+EXPORT_SYMBOL(esp_send_pio_cmd);
+#endif
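The PIO path added above replaces the per-driver PIO loops the front ends used to carry: it polls ESP_FFLAGS and ESP_STATUS instead of programming a DMA engine, and records any untransferred bytes in esp->send_cmd_residual, which esp_data_bytes_sent() now subtracts when computing the residual. A minimal sketch of how a front end might route transfers through it, assuming esp->fifo_reg was pointed at the mapped FIFO data register at probe time (e.g. esp->regs plus the shifted ESP_FDATA offset); example_dma_available() and example_program_dma() are hypothetical helpers, not part of this patch:

	/* Illustrative only: the two example_* helpers are hypothetical
	 * front-end routines. Assumes esp->fifo_reg was set at probe.
	 */
	extern bool example_dma_available(struct esp *esp);
	extern void example_program_dma(struct esp *esp, u32 addr,
					u32 esp_count, int write, u8 cmd);

	static void example_send_cmd(struct esp *esp, u32 addr, u32 esp_count,
				     u32 dma_count, int write, u8 cmd)
	{
		if (example_dma_available(esp))
			example_program_dma(esp, addr, esp_count, write, cmd);
		else
			/* addr is a kernel virtual address on this path */
			esp_send_pio_cmd(esp, addr, esp_count, dma_count,
					 write, cmd);
	}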
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 8163dca2071b..aa87a6b72dcc 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -249,11 +249,7 @@
#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
struct esp_cmd_priv {
- union {
- dma_addr_t dma_addr;
- int num_sg;
- } u;
-
+ int num_sg;
int cur_residue;
struct scatterlist *cur_sg;
int tot_residue;
@@ -363,19 +359,6 @@ struct esp_driver_ops {
void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
u8 (*esp_read8)(struct esp *esp, unsigned long reg);
- /* Map and unmap DMA memory. Eventually the driver will be
- * converted to the generic DMA API as soon as SBUS is able to
- * cope with that. At such time we can remove this.
- */
- dma_addr_t (*map_single)(struct esp *esp, void *buf,
- size_t sz, int dir);
- int (*map_sg)(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir);
- void (*unmap_single)(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir);
- void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir);
-
/* Return non-zero if there is an IRQ pending. Usually this
* status bit lives in the DMA controller sitting in front of
* the ESP. This has to be accurate or else the ESP interrupt
@@ -435,7 +418,7 @@ struct esp {
const struct esp_driver_ops *ops;
struct Scsi_Host *host;
- void *dev;
+ struct device *dev;
struct esp_cmd_entry *active_cmd;
@@ -490,11 +473,11 @@ struct esp {
u32 flags;
#define ESP_FLAG_DIFFERENTIAL 0x00000001
#define ESP_FLAG_RESETTING 0x00000002
-#define ESP_FLAG_DOING_SLOWCMD 0x00000004
#define ESP_FLAG_WIDE_CAPABLE 0x00000008
#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
#define ESP_FLAG_DISABLE_SYNC 0x00000020
#define ESP_FLAG_USE_FIFO 0x00000040
+#define ESP_FLAG_NO_DMA_MAP 0x00000080
u8 select_state;
#define ESP_SELECT_NONE 0x00 /* Not selecting */
@@ -532,7 +515,7 @@ struct esp {
u32 min_period;
u32 radelay;
- /* Slow command state. */
+ /* ESP_CMD_SELAS command state */
u8 *cmd_bytes_ptr;
int cmd_bytes_left;
@@ -540,6 +523,11 @@ struct esp {
void *dma;
int dmarev;
+
+ /* These are used by esp_send_pio_cmd() */
+ u8 __iomem *fifo_reg;
+ int send_cmd_error;
+ u32 send_cmd_residual;
};
/* A front-end driver for the ESP chip should do the following in
@@ -568,16 +556,18 @@ struct esp {
* example, the DMA engine has to be reset before ESP can
* be programmed.
* 11) If necessary, call dev_set_drvdata() as needed.
- * 12) Call scsi_esp_register() with prepared 'esp' structure
- * and a device pointer if possible.
+ * 12) Call scsi_esp_register() with prepared 'esp' structure.
* 13) Check scsi_esp_register() return value, release all resources
* if an error was returned.
*/
extern struct scsi_host_template scsi_esp_template;
-extern int scsi_esp_register(struct esp *, struct device *);
+extern int scsi_esp_register(struct esp *);
extern void scsi_esp_unregister(struct esp *);
extern irqreturn_t scsi_esp_intr(int, void *);
extern void scsi_esp_cmd(struct esp *, u8);
+extern void esp_send_pio_cmd(struct esp *esp, u32 dma_addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd);
+
#endif /* !(_ESP_SCSI_H) */
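Since struct esp now carries a typed struct device pointer, a front end sets esp->dev once during probe and scsi_esp_register() no longer takes a separate device argument. A sketch of the updated registration flow under those assumptions; my_esp_ops and the platform wiring are placeholders, and error unwinding is trimmed:

	/* Sketch only; my_esp_ops stands in for the front end's real
	 * esp_driver_ops and resource setup is omitted.
	 */
	static const struct esp_driver_ops my_esp_ops;

	static int example_probe(struct platform_device *pdev)
	{
		struct Scsi_Host *host;
		struct esp *esp;

		host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp));
		if (!host)
			return -ENOMEM;

		esp = shost_priv(host);
		esp->host = host;
		esp->dev = &pdev->dev;	/* previously an argument to register */
		esp->ops = &my_esp_ops;

		return scsi_esp_register(esp);
	}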
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index c7bf316d8e83..844ef688fa91 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -836,8 +836,8 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
u32 fcp_bytes_written = 0;
unsigned long flags;
- pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
skb = buf->os_buf;
fp = (struct fc_frame *)skb;
buf->os_buf = NULL;
@@ -977,9 +977,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb_put(skb, len);
- pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
-
- if (pci_dma_mapping_error(fnic->pdev, pa)) {
+ pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa)) {
r = -ENOMEM;
printk(KERN_ERR "PCI mapping failed with error %d\n", r);
goto free_skb;
@@ -998,8 +997,8 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(rq->vdev);
- pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
@@ -1018,7 +1017,6 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
unsigned long flags;
- int r;
if (!fnic->vlan_hw_insert) {
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1038,11 +1036,10 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
}
}
- pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-
- r = pci_dma_mapping_error(fnic->pdev, pa);
- if (r) {
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa)) {
+ printk(KERN_ERR "DMA mapping failed\n");
goto free_skb;
}
@@ -1058,7 +1055,7 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
irq_restore:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
- pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
kfree_skb(skb);
}
@@ -1115,9 +1112,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
if (FC_FCOE_VER)
FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
- pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
-
- if (pci_dma_mapping_error(fnic->pdev, pa)) {
+ pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa)) {
ret = -ENOMEM;
printk(KERN_ERR "DMA map failed with error %d\n", ret);
goto free_skb_on_err;
@@ -1131,8 +1127,7 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
- pci_unmap_single(fnic->pdev, pa,
- tot_len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
ret = -1;
goto irq_restore;
}
@@ -1247,8 +1242,8 @@ static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
struct fc_frame *fp = (struct fc_frame *)skb;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
- pci_unmap_single(fnic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(fp_skb(fp));
buf->os_buf = NULL;
}
@@ -1290,8 +1285,8 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
- pci_unmap_single(fnic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
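The fnic hunks follow the tree-wide migration from the legacy PCI DMA wrappers to the generic DMA API: pci_map_single(pdev, ...) becomes dma_map_single(&pdev->dev, ...), the PCI_DMA_* constants become enum dma_data_direction values, and failures are checked with dma_mapping_error() instead of pci_dma_mapping_error(). A generic sketch of the pattern, not fnic-specific; buf and len are placeholders:

	/* Old form:
	 *     pa = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	 *     if (pci_dma_mapping_error(pdev, pa)) ...
	 */
	static int example_map(struct pci_dev *pdev, void *buf, size_t len,
			       dma_addr_t *pa)
	{
		*pa = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, *pa))
			return -ENOMEM;
		return 0;
	}

	static void example_unmap(struct pci_dev *pdev, dma_addr_t pa,
				  size_t len)
	{
		dma_unmap_single(&pdev->dev, pa, len, DMA_FROM_DEVICE);
	}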
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index e52599f44170..cc461fd7bef1 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -611,30 +611,15 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* limitation for the device. Try 64-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"No usable DMA configuration "
"aborting\n");
goto err_out_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to obtain 32-bit DMA "
- "for consistent allocations, aborting.\n");
- goto err_out_release_regions;
- }
- } else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to obtain 64-bit DMA "
- "for consistent allocations, aborting.\n");
- goto err_out_release_regions;
- }
}
/* Map vNIC resources from BAR0 */
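dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, which is why the separate pci_set_consistent_dma_mask() legs could be dropped above. The resulting idiom, as a sketch:

	/* One call now covers both masks; a non-zero return after the
	 * 32-bit retry means there is no usable DMA configuration.
	 */
	static int example_set_dma_mask(struct pci_dev *pdev)
	{
		int err;

		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (err)
			err = dma_set_mask_and_coherent(&pdev->dev,
							DMA_BIT_MASK(32));
		return err;
	}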
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 8cbd3c9f0b4c..96acfcecd540 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -126,17 +126,17 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
struct scsi_cmnd *sc)
{
if (io_req->sgl_list_pa)
- pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+ dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
scsi_dma_unmap(sc);
if (io_req->sgl_cnt)
mempool_free(io_req->sgl_list_alloc,
fnic->io_sgl_pool[io_req->sgl_type]);
if (io_req->sense_buf_pa)
- pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
- SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
@@ -330,7 +330,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
int flags;
u8 exch_flags;
struct scsi_lun fc_lun;
- int r;
if (sg_count) {
/* For each SGE, create a device desc entry */
@@ -342,30 +341,25 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
desc++;
}
- io_req->sgl_list_pa = pci_map_single
- (fnic->pdev,
- io_req->sgl_list,
- sizeof(io_req->sgl_list[0]) * sg_count,
- PCI_DMA_TODEVICE);
-
- r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
- if (r) {
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
+ io_req->sgl_list,
+ sizeof(io_req->sgl_list[0]) * sg_count,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
+ printk(KERN_ERR "DMA mapping failed\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
}
- io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+ io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
-
- r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
- if (r) {
- pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
+ dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
sizeof(io_req->sgl_list[0]) * sg_count,
- PCI_DMA_TODEVICE);
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ DMA_TO_DEVICE);
+ printk(KERN_ERR "DMA mapping failed\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -2272,33 +2266,17 @@ clean_pending_aborts_end:
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
- struct blk_queue_tag *bqt = fnic->lport->host->bqt;
- int tag, ret = SCSI_NO_TAG;
-
- BUG_ON(!bqt);
- if (!bqt) {
- pr_err("Tags are not supported\n");
- goto end;
- }
-
- do {
- tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
- if (tag >= bqt->max_depth) {
- pr_err("Tag allocation failure\n");
- goto end;
- }
- } while (test_and_set_bit(tag, bqt->tag_map));
+ struct request_queue *q = sc->request->q;
+ struct request *dummy;
- bqt->tag_index[tag] = sc->request;
- sc->request->tag = tag;
- sc->tag = tag;
- if (!sc->request->special)
- sc->request->special = sc;
+ dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(dummy))
+ return SCSI_NO_TAG;
- ret = tag;
+ sc->tag = sc->request->tag = dummy->tag;
+ sc->request->special = sc;
-end:
- return ret;
+ return dummy->tag;
}
/**
@@ -2308,20 +2286,9 @@ end:
static inline void
fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
- struct blk_queue_tag *bqt = fnic->lport->host->bqt;
- int tag = sc->request->tag;
+ struct request *dummy = sc->request->special;
- if (tag == SCSI_NO_TAG)
- return;
-
- BUG_ON(!bqt || !bqt->tag_index[tag]);
- if (!bqt)
- return;
-
- bqt->tag_index[tag] = NULL;
- clear_bit(tag, bqt->tag_map);
-
- return;
+ blk_mq_free_request(dummy);
}
/*
@@ -2380,19 +2347,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
tag = sc->request->tag;
if (unlikely(tag < 0)) {
/*
- * XXX(hch): current the midlayer fakes up a struct
- * request for the explicit reset ioctls, and those
- * don't have a tag allocated to them. The below
- * code pokes into midlayer structures to paper over
- * this design issue, but that won't work for blk-mq.
- *
- * Either someone who can actually test the hardware
- * will have to come up with a similar hack for the
- * blk-mq case, or we'll have to bite the bullet and
- * fix the way the EH ioctls work for real, but until
- * that happens we fail these explicit requests here.
+ * Really should fix the midlayer to pass in a proper
+ * request for ioctls...
*/
-
tag = fnic_scsi_host_start_tag(fnic, sc);
if (unlikely(tag == SCSI_NO_TAG))
goto fnic_device_reset_end;
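With the legacy blk_queue_tag bitmap gone, the device-reset path now borrows a tag by allocating a throwaway blk-mq request on the command's own queue; BLK_MQ_REQ_NOWAIT keeps the allocation safe in the error-handling context. A sketch of the idiom; the explicit out-parameter is illustrative, whereas the driver plumbs the pointer through sc->request->special and frees the dummy in fnic_scsi_host_end_tag() via blk_mq_free_request(), as the hunks above show:

	/* Sketch of the borrowed-tag idiom. */
	static int example_borrow_tag(struct scsi_cmnd *sc,
				      struct request **dummyp)
	{
		struct request *dummy;

		dummy = blk_mq_alloc_request(sc->request->q, REQ_OP_WRITE,
					     BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(dummy))
			return SCSI_NO_TAG;

		*dummyp = dummy; /* caller frees with blk_mq_free_request() */
		return dummy->tag;
	}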
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index ba69d6112fa1..434447ea24b8 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -195,9 +195,9 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
{
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
- ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+ ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
ring->size_unaligned,
- &ring->base_addr_unaligned);
+ &ring->base_addr_unaligned, GFP_KERNEL);
if (!ring->descs_unaligned) {
printk(KERN_ERR
@@ -221,7 +221,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
ring->size_unaligned,
ring->descs_unaligned,
ring->base_addr_unaligned);
@@ -298,9 +298,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
int err = 0;
if (!vdev->fw_info) {
- vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+ vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
- &vdev->fw_info_pa);
+ &vdev->fw_info_pa, GFP_KERNEL);
if (!vdev->fw_info)
return -ENOMEM;
@@ -361,8 +361,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
int wait = 1000;
if (!vdev->stats) {
- vdev->stats = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_stats), &vdev->stats_pa);
+ vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
if (!vdev->stats)
return -ENOMEM;
}
@@ -523,9 +523,9 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
int wait = 1000;
if (!vdev->notify) {
- vdev->notify = pci_alloc_consistent(vdev->pdev,
+ vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
- &vdev->notify_pa);
+ &vdev->notify_pa, GFP_KERNEL);
if (!vdev->notify)
return -ENOMEM;
}
@@ -647,21 +647,21 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->linkstatus)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(u32),
vdev->linkstatus,
vdev->linkstatus_pa);
if (vdev->stats)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
kfree(vdev);
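The coherent allocations get the same treatment. One subtlety: pci_alloc_consistent() implied GFP_ATOMIC, so passing GFP_KERNEL to dma_alloc_coherent() assumes these setup paths may sleep, which appears to hold for the probe-time callers here. The pattern, sketched with sz as a placeholder:

	static void *example_alloc_coherent(struct pci_dev *pdev, size_t sz,
					    dma_addr_t *pa)
	{
		/* old: pci_alloc_consistent(pdev, sz, pa), implicit GFP_ATOMIC */
		return dma_alloc_coherent(&pdev->dev, sz, pa, GFP_KERNEL);
	}

	static void example_free_coherent(struct pci_dev *pdev, size_t sz,
					  void *va, dma_addr_t pa)
	{
		dma_free_coherent(&pdev->dev, sz, va, pa);
	}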
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 6c7d2e201abe..0ddb53c8a2e2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -34,6 +34,7 @@
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
+#define HISI_SAS_RESERVED_IPTT_CNT 96
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -217,7 +218,7 @@ struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
- int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx,
+ int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index a4e2e6aa9a6b..b3f01d5b821b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -183,7 +183,14 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
- hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+ unsigned long flags;
+
+ if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
+ hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
}
static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
@@ -193,24 +200,34 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
set_bit(slot_idx, bitmap);
}
-static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
+static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
+ struct scsi_cmnd *scsi_cmnd)
{
- unsigned int index;
+ int index;
void *bitmap = hisi_hba->slot_index_tags;
+ unsigned long flags;
+
+ if (scsi_cmnd)
+ return scsi_cmnd->request->tag;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
- hisi_hba->last_slot_index + 1);
+ hisi_hba->last_slot_index + 1);
if (index >= hisi_hba->slot_index_count) {
- index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
- 0);
- if (index >= hisi_hba->slot_index_count)
+ index = find_next_zero_bit(bitmap,
+ hisi_hba->slot_index_count,
+ hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT);
+ if (index >= hisi_hba->slot_index_count) {
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -SAS_QUEUE_FULL;
+ }
}
hisi_sas_slot_index_set(hisi_hba, index);
- *slot_idx = index;
hisi_hba->last_slot_index = index;
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
- return 0;
+ return index;
}
static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
@@ -249,9 +266,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot->idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -287,13 +302,13 @@ static int hisi_sas_task_prep(struct sas_task *task,
int *pass)
{
struct domain_device *device = task->dev;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct hisi_hba *hisi_hba;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct asd_sas_port *sas_port = device->port;
- struct device *dev = hisi_hba->dev;
+ struct device *dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
struct hisi_sas_dq *dq;
@@ -314,6 +329,9 @@ static int hisi_sas_task_prep(struct sas_task *task,
return -ECOMM;
}
+ hisi_hba = dev_to_hisi_hba(device);
+ dev = hisi_hba->dev;
+
if (DEV_IS_GONE(sas_dev)) {
if (sas_dev)
dev_info(dev, "task prep: device %d not ready\n",
@@ -381,16 +399,27 @@ static int hisi_sas_task_prep(struct sas_task *task,
goto err_out_dma_unmap;
}
- spin_lock_irqsave(&hisi_hba->lock, flags);
if (hisi_hba->hw->slot_index_alloc)
- rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
- device);
- else
- rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- if (rc)
+ rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+ else {
+ struct scsi_cmnd *scsi_cmnd = NULL;
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(device)) {
+ qc = task->uldd_task;
+ scsi_cmnd = qc->scsicmd;
+ } else {
+ scsi_cmnd = task->uldd_task;
+ }
+ }
+ rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
+ }
+ if (rc < 0)
goto err_out_dma_unmap;
+ slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
spin_lock_irqsave(&dq->lock, flags);
@@ -451,9 +480,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
return 0;
err_out_tag:
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out_dma_unmap:
if (!sas_protocol_ata(task->task_proto)) {
if (task->num_scatter) {
@@ -904,6 +931,9 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
_r.maximum_linkrate = max;
_r.minimum_linkrate = min;
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
hisi_hba->hw->phy_disable(hisi_hba, phy_no);
msleep(100);
hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
@@ -950,8 +980,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
static void hisi_sas_task_done(struct sas_task *task)
{
- if (!del_timer(&task->slow_task->timer))
- return;
+ del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@@ -960,13 +989,17 @@ static void hisi_sas_tmf_timedout(struct timer_list *t)
struct sas_task_slow *slow = from_timer(slow, t, timer);
struct sas_task *task = slow->task;
unsigned long flags;
+ bool is_completed = true;
spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ is_completed = false;
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
- complete(&task->slow_task->completion);
+ if (!is_completed)
+ complete(&task->slow_task->completion);
}
#define TASK_TIMEOUT 20
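The timeout handler now signals the completion only when it is the one that flips the task from DONE-pending to ABORTED; combined with the unconditional del_timer() in hisi_sas_task_done(), this prevents a double complete() when the normal completion and the timer race. A condensed restatement of the pattern:

	/* Only the path that marks the task aborted signals the
	 * completion; a task that already completed is left alone.
	 */
	static void example_tmf_timedout(struct timer_list *t)
	{
		struct sas_task_slow *slow = from_timer(slow, t, timer);
		struct sas_task *task = slow->task;
		unsigned long flags;
		bool is_completed = true;

		spin_lock_irqsave(&task->task_state_lock, flags);
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			task->task_state_flags |= SAS_TASK_STATE_ABORTED;
			is_completed = false;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (!is_completed)
			complete(&task->slow_task->completion);
	}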
@@ -1019,8 +1052,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
struct hisi_sas_slot *slot = task->lldd_task;
dev_err(dev, "abort tmf: TMF task timeout and not done\n");
- if (slot)
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * flush tasklet to avoid freeing task
+ * before using task in IO completion
+ */
+ tasklet_kill(&cq->tasklet);
slot->task = NULL;
+ }
goto ex_err;
} else
@@ -1396,6 +1437,17 @@ static int hisi_sas_abort_task(struct sas_task *task)
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ struct hisi_sas_slot *slot = task->lldd_task;
+ struct hisi_sas_cq *cq;
+
+ if (slot) {
+ /*
+ * flush tasklet to avoid freeing task
+ * before using task in IO completion
+ */
+ cq = &hisi_hba->cq[slot->dlvry_queue];
+ tasklet_kill(&cq->tasklet);
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
rc = TMF_RESP_FUNC_COMPLETE;
goto out;
@@ -1451,12 +1503,19 @@ static int hisi_sas_abort_task(struct sas_task *task)
/* SMP */
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
+ struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
- task->lldd_task)
- hisi_sas_do_release_task(hisi_hba, task, slot);
+ task->lldd_task) {
+ /*
+ * flush tasklet to avoid freeing task
+ * before using task in IO completion
+ */
+ tasklet_kill(&cq->tasklet);
+ slot->task = NULL;
+ }
}
out:
@@ -1705,14 +1764,11 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
port = to_hisi_sas_port(sas_port);
/* simply get a slot and send abort command */
- spin_lock_irqsave(&hisi_hba->lock, flags);
- rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- if (rc) {
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
+ if (rc < 0)
goto err_out;
- }
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
spin_lock_irqsave(&dq->lock, flags_dq);
@@ -1748,7 +1804,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
-
WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
spin_lock_irqsave(&dq->lock, flags);
@@ -1759,9 +1814,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
return 0;
err_out_tag:
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
@@ -1823,8 +1876,16 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
- if (slot)
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * flush tasklet to avoid freeing task
+ * before using task in IO completion
+ */
+ tasklet_kill(&cq->tasklet);
slot->task = NULL;
+ }
dev_err(dev, "internal task abort: timeout and not done.\n");
res = -EIO;
goto exit;
@@ -1861,10 +1922,6 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
hisi_sas_port_notify_formed(sas_phy);
}
-static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
-{
-}
-
static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
u8 reg_index, u8 reg_count, u8 *write_data)
{
@@ -1954,10 +2011,9 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
.lldd_lu_reset = hisi_sas_lu_reset,
.lldd_query_task = hisi_sas_query_task,
- .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
+ .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
- .lldd_port_deformed = hisi_sas_port_deformed,
- .lldd_write_gpio = hisi_sas_write_gpio,
+ .lldd_write_gpio = hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -2120,6 +2176,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
hisi_sas_init_mem(hisi_hba);
hisi_sas_slot_index_init(hisi_hba);
+ hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
if (!hisi_hba->wq) {
@@ -2323,8 +2381,15 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->max_channel = 1;
shost->max_cmd_len = 16;
shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
- shost->can_queue = hisi_hba->hw->max_command_entries;
- shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+ if (hisi_hba->hw->slot_index_alloc) {
+ shost->can_queue = hisi_hba->hw->max_command_entries;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+ } else {
+ shost->can_queue = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
+ }
sha->sas_ha_name = DRV_NAME;
sha->dev = hisi_hba->dev;
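The reworked allocator splits the IPTT space: a real SCSI command simply reuses the block layer's per-request tag, while internal commands (aborts, TMFs) draw from the reserved top HISI_SAS_RESERVED_IPTT_CNT entries of the driver bitmap under hisi_hba->lock, and shost->can_queue shrinks by the same amount. A simplified sketch; the driver additionally rotates the search via last_slot_index, omitted here:

	static int example_alloc_iptt(struct hisi_hba *hisi_hba,
				      struct scsi_cmnd *scmd)
	{
		unsigned long *bitmap = hisi_hba->slot_index_tags;
		int max = hisi_hba->hw->max_command_entries;
		int first = max - HISI_SAS_RESERVED_IPTT_CNT;
		unsigned long flags;
		int idx;

		if (scmd)		/* regular I/O: use the blk-mq tag */
			return scmd->request->tag;

		/* internal command: search the reserved tail */
		spin_lock_irqsave(&hisi_hba->lock, flags);
		idx = find_next_zero_bit(bitmap, max, first);
		if (idx >= max) {
			spin_unlock_irqrestore(&hisi_hba->lock, flags);
			return -SAS_QUEUE_FULL;
		}
		set_bit(idx, bitmap);
		spin_unlock_irqrestore(&hisi_hba->lock, flags);

		return idx;
	}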
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 8f60f0e04599..f0e457e6884e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1809,7 +1809,6 @@ static struct scsi_host_template sht_v1_hw = {
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
- .can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9c5c5a601332..cc36b6473e98 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -770,7 +770,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
/* This function needs to be protected from pre-emption. */
static int
-slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
+slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
struct domain_device *device)
{
int sata_dev = dev_is_sata(device);
@@ -778,6 +778,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
struct hisi_sas_device *sas_dev = device->lldd_dev;
int sata_idx = sas_dev->sata_idx;
int start, end;
+ unsigned long flags;
if (!sata_dev) {
/*
@@ -801,11 +802,14 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
end = 64 * (sata_idx + 2);
}
+ spin_lock_irqsave(&hisi_hba->lock, flags);
while (1) {
start = find_next_zero_bit(bitmap,
hisi_hba->slot_index_count, start);
- if (start >= end)
+ if (start >= end) {
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -SAS_QUEUE_FULL;
+ }
/*
* SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
*/
@@ -815,8 +819,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
}
set_bit(start, bitmap);
- *slot_idx = start;
- return 0;
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ return start;
}
static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
@@ -2483,7 +2487,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -2493,6 +2496,7 @@ out:
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
spin_lock_irqsave(&device->done_lock, flags);
@@ -3560,7 +3564,6 @@ static struct scsi_host_template sht_v2_hw = {
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
- .can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
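Moving hisi_sas_slot_task_free() below the task_state_lock section in slot_complete_v2_hw() (and in the v3 copy further down) means SAS_TASK_STATE_DONE is published before the slot is torn down, so an abort path that observes DONE never races with a half-freed slot. The resulting ordering, sketched:

	/* Publish DONE under the state lock first, then free the slot. */
	static void example_finish_slot(struct hisi_hba *hisi_hba,
					struct sas_task *task,
					struct hisi_sas_slot *slot)
	{
		unsigned long flags;

		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* moved below the state update by this patch */
		hisi_sas_slot_task_free(hisi_hba, task, slot);
	}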
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 08b503e274b8..bd4ce38b98d2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -127,6 +127,7 @@
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
#define SL_CFG (PORT_BASE + 0x84)
+#define AIP_LIMIT (PORT_BASE + 0x90)
#define SL_CONTROL (PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF 0
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
@@ -431,6 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
(u32)((1ULL << hisi_hba->queue_count) - 1));
hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+ hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
@@ -441,7 +443,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
if (pdev->revision >= 0x21)
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7aff);
else
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
@@ -495,6 +497,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
+ hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -1751,7 +1754,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -1761,6 +1763,7 @@ out:
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
spin_lock_irqsave(&device->done_lock, flags);
@@ -2098,7 +2101,6 @@ static struct scsi_host_template sht_v3_hw = {
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
- .can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
@@ -2108,6 +2110,7 @@ static struct scsi_host_template sht_v3_hw = {
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -2245,8 +2248,10 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_channel = 1;
shost->max_cmd_len = 16;
shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
- shost->can_queue = hisi_hba->hw->max_command_entries;
- shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+ shost->can_queue = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
sha->sas_ha_name = DRV_NAME;
sha->dev = dev;
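The v3 host template also opts into round-robin tag allocation, so the block layer cycles through the shrunken tag space rather than always reusing the lowest free tag; together with the can_queue reservation this spreads IPTT reuse out over time. The relevant template fields in isolation, with everything else elided:

	static struct scsi_host_template example_sht = {
		.name			= "example",
		.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
		/* .can_queue is set at probe time to
		 * max_command_entries - HISI_SAS_RESERVED_IPTT_CNT
		 */
	};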
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c120929d4ffe..c9cccf35e9d7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2240,8 +2240,8 @@ static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
chain_size = le32_to_cpu(cp->sg[0].length);
- temp64 = pci_map_single(h->pdev, chain_block, chain_size,
- PCI_DMA_TODEVICE);
+ temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&h->pdev->dev, temp64)) {
/* prevent subsequent unmapping */
cp->sg->address = 0;
@@ -2261,7 +2261,7 @@ static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
chain_sg = cp->sg;
temp64 = le64_to_cpu(chain_sg->address);
chain_size = le32_to_cpu(cp->sg[0].length);
- pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
+ dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
}
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
@@ -2277,8 +2277,8 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
chain_len = sizeof(*chain_sg) *
(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
chain_sg->Len = cpu_to_le32(chain_len);
- temp64 = pci_map_single(h->pdev, chain_block, chain_len,
- PCI_DMA_TODEVICE);
+ temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&h->pdev->dev, temp64)) {
/* prevent subsequent unmapping */
chain_sg->Addr = cpu_to_le64(0);
@@ -2297,8 +2297,8 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
return;
chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
- pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
- le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
+ dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
+ le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
}
@@ -2759,13 +2759,13 @@ static void complete_scsi_command(struct CommandList *cp)
return hpsa_cmd_free_and_done(h, cp, cmd);
}
-static void hpsa_pci_unmap(struct pci_dev *pdev,
- struct CommandList *c, int sg_used, int data_direction)
+static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
+ int sg_used, enum dma_data_direction data_direction)
{
int i;
for (i = 0; i < sg_used; i++)
- pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
+ dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
le32_to_cpu(c->SG[i].Len),
data_direction);
}
@@ -2774,17 +2774,17 @@ static int hpsa_map_one(struct pci_dev *pdev,
struct CommandList *cp,
unsigned char *buf,
size_t buflen,
- int data_direction)
+ enum dma_data_direction data_direction)
{
u64 addr64;
- if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+ if (buflen == 0 || data_direction == DMA_NONE) {
cp->Header.SGList = 0;
cp->Header.SGTotal = cpu_to_le16(0);
return 0;
}
- addr64 = pci_map_single(pdev, buf, buflen, data_direction);
+ addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
if (dma_mapping_error(&pdev->dev, addr64)) {
/* Prevent subsequent unmap of something never mapped */
cp->Header.SGList = 0;
@@ -2845,7 +2845,8 @@ static u32 lockup_detected(struct ctlr_info *h)
#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
- struct CommandList *c, int data_direction, unsigned long timeout_msecs)
+ struct CommandList *c, enum dma_data_direction data_direction,
+ unsigned long timeout_msecs)
{
int backoff_time = 10, retry_count = 0;
int rc;
@@ -2969,8 +2970,8 @@ static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
rc = -1;
goto out;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3022,8 +3023,8 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
rc = -1;
goto out;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3306,8 +3307,8 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
cmd_free(h, c);
return -1;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3349,8 +3350,8 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
c->Request.CDB[2] = bmic_device_index & 0xff;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3377,8 +3378,8 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h,
if (rc)
goto out;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3408,7 +3409,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
c->Request.CDB[2] = bmic_device_index & 0xff;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
- hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
NO_TIMEOUT);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
@@ -3484,7 +3485,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
else
c->Request.CDB[5] = 0;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
NO_TIMEOUT);
if (rc)
goto out;
@@ -3731,8 +3732,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
}
if (extended_response)
c->Request.CDB[1] = extended_response;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -6320,8 +6321,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
/* Fill in the scatter gather information */
if (iocommand.buf_size > 0) {
- temp64 = pci_map_single(h->pdev, buff,
- iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ temp64 = dma_map_single(&h->pdev->dev, buff,
+ iocommand.buf_size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
c->SG[0].Addr = cpu_to_le64(0);
c->SG[0].Len = cpu_to_le32(0);
@@ -6335,7 +6336,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
NO_TIMEOUT);
if (iocommand.buf_size > 0)
- hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+ hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
if (rc) {
rc = -EIO;
@@ -6381,13 +6382,9 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
- if (!ioc) {
- status = -ENOMEM;
- goto cleanup1;
- }
- if (copy_from_user(ioc, argp, sizeof(*ioc))) {
- status = -EFAULT;
+ ioc = vmemdup_user(argp, sizeof(*ioc));
+ if (IS_ERR(ioc)) {
+ status = PTR_ERR(ioc);
goto cleanup1;
}
if ((ioc->buf_size < 1) &&
@@ -6447,14 +6444,14 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
if (ioc->buf_size > 0) {
int i;
for (i = 0; i < sg_used; i++) {
- temp64 = pci_map_single(h->pdev, buff[i],
- buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ temp64 = dma_map_single(&h->pdev->dev, buff[i],
+ buff_size[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(&h->pdev->dev,
(dma_addr_t) temp64)) {
c->SG[i].Addr = cpu_to_le64(0);
c->SG[i].Len = cpu_to_le32(0);
hpsa_pci_unmap(h->pdev, c, i,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
status = -ENOMEM;
goto cleanup0;
}
@@ -6467,7 +6464,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
NO_TIMEOUT);
if (sg_used)
- hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+ hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
if (status) {
status = -EIO;
@@ -6505,7 +6502,7 @@ cleanup1:
kfree(buff);
}
kfree(buff_size);
- kfree(ioc);
+ kvfree(ioc);
return status;
}
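vmemdup_user() collapses the former kmalloc()-plus-copy_from_user() pair and may fall back to vmalloc for large objects, which is why the cleanup path switches from kfree() to kvfree(). The pattern in isolation; struct my_ioc is a placeholder for the real ioctl structure:

	struct my_ioc { u32 buf_size; };	/* placeholder type */

	static int example_handle_ioctl(void __user *argp)
	{
		struct my_ioc *ioc;
		int status = 0;

		ioc = vmemdup_user(argp, sizeof(*ioc));
		if (IS_ERR(ioc))
			return PTR_ERR(ioc);

		/* ... validate and use ioc ... */

		kvfree(ioc);	/* not kfree(): may be vmalloc-backed */
		return status;
	}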
@@ -6579,7 +6576,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type)
{
- int pci_dir = XFER_NONE;
+ enum dma_data_direction dir = DMA_NONE;
c->cmd_type = CMD_IOCTL_PEND;
c->scsi_cmd = SCSI_CMD_BUSY;
@@ -6785,18 +6782,18 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
switch (GET_DIR(c->Request.type_attr_dir)) {
case XFER_READ:
- pci_dir = PCI_DMA_FROMDEVICE;
+ dir = DMA_FROM_DEVICE;
break;
case XFER_WRITE:
- pci_dir = PCI_DMA_TODEVICE;
+ dir = DMA_TO_DEVICE;
break;
case XFER_NONE:
- pci_dir = PCI_DMA_NONE;
+ dir = DMA_NONE;
break;
default:
- pci_dir = PCI_DMA_BIDIRECTIONAL;
+ dir = DMA_BIDIRECTIONAL;
}
- if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+ if (hpsa_map_one(h->pdev, c, buff, size, dir))
return -1;
return 0;
}
@@ -6992,13 +6989,13 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
* CCISS commands, so they must be allocated from the lower 4GiB of
* memory.
*/
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
iounmap(vaddr);
return err;
}
- cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+ cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
if (cmd == NULL) {
iounmap(vaddr);
return -ENOMEM;
@@ -7047,7 +7044,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
return -ETIMEDOUT;
}
- pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+ dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
if (tag & HPSA_ERROR_BIT) {
dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
@@ -7914,7 +7911,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
kfree(h->cmd_pool_bits);
h->cmd_pool_bits = NULL;
if (h->cmd_pool) {
- pci_free_consistent(h->pdev,
+ dma_free_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool,
h->cmd_pool_dhandle);
@@ -7922,7 +7919,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
h->cmd_pool_dhandle = 0;
}
if (h->errinfo_pool) {
- pci_free_consistent(h->pdev,
+ dma_free_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
@@ -7936,12 +7933,12 @@ static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
sizeof(unsigned long),
GFP_KERNEL);
- h->cmd_pool = pci_alloc_consistent(h->pdev,
+ h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->cmd_pool),
- &(h->cmd_pool_dhandle));
- h->errinfo_pool = pci_alloc_consistent(h->pdev,
+ &h->cmd_pool_dhandle, GFP_KERNEL);
+ h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->errinfo_pool),
- &(h->errinfo_pool_dhandle));
+ &h->errinfo_pool_dhandle, GFP_KERNEL);
if ((h->cmd_pool_bits == NULL)
|| (h->cmd_pool == NULL)
|| (h->errinfo_pool == NULL)) {
@@ -8068,7 +8065,7 @@ static void hpsa_free_reply_queues(struct ctlr_info *h)
for (i = 0; i < h->nreply_queues; i++) {
if (!h->reply_queue[i].head)
continue;
- pci_free_consistent(h->pdev,
+ dma_free_coherent(&h->pdev->dev,
h->reply_queue_size,
h->reply_queue[i].head,
h->reply_queue[i].busaddr);
@@ -8594,11 +8591,11 @@ reinit_after_soft_reset:
number_of_controllers++;
/* configure PCI DMA stuff */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (rc == 0) {
dac = 1;
} else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc == 0) {
dac = 0;
} else {
@@ -8797,8 +8794,8 @@ static void hpsa_flush_cache(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD)) {
goto out;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+ DEFAULT_TIMEOUT);
if (rc)
goto out;
if (c->err_info->CommandStatus != 0)
@@ -8833,8 +8830,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD))
goto errout;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8845,8 +8842,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD))
goto errout;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+ NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8855,8 +8852,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD))
goto errout;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -9228,9 +9225,9 @@ static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
IOACCEL1_COMMANDLIST_ALIGNMENT);
h->ioaccel_cmd_pool =
- pci_alloc_consistent(h->pdev,
+ dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
- &(h->ioaccel_cmd_pool_dhandle));
+ &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
h->ioaccel1_blockFetchTable =
kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9281,9 +9278,9 @@ static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
IOACCEL2_COMMANDLIST_ALIGNMENT);
h->ioaccel2_cmd_pool =
- pci_alloc_consistent(h->pdev,
+ dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
- &(h->ioaccel2_cmd_pool_dhandle));
+ &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
h->ioaccel2_blockFetchTable =
kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9356,9 +9353,10 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
h->reply_queue_size = h->max_commands * sizeof(u64);
for (i = 0; i < h->nreply_queues; i++) {
- h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+ h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
h->reply_queue_size,
- &(h->reply_queue[i].busaddr));
+ &h->reply_queue[i].busaddr,
+ GFP_KERNEL);
if (!h->reply_queue[i].head) {
rc = -ENOMEM;
goto clean1; /* rq, ioaccel */
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index f42a619198c4..e63aadd10dfd 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2266,7 +2266,6 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
/*
* Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
*/
- target_wait_for_sess_cmds(se_sess);
target_remove_session(se_sess);
tport->ibmv_nexus = NULL;
kfree(nexus);
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bd6ac6b5980a..ee8a1ecd58fd 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -208,7 +208,7 @@ module_param(ips, charp, 0);
#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
- PCI_DMA_BIDIRECTIONAL : \
+ DMA_BIDIRECTIONAL : \
scb->scsi_cmd->sc_data_direction)
#ifdef IPS_DEBUG
@@ -1529,11 +1529,12 @@ ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
if (ha->ioctl_data && length <= ha->ioctl_len)
return 0;
/* there is no buffer or it's not big enough, allocate a new one */
- bigger_buf = pci_alloc_consistent(ha->pcidev, length, &dma_busaddr);
+ bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
+ GFP_KERNEL);
if (bigger_buf) {
/* free the old memory */
- pci_free_consistent(ha->pcidev, ha->ioctl_len, ha->ioctl_data,
- ha->ioctl_busaddr);
+ dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
+ ha->ioctl_data, ha->ioctl_busaddr);
/* use the new memory */
ha->ioctl_data = (char *) bigger_buf;
ha->ioctl_len = length;
@@ -1678,9 +1679,8 @@ ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
} else if (!ha->flash_data) {
datasize = pt->CoppCP.cmd.flashfw.total_packets *
pt->CoppCP.cmd.flashfw.count;
- ha->flash_data = pci_alloc_consistent(ha->pcidev,
- datasize,
- &ha->flash_busaddr);
+ ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
+ datasize, &ha->flash_busaddr, GFP_KERNEL);
if (!ha->flash_data){
printk(KERN_WARNING "Unable to allocate a flash buffer\n");
return IPS_FAILURE;
@@ -1858,7 +1858,7 @@ ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
scb->data_len = ha->flash_datasize;
scb->data_busaddr =
- pci_map_single(ha->pcidev, ha->flash_data, scb->data_len,
+ dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
IPS_DMA_DIR(scb));
scb->flags |= IPS_SCB_MAP_SINGLE;
scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
@@ -1880,8 +1880,8 @@ ips_free_flash_copperhead(ips_ha_t * ha)
if (ha->flash_data == ips_FlashData)
test_and_clear_bit(0, &ips_FlashDataInUse);
else if (ha->flash_data)
- pci_free_consistent(ha->pcidev, ha->flash_len, ha->flash_data,
- ha->flash_busaddr);
+ dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
+ ha->flash_data, ha->flash_busaddr);
ha->flash_data = NULL;
}
@@ -3485,6 +3485,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
case START_STOP:
scb->scsi_cmd->result = DID_OK << 16;
+ break;
case TEST_UNIT_READY:
case INQUIRY:
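The added break stops START_STOP from falling through into the TEST_UNIT_READY/INQUIRY handling after its result has been set, an apparently unintended fallthrough. A minimal illustration; cmnd0 stands in for scb->scsi_cmd->cmnd[0]:

	static int example_result(u8 cmnd0)
	{
		int result = 0;

		switch (cmnd0) {
		case START_STOP:
			result = DID_OK << 16;
			break;			/* the break this hunk adds */
		case TEST_UNIT_READY:
		case INQUIRY:
			result = DID_OK << 16;	/* driver-specific handling */
			break;
		}
		return result;
	}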
@@ -4212,7 +4213,7 @@ ips_free(ips_ha_t * ha)
if (ha) {
if (ha->enq) {
- pci_free_consistent(ha->pcidev, sizeof(IPS_ENQ),
+ dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
ha->enq, ha->enq_busaddr);
ha->enq = NULL;
}
@@ -4221,7 +4222,7 @@ ips_free(ips_ha_t * ha)
ha->conf = NULL;
if (ha->adapt) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
sizeof (IPS_ADAPTER) +
sizeof (IPS_IO_CMD), ha->adapt,
ha->adapt->hw_status_start);
@@ -4229,7 +4230,7 @@ ips_free(ips_ha_t * ha)
}
if (ha->logical_drive_info) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
sizeof (IPS_LD_INFO),
ha->logical_drive_info,
ha->logical_drive_info_dma_addr);
@@ -4243,7 +4244,7 @@ ips_free(ips_ha_t * ha)
ha->subsys = NULL;
if (ha->ioctl_data) {
- pci_free_consistent(ha->pcidev, ha->ioctl_len,
+ dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
ha->ioctl_data, ha->ioctl_busaddr);
ha->ioctl_data = NULL;
ha->ioctl_datasize = 0;
@@ -4276,11 +4277,11 @@ static int
ips_deallocatescbs(ips_ha_t * ha, int cmds)
{
if (ha->scbs) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
ha->scbs->sg_list.list,
ha->scbs->sg_busaddr);
- pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds,
+ dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
ha->scbs, ha->scbs->scb_busaddr);
ha->scbs = NULL;
} /* end if */
@@ -4307,17 +4308,16 @@ ips_allocatescbs(ips_ha_t * ha)
METHOD_TRACE("ips_allocatescbs", 1);
/* Allocate memory for the SCBs */
- ha->scbs =
- pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t),
- &command_dma);
+ ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
+ ha->max_cmds * sizeof (ips_scb_t),
+ &command_dma, GFP_KERNEL);
if (ha->scbs == NULL)
return 0;
- ips_sg.list =
- pci_alloc_consistent(ha->pcidev,
- IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
- ha->max_cmds, &sg_dma);
+ ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
+ IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
+ &sg_dma, GFP_KERNEL);
if (ips_sg.list == NULL) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
command_dma);
return 0;
@@ -4446,8 +4446,8 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
if (scb->flags & IPS_SCB_MAP_SG)
scsi_dma_unmap(scb->scsi_cmd);
else if (scb->flags & IPS_SCB_MAP_SINGLE)
- pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
- IPS_DMA_DIR(scb));
+ dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
+ scb->data_len, IPS_DMA_DIR(scb));
/* check to make sure this is not our "special" scb */
if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
@@ -4559,7 +4559,8 @@ ips_flush_and_reset(ips_ha_t *ha)
dma_addr_t command_dma;
/* Create a usable SCB */
- scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
+ scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
+ &command_dma, GFP_KERNEL);
if (scb) {
memset(scb, 0, sizeof(ips_scb_t));
ips_init_scb(ha, scb);
@@ -4594,7 +4595,7 @@ ips_flush_and_reset(ips_ha_t *ha)
/* Now RESET and INIT the adapter */
(*ha->func.reset) (ha);
- pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb, command_dma);
+ dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma);
return;
}
@@ -6926,29 +6927,30 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
* are guaranteed to be < 4G.
*/
if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
- !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
+ !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
(ha)->flags |= IPS_HA_ENH_SG;
} else {
- if (pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING "Unable to set DMA Mask\n");
return ips_abort_init(ha, index);
}
}
if(ips_cd_boot && !ips_FlashData){
- ips_FlashData = pci_alloc_consistent(pci_dev, PAGE_SIZE << 7,
- &ips_flashbusaddr);
+ ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
+ PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
}
- ha->enq = pci_alloc_consistent(pci_dev, sizeof (IPS_ENQ),
- &ha->enq_busaddr);
+ ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
+ &ha->enq_busaddr, GFP_KERNEL);
if (!ha->enq) {
IPS_PRINTK(KERN_WARNING, pci_dev,
"Unable to allocate host inquiry structure\n");
return ips_abort_init(ha, index);
}
- ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) +
- sizeof (IPS_IO_CMD), &dma_address);
+ ha->adapt = dma_alloc_coherent(&pci_dev->dev,
+ sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
+ &dma_address, GFP_KERNEL);
if (!ha->adapt) {
IPS_PRINTK(KERN_WARNING, pci_dev,
"Unable to allocate host adapt & dummy structures\n");
@@ -6959,7 +6961,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
- ha->logical_drive_info = pci_alloc_consistent(pci_dev, sizeof (IPS_LD_INFO), &dma_address);
+ ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
+ sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
if (!ha->logical_drive_info) {
IPS_PRINTK(KERN_WARNING, pci_dev,
"Unable to allocate logical drive info structure\n");
@@ -6997,8 +7000,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
if (ips_ioctlsize < PAGE_SIZE)
ips_ioctlsize = PAGE_SIZE;
- ha->ioctl_data = pci_alloc_consistent(pci_dev, ips_ioctlsize,
- &ha->ioctl_busaddr);
+ ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
+ &ha->ioctl_busaddr, GFP_KERNEL);
ha->ioctl_len = ips_ioctlsize;
if (!ha->ioctl_data) {
IPS_PRINTK(KERN_WARNING, pci_dev,
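
Every ips.c hunk above is the same mechanical conversion from the legacy PCI DMA wrappers to the generic DMA API: the struct pci_dev argument becomes &pdev->dev, and the allocation flags, which pci_alloc_consistent() hard-coded to GFP_ATOMIC, become explicit (GFP_KERNEL is safe in these process-context probe and ioctl paths). A minimal sketch of the pattern, with hypothetical helper names rather than ips code:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void *alloc_shared(struct pci_dev *pdev, size_t len, dma_addr_t *bus)
{
	/* old: pci_alloc_consistent(pdev, len, bus), GFP_ATOMIC implied */
	return dma_alloc_coherent(&pdev->dev, len, bus, GFP_KERNEL);
}

static void free_shared(struct pci_dev *pdev, size_t len, void *cpu,
			dma_addr_t bus)
{
	/* old: pci_free_consistent(pdev, len, cpu, bus) */
	dma_free_coherent(&pdev->dev, len, cpu, bus);
}
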
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 1ee3868ade07..7b5deae68d33 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
* the task management request.
* @task_request: the handle to the task request object to start.
*/
-enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
- struct isci_remote_device *idev,
- struct isci_request *ireq)
+enum sci_status sci_controller_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
{
enum sci_status status;
@@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
"%s: SCIC Controller starting task from invalid "
"state\n",
__func__);
- return SCI_TASK_FAILURE_INVALID_STATE;
+ return SCI_FAILURE_INVALID_STATE;
}
status = sci_remote_device_start_task(ihost, idev, ireq);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index b3539928073c..6bc3f022630a 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -489,7 +489,7 @@ enum sci_status sci_controller_start_io(
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_task_status sci_controller_start_task(
+enum sci_status sci_controller_start_task(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index ed197bc8e801..2f151708b59a 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
if (status == SCI_SUCCESS) {
if (ireq->stp.rsp.status & ATA_ERR)
- status = SCI_IO_FAILURE_RESPONSE_VALID;
+ status = SCI_FAILURE_IO_RESPONSE_VALID;
} else {
- status = SCI_IO_FAILURE_RESPONSE_VALID;
+ status = SCI_FAILURE_IO_RESPONSE_VALID;
}
if (status != SCI_SUCCESS) {
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 6dcaed0c1fc8..fb6eba331ac6 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
struct isci_tmf *tmf, unsigned long timeout_ms)
{
DECLARE_COMPLETION_ONSTACK(completion);
- enum sci_task_status status = SCI_TASK_FAILURE;
+ enum sci_status status = SCI_FAILURE;
struct isci_request *ireq;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
@@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
/* start the TMF io. */
status = sci_controller_start_task(ihost, idev, ireq);
- if (status != SCI_TASK_SUCCESS) {
+ if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
"%s: start_io failed - status = 0x%x, request = %p\n",
__func__,
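
The isci hunks above collapse the parallel enum sci_task_status into enum sci_status, so the task-management paths compare against the same constants the I/O paths use. A sketch of the hazard a single enum removes; the names here are illustrative, not the isci definitions:

enum io_status   { IO_SUCCESS, IO_FAILURE_INVALID_STATE };
enum task_status { TASK_SUCCESS, TASK_FAILURE_INVALID_STATE };

enum task_status start_task(void);

static int started_ok(void)
{
	/* C happily compares constants from different enums, so this
	 * compiles without a diagnostic even though it checks the wrong
	 * namespace; one shared status enum rules the mismatch out.
	 */
	return start_task() == IO_SUCCESS;
}
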
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b025a0b74341..23354f206533 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -800,7 +800,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
return rc;
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
- &addr, param, buf);
+ &addr,
+ (enum iscsi_param)param, buf);
default:
return iscsi_host_get_param(shost, param, buf);
}
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 6eb5ff3e2e61..1ad28262b00a 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -38,30 +38,6 @@ static u8 jazz_esp_read8(struct esp *esp, unsigned long reg)
return *(volatile u8 *)(esp->regs + reg);
}
-static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int jazz_esp_irq_pending(struct esp *esp)
{
if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
@@ -117,10 +93,6 @@ static int jazz_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops jazz_esp_ops = {
.esp_write8 = jazz_esp_write8,
.esp_read8 = jazz_esp_read8,
- .map_single = jazz_esp_map_single,
- .map_sg = jazz_esp_map_sg,
- .unmap_single = jazz_esp_unmap_single,
- .unmap_sg = jazz_esp_unmap_sg,
.irq_pending = jazz_esp_irq_pending,
.reset_dma = jazz_esp_reset_dma,
.dma_drain = jazz_esp_dma_drain,
@@ -182,7 +154,7 @@ static int esp_jazz_probe(struct platform_device *dev)
dev_set_drvdata(&dev->dev, esp);
- err = scsi_esp_register(esp, &dev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
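
The jazz_esp hunk deletes the driver's trivial map/unmap wrappers and drops the struct device argument from scsi_esp_register(); this only works if, as the rest of this series suggests, the esp_scsi core now performs the mapping itself through esp->dev. A sketch of the assumed core-side replacement (the helper name is illustrative, not the esp_scsi API):

/* Assumed core-side logic once the wrappers are gone: every esp driver
 * mapped through its own struct device anyway, so the core can call the
 * generic DMA API directly instead of indirecting through driver ops.
 */
static dma_addr_t esp_map_dma(struct esp *esp, void *buf, size_t sz,
			      enum dma_data_direction dir)
{
	return dma_map_single(esp->dev, buf, sz, dir);
}
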
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 4fae253d4f3d..b1bd283be51c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1872,7 +1872,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_fcp_pkt *fsp;
- struct fc_rport_libfc_priv *rpriv;
int rval;
int rc = 0;
struct fc_stats *stats;
@@ -1894,8 +1893,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
goto out;
}
- rpriv = rport->dd_data;
-
if (!fc_fcp_lport_queue_ready(lport)) {
if (lport->qfull) {
if (fc_fcp_can_queue_ramp_down(lport))
@@ -2295,8 +2292,7 @@ int fc_setup_fcp(void)
void fc_destroy_fcp(void)
{
- if (scsi_pkt_cachep)
- kmem_cache_destroy(scsi_pkt_cachep);
+ kmem_cache_destroy(scsi_pkt_cachep);
}
/**
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 372387a450df..1e1c0f1b9e69 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1038,8 +1038,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_ls_rjt *rjt;
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
- FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
- rjt->er_reason, rjt->er_explan);
+ if (!rjt)
+ FC_RPORT_DBG(rdata, "PLOGI bad response\n");
+ else
+ FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
}
out:
@@ -1158,8 +1161,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC) {
pp = fc_frame_payload_get(fp, sizeof(*pp));
- if (!pp)
+ if (!pp) {
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
+ }
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
@@ -1172,8 +1177,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
}
- if (pp->prli.prli_spp_len < sizeof(pp->spp))
+ if (pp->prli.prli_spp_len < sizeof(pp->spp)) {
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
+ }
fcp_parm = ntohl(pp->spp.spp_params);
if (fcp_parm & FCP_SPPF_RETRY)
@@ -1211,8 +1218,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
} else {
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
- FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
- rjt->er_reason, rjt->er_explan);
+ if (!rjt)
+ FC_RPORT_DBG(rdata, "PRLI bad response\n");
+ else
+ FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
}
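
Both the PLOGI and PRLI handlers above gain the same guard: fc_frame_payload_get() returns NULL when the frame is too short to contain the requested payload, so the reject fields must not be dereferenced unconditionally. The shape of the fix, restated with stub types so it stands alone:

struct els_rjt { unsigned char er_reason, er_explan; };

/* returns NULL on a truncated frame, like fc_frame_payload_get() */
static const struct els_rjt *payload_get(const void *frm, size_t len)
{
	return len >= sizeof(struct els_rjt) ? frm : NULL;
}

static void log_reject(const void *frm, size_t len)
{
	const struct els_rjt *rjt = payload_get(frm, len);

	if (!rjt)
		pr_debug("ELS rejected, bad response\n");
	else
		pr_debug("ELS rejected, reason %x expl %x\n",
			 rjt->er_reason, rjt->er_explan);
}
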
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 64a958a99f6a..4f6cdf53e913 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -654,7 +654,7 @@ void sas_probe_sata(struct asd_sas_port *port)
/* if libata could not bring the link up, don't surface
* the device
*/
- if (ata_dev_disabled(sas_to_ata_dev(dev)))
+ if (!ata_dev_enabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, __func__, -ENODEV);
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 0148ae62a52a..dde433aa59c2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -260,7 +260,7 @@ static void sas_suspend_devices(struct work_struct *work)
* phy_list is not being mutated
*/
list_for_each_entry(phy, &port->phy_list, port_phy_el) {
- if (si->dft->lldd_port_formed)
+ if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
phy->suspended = 1;
port->suspended = 1;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index fadc99cb60df..0d1f72752ca2 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -48,17 +48,16 @@ static void smp_task_timedout(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ complete(&task->slow_task->completion);
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- complete(&task->slow_task->completion);
}
static void smp_task_done(struct sas_task *task)
{
- if (!del_timer(&task->slow_task->timer))
- return;
+ del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
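
The smp_task_timedout() change fixes a signalling race: the old code completed the slow_task unconditionally after dropping the lock, so a timer firing just as the normal completion ran could signal twice, while smp_task_done() returned early whenever del_timer() lost the race and then never completed at all. The invariant the new code enforces, annotated:

spin_lock_irqsave(&task->task_state_lock, flags);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	/* Signal inside the lock: only the path that observes the task
	 * as not-yet-DONE completes it, so exactly one complete() runs.
	 */
	complete(&task->slow_task->completion);
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
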
@@ -2054,14 +2053,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
return res;
}
- /* delete the old link */
- if (SAS_ADDR(phy->attached_sas_addr) &&
- SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
- SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
- SAS_ADDR(dev->sas_addr), phy_id,
- SAS_ADDR(phy->attached_sas_addr));
- sas_unregister_devs_sas_addr(dev, phy_id, last);
- }
+ /* we always have to delete the old device when we get here */
+ SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
+ SAS_ADDR(dev->sas_addr), phy_id,
+ SAS_ADDR(phy->attached_sas_addr));
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
return sas_discover_new(dev, phy_id);
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 43732e8d1347..c1eb2b00ca7f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -52,7 +52,7 @@ struct lpfc_sli2_slim;
downloads using bsg */
#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */
-#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */
@@ -583,6 +583,25 @@ struct lpfc_mbox_ext_buf_ctx {
struct list_head ext_dmabuf_list;
};
+struct lpfc_ras_fwlog {
+ uint8_t *fwlog_buff;
+ uint32_t fw_buffcount; /* Buffer size posted to FW */
+#define LPFC_RAS_BUFF_ENTERIES 16 /* Each entry can hold max of 64k */
+#define LPFC_RAS_MAX_ENTRY_SIZE (64 * 1024)
+#define LPFC_RAS_MIN_BUFF_POST_SIZE (256 * 1024)
+#define LPFC_RAS_MAX_BUFF_POST_SIZE (1024 * 1024)
+ uint32_t fw_loglevel; /* Log level set */
+ struct lpfc_dmabuf lwpd;
+ struct list_head fwlog_buff_list;
+
+ /* RAS support status on adapter */
+ bool ras_hwsupport; /* RAS Support available on HW or not */
+ bool ras_enabled; /* RAS enabled for the function */
+#define LPFC_RAS_DISABLE_LOGGING 0x00
+#define LPFC_RAS_ENABLE_LOGGING 0x01
+ bool ras_active; /* RAS logging running state */
+};
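
The constants above pin down the buffer accounting: at most 16 entries of 64K each, posted in 1/4 MB steps up to a 1 MB total. A hedged sketch of the count calculation those limits imply; the helper is hypothetical, not lpfc code:

/* buffsize is the [0..4] lpfc_ras_fwlog_buffsize module parameter */
static u32 ras_buff_count(u32 buffsize)
{
	u32 bytes = buffsize * LPFC_RAS_MIN_BUFF_POST_SIZE; /* 256K steps */

	if (bytes > LPFC_RAS_MAX_BUFF_POST_SIZE)            /* 1 MB cap */
		bytes = LPFC_RAS_MAX_BUFF_POST_SIZE;
	return bytes / LPFC_RAS_MAX_ENTRY_SIZE;             /* 64K units */
}
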
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
@@ -790,6 +809,7 @@ struct lpfc_hba {
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_nvme_seg_cnt;
+ uint32_t cfg_scsi_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
@@ -833,6 +853,9 @@ struct lpfc_hba {
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
uint32_t cfg_enable_SmartSAN;
uint32_t cfg_enable_mds_diags;
+ uint32_t cfg_ras_fwlog_level;
+ uint32_t cfg_ras_fwlog_buffsize;
+ uint32_t cfg_ras_fwlog_func;
uint32_t cfg_enable_fc4_type;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
@@ -963,6 +986,7 @@ struct lpfc_hba {
uint32_t intr_mode;
#define LPFC_INTR_ERROR 0xFFFFFFFF
struct list_head port_list;
+ spinlock_t port_list_lock; /* lock for port_list mutations */
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
@@ -1092,6 +1116,9 @@ struct lpfc_hba {
struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
uint32_t ctx_idx;
+ /* RAS Support */
+ struct lpfc_ras_fwlog ras_fwlog;
+
uint8_t menlo_flag; /* menlo generic flags */
#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
uint32_t iocb_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1a6ed9b0a249..dda7f450b96d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5358,15 +5358,74 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
/*
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 4096. The default value is
- * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
- * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * This value can be set to values between 64 and 4096. The default value
+ * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
+ * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
* Because of the additional overhead involved in setting up T10-DIF,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
- LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+module_param(lpfc_sg_seg_cnt, uint, 0444);
+MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
+
+/**
+ * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
+ * configured for the adapter
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
+ phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "Cfg: %d SCSI: %d NVME: %d\n",
+ phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
+ phba->cfg_nvme_seg_cnt);
+ return len;
+}
+
+static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
+
+/**
+ * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
+ phba->cfg_sg_seg_cnt = val;
+ return 0;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
+ "be set to %d, allowed range is [%d, %d]\n",
+ val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
+ phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+ return -EINVAL;
+}
/*
* lpfc_enable_mds_diags: Enable MDS Diagnostics
@@ -5377,6 +5436,31 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
/*
+ * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
+ * 0 = Disable firmware logging (default)
+ * [1-4] = Multiple of 1/4 MB of host memory for FW logging
+ * Value range [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+
+/*
+ * lpfc_ras_fwlog_level: Firmware logging verbosity level
+ * Valid only if firmware logging is enabled
+ * 0 (least verbosity) to 4 (most verbosity)
+ * Value range is [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
+
+/*
+ * lpfc_ras_fwlog_func: Firmware logging enabled on function number
+ * Default function which has RAS support : 0
+ * Value Range is [0..7].
+ * FW logging is a global action and enablement is via a specific
+ * port.
+ */
+LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
+
+/*
* lpfc_enable_bbcr: Enable BB Credit Recovery
* 0 = BB Credit Recovery disabled
* 1 = BB Credit Recovery enabled (default)
@@ -5501,6 +5585,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_protocol,
&dev_attr_lpfc_xlane_supported,
&dev_attr_lpfc_enable_mds_diags,
+ &dev_attr_lpfc_ras_fwlog_buffsize,
+ &dev_attr_lpfc_ras_fwlog_level,
+ &dev_attr_lpfc_ras_fwlog_func,
&dev_attr_lpfc_enable_bbcr,
&dev_attr_lpfc_enable_dpp,
NULL,
@@ -6587,6 +6674,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_sli_mode_init(phba, lpfc_sli_mode);
phba->cfg_enable_dss = 1;
lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
+ lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
+ lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
+ lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
+
+
+ /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+ * accommodate 512K and 1M IOs in a single nvme buf and supply
+ * enough NVME LS iocb buffers for larger connectivity counts.
+ */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ phba->cfg_iocb_cnt = 5;
+ }
+
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 90745feca808..7bd7ae86bed5 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -2843,9 +2844,6 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
if (nocopydata) {
bpl->tus.f.bdeFlags = 0;
- pci_dma_sync_single_for_device(phba->pcidev,
- dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
-
} else {
memset((uint8_t *)dmp->dma.virt, 0, cnt);
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
@@ -5309,6 +5307,330 @@ job_error:
}
/**
+ * lpfc_check_fwlog_support: Check FW log support on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if FW Logging support by the adapter
+ **/
+int
+lpfc_check_fwlog_support(struct lpfc_hba *phba)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = NULL;
+
+ ras_fwlog = &phba->ras_fwlog;
+
+ if (ras_fwlog->ras_hwsupport == false)
+ return -EACCES;
+ else if (ras_fwlog->ras_enabled == false)
+ return -EPERM;
+ else
+ return 0;
+}
+
+/**
+ * lpfc_bsg_get_ras_config: Get RAS configuration settings
+ * @job: fc_bsg_job to handle
+ *
+ * Get RAS configuration values set.
+ **/
+static int
+lpfc_bsg_get_ras_config(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_bsg_get_ras_config_reply *ras_reply;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_ras_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6181 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ /* Check FW log status */
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ /* Current logging state */
+ if (ras_fwlog->ras_active == true)
+ ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
+ else
+ ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+
+ ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
+ ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
+
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ return rc;
+}
+
+/**
+ * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Disable FW logging into host memory on the adapter. This must
+ * be done before reading the logs from host memory.
+ **/
+static void
+lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+ ras_fwlog->ras_active = false;
+
+ /* Disable FW logging to host memory */
+ writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+}
+
+/**
+ * lpfc_bsg_set_ras_config: Set FW logging parameters
+ * @job: fc_bsg_job to handle
+ *
+ * Set log-level parameters for FW logging in host memory
+ **/
+static int
+lpfc_bsg_set_ras_config(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_bsg_set_ras_config_req *ras_req;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint8_t action = 0, log_level = 0;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_set_ras_config_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6182 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ /* Check FW log status */
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ ras_req = (struct lpfc_bsg_set_ras_config_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+ action = ras_req->action;
+ log_level = ras_req->log_level;
+
+ if (action == LPFC_RASACTION_STOP_LOGGING) {
+ /* Check if already disabled */
+ if (ras_fwlog->ras_active == false) {
+ rc = -ESRCH;
+ goto ras_job_error;
+ }
+
+ /* Disable logging */
+ lpfc_ras_stop_fwlog(phba);
+ } else {
+ /*action = LPFC_RASACTION_START_LOGGING*/
+ if (ras_fwlog->ras_active == true) {
+ rc = -EINPROGRESS;
+ goto ras_job_error;
+ }
+
+ /* Enable logging */
+ rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
+ LPFC_RAS_ENABLE_LOGGING);
+ if (rc)
+ rc = -EINVAL;
+ }
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_lwpd: Get log write position data
+ * @job: fc_bsg_job to handle
+ *
+ * Get Offset/Wrap count of the log message written
+ * in host memory
+ **/
+static int
+lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_bsg_get_ras_lwpd *ras_reply;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t lwpd_offset = 0;
+ uint64_t wrap_value = 0;
+ int rc = 0;
+
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_ras_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6183 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ lwpd_offset = *((uint32_t *)ras_fwlog->lwpd.virt) & 0xffffffff;
+ ras_reply->offset = be32_to_cpu(lwpd_offset);
+
+ wrap_value = *((uint64_t *)ras_fwlog->lwpd.virt);
+ ras_reply->wrap_count = be32_to_cpu((wrap_value >> 32) & 0xffffffff);
+
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
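
For reference, the LWPD decode above reads two big-endian 32-bit words that the adapter maintains: word 0 is the current write offset, word 1 the wrap count. The 64-bit load plus '>> 32' used in the handler assumes a little-endian host; indexing the words directly, as sketched here, is equivalent and byte-order neutral:

__be32 *lwpd = (__be32 *)ras_fwlog->lwpd.virt;

u32 offset     = be32_to_cpu(lwpd[0]);  /* bytes written so far  */
u32 wrap_count = be32_to_cpu(lwpd[1]);  /* times the log wrapped */
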
+
+/**
+ * lpfc_bsg_get_ras_fwlog: Read FW log
+ * @job: fc_bsg_job to handle
+ *
+ * Copy the FW log into the passed buffer.
+ **/
+static int
+lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct lpfc_bsg_get_fwlog_req *ras_req;
+ uint32_t rd_offset, rd_index, offset, pending_wlen;
+ uint32_t boundary = 0, align_len = 0, write_len = 0;
+ void *dest, *src, *fwlog_buff;
+ struct lpfc_ras_fwlog *ras_fwlog = NULL;
+ struct lpfc_dmabuf *dmabuf, *next;
+ int rc = 0;
+
+ ras_fwlog = &phba->ras_fwlog;
+
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ /* Logging to be stopped before reading */
+ if (ras_fwlog->ras_active == true) {
+ rc = -EINPROGRESS;
+ goto ras_job_error;
+ }
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_get_fwlog_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6184 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ ras_req = (struct lpfc_bsg_get_fwlog_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+ rd_offset = ras_req->read_offset;
+
+ /* Allocate memory to read fw log*/
+ fwlog_buff = vmalloc(ras_req->read_size);
+ if (!fwlog_buff) {
+ rc = -ENOMEM;
+ goto ras_job_error;
+ }
+
+ rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
+ offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
+ pending_wlen = ras_req->read_size;
+ dest = fwlog_buff;
+
+ list_for_each_entry_safe(dmabuf, next,
+ &ras_fwlog->fwlog_buff_list, list) {
+
+ if (dmabuf->buffer_tag < rd_index)
+ continue;
+
+ /* Align read to buffer size */
+ if (offset) {
+ boundary = ((dmabuf->buffer_tag + 1) *
+ LPFC_RAS_MAX_ENTRY_SIZE);
+
+ align_len = (boundary - offset);
+ write_len = min_t(u32, align_len,
+ LPFC_RAS_MAX_ENTRY_SIZE);
+ } else {
+ write_len = min_t(u32, pending_wlen,
+ LPFC_RAS_MAX_ENTRY_SIZE);
+ align_len = 0;
+ boundary = 0;
+ }
+ src = dmabuf->virt + offset;
+ memcpy(dest, src, write_len);
+
+ pending_wlen -= write_len;
+ if (!pending_wlen)
+ break;
+
+ dest += write_len;
+ offset = (offset + write_len) % LPFC_RAS_MAX_ENTRY_SIZE;
+ }
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ fwlog_buff, ras_req->read_size);
+
+ vfree(fwlog_buff);
+
+ras_job_error:
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
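
A worked example of the offset decomposition in the copy loop above, using the 64K (65536-byte) entry size:

/* read_offset = 150000:
 *   rd_index = 150000 / 65536 = 2      -> skip buffer_tags 0 and 1
 *   offset   = 150000 % 65536 = 18928  -> start 18928 bytes into buffer 2
 * The loop then copies up to the buffer boundary, resets offset to 0,
 * and continues 64K at a time until pending_wlen reaches zero.
 */
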
+
+
+/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
**/
@@ -5355,6 +5677,18 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
rc = lpfc_forced_link_speed(job);
break;
+ case LPFC_BSG_VENDOR_RAS_GET_LWPD:
+ rc = lpfc_bsg_get_ras_lwpd(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
+ rc = lpfc_bsg_get_ras_fwlog(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
+ rc = lpfc_bsg_get_ras_config(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
+ rc = lpfc_bsg_set_ras_config(job);
+ break;
default:
rc = -EINVAL;
bsg_reply->reply_payload_rcv_len = 0;
@@ -5368,7 +5702,7 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
/**
* lpfc_bsg_request - handle a bsg request from the FC transport
- * @job: fc_bsg_job to handle
+ * @job: bsg_job to handle
**/
int
lpfc_bsg_request(struct bsg_job *job)
@@ -5402,7 +5736,7 @@ lpfc_bsg_request(struct bsg_job *job)
/**
* lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
- * @job: fc_bsg_job that has timed out
+ * @job: bsg_job that has timed out
*
* This function just aborts the job's IOCB. The aborted IOCB will return to
* the waiting function which will handle passing the error back to userspace
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 32347c87e3b4..820323f1139b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -38,6 +38,10 @@
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
+#define LPFC_BSG_VENDOR_RAS_GET_LWPD 16
+#define LPFC_BSG_VENDOR_RAS_GET_FWLOG 17
+#define LPFC_BSG_VENDOR_RAS_GET_CONFIG 18
+#define LPFC_BSG_VENDOR_RAS_SET_CONFIG 19
struct set_ct_event {
uint32_t command;
@@ -296,6 +300,38 @@ struct forced_link_speed_support_reply {
uint8_t supported;
};
+struct lpfc_bsg_ras_req {
+ uint32_t command;
+};
+
+struct lpfc_bsg_get_fwlog_req {
+ uint32_t command;
+ uint32_t read_size;
+ uint32_t read_offset;
+};
+
+struct lpfc_bsg_get_ras_lwpd {
+ uint32_t offset;
+ uint32_t wrap_count;
+};
+
+struct lpfc_bsg_set_ras_config_req {
+ uint32_t command;
+ uint8_t action;
+#define LPFC_RASACTION_STOP_LOGGING 0x00
+#define LPFC_RASACTION_START_LOGGING 0x01
+ uint8_t log_level;
+};
+
+struct lpfc_bsg_get_ras_config_reply {
+ uint8_t state;
+#define LPFC_RASLOG_STATE_STOPPED 0x00
+#define LPFC_RASLOG_STATE_RUNNING 0x01
+ uint8_t log_level;
+ uint32_t log_buff_sz;
+};
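
For orientation, these structures travel in the vendor-specific area of a bsg request/reply. A hedged sketch of how a userspace tool might frame a firmware-log read; only the payload layout is shown, and the sg_io_v4 ioctl plumbing around it is omitted:

struct lpfc_bsg_get_fwlog_req req = {
	.command     = LPFC_BSG_VENDOR_RAS_GET_FWLOG,
	.read_size   = 64 * 1024,   /* one 64K log entry   */
	.read_offset = 0,           /* start of the FW log */
};
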
+
+
/* driver only */
#define SLI_CONFIG_NOT_HANDLED 0
#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bea24bc4410a..e01136507780 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -545,6 +545,13 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
+/* RAS Interface */
+void lpfc_sli4_ras_init(struct lpfc_hba *phba);
+void lpfc_sli4_ras_setup(struct lpfc_hba *phba);
+int lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level,
+ uint32_t fwlog_enable);
+int lpfc_check_fwlog_support(struct lpfc_hba *phba);
+
/* NVME interfaces. */
void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1cbdc892ff95..789ad1502534 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -445,14 +445,14 @@ lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
struct lpfc_vport *vport_curr;
unsigned long flags;
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport_curr, &phba->port_list, listentry) {
if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return vport_curr;
}
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return NULL;
}
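
This hunk is the first of several in the series that move port_list traversal off the coarse, heavily contended hbalock onto a dedicated port_list_lock; lpfc_find_vport_by_vpid(), lpfc_els_flush_all_cmd(), lpfc_create_port() and the removal paths below follow the same pattern:

spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
	/* inspect vport; no sleeping, no nested port_list_lock */
}
spin_unlock_irqrestore(&phba->port_list_lock, flags);
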
@@ -471,11 +471,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
- /* Don't assume the rport is always the previous
- * FC4 type.
- */
- ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
-
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index aec5b10a8c85..0c8005bb0f53 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -550,7 +550,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_nodelist *ndlp;
unsigned char *statep;
struct nvme_fc_local_port *localport;
- struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_remote_port *nrport = NULL;
struct lpfc_nvme_rport *rport;
@@ -654,7 +653,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
"\nOutstanding IO x%x\n", outio);
if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
len += snprintf(buf + len, size - len,
"\nNVME Targetport Entry ...\n");
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4dda969e947c..f1c1faa74b46 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7673,8 +7673,11 @@ void
lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
+
+ spin_lock_irq(&phba->port_list_lock);
list_for_each_entry(vport, &phba->port_list, listentry)
lpfc_els_flush_cmd(vport);
+ spin_unlock_irq(&phba->port_list_lock);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index eb71877f12f8..f4deb862efc6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4193,7 +4193,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_MAPPED_NODE ||
new_state == NLP_STE_UNMAPPED_NODE) {
- if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+ if (ndlp->nlp_fc4_type ||
ndlp->nlp_DID == Fabric_DID ||
ndlp->nlp_DID == NameServer_DID ||
ndlp->nlp_DID == FDMI_DID) {
@@ -5428,12 +5428,10 @@ static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- struct lpfc_sli *psli;
IOCB_t *icmd;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_sli_ring *pring;
- psli = &phba->sli;
pring = lpfc_phba_elsring(phba);
if (unlikely(!pring))
return;
@@ -5938,14 +5936,14 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
}
}
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport->vpi == i) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return vport;
}
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return NULL;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 083f8c8706e5..bbd0a57e953f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -186,6 +186,7 @@ struct lpfc_sli_intf {
#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
+#define LPFC_CTL_PDEV_CTL_DDL_RAS 0x1000000
#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
@@ -964,6 +965,7 @@ struct mbox_header {
/* Subsystem Definitions */
#define LPFC_MBOX_SUBSYSTEM_NA 0x0
#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
+#define LPFC_MBOX_SUBSYSTEM_LOWLEVEL 0xB
#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
/* Device Specific Definitions */
@@ -1030,6 +1032,9 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
+/* Low level Opcodes */
+#define LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION 0x37
+
/* Mailbox command structures */
struct eq_context {
uint32_t word0;
@@ -1162,6 +1167,45 @@ struct lpfc_mbx_nop {
uint32_t context[2];
};
+
+
+struct lpfc_mbx_set_ras_fwlog {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_fwlog_enable_SHIFT 0
+#define lpfc_fwlog_enable_MASK 0x00000001
+#define lpfc_fwlog_enable_WORD word4
+#define lpfc_fwlog_loglvl_SHIFT 8
+#define lpfc_fwlog_loglvl_MASK 0x0000000F
+#define lpfc_fwlog_loglvl_WORD word4
+#define lpfc_fwlog_ra_SHIFT 15
+#define lpfc_fwlog_ra_WORD 0x00000008
+#define lpfc_fwlog_buffcnt_SHIFT 16
+#define lpfc_fwlog_buffcnt_MASK 0x000000FF
+#define lpfc_fwlog_buffcnt_WORD word4
+#define lpfc_fwlog_buffsz_SHIFT 24
+#define lpfc_fwlog_buffsz_MASK 0x000000FF
+#define lpfc_fwlog_buffsz_WORD word4
+ uint32_t word5;
+#define lpfc_fwlog_acqe_SHIFT 0
+#define lpfc_fwlog_acqe_MASK 0x0000FFFF
+#define lpfc_fwlog_acqe_WORD word5
+#define lpfc_fwlog_cqid_SHIFT 16
+#define lpfc_fwlog_cqid_MASK 0x0000FFFF
+#define lpfc_fwlog_cqid_WORD word5
+#define LPFC_MAX_FWLOG_PAGE 16
+ struct dma_address lwpd;
+ struct dma_address buff_fwlog[LPFC_MAX_FWLOG_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
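
The _SHIFT/_MASK/_WORD triplets above are consumed by lpfc's bf_set() accessor when the mailbox is built. A hedged sketch of the assumed setup for the SET_DIAG_LOG_OPTION request; the surrounding routine is not part of this hunk, so field usage here is illustrative:

struct lpfc_mbx_set_ras_fwlog *mbx = &mbox->u.mqe.un.ras_fwlog;

bf_set(lpfc_fwlog_enable, &mbx->u.request, 1);           /* start logging */
bf_set(lpfc_fwlog_loglvl, &mbx->u.request, fwlog_level); /* verbosity 0-4 */
bf_set(lpfc_fwlog_buffcnt, &mbx->u.request, ras_fwlog->fw_buffcount);
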
+
+
struct cq_context {
uint32_t word0;
#define lpfc_cq_context_event_SHIFT 31
@@ -3868,6 +3912,7 @@ struct lpfc_mqe {
struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
struct lpfc_mbx_set_host_data set_host_data;
struct lpfc_mbx_nop nop;
+ struct lpfc_mbx_set_ras_fwlog ras_fwlog;
} un;
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f3cae733ae2d..20fa6785a0e2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3956,7 +3956,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
if (phba->sli_rev == LPFC_SLI_REV4) {
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
- shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+ shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
}
/*
@@ -3988,9 +3988,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
if (error)
goto out_put_shost;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_add_tail(&vport->listentry, &phba->port_list);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
return vport;
out_put_shost:
@@ -4016,9 +4016,9 @@ destroy_port(struct lpfc_vport *vport)
fc_remove_host(shost);
scsi_remove_host(shost);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
lpfc_cleanup(vport);
return;
@@ -5621,7 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize ndlp management spinlock */
spin_lock_init(&phba->ndlp_lock);
+ /* Initialize port_list spinlock */
+ spin_lock_init(&phba->port_list_lock);
INIT_LIST_HEAD(&phba->port_list);
+
INIT_LIST_HEAD(&phba->work_list);
init_waitqueue_head(&phba->wait_4_mlo_m_q);
@@ -5919,8 +5922,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5942,9 +5943,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
- phba->cfg_sg_seg_cnt =
- LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+ /*
+ * If supporting DIF, reduce the seg count for scsi to
+ * allow room for the DIF sges.
+ */
+ if (phba->cfg_enable_bg &&
+ phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+ phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+ else
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
} else {
/*
* The scsi_buf for a regular I/O holds the FCP cmnd,
@@ -5958,6 +5966,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Total SGEs for scsi_sg_list */
phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
/*
* NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
@@ -5965,10 +5974,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
}
+ /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6300 Reducing NVME sg segment "
+ "cnt to %d\n",
+ LPFC_MAX_NVME_SEG_CNT);
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ } else
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+ }
+
/* Initialize the host templates with the updated values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5977,9 +5998,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
- "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+ "9087 sg_seg_cnt:%d dmabuf_size:%d "
+ "total:%d scsi:%d nvme:%d\n",
phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
- phba->cfg_total_seg_cnt);
+ phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
+ phba->cfg_nvme_seg_cnt);
/* Initialize buffer queue management fields */
INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
@@ -6205,6 +6228,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (phba->cfg_fof)
fof_vectors = 1;
+ /* Verify RAS support on adapter */
+ lpfc_sli4_ras_init(phba);
+
/* Verify all the SLI4 queues */
rc = lpfc_sli4_queue_verify(phba);
if (rc)
@@ -7967,7 +7993,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
else
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3028 GET_FUNCTION_CONFIG: failed to find "
- "Resrouce Descriptor:x%x\n",
+ "Resource Descriptor:x%x\n",
LPFC_RSRC_DESC_TYPE_FCFCOE);
read_cfg_out:
@@ -10492,6 +10518,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Stop kthread signal shall trigger work_done one more time */
kthread_stop(phba->worker_thread);
+ /* Disable FW logging to host memory */
+ writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* Free RAS DMA memory */
+ if (phba->ras_fwlog.ras_enabled == true)
+ lpfc_sli4_ras_dma_free(phba);
+
/* Unset the queues shared with the hardware then release all
* allocated resources.
*/
@@ -10737,6 +10771,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->mds_diags_support = 1;
else
phba->mds_diags_support = 0;
+
return 0;
}
@@ -10965,9 +11000,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
kfree(phba->vpi_ids);
lpfc_stop_hba_timers(phba);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
lpfc_debugfs_terminate(vport);
@@ -11329,10 +11364,6 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
/* Bring device online, it will be no-op for non-fatal error resume */
lpfc_online(phba);
-
- /* Clean up Advanced Error Reporting (AER) if needed */
- if (phba->hba_flag & HBA_AER_ENABLED)
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
@@ -11698,6 +11729,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
+
+ /* Enable RAS FW log support */
+ lpfc_sli4_ras_setup(phba);
+
return 0;
out_disable_intr:
@@ -11777,9 +11812,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
lpfc_sli4_hba_unset(phba);
lpfc_stop_hba_timers(phba);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
/* Perform scsi free before driver resource_unset since scsi
* buffers are released to their corresponding pools here.
@@ -12144,10 +12179,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
/* Bring the device back online */
lpfc_online(phba);
}
-
- /* Clean up Advanced Error Reporting (AER) if needed */
- if (phba->hba_flag & HBA_AER_ENABLED)
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
@@ -12428,6 +12459,30 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks to see if RAS is supported by the adapter, and
+ * whether this PCI function is the one through which RAS support is to
+ * be enabled.
+ **/
+void
+lpfc_sli4_ras_init(struct lpfc_hba *phba)
+{
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ phba->ras_fwlog.ras_hwsupport = true;
+ if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn))
+ phba->ras_fwlog.ras_enabled = true;
+ else
+ phba->ras_fwlog.ras_enabled = false;
+ break;
+ default:
+ phba->ras_fwlog.ras_hwsupport = false;
+ }
+}
+
+/**
* lpfc_fof_queue_setup - Set up all the fof queues
* @phba: pointer to lpfc hba data structure.
*
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd9bce9d9974..269808e8480f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -2318,6 +2318,7 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
@@ -2395,6 +2396,7 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
@@ -2652,6 +2654,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 918ae18ef8a8..ba831def9301 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6001 ENTER. lpfc_pnvme %p, qidx x%xi qhandle %p\n",
+ "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
lport, qidx, handle);
kfree(handle);
}
@@ -2235,13 +2235,11 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
struct sli4_sge *sgl;
dma_addr_t pdma_phys_sgl;
uint16_t iotag, lxri = 0;
- int bcnt, num_posted, sgl_size;
+ int bcnt, num_posted;
LIST_HEAD(prep_nblist);
LIST_HEAD(post_nblist);
LIST_HEAD(nvme_nblist);
- sgl_size = phba->cfg_sg_dma_buf_size;
-
for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
if (!lpfc_ncmd)
@@ -2462,17 +2460,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
- /* Limit to LPFC_MAX_NVME_SEG_CNT.
- * For now need + 1 to get around NVME transport logic.
+ /* We need to tell the transport layer + 1 because it takes page
+ * alignment into account. When space for the SGL is allocated we
+ * allocate + 3, one for cmd, one for rsp and one for this alignment
*/
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
- "6300 Reducing sg segment cnt to %d\n",
- LPFC_MAX_NVME_SEG_CNT);
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- } else {
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
- }
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index b766afe10d3d..6245f442d784 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1339,15 +1339,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
idx = 0;
}
- infop = phba->sli4_hba.nvmet_ctx_info;
- for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+ infop = lpfc_get_ctx_list(phba, i, j);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
"6408 TOTAL NVMET ctx for CPU %d "
"MRQ %d: cnt %d nextcpu %p\n",
i, j, infop->nvmet_ctx_list_cnt,
infop->nvmet_ctx_next_cpu);
- infop++;
}
}
return 0;
@@ -1373,17 +1372,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
pinfo.port_id = vport->fc_myDID;
- /* Limit to LPFC_MAX_NVME_SEG_CNT.
- * For now need + 1 to get around NVME transport logic.
+ /* We need to tell the transport layer + 1 because it takes page
+ * alignment into account. When space for the SGL is allocated we
+ * allocate + 3, one for cmd, one for rsp and one for this alignment
*/
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
- "6400 Reducing sg segment cnt to %d\n",
- LPFC_MAX_NVME_SEG_CNT);
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- } else {
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
- }
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 5c7858e735c9..4fa6703a9ec9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -202,8 +202,8 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
- struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
- struct lpfc_nodelist *pnode = rdata->pnode;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long flags;
struct Scsi_Host *shost = cmd->device->host;
@@ -211,17 +211,19 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
unsigned long latency;
int i;
- if (cmd->result)
+ if (!vport->stat_data_enabled ||
+ vport->stat_data_blocked ||
+ (cmd->result))
return;
latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+ rdata = lpfc_cmd->rdata;
+ pnode = rdata->pnode;
spin_lock_irqsave(shost->host_lock, flags);
- if (!vport->stat_data_enabled ||
- vport->stat_data_blocked ||
- !pnode ||
- !pnode->lat_data ||
- (phba->bucket_type == LPFC_NO_BUCKET)) {
+ if (!pnode ||
+ !pnode->lat_data ||
+ (phba->bucket_type == LPFC_NO_BUCKET)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
@@ -1050,7 +1052,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (!found)
return NULL;
- if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
}
@@ -4158,9 +4160,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
- spin_lock_irqsave(&phba->hbalock, flags);
- lpfc_cmd->pCmd = NULL;
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* If pCmd was set to NULL from abort path, do not call scsi_done */
+ if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0711 FCP cmd already NULL, sid: 0x%06x, "
+ "did: 0x%06x, oxid: 0x%04x\n",
+ vport->fc_myDID,
+ (pnode) ? pnode->nlp_DID : 0,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
+ return;
+ }
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
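
The xchg() above implements a single-owner handoff between the normal completion path and the abort path: whichever side swaps pCmd to NULL first owns the scsi_done() upcall, and the loser backs off. The pattern in isolation:

struct scsi_cmnd *cmd = xchg(&lpfc_cmd->pCmd, NULL);

if (!cmd)
	return;              /* the other path already completed it */
cmd->scsi_done(cmd);         /* exactly one caller reaches this    */
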
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9830bdb6e072..783a1540cfbe 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -392,11 +392,7 @@ lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
struct lpfc_register doorbell;
doorbell.word0 = 0;
- bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
- bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
- bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
- (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
- bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+ bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
@@ -3797,6 +3793,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
struct hbq_dmabuf *dmabuf;
struct lpfc_cq_event *cq_event;
unsigned long iflag;
+ int count = 0;
spin_lock_irqsave(&phba->hbalock, iflag);
phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
@@ -3818,16 +3815,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
if (irspiocbq)
lpfc_sli_sp_handle_rspiocb(phba, pring,
irspiocbq);
+ count++;
break;
case CQE_CODE_RECEIVE:
case CQE_CODE_RECEIVE_V1:
dmabuf = container_of(cq_event, struct hbq_dmabuf,
cq_event);
lpfc_sli4_handle_received_buffer(phba, dmabuf);
+ count++;
break;
default:
break;
}
+
+ /* Limit the number of events to 64 to avoid soft lockups */
+ if (count == 64)
+ break;
}
}
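The count-and-break added above is the usual work-budget shape: handle at most a fixed number of queued events per invocation so a busy queue cannot monopolize the CPU, and rely on being called again for the remainder. A generic sketch, with illustrative types:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    #define EVENT_BUDGET 64

    static void drain_some(struct list_head *q, spinlock_t *lock,
                           void (*handle)(struct list_head *entry))
    {
            struct list_head *entry;
            unsigned long flags;
            int count = 0;

            spin_lock_irqsave(lock, flags);
            while (!list_empty(q) && count++ < EVENT_BUDGET) {
                    entry = q->next;
                    list_del_init(entry);
                    spin_unlock_irqrestore(lock, flags);
                    handle(entry);          /* run the handler unlocked */
                    spin_lock_irqsave(lock, flags);
            }
            spin_unlock_irqrestore(lock, flags);
    }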
@@ -6146,6 +6149,271 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
}
/**
+ * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called to free memory allocated for RAS FW logging
+ * support in the driver.
+ **/
+void
+lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct lpfc_dmabuf *dmabuf, *next;
+
+ if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
+ list_for_each_entry_safe(dmabuf, next,
+ &ras_fwlog->fwlog_buff_list,
+ list) {
+ list_del(&dmabuf->list);
+ dma_free_coherent(&phba->pcidev->dev,
+ LPFC_RAS_MAX_ENTRY_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ }
+
+ if (ras_fwlog->lwpd.virt) {
+ dma_free_coherent(&phba->pcidev->dev,
+ sizeof(uint32_t) * 2,
+ ras_fwlog->lwpd.virt,
+ ras_fwlog->lwpd.phys);
+ ras_fwlog->lwpd.virt = NULL;
+ }
+
+ ras_fwlog->ras_active = false;
+}
+
+/**
+ * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
+ * @phba: Pointer to HBA context object.
+ * @fwlog_buff_count: Count of buffers to be created.
+ *
+ * This routine allocates DMA memory for the Log Write Position Data (LWPD)
+ * and for the buffers posted to the adapter to receive the FW log.
+ * The buffer count is derived from the module parameter ras_fwlog_buffsize;
+ * each buffer posted to the FW is 64K.
+ **/
+
+static int
+lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
+ uint32_t fwlog_buff_count)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct lpfc_dmabuf *dmabuf;
+ int rc = 0, i = 0;
+
+ /* Initialize List */
+ INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
+
+ /* Allocate memory for the LWPD */
+ ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
+ sizeof(uint32_t) * 2,
+ &ras_fwlog->lwpd.phys,
+ GFP_KERNEL);
+ if (!ras_fwlog->lwpd.virt) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6185 LWPD Memory Alloc Failed\n");
+
+ return -ENOMEM;
+ }
+
+ ras_fwlog->fw_buffcount = fwlog_buff_count;
+ for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+ GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6186 Memory Alloc failed FW logging");
+ goto free_mem;
+ }
+
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ LPFC_RAS_MAX_ENTRY_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6187 DMA Alloc Failed FW logging");
+ goto free_mem;
+ }
+ memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
+ dmabuf->buffer_tag = i;
+ list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
+ }
+
+free_mem:
+ if (rc)
+ lpfc_sli4_ras_dma_free(phba);
+
+ return rc;
+}
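Note the cleanup contract: on any failure the routine frees everything it already allocated, LWPD included, via lpfc_sli4_ras_dma_free(), so a caller only checks the return code. A hypothetical call site (entry_count is illustrative):

    if (lpfc_sli4_ras_dma_alloc(phba, entry_count)) {
            /* helper already unwound its own allocations */
            return -ENOMEM;
    }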
+
+/**
+ * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * Completion handler for driver's RAS MBX command to the device.
+ **/
+static void
+lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+ mb = &pmb->u.mb;
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+ if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "6188 FW LOG mailbox "
+ "completed with status x%x add_status x%x,"
+ " mbx status x%x\n",
+ shdr_status, shdr_add_status, mb->mbxStatus);
+ goto disable_ras;
+ }
+
+ ras_fwlog->ras_active = true;
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return;
+
+disable_ras:
+ /* Free RAS DMA memory */
+ lpfc_sli4_ras_dma_free(phba);
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
+ * @phba: pointer to lpfc hba data structure.
+ * @fwlog_level: Logging verbosity level.
+ * @fwlog_enable: Enable/Disable logging.
+ *
+ * Initialize memory and post mailbox command to enable FW logging in host
+ * memory.
+ **/
+int
+lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
+ uint32_t fwlog_level,
+ uint32_t fwlog_enable)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
+ int rc = 0;
+
+ fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
+ phba->cfg_ras_fwlog_buffsize);
+ fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
+
+ /*
+ * If re-enabling FW logging support, use the earlier allocated
+ * DMA buffers while posting the MBX command.
+ **/
+ if (!ras_fwlog->lwpd.virt) {
+ rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6189 RAS FW Log Support Not Enabled");
+ return rc;
+ }
+ }
+
+ /* Setup Mailbox command */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6190 RAS MBX Alloc Failed");
+ rc = -ENOMEM;
+ goto mem_free;
+ }
+
+ ras_fwlog->fw_loglevel = fwlog_level;
+ len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
+ LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
+ len, LPFC_SLI4_MBX_EMBED);
+
+ mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
+ bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
+ fwlog_enable);
+ bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
+ ras_fwlog->fw_loglevel);
+ bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
+ ras_fwlog->fw_buffcount);
+ bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
+ LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
+
+ /* Update DMA buffer address */
+ list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
+ memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
+
+ mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+
+ mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+
+ /* Update LWPD address */
+ mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
+ mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6191 RAS Mailbox failed. "
+ "status %d mbxStatus : x%x", rc,
+ bf_get(lpfc_mqe_status, &mbox->u.mqe));
+ mempool_free(mbox, phba->mbox_mem_pool);
+ rc = -EIO;
+ goto mem_free;
+ } else {
+ rc = 0;
+ }
+mem_free:
+ if (rc)
+ lpfc_sli4_ras_dma_free(phba);
+
+ return rc;
+}
+
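The function above follows the driver's usual asynchronous mailbox shape, and the ownership rule is worth spelling out: the completion handler frees the mailbox on the normal path, so the issuer frees it only when lpfc_sli_issue_mbox() fails outright. A condensed sketch (my_cmpl is a hypothetical handler):

    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
            return -ENOMEM;
    /* ... lpfc_sli4_config() and bf_set() the request fields ... */
    mbox->mbox_cmpl = my_cmpl;              /* frees mbox when it runs */
    if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
            mempool_free(mbox, phba->mbox_mem_pool);
            return -EIO;
    }
    return 0;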
+/**
+ * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if RAS is supported on the adapter and initialize it.
+ **/
+void
+lpfc_sli4_ras_setup(struct lpfc_hba *phba)
+{
+ /* Check whether RAS FW logging needs to be enabled */
+ if (lpfc_check_fwlog_support(phba))
+ return;
+
+ lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+ LPFC_RAS_ENABLE_LOGGING);
+}
+
+/**
* lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
* @phba: Pointer to HBA context object.
*
@@ -10266,8 +10534,12 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmb;
unsigned long iflag;
+ /* Disable softirqs, including timers, from obtaining phba->hbalock */
+ local_bh_disable();
+
/* Flush all the mailbox commands in the mbox system */
spin_lock_irqsave(&phba->hbalock, iflag);
+
/* The pending mailbox command queue */
list_splice_init(&phba->sli.mboxq, &completions);
/* The outstanding active mailbox command */
@@ -10280,6 +10552,9 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
list_splice_init(&phba->sli.mboxq_cmpl, &completions);
spin_unlock_irqrestore(&phba->hbalock, iflag);
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+
/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
while (!list_empty(&completions)) {
list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
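The local_bh_disable()/local_bh_enable() pair added above assumes some users of phba->hbalock run from softirq context (timer callbacks); keeping bottom halves off this CPU for the whole splice-and-flush keeps those users from interleaving with it. The general shape, sketched with a generic lock:

    local_bh_disable();                     /* hold off timer softirqs */
    spin_lock_irqsave(&lock, flags);
    list_splice_init(&pending, &private);   /* steal work under the lock */
    spin_unlock_irqrestore(&lock, flags);
    local_bh_enable();
    /* complete everything on "private" with no lock held */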
@@ -10419,6 +10694,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
lpfc_hba_down_prep(phba);
+ /* Disable softirqs, including timers, from obtaining phba->hbalock */
+ local_bh_disable();
+
lpfc_fabric_abort_hba(phba);
spin_lock_irqsave(&phba->hbalock, flags);
@@ -10472,6 +10750,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
kfree(buf_ptr);
}
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+
/* Return any active mbox cmds */
del_timer_sync(&psli->mbox_tmo);
@@ -11775,6 +12056,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
}
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ /* Disable softirqs, including timers, from obtaining phba->hbalock */
+ local_bh_disable();
+
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
@@ -11788,6 +12072,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
1000) + jiffies;
spin_unlock_irq(&phba->hbalock);
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
@@ -11797,9 +12084,13 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
*/
break;
}
- } else
+ } else {
spin_unlock_irq(&phba->hbalock);
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+ }
+
lpfc_sli_mbox_sys_flush(phba);
}
@@ -13136,7 +13427,6 @@ static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
bool workposted = false;
- struct fc_frame_header *fc_hdr;
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
struct lpfc_nvmet_tgtport *tgtp;
@@ -13173,9 +13463,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
hrq->RQ_buf_posted--;
memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
- /* If a NVME LS event (type 0x28), treat it as Fast path */
- fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
-
/* save off the frame for the worker thread to process */
list_add_tail(&dma_buf->cq_event.list,
&phba->sli4_hba.sp_queue_event);
@@ -14558,13 +14845,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
- uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
/* sanity check on queue memory */
if (!cq || !eq)
return -ENODEV;
- if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = cq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 399c0015c546..e76c380e1a84 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -886,3 +886,4 @@ int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 501249509af4..5a0d512ff497 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.0.0.6"
+#define LPFC_DRIVER_VERSION "12.0.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1ff0f7de9105..c340e0e47473 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -207,7 +207,7 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
struct lpfc_vport *vport;
unsigned long flags;
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport == new_vport)
continue;
@@ -215,11 +215,11 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
if (memcmp(&vport->fc_sparam.portName,
&new_vport->fc_sparam.portName,
sizeof(struct lpfc_name)) == 0) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return 0;
}
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return 1;
}
@@ -825,9 +825,9 @@ skip_logo:
lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1828 Vport Deleted.\n");
scsi_host_put(shost);
@@ -844,7 +844,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
GFP_KERNEL);
if (vports == NULL)
return NULL;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
if (port_iterator->load_flag & FC_UNLOADING)
continue;
@@ -856,7 +856,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
}
vports[index++] = port_iterator;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
return vports;
}
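Switching these walks from hbalock to a dedicated port_list_lock is classic lock splitting: the port list gets its own, narrowly scoped lock so traversals no longer contend with the many unrelated hbalock users. A minimal sketch with illustrative types:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct hba {
            spinlock_t port_list_lock;      /* protects port_list only */
            struct list_head port_list;
    };

    static int count_ports(struct hba *h)
    {
            struct list_head *p;
            int n = 0;

            spin_lock_irq(&h->port_list_lock);
            list_for_each(p, &h->port_list)
                    n++;
            spin_unlock_irq(&h->port_list_lock);
            return n;
    }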
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index eb551f3cc471..764d320bb2ca 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -52,14 +52,12 @@ struct mac_esp_priv {
struct esp *esp;
void __iomem *pdma_regs;
void __iomem *pdma_io;
- int error;
};
static struct esp *esp_chips[2];
static DEFINE_SPINLOCK(esp_chips_lock);
#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
- platform_get_drvdata((struct platform_device *) \
- (esp->dev)))
+ dev_get_drvdata((esp)->dev))
static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
@@ -71,38 +69,6 @@ static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
return nubus_readb(esp->regs + reg * 16);
}
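The macro change works because platform_set_drvdata() stores the private pointer on the embedded struct device, so once esp->dev holds &pdev->dev the common code can use the bus-agnostic dev_get_drvdata(). A sketch of the pairing (my_probe is hypothetical):

    static int my_probe(struct platform_device *pdev)
    {
            struct mac_esp_priv *mep;

            mep = devm_kzalloc(&pdev->dev, sizeof(*mep), GFP_KERNEL);
            if (!mep)
                    return -ENOMEM;
            platform_set_drvdata(pdev, mep);        /* lands on &pdev->dev */
            return 0;
    }

    /* elsewhere, given only a struct device *dev: */
    /* struct mac_esp_priv *mep = dev_get_drvdata(dev); */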
-/* For pseudo DMA and PIO we need the virtual address
- * so this address mapping is the identity mapping.
- */
-
-static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return (dma_addr_t)buf;
-}
-
-static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- int i;
-
- for (i = 0; i < num_sg; i++)
- sg[i].dma_address = (u32)sg_virt(&sg[i]);
- return num_sg;
-}
-
-static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- /* Nothing to do. */
-}
-
-static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- /* Nothing to do. */
-}
-
static void mac_esp_reset_dma(struct esp *esp)
{
/* Nothing to do. */
@@ -120,12 +86,11 @@ static void mac_esp_dma_invalidate(struct esp *esp)
static int mac_esp_dma_error(struct esp *esp)
{
- return MAC_ESP_GET_PRIV(esp)->error;
+ return esp->send_cmd_error;
}
static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
{
- struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
int i = 500000;
do {
@@ -140,7 +105,7 @@ static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
esp_read8(ESP_STATUS));
- mep->error = 1;
+ esp->send_cmd_error = 1;
return 1;
}
@@ -166,7 +131,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
esp_read8(ESP_STATUS));
- mep->error = 1;
+ esp->send_cmd_error = 1;
return 1;
}
@@ -233,7 +198,7 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
{
struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
- mep->error = 0;
+ esp->send_cmd_error = 0;
if (!write)
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@@ -271,164 +236,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
} while (esp_count);
}
-/*
- * Programmed IO routines follow.
- */
-
-static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
-{
- int i = 500000;
-
- do {
- unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
-
- if (fbytes)
- return fbytes;
-
- udelay(2);
- } while (--i);
-
- printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
- esp_read8(ESP_STATUS));
- return 0;
-}
-
-static inline int mac_esp_wait_for_intr(struct esp *esp)
-{
- struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
- int i = 500000;
-
- do {
- esp->sreg = esp_read8(ESP_STATUS);
- if (esp->sreg & ESP_STAT_INTR)
- return 0;
-
- udelay(2);
- } while (--i);
-
- printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
- mep->error = 1;
- return 1;
-}
-
-#define MAC_ESP_PIO_LOOP(operands, reg1) \
- asm volatile ( \
- "1: moveb " operands " \n" \
- " subqw #1,%1 \n" \
- " jbne 1b \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo))
-
-#define MAC_ESP_PIO_FILL(operands, reg1) \
- asm volatile ( \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " subqw #8,%1 \n" \
- " subqw #8,%1 \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo))
-
-#define MAC_ESP_FIFO_SIZE 16
-
-static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
- u32 dma_count, int write, u8 cmd)
-{
- struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
- u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
- u8 phase = esp->sreg & ESP_STAT_PMASK;
-
- cmd &= ~ESP_CMD_DMA;
- mep->error = 0;
-
- if (write) {
- u8 *dst = (u8 *)addr;
- u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
-
- scsi_esp_cmd(esp, cmd);
-
- while (1) {
- if (!mac_esp_wait_for_fifo(esp))
- break;
-
- *dst++ = esp_read8(ESP_FDATA);
- --esp_count;
-
- if (!esp_count)
- break;
-
- if (mac_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = esp_read8(ESP_INTRPT);
- if (esp->ireg & mask) {
- mep->error = 1;
- break;
- }
-
- if (phase == ESP_MIP)
- scsi_esp_cmd(esp, ESP_CMD_MOK);
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- } else {
- scsi_esp_cmd(esp, ESP_CMD_FLUSH);
-
- if (esp_count >= MAC_ESP_FIFO_SIZE)
- MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
- else
- MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
-
- scsi_esp_cmd(esp, cmd);
-
- while (esp_count) {
- unsigned int n;
-
- if (mac_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = esp_read8(ESP_INTRPT);
- if (esp->ireg & ~ESP_INTR_BSERV) {
- mep->error = 1;
- break;
- }
-
- n = MAC_ESP_FIFO_SIZE -
- (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
- if (n > esp_count)
- n = esp_count;
-
- if (n == MAC_ESP_FIFO_SIZE) {
- MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
- } else {
- esp_count -= n;
- MAC_ESP_PIO_LOOP("%0@+,%2@", n);
- }
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- }
-}
-
static int mac_esp_irq_pending(struct esp *esp)
{
if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
@@ -470,10 +277,6 @@ static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
static struct esp_driver_ops mac_esp_ops = {
.esp_write8 = mac_esp_write8,
.esp_read8 = mac_esp_read8,
- .map_single = mac_esp_map_single,
- .map_sg = mac_esp_map_sg,
- .unmap_single = mac_esp_unmap_single,
- .unmap_sg = mac_esp_unmap_sg,
.irq_pending = mac_esp_irq_pending,
.dma_length_limit = mac_esp_dma_length_limit,
.reset_dma = mac_esp_reset_dma,
@@ -508,7 +311,7 @@ static int esp_mac_probe(struct platform_device *dev)
esp = shost_priv(host);
esp->host = host;
- esp->dev = dev;
+ esp->dev = &dev->dev;
esp->command_block = kzalloc(16, GFP_KERNEL);
if (!esp->command_block)
@@ -551,14 +354,16 @@ static int esp_mac_probe(struct platform_device *dev)
mep->pdma_regs = NULL;
break;
}
+ esp->fifo_reg = esp->regs + ESP_FDATA * 16;
esp->ops = &mac_esp_ops;
+ esp->flags = ESP_FLAG_NO_DMA_MAP;
if (mep->pdma_io == NULL) {
printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
esp_write8(0, ESP_TCLOW);
esp_write8(0, ESP_TCMED);
- esp->flags = ESP_FLAG_DISABLE_SYNC;
- mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
+ esp->flags |= ESP_FLAG_DISABLE_SYNC;
+ mac_esp_ops.send_dma_cmd = esp_send_pio_cmd;
} else {
printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
}
@@ -577,7 +382,7 @@ static int esp_mac_probe(struct platform_device *dev)
esp_chips[dev->id] = esp;
spin_unlock(&esp_chips_lock);
- err = scsi_esp_register(esp, &dev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 530358cdcb39..3b7abe5ca7f5 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -202,13 +202,6 @@ module_param_named(debug_level, mraid_debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
/*
- * ### global data ###
- */
-static uint8_t megaraid_mbox_version[8] =
- { 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
-
-
-/*
* PCI table for all supported controllers.
*/
static struct pci_device_id pci_id_table_g[] = {
@@ -457,10 +450,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
// Setup the default DMA mask. This would be changed later on
// depending on hardware capabilities
- if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
-
+ if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
+ "megaraid: dma_set_mask failed:%d\n", __LINE__));
goto out_free_adapter;
}
@@ -484,7 +476,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
// Start the mailbox based controller
if (megaraid_init_mbox(adapter) != 0) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: maibox adapter did not initialize\n"));
+ "megaraid: mailbox adapter did not initialize\n"));
goto out_free_adapter;
}
@@ -878,11 +870,12 @@ megaraid_init_mbox(adapter_t *adapter)
adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
- if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(64))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: DMA mask for 64-bit failed\n"));
- if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: 32-bit DMA mask failed\n"));
goto out_free_sysfs_res;
@@ -950,7 +943,7 @@ megaraid_fini_mbox(adapter_t *adapter)
* megaraid_alloc_cmd_packets - allocate shared mailbox
* @adapter : soft state of the raid controller
*
- * Allocate and align the shared mailbox. This maibox is used to issue
+ * Allocate and align the shared mailbox. This mailbox is used to issue
* all the commands. For IO based controllers, the mailbox is also registered
* with the FW. Allocate memory for all commands as well.
* This is our big allocator.
@@ -975,9 +968,9 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
* Allocate the common 16-byte aligned memory for the handshake
* mailbox.
*/
- raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev,
- sizeof(mbox64_t),
- &raid_dev->una_mbox64_dma);
+ raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev,
+ sizeof(mbox64_t), &raid_dev->una_mbox64_dma,
+ GFP_KERNEL);
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
@@ -1003,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
align;
// Allocate memory for commands issued internally
- adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE,
- &adapter->ibuf_dma_h);
+ adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
+ &adapter->ibuf_dma_h, GFP_KERNEL);
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
@@ -1082,7 +1075,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
scb->scp = NULL;
scb->state = SCB_FREE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
scb->dma_type = MRAID_DMA_NONE;
scb->dev_channel = -1;
scb->dev_target = -1;
@@ -1098,10 +1091,10 @@ out_teardown_dma_pools:
out_free_scb_list:
kfree(adapter->kscb_list);
out_free_ibuf:
- pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
+ dma_free_coherent(&pdev->dev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
adapter->ibuf_dma_h);
out_free_common_mbox:
- pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
return -1;
@@ -1123,10 +1116,10 @@ megaraid_free_cmd_packets(adapter_t *adapter)
kfree(adapter->kscb_list);
- pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
+ dma_free_coherent(&adapter->pdev->dev, MBOX_IBUF_SIZE,
(void *)adapter->ibuf, adapter->ibuf_dma_h);
- pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
return;
}
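The megaraid_mbox conversions above are a mechanical migration from the legacy PCI DMA wrappers to the generic DMA API; each call maps one-to-one once &pdev->dev and an explicit gfp_t are supplied. The correspondence, with a representative pair:

    /* pci_alloc_consistent(pdev, ...)    -> dma_alloc_coherent(&pdev->dev, ..., GFP_KERNEL)
     * pci_zalloc_consistent(pdev, ...)   -> dma_zalloc_coherent(&pdev->dev, ..., GFP_KERNEL)
     * pci_free_consistent(pdev, ...)     -> dma_free_coherent(&pdev->dev, ...)
     * pci_set_dma_mask(pdev, mask)       -> dma_set_mask(&pdev->dev, mask)
     * PCI_DMA_{TODEVICE,FROMDEVICE,NONE} -> DMA_{TO_DEVICE,FROM_DEVICE,NONE}
     */
    buf = dma_zalloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    /* ... use buf ... */
    dma_free_coherent(&pdev->dev, size, buf, handle);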
@@ -1428,12 +1421,6 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
adapter->outstanding_cmds++;
- if (scb->dma_direction == PCI_DMA_TODEVICE)
- pci_dma_sync_sg_for_device(adapter->pdev,
- scsi_sglist(scb->scp),
- scsi_sg_count(scb->scp),
- PCI_DMA_TODEVICE);
-
mbox->busy = 1; // Set busy
mbox->poll = 0;
mbox->ack = 0;
@@ -2181,31 +2168,6 @@ megaraid_isr(int irq, void *devp)
/**
- * megaraid_mbox_sync_scb - sync kernel buffers
- * @adapter : controller's soft state
- * @scb : pointer to the resource packet
- *
- * DMA sync if required.
- */
-static void
-megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
-{
- mbox_ccb_t *ccb;
-
- ccb = (mbox_ccb_t *)scb->ccb;
-
- if (scb->dma_direction == PCI_DMA_FROMDEVICE)
- pci_dma_sync_sg_for_cpu(adapter->pdev,
- scsi_sglist(scb->scp),
- scsi_sg_count(scb->scp),
- PCI_DMA_FROMDEVICE);
-
- scsi_dma_unmap(scb->scp);
- return;
-}
-
-
-/**
* megaraid_mbox_dpc - the tasklet to complete the commands from completed list
* @devp : pointer to HBA soft state
*
@@ -2403,9 +2365,7 @@ megaraid_mbox_dpc(unsigned long devp)
megaraid_mbox_display_scb(adapter, scb);
}
- // Free our internal resources and call the mid-layer callback
- // routine
- megaraid_mbox_sync_scb(adapter, scb);
+ scsi_dma_unmap(scp);
// remove from local clist
list_del_init(&scb->list);
@@ -2577,7 +2537,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
uint8_t raw_mbox[sizeof(mbox_t)];
int rval;
int recovery_window;
- int recovering;
int i;
uioc_t *kioc;
@@ -2591,7 +2550,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
return FAILED;
}
-
// Under exceptional conditions, FW can take up to 3 minutes to
// complete command processing. Wait for an additional 2 minutes for the
// pending commands counter to go down to 0. If it doesn't, let the
@@ -2640,8 +2598,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
- recovering = adapter->outstanding_cmds;
-
for (i = 0; i < recovery_window; i++) {
megaraid_ack_sequence(adapter);
@@ -2725,13 +2681,10 @@ static int
mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
{
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
- mbox64_t *mbox64;
mbox_t *mbox;
uint8_t status;
int i;
-
- mbox64 = raid_dev->mbox64;
mbox = raid_dev->mbox;
/*
@@ -2948,9 +2901,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
* Issue an ENQUIRY3 command to find out certain adapter parameters,
* e.g., max channels, max commands etc.
*/
- pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
- &pinfo_dma_h);
-
+ pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
+ &pinfo_dma_h, GFP_KERNEL);
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __func__,
@@ -2971,7 +2923,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
pinfo, pinfo_dma_h);
return -1;
@@ -3002,7 +2954,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
con_log(CL_ANN, (KERN_WARNING
"megaraid: product info failed\n"));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
pinfo, pinfo_dma_h);
return -1;
@@ -3038,7 +2990,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
"megaraid: fw version:[%s] bios version:[%s]\n",
adapter->fw_version, adapter->bios_version));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo,
pinfo_dma_h);
return 0;
@@ -3135,7 +3087,6 @@ megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
static int
megaraid_mbox_support_random_del(adapter_t *adapter)
{
- mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
int rval;
@@ -3157,8 +3108,6 @@ megaraid_mbox_support_random_del(adapter_t *adapter)
return 0;
}
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = FC_DEL_LOGDRV;
@@ -3263,12 +3212,8 @@ megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
static void
megaraid_mbox_flush_cache(adapter_t *adapter)
{
- mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
-
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = FLUSH_ADAPTER;
@@ -3299,7 +3244,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
- mbox64_t *mbox64;
int status = 0;
int i;
uint32_t dword;
@@ -3310,7 +3254,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
raw_mbox[0] = 0xFF;
- mbox64 = raid_dev->mbox64;
mbox = raid_dev->mbox;
/* Wait until mailbox is free */
@@ -3515,7 +3458,7 @@ megaraid_cmm_register(adapter_t *adapter)
scb->scp = NULL;
scb->state = SCB_FREE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
scb->dma_type = MRAID_DMA_NONE;
scb->dev_channel = -1;
scb->dev_target = -1;
@@ -3653,7 +3596,7 @@ megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
scb->state = SCB_ACTIVE;
scb->dma_type = MRAID_DMA_NONE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
ccb = (mbox_ccb_t *)scb->ccb;
mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
@@ -3794,10 +3737,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
static int
gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
{
- uint8_t dmajor;
-
- dmajor = megaraid_mbox_version[0];
-
hinfo->pci_vendor_id = adapter->pdev->vendor;
hinfo->pci_device_id = adapter->pdev->device;
hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
@@ -3843,8 +3782,8 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
- raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
- PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
+ raid_dev->sysfs_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ PAGE_SIZE, &raid_dev->sysfs_buffer_dma, GFP_KERNEL);
if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
!raid_dev->sysfs_buffer) {
@@ -3881,7 +3820,7 @@ megaraid_sysfs_free_resources(adapter_t *adapter)
kfree(raid_dev->sysfs_mbox64);
if (raid_dev->sysfs_buffer) {
- pci_free_consistent(adapter->pdev, PAGE_SIZE,
+ dma_free_coherent(&adapter->pdev->dev, PAGE_SIZE,
raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
}
}
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index c1d86d961a92..e075aeb4012f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -117,7 +117,7 @@
* @raw_mbox : raw mailbox pointer
* @mbox : mailbox
* @mbox64 : extended mailbox
- * @mbox_dma_h : maibox dma address
+ * @mbox_dma_h : mailbox dma address
* @sgl64 : 64-bit scatter-gather list
* @sgl32 : 32-bit scatter-gather list
* @sgl_dma_h : dma handle for the scatter-gather list
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9aa9590c5373..9b90c716f06d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1330,11 +1330,11 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
device_id = MEGASAS_DEV_INDEX(scp);
pthru = (struct megasas_pthru_frame *)cmd->frame;
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
flags = MFI_FRAME_DIR_WRITE;
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
flags = MFI_FRAME_DIR_READ;
- else if (scp->sc_data_direction == PCI_DMA_NONE)
+ else if (scp->sc_data_direction == DMA_NONE)
flags = MFI_FRAME_DIR_NONE;
if (instance->flag_ieee == 1) {
@@ -1428,9 +1428,9 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
device_id = MEGASAS_DEV_INDEX(scp);
ldio = (struct megasas_io_frame *)cmd->frame;
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
flags = MFI_FRAME_DIR_WRITE;
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
flags = MFI_FRAME_DIR_READ;
if (instance->flag_ieee == 1) {
@@ -2240,9 +2240,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION_111));
else {
new_affiliation_111 =
- pci_zalloc_consistent(instance->pdev,
+ dma_zalloc_coherent(&instance->pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
- &new_affiliation_111_h);
+ &new_affiliation_111_h, GFP_KERNEL);
if (!new_affiliation_111) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2302,7 +2302,7 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
}
out:
if (new_affiliation_111) {
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
new_affiliation_111,
new_affiliation_111_h);
@@ -2347,10 +2347,10 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION));
else {
new_affiliation =
- pci_zalloc_consistent(instance->pdev,
+ dma_zalloc_coherent(&instance->pdev->dev,
(MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
- &new_affiliation_h);
+ &new_affiliation_h, GFP_KERNEL);
if (!new_affiliation) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2470,7 +2470,7 @@ out:
}
if (new_affiliation)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
(MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
new_affiliation, new_affiliation_h);
@@ -2513,9 +2513,9 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
if (initial) {
instance->hb_host_mem =
- pci_zalloc_consistent(instance->pdev,
+ dma_zalloc_coherent(&instance->pdev->dev,
sizeof(struct MR_CTRL_HB_HOST_MEM),
- &instance->hb_host_mem_h);
+ &instance->hb_host_mem_h, GFP_KERNEL);
if (!instance->hb_host_mem) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
" memory for heartbeat host memory for scsi%d\n",
@@ -4995,9 +4995,8 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
context_sz = sizeof(u32);
reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
- instance->reply_queue = pci_alloc_consistent(instance->pdev,
- reply_q_sz,
- &instance->reply_queue_h);
+ instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
+ reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
if (!instance->reply_queue) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
@@ -5029,7 +5028,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
fail_fw_init:
- pci_free_consistent(instance->pdev, reply_q_sz,
+ dma_free_coherent(&instance->pdev->dev, reply_q_sz,
instance->reply_queue, instance->reply_queue_h);
fail_reply_queue:
megasas_free_cmds(instance);
@@ -5533,7 +5532,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
else {
if (instance->crash_dump_buf)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
CRASH_DMA_BUF_SIZE,
instance->crash_dump_buf,
instance->crash_dump_h);
@@ -5616,7 +5615,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
if (instance->reply_queue)
- pci_free_consistent(instance->pdev, reply_q_sz,
+ dma_free_coherent(&instance->pdev->dev, reply_q_sz,
instance->reply_queue, instance->reply_queue_h);
megasas_free_cmds(instance);
@@ -5655,10 +5654,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
}
dcmd = &cmd->frame->dcmd;
- el_info = pci_zalloc_consistent(instance->pdev,
- sizeof(struct megasas_evt_log_info),
- &el_info_h);
-
+ el_info = dma_zalloc_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info), &el_info_h,
+ GFP_KERNEL);
if (!el_info) {
megasas_return_cmd(instance, cmd);
return -ENOMEM;
@@ -5695,8 +5693,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
eli->boot_seq_num = el_info->boot_seq_num;
dcmd_failed:
- pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
- el_info, el_info_h);
+ dma_free_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info),
+ el_info, el_info_h);
megasas_return_cmd(instance, cmd);
@@ -6134,10 +6133,10 @@ static inline void megasas_set_adapter_type(struct megasas_instance *instance)
static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
{
- instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
- &instance->producer_h);
- instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
- &instance->consumer_h);
+ instance->producer = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(u32), &instance->producer_h, GFP_KERNEL);
+ instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(u32), &instance->consumer_h, GFP_KERNEL);
if (!instance->producer || !instance->consumer) {
dev_err(&instance->pdev->dev,
@@ -6199,11 +6198,11 @@ static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
kfree(instance->reply_map);
if (instance->adapter_type == MFI_SERIES) {
if (instance->producer)
- pci_free_consistent(instance->pdev, sizeof(u32),
+ dma_free_coherent(&instance->pdev->dev, sizeof(u32),
instance->producer,
instance->producer_h);
if (instance->consumer)
- pci_free_consistent(instance->pdev, sizeof(u32),
+ dma_free_coherent(&instance->pdev->dev, sizeof(u32),
instance->consumer,
instance->consumer_h);
} else {
@@ -6224,10 +6223,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
struct pci_dev *pdev = instance->pdev;
struct fusion_context *fusion = instance->ctrl_context;
- instance->evt_detail =
- pci_alloc_consistent(pdev,
- sizeof(struct megasas_evt_detail),
- &instance->evt_detail_h);
+ instance->evt_detail = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct megasas_evt_detail),
+ &instance->evt_detail_h, GFP_KERNEL);
if (!instance->evt_detail) {
dev_err(&instance->pdev->dev,
@@ -6250,9 +6248,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->pd_list_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
- &instance->pd_list_buf_h);
+ &instance->pd_list_buf_h, GFP_KERNEL);
if (!instance->pd_list_buf) {
dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
@@ -6260,9 +6258,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->ctrl_info_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
sizeof(struct megasas_ctrl_info),
- &instance->ctrl_info_buf_h);
+ &instance->ctrl_info_buf_h, GFP_KERNEL);
if (!instance->ctrl_info_buf) {
dev_err(&pdev->dev,
@@ -6271,9 +6269,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->ld_list_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
sizeof(struct MR_LD_LIST),
- &instance->ld_list_buf_h);
+ &instance->ld_list_buf_h, GFP_KERNEL);
if (!instance->ld_list_buf) {
dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
@@ -6281,9 +6279,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->ld_targetid_list_buf =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_LD_TARGETID_LIST),
- &instance->ld_targetid_list_buf_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_LD_TARGETID_LIST),
+ &instance->ld_targetid_list_buf_h, GFP_KERNEL);
if (!instance->ld_targetid_list_buf) {
dev_err(&pdev->dev,
@@ -6293,21 +6291,20 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
if (!reset_devices) {
instance->system_info_buf =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_DRV_SYSTEM_INFO),
- &instance->system_info_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_DRV_SYSTEM_INFO),
+ &instance->system_info_h, GFP_KERNEL);
instance->pd_info =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_PD_INFO),
- &instance->pd_info_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_PD_INFO),
+ &instance->pd_info_h, GFP_KERNEL);
instance->tgt_prop =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_TARGET_PROPERTIES),
- &instance->tgt_prop_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_TARGET_PROPERTIES),
+ &instance->tgt_prop_h, GFP_KERNEL);
instance->crash_dump_buf =
- pci_alloc_consistent(pdev,
- CRASH_DMA_BUF_SIZE,
- &instance->crash_dump_h);
+ dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h, GFP_KERNEL);
if (!instance->system_info_buf)
dev_err(&instance->pdev->dev,
@@ -6343,7 +6340,7 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
struct fusion_context *fusion = instance->ctrl_context;
if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
instance->evt_detail,
instance->evt_detail_h);
@@ -6354,41 +6351,41 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
fusion->ioc_init_request_phys);
if (instance->pd_list_buf)
- pci_free_consistent(pdev,
+ dma_free_coherent(&pdev->dev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
instance->pd_list_buf,
instance->pd_list_buf_h);
if (instance->ld_list_buf)
- pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
instance->ld_list_buf,
instance->ld_list_buf_h);
if (instance->ld_targetid_list_buf)
- pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
instance->ld_targetid_list_buf,
instance->ld_targetid_list_buf_h);
if (instance->ctrl_info_buf)
- pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
+ dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
instance->ctrl_info_buf,
instance->ctrl_info_buf_h);
if (instance->system_info_buf)
- pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
instance->system_info_buf,
instance->system_info_h);
if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
instance->pd_info, instance->pd_info_h);
if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
instance->tgt_prop, instance->tgt_prop_h);
if (instance->crash_dump_buf)
- pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
instance->crash_dump_buf,
instance->crash_dump_h);
}
@@ -6516,17 +6513,20 @@ static int megasas_probe_one(struct pci_dev *pdev,
if (instance->requestorId) {
if (instance->PlasmaFW111) {
instance->vf_affiliation_111 =
- pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
- &instance->vf_affiliation_111_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &instance->vf_affiliation_111_h,
+ GFP_KERNEL);
if (!instance->vf_affiliation_111)
dev_warn(&pdev->dev, "Can't allocate "
"memory for VF affiliation buffer\n");
} else {
instance->vf_affiliation =
- pci_alloc_consistent(pdev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &instance->vf_affiliation_h);
+ dma_alloc_coherent(&pdev->dev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &instance->vf_affiliation_h,
+ GFP_KERNEL);
if (!instance->vf_affiliation)
dev_warn(&pdev->dev, "Can't allocate "
"memory for VF affiliation buffer\n");
@@ -6994,19 +6994,19 @@ skip_firing_dcmds:
}
if (instance->vf_affiliation)
- pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+ dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
instance->vf_affiliation,
instance->vf_affiliation_h);
if (instance->vf_affiliation_111)
- pci_free_consistent(pdev,
+ dma_free_coherent(&pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
instance->vf_affiliation_111,
instance->vf_affiliation_111_h);
if (instance->hb_host_mem)
- pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
instance->hb_host_mem,
instance->hb_host_mem_h);
@@ -7254,7 +7254,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
/*
* We don't change the dma_coherent_mask, so
- * pci_alloc_consistent only returns 32bit addresses
+ * dma_alloc_coherent only returns 32bit addresses
*/
if (instance->consistent_mask_64bit) {
kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
@@ -7523,6 +7523,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
get_user(user_sense_off, &cioc->sense_off))
return -EFAULT;
+ if (local_sense_off != user_sense_off)
+ return -EINVAL;
+
if (local_sense_len) {
void __user **sense_ioc_ptr =
(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
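The added comparison rejects the ioctl when the two independently fetched copies of sense_off disagree, so a racing or malicious writer cannot steer the sense-pointer computation that follows. The generic double-fetch guard, with a hypothetical uarg:

    u32 first, second;

    /* uarg: hypothetical __user pointer; "off" is read twice below */
    if (get_user(first, &uarg->off))
            return -EFAULT;
    /* ... other user-space accesses may intervene here ... */
    if (get_user(second, &uarg->off))
            return -EFAULT;
    if (first != second)
            return -EINVAL;     /* value changed between fetches */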
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index c7f95bace353..f74b5ea24f0f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -684,8 +684,8 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size,
- &fusion->rdpq_phys);
+ fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev,
+ array_size, &fusion->rdpq_phys, GFP_KERNEL);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
@@ -813,7 +813,7 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
dma_pool_destroy(fusion->reply_frames_desc_pool_align);
if (fusion->rdpq_virt)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
fusion->rdpq_virt, fusion->rdpq_phys);
}
@@ -2209,7 +2209,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ if (scp->sc_data_direction == DMA_FROM_DEVICE)
cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
else
cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
@@ -2238,7 +2238,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[31] = (u8)(num_blocks & 0xff);
/* set SCSI IO EEDPFlags */
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+ if (scp->sc_data_direction == DMA_FROM_DEVICE) {
io_request->EEDPFlags = cpu_to_le16(
MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
@@ -2621,7 +2621,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
scsi_buff_len = scsi_bufflen(scp);
io_request->DataLength = cpu_to_le32(scsi_buff_len);
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ if (scp->sc_data_direction == DMA_FROM_DEVICE)
io_info.isRead = 1;
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
@@ -3088,9 +3088,9 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
io_request->SGLOffset0 =
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 82e01dbe90af..ec6940f2fcb3 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1915,8 +1915,8 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* We use the PCI APIs for now until the generic one gets fixed
* enough or until we get some macio-specific versions
*/
- dma_cmd_space = pci_zalloc_consistent(macio_get_pci_dev(mdev),
- ms->dma_cmd_size, &dma_cmd_bus);
+ dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev,
+ ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL);
if (dma_cmd_space == NULL) {
printk(KERN_ERR "mesh: can't allocate DMA table\n");
goto out_unmap;
@@ -1974,7 +1974,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
*/
mesh_shutdown(mdev);
set_mesh_power(ms, 0);
- pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+ dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
ms->dma_cmd_space, ms->dma_cmd_bus);
out_unmap:
iounmap(ms->dma);
@@ -2007,7 +2007,7 @@ static int mesh_remove(struct macio_dev *mdev)
iounmap(ms->dma);
/* Free DMA commands memory */
- pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+ dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
ms->dma_cmd_space, ms->dma_cmd_bus);
/* Release memory resources */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 59d7844ee022..2500377d0723 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -122,8 +122,8 @@ mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
if (!(status & MPT3_CMD_RESET))
issue_reset = 1;
- pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
- ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
+ ioc_err(ioc, "Command %s\n",
+ issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
_debug_dump_mf(mpi_request, sz);
return issue_reset;
@@ -336,9 +336,7 @@ _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
return ct->chain_buffer;
}
}
- pr_info(MPT3SAS_FMT
- "Provided chain_buffer_dma address is not in the lookup list\n",
- ioc->name);
+ ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
return NULL;
}
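The ioc_err()/ioc_warn()/ioc_info() wrappers used throughout these hunks fold the adapter name into the format string so call sites stop passing ioc->name by hand. A plausible shape for them (the driver's actual definitions live in mpt3sas_base.h):

    #define ioc_err(ioc, fmt, ...) \
            pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
    #define ioc_warn(ioc, fmt, ...) \
            pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
    #define ioc_info(ioc, fmt, ...) \
            pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)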
@@ -394,7 +392,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
/* Get scsi_cmd using smid */
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (scmd == NULL) {
- pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
+ ioc_err(ioc, "scmd is NULL\n");
return;
}
@@ -532,11 +530,11 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
struct pci_dev *pdev;
- if ((ioc == NULL))
+ if (!ioc)
return -1;
pdev = ioc->pdev;
- if ((pdev == NULL))
+ if (!pdev)
return -1;
pci_stop_and_remove_bus_device_locked(pdev);
return 0;
@@ -566,8 +564,7 @@ _base_fault_reset_work(struct work_struct *work)
doorbell = mpt3sas_base_get_iocstate(ioc, 0);
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
- pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
- ioc->name);
+ ioc_err(ioc, "SAS host is non-operational !!!!\n");
/* It may be possible that EEH recovery can resolve some of
* pci bus failure issues rather removing the dead ioc function
@@ -600,13 +597,11 @@ _base_fault_reset_work(struct work_struct *work)
p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
"%s_dead_ioc_%d", ioc->driver_name, ioc->id);
if (IS_ERR(p))
- pr_err(MPT3SAS_FMT
- "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
+ __func__);
else
- pr_err(MPT3SAS_FMT
- "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
+ __func__);
return; /* don't rearm timer */
}
@@ -614,8 +609,8 @@ _base_fault_reset_work(struct work_struct *work)
if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
- __func__, (rc == 0) ? "success" : "failed");
+ ioc_warn(ioc, "%s: hard reset: %s\n",
+ __func__, rc == 0 ? "success" : "failed");
doorbell = mpt3sas_base_get_iocstate(ioc, 0);
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
mpt3sas_base_fault_info(ioc, doorbell &
@@ -657,8 +652,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
ioc->fault_reset_work_q =
create_singlethread_workqueue(ioc->fault_reset_work_q_name);
if (!ioc->fault_reset_work_q) {
- pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
- ioc->name, __func__, __LINE__);
+ ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
return;
}
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
@@ -700,8 +694,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
{
- pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
- ioc->name, fault_code);
+ ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
/**
@@ -728,8 +721,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_fault_info(ioc , doorbell);
else {
writel(0xC0FFEE00, &ioc->chip->Doorbell);
- pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
- ioc->name);
+ ioc_err(ioc, "Firmware is halted due to command timeout\n");
}
if (ioc->fwfault_debug == 2)
@@ -956,8 +948,8 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
break;
}
- pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
- ioc->name, desc, ioc_status, request_hdr, func_str);
+ ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+ desc, ioc_status, request_hdr, func_str);
_debug_dump_mf(request_hdr, frame_sz/4);
}
@@ -1003,9 +995,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
{
Mpi2EventDataSasDiscovery_t *event_data =
(Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
- pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
- (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
- "start" : "stop");
+ ioc_info(ioc, "Discovery: (%s)",
+ event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+ "start" : "stop");
if (event_data->DiscoveryStatus)
pr_cont(" discovery_status(0x%08x)",
le32_to_cpu(event_data->DiscoveryStatus));
@@ -1059,14 +1051,13 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
{
Mpi26EventDataPCIeEnumeration_t *event_data =
(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
- pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
- (event_data->ReasonCode ==
- MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
- "start" : "stop");
+ ioc_info(ioc, "PCIE Enumeration: (%s)",
+ event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
+ "start" : "stop");
if (event_data->EnumerationStatus)
- pr_info("enumeration_status(0x%08x)",
- le32_to_cpu(event_data->EnumerationStatus));
- pr_info("\n");
+ pr_cont("enumeration_status(0x%08x)",
+ le32_to_cpu(event_data->EnumerationStatus));
+ pr_cont("\n");
return;
}
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
@@ -1077,7 +1068,7 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
if (!desc)
return;
- pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
+ ioc_info(ioc, "%s\n", desc);
}
/**
@@ -1128,11 +1119,9 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
break;
}
- pr_warn(MPT3SAS_FMT
- "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
- ioc->name, log_info,
- originator_str, sas_loginfo.dw.code,
- sas_loginfo.dw.subcode);
+ ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
+ log_info,
+ originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
/**
@@ -1152,8 +1141,8 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
@@ -1249,9 +1238,9 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
delayed_event_ack->EventContext = mpi_reply->EventContext;
list_add_tail(&delayed_event_ack->list,
&ioc->delayed_event_ack_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED: EVENT ACK: event (0x%04x)\n",
- ioc->name, le16_to_cpu(mpi_reply->Event)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
+ le16_to_cpu(mpi_reply->Event)));
goto out;
}
@@ -2270,7 +2259,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device,
- "pci_map_sg failed: request for %d bytes!\n",
+ "scsi_dma_map failed: request for %d bytes!\n",
scsi_bufflen(scmd));
return -ENOMEM;
}
@@ -2418,7 +2407,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device,
- "pci_map_sg failed: request for %d bytes!\n",
+ "scsi_dma_map failed: request for %d bytes!\n",
scsi_bufflen(scmd));
return -ENOMEM;
}
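
The two message fixes above ("pci_map_sg failed" becoming "scsi_dma_map failed") bring the error text in line with the function these paths actually call. For context, the usual scsi_dma_map() calling pattern looks roughly like this (a minimal sketch, not the driver's code; scmd is the struct scsi_cmnd being mapped):

	struct scatterlist *sg;
	int i, sges_left;

	sges_left = scsi_dma_map(scmd);	/* number of mapped entries, < 0 on error */
	if (sges_left < 0)
		return -ENOMEM;		/* nothing was mapped; no unmap needed */
	scsi_for_each_sg(scmd, sg, sges_left, i) {
		/* build one hardware SGE per mapped segment */
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);
		/* ... program addr/len into the message frame ... */
	}
	/* the completion path balances this with scsi_dma_unmap(scmd) */
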
@@ -2563,44 +2552,41 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
+ u64 required_mask, coherent_mask;
struct sysinfo s;
- u64 consistent_dma_mask;
if (ioc->is_mcpu_endpoint)
goto try_32bit;
+ required_mask = dma_get_required_mask(&pdev->dev);
+ if (sizeof(dma_addr_t) == 4 || required_mask == 32)
+ goto try_32bit;
+
if (ioc->dma_mask)
- consistent_dma_mask = DMA_BIT_MASK(64);
+ coherent_mask = DMA_BIT_MASK(64);
else
- consistent_dma_mask = DMA_BIT_MASK(32);
-
- if (sizeof(dma_addr_t) > 4) {
- const uint64_t required_mask =
- dma_get_required_mask(&pdev->dev);
- if ((required_mask > DMA_BIT_MASK(32)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
- ioc->base_add_sg_single = &_base_add_sg_single_64;
- ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = 64;
- goto out;
- }
- }
+ coherent_mask = DMA_BIT_MASK(32);
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_coherent_mask(&pdev->dev, coherent_mask))
+ goto try_32bit;
+
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ ioc->dma_mask = 64;
+ goto out;
try_32bit:
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- ioc->base_add_sg_single = &_base_add_sg_single_32;
- ioc->sge_size = sizeof(Mpi2SGESimple32_t);
- ioc->dma_mask = 32;
- } else
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
return -ENODEV;
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ ioc->dma_mask = 32;
out:
si_meminfo(&s);
- pr_info(MPT3SAS_FMT
- "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
+ ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+ ioc->dma_mask, convert_to_kb(s.totalram));
return 0;
}
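
The rewritten _base_config_dma_addressing() expresses the old pci_set_dma_mask()/pci_set_consistent_dma_mask() pair through the generic DMA API. The dma_set_mask_and_coherent() used in the 32-bit fallback is shorthand for setting both masks at once (a sketch of the equivalence, assuming a device where both masks may be identical):

	int rc;

	/* these two forms are equivalent for the 32-bit fallback */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (!rc)
		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

The 64-bit path keeps the two calls separate because there the streaming mask is always 64 bits while the coherent mask still depends on the ioc->dma_mask state.
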
@@ -2639,8 +2625,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
if (!base) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
- ioc->name));
+ dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
return -EINVAL;
}
@@ -2658,9 +2643,8 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
pci_read_config_word(ioc->pdev, base + 2, &message_control);
ioc->msix_vector_count = (message_control & 0x3FF) + 1;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "msix is supported, vector_count(%d)\n",
- ioc->name, ioc->msix_vector_count));
+ dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
+ ioc->msix_vector_count));
return 0;
}
@@ -2702,8 +2686,8 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
if (!reply_q) {
- pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
- ioc->name, (int)sizeof(struct adapter_reply_queue));
+ ioc_err(ioc, "unable to allocate memory %zu!\n",
+ sizeof(struct adapter_reply_queue));
return -ENOMEM;
}
reply_q->ioc = ioc;
@@ -2719,7 +2703,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
IRQF_SHARED, reply_q->name, reply_q);
if (r) {
- pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+ pr_err("%s: unable to allocate interrupt %d!\n",
reply_q->name, pci_irq_vector(pdev, index));
kfree(reply_q);
return -EBUSY;
@@ -2761,8 +2745,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
reply_q->msix_index);
if (!mask) {
- pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
- ioc->name, reply_q->msix_index);
+ ioc_warn(ioc, "no affinity for msi %x\n",
+ reply_q->msix_index);
continue;
}
@@ -2833,9 +2817,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_queue_count = min_t(int, ioc->cpu_count,
ioc->msix_vector_count);
- printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
- ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
- ioc->cpu_count, max_msix_vectors);
+ ioc_info(ioc, "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
+ ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
local_max_msix_vectors = (reset_devices) ? 1 : 8;
@@ -2857,9 +2840,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
irq_flags);
if (r < 0) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "pci_alloc_irq_vectors failed (r=%d) !!!\n",
- ioc->name, r));
+ dfailprintk(ioc,
+ ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
+ r));
goto try_ioapic;
}
@@ -2882,9 +2865,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_queue_count = 1;
r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
if (r < 0) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
- ioc->name, r));
+ dfailprintk(ioc,
+ ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
+ r));
} else
r = _base_request_irq(ioc, 0);
@@ -2900,8 +2883,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
{
struct pci_dev *pdev = ioc->pdev;
- dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
- ioc->name, __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
_base_free_irq(ioc);
_base_disable_msix(ioc);
@@ -2939,13 +2921,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
phys_addr_t chip_phys = 0;
struct adapter_reply_queue *reply_q;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
- ioc->name, __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
if (pci_enable_device_mem(pdev)) {
- pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
- ioc->name);
+ ioc_warn(ioc, "pci_enable_device_mem: failed\n");
ioc->bars = 0;
return -ENODEV;
}
@@ -2953,8 +2933,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (pci_request_selected_regions(pdev, ioc->bars,
ioc->driver_name)) {
- pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
- ioc->name);
+ ioc_warn(ioc, "pci_request_selected_regions: failed\n");
ioc->bars = 0;
r = -ENODEV;
goto out_fail;
@@ -2967,8 +2946,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (_base_config_dma_addressing(ioc, pdev) != 0) {
- pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
- ioc->name, pci_name(pdev));
+ ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
r = -ENODEV;
goto out_fail;
}
@@ -2991,8 +2969,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->chip == NULL) {
- pr_err(MPT3SAS_FMT "unable to map adapter memory! "
- " or resource not found\n", ioc->name);
+ ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
r = -EINVAL;
goto out_fail;
}
@@ -3026,9 +3003,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
ioc->combined_reply_index_count,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->replyPostRegisterIndex) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "allocation for reply Post Register Index failed!!!\n",
- ioc->name));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
r = -ENOMEM;
goto out_fail;
}
@@ -3053,15 +3029,15 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
- pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
- reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
- "IO-APIC enabled"),
- pci_irq_vector(ioc->pdev, reply_q->msix_index));
+ pr_info("%s: %s enabled: IRQ %d\n",
+ reply_q->name,
+ ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
+ pci_irq_vector(ioc->pdev, reply_q->msix_index));
- pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
- ioc->name, &chip_phys, ioc->chip, memap_sz);
- pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
- ioc->name, (unsigned long long)pio_chip, pio_sz);
+ ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
+ &chip_phys, ioc->chip, memap_sz);
+ ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
+ (unsigned long long)pio_chip, pio_sz);
/* Save PCI configuration state for recovery from PCI AER/EEH errors */
pci_save_state(pdev);
@@ -3176,8 +3152,7 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (list_empty(&ioc->internal_free_list)) {
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- pr_err(MPT3SAS_FMT "%s: smid not available\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: smid not available\n", __func__);
return 0;
}
@@ -3545,89 +3520,85 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI2_MFGPAGE_DEVID_SAS2008:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_INTEL_RMS2LL080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS2LL080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS2LL080_BRANDING);
break;
case MPT2SAS_INTEL_RMS2LL040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS2LL040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS2LL040_BRANDING);
break;
case MPT2SAS_INTEL_SSD910_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_SSD910_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_SSD910_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
case MPI2_MFGPAGE_DEVID_SAS2308_2:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_INTEL_RS25GB008_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RS25GB008_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RS25GB008_BRANDING);
break;
case MPT2SAS_INTEL_RMS25JB080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25JB080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25JB080_BRANDING);
break;
case MPT2SAS_INTEL_RMS25JB040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25JB040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25JB040_BRANDING);
break;
case MPT2SAS_INTEL_RMS25KB080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25KB080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25KB080_BRANDING);
break;
case MPT2SAS_INTEL_RMS25KB040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25KB040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25KB040_BRANDING);
break;
case MPT2SAS_INTEL_RMS25LB040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25LB040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25LB040_BRANDING);
break;
case MPT2SAS_INTEL_RMS25LB080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25LB080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25LB080_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
case MPI25_MFGPAGE_DEVID_SAS3008:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_INTEL_RMS3JC080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RMS3JC080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RMS3JC080_BRANDING);
break;
case MPT3SAS_INTEL_RS3GC008_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RS3GC008_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RS3GC008_BRANDING);
break;
case MPT3SAS_INTEL_RS3FC044_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RS3FC044_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RS3FC044_BRANDING);
break;
case MPT3SAS_INTEL_RS3UC080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RS3UC080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RS3UC080_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
@@ -3636,57 +3607,54 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI2_MFGPAGE_DEVID_SAS2008:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_BRANDING);
break;
case MPT2SAS_DELL_6GBPS_SAS_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_6GBPS_SAS_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_6GBPS_SAS_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
case MPI25_MFGPAGE_DEVID_SAS3008:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_DELL_12G_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_DELL_12G_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_DELL_12G_HBA_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
default:
- pr_info(MPT3SAS_FMT
- "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
- ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
@@ -3695,46 +3663,42 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI25_MFGPAGE_DEVID_SAS3008:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
break;
case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
break;
case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
case MPI25_MFGPAGE_DEVID_SAS3108_1:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
break;
case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
- );
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
default:
- pr_info(MPT3SAS_FMT
- "Cisco SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
@@ -3743,43 +3707,40 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI2_MFGPAGE_DEVID_SAS2004:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
case MPI2_MFGPAGE_DEVID_SAS2308_2:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_HP_2_4_INTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_2_4_INTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_2_4_INTERNAL_BRANDING);
break;
case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
break;
case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
break;
case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
default:
- pr_info(MPT3SAS_FMT
- "HP SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
default:
@@ -3806,28 +3767,25 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
u16 smid, ioc_status;
size_t data_length;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
data_length = sizeof(Mpi2FWImageHeader_t);
- fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
- &fwpkg_data_dma);
+ fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+ &fwpkg_data_dma, GFP_KERNEL);
if (!fwpkg_data) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENOMEM;
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
r = -EAGAIN;
goto out;
}
@@ -3846,11 +3804,9 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
/* Wait for 15 seconds */
wait_for_completion_timeout(&ioc->base_cmds.done,
FW_IMG_HDR_READ_TIMEOUT*HZ);
- pr_info(MPT3SAS_FMT "%s: complete\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: complete\n", __func__);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi25FWUploadRequest_t)/4);
r = -ETIME;
@@ -3864,13 +3820,11 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
if (FWImgHdr->PackageVersion.Word) {
- pr_info(MPT3SAS_FMT "FW Package Version"
- "(%02d.%02d.%02d.%02d)\n",
- ioc->name,
- FWImgHdr->PackageVersion.Struct.Major,
- FWImgHdr->PackageVersion.Struct.Minor,
- FWImgHdr->PackageVersion.Struct.Unit,
- FWImgHdr->PackageVersion.Struct.Dev);
+ ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
+ FWImgHdr->PackageVersion.Struct.Major,
+ FWImgHdr->PackageVersion.Struct.Minor,
+ FWImgHdr->PackageVersion.Struct.Unit,
+ FWImgHdr->PackageVersion.Struct.Dev);
}
} else {
_debug_dump_mf(&mpi_reply,
@@ -3881,7 +3835,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
out:
if (fwpkg_data)
- pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
+ dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
fwpkg_data_dma);
return r;
}
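
pci_alloc_consistent() was a compat wrapper that called dma_alloc_coherent() with GFP_ATOMIC; converting the call sites to dma_alloc_coherent() directly lets sleepable paths like this one request GFP_KERNEL explicitly. The allocation and free pair up in the usual shape (sketch; buf/buf_dma are illustrative names):

	void *buf;
	dma_addr_t buf_dma;

	buf = dma_alloc_coherent(&pdev->dev, sz, &buf_dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... hand buf_dma to the hardware, read the result back from buf ... */
	dma_free_coherent(&pdev->dev, sz, buf, buf_dma);
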
@@ -3900,18 +3854,17 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
strncpy(desc, ioc->manu_pg0.ChipName, 16);
- pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
- "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
- ioc->name, desc,
- (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
- (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
- (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
- ioc->facts.FWVersion.Word & 0x000000FF,
- ioc->pdev->revision,
- (bios_version & 0xFF000000) >> 24,
- (bios_version & 0x00FF0000) >> 16,
- (bios_version & 0x0000FF00) >> 8,
- bios_version & 0x000000FF);
+ ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+ desc,
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF,
+ ioc->pdev->revision,
+ (bios_version & 0xFF000000) >> 24,
+ (bios_version & 0x00FF0000) >> 16,
+ (bios_version & 0x0000FF00) >> 8,
+ bios_version & 0x000000FF);
_base_display_OEMs_branding(ioc);
@@ -3920,82 +3873,81 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
i++;
}
- pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+ ioc_info(ioc, "Protocol=(");
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
- pr_info("Initiator");
+ pr_cont("Initiator");
i++;
}
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
- pr_info("%sTarget", i ? "," : "");
+ pr_cont("%sTarget", i ? "," : "");
i++;
}
i = 0;
- pr_info("), ");
- pr_info("Capabilities=(");
+ pr_cont("), Capabilities=(");
if (!ioc->hide_ir_msg) {
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
- pr_info("Raid");
+ pr_cont("Raid");
i++;
}
}
if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
- pr_info("%sTLR", i ? "," : "");
+ pr_cont("%sTLR", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
- pr_info("%sMulticast", i ? "," : "");
+ pr_cont("%sMulticast", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
- pr_info("%sBIDI Target", i ? "," : "");
+ pr_cont("%sBIDI Target", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
- pr_info("%sEEDP", i ? "," : "");
+ pr_cont("%sEEDP", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
- pr_info("%sSnapshot Buffer", i ? "," : "");
+ pr_cont("%sSnapshot Buffer", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
- pr_info("%sDiag Trace Buffer", i ? "," : "");
+ pr_cont("%sDiag Trace Buffer", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
- pr_info("%sDiag Extended Buffer", i ? "," : "");
+ pr_cont("%sDiag Extended Buffer", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
- pr_info("%sTask Set Full", i ? "," : "");
+ pr_cont("%sTask Set Full", i ? "," : "");
i++;
}
iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
- pr_info("%sNCQ", i ? "," : "");
+ pr_cont("%sNCQ", i ? "," : "");
i++;
}
- pr_info(")\n");
+ pr_cont(")\n");
}
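
The Protocol=/Capabilities= banner above is printed piecewise, so only the call that opens the line carries a log level and adapter prefix; every following fragment must use pr_cont(), otherwise each pr_info() starts a fresh log line. The pattern, reduced to two hypothetical flags (initiator/target stand in for the IOCFACTS tests):

	ioc_info(ioc, "Protocol=(");		/* opens the line with the adapter prefix */
	if (initiator)
		pr_cont("Initiator");		/* appends to the same line */
	if (target)
		pr_cont("%sTarget", initiator ? "," : "");
	pr_cont(")\n");				/* terminates the line */
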
/**
@@ -4028,21 +3980,21 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -4074,11 +4026,11 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
else
dmd_new =
dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
- pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
- ioc->name, dmd_orignal, dmd_new);
- pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
- ioc->name, io_missing_delay_original,
- io_missing_delay);
+ ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
+ dmd_orignal, dmd_new);
+ ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
+ io_missing_delay_original,
+ io_missing_delay);
ioc->device_missing_delay = dmd_new;
ioc->io_missing_delay = io_missing_delay;
}
@@ -4189,33 +4141,32 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
struct chain_tracker *ct;
struct reply_post_struct *rps;
- dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->request) {
- pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+ dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
ioc->request, ioc->request_dma);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "request_pool(0x%p): free\n",
- ioc->name, ioc->request));
+ dexitprintk(ioc,
+ ioc_info(ioc, "request_pool(0x%p): free\n",
+ ioc->request));
ioc->request = NULL;
}
if (ioc->sense) {
dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
dma_pool_destroy(ioc->sense_dma_pool);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "sense_pool(0x%p): free\n",
- ioc->name, ioc->sense));
+ dexitprintk(ioc,
+ ioc_info(ioc, "sense_pool(0x%p): free\n",
+ ioc->sense));
ioc->sense = NULL;
}
if (ioc->reply) {
dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
dma_pool_destroy(ioc->reply_dma_pool);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_pool(0x%p): free\n",
- ioc->name, ioc->reply));
+ dexitprintk(ioc,
+ ioc_info(ioc, "reply_pool(0x%p): free\n",
+ ioc->reply));
ioc->reply = NULL;
}
@@ -4223,9 +4174,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
ioc->reply_free_dma);
dma_pool_destroy(ioc->reply_free_dma_pool);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_free_pool(0x%p): free\n",
- ioc->name, ioc->reply_free));
+ dexitprintk(ioc,
+ ioc_info(ioc, "reply_free_pool(0x%p): free\n",
+ ioc->reply_free));
ioc->reply_free = NULL;
}
@@ -4237,9 +4188,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_post_free_dma_pool,
rps->reply_post_free,
rps->reply_post_free_dma);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_post_free_pool(0x%p): free\n",
- ioc->name, rps->reply_post_free));
+ dexitprintk(ioc,
+ ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
+ rps->reply_post_free));
rps->reply_post_free = NULL;
}
} while (ioc->rdpq_array_enable &&
@@ -4267,10 +4218,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->config_page) {
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "config_page(0x%p): free\n", ioc->name,
- ioc->config_page));
- pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+ dexitprintk(ioc,
+ ioc_info(ioc, "config_page(0x%p): free\n",
+ ioc->config_page));
+ dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
ioc->config_page, ioc->config_page_dma);
}
@@ -4338,8 +4289,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
int i, j;
struct chain_tracker *ct;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
retry_sz = 0;
@@ -4368,10 +4318,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
sg_tablesize = min_t(unsigned short, sg_tablesize,
SG_MAX_SEGMENTS);
- pr_warn(MPT3SAS_FMT
- "sg_tablesize(%u) is bigger than kernel "
- "defined SG_CHUNK_SIZE(%u)\n", ioc->name,
- sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
+ ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
+ sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
}
ioc->shost->sg_tablesize = sg_tablesize;
}
@@ -4381,9 +4329,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
INTERNAL_SCSIIO_CMDS_COUNT)) {
- pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
- Credits, it has just %d number of credits\n",
- ioc->name, facts->RequestCredit);
+ ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
+ facts->RequestCredit);
return -ENOMEM;
}
ioc->internal_depth = 10;
@@ -4482,11 +4429,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
- "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
- "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
- ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
- ioc->chains_needed_per_io));
+ dinitprintk(ioc,
+ ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
+ ioc->max_sges_in_main_message,
+ ioc->max_sges_in_chain_message,
+ ioc->shost->sg_tablesize,
+ ioc->chains_needed_per_io));
/* reply post queue, 16 byte align */
reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -4501,48 +4449,40 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sizeof(struct reply_post_struct), GFP_KERNEL);
if (!ioc->reply_post) {
- pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
goto out;
}
ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
&ioc->pdev->dev, sz, 16, 0);
if (!ioc->reply_post_free_dma_pool) {
- pr_err(MPT3SAS_FMT
- "reply_post_free pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
goto out;
}
i = 0;
do {
ioc->reply_post[i].reply_post_free =
- dma_pool_alloc(ioc->reply_post_free_dma_pool,
+ dma_pool_zalloc(ioc->reply_post_free_dma_pool,
GFP_KERNEL,
&ioc->reply_post[i].reply_post_free_dma);
if (!ioc->reply_post[i].reply_post_free) {
- pr_err(MPT3SAS_FMT
- "reply_post_free pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
goto out;
}
- memset(ioc->reply_post[i].reply_post_free, 0, sz);
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply post free pool (0x%p): depth(%d),"
- "element_size(%d), pool_size(%d kB)\n", ioc->name,
- ioc->reply_post[i].reply_post_free,
- ioc->reply_post_queue_depth, 8, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_post_free_dma = (0x%llx)\n", ioc->name,
- (unsigned long long)
- ioc->reply_post[i].reply_post_free_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post_queue_depth,
+ 8, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
+ (u64)ioc->reply_post[i].reply_post_free_dma));
total_sz += sz;
} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
if (ioc->dma_mask == 64) {
if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
- pr_warn(MPT3SAS_FMT
- "no suitable consistent DMA mask for %s\n",
- ioc->name, pci_name(ioc->pdev));
+ ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
+ pci_name(ioc->pdev));
goto out;
}
}
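
dma_pool_zalloc() returns already-zeroed memory, which is why the memset() that used to follow dma_pool_alloc() is dropped in the hunk above (the reply_free pool further down gets the same treatment). Before and after, in miniature (sketch; pool/buf/buf_dma/sz are illustrative names):

	/* before: allocate, then clear by hand */
	buf = dma_pool_alloc(pool, GFP_KERNEL, &buf_dma);
	if (buf)
		memset(buf, 0, sz);

	/* after: one call, guaranteed zeroed */
	buf = dma_pool_zalloc(pool, GFP_KERNEL, &buf_dma);
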
@@ -4554,9 +4494,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* with some internal commands that could be outstanding
*/
ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "scsi host: can_queue depth (%d)\n",
- ioc->name, ioc->shost->can_queue));
+ dinitprintk(ioc,
+ ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
+ ioc->shost->can_queue));
/* contiguous pool for request and chains, 16 byte align, one extra "
@@ -4572,12 +4512,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sz += (ioc->internal_depth * ioc->request_sz);
ioc->request_dma_sz = sz;
- ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+ ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
+ &ioc->request_dma, GFP_KERNEL);
if (!ioc->request) {
- pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
- "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
- "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
- ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
+ ioc->hba_queue_depth, ioc->chains_needed_per_io,
+ ioc->request_sz, sz / 1024);
if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
goto out;
retry_sz = 64;
@@ -4587,10 +4527,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
if (retry_sz)
- pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
- "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
- "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
- ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
+ ioc->hba_queue_depth, ioc->chains_needed_per_io,
+ ioc->request_sz, sz / 1024);
/* hi-priority queue */
ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
@@ -4604,24 +4543,26 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
ioc->request_sz);
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
- (ioc->hba_queue_depth * ioc->request_sz)/1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->request, ioc->hba_queue_depth,
+ ioc->request_sz,
+ (ioc->hba_queue_depth * ioc->request_sz) / 1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
- ioc->name, (unsigned long long) ioc->request_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "request pool: dma(0x%llx)\n",
+ (unsigned long long)ioc->request_dma));
total_sz += sz;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
- ioc->name, ioc->request, ioc->scsiio_depth));
+ dinitprintk(ioc,
+ ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
+ ioc->request, ioc->scsiio_depth));
ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
if (!ioc->chain_lookup) {
- pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
- "failed\n", ioc->name);
+ ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
goto out;
}
@@ -4629,8 +4570,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->scsiio_depth; i++) {
ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
if (!ioc->chain_lookup[i].chains_per_smid) {
- pr_err(MPT3SAS_FMT "chain_lookup: "
- " kzalloc failed\n", ioc->name);
+ ioc_err(ioc, "chain_lookup: kzalloc failed\n");
goto out;
}
}
@@ -4639,29 +4579,27 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
sizeof(struct request_tracker), GFP_KERNEL);
if (!ioc->hpr_lookup) {
- pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
- ioc->name);
+ ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
goto out;
}
ioc->hi_priority_smid = ioc->scsiio_depth + 1;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "hi_priority(0x%p): depth(%d), start smid(%d)\n",
- ioc->name, ioc->hi_priority,
- ioc->hi_priority_depth, ioc->hi_priority_smid));
+ dinitprintk(ioc,
+ ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+ ioc->hi_priority,
+ ioc->hi_priority_depth, ioc->hi_priority_smid));
/* initialize internal queue smid's */
ioc->internal_lookup = kcalloc(ioc->internal_depth,
sizeof(struct request_tracker), GFP_KERNEL);
if (!ioc->internal_lookup) {
- pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
- ioc->name);
+ ioc_err(ioc, "internal_lookup: kcalloc failed\n");
goto out;
}
ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "internal(0x%p): depth(%d), start smid(%d)\n",
- ioc->name, ioc->internal,
- ioc->internal_depth, ioc->internal_smid));
+ dinitprintk(ioc,
+ ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
+ ioc->internal,
+ ioc->internal_depth, ioc->internal_smid));
/*
* The number of NVMe page sized blocks needed is:
* (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
@@ -4685,17 +4623,14 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
if (!ioc->pcie_sg_lookup) {
- pr_info(MPT3SAS_FMT
- "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+ ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
goto out;
}
sz = nvme_blocks_needed * ioc->page_size;
ioc->pcie_sgl_dma_pool =
dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
if (!ioc->pcie_sgl_dma_pool) {
- pr_info(MPT3SAS_FMT
- "PCIe SGL pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
goto out;
}
@@ -4708,9 +4643,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->pcie_sgl_dma_pool, GFP_KERNEL,
&ioc->pcie_sg_lookup[i].pcie_sgl_dma);
if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
- pr_info(MPT3SAS_FMT
- "PCIe SGL pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
goto out;
}
for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
@@ -4724,20 +4657,20 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
- "element_size(%d), pool_size(%d kB)\n", ioc->name,
- ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
- "fit in a PRP page(%d)\n", ioc->name,
- ioc->chains_per_prp_buffer));
+ dinitprintk(ioc,
+ ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->scsiio_depth, sz,
+ (sz * ioc->scsiio_depth) / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
+ ioc->chains_per_prp_buffer));
total_sz += sz * ioc->scsiio_depth;
}
ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
ioc->chain_segment_sz, 16, 0);
if (!ioc->chain_dma_pool) {
- pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
goto out;
}
for (i = 0; i < ioc->scsiio_depth; i++) {
@@ -4748,8 +4681,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->chain_dma_pool, GFP_KERNEL,
&ct->chain_buffer_dma);
if (!ct->chain_buffer) {
- pr_err(MPT3SAS_FMT "chain_lookup: "
- " pci_pool_alloc failed\n", ioc->name);
+ ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
_base_release_memory_pools(ioc);
goto out;
}
@@ -4757,25 +4689,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
total_sz += ioc->chain_segment_sz;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
- ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->chain_depth, ioc->chain_segment_sz,
+ (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
/* sense buffers, 4 byte align */
sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4, 0);
if (!ioc->sense_dma_pool) {
- pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: dma_pool_create failed\n");
goto out;
}
ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
&ioc->sense_dma);
if (!ioc->sense) {
- pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
goto out;
}
/* sense buffer requires to be in same 4 gb region.
@@ -4797,24 +4727,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
dma_pool_create("sense pool", &ioc->pdev->dev, sz,
roundup_pow_of_two(sz), 0);
if (!ioc->sense_dma_pool) {
- pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: pci_pool_create failed\n");
goto out;
}
ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
&ioc->sense_dma);
if (!ioc->sense) {
- pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
goto out;
}
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
- "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
- SCSI_SENSE_BUFFERSIZE, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
- ioc->name, (unsigned long long)ioc->sense_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->sense, ioc->scsiio_depth,
+ SCSI_SENSE_BUFFERSIZE, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "sense_dma(0x%llx)\n",
+ (unsigned long long)ioc->sense_dma));
total_sz += sz;
/* reply pool, 4 byte align */
@@ -4822,25 +4751,24 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
4, 0);
if (!ioc->reply_dma_pool) {
- pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "reply pool: dma_pool_create failed\n");
goto out;
}
ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
&ioc->reply_dma);
if (!ioc->reply) {
- pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
goto out;
}
ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->reply,
- ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
- ioc->name, (unsigned long long)ioc->reply_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->reply, ioc->reply_free_queue_depth,
+ ioc->reply_sz, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_dma(0x%llx)\n",
+ (unsigned long long)ioc->reply_dma));
total_sz += sz;
/* reply free queue, 16 byte align */
@@ -4848,24 +4776,22 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
&ioc->pdev->dev, sz, 16, 0);
if (!ioc->reply_free_dma_pool) {
- pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
goto out;
}
- ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
+ ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
&ioc->reply_free_dma);
if (!ioc->reply_free) {
- pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
goto out;
}
- memset(ioc->reply_free, 0, sz);
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
- "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
- ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_free_dma (0x%llx)\n",
- ioc->name, (unsigned long long)ioc->reply_free_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->reply_free, ioc->reply_free_queue_depth,
+ 4, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_free_dma (0x%llx)\n",
+ (unsigned long long)ioc->reply_free_dma));
total_sz += sz;
if (ioc->rdpq_array_enable) {
@@ -4876,8 +4802,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
&ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
if (!ioc->reply_post_free_array_dma_pool) {
dinitprintk(ioc,
- pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
- "dma_pool_create failed\n", ioc->name));
+ ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
goto out;
}
ioc->reply_post_free_array =
@@ -4885,34 +4810,31 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
GFP_KERNEL, &ioc->reply_post_free_array_dma);
if (!ioc->reply_post_free_array) {
dinitprintk(ioc,
- pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
- "dma_pool_alloc failed\n", ioc->name));
+ ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
goto out;
}
}
ioc->config_page_sz = 512;
- ioc->config_page = pci_alloc_consistent(ioc->pdev,
- ioc->config_page_sz, &ioc->config_page_dma);
+ ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
+ ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
if (!ioc->config_page) {
- pr_err(MPT3SAS_FMT
- "config page: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "config page: dma_pool_alloc failed\n");
goto out;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "config page(0x%p): size(%d)\n",
- ioc->name, ioc->config_page, ioc->config_page_sz));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
- ioc->name, (unsigned long long)ioc->config_page_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "config page(0x%p): size(%d)\n",
+ ioc->config_page, ioc->config_page_sz));
+ dinitprintk(ioc,
+ ioc_info(ioc, "config_page_dma(0x%llx)\n",
+ (unsigned long long)ioc->config_page_dma));
total_sz += ioc->config_page_sz;
- pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
- ioc->name, total_sz/1024);
- pr_info(MPT3SAS_FMT
- "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
- ioc->name, ioc->shost->can_queue, facts->RequestCredit);
- pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
- ioc->name, ioc->shost->sg_tablesize);
+ ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
+ total_sz / 1024);
+ ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+ ioc->shost->can_queue, facts->RequestCredit);
+ ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
+ ioc->shost->sg_tablesize);
return 0;
out:
@@ -4990,9 +4912,9 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
}
@@ -5000,9 +4922,8 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
count++;
} while (--cntdn);
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ __func__, count, int_status);
return -EFAULT;
}
@@ -5017,9 +4938,9 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
}
@@ -5027,9 +4948,8 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
count++;
} while (--cntdn);
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ __func__, count, int_status);
return -EFAULT;
}
@@ -5056,9 +4976,9 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
doorbell = readl(&ioc->chip->Doorbell);
@@ -5075,9 +4995,8 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
} while (--cntdn);
out:
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ __func__, count, int_status);
return -EFAULT;
}
@@ -5099,9 +5018,9 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
doorbell_reg = readl(&ioc->chip->Doorbell);
if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
}
@@ -5109,9 +5028,8 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
count++;
} while (--cntdn);
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
- ioc->name, __func__, count, doorbell_reg);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+ __func__, count, doorbell_reg);
return -EFAULT;
}
@@ -5130,8 +5048,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
int r = 0;
if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
- pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: unknown reset_type\n", __func__);
return -EFAULT;
}
@@ -5139,7 +5056,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
return -EFAULT;
- pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+ ioc_info(ioc, "sending message unit reset !!\n");
writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
&ioc->chip->Doorbell);
@@ -5149,15 +5066,14 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
}
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
if (ioc_state) {
- pr_err(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
+ ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state);
r = -EFAULT;
goto out;
}
out:
- pr_info(MPT3SAS_FMT "message unit reset: %s\n",
- ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "message unit reset: %s\n",
+ r == 0 ? "SUCCESS" : "FAILED");
return r;
}
@@ -5183,9 +5099,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
/* make sure doorbell is not in use */
if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
- pr_err(MPT3SAS_FMT
- "doorbell is in use (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
return -EFAULT;
}
@@ -5200,17 +5114,15 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
&ioc->chip->Doorbell);
if ((_base_spin_on_doorbell_int(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_ack(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake ack failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
@@ -5222,17 +5134,15 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
if (failed) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake sending request failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
/* now wait for the reply */
if ((_base_wait_for_doorbell_int(ioc, timeout))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
@@ -5241,9 +5151,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
@@ -5252,9 +5161,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
for (i = 2; i < default_reply->MsgLength * 2; i++) {
if ((_base_wait_for_doorbell_int(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
if (i >= reply_bytes/2) /* overflow case */
@@ -5267,8 +5175,9 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
_base_wait_for_doorbell_int(ioc, 5);
if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+ dhsprintk(ioc,
+ ioc_info(ioc, "doorbell is in use (line=%d)\n",
+ __LINE__));
}
writel(0, &ioc->chip->HostInterruptStatus);
@@ -5308,14 +5217,12 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
void *request;
u16 wait_state_count;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mutex_lock(&ioc->base_cmds.mutex);
if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: base_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5324,23 +5231,20 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5408,14 +5312,12 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
void *request;
u16 wait_state_count;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mutex_lock(&ioc->base_cmds.mutex);
if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: base_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5424,24 +5326,20 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name,
- __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5495,8 +5393,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
struct mpt3sas_port_facts *pfacts;
int mpi_reply_sz, mpi_request_sz, r;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
@@ -5507,8 +5404,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
if (r != 0) {
- pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
+ ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
return r;
}
@@ -5536,26 +5432,26 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
u32 ioc_state;
int rc;
- dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->pci_error_recovery) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "%s: host in pci error recovery\n", ioc->name, __func__));
+ dfailprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
return -EFAULT;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
- dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
- ioc->name, __func__, ioc_state));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+ __func__, ioc_state));
if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
(ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
return 0;
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, printk(MPT3SAS_FMT
- "unexpected doorbell active!\n", ioc->name));
+ dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
goto issue_diag_reset;
}
@@ -5567,9 +5463,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
if (ioc_state) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state));
+ dfailprintk(ioc,
+ ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state));
return -EFAULT;
}
@@ -5592,14 +5488,13 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
struct mpt3sas_facts *facts;
int mpi_reply_sz, mpi_request_sz, r;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
r = _base_wait_for_iocstate(ioc, 10);
if (r) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "%s: failed getting to correct state\n",
- ioc->name, __func__));
+ dfailprintk(ioc,
+ ioc_info(ioc, "%s: failed getting to correct state\n",
+ __func__));
return r;
}
mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
@@ -5610,8 +5505,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
if (r != 0) {
- pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
+ ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
return r;
}
@@ -5663,20 +5557,20 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
*/
ioc->page_size = 1 << facts->CurrentHostPageSize;
if (ioc->page_size == 1) {
- pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
- "default host page size to 4k\n", ioc->name);
+ ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
- ioc->name, facts->CurrentHostPageSize));
-
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "hba queue depth(%d), max chains per io(%d)\n",
- ioc->name, facts->RequestCredit,
- facts->MaxChainDepth));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "request frame size(%d), reply frame size(%d)\n", ioc->name,
- facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+ dinitprintk(ioc,
+ ioc_info(ioc, "CurrentHostPageSize(%d)\n",
+ facts->CurrentHostPageSize));
+
+ dinitprintk(ioc,
+ ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
+ facts->RequestCredit, facts->MaxChainDepth));
+ dinitprintk(ioc,
+ ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
+ facts->IOCRequestFrameSize * 4,
+ facts->ReplyFrameSize * 4));
return 0;
}
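The dinitprintk()/dhsprintk()/dfailprintk() wrappers seen in this hunk gate an entire statement on a bit in ioc->logging_level, which is why their second argument can be a whole ioc_info() call. A sketch of the pattern, paraphrasing the MPT_CHECK_LOGGING helper in the driver's mpt3sas_debug.h (the do/while form below is illustrative, not quoted from this patch):

	#define dinitprintk(ioc, cmd)				\
	do {							\
		if ((ioc)->logging_level & MPT_DEBUG_INIT)	\
			cmd;					\
	} while (0)

So after conversion, ioc_info() still only runs when init-time debugging is enabled, exactly as pr_info() did before.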
@@ -5696,8 +5590,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
u16 ioc_status;
u32 reply_post_free_array_sz = 0;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
@@ -5763,15 +5656,14 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
if (r != 0) {
- pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
+ ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
return r;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
mpi_reply.IOCLogInfo) {
- pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+ ioc_err(ioc, "%s: failed\n", __func__);
r = -EIO;
}
@@ -5842,18 +5734,16 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
u16 smid;
u16 ioc_status;
- pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+ ioc_info(ioc, "sending port enable !!\n");
if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
@@ -5867,8 +5757,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2PortEnableRequest_t)/4);
if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
@@ -5881,16 +5770,15 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpi_reply = ioc->port_enable_cmds.reply;
ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
- ioc->name, __func__, ioc_status);
+ ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
+ __func__, ioc_status);
r = -EFAULT;
goto out;
}
out:
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
- pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
- "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
return r;
}
@@ -5906,18 +5794,16 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
Mpi2PortEnableRequest_t *mpi_request;
u16 smid;
- pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+ ioc_info(ioc, "sending port enable !!\n");
if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
@@ -6020,19 +5906,16 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
int r = 0;
int i;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
ioc->base_cmds.status = MPT3_CMD_PENDING;
@@ -6049,8 +5932,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2EventNotificationRequest_t)/4);
if (ioc->base_cmds.status & MPT3_CMD_RESET)
@@ -6058,8 +5940,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
else
r = -ETIME;
} else
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
- ioc->name, __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
return r;
}
@@ -6115,18 +5996,16 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
u32 count;
u32 hcb_size;
- pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+ ioc_info(ioc, "sending diag reset !!\n");
- drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
- ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
count = 0;
do {
/* Write magic sequence to WriteSequence register
* Loop until in diagnostic mode
*/
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "write magic sequence\n", ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
@@ -6142,16 +6021,15 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
goto out;
host_diagnostic = readl(&ioc->chip->HostDiagnostic);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
- ioc->name, count, host_diagnostic));
+ drsprintk(ioc,
+ ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ count, host_diagnostic));
} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
hcb_size = readl(&ioc->chip->HCBSize);
- drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
- ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
&ioc->chip->HostDiagnostic);
@@ -6174,43 +6052,38 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "restart the adapter assuming the HCB Address points to good F/W\n",
- ioc->name));
+ drsprintk(ioc,
+ ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
writel(host_diagnostic, &ioc->chip->HostDiagnostic);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "re-enable the HCDW\n", ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
&ioc->chip->HCBSize);
}
- drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
- ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
&ioc->chip->HostDiagnostic);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "disable writes to the diagnostic register\n", ioc->name));
+ drsprintk(ioc,
+ ioc_info(ioc, "disable writes to the diagnostic register\n"));
writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "Wait for FW to go to the READY state\n", ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
if (ioc_state) {
- pr_err(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
+ ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state);
goto out;
}
- pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+ ioc_info(ioc, "diag reset: SUCCESS\n");
return 0;
out:
- pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+ ioc_err(ioc, "diag reset: FAILED\n");
return -EFAULT;
}
@@ -6228,15 +6101,15 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
int rc;
int count;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->pci_error_recovery)
return 0;
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
- dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
- ioc->name, __func__, ioc_state));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+ __func__, ioc_state));
/* if in RESET state, it should move to READY state shortly */
count = 0;
@@ -6244,9 +6117,8 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
while ((ioc_state & MPI2_IOC_STATE_MASK) !=
MPI2_IOC_STATE_READY) {
if (count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
+ ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state);
return -EFAULT;
}
ssleep(1);
@@ -6258,9 +6130,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
return 0;
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "unexpected doorbell active!\n",
- ioc->name));
+ dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
goto issue_diag_reset;
}
@@ -6304,8 +6174,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
struct adapter_reply_queue *reply_q;
Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
/* clean the delayed target reset list */
list_for_each_entry_safe(delayed_tr, delayed_tr_next,
@@ -6465,8 +6334,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
- dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
/* synchronizing freeing resource with pci_access_mutex lock */
mutex_lock(&ioc->pci_access_mutex);
@@ -6494,8 +6362,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
int r, i;
int cpu_id, last_cpu_id = 0;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
/* setup cpu_msix_table */
ioc->cpu_count = num_online_cpus();
@@ -6505,9 +6372,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
ioc->reply_queue_count = 1;
if (!ioc->cpu_msix_table) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "allocation for cpu_msix_table failed!!!\n",
- ioc->name));
+ dfailprintk(ioc,
+ ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
r = -ENOMEM;
goto out_free_resources;
}
@@ -6516,9 +6382,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->reply_post_host_index) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
- "for reply_post_host_index failed!!!\n",
- ioc->name));
+ dfailprintk(ioc,
+ ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
r = -ENOMEM;
goto out_free_resources;
}
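Note that the allocation-failure messages in the two hunks above stay at info level (ioc_info() under dfailprintk()) rather than being promoted to ioc_err(): the conversion preserves each message's existing printk severity, keeping the patch a pure refactor.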
@@ -6747,8 +6612,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
- dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mpt3sas_base_stop_watchdog(ioc);
mpt3sas_base_free_resources(ioc);
@@ -6781,8 +6645,7 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
mpt3sas_scsih_pre_reset_handler(ioc);
mpt3sas_ctl_pre_reset_handler(ioc);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
/**
@@ -6793,8 +6656,7 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
mpt3sas_scsih_after_reset_handler(ioc);
mpt3sas_ctl_after_reset_handler(ioc);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
ioc->transport_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -6835,8 +6697,7 @@ static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
mpt3sas_scsih_reset_done_handler(ioc);
mpt3sas_ctl_reset_done_handler(ioc);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}
/**
@@ -6883,12 +6744,10 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
u32 ioc_state;
u8 is_fault = 0, is_trigger = 0;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
if (ioc->pci_error_recovery) {
- pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
r = 0;
goto out_unlocked;
}
@@ -6942,8 +6801,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
_base_reset_done_handler(ioc);
out:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
- ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: %s\n",
+ __func__, r == 0 ? "SUCCESS" : "FAILED"));
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 0;
@@ -6959,7 +6819,6 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_trigger_master(ioc,
MASTER_TRIGGER_ADAPTER_RESET);
}
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
return r;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 96dc15e90bd8..8f1d6b071b39 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -158,7 +158,14 @@ struct mpt3sas_nvme_cmd {
/*
* logging format
*/
-#define MPT3SAS_FMT "%s: "
+#define ioc_err(ioc, fmt, ...) \
+ pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...) \
+ pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...) \
+ pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
/*
* WarpDrive Specific Log codes
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index d29a2dcc7d0e..02209447f4ef 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -175,20 +175,18 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
if (!desc)
return;
- pr_info(MPT3SAS_FMT
- "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
- ioc->name, calling_function_name, desc,
- mpi_request->Header.PageNumber, mpi_request->Action,
- le32_to_cpu(mpi_request->PageAddress), smid);
+ ioc_info(ioc, "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+ calling_function_name, desc,
+ mpi_request->Header.PageNumber, mpi_request->Action,
+ le32_to_cpu(mpi_request->PageAddress), smid);
if (!mpi_reply)
return;
if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
}
/**
@@ -210,9 +208,8 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
&mem->page_dma, GFP_KERNEL);
if (!mem->page) {
- pr_err(MPT3SAS_FMT
- "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
- ioc->name, __func__, mem->sz);
+ ioc_err(ioc, "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+ __func__, mem->sz);
r = -ENOMEM;
}
} else { /* use tmp buffer if less than 512 bytes */
@@ -313,8 +310,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
mutex_lock(&ioc->config_cmds.mutex);
if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: config_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: config_cmd in use\n", __func__);
mutex_unlock(&ioc->config_cmds.mutex);
return -EAGAIN;
}
@@ -362,34 +358,30 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
r = -EFAULT;
goto free_mem;
}
- pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n",
- ioc->name, __func__, retry_count);
+ ioc_info(ioc, "%s: attempting retry (%d)\n",
+ __func__, retry_count);
}
wait_state_count = 0;
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
ioc->config_cmds.status = MPT3_CMD_NOT_USED;
r = -EFAULT;
goto free_mem;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->config_cmds.status = MPT3_CMD_NOT_USED;
r = -EAGAIN;
goto free_mem;
@@ -429,12 +421,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
(mpi_reply->Header.PageType & 0xF)) {
_debug_dump_mf(mpi_request, ioc->request_sz/4);
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
- panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
- " mpi_reply mismatch: Requested PageType(0x%02x)" \
- " Reply PageType(0x%02x)\n", \
- ioc->name, __func__,
- (mpi_request->Header.PageType & 0xF),
- (mpi_reply->Header.PageType & 0xF));
+ panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->Header.PageType & 0xF,
+ mpi_reply->Header.PageType & 0xF);
}
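The panic() conversions also drop the KERN_WARNING prefix, and for good reason: panic() formats its arguments into a buffer and prints that buffer itself at emergency level, so an embedded KERN_* marker would show up as literal bytes in the message rather than act as a log level. Roughly what panic() does with its format string (paraphrased from kernel/panic.c):

	vsnprintf(buf, sizeof(buf), fmt, args);
	pr_emerg("Kernel panic - not syncing: %s\n", buf);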
if (((mpi_request->Header.PageType & 0xF) ==
@@ -442,19 +432,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
mpi_request->ExtPageType != mpi_reply->ExtPageType) {
_debug_dump_mf(mpi_request, ioc->request_sz/4);
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
- panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
- " mpi_reply mismatch: Requested ExtPageType(0x%02x)"
- " Reply ExtPageType(0x%02x)\n",
- ioc->name, __func__, mpi_request->ExtPageType,
- mpi_reply->ExtPageType);
+ panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->ExtPageType,
+ mpi_reply->ExtPageType);
}
ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
& MPI2_IOCSTATUS_MASK;
}
if (retry_count)
- pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \
- ioc->name, __func__, retry_count);
+ ioc_info(ioc, "%s: retry (%d) completed!!\n",
+ __func__, retry_count);
if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
config_page && mpi_request->Action ==
@@ -469,14 +458,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
_debug_dump_config(p, min_t(u16, mem.sz,
config_page_sz)/4);
- panic(KERN_WARNING MPT3SAS_FMT
- "%s: Firmware BUG:" \
- " config page mismatch:"
- " Requested PageType(0x%02x)"
- " Reply PageType(0x%02x)\n",
- ioc->name, __func__,
- (mpi_request->Header.PageType & 0xF),
- (p[3] & 0xF));
+ panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->Header.PageType & 0xF,
+ p[3] & 0xF);
}
if (((mpi_request->Header.PageType & 0xF) ==
@@ -486,13 +471,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
_debug_dump_config(p, min_t(u16, mem.sz,
config_page_sz)/4);
- panic(KERN_WARNING MPT3SAS_FMT
- "%s: Firmware BUG:" \
- " config page mismatch:"
- " Requested ExtPageType(0x%02x)"
- " Reply ExtPageType(0x%02x)\n",
- ioc->name, __func__,
- mpi_request->ExtPageType, p[6]);
+ panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->ExtPageType, p[6]);
}
}
memcpy(config_page, mem.page, min_t(u16, mem.sz,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 5e8c059ce2c9..4afa597cbfba 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -185,17 +185,15 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
if (!desc)
return;
- pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
- ioc->name, calling_function_name, desc, smid);
+ ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);
if (!mpi_reply)
return;
if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function ==
@@ -208,38 +206,32 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
sas_device = mpt3sas_get_sdev_by_handle(ioc,
le16_to_cpu(scsi_reply->DevHandle));
if (sas_device) {
- pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->sas_address, sas_device->phy);
- pr_warn(MPT3SAS_FMT
- "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
+ ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+ (u64)sas_device->sas_address,
+ sas_device->phy);
+ ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot);
sas_device_put(sas_device);
}
if (!sas_device) {
pcie_device = mpt3sas_get_pdev_by_handle(ioc,
le16_to_cpu(scsi_reply->DevHandle));
if (pcie_device) {
- pr_warn(MPT3SAS_FMT
- "\tWWID(0x%016llx), port(%d)\n", ioc->name,
- (unsigned long long)pcie_device->wwid,
- pcie_device->port_num);
+ ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
if (pcie_device->enclosure_handle != 0)
- pr_warn(MPT3SAS_FMT
- "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
pcie_device_put(pcie_device);
}
}
if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
- pr_info(MPT3SAS_FMT
- "\tscsi_state(0x%02x), scsi_status"
- "(0x%02x)\n", ioc->name,
- scsi_reply->SCSIState,
- scsi_reply->SCSIStatus);
+ ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
+ scsi_reply->SCSIState,
+ scsi_reply->SCSIStatus);
}
}
@@ -466,8 +458,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
int i;
u8 issue_reset;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
if (!(ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_REGISTERED))
@@ -487,8 +478,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
*/
void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
ioc->ctl_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
@@ -506,8 +496,7 @@ void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
int i;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
if (!(ioc->diag_buffer_status[i] &
@@ -612,10 +601,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
}
if (!found) {
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), lun(%d), no active mid!!\n",
- ioc->name,
- desc, le16_to_cpu(tm_request->DevHandle), lun));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+ desc, le16_to_cpu(tm_request->DevHandle),
+ lun));
tm_reply = ioc->ctl_cmds.reply;
tm_reply->DevHandle = tm_request->DevHandle;
tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -631,10 +620,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
return 1;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
- desc, le16_to_cpu(tm_request->DevHandle), lun,
- le16_to_cpu(tm_request->TaskMID)));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
return 0;
}
@@ -672,8 +661,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
issue_reset = 0;
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
ret = -EAGAIN;
goto out;
}
@@ -682,28 +670,23 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
ret = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name,
- __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
if (!mpi_request) {
- pr_err(MPT3SAS_FMT
- "%s: failed obtaining a memory for mpi_request\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n",
+ __func__);
ret = -ENOMEM;
goto out;
}
@@ -726,8 +709,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ret = -EAGAIN;
goto out;
}
@@ -762,8 +744,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
/* obtain dma-able memory for data transfer */
if (data_out_sz) /* WRITE */ {
- data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
- &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+ &data_out_dma, GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -782,8 +764,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
if (data_in_sz) /* READ */ {
- data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
- &data_in_dma);
+ data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+ &data_in_dma, GFP_KERNEL);
if (!data_in) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
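The two hunks above are a straight swap from the legacy PCI DMA helpers to the generic DMA API. pci_alloc_consistent() historically implied GFP_ATOMIC; this ioctl path may sleep (it takes mutexes and calls ssleep()), so GFP_KERNEL is safe here and less likely to fail under memory pressure. A self-contained sketch of the mapping (helper names are illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* was: buf = pci_alloc_consistent(pdev, sz, &dma); */
	static void *example_alloc(struct pci_dev *pdev, size_t sz,
				   dma_addr_t *dma)
	{
		return dma_alloc_coherent(&pdev->dev, sz, dma, GFP_KERNEL);
	}

	/* was: pci_free_consistent(pdev, sz, buf, dma); */
	static void example_free(struct pci_dev *pdev, size_t sz,
				 void *buf, dma_addr_t dma)
	{
		dma_free_coherent(&pdev->dev, sz, buf, dma);
	}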
@@ -823,9 +805,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
data_out_dma, data_out_sz, data_in_dma, data_in_sz);
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
- "ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -843,9 +825,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
mpt3sas_base_get_sense_buffer_dma(ioc, smid);
memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) :ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -863,10 +845,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
Mpi2SCSITaskManagementRequest_t *tm_request =
(Mpi2SCSITaskManagementRequest_t *)request;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
- ioc->name,
- le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+ dtmprintk(ioc,
+ ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+ le16_to_cpu(tm_request->DevHandle),
+ tm_request->TaskType));
ioc->got_task_abort_from_ioctl = 1;
if (tm_request->TaskType ==
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
@@ -881,9 +863,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->got_task_abort_from_ioctl = 0;
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) :ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -929,9 +911,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
case MPI2_FUNCTION_SATA_PASSTHROUGH:
{
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) :ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -1017,12 +999,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
Mpi2SCSITaskManagementReply_t *tm_reply =
(Mpi2SCSITaskManagementReply_t *)mpi_reply;
- pr_info(MPT3SAS_FMT "TASK_MGMT: " \
- "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
- "TerminationCount(0x%08x)\n", ioc->name,
- le16_to_cpu(tm_reply->IOCStatus),
- le32_to_cpu(tm_reply->IOCLogInfo),
- le32_to_cpu(tm_reply->TerminationCount));
+ ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
+ le16_to_cpu(tm_reply->IOCStatus),
+ le32_to_cpu(tm_reply->IOCLogInfo),
+ le32_to_cpu(tm_reply->TerminationCount));
}
/* copy out xdata to user */
@@ -1054,9 +1034,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
MPI2_FUNCTION_NVME_ENCAPSULATED)) {
if (karg.sense_data_ptr == NULL) {
- pr_info(MPT3SAS_FMT "Response buffer provided"
- " by application is NULL; Response data will"
- " not be returned.\n", ioc->name);
+ ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
goto out;
}
sz_arg = (mpi_request->Function ==
@@ -1079,9 +1057,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
mpi_request->Function ==
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
- pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
- ioc->name,
- le16_to_cpu(mpi_request->FunctionDependent1));
+ ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
+ le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
pcie_device = mpt3sas_get_pdev_by_handle(ioc,
le16_to_cpu(mpi_request->FunctionDependent1));
@@ -1106,11 +1083,11 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
/* free memory associated with sg buffers */
if (data_in)
- pci_free_consistent(ioc->pdev, data_in_sz, data_in,
+ dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
data_in_dma);
if (data_out)
- pci_free_consistent(ioc->pdev, data_out_sz, data_out,
+ dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
data_out_dma);
kfree(mpi_request);
@@ -1128,8 +1105,8 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt3_ioctl_iocinfo karg;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
memset(&karg, 0 , sizeof(karg));
if (ioc->pfacts)
@@ -1188,8 +1165,8 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
memcpy(karg.event_types, ioc->event_type,
@@ -1219,8 +1196,8 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
memcpy(ioc->event_type, karg.event_types,
MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
@@ -1259,8 +1236,8 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
number_bytes = karg.hdr.max_data_size -
sizeof(struct mpt3_ioctl_header);
@@ -1306,12 +1283,11 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
ioc->is_driver_loading)
return -EAGAIN;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_info(MPT3SAS_FMT "host reset: %s\n",
- ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
return 0;
}
@@ -1440,8 +1416,8 @@ _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
rc = _ctl_btdh_search_sas_device(ioc, &karg);
if (!rc)
@@ -1512,53 +1488,46 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
u32 ioc_state;
u8 issue_reset = 0;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EAGAIN;
goto out;
}
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
buffer_type = diag_register->buffer_type;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if (ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) {
- pr_err(MPT3SAS_FMT
- "%s: already has a registered buffer for buffer_type(0x%02x)\n",
- ioc->name, __func__,
- buffer_type);
+ ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (diag_register->requested_buffer_size % 4) {
- pr_err(MPT3SAS_FMT
- "%s: the requested_buffer_size is not 4 byte aligned\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
+ __func__);
return -EINVAL;
}
smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1580,9 +1549,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (request_data) {
request_data_dma = ioc->diag_buffer_dma[buffer_type];
if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
- pci_free_consistent(ioc->pdev,
- ioc->diag_buffer_sz[buffer_type],
- request_data, request_data_dma);
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[buffer_type],
+ request_data, request_data_dma);
request_data = NULL;
}
}
@@ -1590,12 +1559,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (request_data == NULL) {
ioc->diag_buffer_sz[buffer_type] = 0;
ioc->diag_buffer_dma[buffer_type] = 0;
- request_data = pci_alloc_consistent(
- ioc->pdev, request_data_sz, &request_data_dma);
+ request_data = dma_alloc_coherent(&ioc->pdev->dev,
+ request_data_sz, &request_data_dma, GFP_KERNEL);
if (request_data == NULL) {
- pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
- " for diag buffers, requested size(%d)\n",
- ioc->name, __func__, request_data_sz);
+ ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
+ __func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
return -ENOMEM;
}
@@ -1612,11 +1580,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
- ioc->name, __func__, request_data,
- (unsigned long long)request_data_dma,
- le32_to_cpu(mpi_request->BufferLength)));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
+ __func__, request_data,
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
mpi_request->ProductSpecific[i] =
@@ -1637,8 +1605,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
/* process the completed Reply Message Frame */
if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
- pr_err(MPT3SAS_FMT "%s: no reply message\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: no reply message\n", __func__);
rc = -EFAULT;
goto out;
}
@@ -1649,13 +1616,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_REGISTERED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
- ioc->name, __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
- pr_info(MPT3SAS_FMT
- "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
- ioc->name, __func__,
- ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__, ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
rc = -EFAULT;
}
@@ -1666,7 +1631,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
out:
if (rc && request_data)
- pci_free_consistent(ioc->pdev, request_data_sz,
+ dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
@@ -1689,8 +1654,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
if (bits_to_register & 1) {
- pr_info(MPT3SAS_FMT "registering trace buffer support\n",
- ioc->name);
+ ioc_info(ioc, "registering trace buffer support\n");
ioc->diag_trigger_master.MasterData =
(MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
@@ -1701,8 +1665,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
}
if (bits_to_register & 2) {
- pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
- ioc->name);
+ ioc_info(ioc, "registering snapshot buffer support\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
/* register for 2MB buffers */
diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1711,8 +1674,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
}
if (bits_to_register & 4) {
- pr_info(MPT3SAS_FMT "registering extended buffer support\n",
- ioc->name);
+ ioc_info(ioc, "registering extended buffer support\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
/* register for 2MB buffers */
diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1768,51 +1730,46 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
buffer_type = karg.unique_id & 0x000000ff;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is not registered\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
return -EINVAL;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) has not been released\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
request_data_sz = ioc->diag_buffer_sz[buffer_type];
request_data_dma = ioc->diag_buffer_dma[buffer_type];
- pci_free_consistent(ioc->pdev, request_data_sz,
- request_data, request_data_dma);
+ dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+ request_data, request_data_dma);
ioc->diag_buffer[buffer_type] = NULL;
ioc->diag_buffer_status[buffer_type] = 0;
return 0;
@@ -1841,41 +1798,37 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
karg.application_flags = 0;
buffer_type = karg.buffer_type;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is not registered\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (karg.unique_id & 0xffffff00) {
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have buffer for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
@@ -1897,9 +1850,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
- pr_err(MPT3SAS_FMT
- "%s: unable to write mpt3_diag_query data @ %p\n",
- ioc->name, __func__, arg);
+ ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
+ __func__, arg);
return -EFAULT;
}
return 0;
@@ -1923,8 +1875,8 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
u32 ioc_state;
int rc;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
rc = 0;
*issue_reset = 0;
@@ -1935,24 +1887,22 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
MPT3_DIAG_BUFFER_IS_REGISTERED)
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_RELEASED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: skipping due to FAULT state\n", ioc->name,
- __func__));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: skipping due to FAULT state\n",
+ __func__));
rc = -EAGAIN;
goto out;
}
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1982,8 +1932,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
/* process the completed Reply Message Frame */
if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
- pr_err(MPT3SAS_FMT "%s: no reply message\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: no reply message\n", __func__);
rc = -EFAULT;
goto out;
}
@@ -1994,13 +1943,11 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_RELEASED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
- ioc->name, __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
- pr_info(MPT3SAS_FMT
- "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
- ioc->name, __func__,
- ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__, ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
rc = -EFAULT;
}
@@ -2033,47 +1980,41 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
buffer_type = karg.unique_id & 0x000000ff;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is not registered\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
if (ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_RELEASED) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is already released\n",
- ioc->name, __func__,
- buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
+ __func__, buffer_type);
return 0;
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
@@ -2084,9 +2025,8 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_DIAG_BUFFER_IS_RELEASED;
ioc->diag_buffer_status[buffer_type] &=
~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) was released due to host reset\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
+ __func__, buffer_type);
return 0;
}
@@ -2124,38 +2064,34 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
buffer_type = karg.unique_id & 0x000000ff;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have buffer for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
request_size = ioc->diag_buffer_sz[buffer_type];
if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
- pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
- "or bytes_to_read are not 4 byte aligned\n", ioc->name,
- __func__);
+ ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
+ __func__);
return -EINVAL;
}
@@ -2163,10 +2099,10 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EINVAL;
diag_data = (void *)(request_data + karg.starting_offset);
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
- ioc->name, __func__,
- diag_data, karg.starting_offset, karg.bytes_to_read));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
+ __func__, diag_data, karg.starting_offset,
+ karg.bytes_to_read));
/* Truncate data on requests that are too large */
if ((diag_data + karg.bytes_to_read < diag_data) ||
@@ -2177,39 +2113,36 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
if (copy_to_user((void __user *)uarg->diagnostic_data,
diag_data, copy_size)) {
- pr_err(MPT3SAS_FMT
- "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
- ioc->name, __func__, diag_data);
+ ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
+ __func__, diag_data);
return -EFAULT;
}
if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
return 0;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: Reregister buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
+ __func__, buffer_type));
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is still registered\n",
- ioc->name, __func__, buffer_type));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
+ __func__, buffer_type));
return 0;
}
/* Get a free request frame and save the message context.
*/
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -2247,8 +2180,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/* process the completed Reply Message Frame */
if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
- pr_err(MPT3SAS_FMT "%s: no reply message\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: no reply message\n", __func__);
rc = -EFAULT;
goto out;
}
@@ -2259,13 +2191,11 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_REGISTERED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
- ioc->name, __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
- pr_info(MPT3SAS_FMT
- "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
- ioc->name, __func__,
- ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__, ioc_status,
+ le32_to_cpu(mpi_reply->IOCLogInfo));
rc = -EFAULT;
}
@@ -2450,8 +2380,9 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
ret = _ctl_diag_read_buffer(ioc, arg);
break;
default:
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+ dctlprintk(ioc,
+ ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
+ cmd));
break;
}
@@ -2840,8 +2771,8 @@ _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
return -EINVAL;
ioc->logging_level = val;
- pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
- ioc->logging_level);
+ ioc_info(ioc, "logging_level=%08xh\n",
+ ioc->logging_level);
return strlen(buf);
}
static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
@@ -2877,8 +2808,8 @@ _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
return -EINVAL;
ioc->fwfault_debug = val;
- pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
- ioc->fwfault_debug);
+ ioc_info(ioc, "fwfault_debug=%d\n",
+ ioc->fwfault_debug);
return strlen(buf);
}
static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
@@ -2958,8 +2889,8 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
ssize_t rc = 0;
if (!ioc->is_warpdrive) {
- pr_err(MPT3SAS_FMT "%s: BRM attribute is only for"
- " warpdrive\n", ioc->name, __func__);
+ ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
+ __func__);
goto out;
}
/* pci_access_mutex lock acquired by sysfs show path */
@@ -2973,30 +2904,28 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
if (!io_unit_pg3) {
- pr_err(MPT3SAS_FMT "%s: failed allocating memory "
- "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
+ ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
+ __func__, sz);
goto out;
}
if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
0) {
- pr_err(MPT3SAS_FMT
- "%s: failed reading iounit_pg3\n", ioc->name,
- __func__);
+ ioc_err(ioc, "%s: failed reading iounit_pg3\n",
+ __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with "
- "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
+ ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
+ __func__, ioc_status);
goto out;
}
if (io_unit_pg3->GPIOCount < 25) {
- pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than "
- "25 entries, detected (%d) entries\n", ioc->name, __func__,
- io_unit_pg3->GPIOCount);
+ ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
+ __func__, io_unit_pg3->GPIOCount);
goto out;
}
@@ -3039,17 +2968,15 @@ _ctl_host_trace_buffer_size_show(struct device *cdev,
struct DIAG_BUFFER_START *request_data;
if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
@@ -3089,17 +3016,15 @@ _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
u32 size;
if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
@@ -3188,8 +3113,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
goto out;
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
- pr_info(MPT3SAS_FMT "posting host trace buffers\n",
- ioc->name);
+ ioc_info(ioc, "posting host trace buffers\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
diag_register.requested_buffer_size = (1024 * 1024);
diag_register.unique_id = 0x7075900;
@@ -3205,8 +3129,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_RELEASED))
goto out;
- pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
- ioc->name);
+ ioc_info(ioc, "releasing host trace buffer\n");
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
@@ -3658,8 +3581,10 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
if ((ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_RELEASED))
continue;
- pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
- ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[i],
+ ioc->diag_buffer[i],
+ ioc->diag_buffer_dma[i]);
ioc->diag_buffer[i] = NULL;
ioc->diag_buffer_status[i] = 0;
}
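The final mpt3sas_ctl.c hunk also retires pci_free_consistent() in favor of calling dma_free_coherent() on &pdev->dev directly. The two are interchangeable here: the PCI variant was a thin compatibility shim over the generic DMA API, roughly as sketched below (simplified; the real shim in include/linux/pci-dma-compat.h also tolerates a NULL hwdev):

	static inline void
	pci_free_consistent(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle)
	{
		dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
	}

Calling the generic API directly simply drops one level of indirection along with the deprecated pci_* naming.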
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 53133cfd420f..03c52847ed07 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -418,8 +418,8 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
@@ -442,10 +442,8 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
return -ENXIO;
/* else error case */
- pr_err(MPT3SAS_FMT
- "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
- ioc->name, handle, ioc_status,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
return -EIO;
}
@@ -508,10 +506,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
(ioc->bios_pg2.ReqBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.RequestedBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: req_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->req_boot_device.device = device;
ioc->req_boot_device.channel = channel;
}
@@ -523,10 +520,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
(ioc->bios_pg2.ReqAltBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.RequestedAltBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: req_alt_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->req_alt_boot_device.device = device;
ioc->req_alt_boot_device.channel = channel;
}
@@ -538,10 +534,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
(ioc->bios_pg2.CurrentBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.CurrentBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: current_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->current_boot_device.device = device;
ioc->current_boot_device.channel = channel;
}
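The boot-device hunks above also shorten the printk casts from (unsigned long long) to (u64). Both forms match the %llx specifier, since the kernel's type headers define u64 as unsigned long long on every architecture. A minimal illustration (the function and variable names here are invented for the example):

	#include <linux/printk.h>
	#include <linux/types.h>

	static void example_log_sas_address(u64 sas_address)
	{
		/* Both casts yield exactly the type %016llx expects. */
		pr_info("sas_addr(0x%016llx)\n", (unsigned long long)sas_address);
		pr_info("sas_addr(0x%016llx)\n", (u64)sas_address);
	}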
@@ -752,19 +747,16 @@ _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
sas_device->chassis_slot);
} else {
if (sas_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "enclosure logical id(0x%016llx), slot(%d) \n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
+ ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot);
if (sas_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, sas_device->enclosure_level,
- sas_device->connector_name);
+ ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
if (sas_device->is_chassis_slot_valid)
- pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
- ioc->name, sas_device->chassis_slot);
+ ioc_info(ioc, "chassis slot(0x%04x)\n",
+ sas_device->chassis_slot);
}
}
@@ -784,10 +776,8 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
if (!sas_device)
return;
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, sas_device->handle,
- (unsigned long long) sas_device->sas_address);
+ ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_device->handle, (u64)sas_device->sas_address);
_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
@@ -872,10 +862,10 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__, sas_device->handle,
+ (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
@@ -923,10 +913,10 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__, sas_device->handle,
+ (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
@@ -1073,21 +1063,16 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
if (!pcie_device)
return;
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, pcie_device->handle,
- (unsigned long long) pcie_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ pcie_device->handle, (u64)pcie_device->wwid);
if (pcie_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "removing enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
if (pcie_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "removing enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, pcie_device->enclosure_level,
- pcie_device->connector_name);
+ ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
if (!list_empty(&pcie_device->list)) {
@@ -1146,20 +1131,21 @@ _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure logical id(0x%016llx), slot( %d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+ __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device_get(pcie_device);
@@ -1191,20 +1177,21 @@ _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure logical id(0x%016llx), slot( %d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+ __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device_get(pcie_device);
@@ -1304,9 +1291,10 @@ _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- raid_device->handle, (unsigned long long)raid_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ raid_device->handle, (u64)raid_device->wwid));
spin_lock_irqsave(&ioc->raid_device_lock, flags);
list_add_tail(&raid_device->list, &ioc->raid_device_list);
@@ -1857,16 +1845,16 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -1952,8 +1940,8 @@ scsih_get_resync(struct device *dev)
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
percent_complete = 0;
goto out;
}
@@ -2006,8 +1994,8 @@ scsih_get_state(struct device *dev)
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -2103,9 +2091,9 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
&num_pds)) || !num_pds) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2114,17 +2102,17 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
sizeof(Mpi2RaidVol0PhysDisk_t));
vol_pg0 = kzalloc(sz, GFP_KERNEL);
if (!vol_pg0) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
kfree(vol_pg0);
return 1;
}
@@ -2215,16 +2203,16 @@ scsih_slave_configure(struct scsi_device *sdev)
raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (!raid_device) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if (_scsih_get_volume_capabilities(ioc, raid_device)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2308,16 +2296,16 @@ scsih_slave_configure(struct scsi_device *sdev)
if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
if (mpt3sas_config_get_volume_handle(ioc, handle,
&volume_handle)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
volume_handle, &volume_wwid)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
}
@@ -2329,9 +2317,9 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device_priv_data->sas_target->sas_address);
if (!pcie_device) {
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2377,9 +2365,9 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device_priv_data->sas_target->sas_address);
if (!sas_device) {
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2515,8 +2503,7 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
desc = "unknown";
break;
}
- pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
- ioc->name, response_code, desc);
+ ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
}
/**
@@ -2640,22 +2627,19 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
lockdep_assert_held(&ioc->tm_cmds.mutex);
if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
- pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
return FAILED;
}
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return FAILED;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "unexpected doorbell active!\n", ioc->name));
+ dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
return (!rc) ? SUCCESS : FAILED;
}
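Note that the removed pr_info() calls in this hunk passed their arguments as __func__, ioc->name, yet with MPT3SAS_FMT expanding to "%s: " (its usual definition) the format consumed the adapter name first, so the old messages printed the function name where the adapter name belonged and vice versa. The ioc_info() form fixes the ordering as a side effect; the same inverted ordering recurs in several dewtprintk() hunks below and is repaired the same way:

	/* Old call after macro expansion: the two %s arguments are
	 * swapped relative to the format string. */
	pr_info("%s: " "%s: tm_cmd busy!!!\n", __func__, ioc->name);

	/* New call: the wrapper supplies ioc->name first, then __func__. */
	ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);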
@@ -2669,14 +2653,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return FAILED;
}
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
- ioc->name, handle, type, smid_task, timeout, tr_method));
+ dtmprintk(ioc,
+ ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
+ handle, type, smid_task, timeout, tr_method));
ioc->tm_cmds.status = MPT3_CMD_PENDING;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->tm_cmds.smid = smid;
@@ -2709,11 +2692,11 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
mpi_reply = ioc->tm_cmds.reply;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
- "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dtmprintk(ioc,
+ ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
if (ioc->logging_level & MPT_DEBUG_TM) {
_scsih_response_code(ioc, mpi_reply->ResponseCode);
if (mpi_reply->IOCStatus)
@@ -3060,13 +3043,11 @@ scsih_host_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
int r, retval;
- pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
- ioc->name, scmd);
+ ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
scsi_print_command(scmd);
if (ioc->is_driver_loading || ioc->remove_host) {
- pr_info(MPT3SAS_FMT "Blocking the host reset\n",
- ioc->name);
+ ioc_info(ioc, "Blocking the host reset\n");
r = FAILED;
goto out;
}
@@ -3074,8 +3055,8 @@ scsih_host_reset(struct scsi_cmnd *scmd)
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
r = (retval < 0) ? FAILED : SUCCESS;
out:
- pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
- ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ ioc_info(ioc, "host reset: %s scmd(%p)\n",
+ r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
return r;
}
@@ -3567,18 +3548,16 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
u8 tr_method = 0;
if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery: handle(0x%04x)\n",
- __func__, ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
+ __func__, handle));
return;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational: handle(0x%04x)\n",
- __func__, ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
+ __func__, handle));
return;
}
@@ -3614,39 +3593,31 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
}
if (sas_target_priv_data) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle,
- (unsigned long long)sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle, (u64)sas_address));
if (sas_device) {
if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag:enclosure logical "
- "id(0x%016llx), slot(%d)\n", ioc->name,
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot));
if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: enclosure "
- "level(0x%04x), connector name( %s)\n",
- ioc->name, sas_device->enclosure_level,
- sas_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name));
} else if (pcie_device) {
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: logical "
- "id(0x%016llx), slot(%d)\n", ioc->name,
- (unsigned long long)
- pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag:, enclosure "
- "level(0x%04x), "
- "connector name( %s)\n", ioc->name,
- pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
}
_scsih_ublock_io_device(ioc, sas_address);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
@@ -3660,16 +3631,15 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
goto out;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_tr_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_tr_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3717,39 +3687,39 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
struct _sc_list *delayed_sc;
if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery\n", __func__,
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
return 1;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational\n", __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational\n",
+ __func__));
return 1;
}
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
handle = le16_to_cpu(mpi_request_tm->DevHandle);
if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
- dewtprintk(ioc, pr_err(MPT3SAS_FMT
- "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
- ioc->name, handle,
- le16_to_cpu(mpi_reply->DevHandle), smid));
+ dewtprintk(ioc,
+ ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
return 0;
}
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
if (!smid_sas_ctrl) {
@@ -3759,16 +3729,15 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
INIT_LIST_HEAD(&delayed_sc->list);
delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:sc:handle(0x%04x), (open)\n",
- ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
+ handle));
return _scsih_check_for_pending_tm(ioc, smid);
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid_sas_ctrl,
- ioc->tm_sas_control_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
@@ -3803,20 +3772,19 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (likely(mpi_reply)) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_complete:handle(0x%04x), (open) "
- "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
if (le16_to_cpu(mpi_reply->IOCStatus) ==
MPI2_IOCSTATUS_SUCCESS) {
clear_bit(le16_to_cpu(mpi_reply->DevHandle),
ioc->device_remove_in_progress);
}
} else {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
}
return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
}
@@ -3839,9 +3807,9 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct _tr_list *delayed_tr;
if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host reset in progress!\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host reset in progress!\n",
+ __func__));
return;
}
@@ -3853,16 +3821,15 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
return;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_tr_volume_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_tr_volume_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3892,33 +3859,32 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host reset in progress!\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host reset in progress!\n",
+ __func__));
return 1;
}
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
handle = le16_to_cpu(mpi_request_tm->DevHandle);
if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
- dewtprintk(ioc, pr_err(MPT3SAS_FMT
- "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
- ioc->name, handle,
- le16_to_cpu(mpi_reply->DevHandle), smid));
+ dewtprintk(ioc,
+ ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ handle, le16_to_cpu(mpi_reply->DevHandle),
+ smid));
return 0;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
return _scsih_check_for_pending_tm(ioc, smid);
}
@@ -3948,10 +3914,9 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
- ioc->name, le16_to_cpu(event), smid,
- ioc->base_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
+ le16_to_cpu(event), smid, ioc->base_cb_idx));
ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
@@ -3981,21 +3946,21 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
unsigned long flags;
if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host has been removed\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host has been removed\n",
+ __func__));
return;
} else if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
return;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational\n",
+ __func__));
return;
}
@@ -4007,10 +3972,9 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_sas_control_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_sas_control_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
@@ -4171,8 +4135,8 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
expander_handle) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting ignoring flag\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting ignoring flag\n"));
fw_event->ignore = 1;
}
}
@@ -4243,9 +4207,8 @@ _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
switch_handle) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting ignoring flag for switch event\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting ignoring flag for switch event\n"));
fw_event->ignore = 1;
}
}
@@ -4274,10 +4237,9 @@ _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_target_priv_data =
raid_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: handle(0x%04x), "
- "wwid(0x%016llx)\n", ioc->name, handle,
- (unsigned long long) raid_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
+ handle, (u64)raid_device->wwid));
}
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
@@ -4379,9 +4341,9 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
} else
_scsih_tm_tr_send(ioc, handle);
}
@@ -4424,15 +4386,14 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
Mpi2EventDataTemperature_t *event_data)
{
if (ioc->temp_sensors_count >= event_data->SensorNum) {
- pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s"
- " exceeded for Sensor: %d !!!\n", ioc->name,
- ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
- ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
- ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
- ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
- event_data->SensorNum);
- pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n",
- ioc->name, event_data->CurrentTemperature);
+ ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
+ le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
+ le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
+ le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
+ le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
+ event_data->SensorNum);
+ ioc_err(ioc, "Current Temp In Celsius: %d\n",
+ event_data->CurrentTemperature);
}
}
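Dropping the explicit == 1 / == 2 / == 4 / == 8 comparisons in the temperature-threshold hunk above changes nothing functionally: masking with a single bit can only yield zero or that bit, so testing the mask for truth is equivalent to the equality test.

	u16 status = le16_to_cpu(event_data->Status);
	/* Old form: ((status & 0x2) == 2) ? "1 " : " "
	 * New form:  (status & 0x2)       ? "1 " : " "
	 * For a single-bit mask B, (x & B) is either 0 or B, so both
	 * ternaries select the same string. */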
@@ -4480,8 +4441,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
scmd->result = DID_RESET << 16;
scmd->scsi_done(scmd);
}
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
- ioc->name, count));
+ dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}
/**
@@ -4680,8 +4640,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
_scsih_set_satl_pending(scmd, false);
goto out;
}
@@ -4919,37 +4878,28 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
scsi_print_command(scmd);
if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
- pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
- device_str, (unsigned long long)priv_target->sas_address);
+ ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
+ device_str, (u64)priv_target->sas_address);
} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
if (pcie_device) {
- pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
- ioc->name,
- (unsigned long long)pcie_device->wwid,
- pcie_device->port_num);
+ ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
+ (u64)pcie_device->wwid, pcie_device->port_num);
if (pcie_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "\tenclosure logical id(0x%016llx), "
- "slot(%d)\n", ioc->name,
- (unsigned long long)
- pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
if (pcie_device->connector_name[0])
- pr_info(MPT3SAS_FMT
- "\tenclosure level(0x%04x),"
- "connector name( %s)\n",
- ioc->name, pcie_device->enclosure_level,
- pcie_device->connector_name);
+ ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
pcie_device_put(pcie_device);
}
} else {
sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
if (sas_device) {
- pr_warn(MPT3SAS_FMT
- "\tsas_address(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->sas_address, sas_device->phy);
+ ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+ (u64)sas_device->sas_address, sas_device->phy);
_scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL);
@@ -4958,30 +4908,23 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
}
}
- pr_warn(MPT3SAS_FMT
- "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->DevHandle),
- desc_ioc_state, ioc_status, smid);
- pr_warn(MPT3SAS_FMT
- "\trequest_len(%d), underflow(%d), resid(%d)\n",
- ioc->name, scsi_bufflen(scmd), scmd->underflow,
- scsi_get_resid(scmd));
- pr_warn(MPT3SAS_FMT
- "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->TaskTag),
- le32_to_cpu(mpi_reply->TransferCount), scmd->result);
- pr_warn(MPT3SAS_FMT
- "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
- ioc->name, desc_scsi_status,
- scsi_status, desc_scsi_state, scsi_state);
+ ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
+ le16_to_cpu(mpi_reply->DevHandle),
+ desc_ioc_state, ioc_status, smid);
+ ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
+ scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
+ ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
+ le16_to_cpu(mpi_reply->TaskTag),
+ le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+ ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
+ desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
struct sense_info data;
_scsih_normalize_sense(scmd->sense_buffer, &data);
- pr_warn(MPT3SAS_FMT
- "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
- ioc->name, data.skey,
- data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+ data.skey, data.asc, data.ascq,
+ le32_to_cpu(mpi_reply->SenseCount));
}
if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
response_info = le32_to_cpu(mpi_reply->ResponseInfo);
@@ -5016,17 +4959,17 @@ _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
&mpi_request)) != 0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
sas_device->pfa_led_on = 1;
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
goto out;
}
out:
@@ -5056,16 +4999,16 @@ _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
&mpi_request)) != 0) {
- printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, printk(MPT3SAS_FMT
- "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
return;
}
}
@@ -5133,8 +5076,8 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
event_reply = kzalloc(sz, GFP_KERNEL);
if (!event_reply) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5424,16 +5367,16 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
u16 attached_handle;
u8 link_rate;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "updating handles for sas_host(0x%016llx)\n",
- ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+ dtmprintk(ioc,
+ ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
+ (u64)ioc->sas_hba.sas_address));
sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
* sizeof(Mpi2SasIOUnit0PhyData_t));
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -5483,15 +5426,15 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
if (!num_phys) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc->sas_hba.phy = kcalloc(num_phys,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!ioc->sas_hba.phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc->sas_hba.num_phys = num_phys;
@@ -5501,21 +5444,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
sizeof(Mpi2SasIOUnit0PhyData_t));
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
sas_iounit_pg0, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5524,21 +5467,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5557,15 +5500,15 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
i))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5579,18 +5522,17 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
}
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc->sas_hba.enclosure_handle =
le16_to_cpu(sas_device_pg0.EnclosureHandle);
ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- pr_info(MPT3SAS_FMT
- "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
- ioc->name, ioc->sas_hba.handle,
- (unsigned long long) ioc->sas_hba.sas_address,
- ioc->sas_hba.num_phys) ;
+ ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ ioc->sas_hba.handle,
+ (u64)ioc->sas_hba.sas_address,
+ ioc->sas_hba.num_phys);
if (ioc->sas_hba.enclosure_handle) {
if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -5639,16 +5581,16 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -5656,8 +5598,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
!= 0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
if (sas_address_parent != ioc->sas_hba.sas_address) {
@@ -5684,8 +5626,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_expander = kzalloc(sizeof(struct _sas_node),
GFP_KERNEL);
if (!sas_expander) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -5694,18 +5636,17 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_expander->sas_address_parent = sas_address_parent;
sas_expander->sas_address = sas_address;
- pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
- " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
- handle, parent_handle, (unsigned long long)
- sas_expander->sas_address, sas_expander->num_phys);
+ ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ handle, parent_handle,
+ (u64)sas_expander->sas_address, sas_expander->num_phys);
if (!sas_expander->num_phys)
goto out_fail;
sas_expander->phy = kcalloc(sas_expander->num_phys,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!sas_expander->phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5714,8 +5655,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
sas_address_parent);
if (!mpt3sas_port) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5724,8 +5665,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
for (i = 0 ; i < sas_expander->num_phys ; i++) {
if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
&expander_pg1, i, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5735,8 +5676,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if ((mpt3sas_transport_add_expander_phy(ioc,
&sas_expander->phy[i], expander_pg1,
sas_expander->parent_dev))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5883,9 +5824,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
if (!rc)
return 0;
- pr_err(MPT3SAS_FMT
- "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
- ioc->name, desc, (unsigned long long)sas_address, handle);
+ ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+ desc, (u64)sas_address, handle);
return rc;
}
@@ -5979,9 +5919,8 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT
- "device is not present handle(0x%04x), flags!!!\n",
- ioc->name, handle);
+ ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
+ handle);
goto out_unlock;
}
@@ -6028,16 +5967,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -6051,8 +5990,8 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
- ioc->name, handle);
+ ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+ handle);
return -1;
}
@@ -6074,16 +6013,15 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
mpt3sas_scsih_enclosure_find_by_handle(ioc,
le16_to_cpu(sas_device_pg0.EnclosureHandle));
if (enclosure_dev == NULL)
- pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
- "doesn't match with enclosure device!\n",
- ioc->name, sas_device_pg0.EnclosureHandle);
+ ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+ sas_device_pg0.EnclosureHandle);
}
sas_device = kzalloc(sizeof(struct _sas_device),
GFP_KERNEL);
if (!sas_device) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
@@ -6092,8 +6030,8 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
if (_scsih_get_sas_address(ioc,
le16_to_cpu(sas_device_pg0.ParentDevHandle),
&sas_device->sas_address_parent) != 0)
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_device->enclosure_handle =
le16_to_cpu(sas_device_pg0.EnclosureHandle);
if (sas_device->enclosure_handle != 0)
@@ -6158,11 +6096,10 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->pfa_led_on = 0;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__,
- sas_device->handle, (unsigned long long)
- sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__,
+ sas_device->handle, (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
@@ -6180,18 +6117,15 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->sas_address,
sas_device->sas_address_parent);
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, sas_device->handle,
- (unsigned long long) sas_device->sas_address);
+ ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_device->handle, (u64)sas_device->sas_address);
_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__,
- sas_device->handle, (unsigned long long)
- sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__,
+ sas_device->handle, (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
}
@@ -6231,8 +6165,7 @@ _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
status_str = "unknown status";
break;
}
- pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
- ioc->name, status_str);
+ ioc_info(ioc, "sas topology change: (%s)\n", status_str);
pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
"start_phy(%02d), count(%d)\n",
le16_to_cpu(event_data->ExpanderDevHandle),
@@ -6309,8 +6242,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
_scsih_sas_host_refresh(ioc);
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring expander event\n", ioc->name));
+ dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
return 0;
}
@@ -6339,8 +6271,8 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
/* handle siblings events */
for (i = 0; i < event_data->NumEntries; i++) {
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring expander event\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "ignoring expander event\n"));
return 0;
}
if (ioc->remove_host || ioc->pci_error_recovery)
@@ -6464,15 +6396,14 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
reason_str = "unknown reason";
break;
}
- pr_info(MPT3SAS_FMT "device status change: (%s)\n"
- "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
- ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- le16_to_cpu(event_data->TaskTag));
+ ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+ reason_str, le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ le16_to_cpu(event_data->TaskTag));
if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
- pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
- event_data->ASC, event_data->ASCQ);
- pr_info("\n");
+ pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
+ event_data->ASC, event_data->ASCQ);
+ pr_cont("\n");
}
/**
@@ -6605,20 +6536,16 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
desc = "nvme failure status";
break;
default:
- pr_err(MPT3SAS_FMT
- " NVMe discovery error(0x%02x): wwid(0x%016llx),"
- "handle(0x%04x)\n", ioc->name, access_status,
- (unsigned long long)wwid, handle);
+ ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
+ access_status, (u64)wwid, handle);
return rc;
}
if (!rc)
return rc;
- pr_info(MPT3SAS_FMT
- "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
- ioc->name, desc,
- (unsigned long long)wwid, handle);
+ ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
+ desc, (u64)wwid, handle);
return rc;
}
@@ -6634,22 +6561,22 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
{
struct MPT3SAS_TARGET *sas_target_priv_data;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)
- pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__,
- pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
+ __func__,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
if (pcie_device->starget && pcie_device->starget->hostdata) {
sas_target_priv_data = pcie_device->starget->hostdata;
@@ -6658,39 +6585,35 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), wwid (0x%016llx)\n",
- ioc->name, pcie_device->handle,
- (unsigned long long) pcie_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ pcie_device->handle, (u64)pcie_device->wwid);
if (pcie_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "removing : enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
if (pcie_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "removing: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, pcie_device->enclosure_level,
- pcie_device->connector_name);
+ ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
if (pcie_device->starget)
scsi_remove_target(&pcie_device->starget->dev);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)
- pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
+ __func__,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
kfree(pcie_device->serial_number);
}
@@ -6760,9 +6683,8 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* check if device is present */
if (!(le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
- pr_info(MPT3SAS_FMT
- "device is not present handle(0x%04x), flags!!!\n",
- ioc->name, handle);
+ ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
+ handle);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
pcie_device_put(pcie_device);
return;
@@ -6806,16 +6728,15 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
@@ -6825,9 +6746,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* check if device is present */
if (!(le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT
- "device is not present handle(0x04%x)!!!\n",
- ioc->name, handle);
+ ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+ handle);
return 0;
}
@@ -6848,8 +6768,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
if (!pcie_device) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
@@ -6890,16 +6810,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* TODO -- Add device name once FW supports it */
if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
&pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
kfree(pcie_device);
return 0;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
kfree(pcie_device);
return 0;
}
@@ -6956,8 +6876,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
status_str = "unknown status";
break;
}
- pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
- ioc->name, status_str);
+ ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
"start_port(%02d), count(%d)\n",
le16_to_cpu(event_data->SwitchDevHandle),
@@ -7030,16 +6949,15 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
return;
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
- ioc->name));
+ dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
return;
}
/* handle siblings events */
for (i = 0; i < event_data->NumEntries; i++) {
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring switch event\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "ignoring switch event\n"));
return;
}
if (ioc->remove_host || ioc->pci_error_recovery)
@@ -7084,9 +7002,9 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (!test_bit(handle, ioc->pend_os_device_add))
break;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) device not found: convert "
- "event to a device add\n", ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
+ handle));
event_data->PortEntry[i].PortStatus &= 0xF0;
event_data->PortEntry[i].PortStatus |=
MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
@@ -7169,15 +7087,15 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
break;
}
- pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
- "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
- ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->WWID),
- le16_to_cpu(event_data->TaskTag));
+ ioc_info(ioc, "PCIE device status change: (%s)\n"
+ "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
+ reason_str, le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->WWID),
+ le16_to_cpu(event_data->TaskTag));
if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
- pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+ pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
event_data->ASC, event_data->ASCQ);
- pr_info("\n");
+ pr_cont("\n");
}
/**
@@ -7255,12 +7173,12 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
break;
}
- pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
- "\thandle(0x%04x), enclosure logical id(0x%016llx)"
- " number slots(%d)\n", ioc->name, reason_str,
- le16_to_cpu(event_data->EnclosureHandle),
- (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
- le16_to_cpu(event_data->StartSlot));
+ ioc_info(ioc, "enclosure status change: (%s)\n"
+ "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
+ reason_str,
+ le16_to_cpu(event_data->EnclosureHandle),
+ (u64)le64_to_cpu(event_data->EnclosureLogicalID),
+ le16_to_cpu(event_data->StartSlot));
}
/**
@@ -7298,9 +7216,8 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
kzalloc(sizeof(struct _enclosure_node),
GFP_KERNEL);
if (!enclosure_dev) {
- pr_info(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_info(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -7358,10 +7275,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
u8 task_abort_retries;
mutex_lock(&ioc->tm_cmds.mutex);
- pr_info(MPT3SAS_FMT
- "%s: enter: phy number(%d), width(%d)\n",
- ioc->name, __func__, event_data->PhyNum,
- event_data->PortWidth);
+ ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
+ __func__, event_data->PhyNum, event_data->PortWidth);
_scsih_block_io_all_device(ioc);
@@ -7371,12 +7286,12 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
/* sanity checks for retrying this loop */
if (max_retries++ == 5) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
- ioc->name, __func__));
+ dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
goto out;
} else if (max_retries > 1)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
- ioc->name, __func__, max_retries - 1));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: %d retry\n",
+ __func__, max_retries - 1));
termination_count = 0;
query_count = 0;
@@ -7443,9 +7358,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
task_abort_retries = 0;
tm_retry:
if (task_abort_retries++ == 60) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: ABORT_TASK: giving up\n", ioc->name,
- __func__));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
+ __func__));
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
goto broadcast_aen_retry;
}
@@ -7474,9 +7389,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
}
if (ioc->broadcast_aen_pending) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: loop back due to pending AEN\n",
- ioc->name, __func__));
+ dewtprintk(ioc,
+ ioc_info(ioc,
+ "%s: loop back due to pending AEN\n",
+ __func__));
ioc->broadcast_aen_pending = 0;
goto broadcast_aen_retry;
}
@@ -7485,9 +7401,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
out_no_lock:
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - exit, query_count = %d termination_count = %d\n",
- ioc->name, __func__, query_count, termination_count));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
+ __func__, query_count, termination_count));
ioc->broadcast_aen_busy = 0;
if (!ioc->shost_recovery)
@@ -7509,13 +7425,13 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
- pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
- (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
- "start" : "stop");
+ ioc_info(ioc, "discovery event: (%s)",
+ event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+ "start" : "stop");
if (event_data->DiscoveryStatus)
- pr_info("discovery_status(0x%08x)",
- le32_to_cpu(event_data->DiscoveryStatus));
- pr_info("\n");
+ pr_cont("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_cont("\n");
}
if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
@@ -7545,20 +7461,16 @@ _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
switch (event_data->ReasonCode) {
case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
- pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
- "(handle:0x%04x, sas_address:0x%016llx,"
- "physical_port:0x%02x) has failed",
- ioc->name, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- event_data->PhysicalPort);
+ ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
+ le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ event_data->PhysicalPort);
break;
case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
- pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
- "(handle:0x%04x, sas_address:0x%016llx,"
- "physical_port:0x%02x) has timed out",
- ioc->name, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- event_data->PhysicalPort);
+ ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
+ le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ event_data->PhysicalPort);
break;
default:
break;
@@ -7581,11 +7493,10 @@ _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
return;
- pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
- ioc->name,
- (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
- "started" : "completed",
- event_data->Flags);
+ ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
+ (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+ "started" : "completed",
+ event_data->Flags);
if (event_data->EnumerationStatus)
pr_cont("enumeration_status(0x%08x)",
le32_to_cpu(event_data->EnumerationStatus));
@@ -7617,8 +7528,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
mutex_lock(&ioc->scsih_cmds.mutex);
if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -7626,8 +7536,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
rc = -EAGAIN;
goto out;
@@ -7641,9 +7550,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
mpi_request->PhysDiskNum = phys_disk_num;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
- "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
- handle, phys_disk_num));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
+ handle, phys_disk_num));
init_completion(&ioc->scsih_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
@@ -7668,15 +7577,13 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
log_info = 0;
ioc_status &= MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "IR RAID_ACTION: failed: ioc_status(0x%04x), "
- "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
- log_info));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
+ ioc_status, log_info));
rc = -EFAULT;
} else
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "IR RAID_ACTION: completed successfully\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
}
out:
@@ -7721,9 +7628,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
if (!wwid) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -7736,9 +7642,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
if (!raid_device) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -7781,9 +7686,8 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_target_priv_data = starget->hostdata;
sas_target_priv_data->deleted = 1;
}
- pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, raid_device->handle,
- (unsigned long long) raid_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ raid_device->handle, (u64)raid_device->wwid);
list_del(&raid_device->list);
kfree(raid_device);
}
@@ -7925,16 +7829,16 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -7964,10 +7868,10 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
- pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
- ioc->name, (le32_to_cpu(event_data->Flags) &
- MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
- "foreign" : "native", event_data->NumElements);
+ ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
+ le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
+ "foreign" : "native",
+ event_data->NumElements);
for (i = 0; i < event_data->NumElements; i++, element++) {
switch (element->ReasonCode) {
case MPI2_EVENT_IR_CHANGE_RC_ADDED:
@@ -8123,10 +8027,11 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
handle = le16_to_cpu(event_data->VolDevHandle);
state = le32_to_cpu(event_data->NewValue);
if (!ioc->hide_ir_msg)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
- ioc->name, __func__, handle,
- le32_to_cpu(event_data->PreviousValue), state));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ __func__, handle,
+ le32_to_cpu(event_data->PreviousValue),
+ state));
switch (state) {
case MPI2_RAID_VOL_STATE_MISSING:
case MPI2_RAID_VOL_STATE_FAILED:
@@ -8146,17 +8051,15 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
if (!wwid) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
break;
}
raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
if (!raid_device) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
break;
}
@@ -8207,10 +8110,11 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
state = le32_to_cpu(event_data->NewValue);
if (!ioc->hide_ir_msg)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
- ioc->name, __func__, handle,
- le32_to_cpu(event_data->PreviousValue), state));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ __func__, handle,
+ le32_to_cpu(event_data->PreviousValue),
+ state));
switch (state) {
case MPI2_RAID_PD_STATE_ONLINE:
@@ -8231,16 +8135,16 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
&sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -8294,11 +8198,10 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
if (!reason_str)
return;
- pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
- "\thandle(0x%04x), percent complete(%d)\n",
- ioc->name, reason_str,
- le16_to_cpu(event_data->VolDevHandle),
- event_data->PercentComplete);
+ ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
+ reason_str,
+ le16_to_cpu(event_data->VolDevHandle),
+ event_data->PercentComplete);
}
/**
@@ -8379,9 +8282,8 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
mpt3sas_scsih_enclosure_find_by_handle(ioc,
le16_to_cpu(sas_device_pg0->EnclosureHandle));
if (enclosure_dev == NULL)
- pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
- "doesn't match with enclosure device!\n",
- ioc->name, sas_device_pg0->EnclosureHandle);
+ ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+ sas_device_pg0->EnclosureHandle);
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
@@ -8475,8 +8377,7 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
enclosure_dev =
kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
if (!enclosure_dev) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
return;
}
@@ -8513,7 +8414,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
u16 handle;
u32 device_info;
- pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+ ioc_info(ioc, "search for end-devices: start\n");
if (list_empty(&ioc->sas_device_list))
goto out;
@@ -8534,8 +8435,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
}
out:
- pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
- ioc->name);
+ ioc_info(ioc, "search for end-devices: complete\n");
}
/**
@@ -8628,7 +8528,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
u16 handle;
u32 device_info;
- pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+ ioc_info(ioc, "search for end-devices: start\n");
if (list_empty(&ioc->pcie_device_list))
goto out;
@@ -8640,10 +8540,9 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from %s: "
- "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
- __func__, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ __func__, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(pcie_device_pg0.DevHandle);
@@ -8653,8 +8552,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
}
out:
- pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
- ioc->name);
+ ioc_info(ioc, "search for PCIe end-devices: complete\n");
}
/**
@@ -8735,8 +8633,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->ir_firmware)
return;
- pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
- ioc->name);
+ ioc_info(ioc, "search for raid volumes: start\n");
if (list_empty(&ioc->raid_device_list))
goto out;
@@ -8779,8 +8676,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
}
}
out:
- pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
- ioc->name);
+ ioc_info(ioc, "search for responding raid volumes: complete\n");
}
/**
@@ -8852,7 +8748,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
u64 sas_address;
u16 handle;
- pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
+ ioc_info(ioc, "search for expanders: start\n");
if (list_empty(&ioc->sas_expander_list))
goto out;
@@ -8875,7 +8771,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
}
out:
- pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
+ ioc_info(ioc, "search for expanders: complete\n");
}
/**
@@ -8893,12 +8789,10 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
unsigned long flags;
LIST_HEAD(head);
- pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: start\n");
/* removing unresponding end devices */
- pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: end-devices\n");
/*
* Iterate, pulling off devices marked as non-responding. We become the
* owner for the reference the list had on any object we prune.
@@ -8922,9 +8816,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
sas_device_put(sas_device);
}
- pr_info(MPT3SAS_FMT
- " Removing unresponding devices: pcie end-devices\n"
- , ioc->name);
+ ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
INIT_LIST_HEAD(&head);
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
list_for_each_entry_safe(pcie_device, pcie_device_next,
@@ -8944,8 +8836,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
/* removing unresponding volumes */
if (ioc->ir_firmware) {
- pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: volumes\n");
list_for_each_entry_safe(raid_device, raid_device_next,
&ioc->raid_device_list, list) {
if (!raid_device->responding)
@@ -8957,8 +8848,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
}
/* removing unresponding expanders */
- pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: expanders\n");
spin_lock_irqsave(&ioc->sas_node_lock, flags);
INIT_LIST_HEAD(&tmp_list);
list_for_each_entry_safe(sas_expander, sas_expander_next,
@@ -8974,8 +8864,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
_scsih_expander_node_remove(ioc, sas_expander);
}
- pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: complete\n");
/* unblock devices */
_scsih_ublock_io_all_device(ioc);
@@ -8992,8 +8881,8 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
for (i = 0 ; i < sas_expander->num_phys ; i++) {
if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
&expander_pg1, i, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -9029,11 +8918,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
u8 retry_count;
unsigned long flags;
- pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
+ ioc_info(ioc, "scan devices: start\n");
_scsih_sas_host_refresh(ioc);
- pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: expanders start\n");
/* expanders */
handle = 0xFFFF;
@@ -9042,10 +8931,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -9057,25 +8944,22 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
_scsih_refresh_expander_links(ioc, expander_device,
handle);
else {
- pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(expander_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(expander_pg0.SASAddress));
_scsih_expander_add(ioc, handle);
- pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(expander_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(expander_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: expanders complete\n");
if (!ioc->ir_firmware)
goto skip_to_sas;
- pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: phys disk start\n");
/* phys disk */
phys_disk_num = 0xFF;
@@ -9085,10 +8969,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
phys_disk_num = pd_pg0.PhysDiskNum;
@@ -9105,19 +8987,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle,
&sas_address)) {
- pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
- " handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
mpt3sas_transport_update_links(ioc, sas_address,
handle, sas_device_pg0.PhyNum,
MPI2_SAS_NEG_LINK_RATE_1_5);
@@ -9131,17 +9010,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1)) {
ssleep(1);
}
- pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
- " handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: phys disk complete\n");
- pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: volumes start\n");
/* volumes */
handle = 0xFFFF;
@@ -9150,10 +9027,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(volume_pg1.DevHandle);
@@ -9170,10 +9045,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
@@ -9182,23 +9055,19 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
element.VolDevHandle = volume_pg1.DevHandle;
- pr_info(MPT3SAS_FMT
- "\tBEFORE adding volume: handle (0x%04x)\n",
- ioc->name, volume_pg1.DevHandle);
+ ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
+ volume_pg1.DevHandle);
_scsih_sas_volume_add(ioc, &element);
- pr_info(MPT3SAS_FMT
- "\tAFTER adding volume: handle (0x%04x)\n",
- ioc->name, volume_pg1.DevHandle);
+ ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
+ volume_pg1.DevHandle);
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: volumes complete\n");
skip_to_sas:
- pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: end devices start\n");
/* sas devices */
handle = 0xFFFF;
@@ -9208,10 +9077,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
- " ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(sas_device_pg0.DevHandle);
@@ -9226,10 +9093,9 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
}
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
- pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
mpt3sas_transport_update_links(ioc, sas_address, handle,
sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
retry_count = 0;
@@ -9241,16 +9107,13 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
0)) {
ssleep(1);
}
- pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
- ioc->name);
- pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: end devices complete\n");
+ ioc_info(ioc, "\tscan devices: pcie end devices start\n");
/* pcie devices */
handle = 0xFFFF;
@@ -9260,10 +9123,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
& MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
- " scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(pcie_device_pg0.DevHandle);
@@ -9280,14 +9141,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
_scsih_pcie_add_device(ioc, handle);
- pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
- "handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
- handle,
- (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
+ ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
+ handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
}
- pr_info(MPT3SAS_FMT "\tpcie devices: pcie end devices complete\n",
- ioc->name);
- pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
+ ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
+ ioc_info(ioc, "scan devices: complete\n");
}
/**
@@ -9298,8 +9156,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
*/
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
/**
@@ -9311,8 +9168,7 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
ioc->scsih_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9340,8 +9196,7 @@ mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
!ioc->sas_hba.num_phys)) {
_scsih_prep_device_scan(ioc);
@@ -9396,9 +9251,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
if (missing_delay[0] != -1 && missing_delay[1] != -1)
mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "port enable: complete from worker thread\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "port enable: complete from worker thread\n"));
break;
case MPT3SAS_TURN_ON_PFA_LED:
_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
@@ -9496,8 +9350,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
@@ -9564,30 +9418,16 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
switch (le32_to_cpu(*log_code)) {
case MPT2_WARPDRIVE_LC_SSDT:
- pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
- "IO Throttling has occurred in the WarpDrive "
- "subsystem. Check WarpDrive documentation for "
- "additional details.\n", ioc->name);
+ ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
break;
case MPT2_WARPDRIVE_LC_SSDLW:
- pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
- "Program/Erase Cycles for the WarpDrive subsystem "
- "in degraded range. Check WarpDrive documentation "
- "for additional details.\n", ioc->name);
+ ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
break;
case MPT2_WARPDRIVE_LC_SSDLF:
- pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
- "There are no Program/Erase Cycles for the "
- "WarpDrive subsystem. The storage device will be "
- "in read-only mode. Check WarpDrive documentation "
- "for additional details.\n", ioc->name);
+ ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
break;
case MPT2_WARPDRIVE_LC_BRMF:
- pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
- "The Backup Rail Monitor has failed on the "
- "WarpDrive subsystem. Check WarpDrive "
- "documentation for additional details.\n",
- ioc->name);
+ ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
break;
}
@@ -9613,9 +9453,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
(Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
switch (ActiveCableEventData->ReasonCode) {
case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
- pr_notice(MPT3SAS_FMT
- "Currently an active cable with ReceptacleID %d\n",
- ioc->name, ActiveCableEventData->ReceptacleID);
+ ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
+ ActiveCableEventData->ReceptacleID);
pr_notice("cannot be powered and devices connected\n");
pr_notice("to this active cable will not be seen\n");
pr_notice("This active cable requires %d mW of power\n",
@@ -9623,9 +9462,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
break;
case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
- pr_notice(MPT3SAS_FMT
- "Currently a cable with ReceptacleID %d\n",
- ioc->name, ActiveCableEventData->ReceptacleID);
+ ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
+ ActiveCableEventData->ReceptacleID);
pr_notice(
"is not running at optimal speed(12 Gb/s rate)\n");
break;
@@ -9640,8 +9478,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
fw_event = alloc_fw_event_work(sz);
if (!fw_event) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
@@ -9690,11 +9528,9 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
sas_expander->sas_address_parent);
- pr_info(MPT3SAS_FMT
- "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name,
- sas_expander->handle, (unsigned long long)
- sas_expander->sas_address);
+ ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_expander->handle, (u64)sas_expander->sas_address);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_del(&sas_expander->list);
@@ -9729,16 +9565,14 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
mutex_lock(&ioc->scsih_cmds.mutex);
if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
goto out;
}
ioc->scsih_cmds.status = MPT3_CMD_PENDING;
smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
goto out;
}
@@ -9751,24 +9585,22 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
if (!ioc->hide_ir_msg)
- pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
+ ioc_info(ioc, "IR shutdown (sending)\n");
init_completion(&ioc->scsih_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
goto out;
}
if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
mpi_reply = ioc->scsih_cmds.reply;
if (!ioc->hide_ir_msg)
- pr_info(MPT3SAS_FMT "IR shutdown "
- "(complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
}
out:
@@ -9817,9 +9649,8 @@ static void scsih_remove(struct pci_dev *pdev)
sas_target_priv_data->deleted = 1;
scsi_remove_target(&raid_device->starget->dev);
}
- pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, raid_device->handle,
- (unsigned long long) raid_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ raid_device->handle, (u64)raid_device->wwid);
_scsih_raid_device_remove(ioc, raid_device);
}
list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
@@ -10230,7 +10061,7 @@ scsih_scan_start(struct Scsi_Host *shost)
rc = mpt3sas_port_enable(ioc);
if (rc != 0)
- pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
+ ioc_info(ioc, "port enable: FAILED\n");
}
/**
@@ -10255,9 +10086,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (time >= (300 * HZ)) {
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
- pr_info(MPT3SAS_FMT
- "port enable: FAILED with timeout (timeout=300s)\n",
- ioc->name);
+ ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
ioc->is_driver_loading = 0;
return 1;
}
@@ -10266,16 +10095,15 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
return 0;
if (ioc->start_scan_failed) {
- pr_info(MPT3SAS_FMT
- "port enable: FAILED with (ioc_status=0x%08x)\n",
- ioc->name, ioc->start_scan_failed);
+ ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
+ ioc->start_scan_failed);
ioc->is_driver_loading = 0;
ioc->wait_for_discovery_to_complete = 0;
ioc->remove_host = 1;
return 1;
}
- pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
+ ioc_info(ioc, "port enable: SUCCESS\n");
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
if (ioc->wait_for_discovery_to_complete) {
@@ -10586,28 +10414,22 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ioc->is_mcpu_endpoint) {
/* mCPU MPI support 64K max IO */
shost->max_sectors = 128;
- pr_info(MPT3SAS_FMT
- "The max_sectors value is set to %d\n",
- ioc->name, shost->max_sectors);
+ ioc_info(ioc, "The max_sectors value is set to %d\n",
+ shost->max_sectors);
} else {
if (max_sectors != 0xFFFF) {
if (max_sectors < 64) {
shost->max_sectors = 64;
- pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
- "for max_sectors, range is 64 to 32767. " \
- "Assigning value of 64.\n", \
- ioc->name, max_sectors);
+ ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
+ max_sectors);
} else if (max_sectors > 32767) {
shost->max_sectors = 32767;
- pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
- "for max_sectors, range is 64 to 32767." \
- "Assigning default value of 32767.\n", \
- ioc->name, max_sectors);
+ ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
+ max_sectors);
} else {
shost->max_sectors = max_sectors & 0xFFFE;
- pr_info(MPT3SAS_FMT
- "The max_sectors value is set to %d\n",
- ioc->name, shost->max_sectors);
+ ioc_info(ioc, "The max_sectors value is set to %d\n",
+ shost->max_sectors);
}
}
}
@@ -10627,16 +10449,16 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->firmware_event_thread = alloc_ordered_workqueue(
ioc->firmware_event_name, 0);
if (!ioc->firmware_event_thread) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rv = -ENODEV;
goto out_thread_fail;
}
ioc->is_driver_loading = 1;
if ((mpt3sas_base_attach(ioc))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rv = -ENODEV;
goto out_attach_fail;
}
@@ -10657,8 +10479,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rv = scsi_add_host(shost, &pdev->dev);
if (rv) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_add_shost_fail;
}
@@ -10695,9 +10517,8 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
flush_scheduled_work();
scsi_block_requests(shost);
device_state = pci_choose_state(pdev, state);
- pr_info(MPT3SAS_FMT
- "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
- ioc->name, pdev, pci_name(pdev), device_state);
+ ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
pci_save_state(pdev);
mpt3sas_base_free_resources(ioc);
@@ -10719,9 +10540,8 @@ scsih_resume(struct pci_dev *pdev)
pci_power_t device_state = pdev->current_state;
int r;
- pr_info(MPT3SAS_FMT
- "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
- ioc->name, pdev, pci_name(pdev), device_state);
+ ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
@@ -10753,8 +10573,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
- ioc->name, state);
+ ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
switch (state) {
case pci_channel_io_normal:
@@ -10791,8 +10610,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
int rc;
- pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
- ioc->name);
+ ioc_info(ioc, "PCI error: slot reset callback!!\n");
ioc->pci_error_recovery = 0;
ioc->pdev = pdev;
@@ -10803,8 +10621,8 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
- (rc == 0) ? "success" : "failed");
+ ioc_warn(ioc, "hard reset: %s\n",
+ (rc == 0) ? "success" : "failed");
if (!rc)
return PCI_ERS_RESULT_RECOVERED;
@@ -10826,9 +10644,8 @@ scsih_pci_resume(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
+ ioc_info(ioc, "PCI error: resume callback!!\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
mpt3sas_base_start_watchdog(ioc);
scsi_unblock_requests(ioc->shost);
}
@@ -10843,8 +10660,7 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
- ioc->name);
+ ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
/* TODO - dump whatever for debugging purposes */
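Note on the recurring pattern in the mpt3sas_scsih.c hunks above: every pr_err/pr_info/pr_warn(MPT3SAS_FMT ..., ioc->name, ...) call becomes ioc_err/ioc_info/ioc_warn(ioc, ...). The wrappers prepend the adapter name once, so call sites shrink and the argument-order mistakes visible in some removed lines elsewhere in this patch (ioc->name and __func__ swapped) cannot recur. A plausible shape for these helpers, assuming definitions along the lines of mpt3sas_base.h (illustrative sketch, not quoted from the header):

/* Illustrative only; the real macros live in mpt3sas_base.h. */
#define ioc_err(ioc, fmt, ...)  pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
#define ioc_warn(ioc, fmt, ...) pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
#define ioc_info(ioc, fmt, ...) pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)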
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index f8cc2677c1cd..6a8a3c09b4b1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -146,25 +146,22 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
u32 ioc_status;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT
- "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n",
- ioc->name, handle, ioc_status,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
return -EIO;
}
@@ -310,16 +307,14 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
mutex_lock(&ioc->transport_cmds.mutex);
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -329,26 +324,22 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -359,9 +350,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
data_out_sz = sizeof(struct rep_manu_request);
data_in_sz = sizeof(struct rep_manu_reply);
- data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
- &data_out_dma);
-
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
+ &data_out_dma, GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -388,16 +378,15 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - send to sas_addr(0x%016llx)\n",
- ioc->name, (unsigned long long)sas_address));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "report_manufacture - send to sas_addr(0x%016llx)\n",
+ (u64)sas_address));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -405,17 +394,16 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
goto issue_host_reset;
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - complete\n", ioc->name));
+ dtransportprintk(ioc, ioc_info(ioc, "report_manufacture - complete\n"));
if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
u8 *tmp;
mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - reply data transfer size(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "report_manufacture - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
sizeof(struct rep_manu_reply))
@@ -439,8 +427,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
manufacture_reply->component_revision_id;
}
} else
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - no reply\n", ioc->name));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "report_manufacture - no reply\n"));
issue_host_reset:
if (issue_reset)
@@ -448,7 +436,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
- pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
data_out, data_out_dma);
mutex_unlock(&ioc->transport_cmds.mutex);
@@ -643,8 +631,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
mpt3sas_port = kzalloc(sizeof(struct _sas_port),
GFP_KERNEL);
if (!mpt3sas_port) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return NULL;
}
@@ -655,22 +643,21 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (!sas_node) {
- pr_err(MPT3SAS_FMT
- "%s: Could not find parent sas_address(0x%016llx)!\n",
- ioc->name, __func__, (unsigned long long)sas_address);
+ ioc_err(ioc, "%s: Could not find parent sas_address(0x%016llx)!\n",
+ __func__, (u64)sas_address);
goto out_fail;
}
if ((_transport_set_identify(ioc, handle,
&mpt3sas_port->remote_identify))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
@@ -687,20 +674,20 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
}
if (!mpt3sas_port->num_phys) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
if (!sas_node->parent_dev) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
port = sas_port_alloc_num(sas_node->parent_dev);
if ((sas_port_add(port))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
@@ -729,17 +716,17 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
sas_device = mpt3sas_get_sdev_by_addr(ioc,
mpt3sas_port->remote_identify.sas_address);
if (!sas_device) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_info(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
goto out_fail;
}
sas_device->pend_sas_rphy_add = 1;
}
if ((sas_rphy_add(rphy))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
}
if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -861,14 +848,14 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
phy = sas_phy_alloc(parent_dev, phy_index);
if (!phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
&mpt3sas_phy->identify))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -890,8 +877,8 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
phy_pg0.ProgrammedLinkRate >> 4);
if ((sas_phy_add(phy))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -929,14 +916,14 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
phy = sas_phy_alloc(parent_dev, phy_index);
if (!phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
&mpt3sas_phy->identify))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -960,8 +947,8 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
expander_pg1.ProgrammedLinkRate >> 4);
if ((sas_phy_add(phy))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -1098,16 +1085,14 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
mutex_lock(&ioc->transport_cmds.mutex);
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1117,26 +1102,22 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1146,7 +1127,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
sz = sizeof(struct phy_error_log_request) +
sizeof(struct phy_error_log_reply);
- data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -1179,17 +1161,16 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
data_out_dma + sizeof(struct phy_error_log_request),
sizeof(struct phy_error_log_reply));
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)phy->identify.sas_address,
- phy->number));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
+ (u64)phy->identify.sas_address,
+ phy->number));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1197,16 +1178,15 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
goto issue_host_reset;
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - complete\n", ioc->name));
+ dtransportprintk(ioc, ioc_info(ioc, "phy_error_log - complete\n"));
if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - reply data transfer size(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
sizeof(struct phy_error_log_reply))
@@ -1215,9 +1195,9 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
phy_error_log_reply = data_out +
sizeof(struct phy_error_log_request);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - function_result(%d)\n",
- ioc->name, phy_error_log_reply->function_result));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - function_result(%d)\n",
+ phy_error_log_reply->function_result));
phy->invalid_dword_count =
be32_to_cpu(phy_error_log_reply->invalid_dword);
@@ -1229,8 +1209,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
be32_to_cpu(phy_error_log_reply->phy_reset_problem);
rc = 0;
} else
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - no reply\n", ioc->name));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - no reply\n"));
issue_host_reset:
if (issue_reset)
@@ -1238,7 +1218,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
- pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+ dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma);
mutex_unlock(&ioc->transport_cmds.mutex);
return rc;
@@ -1273,17 +1253,16 @@ _transport_get_linkerrors(struct sas_phy *phy)
/* get hba phy error logs */
if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
phy->number))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, phy->number,
- le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
+ phy->number,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
phy->running_disparity_error_count =
@@ -1411,16 +1390,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
mutex_lock(&ioc->transport_cmds.mutex);
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1430,26 +1407,22 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1459,7 +1432,8 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
sz = sizeof(struct phy_control_request) +
sizeof(struct phy_control_reply);
- data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -1497,17 +1471,16 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
data_out_dma + sizeof(struct phy_control_request),
sizeof(struct phy_control_reply));
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
- ioc->name, (unsigned long long)phy->identify.sas_address,
- phy->number, phy_operation));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+ (u64)phy->identify.sas_address,
+ phy->number, phy_operation));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1515,16 +1488,15 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
goto issue_host_reset;
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - complete\n", ioc->name));
+ dtransportprintk(ioc, ioc_info(ioc, "phy_control - complete\n"));
if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - reply data transfer size(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
sizeof(struct phy_control_reply))
@@ -1533,14 +1505,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
phy_control_reply = data_out +
sizeof(struct phy_control_request);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - function_result(%d)\n",
- ioc->name, phy_control_reply->function_result));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - function_result(%d)\n",
+ phy_control_reply->function_result));
rc = 0;
} else
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - no reply\n", ioc->name));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - no reply\n"));
issue_host_reset:
if (issue_reset)
@@ -1548,7 +1520,8 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
- pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+ dma_free_coherent(&ioc->pdev->dev, sz, data_out,
+ data_out_dma);
mutex_unlock(&ioc->transport_cmds.mutex);
return rc;
@@ -1591,16 +1564,15 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
mpi_request.PhyNum = phy->number;
if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
return 0;
}
@@ -1647,23 +1619,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
sizeof(Mpi2SasIOUnit0PhyData_t));
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
sas_iounit_pg0, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
@@ -1672,10 +1644,8 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
if (sas_iounit_pg0->PhyData[i].PortFlags &
MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
- pr_err(MPT3SAS_FMT "discovery is active on " \
- "port = %d, phy = %d: unable to enable/disable "
- "phys, try again later!\n", ioc->name,
- sas_iounit_pg0->PhyData[i].Port, i);
+ ioc_err(ioc, "discovery is active on port = %d, phy = %d: unable to enable/disable phys, try again later!\n",
+ sas_iounit_pg0->PhyData[i].Port, i);
discovery_active = 1;
}
}
@@ -1690,23 +1660,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
@@ -1798,23 +1768,23 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
@@ -1833,8 +1803,8 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
sz)) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
@@ -1922,8 +1892,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
unsigned int reslen = 0;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
rc = -EFAULT;
goto job_done;
}
@@ -1933,8 +1902,8 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
goto job_done;
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
- __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n",
+ __func__);
rc = -EAGAIN;
goto out;
}
@@ -1959,26 +1928,22 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto unmap_in;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto unmap_in;
}
@@ -1999,16 +1964,15 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in,
dma_len_in - 4);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - sending smp request\n", ioc->name, __func__));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "%s: sending smp request\n", __func__));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s : timeout\n",
- __func__, ioc->name);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) {
@@ -2018,12 +1982,11 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
}
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - complete\n", ioc->name, __func__));
+ dtransportprintk(ioc, ioc_info(ioc, "%s - complete\n", __func__));
if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) {
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - no reply\n", ioc->name, __func__));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "%s: no reply\n", __func__));
rc = -ENXIO;
goto unmap_in;
}
@@ -2031,9 +1994,9 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
mpi_reply = ioc->transport_cmds.reply;
dtransportprintk(ioc,
- pr_info(MPT3SAS_FMT "%s - reply data transfer size(%d)\n",
- ioc->name, __func__,
- le16_to_cpu(mpi_reply->ResponseDataLength)));
+ ioc_info(ioc, "%s: reply data transfer size(%d)\n",
+ __func__,
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
memcpy(job->reply, mpi_reply, sizeof(*mpi_reply));
job->reply_len = sizeof(*mpi_reply);
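The transport hunks also move from the legacy PCI DMA allocator to the generic API. The old calls were thin compat shims that hardcoded GFP_ATOMIC; spelling out dma_alloc_coherent(..., GFP_KERNEL) drops the deprecated wrapper and relaxes the allocation in these sleepable paths. A sketch of what the compat shims historically looked like (illustrative, in the spirit of the old pci-dma-compat.h):

static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
					 dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}

static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size,
				       void *vaddr, dma_addr_t dma_handle)
{
	dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
}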
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index cae7c1eaef34..6ac453fd5937 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -72,8 +72,7 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
u16 sz, event_data_sz;
unsigned long flags;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
@@ -85,23 +84,23 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
memcpy(&mpi_reply->EventData, event_data,
sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: add to driver event log\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: add to driver event log\n",
+ __func__));
mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
kfree(mpi_reply);
out:
/* clearing the diag_trigger_active flag */
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: clearing diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: clearing diag_trigger_active flag\n",
+ __func__));
ioc->diag_trigger_active = 0;
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -115,22 +114,22 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
{
u8 issue_reset = 0;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
/* release the diag buffer trace */
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: release trace diag buffer\n", ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: release trace diag buffer\n",
+ __func__));
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
_mpt3sas_raise_sigio(ioc, event_data);
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -168,9 +167,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
by_pass_checks:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - trigger_bitmask = 0x%08x\n",
- ioc->name, __func__, trigger_bitmask));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - trigger_bitmask = 0x%08x\n",
+ __func__, trigger_bitmask));
/* don't send trigger if an trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -182,9 +181,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
found_match = 1;
ioc->diag_trigger_active = 1;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
}
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
@@ -202,8 +201,8 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -239,9 +238,9 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
return;
}
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
- ioc->name, __func__, event, log_entry_qualifier));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
+ __func__, event, log_entry_qualifier));
/* don't send trigger if an trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -263,26 +262,26 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
}
found_match = 1;
ioc->diag_trigger_active = 1;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
}
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
if (!found_match)
goto out;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
event_data.u.event.EventValue = event;
event_data.u.event.LogEntryQualifier = log_entry_qualifier;
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -319,9 +318,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
return;
}
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
- ioc->name, __func__, sense_key, asc, ascq));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+ __func__, sense_key, asc, ascq));
/* don't send trigger if an trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -347,9 +346,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
if (!found_match)
goto out;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
event_data.u.scsi.SenseKey = sense_key;
@@ -357,8 +356,8 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
event_data.u.scsi.ASCQ = ascq;
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -393,9 +392,9 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
return;
}
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
- ioc->name, __func__, ioc_status, loginfo));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+ __func__, ioc_status, loginfo));
/* don't send trigger if an trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -420,15 +419,15 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
if (!found_match)
goto out;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
event_data.u.mpi.IOCStatus = ioc_status;
event_data.u.mpi.IocLogInfo = loginfo;
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
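The trigger_diag hunks keep the dTriggerDiagPrintk() wrapper and only replace the statement it guards. The wrapper evaluates its second argument only when the trigger-diag logging bit is enabled, so the converted ioc_info() calls still cost nothing in the default configuration. Roughly (an illustrative sketch, assuming the MPT_DEBUG_TRIGGER_DIAG logging flag used by the mpt3sas debug header):

/* Illustrative sketch of the conditional-debug wrapper. */
#define dTriggerDiagPrintk(IOC, CMD)					\
do {									\
	if ((IOC)->logging_level & MPT_DEBUG_TRIGGER_DIAG)		\
		CMD;							\
} while (0)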
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index b4927f2b7677..cc07ba41f507 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -127,20 +127,17 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
return;
if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "globally as drives are exposed\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as drives are exposed\n");
return;
}
if (mpt3sas_get_num_volumes(ioc) > 1) {
_warpdrive_disable_ddio(ioc);
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "globally as number of drives > 1\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as number of drives > 1\n");
return;
}
if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
&num_pds)) || !num_pds) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "Failure in computing number of drives\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled; failure in computing number of drives\n");
return;
}
@@ -148,15 +145,13 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
sizeof(Mpi2RaidVol0PhysDisk_t));
vol_pg0 = kzalloc(sz, GFP_KERNEL);
if (!vol_pg0) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "Memory allocation failure for RVPG0\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled; memory allocation failure for RVPG0\n");
return;
}
if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "Failure in retrieving RVPG0\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled; failure in retrieving RVPG0\n");
kfree(vol_pg0);
return;
}
@@ -166,10 +161,8 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
* assumed for WARPDRIVE, disable direct I/O
*/
if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
- pr_warn(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x): num_mem=%d, "
- "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
- num_pds, MPT_MAX_WARPDRIVE_PDS);
+ ioc_warn(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): num_mem=%d, max_mem_allowed=%d\n",
+ raid_device->handle, num_pds, MPT_MAX_WARPDRIVE_PDS);
kfree(vol_pg0);
return;
}
@@ -179,22 +172,18 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
vol_pg0->PhysDisk[count].PhysDiskNum) ||
le16_to_cpu(pd_pg0.DevHandle) ==
MPT3SAS_INVALID_DEVICE_HANDLE) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
- "disabled for the drive with handle(0x%04x) member"
- "handle retrieval failed for member number=%d\n",
- ioc->name, raid_device->handle,
- vol_pg0->PhysDisk[count].PhysDiskNum);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle retrieval failed for member number=%d\n",
+ raid_device->handle,
+ vol_pg0->PhysDisk[count].PhysDiskNum);
goto out_error;
}
/* Disable direct I/O if member drive lba exceeds 4 bytes */
dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
if (dev_max_lba >> 32) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
- "disabled for the drive with handle(0x%04x) member"
- " handle (0x%04x) unsupported max lba 0x%016llx\n",
- ioc->name, raid_device->handle,
- le16_to_cpu(pd_pg0.DevHandle),
- (unsigned long long)dev_max_lba);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle (0x%04x) unsupported max lba 0x%016llx\n",
+ raid_device->handle,
+ le16_to_cpu(pd_pg0.DevHandle),
+ (u64)dev_max_lba);
goto out_error;
}
@@ -206,41 +195,36 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
* not RAID0
*/
if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x): type=%d, "
- "s_sz=%uK, blk_size=%u\n", ioc->name,
- raid_device->handle, raid_device->volume_type,
- (le32_to_cpu(vol_pg0->StripeSize) *
- le16_to_cpu(vol_pg0->BlockSize)) / 1024,
- le16_to_cpu(vol_pg0->BlockSize));
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): type=%d, s_sz=%uK, blk_size=%u\n",
+ raid_device->handle, raid_device->volume_type,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024,
+ le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
stripe_exp = find_first_bit(&stripe_sz, 32);
if (stripe_exp == 32) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
- ioc->name, raid_device->handle,
- (le32_to_cpu(vol_pg0->StripeSize) *
- le16_to_cpu(vol_pg0->BlockSize)) / 1024);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+ raid_device->handle,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024);
goto out_error;
}
raid_device->stripe_exponent = stripe_exp;
block_sz = le16_to_cpu(vol_pg0->BlockSize);
block_exp = find_first_bit(&block_sz, 16);
if (block_exp == 16) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) invalid block sz %u\n",
- ioc->name, raid_device->handle,
- le16_to_cpu(vol_pg0->BlockSize));
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid block sz %u\n",
+ raid_device->handle, le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
raid_device->block_exponent = block_exp;
raid_device->direct_io_enabled = 1;
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is Enabled for the drive"
- " with handle(0x%04x)\n", ioc->name, raid_device->handle);
+ ioc_info(ioc, "WarpDrive : Direct IO is Enabled for the drive with handle(0x%04x)\n",
+ raid_device->handle);
/*
* WARPDRIVE: Though the following fields are not used for direct IO,
* stored for future purpose:
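Context for the stripe_exp/block_exp hunks above: find_first_bit() returns the index of the lowest set bit, or the bitmap size (32 or 16 here) when no bit is set, which is exactly what the error branches catch. For power-of-two stripe and block sizes that index equals log2 of the size, so the hot direct-I/O path can replace 64-bit divisions with shifts. A hypothetical helper showing the idea (example_lba_to_stripe is not a driver function):

/* Hypothetical: split a volume LBA into stripe number and offset
 * using the cached exponent instead of a 64-bit division. */
static u64 example_lba_to_stripe(u64 lba, u8 stripe_exp, u64 *offset)
{
	*offset = lba & ((1ULL << stripe_exp) - 1);	/* lba % stripe size */
	return lba >> stripe_exp;			/* lba / stripe size */
}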
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 8c91637cd598..3ac34373746c 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -403,29 +403,14 @@ static int pci_go_64(struct pci_dev *pdev)
{
int rc;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
return rc;
}
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
}
return rc;
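pci_go_64() collapses the paired streaming/coherent mask negotiation into dma_set_mask_and_coherent(), which sets both masks in one call and removes the asymmetric fallback of the old code (64-bit streaming mask with a 32-bit coherent mask). The resulting idiom, as a standalone sketch (example_setup_dma is illustrative, not driver code):

/* Sketch: prefer 64-bit DMA, fall back to 32-bit, fail otherwise. */
static int example_setup_dma(struct device *dev)
{
	int rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	if (rc)
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (rc)
		dev_err(dev, "no usable DMA configuration\n");
	return rc;
}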
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index cff43bd9f675..3df1428df317 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -336,13 +336,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
* DMA-map SMP request, response buffers
*/
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out;
@@ -416,10 +416,10 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
err_out_2:
dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
err_out:
dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return rc;
}
@@ -904,9 +904,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
break;
case SAS_PROTOCOL_SATA:
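The mv_sas.c changes are purely mechanical: the legacy PCI direction constants were defined as aliases of enum dma_data_direction, so swapping the spelling changes no behavior. For reference (values as historically defined; illustrative):

/*
 * Generic direction           Legacy PCI alias (same value)
 * DMA_BIDIRECTIONAL (0)  <->  PCI_DMA_BIDIRECTIONAL
 * DMA_TO_DEVICE     (1)  <->  PCI_DMA_TODEVICE
 * DMA_FROM_DEVICE   (2)  <->  PCI_DMA_FROMDEVICE
 * DMA_NONE          (3)  <->  PCI_DMA_NONE
 */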
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index b3cd9a6b1d30..2458974d1af6 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -143,8 +143,8 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
case RESOURCE_UNCACHED_MEMORY:
size = round_up(size, 8);
- res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
- &res->bus_addr);
+ res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
+ &res->bus_addr, GFP_KERNEL);
if (!res->virt_addr) {
dev_err(&mhba->pdev->dev,
"unable to allocate consistent mem,"
@@ -175,7 +175,7 @@ static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
switch (res->type) {
case RESOURCE_UNCACHED_MEMORY:
- pci_free_consistent(mhba->pdev, res->size,
+ dma_free_coherent(&mhba->pdev->dev, res->size,
res->virt_addr, res->bus_addr);
break;
case RESOURCE_CACHED_MEMORY:
@@ -211,14 +211,14 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
dma_addr_t busaddr;
sg = scsi_sglist(scmd);
- *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
- (int) scmd->sc_data_direction);
+ *sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
+ scmd->sc_data_direction);
if (*sg_count > mhba->max_sge) {
dev_err(&mhba->pdev->dev,
"sg count[0x%x] is bigger than max sg[0x%x].\n",
*sg_count, mhba->max_sge);
- pci_unmap_sg(mhba->pdev, sg, sgnum,
- (int) scmd->sc_data_direction);
+ dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
+ scmd->sc_data_direction);
return -1;
}
for (i = 0; i < *sg_count; i++) {
@@ -246,7 +246,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
if (size == 0)
return 0;
- virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
+ virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+ GFP_KERNEL);
if (!virt_addr)
return -1;
@@ -274,8 +275,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
}
INIT_LIST_HEAD(&cmd->queue_pointer);
- cmd->frame = pci_alloc_consistent(mhba->pdev,
- mhba->ib_max_size, &cmd->frame_phys);
+ cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
+ &cmd->frame_phys, GFP_KERNEL);
if (!cmd->frame) {
dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
" frame,size = %d.\n", mhba->ib_max_size);
@@ -287,7 +288,7 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
dev_err(&mhba->pdev->dev, "failed to allocate memory"
" for internal frame\n");
- pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+ dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
cmd->frame, cmd->frame_phys);
kfree(cmd);
return NULL;
@@ -313,10 +314,10 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
phy_addr = (dma_addr_t) m_sg->baseaddr_l |
(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
- pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+ dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
phy_addr);
}
- pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+ dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
cmd->frame, cmd->frame_phys);
kfree(cmd);
}
@@ -663,16 +664,17 @@ static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
}
}
-static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
+static int mvumi_pci_set_master(struct pci_dev *pdev)
{
- unsigned int ret = 0;
+ int ret = 0;
+
pci_set_master(pdev);
if (IS_DMA64) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
} else
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
return ret;
}
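The unsigned int to int change on mvumi_pci_set_master() matters because dma_set_mask() returns 0 or a negative errno; funnelled through an unsigned type, the error survives an if (ret) test only by accident and defeats any sign-based check. A two-line illustration (not driver code):

unsigned int ret = -EIO;	/* stored as a large positive value (0xFFFFFFFB) */

if (ret < 0)			/* always false for an unsigned type */
	pr_err("never reached\n");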
@@ -771,7 +773,7 @@ static void mvumi_release_fw(struct mvumi_hba *mhba)
mvumi_free_cmds(mhba);
mvumi_release_mem_resource(mhba);
mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
- pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+ dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
mhba->handshake_page, mhba->handshake_page_phys);
kfree(mhba->regs);
pci_release_regions(mhba->pdev);
@@ -1339,9 +1341,9 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
}
if (scsi_bufflen(scmd))
- pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+ dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
- (int) scmd->sc_data_direction);
+ scmd->sc_data_direction);
cmd->scmd->scsi_done(scmd);
mvumi_return_cmd(mhba, cmd);
}
@@ -2148,9 +2150,9 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
scmd->SCp.ptr = NULL;
if (scsi_bufflen(scmd)) {
- pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+ dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
- (int)scmd->sc_data_direction);
+ scmd->sc_data_direction);
}
mvumi_return_cmd(mhba, cmd);
spin_unlock_irqrestore(mhba->shost->host_lock, flags);
@@ -2362,8 +2364,8 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
ret = -ENOMEM;
goto fail_alloc_mem;
}
- mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
- &mhba->handshake_page_phys);
+ mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
+ HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
if (!mhba->handshake_page) {
dev_err(&mhba->pdev->dev,
"failed to allocate memory for handshake\n");
@@ -2383,7 +2385,7 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
fail_ready_state:
mvumi_release_mem_resource(mhba);
- pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+ dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
mhba->handshake_page, mhba->handshake_page_phys);
fail_alloc_page:
kfree(mhba->regs);
@@ -2480,20 +2482,9 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- pci_set_master(pdev);
-
- if (IS_DMA64) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail_set_dma_mask;
- }
- } else {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail_set_dma_mask;
- }
+ ret = mvumi_pci_set_master(pdev);
+ if (ret)
+ goto fail_set_dma_mask;
host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
if (!host) {
@@ -2627,19 +2618,10 @@ static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "enable device failed\n");
return ret;
}
- pci_set_master(pdev);
- if (IS_DMA64) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail;
- }
- } else {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail;
- }
+
+ ret = mvumi_pci_set_master(pdev);
+ if (ret)
+ goto fail;
ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
if (ret)
goto fail;
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
new file mode 100644
index 000000000000..aeb282f617c5
--- /dev/null
+++ b/drivers/scsi/myrb.c
@@ -0,0 +1,3656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/raid_class.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "myrb.h"
+
+static struct raid_template *myrb_raid_template;
+
+static void myrb_monitor(struct work_struct *work);
+static inline void myrb_translate_devstate(void *DeviceState);
+
+static inline int myrb_logical_channel(struct Scsi_Host *shost)
+{
+ return shost->max_channel - 1;
+}
+
+static struct myrb_devstate_name_entry {
+ enum myrb_devstate state;
+ const char *name;
+} myrb_devstate_name_list[] = {
+ { MYRB_DEVICE_DEAD, "Dead" },
+ { MYRB_DEVICE_WO, "WriteOnly" },
+ { MYRB_DEVICE_ONLINE, "Online" },
+ { MYRB_DEVICE_CRITICAL, "Critical" },
+ { MYRB_DEVICE_STANDBY, "Standby" },
+ { MYRB_DEVICE_OFFLINE, "Offline" },
+};
+
+static const char *myrb_devstate_name(enum myrb_devstate state)
+{
+ struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
+ if (entry[i].state == state)
+ return entry[i].name;
+ }
+ return "Unknown";
+}
+
+static struct myrb_raidlevel_name_entry {
+ enum myrb_raidlevel level;
+ const char *name;
+} myrb_raidlevel_name_list[] = {
+ { MYRB_RAID_LEVEL0, "RAID0" },
+ { MYRB_RAID_LEVEL1, "RAID1" },
+ { MYRB_RAID_LEVEL3, "RAID3" },
+ { MYRB_RAID_LEVEL5, "RAID5" },
+ { MYRB_RAID_LEVEL6, "RAID6" },
+ { MYRB_RAID_JBOD, "JBOD" },
+};
+
+static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
+{
+ struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
+ if (entry[i].level == level)
+ return entry[i].name;
+ }
+ return NULL;
+}
+
+/**
+ * myrb_create_mempools - allocates auxiliary data structures
+ *
+ * Return: true on success, false otherwise.
+ */
+static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
+{
+ size_t elem_size, elem_align;
+
+ elem_align = sizeof(struct myrb_sge);
+ elem_size = cb->host->sg_tablesize * elem_align;
+ cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
+ elem_size, elem_align, 0);
+ if (cb->sg_pool == NULL) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate SG pool\n");
+ return false;
+ }
+
+ cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
+ sizeof(struct myrb_dcdb),
+ sizeof(unsigned int), 0);
+ if (!cb->dcdb_pool) {
+ dma_pool_destroy(cb->sg_pool);
+ cb->sg_pool = NULL;
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate DCDB pool\n");
+ return false;
+ }
+
+ snprintf(cb->work_q_name, sizeof(cb->work_q_name),
+ "myrb_wq_%d", cb->host->host_no);
+ cb->work_q = create_singlethread_workqueue(cb->work_q_name);
+ if (!cb->work_q) {
+ dma_pool_destroy(cb->dcdb_pool);
+ cb->dcdb_pool = NULL;
+ dma_pool_destroy(cb->sg_pool);
+ cb->sg_pool = NULL;
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to create workqueue\n");
+ return false;
+ }
+
+ /*
+ * Initialize the Monitoring Timer.
+ */
+ INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
+ queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
+
+ return true;
+}
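
myrb_create_mempools() uses dma_pool_create() for small, fixed-size DMA
buffers (SG lists and DCDBs) allocated once per command. A self-contained
sketch of the pool lifecycle those calls imply, assuming the myrb_sge
element geometry used above (function and pool names are illustrative):

#include <linux/dmapool.h>
#include <linux/pci.h>

static int demo_sg_pool(struct pci_dev *pdev, size_t nsge)
{
	size_t elem_size = nsge * sizeof(struct myrb_sge);
	struct dma_pool *pool;
	dma_addr_t dma;
	void *sgl;

	pool = dma_pool_create("demo_sg", &pdev->dev, elem_size,
			       sizeof(struct myrb_sge), 0);
	if (!pool)
		return -ENOMEM;
	/* the I/O path would use GFP_ATOMIC instead */
	sgl = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (sgl)
		dma_pool_free(pool, sgl, dma);
	dma_pool_destroy(pool);	/* every element must be freed first */
	return 0;
}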
+
+/**
+ * myrb_destroy_mempools - tears down the memory pools for the controller
+ */
+static void myrb_destroy_mempools(struct myrb_hba *cb)
+{
+ cancel_delayed_work_sync(&cb->monitor_work);
+ destroy_workqueue(cb->work_q);
+
+ dma_pool_destroy(cb->sg_pool);
+ dma_pool_destroy(cb->dcdb_pool);
+}
+
+/**
+ * myrb_reset_cmd - reset command block
+ */
+static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
+{
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ memset(mbox, 0, sizeof(union myrb_cmd_mbox));
+ cmd_blk->status = 0;
+}
+
+/**
+ * myrb_qcmd - queues command block for execution
+ */
+static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ void __iomem *base = cb->io_base;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
+
+ cb->write_cmd_mbox(next_mbox, mbox);
+ if (cb->prev_cmd_mbox1->words[0] == 0 ||
+ cb->prev_cmd_mbox2->words[0] == 0)
+ cb->get_cmd_mbox(base);
+ cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
+ cb->prev_cmd_mbox1 = next_mbox;
+ if (++next_mbox > cb->last_cmd_mbox)
+ next_mbox = cb->first_cmd_mbox;
+ cb->next_cmd_mbox = next_mbox;
+}
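
myrb_qcmd() treats the command mailboxes as one contiguous ring: the new
mailbox is copied into the next free slot, the controller is prodded via
get_cmd_mbox() only when one of the two previously used slots has drained
(words[0] == 0), and the cursor wraps from last_cmd_mbox back to
first_cmd_mbox. The wrap step in isolation, as a sketch using the driver's
own field names:

static union myrb_cmd_mbox *demo_ring_advance(struct myrb_hba *cb,
					      union myrb_cmd_mbox *cur)
{
	/* [first_cmd_mbox .. last_cmd_mbox] is one contiguous array */
	if (cur == cb->last_cmd_mbox)
		return cb->first_cmd_mbox;
	return cur + 1;
}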
+
+/**
+ * myrb_exec_cmd - executes command block and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
+ struct myrb_cmdblk *cmd_blk)
+{
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ unsigned long flags;
+
+ cmd_blk->completion = &cmpl;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ cb->qcmd(cb, cmd_blk);
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+
+ WARN_ON(in_interrupt());
+ wait_for_completion(&cmpl);
+ return cmd_blk->status;
+}
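
myrb_exec_cmd() turns the asynchronous mailbox interface into a blocking
call: an on-stack completion is attached to the command block, the command
is queued under queue_lock, and the caller sleeps until the interrupt
handler invokes complete() on cmd_blk->completion. The same pattern reduced
to its essentials (demo names; hardware access elided):

#include <linux/completion.h>
#include <linux/spinlock.h>

struct demo_cmd {
	struct completion *done;
	unsigned short status;
};

static unsigned short demo_exec(struct demo_cmd *cmd, spinlock_t *lock)
{
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;

	cmd->done = &done;
	spin_lock_irqsave(lock, flags);
	/* ...copy the command into the hardware mailbox here... */
	spin_unlock_irqrestore(lock, flags);
	wait_for_completion(&done);	/* ISR calls complete(cmd->done) */
	return cmd->status;
}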
+
+/**
+ * myrb_exec_type3 - executes a type 3 command and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_type3(struct myrb_hba *cb,
+ enum myrb_cmd_opcode op, dma_addr_t addr)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+
+ mutex_lock(&cb->dcmd_mutex);
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_DCMD_TAG;
+ mbox->type3.opcode = op;
+ mbox->type3.addr = addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ return status;
+}
+
+/**
+ * myrb_exec_type3D - executes a type 3D command and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
+ enum myrb_cmd_opcode op, struct scsi_device *sdev,
+ struct myrb_pdev_state *pdev_info)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+ dma_addr_t pdev_info_addr;
+
+ pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
+ sizeof(struct myrb_pdev_state),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
+ return MYRB_STATUS_SUBSYS_FAILED;
+
+ mutex_lock(&cb->dcmd_mutex);
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3D.id = MYRB_DCMD_TAG;
+ mbox->type3D.opcode = op;
+ mbox->type3D.channel = sdev->channel;
+ mbox->type3D.target = sdev->id;
+ mbox->type3D.addr = pdev_info_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
+ sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
+ if (status == MYRB_STATUS_SUCCESS &&
+ mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
+ myrb_translate_devstate(pdev_info);
+
+ return status;
+}
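
myrb_exec_type3D() shows the streaming DMA pattern used for per-command
response buffers: dma_map_single() before issuing, a dma_mapping_error()
check before the bus address is handed to hardware, and dma_unmap_single()
once the command completes. The same round trip in isolation (a sketch;
the controller interaction is elided):

#include <linux/dma-mapping.h>

static int demo_dma_roundtrip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -EIO;
	/* ...program 'addr' into the controller and wait for it... */
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	return 0;	/* the CPU may now safely read 'buf' */
}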
+
+static char *myrb_event_msg[] = {
+ "killed because write recovery failed",
+ "killed because of SCSI bus reset failure",
+ "killed because of double check condition",
+ "killed because it was removed",
+ "killed because of gross error on SCSI chip",
+ "killed because of bad tag returned from drive",
+ "killed because of timeout on SCSI command",
+ "killed because of reset SCSI command issued from system",
+ "killed because busy or parity error count exceeded limit",
+ "killed because of 'kill drive' command from system",
+ "killed because of selection timeout",
+ "killed due to SCSI phase sequence error",
+ "killed due to unknown status",
+};
+
+/**
+ * myrb_get_event - get event log from HBA
+ * @cb: pointer to the hba structure
+ * @event: number of the event
+ *
+ * Executes a type 3E command and logs the event message
+ */
+static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_log_entry *ev_buf;
+ dma_addr_t ev_addr;
+ unsigned short status;
+
+ ev_buf = dma_alloc_coherent(&cb->pdev->dev,
+ sizeof(struct myrb_log_entry),
+ &ev_addr, GFP_KERNEL);
+ if (!ev_buf)
+ return;
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3E.id = MYRB_MCMD_TAG;
+ mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
+ mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
+ mbox->type3E.opqual = 1;
+ mbox->type3E.ev_seq = event;
+ mbox->type3E.addr = ev_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (status != MYRB_STATUS_SUCCESS)
+ shost_printk(KERN_INFO, cb->host,
+ "Failed to get event log %d, status %04x\n",
+ event, status);
+ else if (ev_buf->seq_num == event) {
+ struct scsi_sense_hdr sshdr;
+
+ memset(&sshdr, 0, sizeof(sshdr));
+ scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
+
+ if (sshdr.sense_key == VENDOR_SPECIFIC &&
+ sshdr.asc == 0x80 &&
+ sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
+ shost_printk(KERN_CRIT, cb->host,
+ "Physical drive %d:%d: %s\n",
+ ev_buf->channel, ev_buf->target,
+ myrb_event_msg[sshdr.ascq]);
+ else
+ shost_printk(KERN_CRIT, cb->host,
+ "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
+ ev_buf->channel, ev_buf->target,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ }
+
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
+ ev_buf, ev_addr);
+}
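
The event log entry carries raw SCSI sense data; the driver normalizes it
and treats sense key VENDOR_SPECIFIC with ASC 0x80 as an index into
myrb_event_msg[]. That decode step on its own, as a sketch (demo function
name; bounds checking against the table is left to the caller, as above):

#include <linux/types.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

/* Returns the myrb_event_msg[] index, or -1 if the sense data does
 * not use the vendor-specific event encoding. */
static int demo_decode_event_sense(const u8 *sense, int len)
{
	struct scsi_sense_hdr sshdr;

	if (!scsi_normalize_sense(sense, len, &sshdr))
		return -1;
	if (sshdr.sense_key != VENDOR_SPECIFIC || sshdr.asc != 0x80)
		return -1;
	return sshdr.ascq;
}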
+
+/**
+ * myrb_get_errtable - retrieves the error table from the controller
+ *
+ * Executes a type 3 command and logs the error table from the controller.
+ */
+static void myrb_get_errtable(struct myrb_hba *cb)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+ struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
+
+ memcpy(&old_table, cb->err_table, sizeof(old_table));
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_MCMD_TAG;
+ mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
+ mbox->type3.addr = cb->err_table_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (status == MYRB_STATUS_SUCCESS) {
+ struct myrb_error_entry *table = cb->err_table;
+ struct myrb_error_entry *new, *old;
+ size_t err_table_offset;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, cb->host) {
+ if (sdev->channel >= myrb_logical_channel(cb->host))
+ continue;
+ err_table_offset = sdev->channel * MYRB_MAX_TARGETS
+ + sdev->id;
+ new = table + err_table_offset;
+ old = &old_table[err_table_offset];
+ if (new->parity_err == old->parity_err &&
+ new->soft_err == old->soft_err &&
+ new->hard_err == old->hard_err &&
+ new->misc_err == old->misc_err)
+ continue;
+ sdev_printk(KERN_CRIT, sdev,
+ "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
+ new->parity_err, new->soft_err,
+ new->hard_err, new->misc_err);
+ }
+ }
+}
+
+/**
+ * myrb_get_ldev_info - retrieves the logical device table from the controller
+ *
+ * Executes a type 3 command and updates the logical device table.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
+{
+ unsigned short status;
+ int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
+ struct Scsi_Host *shost = cb->host;
+
+ status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
+ cb->ldev_info_addr);
+ if (status != MYRB_STATUS_SUCCESS)
+ return status;
+
+ for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
+ struct myrb_ldev_info *old = NULL;
+ struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
+ ldev_num, 0);
+ if (!sdev) {
+ if (new->state == MYRB_DEVICE_OFFLINE)
+ continue;
+ shost_printk(KERN_INFO, shost,
+ "Adding Logical Drive %d in state %s\n",
+ ldev_num, myrb_devstate_name(new->state));
+ scsi_add_device(shost, myrb_logical_channel(shost),
+ ldev_num, 0);
+ continue;
+ }
+ old = sdev->hostdata;
+ if (new->state != old->state)
+ shost_printk(KERN_INFO, shost,
+ "Logical Drive %d is now %s\n",
+ ldev_num, myrb_devstate_name(new->state));
+ if (new->wb_enabled != old->wb_enabled)
+ sdev_printk(KERN_INFO, sdev,
+ "Logical Drive is now WRITE %s\n",
+ (new->wb_enabled ? "BACK" : "THRU"));
+ memcpy(old, new, sizeof(*new));
+ scsi_device_put(sdev);
+ }
+ return status;
+}
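
myrb_get_ldev_info() is a lookup-or-add scan: logical drives the midlayer
does not know about yet are announced via scsi_add_device(), while known
ones are compared against the freshly fetched state. The skeleton of that
pattern (a sketch; the state comparison is elided):

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void demo_sync_lun(struct Scsi_Host *shost, uint chan, uint id)
{
	struct scsi_device *sdev = scsi_device_lookup(shost, chan, id, 0);

	if (!sdev) {
		scsi_add_device(shost, chan, id, 0);	/* newly visible */
		return;
	}
	/* ...compare sdev->hostdata against the new controller state... */
	scsi_device_put(sdev);	/* balance scsi_device_lookup() */
}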
+
+/**
+ * myrb_get_rbld_progress - get rebuild progress information
+ *
+ * Executes a type 3 command and returns the rebuild progress
+ * information.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
+ struct myrb_rbld_progress *rbld)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_rbld_progress *rbld_buf;
+ dma_addr_t rbld_addr;
+ unsigned short status;
+
+ rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
+ sizeof(struct myrb_rbld_progress),
+ &rbld_addr, GFP_KERNEL);
+ if (!rbld_buf)
+ return MYRB_STATUS_RBLD_NOT_CHECKED;
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_MCMD_TAG;
+ mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
+ mbox->type3.addr = rbld_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (rbld)
+ memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
+ rbld_buf, rbld_addr);
+ return status;
+}
+
+/**
+ * myrb_update_rbld_progress - updates the rebuild status
+ *
+ * Updates the rebuild status for the attached logical devices.
+ *
+ */
+static void myrb_update_rbld_progress(struct myrb_hba *cb)
+{
+ struct myrb_rbld_progress rbld_buf;
+ unsigned short status;
+
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+ if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
+ cb->last_rbld_status == MYRB_STATUS_SUCCESS)
+ status = MYRB_STATUS_RBLD_SUCCESS;
+ if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
+ unsigned int blocks_done =
+ rbld_buf.ldev_size - rbld_buf.blocks_left;
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup(cb->host,
+ myrb_logical_channel(cb->host),
+ rbld_buf.ldev_num, 0);
+ if (!sdev)
+ return;
+
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild in Progress, %d%% completed\n",
+ (100 * (blocks_done >> 7))
+ / (rbld_buf.ldev_size >> 7));
+ break;
+ case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed due to Logical Drive Failure\n");
+ break;
+ case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed due to Bad Blocks on Other Drives\n");
+ break;
+ case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
+ break;
+ case MYRB_STATUS_RBLD_SUCCESS:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Completed Successfully\n");
+ break;
+ case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Successfully Terminated\n");
+ break;
+ default:
+ break;
+ }
+ scsi_device_put(sdev);
+ }
+ cb->last_rbld_status = status;
+}
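
The progress message computes its percentage as
(100 * (blocks_done >> 7)) / (ldev_size >> 7): both terms are pre-shifted
so the multiplication by 100 cannot overflow 32 bits even for the largest
logical drives. The arithmetic on its own (demo helper; the divide-by-zero
guard is an addition here, not in the driver):

#include <linux/types.h>

static unsigned int demo_pct_done(u32 done, u32 total)
{
	if (!(total >> 7))
		return 100;	/* tiny device; avoid dividing by zero */
	/* done >> 7 <= 2^25, so 100 * (done >> 7) < 2^32 */
	return (100 * (done >> 7)) / (total >> 7);
}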
+
+/**
+ * myrb_get_cc_progress - retrieves the consistency check progress
+ *
+ * Executes a type 3 command and fetches the rebuild / consistency check
+ * status.
+ */
+static void myrb_get_cc_progress(struct myrb_hba *cb)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_rbld_progress *rbld_buf;
+ dma_addr_t rbld_addr;
+ unsigned short status;
+
+ rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
+ sizeof(struct myrb_rbld_progress),
+ &rbld_addr, GFP_KERNEL);
+ if (!rbld_buf) {
+ cb->need_cc_status = true;
+ return;
+ }
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_MCMD_TAG;
+ mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
+ mbox->type3.addr = rbld_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (status == MYRB_STATUS_SUCCESS) {
+ unsigned int ldev_num = rbld_buf->ldev_num;
+ unsigned int ldev_size = rbld_buf->ldev_size;
+ unsigned int blocks_done =
+ ldev_size - rbld_buf->blocks_left;
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup(cb->host,
+ myrb_logical_channel(cb->host),
+ ldev_num, 0);
+ if (sdev) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check in Progress: %d%% completed\n",
+ (100 * (blocks_done >> 7))
+ / (ldev_size >> 7));
+ scsi_device_put(sdev);
+ }
+ }
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
+ rbld_buf, rbld_addr);
+}
+
+/**
+ * myrb_bgi_control - updates background initialisation status
+ *
+ * Executes a type 3B command and updates the background initialisation status
+ */
+static void myrb_bgi_control(struct myrb_hba *cb)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_bgi_status *bgi, *last_bgi;
+ dma_addr_t bgi_addr;
+ struct scsi_device *sdev = NULL;
+ unsigned short status;
+
+ bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
+ &bgi_addr, GFP_KERNEL);
+ if (!bgi) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate bgi memory\n");
+ return;
+ }
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3B.id = MYRB_DCMD_TAG;
+ mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
+ mbox->type3B.optype = 0x20;
+ mbox->type3B.addr = bgi_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ last_bgi = &cb->bgi_status;
+ sdev = scsi_device_lookup(cb->host,
+ myrb_logical_channel(cb->host),
+ bgi->ldev_num, 0);
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ switch (bgi->status) {
+ case MYRB_BGI_INVALID:
+ break;
+ case MYRB_BGI_STARTED:
+ if (!sdev)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Started\n");
+ break;
+ case MYRB_BGI_INPROGRESS:
+ if (!sdev)
+ break;
+ if (bgi->blocks_done == last_bgi->blocks_done &&
+ bgi->ldev_num == last_bgi->ldev_num)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization in Progress: %d%% completed\n",
+ (100 * (bgi->blocks_done >> 7))
+ / (bgi->ldev_size >> 7));
+ break;
+ case MYRB_BGI_SUSPENDED:
+ if (!sdev)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Suspended\n");
+ break;
+ case MYRB_BGI_CANCELLED:
+ if (!sdev)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Cancelled\n");
+ break;
+ }
+ memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
+ break;
+ case MYRB_STATUS_BGI_SUCCESS:
+ if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Completed Successfully\n");
+ cb->bgi_status.status = MYRB_BGI_INVALID;
+ break;
+ case MYRB_STATUS_BGI_ABORTED:
+ if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Aborted\n");
+ /* Fallthrough */
+ case MYRB_STATUS_NO_BGI_INPROGRESS:
+ cb->bgi_status.status = MYRB_BGI_INVALID;
+ break;
+ }
+ if (sdev)
+ scsi_device_put(sdev);
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
+ bgi, bgi_addr);
+}
+
+/**
+ * myrb_hba_enquiry - updates the controller status
+ *
+ * Executes a DAC960 V1 Enquiry command and updates the controller status.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
+{
+ struct myrb_enquiry old, *new;
+ unsigned short status;
+
+ memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
+
+ status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
+ if (status != MYRB_STATUS_SUCCESS)
+ return status;
+
+ new = cb->enquiry;
+ if (new->ldev_count > old.ldev_count) {
+ int ldev_num = old.ldev_count - 1;
+
+ while (++ldev_num < new->ldev_count)
+ shost_printk(KERN_CRIT, cb->host,
+ "Logical Drive %d Now Exists\n",
+ ldev_num);
+ }
+ if (new->ldev_count < old.ldev_count) {
+ int ldev_num = new->ldev_count - 1;
+
+ while (++ldev_num < old.ldev_count)
+ shost_printk(KERN_CRIT, cb->host,
+ "Logical Drive %d No Longer Exists\n",
+ ldev_num);
+ }
+ if (new->status.deferred != old.status.deferred)
+ shost_printk(KERN_CRIT, cb->host,
+ "Deferred Write Error Flag is now %s\n",
+ (new->status.deferred ? "TRUE" : "FALSE"));
+ if (new->ev_seq != old.ev_seq) {
+ cb->new_ev_seq = new->ev_seq;
+ cb->need_err_info = true;
+ shost_printk(KERN_INFO, cb->host,
+ "Event log %d/%d (%d/%d) available\n",
+ cb->old_ev_seq, cb->new_ev_seq,
+ old.ev_seq, new->ev_seq);
+ }
+ if ((new->ldev_critical > 0 &&
+ new->ldev_critical != old.ldev_critical) ||
+ (new->ldev_offline > 0 &&
+ new->ldev_offline != old.ldev_offline) ||
+ (new->ldev_count != old.ldev_count)) {
+ shost_printk(KERN_INFO, cb->host,
+ "Logical drive count changed (%d/%d/%d)\n",
+ new->ldev_critical,
+ new->ldev_offline,
+ new->ldev_count);
+ cb->need_ldev_info = true;
+ }
+ if (new->pdev_dead > 0 ||
+ new->pdev_dead != old.pdev_dead ||
+ time_after_eq(jiffies, cb->secondary_monitor_time
+ + MYRB_SECONDARY_MONITOR_INTERVAL)) {
+ cb->need_bgi_status = cb->bgi_status_supported;
+ cb->secondary_monitor_time = jiffies;
+ }
+ if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
+ new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
+ old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
+ old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
+ cb->need_rbld = true;
+ cb->rbld_first = (new->ldev_critical < old.ldev_critical);
+ }
+ if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
+ switch (new->rbld) {
+ case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Completed Successfully\n");
+ break;
+ case MYRB_STDBY_RBLD_IN_PROGRESS:
+ case MYRB_BG_RBLD_IN_PROGRESS:
+ break;
+ case MYRB_BG_CHECK_IN_PROGRESS:
+ cb->need_cc_status = true;
+ break;
+ case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Completed with Error\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Failed - Physical Device Failed\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Failed - Logical Drive Failed\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Failed - Other Causes\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Successfully Terminated\n");
+ break;
+ }
+ else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
+ cb->need_cc_status = true;
+
+ return MYRB_STATUS_SUCCESS;
+}
+
+/**
+ * myrb_set_pdev_state - sets the device state for a physical device
+ *
+ * Return: command status
+ */
+static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
+ struct scsi_device *sdev, enum myrb_devstate state)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+
+ mutex_lock(&cb->dcmd_mutex);
+ mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
+ mbox->type3D.id = MYRB_DCMD_TAG;
+ mbox->type3D.channel = sdev->channel;
+ mbox->type3D.target = sdev->id;
+ mbox->type3D.state = state & 0x1F;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+
+ return status;
+}
+
+/**
+ * myrb_enable_mmio - enables the Memory Mailbox Interface
+ *
+ * PD and P controller types have no memory mailbox, but still need the
+ * other dma mapped memory.
+ *
+ * Return: true on success, false otherwise.
+ */
+static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
+{
+ void __iomem *base = cb->io_base;
+ struct pci_dev *pdev = cb->pdev;
+ size_t err_table_size;
+ size_t ldev_info_size;
+ union myrb_cmd_mbox *cmd_mbox_mem;
+ struct myrb_stat_mbox *stat_mbox_mem;
+ union myrb_cmd_mbox mbox;
+ unsigned short status;
+
+ memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_err(&pdev->dev, "DMA mask out of range\n");
+ return false;
+ }
+
+ cb->enquiry = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct myrb_enquiry),
+ &cb->enquiry_addr, GFP_KERNEL);
+ if (!cb->enquiry)
+ return false;
+
+ err_table_size = sizeof(struct myrb_error_entry) *
+ MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
+ cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
+ &cb->err_table_addr, GFP_KERNEL);
+ if (!cb->err_table)
+ return false;
+
+ ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
+ cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
+ &cb->ldev_info_addr, GFP_KERNEL);
+ if (!cb->ldev_info_buf)
+ return false;
+
+ /*
+ * Skip mailbox initialisation for PD and P Controllers
+ */
+ if (!mmio_init_fn)
+ return true;
+
+ /* These are the base addresses for the command memory mailbox array */
+ cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
+ cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
+ cb->cmd_mbox_size,
+ &cb->cmd_mbox_addr,
+ GFP_KERNEL);
+ if (!cb->first_cmd_mbox)
+ return false;
+
+ cmd_mbox_mem = cb->first_cmd_mbox;
+ cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
+ cb->last_cmd_mbox = cmd_mbox_mem;
+ cb->next_cmd_mbox = cb->first_cmd_mbox;
+ cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
+ cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
+
+ /* These are the base addresses for the status memory mailbox array */
+ cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
+ sizeof(struct myrb_stat_mbox);
+ cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
+ cb->stat_mbox_size,
+ &cb->stat_mbox_addr,
+ GFP_KERNEL);
+ if (!cb->first_stat_mbox)
+ return false;
+
+ stat_mbox_mem = cb->first_stat_mbox;
+ stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
+ cb->last_stat_mbox = stat_mbox_mem;
+ cb->next_stat_mbox = cb->first_stat_mbox;
+
+ /* Enable the Memory Mailbox Interface. */
+ cb->dual_mode_interface = true;
+ mbox.typeX.opcode = 0x2B;
+ mbox.typeX.id = 0;
+ mbox.typeX.opcode2 = 0x14;
+ mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
+ mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
+
+ status = mmio_init_fn(pdev, base, &mbox);
+ if (status != MYRB_STATUS_SUCCESS) {
+ cb->dual_mode_interface = false;
+ mbox.typeX.opcode2 = 0x10;
+ status = mmio_init_fn(pdev, base, &mbox);
+ if (status != MYRB_STATUS_SUCCESS) {
+ dev_err(&pdev->dev,
+ "Failed to enable mailbox, statux %02X\n",
+ status);
+ return false;
+ }
+ }
+ return true;
+}
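
The mailbox enable is a negotiate-then-degrade sequence: opcode2 0x14
requests the dual-mode interface, and on failure the driver clears
dual_mode_interface and retries with opcode2 0x10 for single mode.
Condensed into one helper (a sketch restating the code above; the name is
illustrative):

static unsigned short demo_enable_mbox(struct myrb_hba *cb,
				       struct pci_dev *pdev,
				       void __iomem *base,
				       union myrb_cmd_mbox *mbox,
				       mbox_mmio_init_t init)
{
	unsigned short status;

	cb->dual_mode_interface = true;
	mbox->typeX.opcode2 = 0x14;		/* dual mode */
	status = init(pdev, base, mbox);
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox->typeX.opcode2 = 0x10;	/* single mode */
		status = init(pdev, base, mbox);
	}
	return status;
}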
+
+/**
+ * myrb_get_hba_config - reads the configuration information
+ *
+ * Reads the configuration information from the controller and
+ * initializes the controller structure.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int myrb_get_hba_config(struct myrb_hba *cb)
+{
+ struct myrb_enquiry2 *enquiry2;
+ dma_addr_t enquiry2_addr;
+ struct myrb_config2 *config2;
+ dma_addr_t config2_addr;
+ struct Scsi_Host *shost = cb->host;
+ struct pci_dev *pdev = cb->pdev;
+ int pchan_max = 0, pchan_cur = 0;
+ unsigned short status;
+ int ret = -ENODEV, memsize = 0;
+
+ enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+ &enquiry2_addr, GFP_KERNEL);
+ if (!enquiry2) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate V1 enquiry2 memory\n");
+ return -ENOMEM;
+ }
+ config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
+ &config2_addr, GFP_KERNEL);
+ if (!config2) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate V1 config2 memory\n");
+ dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+ enquiry2, enquiry2_addr);
+ return -ENOMEM;
+ }
+ mutex_lock(&cb->dma_mutex);
+ status = myrb_hba_enquiry(cb);
+ mutex_unlock(&cb->dma_mutex);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed it issue V1 Enquiry\n");
+ goto out_free;
+ }
+
+ status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed to issue V1 Enquiry2\n");
+ goto out_free;
+ }
+
+ status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed to issue ReadConfig2\n");
+ goto out_free;
+ }
+
+ status = myrb_get_ldev_info(cb);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed to get logical drive information\n");
+ goto out_free;
+ }
+
+ /*
+ * Initialize the Controller Model Name and Full Model Name fields.
+ */
+ switch (enquiry2->hw.sub_model) {
+ case DAC960_V1_P_PD_PU:
+ if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
+ strcpy(cb->model_name, "DAC960PU");
+ else
+ strcpy(cb->model_name, "DAC960PD");
+ break;
+ case DAC960_V1_PL:
+ strcpy(cb->model_name, "DAC960PL");
+ break;
+ case DAC960_V1_PG:
+ strcpy(cb->model_name, "DAC960PG");
+ break;
+ case DAC960_V1_PJ:
+ strcpy(cb->model_name, "DAC960PJ");
+ break;
+ case DAC960_V1_PR:
+ strcpy(cb->model_name, "DAC960PR");
+ break;
+ case DAC960_V1_PT:
+ strcpy(cb->model_name, "DAC960PT");
+ break;
+ case DAC960_V1_PTL0:
+ strcpy(cb->model_name, "DAC960PTL0");
+ break;
+ case DAC960_V1_PRL:
+ strcpy(cb->model_name, "DAC960PRL");
+ break;
+ case DAC960_V1_PTL1:
+ strcpy(cb->model_name, "DAC960PTL1");
+ break;
+ case DAC960_V1_1164P:
+ strcpy(cb->model_name, "eXtremeRAID 1100");
+ break;
+ default:
+ shost_printk(KERN_WARNING, cb->host,
+ "Unknown Model %X\n",
+ enquiry2->hw.sub_model);
+ goto out;
+ }
+ /*
+ * Initialize the Controller Firmware Version field and verify that it
+ * is a supported firmware version.
+ * The supported firmware versions are:
+ *
+ * DAC1164P 5.06 and above
+ * DAC960PTL/PRL/PJ/PG 4.06 and above
+ * DAC960PU/PD/PL 3.51 and above
+ * DAC960PU/PD/PL/P 2.73 and above
+ */
+#if defined(CONFIG_ALPHA)
+ /*
+ * DEC Alpha machines were often equipped with DAC960 cards that were
+ * OEMed from Mylex, and had their own custom firmware. Version 2.70,
+ * the last custom FW revision to be released by DEC for these older
+ * controllers, appears to work quite well with this driver.
+ *
+ * Cards tested successfully were several versions each of the PD and
+ * PU, called by DEC the KZPSC and KZPAC, respectively, and having
+ * the Manufacturer Numbers (from Mylex), usually on a sticker on the
+ * back of the board, of:
+ *
+ * KZPSC: D040347 (1-channel) or D040348 (2-channel)
+ * or D040349 (3-channel)
+ * KZPAC: D040395 (1-channel) or D040396 (2-channel)
+ * or D040397 (3-channel)
+ */
+# define FIRMWARE_27X "2.70"
+#else
+# define FIRMWARE_27X "2.73"
+#endif
+
+ if (enquiry2->fw.major_version == 0) {
+ enquiry2->fw.major_version = cb->enquiry->fw_major_version;
+ enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
+ enquiry2->fw.firmware_type = '0';
+ enquiry2->fw.turn_id = 0;
+ }
+ sprintf(cb->fw_version, "%d.%02d-%c-%02d",
+ enquiry2->fw.major_version,
+ enquiry2->fw.minor_version,
+ enquiry2->fw.firmware_type,
+ enquiry2->fw.turn_id);
+ if (!((enquiry2->fw.major_version == 5 &&
+ enquiry2->fw.minor_version >= 6) ||
+ (enquiry2->fw.major_version == 4 &&
+ enquiry2->fw.minor_version >= 6) ||
+ (enquiry2->fw.major_version == 3 &&
+ enquiry2->fw.minor_version >= 51) ||
+ (enquiry2->fw.major_version == 2 &&
+ strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Firmware Version '%s' unsupported\n",
+ cb->fw_version);
+ goto out;
+ }
+ /*
+ * Initialize the Channels, Targets, Memory Size, and SAF-TE
+ * Enclosure Management Enabled fields.
+ */
+ switch (enquiry2->hw.model) {
+ case MYRB_5_CHANNEL_BOARD:
+ pchan_max = 5;
+ break;
+ case MYRB_3_CHANNEL_BOARD:
+ case MYRB_3_CHANNEL_ASIC_DAC:
+ pchan_max = 3;
+ break;
+ case MYRB_2_CHANNEL_BOARD:
+ pchan_max = 2;
+ break;
+ default:
+ pchan_max = enquiry2->cfg_chan;
+ break;
+ }
+ pchan_cur = enquiry2->cur_chan;
+ if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
+ cb->bus_width = 32;
+ else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
+ cb->bus_width = 16;
+ else
+ cb->bus_width = 8;
+ cb->ldev_block_size = enquiry2->ldev_block_size;
+ shost->max_channel = pchan_cur;
+ shost->max_id = enquiry2->max_targets;
+ memsize = enquiry2->mem_size >> 20;
+ cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
+ /*
+ * Initialize the Controller Queue Depth, Driver Queue Depth,
+ * Logical Drive Count, Maximum Blocks per Command, Controller
+ * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
+ * The Driver Queue Depth must be at most one less than the
+ * Controller Queue Depth to allow for an automatic drive
+ * rebuild operation.
+ */
+ shost->can_queue = cb->enquiry->max_tcq;
+ if (shost->can_queue < 3)
+ shost->can_queue = enquiry2->max_cmds;
+ if (shost->can_queue < 3)
+ /* Play safe and disable TCQ */
+ shost->can_queue = 1;
+
+ if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
+ shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
+ shost->max_sectors = enquiry2->max_sectors;
+ shost->sg_tablesize = enquiry2->max_sge;
+ if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
+ shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
+ /*
+ * Initialize the Stripe Size, Segment Size, and Geometry Translation.
+ */
+ cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
+ >> (10 - MYRB_BLKSIZE_BITS);
+ cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
+ >> (10 - MYRB_BLKSIZE_BITS);
+ /* Assume 255/63 translation */
+ cb->ldev_geom_heads = 255;
+ cb->ldev_geom_sectors = 63;
+ if (config2->drive_geometry) {
+ cb->ldev_geom_heads = 128;
+ cb->ldev_geom_sectors = 32;
+ }
+
+ /*
+ * Initialize the Background Initialization Status.
+ */
+ if ((cb->fw_version[0] == '4' &&
+ strcmp(cb->fw_version, "4.08") >= 0) ||
+ (cb->fw_version[0] == '5' &&
+ strcmp(cb->fw_version, "5.08") >= 0)) {
+ cb->bgi_status_supported = true;
+ myrb_bgi_control(cb);
+ }
+ cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
+ ret = 0;
+
+out:
+ shost_printk(KERN_INFO, cb->host,
+ "Configuring %s PCI RAID Controller\n", cb->model_name);
+ shost_printk(KERN_INFO, cb->host,
+ " Firmware Version: %s, Memory Size: %dMB\n",
+ cb->fw_version, memsize);
+ if (cb->io_addr == 0)
+ shost_printk(KERN_INFO, cb->host,
+ " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
+ (unsigned long)cb->pci_addr, cb->irq);
+ else
+ shost_printk(KERN_INFO, cb->host,
+ " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
+ (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
+ cb->irq);
+ shost_printk(KERN_INFO, cb->host,
+ " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
+ cb->host->can_queue, cb->host->max_sectors);
+ shost_printk(KERN_INFO, cb->host,
+ " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
+ cb->host->can_queue, cb->host->sg_tablesize,
+ MYRB_SCATTER_GATHER_LIMIT);
+ shost_printk(KERN_INFO, cb->host,
+ " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
+ cb->stripe_size, cb->segment_size,
+ cb->ldev_geom_heads, cb->ldev_geom_sectors,
+ cb->safte_enabled ?
+ " SAF-TE Enclosure Management Enabled" : "");
+ shost_printk(KERN_INFO, cb->host,
+ " Physical: %d/%d channels %d/%d/%d devices\n",
+ pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
+ cb->host->max_id);
+
+ shost_printk(KERN_INFO, cb->host,
+ " Logical: 1/1 channels, %d/%d disks\n",
+ cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
+
+out_free:
+ dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+ enquiry2, enquiry2_addr);
+ dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
+ config2, config2_addr);
+
+ return ret;
+}
+
+/**
+ * myrb_unmap - unmaps controller structures
+ */
+static void myrb_unmap(struct myrb_hba *cb)
+{
+ if (cb->ldev_info_buf) {
+ size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
+ MYRB_MAX_LDEVS;
+ dma_free_coherent(&cb->pdev->dev, ldev_info_size,
+ cb->ldev_info_buf, cb->ldev_info_addr);
+ cb->ldev_info_buf = NULL;
+ }
+ if (cb->err_table) {
+ size_t err_table_size = sizeof(struct myrb_error_entry) *
+ MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
+ dma_free_coherent(&cb->pdev->dev, err_table_size,
+ cb->err_table, cb->err_table_addr);
+ cb->err_table = NULL;
+ }
+ if (cb->enquiry) {
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
+ cb->enquiry, cb->enquiry_addr);
+ cb->enquiry = NULL;
+ }
+ if (cb->first_stat_mbox) {
+ dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
+ cb->first_stat_mbox, cb->stat_mbox_addr);
+ cb->first_stat_mbox = NULL;
+ }
+ if (cb->first_cmd_mbox) {
+ dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
+ cb->first_cmd_mbox, cb->cmd_mbox_addr);
+ cb->first_cmd_mbox = NULL;
+ }
+}
+
+/**
+ * myrb_cleanup - cleanup controller structures
+ */
+static void myrb_cleanup(struct myrb_hba *cb)
+{
+ struct pci_dev *pdev = cb->pdev;
+
+ /* Free the memory mailbox, status, and related structures */
+ myrb_unmap(cb);
+
+ if (cb->mmio_base) {
+ cb->disable_intr(cb->io_base);
+ iounmap(cb->mmio_base);
+ }
+ if (cb->irq)
+ free_irq(cb->irq, cb);
+ if (cb->io_addr)
+ release_region(cb->io_addr, 0x80);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ scsi_host_put(cb->host);
+}
+
+static int myrb_host_reset(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost = scmd->device->host;
+ struct myrb_hba *cb = shost_priv(shost);
+
+ cb->reset(cb->io_base);
+ return SUCCESS;
+}
+
+static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct myrb_hba *cb = shost_priv(shost);
+ struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_dcdb *dcdb;
+ dma_addr_t dcdb_addr;
+ struct scsi_device *sdev = scmd->device;
+ struct scatterlist *sgl;
+ unsigned long flags;
+ int nsge;
+
+ myrb_reset_cmd(cmd_blk);
+ dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
+ if (!dcdb)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ nsge = scsi_dma_map(scmd);
+ if (nsge > 1) {
+ dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
+ scmd->result = (DID_ERROR << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ mbox->type3.opcode = MYRB_CMD_DCDB;
+ mbox->type3.id = scmd->request->tag + 3;
+ mbox->type3.addr = dcdb_addr;
+ dcdb->channel = sdev->channel;
+ dcdb->target = sdev->id;
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
+ break;
+ case DMA_FROM_DEVICE:
+ dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
+ break;
+ default:
+ dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
+ break;
+ }
+ dcdb->early_status = false;
+ if (scmd->request->timeout <= 10)
+ dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
+ else if (scmd->request->timeout <= 60)
+ dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
+ else if (scmd->request->timeout <= 600)
+ dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
+ else
+ dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
+ dcdb->no_autosense = false;
+ dcdb->allow_disconnect = true;
+ sgl = scsi_sglist(scmd);
+ dcdb->dma_addr = sg_dma_address(sgl);
+ if (sg_dma_len(sgl) > USHRT_MAX) {
+ dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
+ dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
+ } else {
+ dcdb->xfer_len_lo = sg_dma_len(sgl);
+ dcdb->xfer_len_hi4 = 0;
+ }
+ dcdb->cdb_len = scmd->cmd_len;
+ dcdb->sense_len = sizeof(dcdb->sense);
+ memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ cb->qcmd(cb, cmd_blk);
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return 0;
+}
+
+static void myrb_inquiry(struct myrb_hba *cb,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char inq[36] = {
+ 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
+ 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ };
+
+ if (cb->bus_width > 16)
+ inq[7] |= 1 << 6;
+ if (cb->bus_width > 8)
+ inq[7] |= 1 << 5;
+ memcpy(&inq[16], cb->model_name, 16);
+ memcpy(&inq[32], cb->fw_version, 1);
+ memcpy(&inq[33], &cb->fw_version[2], 2);
+ memcpy(&inq[35], &cb->fw_version[7], 1);
+
+ scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
+}
+
+static void
+myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
+ struct myrb_ldev_info *ldev_info)
+{
+ unsigned char modes[32], *mode_pg;
+ bool dbd;
+ size_t mode_len;
+
+ dbd = (scmd->cmnd[1] & 0x08) == 0x08;
+ if (dbd) {
+ mode_len = 24;
+ mode_pg = &modes[4];
+ } else {
+ mode_len = 32;
+ mode_pg = &modes[12];
+ }
+ memset(modes, 0, sizeof(modes));
+ modes[0] = mode_len - 1;
+ if (!dbd) {
+ unsigned char *block_desc = &modes[4];
+
+ modes[3] = 8;
+ put_unaligned_be32(ldev_info->size, &block_desc[0]);
+ put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
+ }
+ mode_pg[0] = 0x08;
+ mode_pg[1] = 0x12;
+ if (ldev_info->wb_enabled)
+ mode_pg[2] |= 0x04;
+ if (cb->segment_size) {
+ mode_pg[2] |= 0x08;
+ put_unaligned_be16(cb->segment_size, &mode_pg[14]);
+ }
+
+ scsi_sg_copy_from_buffer(scmd, modes, mode_len);
+}
+
+static void myrb_request_sense(struct myrb_hba *cb,
+ struct scsi_cmnd *scmd)
+{
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ NO_SENSE, 0, 0);
+ scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
+}
+
+static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
+ struct myrb_ldev_info *ldev_info)
+{
+ unsigned char data[8];
+
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Capacity %u, blocksize %u\n",
+ ldev_info->size, cb->ldev_block_size);
+ put_unaligned_be32(ldev_info->size - 1, &data[0]);
+ put_unaligned_be32(cb->ldev_block_size, &data[4]);
+ scsi_sg_copy_from_buffer(scmd, data, 8);
+}
+
+static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct myrb_hba *cb = shost_priv(shost);
+ struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_ldev_info *ldev_info;
+ struct scsi_device *sdev = scmd->device;
+ struct scatterlist *sgl;
+ unsigned long flags;
+ u64 lba;
+ u32 block_cnt;
+ int nsge;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info ||
+ (ldev_info->state != MYRB_DEVICE_ONLINE &&
+ ldev_info->state != MYRB_DEVICE_WO)) {
+ dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
+ sdev->id, ldev_info ? ldev_info->state : 0xff);
+ scmd->result = (DID_BAD_TARGET << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ switch (scmd->cmnd[0]) {
+ case TEST_UNIT_READY:
+ scmd->result = (DID_OK << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ case INQUIRY:
+ if (scmd->cmnd[1] & 1) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ myrb_inquiry(cb, scmd);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ case SYNCHRONIZE_CACHE:
+ scmd->result = (DID_OK << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ case MODE_SENSE:
+ if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+ (scmd->cmnd[2] & 0x3F) != 0x08) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ myrb_mode_sense(cb, scmd, ldev_info);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ case READ_CAPACITY:
+ if ((scmd->cmnd[1] & 1) ||
+ (scmd->cmnd[8] & 1)) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ if (lba) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ myrb_read_capacity(cb, scmd, ldev_info);
+ scmd->scsi_done(scmd);
+ return 0;
+ case REQUEST_SENSE:
+ myrb_request_sense(cb, scmd);
+ scmd->result = (DID_OK << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ case SEND_DIAGNOSTIC:
+ if (scmd->cmnd[1] != 0x04) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ /* Assume good status */
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ case READ_6:
+ if (ldev_info->state == MYRB_DEVICE_WO) {
+ /* Data protect, attempt to read invalid data */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ DATA_PROTECT, 0x21, 0x06);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
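+ /* fall through */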
+ case WRITE_6:
+ lba = (((scmd->cmnd[1] & 0x1F) << 16) |
+ (scmd->cmnd[2] << 8) |
+ scmd->cmnd[3]);
+ block_cnt = scmd->cmnd[4];
+ break;
+ case READ_10:
+ if (ldev_info->state == MYRB_DEVICE_WO) {
+ /* Data protect, attempt to read invalid data */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ DATA_PROTECT, 0x21, 0x06);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
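+ /* fall through */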
+ case WRITE_10:
+ case VERIFY: /* 0x2F */
+ case WRITE_VERIFY: /* 0x2E */
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
+ break;
+ case READ_12:
+ if (ldev_info->state == MYRB_DEVICE_WO) {
+ /* Data protect, attempt to read invalid data */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ DATA_PROTECT, 0x21, 0x06);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
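+ /* fall through */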
+ case WRITE_12:
+ case VERIFY_12: /* 0xAF */
+ case WRITE_VERIFY_12: /* 0xAE */
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+ break;
+ default:
+ /* Illegal request, invalid opcode */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x20, 0);
+ scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type5.id = scmd->request->tag + 3;
+ if (scmd->sc_data_direction == DMA_NONE)
+ goto submit;
+ nsge = scsi_dma_map(scmd);
+ if (nsge == 1) {
+ sgl = scsi_sglist(scmd);
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mbox->type5.opcode = MYRB_CMD_READ;
+ else
+ mbox->type5.opcode = MYRB_CMD_WRITE;
+
+ mbox->type5.ld.xfer_len = block_cnt;
+ mbox->type5.ld.ldev_num = sdev->id;
+ mbox->type5.lba = lba;
+ mbox->type5.addr = (u32)sg_dma_address(sgl);
+ } else {
+ struct myrb_sge *hw_sgl;
+ dma_addr_t hw_sgl_addr;
+ int i;
+
+ hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
+ if (!hw_sgl)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ cmd_blk->sgl = hw_sgl;
+ cmd_blk->sgl_addr = hw_sgl_addr;
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mbox->type5.opcode = MYRB_CMD_READ_SG;
+ else
+ mbox->type5.opcode = MYRB_CMD_WRITE_SG;
+
+ mbox->type5.ld.xfer_len = block_cnt;
+ mbox->type5.ld.ldev_num = sdev->id;
+ mbox->type5.lba = lba;
+ mbox->type5.addr = hw_sgl_addr;
+ mbox->type5.sg_count = nsge;
+
+ scsi_for_each_sg(scmd, sgl, nsge, i) {
+ hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
+ hw_sgl->sge_count = (u32)sg_dma_len(sgl);
+ hw_sgl++;
+ }
+ }
+submit:
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ cb->qcmd(cb, cmd_blk);
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+
+ return 0;
+}
+
+static int myrb_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct scsi_device *sdev = scmd->device;
+
+ if (sdev->channel > myrb_logical_channel(shost)) {
+ scmd->result = (DID_BAD_TARGET << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ if (sdev->channel == myrb_logical_channel(shost))
+ return myrb_ldev_queuecommand(shost, scmd);
+
+ return myrb_pthru_queuecommand(shost, scmd);
+}
+
+static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
+{
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_ldev_info *ldev_info;
+ unsigned short ldev_num = sdev->id;
+ enum raid_level level;
+
+ ldev_info = cb->ldev_info_buf + ldev_num;
+ if (!ldev_info)
+ return -ENXIO;
+
+ sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
+ if (!sdev->hostdata)
+ return -ENOMEM;
+ dev_dbg(&sdev->sdev_gendev,
+ "slave alloc ldev %d state %x\n",
+ ldev_num, ldev_info->state);
+ memcpy(sdev->hostdata, ldev_info,
+ sizeof(*ldev_info));
+ switch (ldev_info->raid_level) {
+ case MYRB_RAID_LEVEL0:
+ level = RAID_LEVEL_LINEAR;
+ break;
+ case MYRB_RAID_LEVEL1:
+ level = RAID_LEVEL_1;
+ break;
+ case MYRB_RAID_LEVEL3:
+ level = RAID_LEVEL_3;
+ break;
+ case MYRB_RAID_LEVEL5:
+ level = RAID_LEVEL_5;
+ break;
+ case MYRB_RAID_LEVEL6:
+ level = RAID_LEVEL_6;
+ break;
+ case MYRB_RAID_JBOD:
+ level = RAID_LEVEL_JBOD;
+ break;
+ default:
+ level = RAID_LEVEL_UNKNOWN;
+ break;
+ }
+ raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
+ return 0;
+}
+
+static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
+{
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_pdev_state *pdev_info;
+ unsigned short status;
+
+ if (sdev->id > MYRB_MAX_TARGETS)
+ return -ENXIO;
+
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ if (!pdev_info)
+ return -ENOMEM;
+
+ status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
+ sdev, pdev_info);
+ if (status != MYRB_STATUS_SUCCESS) {
+ dev_dbg(&sdev->sdev_gendev,
+ "Failed to get device state, status %x\n",
+ status);
+ kfree(pdev_info);
+ return -ENXIO;
+ }
+ if (!pdev_info->present) {
+ dev_dbg(&sdev->sdev_gendev,
+ "device not present, skip\n");
+ kfree(pdev_info);
+ return -ENXIO;
+ }
+ dev_dbg(&sdev->sdev_gendev,
+ "slave alloc pdev %d:%d state %x\n",
+ sdev->channel, sdev->id, pdev_info->state);
+ sdev->hostdata = pdev_info;
+
+ return 0;
+}
+
+static int myrb_slave_alloc(struct scsi_device *sdev)
+{
+ if (sdev->channel > myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ if (sdev->lun > 0)
+ return -ENXIO;
+
+ if (sdev->channel == myrb_logical_channel(sdev->host))
+ return myrb_ldev_slave_alloc(sdev);
+
+ return myrb_pdev_slave_alloc(sdev);
+}
+
+static int myrb_slave_configure(struct scsi_device *sdev)
+{
+ struct myrb_ldev_info *ldev_info;
+
+ if (sdev->channel > myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host)) {
+ sdev->no_uld_attach = 1;
+ return 0;
+ }
+ if (sdev->lun != 0)
+ return -ENXIO;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ if (ldev_info->state != MYRB_DEVICE_ONLINE)
+ sdev_printk(KERN_INFO, sdev,
+ "Logical drive is %s\n",
+ myrb_devstate_name(ldev_info->state));
+
+ sdev->tagged_supported = 1;
+ return 0;
+}
+
+static void myrb_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+}
+
+static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ struct myrb_hba *cb = shost_priv(sdev->host);
+
+ geom[0] = cb->ldev_geom_heads;
+ geom[1] = cb->ldev_geom_sectors;
+ sector_div(capacity, geom[0] * geom[1]);
+ geom[2] = (int)capacity;
+
+ return 0;
+}
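
sector_div() divides the sector_t in place and returns the remainder, so
after the call capacity holds the cylinder count. A worked example with
the default 255/63 translation (demo function; the capacity is
illustrative):

#include <linux/blkdev.h>	/* sector_div() */

static int demo_cylinders(void)
{
	sector_t capacity = 17921835;

	/* 255 heads * 63 sectors = 16065 blocks per cylinder;
	 * 17921835 = 1115 * 16065 + 9360, so sector_div() leaves
	 * 1115 in 'capacity' and returns the remainder 9360. */
	sector_div(capacity, 255 * 63);
	return (int)capacity;	/* 1115 */
}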
+
+static ssize_t raid_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ int ret;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel == myrb_logical_channel(sdev->host)) {
+ struct myrb_ldev_info *ldev_info = sdev->hostdata;
+ const char *name;
+
+ name = myrb_devstate_name(ldev_info->state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->state);
+ } else {
+ struct myrb_pdev_state *pdev_info = sdev->hostdata;
+ unsigned short status;
+ const char *name;
+
+ status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
+ sdev, pdev_info);
+ if (status != MYRB_STATUS_SUCCESS)
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device state, status %x\n",
+ status);
+
+ if (!pdev_info->present)
+ name = "Removed";
+ else
+ name = myrb_devstate_name(pdev_info->state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ pdev_info->state);
+ }
+ return ret;
+}
+
+static ssize_t raid_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_pdev_state *pdev_info;
+ enum myrb_devstate new_state;
+ unsigned short status;
+
+ if (!strncmp(buf, "kill", 4) ||
+ !strncmp(buf, "offline", 7))
+ new_state = MYRB_DEVICE_DEAD;
+ else if (!strncmp(buf, "online", 6))
+ new_state = MYRB_DEVICE_ONLINE;
+ else if (!strncmp(buf, "standby", 7))
+ new_state = MYRB_DEVICE_STANDBY;
+ else
+ return -EINVAL;
+
+ pdev_info = sdev->hostdata;
+ if (!pdev_info) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - no physical device information\n");
+ return -ENXIO;
+ }
+ if (!pdev_info->present) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - device not present\n");
+ return -ENXIO;
+ }
+
+ if (pdev_info->state == new_state)
+ return count;
+
+ status = myrb_set_pdev_state(cb, sdev, new_state);
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ break;
+ case MYRB_STATUS_START_DEVICE_FAILED:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Unable to Start Device\n");
+ count = -EAGAIN;
+ break;
+ case MYRB_STATUS_NO_DEVICE:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - No Device at Address\n");
+ count = -ENODEV;
+ break;
+ case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Invalid Channel or Target or Modifier\n");
+ count = -EINVAL;
+ break;
+ case MYRB_STATUS_CHANNEL_BUSY:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Channel Busy\n");
+ count = -EBUSY;
+ break;
+ default:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Unexpected Status %04X\n", status);
+ count = -EIO;
+ break;
+ }
+ return count;
+}
+static DEVICE_ATTR_RW(raid_state);
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (sdev->channel == myrb_logical_channel(sdev->host)) {
+ struct myrb_ldev_info *ldev_info = sdev->hostdata;
+ const char *name;
+
+ if (!ldev_info)
+ return -ENXIO;
+
+ name = myrb_raidlevel_name(ldev_info->raid_level);
+ if (!name)
+ return snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->raid_level);
+ return snprintf(buf, 32, "%s\n", name);
+ }
+ return snprintf(buf, 32, "Physical Drive\n");
+}
+static DEVICE_ATTR_RO(raid_level);
+
+static ssize_t rebuild_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_rbld_progress rbld_buf;
+ unsigned short status;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host))
+ return snprintf(buf, 32, "physical device - not rebuilding\n");
+
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+
+ if (status != MYRB_STATUS_SUCCESS ||
+ rbld_buf.ldev_num != sdev->id)
+ return snprintf(buf, 32, "not rebuilding\n");
+
+ return snprintf(buf, 32, "rebuilding block %u of %u\n",
+ rbld_buf.ldev_size - rbld_buf.blocks_left,
+ rbld_buf.ldev_size);
+}
+
+static ssize_t rebuild_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_cmdblk *cmd_blk;
+ union myrb_cmd_mbox *mbox;
+ unsigned short status;
+ int rc, start;
+ const char *msg;
+
+ rc = kstrtoint(buf, 0, &start);
+ if (rc)
+ return rc;
+
+ if (sdev->channel >= myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ status = myrb_get_rbld_progress(cb, NULL);
+ if (start) {
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Initiated; already in progress\n");
+ return -EALREADY;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
+ mbox->type3D.id = MYRB_DCMD_TAG;
+ mbox->type3D.channel = sdev->channel;
+ mbox->type3D.target = sdev->id;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ } else {
+ struct pci_dev *pdev = cb->pdev;
+ unsigned char *rate;
+ dma_addr_t rate_addr;
+
+ if (status != MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Cancelled; not in progress\n");
+ return 0;
+ }
+
+ rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
+ &rate_addr, GFP_KERNEL);
+ if (rate == NULL) {
+ sdev_printk(KERN_INFO, sdev,
+ "Cancellation of Rebuild Failed - Out of Memory\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
+ mbox->type3R.id = MYRB_DCMD_TAG;
+ mbox->type3R.rbld_rate = 0xFF;
+ mbox->type3R.addr = rate_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
+ mutex_unlock(&cb->dcmd_mutex);
+ }
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+ start ? "Initiated" : "Cancelled");
+ return count;
+ }
+ if (!start) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Cancelled, status 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ switch (status) {
+ case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
+ msg = "Attempt to Rebuild Online or Unresponsive Drive";
+ break;
+ case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
+ msg = "New Disk Failed During Rebuild";
+ break;
+ case MYRB_STATUS_INVALID_ADDRESS:
+ msg = "Invalid Device Address";
+ break;
+ case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
+ msg = "Already in Progress";
+ break;
+ default:
+ msg = NULL;
+ break;
+ }
+ if (msg)
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed - %s\n", msg);
+ else
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed, status 0x%x\n", status);
+
+ return -EIO;
+}
+static DEVICE_ATTR_RW(rebuild);
+
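+/*
+ * Start (1) or cancel (0) a consistency check on a logical drive;
+ * reading the attribute reports progress in the same format as
+ * "rebuild" (see consistency_check_show() below).
+ */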
+static ssize_t consistency_check_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_rbld_progress rbld_buf;
+ struct myrb_cmdblk *cmd_blk;
+ union myrb_cmd_mbox *mbox;
+ unsigned short status;
+ int rc, start;
+ const char *msg;
+
+ rc = kstrtoint(buf, 0, &start);
+ if (rc)
+ return rc;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+ if (start) {
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Not Initiated; already in progress\n");
+ return -EALREADY;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
+ mbox->type3C.id = MYRB_DCMD_TAG;
+ mbox->type3C.ldev_num = sdev->id;
+ mbox->type3C.auto_restore = true;
+
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ } else {
+ struct pci_dev *pdev = cb->pdev;
+ unsigned char *rate;
+ dma_addr_t rate_addr;
+
+ if (status != MYRB_STATUS_SUCCESS ||
+ rbld_buf.ldev_num != sdev->id) {
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Not Cancelled; not in progress\n");
+ return count;
+ }
+ rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
+ &rate_addr, GFP_KERNEL);
+ if (rate == NULL) {
+ sdev_printk(KERN_INFO, sdev,
+ "Cancellation of Check Consistency Failed - Out of Memory\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
+ mbox->type3R.id = MYRB_DCMD_TAG;
+ mbox->type3R.rbld_rate = 0xFF;
+ mbox->type3R.addr = rate_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
+ mutex_unlock(&cb->dcmd_mutex);
+ }
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
+ start ? "Initiated" : "Cancelled");
+ return count;
+ }
+ if (!start) {
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Not Cancelled, status 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ switch (status) {
+ case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
+ msg = "Dependent Physical Device is DEAD";
+ break;
+ case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
+ msg = "New Disk Failed During Rebuild";
+ break;
+ case MYRB_STATUS_INVALID_ADDRESS:
+ msg = "Invalid or Nonredundant Logical Drive";
+ break;
+ case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
+ msg = "Already in Progress";
+ break;
+ default:
+ msg = NULL;
+ break;
+ }
+ if (msg)
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Failed - %s\n", msg);
+ else
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Failed, status 0x%x\n", status);
+
+ return -EIO;
+}
+
+static ssize_t consistency_check_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return rebuild_show(dev, attr, buf);
+}
+static DEVICE_ATTR_RW(consistency_check);
+
+static ssize_t ctlr_num_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+
+ return snprintf(buf, 20, "%d\n", cb->ctlr_num);
+}
+static DEVICE_ATTR_RO(ctlr_num);
+
+static ssize_t firmware_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", cb->fw_version);
+}
+static DEVICE_ATTR_RO(firmware);
+
+static ssize_t model_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", cb->model_name);
+}
+static DEVICE_ATTR_RO(model);
+
+static ssize_t flush_cache_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+ unsigned short status;
+
+ status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
+ if (status == MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_INFO, shost,
+ "Cache Flush Completed\n");
+ return count;
+ }
+ shost_printk(KERN_INFO, shost,
+ "Cache Flush Failed, status %x\n", status);
+ return -EIO;
+}
+static DEVICE_ATTR_WO(flush_cache);
+
+static struct device_attribute *myrb_sdev_attrs[] = {
+ &dev_attr_rebuild,
+ &dev_attr_consistency_check,
+ &dev_attr_raid_state,
+ &dev_attr_raid_level,
+ NULL,
+};
+
+static struct device_attribute *myrb_shost_attrs[] = {
+ &dev_attr_ctlr_num,
+ &dev_attr_model,
+ &dev_attr_firmware,
+ &dev_attr_flush_cache,
+ NULL,
+};
+
+struct scsi_host_template myrb_template = {
+ .module = THIS_MODULE,
+ .name = "DAC960",
+ .proc_name = "myrb",
+ .queuecommand = myrb_queuecommand,
+ .eh_host_reset_handler = myrb_host_reset,
+ .slave_alloc = myrb_slave_alloc,
+ .slave_configure = myrb_slave_configure,
+ .slave_destroy = myrb_slave_destroy,
+ .bios_param = myrb_biosparam,
+ .cmd_size = sizeof(struct myrb_cmdblk),
+ .shost_attrs = myrb_shost_attrs,
+ .sdev_attrs = myrb_sdev_attrs,
+ .this_id = -1,
+};
+
+/**
+ * myrb_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int myrb_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sdev->channel == myrb_logical_channel(sdev->host);
+}
+
+/**
+ * myrb_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void myrb_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_rbld_progress rbld_buf;
+ unsigned int percent_complete = 0;
+ unsigned short status;
+ unsigned int ldev_size = 0, remaining = 0;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host))
+ return;
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+ if (status == MYRB_STATUS_SUCCESS) {
+ if (rbld_buf.ldev_num == sdev->id) {
+ ldev_size = rbld_buf.ldev_size;
+ remaining = rbld_buf.blocks_left;
+ }
+ }
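+ /*
+ * Convert the rebuild position into a percentage; for example,
+ * ldev_size = 2048 and blocks_left = 512 reports 75.
+ */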
+ if (remaining && ldev_size)
+ percent_complete = (ldev_size - remaining) * 100 / ldev_size;
+ raid_set_resync(myrb_raid_template, dev, percent_complete);
+}
+
+/**
+ * myrb_get_state - get raid volume status
+ * @dev: the device struct object
+ */
+static void myrb_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_ldev_info *ldev_info = sdev->hostdata;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+ unsigned short status;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
+ state = RAID_STATE_UNKNOWN;
+ else {
+ status = myrb_get_rbld_progress(cb, NULL);
+ if (status == MYRB_STATUS_SUCCESS)
+ state = RAID_STATE_RESYNCING;
+ else {
+ switch (ldev_info->state) {
+ case MYRB_DEVICE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MYRB_DEVICE_WO:
+ case MYRB_DEVICE_CRITICAL:
+ state = RAID_STATE_DEGRADED;
+ break;
+ default:
+ state = RAID_STATE_OFFLINE;
+ }
+ }
+ }
+ raid_set_state(myrb_raid_template, dev, state);
+}
+
+struct raid_function_template myrb_raid_functions = {
+ .cookie = &myrb_template,
+ .is_raid = myrb_is_raid,
+ .get_resync = myrb_get_resync,
+ .get_state = myrb_get_state,
+};
+
+static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
+ struct scsi_cmnd *scmd)
+{
+ unsigned short status;
+
+ if (!cmd_blk)
+ return;
+
+ scsi_dma_unmap(scmd);
+
+ if (cmd_blk->dcdb) {
+ memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
+ dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
+ cmd_blk->dcdb_addr);
+ cmd_blk->dcdb = NULL;
+ }
+ if (cmd_blk->sgl) {
+ dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
+ cmd_blk->sgl = NULL;
+ cmd_blk->sgl_addr = 0;
+ }
+ status = cmd_blk->status;
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ case MYRB_STATUS_DEVICE_BUSY:
+ scmd->result = (DID_OK << 16) | status;
+ break;
+ case MYRB_STATUS_BAD_DATA:
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Bad Data Encountered\n");
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ /* Unrecovered read error */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x11, 0);
+ else
+ /* Write error */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x0C, 0);
+ scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ break;
+ case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
+ scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ /* Unrecovered read error, auto-reallocation failed */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x11, 0x04);
+ else
+ /* Write error, auto-reallocation failed */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x0C, 0x02);
+ scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ break;
+ case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Logical Drive Nonexistent or Offline");
+ scmd->result = (DID_BAD_TARGET << 16);
+ break;
+ case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Attempt to Access Beyond End of Logical Drive");
+ /* Logical block address out of range */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ NOT_READY, 0x21, 0);
+ break;
+ case MYRB_STATUS_DEVICE_NONRESPONSIVE:
+ dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
+ scmd->result = (DID_BAD_TARGET << 16);
+ break;
+ default:
+ scmd_printk(KERN_ERR, scmd,
+ "Unexpected Error Status %04X", status);
+ scmd->result = (DID_ERROR << 16);
+ break;
+ }
+ scmd->scsi_done(scmd);
+}
+
+static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ if (!cmd_blk)
+ return;
+
+ if (cmd_blk->completion) {
+ complete(cmd_blk->completion);
+ cmd_blk->completion = NULL;
+ }
+}
+
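+/*
+ * Background monitor: handle pending status sources in priority order
+ * (event log, error table, rebuild progress, logical device info,
+ * consistency check, background initialization), falling back to a
+ * full enquiry when nothing is pending.  Outstanding work is re-polled
+ * after a short (10 jiffies) delay.
+ */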
+static void myrb_monitor(struct work_struct *work)
+{
+ struct myrb_hba *cb = container_of(work,
+ struct myrb_hba, monitor_work.work);
+ struct Scsi_Host *shost = cb->host;
+ unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
+
+ dev_dbg(&shost->shost_gendev, "monitor tick\n");
+
+ if (cb->new_ev_seq > cb->old_ev_seq) {
+ int event = cb->old_ev_seq;
+
+ dev_dbg(&shost->shost_gendev,
+ "get event log no %d/%d\n",
+ cb->new_ev_seq, event);
+ myrb_get_event(cb, event);
+ cb->old_ev_seq = event + 1;
+ interval = 10;
+ } else if (cb->need_err_info) {
+ cb->need_err_info = false;
+ dev_dbg(&shost->shost_gendev, "get error table\n");
+ myrb_get_errtable(cb);
+ interval = 10;
+ } else if (cb->need_rbld && cb->rbld_first) {
+ cb->need_rbld = false;
+ dev_dbg(&shost->shost_gendev,
+ "get rebuild progress\n");
+ myrb_update_rbld_progress(cb);
+ interval = 10;
+ } else if (cb->need_ldev_info) {
+ cb->need_ldev_info = false;
+ dev_dbg(&shost->shost_gendev,
+ "get logical drive info\n");
+ myrb_get_ldev_info(cb);
+ interval = 10;
+ } else if (cb->need_rbld) {
+ cb->need_rbld = false;
+ dev_dbg(&shost->shost_gendev,
+ "get rebuild progress\n");
+ myrb_update_rbld_progress(cb);
+ interval = 10;
+ } else if (cb->need_cc_status) {
+ cb->need_cc_status = false;
+ dev_dbg(&shost->shost_gendev,
+ "get consistency check progress\n");
+ myrb_get_cc_progress(cb);
+ interval = 10;
+ } else if (cb->need_bgi_status) {
+ cb->need_bgi_status = false;
+ dev_dbg(&shost->shost_gendev, "get background init status\n");
+ myrb_bgi_control(cb);
+ interval = 10;
+ } else {
+ dev_dbg(&shost->shost_gendev, "new enquiry\n");
+ mutex_lock(&cb->dma_mutex);
+ myrb_hba_enquiry(cb);
+ mutex_unlock(&cb->dma_mutex);
+ if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
+ cb->need_err_info || cb->need_rbld ||
+ cb->need_ldev_info || cb->need_cc_status ||
+ cb->need_bgi_status) {
+ dev_dbg(&shost->shost_gendev,
+ "reschedule monitor\n");
+ interval = 0;
+ }
+ }
+ if (interval > 1)
+ cb->primary_monitor_time = jiffies;
+ queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
+}
+
+/**
+ * myrb_err_status - reports controller BIOS messages
+ * @cb: myrb_hba instance
+ * @error: error status code
+ * @parm0: first message parameter
+ * @parm1: second message parameter
+ *
+ * Controller BIOS messages are passed through the Error Status Register
+ * when the driver performs the BIOS handshaking.
+ *
+ * Return: true for fatal errors and false otherwise.
+ */
+bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
+ unsigned char parm0, unsigned char parm1)
+{
+ struct pci_dev *pdev = cb->pdev;
+
+ switch (error) {
+ case 0x00:
+ dev_info(&pdev->dev,
+ "Physical Device %d:%d Not Responding\n",
+ parm1, parm0);
+ break;
+ case 0x08:
+ dev_notice(&pdev->dev, "Spinning Up Drives\n");
+ break;
+ case 0x30:
+ dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+ break;
+ case 0x60:
+ dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+ break;
+ case 0x70:
+ dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+ break;
+ case 0x90:
+ dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+ parm1, parm0);
+ break;
+ case 0xA0:
+ dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+ break;
+ case 0xB0:
+ dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+ break;
+ case 0xD0:
+ dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+ break;
+ case 0xF0:
+ dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+ return true;
+ default:
+ dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+ error);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Hardware-specific functions
+ */
+
+/*
+ * DAC960 LA Series Controllers
+ */
+
+static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
+
+ return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
+}
+
+static inline bool DAC960_LA_init_in_progress(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
+
+ return !(idb & DAC960_LA_IDB_INIT_DONE);
+}
+
+static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
+
+ return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
+
+ return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_LA_enable_intr(void __iomem *base)
+{
+ unsigned char odb = 0xFF;
+
+ odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
+ writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_LA_disable_intr(void __iomem *base)
+{
+ unsigned char odb = 0xFF;
+
+ odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
+ writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_LA_intr_enabled(void __iomem *base)
+{
+ unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
+
+ return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
+ union myrb_cmd_mbox *mbox)
+{
+ mem_mbox->words[1] = mbox->words[1];
+ mem_mbox->words[2] = mbox->words[2];
+ mem_mbox->words[3] = mbox->words[3];
+ /* Memory barrier to prevent reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Memory barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
+ writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
+ writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
+ writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
+}
+
+static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
+{
+ return readb(base + DAC960_LA_STSID_OFFSET);
+}
+
+static inline unsigned short DAC960_LA_read_status(void __iomem *base)
+{
+ return readw(base + DAC960_LA_STS_OFFSET);
+}
+
+static inline bool
+DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
+
+ if (!(errsts & DAC960_LA_ERRSTS_PENDING))
+ return false;
+ errsts &= ~DAC960_LA_ERRSTS_PENDING;
+
+ *error = errsts;
+ *param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
+ *param1 = readb(base + DAC960_LA_CMDID_OFFSET);
+ writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned short
+DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ unsigned short status;
+ int timeout = 0;
+
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (!DAC960_LA_hw_mbox_is_full(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (DAC960_LA_hw_mbox_is_full(base)) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for empty mailbox\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ DAC960_LA_write_hw_mbox(base, mbox);
+ DAC960_LA_hw_mbox_new_cmd(base);
+ timeout = 0;
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_LA_hw_mbox_status_available(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (!DAC960_LA_hw_mbox_status_available(base)) {
+ dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ status = DAC960_LA_read_status(base);
+ DAC960_LA_ack_hw_mbox_intr(base);
+ DAC960_LA_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_LA_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ DAC960_LA_disable_intr(base);
+ DAC960_LA_ack_hw_mbox_status(base);
+ udelay(1000);
+ timeout = 0;
+ while (DAC960_LA_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_LA_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -ENODEV;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_LA_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_LA_enable_intr(base);
+ cb->qcmd = myrb_qcmd;
+ cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
+ if (cb->dual_mode_interface)
+ cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
+ else
+ cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
+ cb->disable_intr = DAC960_LA_disable_intr;
+ cb->reset = DAC960_LA_reset_ctrl;
+
+ return 0;
+}
+
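+/*
+ * Interrupt handler for the LA series: walk the status mailbox ring
+ * until an unused entry is reached, routing completions to the
+ * internal command blocks (tags 1 and 2) or back to the SCSI command
+ * looked up via the host tag (id - 3).
+ */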
+static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ struct myrb_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ DAC960_LA_ack_intr(base);
+ next_stat_mbox = cb->next_stat_mbox;
+ while (next_stat_mbox->valid) {
+ unsigned char id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = next_stat_mbox->status;
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
+ if (++next_stat_mbox > cb->last_stat_mbox)
+ next_stat_mbox = cb->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ }
+ cb->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_LA_privdata = {
+ .hw_init = DAC960_LA_hw_init,
+ .irq_handler = DAC960_LA_intr_handler,
+ .mmio_size = DAC960_LA_mmio_size,
+};
+
+/*
+ * DAC960 PG Series Controllers
+ */
+static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_gen_intr(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_reset_ctrl(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
+{
+ unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
+
+ return idb & DAC960_PG_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_PG_init_in_progress(void __iomem *base)
+{
+ unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
+
+ return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
+{
+ writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
+{
+ writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_intr(void __iomem *base)
+{
+ writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
+
+ return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
+
+ return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_PG_enable_intr(void __iomem *base)
+{
+ unsigned int imask = (unsigned int)-1;
+
+ imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
+ writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_PG_disable_intr(void __iomem *base)
+{
+ unsigned int imask = (unsigned int)-1;
+
+ writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_PG_intr_enabled(void __iomem *base)
+{
+ unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
+
+ return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
+ union myrb_cmd_mbox *mbox)
+{
+ mem_mbox->words[1] = mbox->words[1];
+ mem_mbox->words[2] = mbox->words[2];
+ mem_mbox->words[3] = mbox->words[3];
+ /* Memory barrier to prevent reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Memory barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
+ writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
+ writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
+ writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
+}
+
+static inline unsigned char
+DAC960_PG_read_status_cmd_ident(void __iomem *base)
+{
+ return readb(base + DAC960_PG_STSID_OFFSET);
+}
+
+static inline unsigned short
+DAC960_PG_read_status(void __iomem *base)
+{
+ return readw(base + DAC960_PG_STS_OFFSET);
+}
+
+static inline bool
+DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
+
+ if (!(errsts & DAC960_PG_ERRSTS_PENDING))
+ return false;
+ errsts &= ~DAC960_PG_ERRSTS_PENDING;
+ *error = errsts;
+ *param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
+ *param1 = readb(base + DAC960_PG_CMDID_OFFSET);
+ writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned short
+DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ unsigned short status;
+ int timeout = 0;
+
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (!DAC960_PG_hw_mbox_is_full(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (DAC960_PG_hw_mbox_is_full(base)) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for empty mailbox\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ DAC960_PG_write_hw_mbox(base, mbox);
+ DAC960_PG_hw_mbox_new_cmd(base);
+
+ timeout = 0;
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PG_hw_mbox_status_available(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (!DAC960_PG_hw_mbox_status_available(base)) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for mailbox status\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ status = DAC960_PG_read_status(base);
+ DAC960_PG_ack_hw_mbox_intr(base);
+ DAC960_PG_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_PG_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ DAC960_PG_disable_intr(base);
+ DAC960_PG_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_PG_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PG_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_PG_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_PG_enable_intr(base);
+ cb->qcmd = myrb_qcmd;
+ cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
+ if (cb->dual_mode_interface)
+ cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
+ else
+ cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
+ cb->disable_intr = DAC960_PG_disable_intr;
+ cb->reset = DAC960_PG_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ struct myrb_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ DAC960_PG_ack_intr(base);
+ next_stat_mbox = cb->next_stat_mbox;
+ while (next_stat_mbox->valid) {
+ unsigned char id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = next_stat_mbox->status;
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
+ if (++next_stat_mbox > cb->last_stat_mbox)
+ next_stat_mbox = cb->first_stat_mbox;
+
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ cb->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_PG_privdata = {
+ .hw_init = DAC960_PG_hw_init,
+ .irq_handler = DAC960_PG_intr_handler,
+ .mmio_size = DAC960_PG_mmio_size,
+};
+
+/*
+ * DAC960 PD Series Controllers
+ */
+
+static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
+
+ return idb & DAC960_PD_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_PD_init_in_progress(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
+
+ return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_PD_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
+}
+
+static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
+
+ return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_PD_enable_intr(void __iomem *base)
+{
+ writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
+}
+
+static inline void DAC960_PD_disable_intr(void __iomem *base)
+{
+ writeb(0, base + DAC960_PD_IRQEN_OFFSET);
+}
+
+static inline bool DAC960_PD_intr_enabled(void __iomem *base)
+{
+ unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
+
+ return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
+}
+
+static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
+ writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
+ writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
+ writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
+}
+
+static inline unsigned char
+DAC960_PD_read_status_cmd_ident(void __iomem *base)
+{
+ return readb(base + DAC960_PD_STSID_OFFSET);
+}
+
+static inline unsigned short
+DAC960_PD_read_status(void __iomem *base)
+{
+ return readw(base + DAC960_PD_STS_OFFSET);
+}
+
+static inline bool
+DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
+
+ if (!(errsts & DAC960_PD_ERRSTS_PENDING))
+ return false;
+ errsts &= ~DAC960_PD_ERRSTS_PENDING;
+ *error = errsts;
+ *param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
+ *param1 = readb(base + DAC960_PD_CMDID_OFFSET);
+ writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
+ return true;
+}
+
+static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ void __iomem *base = cb->io_base;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ while (DAC960_PD_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_PD_write_cmd_mbox(base, mbox);
+ DAC960_PD_hw_mbox_new_cmd(base);
+}
+
+static int DAC960_PD_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ if (!request_region(cb->io_addr, 0x80, "myrb")) {
+ dev_err(&pdev->dev, "IO port 0x%lx busy\n",
+ (unsigned long)cb->io_addr);
+ return -EBUSY;
+ }
+ DAC960_PD_disable_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_PD_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PD_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, NULL)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_PD_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_PD_enable_intr(base);
+ cb->qcmd = DAC960_PD_qcmd;
+ cb->disable_intr = DAC960_PD_disable_intr;
+ cb->reset = DAC960_PD_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ while (DAC960_PD_hw_mbox_status_available(base)) {
+ unsigned char id = DAC960_PD_read_status_cmd_ident(base);
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = DAC960_PD_read_status(base);
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ DAC960_PD_ack_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_PD_privdata = {
+ .hw_init = DAC960_PD_hw_init,
+ .irq_handler = DAC960_PD_intr_handler,
+ .mmio_size = DAC960_PD_mmio_size,
+};
+
+/*
+ * DAC960 P Series Controllers
+ *
+ * Similar to the DAC960 PD Series Controllers, but some commands have
+ * to be translated.
+ */
+
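+/*
+ * Translate a V2.xx ENQUIRY reply into the current layout: the old
+ * firmware returns the fields now expected at offsets 132-195 at
+ * offsets 36-99, so move them up and clear the vacated region.
+ */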
+static inline void myrb_translate_enquiry(void *enq)
+{
+ memcpy(enq + 132, enq + 36, 64);
+ memset(enq + 36, 0, 96);
+}
+
+static inline void myrb_translate_devstate(void *state)
+{
+ memcpy(state + 2, state + 3, 1);
+ memmove(state + 4, state + 5, 2);
+ memmove(state + 6, state + 8, 4);
+}
+
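+/*
+ * Convert between the type5 and the old read/write mailbox layouts:
+ * the logical drive number moves into byte 7 of the old format while
+ * the value previously held there is folded into the top bits of
+ * byte 3 (and back again on completion).
+ */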
+static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
+{
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ int ldev_num = mbox->type5.ld.ldev_num;
+
+ mbox->bytes[3] &= 0x7;
+ mbox->bytes[3] |= mbox->bytes[7] << 6;
+ mbox->bytes[7] = ldev_num;
+}
+
+static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
+{
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ int ldev_num = mbox->bytes[7];
+
+ mbox->bytes[7] = mbox->bytes[3] >> 6;
+ mbox->bytes[3] &= 0x7;
+ mbox->bytes[3] |= ldev_num << 3;
+}
+
+static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ void __iomem *base = cb->io_base;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ switch (mbox->common.opcode) {
+ case MYRB_CMD_ENQUIRY:
+ mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
+ break;
+ case MYRB_CMD_GET_DEVICE_STATE:
+ mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
+ break;
+ case MYRB_CMD_READ:
+ mbox->common.opcode = MYRB_CMD_READ_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE:
+ mbox->common.opcode = MYRB_CMD_WRITE_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_READ_SG:
+ mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE_SG:
+ mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ default:
+ break;
+ }
+ while (DAC960_PD_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_PD_write_cmd_mbox(base, mbox);
+ DAC960_PD_hw_mbox_new_cmd(base);
+}
+
+static int DAC960_P_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ if (!request_region(cb->io_addr, 0x80, "myrb")) {
+ dev_err(&pdev->dev, "IO port 0x%lx busy\n",
+ (unsigned long)cb->io_addr);
+ return -EBUSY;
+ }
+ DAC960_PD_disable_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_PD_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PD_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, NULL)) {
+ dev_err(&pdev->dev,
+ "Unable to allocate DMA mapped memory\n");
+ DAC960_PD_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_PD_enable_intr(base);
+ cb->qcmd = DAC960_P_qcmd;
+ cb->disable_intr = DAC960_PD_disable_intr;
+ cb->reset = DAC960_PD_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ while (DAC960_PD_hw_mbox_status_available(base)) {
+ unsigned char id = DAC960_PD_read_status_cmd_ident(base);
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+ union myrb_cmd_mbox *mbox;
+ enum myrb_cmd_opcode op;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = DAC960_PD_read_status(base);
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ DAC960_PD_ack_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+
+ if (!cmd_blk)
+ continue;
+
+ mbox = &cmd_blk->mbox;
+ op = mbox->common.opcode;
+ switch (op) {
+ case MYRB_CMD_ENQUIRY_OLD:
+ mbox->common.opcode = MYRB_CMD_ENQUIRY;
+ myrb_translate_enquiry(cb->enquiry);
+ break;
+ case MYRB_CMD_READ_OLD:
+ mbox->common.opcode = MYRB_CMD_READ;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE_OLD:
+ mbox->common.opcode = MYRB_CMD_WRITE;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_READ_SG_OLD:
+ mbox->common.opcode = MYRB_CMD_READ_SG;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE_SG_OLD:
+ mbox->common.opcode = MYRB_CMD_WRITE_SG;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ default:
+ break;
+ }
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_P_privdata = {
+ .hw_init = DAC960_P_hw_init,
+ .irq_handler = DAC960_P_intr_handler,
+ .mmio_size = DAC960_PD_mmio_size,
+};
+
+static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
+ const struct pci_device_id *entry)
+{
+ struct myrb_privdata *privdata =
+ (struct myrb_privdata *)entry->driver_data;
+ irq_handler_t irq_handler = privdata->irq_handler;
+ unsigned int mmio_size = privdata->mmio_size;
+ struct Scsi_Host *shost;
+ struct myrb_hba *cb = NULL;
+
+ shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
+ if (!shost) {
+ dev_err(&pdev->dev, "Unable to allocate Controller\n");
+ return NULL;
+ }
+ shost->max_cmd_len = 12;
+ shost->max_lun = 256;
+ cb = shost_priv(shost);
+ mutex_init(&cb->dcmd_mutex);
+ mutex_init(&cb->dma_mutex);
+ cb->pdev = pdev;
+
+ if (pci_enable_device(pdev))
+ goto failure;
+
+ if (privdata->hw_init == DAC960_PD_hw_init ||
+ privdata->hw_init == DAC960_P_hw_init) {
+ cb->io_addr = pci_resource_start(pdev, 0);
+ cb->pci_addr = pci_resource_start(pdev, 1);
+ } else {
+ cb->pci_addr = pci_resource_start(pdev, 0);
+ }
+
+ pci_set_drvdata(pdev, cb);
+ spin_lock_init(&cb->queue_lock);
+ if (mmio_size < PAGE_SIZE)
+ mmio_size = PAGE_SIZE;
+ cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
+ if (cb->mmio_base == NULL) {
+ dev_err(&pdev->dev,
+ "Unable to map Controller Register Window\n");
+ goto failure;
+ }
+
+ cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
+ if (privdata->hw_init(pdev, cb, cb->io_base))
+ goto failure;
+
+ if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
+ dev_err(&pdev->dev,
+ "Unable to acquire IRQ Channel %d\n", pdev->irq);
+ goto failure;
+ }
+ cb->irq = pdev->irq;
+ return cb;
+
+failure:
+ dev_err(&pdev->dev,
+ "Failed to initialize Controller\n");
+ myrb_cleanup(cb);
+ return NULL;
+}
+
+static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+ struct myrb_hba *cb;
+ int ret;
+
+ cb = myrb_detect(dev, entry);
+ if (!cb)
+ return -ENODEV;
+
+ ret = myrb_get_hba_config(cb);
+ if (ret < 0) {
+ myrb_cleanup(cb);
+ return ret;
+ }
+
+ if (!myrb_create_mempools(dev, cb)) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ ret = scsi_add_host(cb->host, &dev->dev);
+ if (ret) {
+ dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+ myrb_destroy_mempools(cb);
+ goto failed;
+ }
+ scsi_scan_host(cb->host);
+ return 0;
+failed:
+ myrb_cleanup(cb);
+ return ret;
+}
+
+static void myrb_remove(struct pci_dev *pdev)
+{
+ struct myrb_hba *cb = pci_get_drvdata(pdev);
+
+ shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
+ myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
+ myrb_cleanup(cb);
+ myrb_destroy_mempools(cb);
+}
+
+static const struct pci_device_id myrb_id_table[] = {
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
+ PCI_DEVICE_ID_DEC_21285,
+ PCI_VENDOR_ID_MYLEX,
+ PCI_DEVICE_ID_MYLEX_DAC960_LA),
+ .driver_data = (unsigned long) &DAC960_LA_privdata,
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
+ },
+ {0, },
+};
+
+MODULE_DEVICE_TABLE(pci, myrb_id_table);
+
+static struct pci_driver myrb_pci_driver = {
+ .name = "myrb",
+ .id_table = myrb_id_table,
+ .probe = myrb_probe,
+ .remove = myrb_remove,
+};
+
+static int __init myrb_init_module(void)
+{
+ int ret;
+
+ myrb_raid_template = raid_class_attach(&myrb_raid_functions);
+ if (!myrb_raid_template)
+ return -ENODEV;
+
+ ret = pci_register_driver(&myrb_pci_driver);
+ if (ret)
+ raid_class_release(myrb_raid_template);
+
+ return ret;
+}
+
+static void __exit myrb_cleanup_module(void)
+{
+ pci_unregister_driver(&myrb_pci_driver);
+ raid_class_release(myrb_raid_template);
+}
+
+module_init(myrb_init_module);
+module_exit(myrb_cleanup_module);
+
+MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
new file mode 100644
index 000000000000..9289c19fcb2f
--- /dev/null
+++ b/drivers/scsi/myrb.h
@@ -0,0 +1,958 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ */
+
+#ifndef MYRB_H
+#define MYRB_H
+
+#define MYRB_MAX_LDEVS 32
+#define MYRB_MAX_CHANNELS 3
+#define MYRB_MAX_TARGETS 16
+#define MYRB_MAX_PHYSICAL_DEVICES 45
+#define MYRB_SCATTER_GATHER_LIMIT 32
+#define MYRB_CMD_MBOX_COUNT 256
+#define MYRB_STAT_MBOX_COUNT 1024
+
+#define MYRB_BLKSIZE_BITS 9
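+/* the mailbox init loops poll in 10 usec steps, so roughly 10 seconds */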
+#define MYRB_MAILBOX_TIMEOUT 1000000
+
+#define MYRB_DCMD_TAG 1
+#define MYRB_MCMD_TAG 2
+
+#define MYRB_PRIMARY_MONITOR_INTERVAL (10 * HZ)
+#define MYRB_SECONDARY_MONITOR_INTERVAL (60 * HZ)
+
+/*
+ * DAC960 V1 Firmware Command Opcodes.
+ */
+enum myrb_cmd_opcode {
+ /* I/O Commands */
+ MYRB_CMD_READ_EXTENDED = 0x33,
+ MYRB_CMD_WRITE_EXTENDED = 0x34,
+ MYRB_CMD_READAHEAD_EXTENDED = 0x35,
+ MYRB_CMD_READ_EXTENDED_SG = 0xB3,
+ MYRB_CMD_WRITE_EXTENDED_SG = 0xB4,
+ MYRB_CMD_READ = 0x36,
+ MYRB_CMD_READ_SG = 0xB6,
+ MYRB_CMD_WRITE = 0x37,
+ MYRB_CMD_WRITE_SG = 0xB7,
+ MYRB_CMD_DCDB = 0x04,
+ MYRB_CMD_DCDB_SG = 0x84,
+ MYRB_CMD_FLUSH = 0x0A,
+ /* Controller Status Related Commands */
+ MYRB_CMD_ENQUIRY = 0x53,
+ MYRB_CMD_ENQUIRY2 = 0x1C,
+ MYRB_CMD_GET_LDRV_ELEMENT = 0x55,
+ MYRB_CMD_GET_LDEV_INFO = 0x19,
+ MYRB_CMD_IOPORTREAD = 0x39,
+ MYRB_CMD_IOPORTWRITE = 0x3A,
+ MYRB_CMD_GET_SD_STATS = 0x3E,
+ MYRB_CMD_GET_PD_STATS = 0x3F,
+ MYRB_CMD_EVENT_LOG_OPERATION = 0x72,
+ /* Device Related Commands */
+ MYRB_CMD_START_DEVICE = 0x10,
+ MYRB_CMD_GET_DEVICE_STATE = 0x50,
+ MYRB_CMD_STOP_CHANNEL = 0x13,
+ MYRB_CMD_START_CHANNEL = 0x12,
+ MYRB_CMD_RESET_CHANNEL = 0x1A,
+ /* Commands Associated with Data Consistency and Errors */
+ MYRB_CMD_REBUILD = 0x09,
+ MYRB_CMD_REBUILD_ASYNC = 0x16,
+ MYRB_CMD_CHECK_CONSISTENCY = 0x0F,
+ MYRB_CMD_CHECK_CONSISTENCY_ASYNC = 0x1E,
+ MYRB_CMD_REBUILD_STAT = 0x0C,
+ MYRB_CMD_GET_REBUILD_PROGRESS = 0x27,
+ MYRB_CMD_REBUILD_CONTROL = 0x1F,
+ MYRB_CMD_READ_BADBLOCK_TABLE = 0x0B,
+ MYRB_CMD_READ_BADDATA_TABLE = 0x25,
+ MYRB_CMD_CLEAR_BADDATA_TABLE = 0x26,
+ MYRB_CMD_GET_ERROR_TABLE = 0x17,
+ MYRB_CMD_ADD_CAPACITY_ASYNC = 0x2A,
+ MYRB_CMD_BGI_CONTROL = 0x2B,
+ /* Configuration Related Commands */
+ MYRB_CMD_READ_CONFIG2 = 0x3D,
+ MYRB_CMD_WRITE_CONFIG2 = 0x3C,
+ MYRB_CMD_READ_CONFIG_ONDISK = 0x4A,
+ MYRB_CMD_WRITE_CONFIG_ONDISK = 0x4B,
+ MYRB_CMD_READ_CONFIG = 0x4E,
+ MYRB_CMD_READ_BACKUP_CONFIG = 0x4D,
+ MYRB_CMD_WRITE_CONFIG = 0x4F,
+ MYRB_CMD_ADD_CONFIG = 0x4C,
+ MYRB_CMD_READ_CONFIG_LABEL = 0x48,
+ MYRB_CMD_WRITE_CONFIG_LABEL = 0x49,
+ /* Firmware Upgrade Related Commands */
+ MYRB_CMD_LOAD_IMAGE = 0x20,
+ MYRB_CMD_STORE_IMAGE = 0x21,
+ MYRB_CMD_PROGRAM_IMAGE = 0x22,
+ /* Diagnostic Commands */
+ MYRB_CMD_SET_DIAGNOSTIC_MODE = 0x31,
+ MYRB_CMD_RUN_DIAGNOSTIC = 0x32,
+ /* Subsystem Service Commands */
+ MYRB_CMD_GET_SUBSYS_DATA = 0x70,
+ MYRB_CMD_SET_SUBSYS_PARAM = 0x71,
+ /* Version 2.xx Firmware Commands */
+ MYRB_CMD_ENQUIRY_OLD = 0x05,
+ MYRB_CMD_GET_DEVICE_STATE_OLD = 0x14,
+ MYRB_CMD_READ_OLD = 0x02,
+ MYRB_CMD_WRITE_OLD = 0x03,
+ MYRB_CMD_READ_SG_OLD = 0x82,
+ MYRB_CMD_WRITE_SG_OLD = 0x83
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Command Status Codes.
+ */
+#define MYRB_STATUS_SUCCESS 0x0000 /* Common */
+#define MYRB_STATUS_CHECK_CONDITION 0x0002 /* Common */
+#define MYRB_STATUS_NO_DEVICE 0x0102 /* Common */
+#define MYRB_STATUS_INVALID_ADDRESS 0x0105 /* Common */
+#define MYRB_STATUS_INVALID_PARAM 0x0105 /* Common */
+#define MYRB_STATUS_IRRECOVERABLE_DATA_ERROR 0x0001 /* I/O */
+#define MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE 0x0002 /* I/O */
+#define MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV 0x0105 /* I/O */
+#define MYRB_STATUS_BAD_DATA 0x010C /* I/O */
+#define MYRB_STATUS_DEVICE_BUSY 0x0008 /* DCDB */
+#define MYRB_STATUS_DEVICE_NONRESPONSIVE 0x000E /* DCDB */
+#define MYRB_STATUS_COMMAND_TERMINATED 0x000F /* DCDB */
+#define MYRB_STATUS_START_DEVICE_FAILED 0x0002 /* Device */
+#define MYRB_STATUS_INVALID_CHANNEL_OR_TARGET 0x0105 /* Device */
+#define MYRB_STATUS_CHANNEL_BUSY 0x0106 /* Device */
+#define MYRB_STATUS_OUT_OF_MEMORY 0x0107 /* Device */
+#define MYRB_STATUS_CHANNEL_NOT_STOPPED 0x0002 /* Device */
+#define MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE 0x0002 /* Consistency */
+#define MYRB_STATUS_RBLD_BADBLOCKS 0x0003 /* Consistency */
+#define MYRB_STATUS_RBLD_NEW_DISK_FAILED 0x0004 /* Consistency */
+#define MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS 0x0106 /* Consistency */
+#define MYRB_STATUS_DEPENDENT_DISK_DEAD 0x0002 /* Consistency */
+#define MYRB_STATUS_INCONSISTENT_BLOCKS 0x0003 /* Consistency */
+#define MYRB_STATUS_INVALID_OR_NONREDUNDANT_LDRV 0x0105 /* Consistency */
+#define MYRB_STATUS_NO_RBLD_OR_CHECK_INPROGRESS 0x0105 /* Consistency */
+#define MYRB_STATUS_RBLD_IN_PROGRESS_DATA_VALID 0x0000 /* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE 0x0002 /* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_BADBLOCKS 0x0003 /* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED 0x0004 /* Consistency */
+#define MYRB_STATUS_RBLD_SUCCESS 0x0100 /* Consistency */
+#define MYRB_STATUS_RBLD_SUCCESS_TERMINATED 0x0107 /* Consistency */
+#define MYRB_STATUS_RBLD_NOT_CHECKED 0x0108 /* Consistency */
+#define MYRB_STATUS_BGI_SUCCESS 0x0100 /* Consistency */
+#define MYRB_STATUS_BGI_ABORTED 0x0005 /* Consistency */
+#define MYRB_STATUS_NO_BGI_INPROGRESS 0x0105 /* Consistency */
+#define MYRB_STATUS_ADD_CAPACITY_INPROGRESS 0x0004 /* Consistency */
+#define MYRB_STATUS_ADD_CAPACITY_FAILED_OR_SUSPENDED 0x00F4 /* Consistency */
+#define MYRB_STATUS_CONFIG2_CSUM_ERROR 0x0002 /* Configuration */
+#define MYRB_STATUS_CONFIGURATION_SUSPENDED 0x0106 /* Configuration */
+#define MYRB_STATUS_FAILED_TO_CONFIGURE_NVRAM 0x0105 /* Configuration */
+#define MYRB_STATUS_CONFIGURATION_NOT_SAVED 0x0106 /* Configuration */
+#define MYRB_STATUS_SUBSYS_NOTINSTALLED 0x0001 /* Subsystem */
+#define MYRB_STATUS_SUBSYS_FAILED 0x0002 /* Subsystem */
+#define MYRB_STATUS_SUBSYS_BUSY 0x0106 /* Subsystem */
+#define MYRB_STATUS_SUBSYS_TIMEOUT 0x0108 /* Subsystem */
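+
+/*
+ * Several status codes share a numeric value; they are only meaningful
+ * in the context of the command class that returned them (see the
+ * per-class annotations above).
+ */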
+
+/*
+ * DAC960 V1 Firmware Enquiry Command reply structure.
+ */
+struct myrb_enquiry {
+ unsigned char ldev_count; /* Byte 0 */
+ unsigned int rsvd1:24; /* Bytes 1-3 */
+ unsigned int ldev_sizes[32]; /* Bytes 4-131 */
+ unsigned short flash_age; /* Bytes 132-133 */
+ struct {
+ unsigned char deferred:1; /* Byte 134 Bit 0 */
+ unsigned char low_bat:1; /* Byte 134 Bit 1 */
+ unsigned char rsvd2:6; /* Byte 134 Bits 2-7 */
+ } status;
+ unsigned char rsvd3:8; /* Byte 135 */
+ unsigned char fw_minor_version; /* Byte 136 */
+ unsigned char fw_major_version; /* Byte 137 */
+ enum {
+ MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS = 0x00,
+ MYRB_STDBY_RBLD_IN_PROGRESS = 0x01,
+ MYRB_BG_RBLD_IN_PROGRESS = 0x02,
+ MYRB_BG_CHECK_IN_PROGRESS = 0x03,
+ MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR = 0xFF,
+ MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED = 0xF0,
+ MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED = 0xF1,
+ MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER = 0xF2,
+ MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED = 0xF3
+ } __packed rbld; /* Byte 138 */
+ unsigned char max_tcq; /* Byte 139 */
+ unsigned char ldev_offline; /* Byte 140 */
+ unsigned char rsvd4:8; /* Byte 141 */
+ unsigned short ev_seq; /* Bytes 142-143 */
+ unsigned char ldev_critical; /* Byte 144 */
+ unsigned int rsvd5:24; /* Bytes 145-147 */
+ unsigned char pdev_dead; /* Byte 148 */
+ unsigned char rsvd6:8; /* Byte 149 */
+ unsigned char rbld_count; /* Byte 150 */
+ struct {
+ unsigned char rsvd7:3; /* Byte 151 Bits 0-2 */
+ unsigned char bbu_present:1; /* Byte 151 Bit 3 */
+ unsigned char rsvd8:4; /* Byte 151 Bits 4-7 */
+ } misc;
+ struct {
+ unsigned char target;
+ unsigned char channel;
+ } dead_drives[21]; /* Bytes 152-194 */
+ unsigned char rsvd9[62]; /* Bytes 195-255 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Enquiry2 Command reply structure.
+ */
+struct myrb_enquiry2 {
+ struct {
+ enum {
+ DAC960_V1_P_PD_PU = 0x01,
+ DAC960_V1_PL = 0x02,
+ DAC960_V1_PG = 0x10,
+ DAC960_V1_PJ = 0x11,
+ DAC960_V1_PR = 0x12,
+ DAC960_V1_PT = 0x13,
+ DAC960_V1_PTL0 = 0x14,
+ DAC960_V1_PRL = 0x15,
+ DAC960_V1_PTL1 = 0x16,
+ DAC960_V1_1164P = 0x20
+ } __packed sub_model; /* Byte 0 */
+ unsigned char actual_channels; /* Byte 1 */
+ enum {
+ MYRB_5_CHANNEL_BOARD = 0x01,
+ MYRB_3_CHANNEL_BOARD = 0x02,
+ MYRB_2_CHANNEL_BOARD = 0x03,
+ MYRB_3_CHANNEL_ASIC_DAC = 0x04
+ } __packed model; /* Byte 2 */
+ enum {
+ MYRB_EISA_CONTROLLER = 0x01,
+ MYRB_MCA_CONTROLLER = 0x02,
+ MYRB_PCI_CONTROLLER = 0x03,
+ MYRB_SCSI_TO_SCSI = 0x08
+ } __packed controller; /* Byte 3 */
+ } hw; /* Bytes 0-3 */
+ /* MajorVersion.MinorVersion-FirmwareType-TurnID */
+ struct {
+ unsigned char major_version; /* Byte 4 */
+ unsigned char minor_version; /* Byte 5 */
+ unsigned char turn_id; /* Byte 6 */
+ char firmware_type; /* Byte 7 */
+ } fw; /* Bytes 4-7 */
+ unsigned int rsvd1; /* Bytes 8-11 */
+ unsigned char cfg_chan; /* Byte 12 */
+ unsigned char cur_chan; /* Byte 13 */
+ unsigned char max_targets; /* Byte 14 */
+ unsigned char max_tcq; /* Byte 15 */
+ unsigned char max_ldev; /* Byte 16 */
+ unsigned char max_arms; /* Byte 17 */
+ unsigned char max_spans; /* Byte 18 */
+ unsigned char rsvd2; /* Byte 19 */
+ unsigned int rsvd3; /* Bytes 20-23 */
+ unsigned int mem_size; /* Bytes 24-27 */
+ unsigned int cache_size; /* Bytes 28-31 */
+ unsigned int flash_size; /* Bytes 32-35 */
+ unsigned int nvram_size; /* Bytes 36-39 */
+ struct {
+ enum {
+ MYRB_RAM_TYPE_DRAM = 0x0,
+ MYRB_RAM_TYPE_EDO = 0x1,
+ MYRB_RAM_TYPE_SDRAM = 0x2,
+ MYRB_RAM_TYPE_Last = 0x7
+ } __packed ram:3; /* Byte 40 Bits 0-2 */
+ enum {
+ MYRB_ERR_CORR_None = 0x0,
+ MYRB_ERR_CORR_Parity = 0x1,
+ MYRB_ERR_CORR_ECC = 0x2,
+ MYRB_ERR_CORR_Last = 0x7
+ } __packed ec:3; /* Byte 40 Bits 3-5 */
+ unsigned char fast_page:1; /* Byte 40 Bit 6 */
+ unsigned char low_power:1; /* Byte 40 Bit 7 */
+ unsigned char rsvd4; /* Byte 41 */
+ } mem_type;
+ unsigned short clock_speed; /* Bytes 42-43 */
+ unsigned short mem_speed; /* Bytes 44-45 */
+ unsigned short hw_speed; /* Bytes 46-47 */
+ unsigned char rsvd5[12]; /* Bytes 48-59 */
+ unsigned short max_cmds; /* Bytes 60-61 */
+ unsigned short max_sge; /* Bytes 62-63 */
+ unsigned short max_drv_cmds; /* Bytes 64-65 */
+ unsigned short max_io_desc; /* Bytes 66-67 */
+ unsigned short max_sectors; /* Bytes 68-69 */
+ unsigned char latency; /* Byte 70 */
+ unsigned char rsvd6; /* Byte 71 */
+ unsigned char scsi_tmo; /* Byte 72 */
+ unsigned char rsvd7; /* Byte 73 */
+ unsigned short min_freelines; /* Bytes 74-75 */
+ unsigned char rsvd8[8]; /* Bytes 76-83 */
+ unsigned char rbld_rate_const; /* Byte 84 */
+ unsigned char rsvd9[11]; /* Bytes 85-95 */
+ unsigned short pdrv_block_size; /* Bytes 96-97 */
+ unsigned short ldev_block_size; /* Bytes 98-99 */
+ unsigned short max_blocks_per_cmd; /* Bytes 100-101 */
+ unsigned short block_factor; /* Bytes 102-103 */
+ unsigned short cacheline_size; /* Bytes 104-105 */
+ struct {
+ enum {
+ MYRB_WIDTH_NARROW_8BIT = 0x0,
+ MYRB_WIDTH_WIDE_16BIT = 0x1,
+ MYRB_WIDTH_WIDE_32BIT = 0x2
+ } __packed bus_width:2; /* Byte 106 Bits 0-1 */
+ enum {
+ MYRB_SCSI_SPEED_FAST = 0x0,
+ MYRB_SCSI_SPEED_ULTRA = 0x1,
+ MYRB_SCSI_SPEED_ULTRA2 = 0x2
+ } __packed bus_speed:2; /* Byte 106 Bits 2-3 */
+ unsigned char differential:1; /* Byte 106 Bit 4 */
+ unsigned char rsvd10:3; /* Byte 106 Bits 5-7 */
+ } scsi_cap;
+ unsigned char rsvd11[5]; /* Bytes 107-111 */
+ unsigned short fw_build; /* Bytes 112-113 */
+ enum {
+ MYRB_FAULT_AEMI = 0x01,
+ MYRB_FAULT_OEM1 = 0x02,
+ MYRB_FAULT_OEM2 = 0x04,
+ MYRB_FAULT_OEM3 = 0x08,
+ MYRB_FAULT_CONNER = 0x10,
+ MYRB_FAULT_SAFTE = 0x20
+ } __packed fault_mgmt; /* Byte 114 */
+ unsigned char rsvd12; /* Byte 115 */
+ struct {
+ unsigned int clustering:1; /* Byte 116 Bit 0 */
+ unsigned int online_RAID_expansion:1; /* Byte 116 Bit 1 */
+ unsigned int readahead:1; /* Byte 116 Bit 2 */
+ unsigned int bgi:1; /* Byte 116 Bit 3 */
+ unsigned int rsvd13:28; /* Bytes 116-119 */
+ } fw_features;
+ unsigned char rsvd14[8]; /* Bytes 120-127 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Logical Drive State type.
+ */
+enum myrb_devstate {
+ MYRB_DEVICE_DEAD = 0x00,
+ MYRB_DEVICE_WO = 0x02,
+ MYRB_DEVICE_ONLINE = 0x03,
+ MYRB_DEVICE_CRITICAL = 0x04,
+ MYRB_DEVICE_STANDBY = 0x10,
+ MYRB_DEVICE_OFFLINE = 0xFF
+} __packed;
+
+/*
+ * DAC960 V1 RAID Levels
+ */
+enum myrb_raidlevel {
+ MYRB_RAID_LEVEL0 = 0x0, /* RAID 0 */
+ MYRB_RAID_LEVEL1 = 0x1, /* RAID 1 */
+ MYRB_RAID_LEVEL3 = 0x3, /* RAID 3 */
+ MYRB_RAID_LEVEL5 = 0x5, /* RAID 5 */
+ MYRB_RAID_LEVEL6 = 0x6, /* RAID 6 */
+ MYRB_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Logical Drive Information structure.
+ */
+struct myrb_ldev_info {
+ unsigned int size; /* Bytes 0-3 */
+ enum myrb_devstate state; /* Byte 4 */
+ unsigned int raid_level:7; /* Byte 5 Bits 0-6 */
+ unsigned int wb_enabled:1; /* Byte 5 Bit 7 */
+ unsigned int rsvd:16; /* Bytes 6-7 */
+};
+
+/*
+ * DAC960 V1 Firmware Perform Event Log Operation Types.
+ */
+#define DAC960_V1_GetEventLogEntry 0x00
+
+/*
+ * DAC960 V1 Firmware Get Event Log Entry Command reply structure.
+ */
+struct myrb_log_entry {
+ unsigned char msg_type; /* Byte 0 */
+ unsigned char msg_len; /* Byte 1 */
+ unsigned char target:5; /* Byte 2 Bits 0-4 */
+ unsigned char channel:3; /* Byte 2 Bits 5-7 */
+ unsigned char lun:6; /* Byte 3 Bits 0-5 */
+ unsigned char rsvd1:2; /* Byte 3 Bits 6-7 */
+ unsigned short seq_num; /* Bytes 4-5 */
+ unsigned char sense[26]; /* Bytes 6-31 */
+};
+
+/*
+ * DAC960 V1 Firmware Get Device State Command reply structure.
+ * The structure is padded by 2 bytes for compatibility with Version 2.xx
+ * Firmware.
+ */
+struct myrb_pdev_state {
+ unsigned int present:1; /* Byte 0 Bit 0 */
+ unsigned int :7; /* Byte 0 Bits 1-7 */
+ enum {
+ MYRB_TYPE_OTHER = 0x0,
+ MYRB_TYPE_DISK = 0x1,
+ MYRB_TYPE_TAPE = 0x2,
+ MYRB_TYPE_CDROM_OR_WORM = 0x3
+ } __packed devtype:2; /* Byte 1 Bits 0-1 */
+ unsigned int rsvd1:1; /* Byte 1 Bit 2 */
+ unsigned int fast20:1; /* Byte 1 Bit 3 */
+ unsigned int sync:1; /* Byte 1 Bit 4 */
+ unsigned int fast:1; /* Byte 1 Bit 5 */
+ unsigned int wide:1; /* Byte 1 Bit 6 */
+ unsigned int tcq_supported:1; /* Byte 1 Bit 7 */
+ enum myrb_devstate state; /* Byte 2 */
+ unsigned int rsvd2:8; /* Byte 3 */
+ unsigned int sync_multiplier:8; /* Byte 4 */
+ unsigned int sync_offset:5; /* Byte 5 Bits 0-4 */
+ unsigned int rsvd3:3; /* Byte 5 Bits 5-7 */
+ unsigned int size; /* Bytes 6-9 */
+ unsigned int rsvd4:16; /* Bytes 10-11 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
+ */
+struct myrb_rbld_progress {
+ unsigned int ldev_num; /* Bytes 0-3 */
+ unsigned int ldev_size; /* Bytes 4-7 */
+ unsigned int blocks_left; /* Bytes 8-11 */
+};
+
+/*
+ * DAC960 V1 Firmware Background Initialization Status Command reply structure.
+ */
+struct myrb_bgi_status {
+ unsigned int ldev_size; /* Bytes 0-3 */
+ unsigned int blocks_done; /* Bytes 4-7 */
+ unsigned char rsvd1[12]; /* Bytes 8-19 */
+ unsigned int ldev_num; /* Bytes 20-23 */
+ unsigned char raid_level; /* Byte 24 */
+ enum {
+ MYRB_BGI_INVALID = 0x00,
+ MYRB_BGI_STARTED = 0x02,
+ MYRB_BGI_INPROGRESS = 0x04,
+ MYRB_BGI_SUSPENDED = 0x05,
+ MYRB_BGI_CANCELLED = 0x06
+ } __packed status; /* Byte 25 */
+ unsigned char rsvd2[6]; /* Bytes 26-31 */
+};
+
+/*
+ * DAC960 V1 Firmware Error Table Entry structure.
+ */
+struct myrb_error_entry {
+ unsigned char parity_err; /* Byte 0 */
+ unsigned char soft_err; /* Byte 1 */
+ unsigned char hard_err; /* Byte 2 */
+ unsigned char misc_err; /* Byte 3 */
+};
+
+/*
+ * DAC960 V1 Firmware Read Config2 Command reply structure.
+ */
+struct myrb_config2 {
+ unsigned rsvd1:1; /* Byte 0 Bit 0 */
+ unsigned active_negation:1; /* Byte 0 Bit 1 */
+ unsigned rsvd2:5; /* Byte 0 Bits 2-6 */
+ unsigned no_rescan_on_reset_during_scan:1; /* Byte 0 Bit 7 */
+ unsigned StorageWorks_support:1; /* Byte 1 Bit 0 */
+ unsigned HewlettPackard_support:1; /* Byte 1 Bit 1 */
+ unsigned no_disconnect_on_first_command:1; /* Byte 1 Bit 2 */
+ unsigned rsvd3:2; /* Byte 1 Bits 3-4 */
+ unsigned AEMI_ARM:1; /* Byte 1 Bit 5 */
+ unsigned AEMI_OFM:1; /* Byte 1 Bit 6 */
+ unsigned rsvd4:1; /* Byte 1 Bit 7 */
+ enum {
+ MYRB_OEMID_MYLEX = 0x00,
+ MYRB_OEMID_IBM = 0x08,
+ MYRB_OEMID_HP = 0x0A,
+ MYRB_OEMID_DEC = 0x0C,
+ MYRB_OEMID_SIEMENS = 0x10,
+ MYRB_OEMID_INTEL = 0x12
+ } __packed OEMID; /* Byte 2 */
+ unsigned char oem_model_number; /* Byte 3 */
+ unsigned char physical_sector; /* Byte 4 */
+ unsigned char logical_sector; /* Byte 5 */
+ unsigned char block_factor; /* Byte 6 */
+ unsigned readahead_enabled:1; /* Byte 7 Bit 0 */
+ unsigned low_BIOS_delay:1; /* Byte 7 Bit 1 */
+ unsigned rsvd5:2; /* Byte 7 Bits 2-3 */
+ unsigned restrict_reassign_to_one_sector:1; /* Byte 7 Bit 4 */
+ unsigned rsvd6:1; /* Byte 7 Bit 5 */
+ unsigned FUA_during_write_recovery:1; /* Byte 7 Bit 6 */
+ unsigned enable_LeftSymmetricRAID5Algorithm:1; /* Byte 7 Bit 7 */
+ unsigned char default_rebuild_rate; /* Byte 8 */
+ unsigned char rsvd7; /* Byte 9 */
+ unsigned char blocks_per_cacheline; /* Byte 10 */
+ unsigned char blocks_per_stripe; /* Byte 11 */
+ struct {
+ enum {
+ MYRB_SPEED_ASYNC = 0x0,
+ MYRB_SPEED_SYNC_8MHz = 0x1,
+ MYRB_SPEED_SYNC_5MHz = 0x2,
+ MYRB_SPEED_SYNC_10_OR_20MHz = 0x3
+ } __packed speed:2; /* Bits 0-1 of each channel byte */
+ unsigned force_8bit:1; /* Bit 2 */
+ unsigned disable_fast20:1; /* Bit 3 */
+ unsigned rsvd8:3; /* Bits 4-6 */
+ unsigned enable_tcq:1; /* Bit 7 */
+ } __packed channelparam[6]; /* Bytes 12-17 */
+ unsigned char SCSIInitiatorID; /* Byte 18 */
+ unsigned char rsvd9; /* Byte 19 */
+ enum {
+ MYRB_STARTUP_CONTROLLER_SPINUP = 0x00,
+ MYRB_STARTUP_POWERON_SPINUP = 0x01
+ } __packed startup; /* Byte 20 */
+ unsigned char simultaneous_device_spinup_count; /* Byte 21 */
+ unsigned char seconds_delay_between_spinups; /* Byte 22 */
+ unsigned char rsvd10[29]; /* Bytes 23-51 */
+ unsigned BIOS_disabled:1; /* Byte 52 Bit 0 */
+ unsigned CDROM_boot_enabled:1; /* Byte 52 Bit 1 */
+ unsigned rsvd11:3; /* Byte 52 Bits 2-4 */
+ enum {
+ MYRB_GEOM_128_32 = 0x0,
+ MYRB_GEOM_255_63 = 0x1,
+ MYRB_GEOM_RESERVED1 = 0x2,
+ MYRB_GEOM_RESERVED2 = 0x3
+ } __packed drive_geometry:2; /* Byte 52 Bits 5-6 */
+ unsigned rsvd12:1; /* Byte 52 Bit 7 */
+ unsigned char rsvd13[9]; /* Bytes 53-61 */
+ unsigned short csum; /* Bytes 62-63 */
+};
+
+/*
+ * DAC960 V1 Firmware DCDB request structure.
+ */
+struct myrb_dcdb {
+ unsigned target:4; /* Byte 0 Bits 0-3 */
+ unsigned channel:4; /* Byte 0 Bits 4-7 */
+ enum {
+ MYRB_DCDB_XFER_NONE = 0,
+ MYRB_DCDB_XFER_DEVICE_TO_SYSTEM = 1,
+ MYRB_DCDB_XFER_SYSTEM_TO_DEVICE = 2,
+ MYRB_DCDB_XFER_ILLEGAL = 3
+ } __packed data_xfer:2; /* Byte 1 Bits 0-1 */
+ unsigned early_status:1; /* Byte 1 Bit 2 */
+ unsigned rsvd1:1; /* Byte 1 Bit 3 */
+ enum {
+ MYRB_DCDB_TMO_24_HRS = 0,
+ MYRB_DCDB_TMO_10_SECS = 1,
+ MYRB_DCDB_TMO_60_SECS = 2,
+ MYRB_DCDB_TMO_10_MINS = 3
+ } __packed timeout:2; /* Byte 1 Bits 4-5 */
+ unsigned no_autosense:1; /* Byte 1 Bit 6 */
+ unsigned allow_disconnect:1; /* Byte 1 Bit 7 */
+ unsigned short xfer_len_lo; /* Bytes 2-3 */
+ u32 dma_addr; /* Bytes 4-7 */
+ unsigned char cdb_len:4; /* Byte 8 Bits 0-3 */
+ unsigned char xfer_len_hi4:4; /* Byte 8 Bits 4-7 */
+ unsigned char sense_len; /* Byte 9 */
+ unsigned char cdb[12]; /* Bytes 10-21 */
+ unsigned char sense[64]; /* Bytes 22-85 */
+ unsigned char status; /* Byte 86 */
+ unsigned char rsvd2; /* Byte 87 */
+};
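+
+/*
+ * Editorial sketch, not part of the original submission: the DCDB
+ * transfer length is a 20-bit quantity split across xfer_len_lo
+ * (Bytes 2-3) and xfer_len_hi4 (Byte 8 Bits 4-7). A hypothetical
+ * helper showing how the two fields combine:
+ */
+static inline void myrb_dcdb_set_xfer_len(struct myrb_dcdb *dcdb, u32 len)
+{
+ dcdb->xfer_len_lo = len & 0xffff; /* low 16 bits */
+ dcdb->xfer_len_hi4 = (len >> 16) & 0xf; /* high 4 bits */
+}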
+
+/*
+ * DAC960 V1 Firmware Scatter/Gather List Type 1: 32 Bit Address /
+ * 32 Bit Byte Count structure.
+ */
+struct myrb_sge {
+ u32 sge_addr; /* Bytes 0-3 */
+ u32 sge_count; /* Bytes 4-7 */
+};
+
+/*
+ * 13 Byte DAC960 V1 Firmware Command Mailbox structure.
+ * Bytes 13-15 are not used. The structure is padded to 16 bytes for
+ * efficient access.
+ */
+union myrb_cmd_mbox {
+ unsigned int words[4]; /* Words 0-3 */
+ unsigned char bytes[16]; /* Bytes 0-15 */
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd[14]; /* Bytes 2-15 */
+ } __packed common;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd1[6]; /* Bytes 2-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char optype; /* Byte 2 */
+ unsigned char rsvd1[5]; /* Bytes 3-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3B;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd1[5]; /* Bytes 2-6 */
+ unsigned char ldev_num:6; /* Byte 7 Bits 0-6 */
+ unsigned char auto_restore:1; /* Byte 7 Bit 7 */
+ unsigned char rsvd2[8]; /* Bytes 8-15 */
+ } __packed type3C;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char channel; /* Byte 2 */
+ unsigned char target; /* Byte 3 */
+ enum myrb_devstate state; /* Byte 4 */
+ unsigned char rsvd1[3]; /* Bytes 5-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3D;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char optype; /* Byte 2 */
+ unsigned char opqual; /* Byte 3 */
+ unsigned short ev_seq; /* Bytes 4-5 */
+ unsigned char rsvd1[2]; /* Bytes 6-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3E;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd1[2]; /* Bytes 2-3 */
+ unsigned char rbld_rate; /* Byte 4 */
+ unsigned char rsvd2[3]; /* Bytes 5-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd3[4]; /* Bytes 12-15 */
+ } __packed type3R;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned short xfer_len; /* Bytes 2-3 */
+ unsigned int lba; /* Bytes 4-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char ldev_num; /* Byte 12 */
+ unsigned char rsvd[3]; /* Bytes 13-15 */
+ } __packed type4;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ struct {
+ unsigned short xfer_len:11; /* Bytes 2-3 */
+ unsigned char ldev_num:5; /* Byte 3 Bits 3-7 */
+ } __packed ld;
+ unsigned int lba; /* Bytes 4-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char sg_count:6; /* Byte 12 Bits 0-5 */
+ enum {
+ MYRB_SGL_ADDR32_COUNT32 = 0x0,
+ MYRB_SGL_ADDR32_COUNT16 = 0x1,
+ MYRB_SGL_COUNT32_ADDR32 = 0x2,
+ MYRB_SGL_COUNT16_ADDR32 = 0x3
+ } __packed sg_type:2; /* Byte 12 Bits 6-7 */
+ unsigned char rsvd[3]; /* Bytes 13-15 */
+ } __packed type5;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char opcode2; /* Byte 2 */
+ unsigned char rsvd1:8; /* Byte 3 */
+ u32 cmd_mbox_addr; /* Bytes 4-7 */
+ u32 stat_mbox_addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed typeX;
+};
+
+/*
+ * DAC960 V1 Firmware Controller Status Mailbox structure.
+ */
+struct myrb_stat_mbox {
+ unsigned char id; /* Byte 0 */
+ unsigned char rsvd:7; /* Byte 1 Bits 0-6 */
+ unsigned char valid:1; /* Byte 1 Bit 7 */
+ unsigned short status; /* Bytes 2-3 */
+};
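+
+/*
+ * Editorial note, not part of the original submission: both mailbox
+ * layouts above are fixed by the firmware interface, so a compile-time
+ * check placed in any function (e.g. during module init) could guard
+ * against accidental layout changes:
+ *
+ *	BUILD_BUG_ON(sizeof(union myrb_cmd_mbox) != 16);
+ *	BUILD_BUG_ON(sizeof(struct myrb_stat_mbox) != 4);
+ */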
+
+struct myrb_cmdblk {
+ union myrb_cmd_mbox mbox;
+ unsigned short status;
+ struct completion *completion;
+ struct myrb_dcdb *dcdb;
+ dma_addr_t dcdb_addr;
+ struct myrb_sge *sgl;
+ dma_addr_t sgl_addr;
+};
+
+struct myrb_hba {
+ unsigned int ldev_block_size;
+ unsigned char ldev_geom_heads;
+ unsigned char ldev_geom_sectors;
+ unsigned char bus_width;
+ unsigned short stripe_size;
+ unsigned short segment_size;
+ unsigned short new_ev_seq;
+ unsigned short old_ev_seq;
+ bool dual_mode_interface;
+ bool bgi_status_supported;
+ bool safte_enabled;
+ bool need_ldev_info;
+ bool need_err_info;
+ bool need_rbld;
+ bool need_cc_status;
+ bool need_bgi_status;
+ bool rbld_first;
+
+ struct pci_dev *pdev;
+ struct Scsi_Host *host;
+
+ struct workqueue_struct *work_q;
+ char work_q_name[20];
+ struct delayed_work monitor_work;
+ unsigned long primary_monitor_time;
+ unsigned long secondary_monitor_time;
+
+ struct dma_pool *sg_pool;
+ struct dma_pool *dcdb_pool;
+
+ spinlock_t queue_lock;
+
+ void (*qcmd)(struct myrb_hba *cs, struct myrb_cmdblk *cmd_blk);
+ void (*write_cmd_mbox)(union myrb_cmd_mbox *next_mbox,
+ union myrb_cmd_mbox *cmd_mbox);
+ void (*get_cmd_mbox)(void __iomem *base);
+ void (*disable_intr)(void __iomem *base);
+ void (*reset)(void __iomem *base);
+
+ unsigned int ctlr_num;
+ unsigned char model_name[20];
+ unsigned char fw_version[12];
+
+ unsigned int irq;
+ phys_addr_t io_addr;
+ phys_addr_t pci_addr;
+ void __iomem *io_base;
+ void __iomem *mmio_base;
+
+ size_t cmd_mbox_size;
+ dma_addr_t cmd_mbox_addr;
+ union myrb_cmd_mbox *first_cmd_mbox;
+ union myrb_cmd_mbox *last_cmd_mbox;
+ union myrb_cmd_mbox *next_cmd_mbox;
+ union myrb_cmd_mbox *prev_cmd_mbox1;
+ union myrb_cmd_mbox *prev_cmd_mbox2;
+
+ size_t stat_mbox_size;
+ dma_addr_t stat_mbox_addr;
+ struct myrb_stat_mbox *first_stat_mbox;
+ struct myrb_stat_mbox *last_stat_mbox;
+ struct myrb_stat_mbox *next_stat_mbox;
+
+ struct myrb_cmdblk dcmd_blk;
+ struct myrb_cmdblk mcmd_blk;
+ struct mutex dcmd_mutex;
+
+ struct myrb_enquiry *enquiry;
+ dma_addr_t enquiry_addr;
+
+ struct myrb_error_entry *err_table;
+ dma_addr_t err_table_addr;
+
+ unsigned short last_rbld_status;
+
+ struct myrb_ldev_info *ldev_info_buf;
+ dma_addr_t ldev_info_addr;
+
+ struct myrb_bgi_status bgi_status;
+
+ struct mutex dma_mutex;
+};
+
+/*
+ * DAC960 LA Series Controller Interface Register Offsets.
+ */
+#define DAC960_LA_mmio_size 0x80
+
+enum DAC960_LA_reg_offset {
+ DAC960_LA_IRQMASK_OFFSET = 0x34,
+ DAC960_LA_CMDOP_OFFSET = 0x50,
+ DAC960_LA_CMDID_OFFSET = 0x51,
+ DAC960_LA_MBOX2_OFFSET = 0x52,
+ DAC960_LA_MBOX3_OFFSET = 0x53,
+ DAC960_LA_MBOX4_OFFSET = 0x54,
+ DAC960_LA_MBOX5_OFFSET = 0x55,
+ DAC960_LA_MBOX6_OFFSET = 0x56,
+ DAC960_LA_MBOX7_OFFSET = 0x57,
+ DAC960_LA_MBOX8_OFFSET = 0x58,
+ DAC960_LA_MBOX9_OFFSET = 0x59,
+ DAC960_LA_MBOX10_OFFSET = 0x5A,
+ DAC960_LA_MBOX11_OFFSET = 0x5B,
+ DAC960_LA_MBOX12_OFFSET = 0x5C,
+ DAC960_LA_STSID_OFFSET = 0x5D,
+ DAC960_LA_STS_OFFSET = 0x5E,
+ DAC960_LA_IDB_OFFSET = 0x60,
+ DAC960_LA_ODB_OFFSET = 0x61,
+ DAC960_LA_ERRSTS_OFFSET = 0x63,
+};
+
+/*
+ * DAC960 LA Series Inbound Door Bell Register.
+ */
+#define DAC960_LA_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_LA_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_LA_IDB_GEN_IRQ 0x04
+#define DAC960_LA_IDB_CTRL_RESET 0x08
+#define DAC960_LA_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_LA_IDB_HWMBOX_EMPTY 0x01
+#define DAC960_LA_IDB_INIT_DONE 0x02
+
+/*
+ * DAC960 LA Series Outbound Door Bell Register.
+ */
+#define DAC960_LA_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_LA_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_LA_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_LA_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 LA Series Interrupt Mask Register.
+ */
+#define DAC960_LA_IRQMASK_DISABLE_IRQ 0x04
+
+/*
+ * DAC960 LA Series Error Status Register.
+ */
+#define DAC960_LA_ERRSTS_PENDING 0x02
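+
+/*
+ * Editorial sketch, not part of the original submission: a rough,
+ * hypothetical hardware-mailbox submission sequence using the LA
+ * definitions above. HWMBOX_EMPTY is a read-side status bit of the
+ * inbound doorbell register:
+ *
+ *	while (!(readb(base + DAC960_LA_IDB_OFFSET) &
+ *		 DAC960_LA_IDB_HWMBOX_EMPTY))
+ *		udelay(1);
+ *	(write the 13 mailbox bytes at DAC960_LA_CMDOP_OFFSET onwards)
+ *	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+ */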
+
+/*
+ * DAC960 PG Series Controller Interface Register Offsets.
+ */
+#define DAC960_PG_mmio_size 0x2000
+
+enum DAC960_PG_reg_offset {
+ DAC960_PG_IDB_OFFSET = 0x0020,
+ DAC960_PG_ODB_OFFSET = 0x002C,
+ DAC960_PG_IRQMASK_OFFSET = 0x0034,
+ DAC960_PG_CMDOP_OFFSET = 0x1000,
+ DAC960_PG_CMDID_OFFSET = 0x1001,
+ DAC960_PG_MBOX2_OFFSET = 0x1002,
+ DAC960_PG_MBOX3_OFFSET = 0x1003,
+ DAC960_PG_MBOX4_OFFSET = 0x1004,
+ DAC960_PG_MBOX5_OFFSET = 0x1005,
+ DAC960_PG_MBOX6_OFFSET = 0x1006,
+ DAC960_PG_MBOX7_OFFSET = 0x1007,
+ DAC960_PG_MBOX8_OFFSET = 0x1008,
+ DAC960_PG_MBOX9_OFFSET = 0x1009,
+ DAC960_PG_MBOX10_OFFSET = 0x100A,
+ DAC960_PG_MBOX11_OFFSET = 0x100B,
+ DAC960_PG_MBOX12_OFFSET = 0x100C,
+ DAC960_PG_STSID_OFFSET = 0x1018,
+ DAC960_PG_STS_OFFSET = 0x101A,
+ DAC960_PG_ERRSTS_OFFSET = 0x103F,
+};
+
+/*
+ * DAC960 PG Series Inbound Door Bell Register.
+ */
+#define DAC960_PG_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_PG_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_PG_IDB_GEN_IRQ 0x04
+#define DAC960_PG_IDB_CTRL_RESET 0x08
+#define DAC960_PG_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_PG_IDB_HWMBOX_FULL 0x01
+#define DAC960_PG_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 PG Series Outbound Door Bell Register.
+ */
+#define DAC960_PG_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_PG_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_PG_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_PG_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 PG Series Interrupt Mask Register.
+ */
+#define DAC960_PG_IRQMASK_MSI_MASK1 0x03
+#define DAC960_PG_IRQMASK_DISABLE_IRQ 0x04
+#define DAC960_PG_IRQMASK_MSI_MASK2 0xF8
+
+/*
+ * DAC960 PG Series Error Status Register.
+ */
+#define DAC960_PG_ERRSTS_PENDING 0x04
+
+/*
+ * DAC960 PD Series Controller Interface Register Offsets.
+ */
+#define DAC960_PD_mmio_size 0x80
+
+enum DAC960_PD_reg_offset {
+ DAC960_PD_CMDOP_OFFSET = 0x00,
+ DAC960_PD_CMDID_OFFSET = 0x01,
+ DAC960_PD_MBOX2_OFFSET = 0x02,
+ DAC960_PD_MBOX3_OFFSET = 0x03,
+ DAC960_PD_MBOX4_OFFSET = 0x04,
+ DAC960_PD_MBOX5_OFFSET = 0x05,
+ DAC960_PD_MBOX6_OFFSET = 0x06,
+ DAC960_PD_MBOX7_OFFSET = 0x07,
+ DAC960_PD_MBOX8_OFFSET = 0x08,
+ DAC960_PD_MBOX9_OFFSET = 0x09,
+ DAC960_PD_MBOX10_OFFSET = 0x0A,
+ DAC960_PD_MBOX11_OFFSET = 0x0B,
+ DAC960_PD_MBOX12_OFFSET = 0x0C,
+ DAC960_PD_STSID_OFFSET = 0x0D,
+ DAC960_PD_STS_OFFSET = 0x0E,
+ DAC960_PD_ERRSTS_OFFSET = 0x3F,
+ DAC960_PD_IDB_OFFSET = 0x40,
+ DAC960_PD_ODB_OFFSET = 0x41,
+ DAC960_PD_IRQEN_OFFSET = 0x43,
+};
+
+/*
+ * DAC960 PD Series Inbound Door Bell Register.
+ */
+#define DAC960_PD_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_PD_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_PD_IDB_GEN_IRQ 0x04
+#define DAC960_PD_IDB_CTRL_RESET 0x08
+
+#define DAC960_PD_IDB_HWMBOX_FULL 0x01
+#define DAC960_PD_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 PD Series Outbound Door Bell Register.
+ */
+#define DAC960_PD_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_PD_ODB_HWMBOX_STS_AVAIL 0x01
+
+/*
+ * DAC960 PD Series Interrupt Enable Register.
+ */
+#define DAC960_PD_IRQMASK_ENABLE_IRQ 0x01
+
+/*
+ * DAC960 PD Series Error Status Register.
+ */
+#define DAC960_PD_ERRSTS_PENDING 0x04
+
+typedef int (*myrb_hw_init_t)(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base);
+typedef unsigned short (*mbox_mmio_init_t)(struct pci_dev *pdev,
+ void __iomem *base,
+ union myrb_cmd_mbox *mbox);
+
+struct myrb_privdata {
+ myrb_hw_init_t hw_init;
+ irq_handler_t irq_handler;
+ unsigned int mmio_size;
+};
+
+#endif /* MYRB_H */
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
new file mode 100644
index 000000000000..0264a2e2bc19
--- /dev/null
+++ b/drivers/scsi/myrs.c
@@ -0,0 +1,3268 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * This driver supports the newer, SCSI-based firmware interface only.
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver, which has
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/raid_class.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "myrs.h"
+
+static struct raid_template *myrs_raid_template;
+
+static struct myrs_devstate_name_entry {
+ enum myrs_devstate state;
+ char *name;
+} myrs_devstate_name_list[] = {
+ { MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
+ { MYRS_DEVICE_ONLINE, "Online" },
+ { MYRS_DEVICE_REBUILD, "Rebuild" },
+ { MYRS_DEVICE_MISSING, "Missing" },
+ { MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
+ { MYRS_DEVICE_OFFLINE, "Offline" },
+ { MYRS_DEVICE_CRITICAL, "Critical" },
+ { MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
+ { MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
+ { MYRS_DEVICE_STANDBY, "Standby" },
+ { MYRS_DEVICE_INVALID_STATE, "Invalid" },
+};
+
+static char *myrs_devstate_name(enum myrs_devstate state)
+{
+ struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
+ if (entry[i].state == state)
+ return entry[i].name;
+ }
+ return NULL;
+}
+
+static struct myrs_raid_level_name_entry {
+ enum myrs_raid_level level;
+ char *name;
+} myrs_raid_level_name_list[] = {
+ { MYRS_RAID_LEVEL0, "RAID0" },
+ { MYRS_RAID_LEVEL1, "RAID1" },
+ { MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
+ { MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
+ { MYRS_RAID_LEVEL6, "RAID6" },
+ { MYRS_RAID_JBOD, "JBOD" },
+ { MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
+ { MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
+ { MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
+ { MYRS_RAID_SPAN, "Mylex SPAN" },
+ { MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
+ { MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
+ { MYRS_RAID_PHYSICAL, "Physical device" },
+};
+
+static char *myrs_raid_level_name(enum myrs_raid_level level)
+{
+ struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
+ if (entry[i].level == level)
+ return entry[i].name;
+ }
+ return NULL;
+}
+
+/**
+ * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
+ */
+static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
+{
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ memset(mbox, 0, sizeof(union myrs_cmd_mbox));
+ cmd_blk->status = 0;
+}
+
+/**
+ * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
+ */
+static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+{
+ void __iomem *base = cs->io_base;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;
+
+ cs->write_cmd_mbox(next_mbox, mbox);
+
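+ /*
+ * The controller zeroes words[0] of a command mailbox slot once it
+ * has fetched it. If either of the two most recently posted slots
+ * is already clear, the controller may have gone idle, so ring the
+ * doorbell (get_cmd_mbox) so it notices the new command.
+ */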
+ if (cs->prev_cmd_mbox1->words[0] == 0 ||
+ cs->prev_cmd_mbox2->words[0] == 0)
+ cs->get_cmd_mbox(base);
+
+ cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
+ cs->prev_cmd_mbox1 = next_mbox;
+
+ if (++next_mbox > cs->last_cmd_mbox)
+ next_mbox = cs->first_cmd_mbox;
+
+ cs->next_cmd_mbox = next_mbox;
+}
+
+/**
+ * myrs_exec_cmd - executes V2 Command and waits for completion.
+ */
+static void myrs_exec_cmd(struct myrs_hba *cs,
+ struct myrs_cmdblk *cmd_blk)
+{
+ DECLARE_COMPLETION_ONSTACK(complete);
+ unsigned long flags;
+
+ cmd_blk->complete = &complete;
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ myrs_qcmd(cs, cmd_blk);
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+
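+ /* This helper sleeps waiting for completion; never call it in atomic context */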
+ WARN_ON(in_interrupt());
+ wait_for_completion(&complete);
+}
+
+/**
+ * myrs_report_progress - prints progress message
+ */
+static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
+ unsigned char *msg, unsigned long blocks,
+ unsigned long size)
+{
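+ /*
+ * Shift both counts down by 7 bits before multiplying by 100 so
+ * the intermediate product stays within 32-bit int range for
+ * typical logical drive sizes.
+ */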
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d: %s in Progress: %d%% completed\n",
+ ldev_num, msg,
+ (100 * (int)(blocks >> 7)) / (int)(size >> 7));
+}
+
+/**
+ * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
+ */
+static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ dma_addr_t ctlr_info_addr;
+ union myrs_sgl *sgl;
+ unsigned char status;
+ struct myrs_ctlr_info old;
+
+ memcpy(&old, cs->ctlr_info, sizeof(struct myrs_ctlr_info));
+ ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
+ sizeof(struct myrs_ctlr_info),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->ctlr_info.id = MYRS_DCMD_TAG;
+ mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->ctlr_info.control.dma_ctrl_to_host = true;
+ mbox->ctlr_info.control.no_autosense = true;
+ mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
+ mbox->ctlr_info.ctlr_num = 0;
+ mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
+ sgl = &mbox->ctlr_info.dma_addr;
+ sgl->sge[0].sge_addr = ctlr_info_addr;
+ sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
+ dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
+ sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
+ if (status == MYRS_STATUS_SUCCESS) {
+ if (cs->ctlr_info->bg_init_active +
+ cs->ctlr_info->ldev_init_active +
+ cs->ctlr_info->pdev_init_active +
+ cs->ctlr_info->cc_active +
+ cs->ctlr_info->rbld_active +
+ cs->ctlr_info->exp_active != 0)
+ cs->needs_update = true;
+ if (cs->ctlr_info->ldev_present != old.ldev_present ||
+ cs->ctlr_info->ldev_critical != old.ldev_critical ||
+ cs->ctlr_info->ldev_offline != old.ldev_offline)
+ shost_printk(KERN_INFO, cs->host,
+ "Logical drive count changes (%d/%d/%d)\n",
+ cs->ctlr_info->ldev_critical,
+ cs->ctlr_info->ldev_offline,
+ cs->ctlr_info->ldev_present);
+ }
+
+ return status;
+}
+
+/**
+ * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
+ */
+static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
+ unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ dma_addr_t ldev_info_addr;
+ struct myrs_ldev_info ldev_info_orig;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
+ ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
+ sizeof(struct myrs_ldev_info),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->ldev_info.id = MYRS_DCMD_TAG;
+ mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->ldev_info.control.dma_ctrl_to_host = true;
+ mbox->ldev_info.control.no_autosense = true;
+ mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
+ mbox->ldev_info.ldev.ldev_num = ldev_num;
+ mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
+ sgl = &mbox->ldev_info.dma_addr;
+ sgl->sge[0].sge_addr = ldev_info_addr;
+ sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
+ dev_dbg(&cs->host->shost_gendev,
+ "Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
+ sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
+ if (status == MYRS_STATUS_SUCCESS) {
+ unsigned short ldev_num = ldev_info->ldev_num;
+ struct myrs_ldev_info *new = ldev_info;
+ struct myrs_ldev_info *old = &ldev_info_orig;
+ unsigned long ldev_size = new->cfg_devsize;
+
+ if (new->dev_state != old->dev_state) {
+ const char *name;
+
+ name = myrs_devstate_name(new->dev_state);
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d is now %s\n",
+ ldev_num, name ? name : "Invalid");
+ }
+ if ((new->soft_errs != old->soft_errs) ||
+ (new->cmds_failed != old->cmds_failed) ||
+ (new->deferred_write_errs != old->deferred_write_errs))
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
+ ldev_num, new->soft_errs,
+ new->cmds_failed,
+ new->deferred_write_errs);
+ if (new->bg_init_active)
+ myrs_report_progress(cs, ldev_num,
+ "Background Initialization",
+ new->bg_init_lba, ldev_size);
+ else if (new->fg_init_active)
+ myrs_report_progress(cs, ldev_num,
+ "Foreground Initialization",
+ new->fg_init_lba, ldev_size);
+ else if (new->migration_active)
+ myrs_report_progress(cs, ldev_num,
+ "Data Migration",
+ new->migration_lba, ldev_size);
+ else if (new->patrol_active)
+ myrs_report_progress(cs, ldev_num,
+ "Patrol Operation",
+ new->patrol_lba, ldev_size);
+ if (old->bg_init_active && !new->bg_init_active)
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d: Background Initialization %s\n",
+ ldev_num,
+ (new->ldev_control.ldev_init_done ?
+ "Completed" : "Failed"));
+ }
+ return status;
+}
+
+/**
+ * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
+ */
+static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
+ unsigned char channel, unsigned char target, unsigned char lun,
+ struct myrs_pdev_info *pdev_info)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ dma_addr_t pdev_info_addr;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
+ sizeof(struct myrs_pdev_info),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->pdev_info.id = MYRS_DCMD_TAG;
+ mbox->pdev_info.control.dma_ctrl_to_host = true;
+ mbox->pdev_info.control.no_autosense = true;
+ mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
+ mbox->pdev_info.pdev.lun = lun;
+ mbox->pdev_info.pdev.target = target;
+ mbox->pdev_info.pdev.channel = channel;
+ mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
+ sgl = &mbox->pdev_info.dma_addr;
+ sgl->sge[0].sge_addr = pdev_info_addr;
+ sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
+ dev_dbg(&cs->host->shost_gendev,
+ "Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
+ channel, target, lun);
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
+ sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
+ return status;
+}
+
+/**
+ * myrs_dev_op - executes a "Device Operation" Command
+ */
+static unsigned char myrs_dev_op(struct myrs_hba *cs,
+ enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned char status;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->dev_op.id = MYRS_DCMD_TAG;
+ mbox->dev_op.control.dma_ctrl_to_host = true;
+ mbox->dev_op.control.no_autosense = true;
+ mbox->dev_op.ioctl_opcode = opcode;
+ mbox->dev_op.opdev = opdev;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ return status;
+}
+
+/**
+ * myrs_translate_pdev - translates a Physical Device Channel and
+ * TargetID into a Logical Device.
+ */
+static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
+ unsigned char channel, unsigned char target, unsigned char lun,
+ struct myrs_devmap *devmap)
+{
+ struct pci_dev *pdev = cs->pdev;
+ dma_addr_t devmap_addr;
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ memset(devmap, 0x0, sizeof(struct myrs_devmap));
+ devmap_addr = dma_map_single(&pdev->dev, devmap,
+ sizeof(struct myrs_devmap),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, devmap_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->pdev_info.id = MYRS_DCMD_TAG;
+ mbox->pdev_info.control.dma_ctrl_to_host = true;
+ mbox->pdev_info.control.no_autosense = true;
+ mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
+ mbox->pdev_info.pdev.target = target;
+ mbox->pdev_info.pdev.channel = channel;
+ mbox->pdev_info.pdev.lun = lun;
+ mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
+ sgl = &mbox->pdev_info.dma_addr;
+ sgl->sge[0].sge_addr = devmap_addr;
+ sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
+
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&pdev->dev, devmap_addr,
+ sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
+ return status;
+}
+
+/**
+ * myrs_get_event - executes a Get Event Command
+ */
+static unsigned char myrs_get_event(struct myrs_hba *cs,
+ unsigned int event_num, struct myrs_event *event_buf)
+{
+ struct pci_dev *pdev = cs->pdev;
+ dma_addr_t event_addr;
+ struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ event_addr = dma_map_single(&pdev->dev, event_buf,
+ sizeof(struct myrs_event), DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, event_addr))
+ return MYRS_STATUS_FAILED;
+
+ mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->get_event.dma_size = sizeof(struct myrs_event);
+ mbox->get_event.evnum_upper = event_num >> 16;
+ mbox->get_event.ctlr_num = 0;
+ mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
+ mbox->get_event.evnum_lower = event_num & 0xFFFF;
+ sgl = &mbox->get_event.dma_addr;
+ sgl->sge[0].sge_addr = event_addr;
+ sgl->sge[0].sge_count = mbox->get_event.dma_size;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ dma_unmap_single(&pdev->dev, event_addr,
+ sizeof(struct myrs_event), DMA_FROM_DEVICE);
+
+ return status;
+}
+
+/*
+ * myrs_get_fwstatus - executes a Get Health Status Command
+ */
+static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ myrs_reset_cmd(cmd_blk);
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_MCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ mbox->common.dma_size = sizeof(struct myrs_fwstat);
+ mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
+ sgl = &mbox->common.dma_addr;
+ sgl->sge[0].sge_addr = cs->fwstat_addr;
+ sgl->sge[0].sge_count = mbox->common.dma_size;
+ dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+
+ return status;
+}
+
+/**
+ * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
+ */
+static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
+ enable_mbox_t enable_mbox_fn)
+{
+ void __iomem *base = cs->io_base;
+ struct pci_dev *pdev = cs->pdev;
+ union myrs_cmd_mbox *cmd_mbox;
+ struct myrs_stat_mbox *stat_mbox;
+ union myrs_cmd_mbox *mbox;
+ dma_addr_t mbox_addr;
+ unsigned char status = MYRS_STATUS_FAILED;
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_err(&pdev->dev, "DMA mask out of range\n");
+ return false;
+ }
+
+ /* Temporary mailbox, used only within the scope of this function */
+ mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+ &mbox_addr, GFP_KERNEL);
+ if (!mbox)
+ return false;
+
+ /* These are the base addresses for the command memory mailbox array */
+ cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
+ cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
+ &cs->cmd_mbox_addr, GFP_KERNEL);
+ if (!cmd_mbox) {
+ dev_err(&pdev->dev, "Failed to allocate command mailbox\n");
+ goto out_free;
+ }
+ cs->first_cmd_mbox = cmd_mbox;
+ cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
+ cs->last_cmd_mbox = cmd_mbox;
+ cs->next_cmd_mbox = cs->first_cmd_mbox;
+ cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
+ cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
+
+ /* These are the base addresses for the status memory mailbox array */
+ cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
+ stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
+ &cs->stat_mbox_addr, GFP_KERNEL);
+ if (!stat_mbox) {
+ dev_err(&pdev->dev, "Failed to allocate status mailbox\n");
+ goto out_free;
+ }
+
+ cs->first_stat_mbox = stat_mbox;
+ stat_mbox += MYRS_MAX_STAT_MBOX - 1;
+ cs->last_stat_mbox = stat_mbox;
+ cs->next_stat_mbox = cs->first_stat_mbox;
+
+ cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct myrs_fwstat),
+ &cs->fwstat_addr, GFP_KERNEL);
+ if (!cs->fwstat_buf) {
+ dev_err(&pdev->dev, "Failed to allocate firmware health buffer\n");
+ goto out_free;
+ }
+ cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs->ctlr_info)
+ goto out_free;
+
+ cs->event_buf = kzalloc(sizeof(struct myrs_event),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs->event_buf)
+ goto out_free;
+
+ /* Enable the Memory Mailbox Interface. */
+ memset(mbox, 0, sizeof(union myrs_cmd_mbox));
+ mbox->set_mbox.id = 1;
+ mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->set_mbox.control.no_autosense = true;
+ mbox->set_mbox.first_cmd_mbox_size_kb =
+ (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
+ mbox->set_mbox.first_stat_mbox_size_kb =
+ (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
+ mbox->set_mbox.second_cmd_mbox_size_kb = 0;
+ mbox->set_mbox.second_stat_mbox_size_kb = 0;
+ mbox->set_mbox.sense_len = 0;
+ mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
+ mbox->set_mbox.fwstat_buf_size_kb = 1;
+ mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
+ mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
+ mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
+ status = enable_mbox_fn(base, mbox_addr);
+
+out_free:
+ dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+ mbox, mbox_addr);
+ if (status != MYRS_STATUS_SUCCESS)
+ dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
+ status);
+ return (status == MYRS_STATUS_SUCCESS);
+}
+
+/**
+ * myrs_get_config - reads the Configuration Information
+ */
+static int myrs_get_config(struct myrs_hba *cs)
+{
+ struct myrs_ctlr_info *info = cs->ctlr_info;
+ struct Scsi_Host *shost = cs->host;
+ unsigned char status;
+ unsigned char model[20];
+ unsigned char fw_version[12];
+ int i, model_len;
+
+ /* Get data into dma-able area, then copy into permanent location */
+ mutex_lock(&cs->cinfo_mutex);
+ status = myrs_get_ctlr_info(cs);
+ mutex_unlock(&cs->cinfo_mutex);
+ if (status != MYRS_STATUS_SUCCESS) {
+ shost_printk(KERN_ERR, shost,
+ "Failed to get controller information\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the Controller Model Name and Full Model Name fields. */
+ model_len = sizeof(info->ctlr_name);
+ if (model_len > sizeof(model)-1)
+ model_len = sizeof(model)-1;
+ memcpy(model, info->ctlr_name, model_len);
+ model_len--;
+ while (model[model_len] == ' ' || model[model_len] == '\0')
+ model_len--;
+ model[++model_len] = '\0';
+ strcpy(cs->model_name, "DAC960 ");
+ strcat(cs->model_name, model);
+ /* Initialize the Controller Firmware Version field. */
+ sprintf(fw_version, "%d.%02d-%02d",
+ info->fw_major_version, info->fw_minor_version,
+ info->fw_turn_number);
+ if (info->fw_major_version == 6 &&
+ info->fw_minor_version == 0 &&
+ info->fw_turn_number < 1) {
+ shost_printk(KERN_WARNING, shost,
+ "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
+ "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
+ "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
+ fw_version);
+ return -ENODEV;
+ }
+ /* Initialize the Controller Channels and Targets. */
+ shost->max_channel = info->physchan_present + info->virtchan_present;
+ shost->max_id = info->max_targets[0];
+ for (i = 1; i < 16; i++) {
+ if (!info->max_targets[i])
+ continue;
+ if (shost->max_id < info->max_targets[i])
+ shost->max_id = info->max_targets[i];
+ }
+
+ /*
+ * Initialize the Controller Queue Depth, Driver Queue Depth,
+ * Logical Drive Count, Maximum Blocks per Command, Controller
+ * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
+ * The Driver Queue Depth must be at least three less than
+ * the Controller Queue Depth; tag '1' is reserved for
+ * direct commands, and tag '2' for monitoring commands.
+ */
+ shost->can_queue = info->max_tcq - 3;
+ if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
+ shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
+ shost->max_sectors = info->max_transfer_size;
+ shost->sg_tablesize = info->max_sge;
+ if (shost->sg_tablesize > MYRS_SG_LIMIT)
+ shost->sg_tablesize = MYRS_SG_LIMIT;
+
+ shost_printk(KERN_INFO, shost,
+ "Configuring %s PCI RAID Controller\n", model);
+ shost_printk(KERN_INFO, shost,
+ " Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
+ fw_version, info->physchan_present, info->mem_size_mb);
+
+ shost_printk(KERN_INFO, shost,
+ " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
+ shost->can_queue, shost->max_sectors);
+
+ shost_printk(KERN_INFO, shost,
+ " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
+ shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
+ for (i = 0; i < info->physchan_max; i++) {
+ if (!info->max_targets[i])
+ continue;
+ shost_printk(KERN_INFO, shost,
+ " Device Channel %d: max %d devices\n",
+ i, info->max_targets[i]);
+ }
+ shost_printk(KERN_INFO, shost,
+ " Physical: %d/%d channels, %d disks, %d devices\n",
+ info->physchan_present, info->physchan_max,
+ info->pdisk_present, info->pdev_present);
+
+ shost_printk(KERN_INFO, shost,
+ " Logical: %d/%d channels, %d disks\n",
+ info->virtchan_present, info->virtchan_max,
+ info->ldev_present);
+ return 0;
+}
+
+/**
+ * myrs_log_event - prints a Controller Event message
+ */
+static struct {
+ int ev_code;
+ unsigned char *ev_msg;
+} myrs_ev_list[] = {
+ /* Physical Device Events (0x0000 - 0x007F) */
+ { 0x0001, "P Online" },
+ { 0x0002, "P Standby" },
+ { 0x0005, "P Automatic Rebuild Started" },
+ { 0x0006, "P Manual Rebuild Started" },
+ { 0x0007, "P Rebuild Completed" },
+ { 0x0008, "P Rebuild Cancelled" },
+ { 0x0009, "P Rebuild Failed for Unknown Reasons" },
+ { 0x000A, "P Rebuild Failed due to New Physical Device" },
+ { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
+ { 0x000C, "S Offline" },
+ { 0x000D, "P Found" },
+ { 0x000E, "P Removed" },
+ { 0x000F, "P Unconfigured" },
+ { 0x0010, "P Expand Capacity Started" },
+ { 0x0011, "P Expand Capacity Completed" },
+ { 0x0012, "P Expand Capacity Failed" },
+ { 0x0013, "P Command Timed Out" },
+ { 0x0014, "P Command Aborted" },
+ { 0x0015, "P Command Retried" },
+ { 0x0016, "P Parity Error" },
+ { 0x0017, "P Soft Error" },
+ { 0x0018, "P Miscellaneous Error" },
+ { 0x0019, "P Reset" },
+ { 0x001A, "P Active Spare Found" },
+ { 0x001B, "P Warm Spare Found" },
+ { 0x001C, "S Sense Data Received" },
+ { 0x001D, "P Initialization Started" },
+ { 0x001E, "P Initialization Completed" },
+ { 0x001F, "P Initialization Failed" },
+ { 0x0020, "P Initialization Cancelled" },
+ { 0x0021, "P Failed because Write Recovery Failed" },
+ { 0x0022, "P Failed because SCSI Bus Reset Failed" },
+ { 0x0023, "P Failed because of Double Check Condition" },
+ { 0x0024, "P Failed because Device Cannot Be Accessed" },
+ { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
+ { 0x0026, "P Failed because of Bad Tag from Device" },
+ { 0x0027, "P Failed because of Command Timeout" },
+ { 0x0028, "P Failed because of System Reset" },
+ { 0x0029, "P Failed because of Busy Status or Parity Error" },
+ { 0x002A, "P Failed because Host Set Device to Failed State" },
+ { 0x002B, "P Failed because of Selection Timeout" },
+ { 0x002C, "P Failed because of SCSI Bus Phase Error" },
+ { 0x002D, "P Failed because Device Returned Unknown Status" },
+ { 0x002E, "P Failed because Device Not Ready" },
+ { 0x002F, "P Failed because Device Not Found at Startup" },
+ { 0x0030, "P Failed because COD Write Operation Failed" },
+ { 0x0031, "P Failed because BDT Write Operation Failed" },
+ { 0x0039, "P Missing at Startup" },
+ { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
+ { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
+ { 0x003D, "P Standby Rebuild Started" },
+ /* Logical Device Events (0x0080 - 0x00FF) */
+ { 0x0080, "M Consistency Check Started" },
+ { 0x0081, "M Consistency Check Completed" },
+ { 0x0082, "M Consistency Check Cancelled" },
+ { 0x0083, "M Consistency Check Completed With Errors" },
+ { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
+ { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
+ { 0x0086, "L Offline" },
+ { 0x0087, "L Critical" },
+ { 0x0088, "L Online" },
+ { 0x0089, "M Automatic Rebuild Started" },
+ { 0x008A, "M Manual Rebuild Started" },
+ { 0x008B, "M Rebuild Completed" },
+ { 0x008C, "M Rebuild Cancelled" },
+ { 0x008D, "M Rebuild Failed for Unknown Reasons" },
+ { 0x008E, "M Rebuild Failed due to New Physical Device" },
+ { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
+ { 0x0090, "M Initialization Started" },
+ { 0x0091, "M Initialization Completed" },
+ { 0x0092, "M Initialization Cancelled" },
+ { 0x0093, "M Initialization Failed" },
+ { 0x0094, "L Found" },
+ { 0x0095, "L Deleted" },
+ { 0x0096, "M Expand Capacity Started" },
+ { 0x0097, "M Expand Capacity Completed" },
+ { 0x0098, "M Expand Capacity Failed" },
+ { 0x0099, "L Bad Block Found" },
+ { 0x009A, "L Size Changed" },
+ { 0x009B, "L Type Changed" },
+ { 0x009C, "L Bad Data Block Found" },
+ { 0x009E, "L Read of Data Block in BDT" },
+ { 0x009F, "L Write Back Data for Disk Block Lost" },
+ { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
+ { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
+ { 0x00A2, "L Standby Rebuild Started" },
+ /* Fault Management Events (0x0100 - 0x017F) */
+ { 0x0140, "E Fan %d Failed" },
+ { 0x0141, "E Fan %d OK" },
+ { 0x0142, "E Fan %d Not Present" },
+ { 0x0143, "E Power Supply %d Failed" },
+ { 0x0144, "E Power Supply %d OK" },
+ { 0x0145, "E Power Supply %d Not Present" },
+ { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
+ { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
+ { 0x0148, "E Temperature Sensor %d Temperature Normal" },
+ { 0x0149, "E Temperature Sensor %d Not Present" },
+ { 0x014A, "E Enclosure Management Unit %d Access Critical" },
+ { 0x014B, "E Enclosure Management Unit %d Access OK" },
+ { 0x014C, "E Enclosure Management Unit %d Access Offline" },
+ /* Controller Events (0x0180 - 0x01FF) */
+ { 0x0181, "C Cache Write Back Error" },
+ { 0x0188, "C Battery Backup Unit Found" },
+ { 0x0189, "C Battery Backup Unit Charge Level Low" },
+ { 0x018A, "C Battery Backup Unit Charge Level OK" },
+ { 0x0193, "C Installation Aborted" },
+ { 0x0195, "C Battery Backup Unit Physically Removed" },
+ { 0x0196, "C Memory Error During Warm Boot" },
+ { 0x019E, "C Memory Soft ECC Error Corrected" },
+ { 0x019F, "C Memory Hard ECC Error Corrected" },
+ { 0x01A2, "C Battery Backup Unit Failed" },
+ { 0x01AB, "C Mirror Race Recovery Failed" },
+ { 0x01AC, "C Mirror Race on Critical Drive" },
+ /* Controller Internal Processor Events */
+ { 0x0380, "C Internal Controller Hung" },
+ { 0x0381, "C Internal Controller Firmware Breakpoint" },
+ { 0x0390, "C Internal Controller i960 Processor Specific Error" },
+ { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
+ { 0, "" }
+};
+
+static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
+{
+ unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
+ int ev_idx = 0, ev_code;
+ unsigned char ev_type, *ev_msg;
+ struct Scsi_Host *shost = cs->host;
+ struct scsi_device *sdev;
+ /* zero-init: these are also read below for events without sense data */
+ struct scsi_sense_hdr sshdr = {0};
+ unsigned char sense_info[4] = {0};
+ unsigned char cmd_specific[4] = {0};
+
+ if (ev->ev_code == 0x1C) {
+ if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
+ memset(&sshdr, 0x0, sizeof(sshdr));
+ memset(sense_info, 0x0, sizeof(sense_info));
+ memset(cmd_specific, 0x0, sizeof(cmd_specific));
+ } else {
+ memcpy(sense_info, &ev->sense_data[3], 4);
+ memcpy(cmd_specific, &ev->sense_data[7], 4);
+ }
+ }
+ if (sshdr.sense_key == VENDOR_SPECIFIC &&
+ (sshdr.asc == 0x80 || sshdr.asc == 0x81))
+ ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
+ while (true) {
+ ev_code = myrs_ev_list[ev_idx].ev_code;
+ if (ev_code == ev->ev_code || ev_code == 0)
+ break;
+ ev_idx++;
+ }
+ if (ev_code == 0) {
+ shost_printk(KERN_WARNING, shost,
+ "Unknown Controller Event Code %04X\n",
+ ev->ev_code);
+ return;
+ }
+ ev_type = myrs_ev_list[ev_idx].ev_msg[0];
+ ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
+ switch (ev_type) {
+ case 'P':
+ sdev = scsi_device_lookup(shost, ev->channel,
+ ev->target, 0);
+ if (!sdev) {
+ shost_printk(KERN_INFO, shost,
+ "event %d: Physical Device %d:%d %s\n",
+ ev->ev_seq, ev->channel, ev->target, ev_msg);
+ break;
+ }
+ sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
+ ev->ev_seq, ev_msg);
+ if (sdev->hostdata &&
+ sdev->channel < cs->ctlr_info->physchan_present) {
+ struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+ switch (ev->ev_code) {
+ case 0x0001:
+ case 0x0007:
+ pdev_info->dev_state = MYRS_DEVICE_ONLINE;
+ break;
+ case 0x0002:
+ pdev_info->dev_state = MYRS_DEVICE_STANDBY;
+ break;
+ case 0x000C:
+ pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
+ break;
+ case 0x000E:
+ pdev_info->dev_state = MYRS_DEVICE_MISSING;
+ break;
+ case 0x000F:
+ pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
+ break;
+ }
+ }
+ scsi_device_put(sdev);
+ break;
+ case 'L':
+ shost_printk(KERN_INFO, shost,
+ "event %d: Logical Drive %d %s\n",
+ ev->ev_seq, ev->lun, ev_msg);
+ cs->needs_update = true;
+ break;
+ case 'M':
+ shost_printk(KERN_INFO, shost,
+ "event %d: Logical Drive %d %s\n",
+ ev->ev_seq, ev->lun, ev_msg);
+ cs->needs_update = true;
+ break;
+ case 'S':
+ if (sshdr.sense_key == NO_SENSE ||
+ (sshdr.sense_key == NOT_READY &&
+ sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
+ sshdr.ascq == 0x02)))
+ break;
+ shost_printk(KERN_INFO, shost,
+ "event %d: Physical Device %d:%d %s\n",
+ ev->ev_seq, ev->channel, ev->target, ev_msg);
+ shost_printk(KERN_INFO, shost,
+ "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
+ ev->channel, ev->target,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ shost_printk(KERN_INFO, shost,
+ "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
+ ev->channel, ev->target,
+ sense_info[0], sense_info[1],
+ sense_info[2], sense_info[3],
+ cmd_specific[0], cmd_specific[1],
+ cmd_specific[2], cmd_specific[3]);
+ break;
+ case 'E':
+ if (cs->disable_enc_msg)
+ break;
+ sprintf(msg_buf, ev_msg, ev->lun);
+ shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
+ ev->ev_seq, ev->target, msg_buf);
+ break;
+ case 'C':
+ shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
+ ev->ev_seq, ev_msg);
+ break;
+ default:
+ shost_printk(KERN_INFO, shost,
+ "event %d: Unknown Event Code %04X\n",
+ ev->ev_seq, ev->ev_code);
+ break;
+ }
+}
+
+/*
+ * SCSI sysfs interface functions
+ */
+static ssize_t raid_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ int ret;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+ const char *name;
+
+ name = myrs_devstate_name(ldev_info->dev_state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->dev_state);
+ } else {
+ struct myrs_pdev_info *pdev_info;
+ const char *name;
+
+ pdev_info = sdev->hostdata;
+ name = myrs_devstate_name(pdev_info->dev_state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ pdev_info->dev_state);
+ }
+ return ret;
+}
+
+static ssize_t raid_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ enum myrs_devstate new_state;
+ unsigned short ldev_num;
+ unsigned char status;
+
+ if (!strncmp(buf, "offline", 7) ||
+ !strncmp(buf, "kill", 4))
+ new_state = MYRS_DEVICE_OFFLINE;
+ else if (!strncmp(buf, "online", 6))
+ new_state = MYRS_DEVICE_ONLINE;
+ else if (!strncmp(buf, "standby", 7))
+ new_state = MYRS_DEVICE_STANDBY;
+ else
+ return -EINVAL;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present) {
+ struct myrs_pdev_info *pdev_info = sdev->hostdata;
+ struct myrs_devmap *pdev_devmap =
+ (struct myrs_devmap *)&pdev_info->rsvd13;
+
+ if (pdev_info->dev_state == new_state) {
+ sdev_printk(KERN_INFO, sdev,
+ "Device already in %s\n",
+ myrs_devstate_name(new_state));
+ return count;
+ }
+ status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
+ sdev->lun, pdev_devmap);
+ if (status != MYRS_STATUS_SUCCESS)
+ return -ENXIO;
+ ldev_num = pdev_devmap->ldev_num;
+ } else {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ if (ldev_info->dev_state == new_state) {
+ sdev_printk(KERN_INFO, sdev,
+ "Device already in %s\n",
+ myrs_devstate_name(new_state));
+ return count;
+ }
+ ldev_num = ldev_info->ldev_num;
+ }
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
+ mbox->set_devstate.state = new_state;
+ mbox->set_devstate.ldev.ldev_num = ldev_num;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status == MYRS_STATUS_SUCCESS) {
+ if (sdev->channel < cs->ctlr_info->physchan_present) {
+ struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+ pdev_info->dev_state = new_state;
+ } else {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ ldev_info->dev_state = new_state;
+ }
+ sdev_printk(KERN_INFO, sdev,
+ "Set device state to %s\n",
+ myrs_devstate_name(new_state));
+ return count;
+ }
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to set device state to %s, status 0x%02x\n",
+ myrs_devstate_name(new_state), status);
+ return -EINVAL;
+}
+static DEVICE_ATTR_RW(raid_state);
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ const char *name = NULL;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info;
+
+ ldev_info = sdev->hostdata;
+ name = myrs_raid_level_name(ldev_info->raid_level);
+ if (!name)
+ return snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->dev_state);
+
+ } else
+ name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
+
+ return snprintf(buf, 32, "%s\n", name);
+}
+static DEVICE_ATTR_RO(raid_level);
+
+static ssize_t rebuild_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ unsigned short ldev_num;
+ unsigned char status;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return snprintf(buf, 32, "physical device - not rebuilding\n");
+
+ ldev_info = sdev->hostdata;
+ ldev_num = ldev_info->ldev_num;
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+ if (ldev_info->rbld_active) {
+ return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
+ (size_t)ldev_info->rbld_lba,
+ (size_t)ldev_info->cfg_devsize);
+ } else
+ return snprintf(buf, 32, "not rebuilding\n");
+}
+
+static ssize_t rebuild_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ unsigned short ldev_num;
+ unsigned char status;
+ int rebuild, ret;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return -EINVAL;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ ldev_num = ldev_info->ldev_num;
+
+ ret = kstrtoint(buf, 0, &rebuild);
+ if (ret)
+ return ret;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+
+ if (rebuild && ldev_info->rbld_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Initiated; already in progress\n");
+ return -EALREADY;
+ }
+ if (!rebuild && !ldev_info->rbld_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Cancelled; no rebuild in progress\n");
+ return count;
+ }
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ if (rebuild) {
+ mbox->ldev_info.ldev.ldev_num = ldev_num;
+ mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
+ } else {
+ mbox->ldev_info.ldev.ldev_num = ldev_num;
+ mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
+ }
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not %s, status 0x%02x\n",
+ rebuild ? "Initiated" : "Cancelled", status);
+ ret = -EIO;
+ } else {
+ sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+ rebuild ? "Initiated" : "Cancelled");
+ ret = count;
+ }
+
+ return ret;
+}
+static DEVICE_ATTR_RW(rebuild);
+
+static ssize_t consistency_check_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ unsigned short ldev_num;
+ unsigned char status;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return snprintf(buf, 32, "physical device - not checking\n");
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ ldev_num = ldev_info->ldev_num;
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+ if (ldev_info->cc_active)
+ return snprintf(buf, 32, "checking block %zu of %zu\n",
+ (size_t)ldev_info->cc_lba,
+ (size_t)ldev_info->cfg_devsize);
+ else
+ return snprintf(buf, 32, "not checking\n");
+}
+
+static ssize_t consistency_check_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ unsigned short ldev_num;
+ unsigned char status;
+ int check, ret;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return -EINVAL;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ ldev_num = ldev_info->ldev_num;
+
+ ret = kstrtoint(buf, 0, &check);
+ if (ret)
+ return ret;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+ if (check && ldev_info->cc_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check Not Initiated; "
+ "already in progress\n");
+ return -EALREADY;
+ }
+ if (!check && !ldev_info->cc_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check Not Cancelled; "
+ "check not in progress\n");
+ return count;
+ }
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ if (check) {
+ mbox->cc.ldev.ldev_num = ldev_num;
+ mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
+ mbox->cc.restore_consistency = true;
+ mbox->cc.initialized_area_only = false;
+ } else {
+ mbox->cc.ldev.ldev_num = ldev_num;
+ mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
+ }
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check Not %s, status 0x%02x\n",
+ check ? "Initiated" : "Cancelled", status);
+ ret = -EIO;
+ } else {
+ sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
+ check ? "Initiated" : "Cancelled");
+ ret = count;
+ }
+
+ return ret;
+}
+static DEVICE_ATTR_RW(consistency_check);
+
+static struct device_attribute *myrs_sdev_attrs[] = {
+ &dev_attr_consistency_check,
+ &dev_attr_rebuild,
+ &dev_attr_raid_state,
+ &dev_attr_raid_level,
+ NULL,
+};
+
+static ssize_t serial_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ char serial[17];
+
+ memcpy(serial, cs->ctlr_info->serial_number, 16);
+ serial[16] = '\0';
+ return snprintf(buf, 18, "%s\n", serial);
+}
+static DEVICE_ATTR_RO(serial);
+
+static ssize_t ctlr_num_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 20, "%d\n", cs->host->host_no);
+}
+static DEVICE_ATTR_RO(ctlr_num);
+
+static struct myrs_cpu_type_tbl {
+ enum myrs_cpu_type type;
+ char *name;
+} myrs_cpu_type_names[] = {
+ { MYRS_CPUTYPE_i960CA, "i960CA" },
+ { MYRS_CPUTYPE_i960RD, "i960RD" },
+ { MYRS_CPUTYPE_i960RN, "i960RN" },
+ { MYRS_CPUTYPE_i960RP, "i960RP" },
+ { MYRS_CPUTYPE_NorthBay, "NorthBay" },
+ { MYRS_CPUTYPE_StrongArm, "StrongARM" },
+ { MYRS_CPUTYPE_i960RM, "i960RM" },
+};
+
+static ssize_t processor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ struct myrs_cpu_type_tbl *tbl;
+ const char *first_processor = NULL;
+ const char *second_processor = NULL;
+ struct myrs_ctlr_info *info = cs->ctlr_info;
+ ssize_t ret;
+ int i;
+
+ if (info->cpu[0].cpu_count) {
+ tbl = myrs_cpu_type_names;
+ for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
+ if (tbl[i].type == info->cpu[0].cpu_type) {
+ first_processor = tbl[i].name;
+ break;
+ }
+ }
+ }
+ if (info->cpu[1].cpu_count) {
+ tbl = myrs_cpu_type_names;
+ for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
+ if (tbl[i].type == info->cpu[1].cpu_type) {
+ second_processor = tbl[i].name;
+ break;
+ }
+ }
+ }
+ if (first_processor && second_processor)
+ ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
+ "2: %s (%s, %d cpus)\n",
+ info->cpu[0].cpu_name,
+ first_processor, info->cpu[0].cpu_count,
+ info->cpu[1].cpu_name,
+ second_processor, info->cpu[1].cpu_count);
+ else if (first_processor && !second_processor)
+ ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
+ info->cpu[0].cpu_name,
+ first_processor, info->cpu[0].cpu_count);
+ else if (!first_processor && second_processor)
+ ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
+ info->cpu[1].cpu_name,
+ second_processor, info->cpu[1].cpu_count);
+ else
+ ret = snprintf(buf, 64, "1: absent\n2: absent\n");
+
+ return ret;
+}
+static DEVICE_ATTR_RO(processor);
+
+static ssize_t model_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 28, "%s\n", cs->model_name);
+}
+static DEVICE_ATTR_RO(model);
+
+static ssize_t ctlr_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
+}
+static DEVICE_ATTR_RO(ctlr_type);
+
+static ssize_t cache_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
+}
+static DEVICE_ATTR_RO(cache_size);
+
+static ssize_t firmware_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 16, "%d.%02d-%02d\n",
+ cs->ctlr_info->fw_major_version,
+ cs->ctlr_info->fw_minor_version,
+ cs->ctlr_info->fw_turn_number);
+}
+static DEVICE_ATTR_RO(firmware);
+
+static ssize_t discovery_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ unsigned char status;
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status != MYRS_STATUS_SUCCESS) {
+ shost_printk(KERN_INFO, shost,
+ "Discovery Not Initiated, status %02X\n",
+ status);
+ return -EINVAL;
+ }
+ shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
+ cs->next_evseq = 0;
+ cs->needs_update = true;
+ queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
+ flush_delayed_work(&cs->monitor_work);
+ shost_printk(KERN_INFO, shost, "Discovery Completed\n");
+
+ return count;
+}
+static DEVICE_ATTR_WO(discovery);
+
+static ssize_t flush_cache_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ unsigned char status;
+
+ status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
+ MYRS_RAID_CONTROLLER);
+ if (status == MYRS_STATUS_SUCCESS) {
+ shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
+ return count;
+ }
+ shost_printk(KERN_INFO, shost,
+ "Cache Flush failed, status 0x%02x\n", status);
+ return -EIO;
+}
+static DEVICE_ATTR_WO(flush_cache);
+
+static ssize_t disable_enclosure_messages_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
+}
+
+static ssize_t disable_enclosure_messages_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ int value, ret;
+
+ ret = kstrtoint(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if (value > 2)
+ return -EINVAL;
+
+ cs->disable_enc_msg = value;
+ return count;
+}
+static DEVICE_ATTR_RW(disable_enclosure_messages);
+
+static struct device_attribute *myrs_shost_attrs[] = {
+ &dev_attr_serial,
+ &dev_attr_ctlr_num,
+ &dev_attr_processor,
+ &dev_attr_model,
+ &dev_attr_ctlr_type,
+ &dev_attr_cache_size,
+ &dev_attr_firmware,
+ &dev_attr_discovery,
+ &dev_attr_flush_cache,
+ &dev_attr_disable_enclosure_messages,
+ NULL,
+};
+
+/*
+ * SCSI midlayer interface
+ */
+int myrs_host_reset(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost = scmd->device->host;
+ struct myrs_hba *cs = shost_priv(shost);
+
+ cs->reset(cs->io_base);
+ return SUCCESS;
+}
+
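+/*
+ * Synthesize a MODE SENSE caching page (0x08) reply for logical
+ * volumes from the cached logical device information.
+ */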
+static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
+ struct myrs_ldev_info *ldev_info)
+{
+ unsigned char modes[32], *mode_pg;
+ bool dbd;
+ size_t mode_len;
+
+ dbd = (scmd->cmnd[1] & 0x08) == 0x08;
+ if (dbd) {
+ mode_len = 24;
+ mode_pg = &modes[4];
+ } else {
+ mode_len = 32;
+ mode_pg = &modes[12];
+ }
+ memset(modes, 0, sizeof(modes));
+ modes[0] = mode_len - 1;
+ modes[2] = 0x10; /* Enable FUA */
+ if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
+ modes[2] |= 0x80;
+ if (!dbd) {
+ unsigned char *block_desc = &modes[4];
+
+ modes[3] = 8;
+ put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
+ put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
+ }
+ mode_pg[0] = 0x08;
+ mode_pg[1] = 0x12;
+ if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
+ mode_pg[2] |= 0x01;
+ if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
+ ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
+ mode_pg[2] |= 0x04;
+ if (ldev_info->cacheline_size) {
+ mode_pg[2] |= 0x08;
+ put_unaligned_be16(1 << ldev_info->cacheline_size,
+ &mode_pg[14]);
+ }
+
+ scsi_sg_copy_from_buffer(scmd, modes, mode_len);
+}
+
+static int myrs_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct myrs_hba *cs = shost_priv(shost);
+ struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct scsi_device *sdev = scmd->device;
+ union myrs_sgl *hw_sge;
+ dma_addr_t sense_addr;
+ struct scatterlist *sgl;
+ unsigned long flags, timeout;
+ int nsge;
+
+ if (!scmd->device->hostdata) {
+ scmd->result = (DID_NO_CONNECT << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ switch (scmd->cmnd[0]) {
+ case REPORT_LUNS:
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x20, 0x0);
+ scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ case MODE_SENSE:
+ if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+ (scmd->cmnd[2] & 0x3F) != 0x08) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ myrs_mode_sense(cs, scmd, ldev_info);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ break;
+ }
+
+ myrs_reset_cmd(cmd_blk);
+ cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
+ &sense_addr);
+ if (!cmd_blk->sense)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ cmd_blk->sense_addr = sense_addr;
+
+ timeout = scmd->request->timeout;
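+ /*
+ * CDBs of up to 10 bytes fit into the command mailbox itself;
+ * longer CDBs are passed via a separately allocated DCDB buffer.
+ */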
+ if (scmd->cmd_len <= 10) {
+ if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
+ mbox->SCSI_10.pdev.lun = ldev_info->lun;
+ mbox->SCSI_10.pdev.target = ldev_info->target;
+ mbox->SCSI_10.pdev.channel = ldev_info->channel;
+ mbox->SCSI_10.pdev.ctlr = 0;
+ } else {
+ mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
+ mbox->SCSI_10.pdev.lun = sdev->lun;
+ mbox->SCSI_10.pdev.target = sdev->id;
+ mbox->SCSI_10.pdev.channel = sdev->channel;
+ }
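+ /* Tags 1 and 2 are reserved for driver-internal commands */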
+ mbox->SCSI_10.id = scmd->request->tag + 3;
+ mbox->SCSI_10.control.dma_ctrl_to_host =
+ (scmd->sc_data_direction == DMA_FROM_DEVICE);
+ if (scmd->request->cmd_flags & REQ_FUA)
+ mbox->SCSI_10.control.fua = true;
+ mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
+ mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
+ mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
+ mbox->SCSI_10.cdb_len = scmd->cmd_len;
+ if (timeout > 60) {
+ mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
+ mbox->SCSI_10.tmo.tmo_val = timeout / 60;
+ } else {
+ mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
+ mbox->SCSI_10.tmo.tmo_val = timeout;
+ }
+ memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
+ hw_sge = &mbox->SCSI_10.dma_addr;
+ cmd_blk->dcdb = NULL;
+ } else {
+ dma_addr_t dcdb_dma;
+
+ cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
+ &dcdb_dma);
+ if (!cmd_blk->dcdb) {
+ dma_pool_free(cs->sense_pool, cmd_blk->sense,
+ cmd_blk->sense_addr);
+ cmd_blk->sense = NULL;
+ cmd_blk->sense_addr = 0;
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ cmd_blk->dcdb_dma = dcdb_dma;
+ if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
+ mbox->SCSI_255.pdev.lun = ldev_info->lun;
+ mbox->SCSI_255.pdev.target = ldev_info->target;
+ mbox->SCSI_255.pdev.channel = ldev_info->channel;
+ mbox->SCSI_255.pdev.ctlr = 0;
+ } else {
+ mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
+ mbox->SCSI_255.pdev.lun = sdev->lun;
+ mbox->SCSI_255.pdev.target = sdev->id;
+ mbox->SCSI_255.pdev.channel = sdev->channel;
+ }
+ mbox->SCSI_255.id = scmd->request->tag + 3;
+ mbox->SCSI_255.control.dma_ctrl_to_host =
+ (scmd->sc_data_direction == DMA_FROM_DEVICE);
+ if (scmd->request->cmd_flags & REQ_FUA)
+ mbox->SCSI_255.control.fua = true;
+ mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
+ mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
+ mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
+ mbox->SCSI_255.cdb_len = scmd->cmd_len;
+ mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
+ if (timeout > 60) {
+ mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
+ mbox->SCSI_255.tmo.tmo_val = timeout / 60;
+ } else {
+ mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
+ mbox->SCSI_255.tmo.tmo_val = timeout;
+ }
+ memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
+ hw_sge = &mbox->SCSI_255.dma_addr;
+ }
+ if (scmd->sc_data_direction == DMA_NONE)
+ goto submit;
+ nsge = scsi_dma_map(scmd);
+ if (nsge == 1) {
+ sgl = scsi_sglist(scmd);
+ hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
+ hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
+ } else {
+ struct myrs_sge *hw_sgl;
+ dma_addr_t hw_sgl_addr;
+ int i;
+
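+ /*
+ * Up to two segments fit into the mailbox directly; larger
+ * scatterlists go into a pool-allocated SGL that the mailbox
+ * references indirectly.
+ */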
+ if (nsge > 2) {
+ hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
+ &hw_sgl_addr);
+ if (WARN_ON(!hw_sgl)) {
+ if (cmd_blk->dcdb) {
+ dma_pool_free(cs->dcdb_pool,
+ cmd_blk->dcdb,
+ cmd_blk->dcdb_dma);
+ cmd_blk->dcdb = NULL;
+ cmd_blk->dcdb_dma = 0;
+ }
+ dma_pool_free(cs->sense_pool,
+ cmd_blk->sense,
+ cmd_blk->sense_addr);
+ cmd_blk->sense = NULL;
+ cmd_blk->sense_addr = 0;
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ cmd_blk->sgl = hw_sgl;
+ cmd_blk->sgl_addr = hw_sgl_addr;
+ if (scmd->cmd_len <= 10)
+ mbox->SCSI_10.control.add_sge_mem = true;
+ else
+ mbox->SCSI_255.control.add_sge_mem = true;
+ hw_sge->ext.sge0_len = nsge;
+ hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
+ } else
+ hw_sgl = hw_sge->sge;
+
+ scsi_for_each_sg(scmd, sgl, nsge, i) {
+ if (WARN_ON(!hw_sgl)) {
+ scsi_dma_unmap(scmd);
+ scmd->result = (DID_ERROR << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
+ hw_sgl->sge_count = (u64)sg_dma_len(sgl);
+ hw_sgl++;
+ }
+ }
+submit:
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ myrs_qcmd(cs, cmd_blk);
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+
+ return 0;
+}
+
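+/*
+ * Map a logical device to its controller-wide logical device number;
+ * logical devices are enumerated on the channels above the physical
+ * ones, max_id targets per channel.
+ */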
+static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
+ struct scsi_device *sdev)
+{
+ unsigned short ldev_num;
+ unsigned int chan_offset =
+ sdev->channel - cs->ctlr_info->physchan_present;
+
+ ldev_num = sdev->id + chan_offset * sdev->host->max_id;
+
+ return ldev_num;
+}
+
+static int myrs_slave_alloc(struct scsi_device *sdev)
+{
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ unsigned char status;
+
+ if (sdev->channel > sdev->host->max_channel)
+ return 0;
+
+ if (sdev->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info;
+ unsigned short ldev_num;
+
+ if (sdev->lun > 0)
+ return -ENXIO;
+
+ ldev_num = myrs_translate_ldev(cs, sdev);
+
+ ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
+ if (!ldev_info)
+ return -ENOMEM;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev->hostdata = NULL;
+ kfree(ldev_info);
+ } else {
+ enum raid_level level;
+
+ dev_dbg(&sdev->sdev_gendev,
+ "Logical device mapping %d:%d:%d -> %d\n",
+ ldev_info->channel, ldev_info->target,
+ ldev_info->lun, ldev_info->ldev_num);
+
+ sdev->hostdata = ldev_info;
+ switch (ldev_info->raid_level) {
+ case MYRS_RAID_LEVEL0:
+ level = RAID_LEVEL_LINEAR;
+ break;
+ case MYRS_RAID_LEVEL1:
+ level = RAID_LEVEL_1;
+ break;
+ case MYRS_RAID_LEVEL3:
+ case MYRS_RAID_LEVEL3F:
+ case MYRS_RAID_LEVEL3L:
+ level = RAID_LEVEL_3;
+ break;
+ case MYRS_RAID_LEVEL5:
+ case MYRS_RAID_LEVEL5L:
+ level = RAID_LEVEL_5;
+ break;
+ case MYRS_RAID_LEVEL6:
+ level = RAID_LEVEL_6;
+ break;
+ case MYRS_RAID_LEVELE:
+ case MYRS_RAID_NEWSPAN:
+ case MYRS_RAID_SPAN:
+ level = RAID_LEVEL_LINEAR;
+ break;
+ case MYRS_RAID_JBOD:
+ level = RAID_LEVEL_JBOD;
+ break;
+ default:
+ level = RAID_LEVEL_UNKNOWN;
+ break;
+ }
+ raid_set_level(myrs_raid_template,
+ &sdev->sdev_gendev, level);
+ if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
+ const char *name;
+
+ name = myrs_devstate_name(ldev_info->dev_state);
+ sdev_printk(KERN_DEBUG, sdev,
+ "logical device in state %s\n",
+ name ? name : "Invalid");
+ }
+ }
+ } else {
+ struct myrs_pdev_info *pdev_info;
+
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ if (!pdev_info)
+ return -ENOMEM;
+
+ status = myrs_get_pdev_info(cs, sdev->channel,
+ sdev->id, sdev->lun,
+ pdev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev->hostdata = NULL;
+ kfree(pdev_info);
+ return -ENXIO;
+ }
+ sdev->hostdata = pdev_info;
+ }
+ return 0;
+}
+
+static int myrs_slave_configure(struct scsi_device *sdev)
+{
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+
+ if (sdev->channel > sdev->host->max_channel)
+ return -ENXIO;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present) {
+ /* Skip HBA device */
+ if (sdev->type == TYPE_RAID)
+ return -ENXIO;
+ sdev->no_uld_attach = 1;
+ return 0;
+ }
+ if (sdev->lun != 0)
+ return -ENXIO;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
+ ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
+ sdev->wce_default_on = 1;
+ sdev->tagged_supported = 1;
+ return 0;
+}
+
+static void myrs_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+}
+
+struct scsi_host_template myrs_template = {
+ .module = THIS_MODULE,
+ .name = "DAC960",
+ .proc_name = "myrs",
+ .queuecommand = myrs_queuecommand,
+ .eh_host_reset_handler = myrs_host_reset,
+ .slave_alloc = myrs_slave_alloc,
+ .slave_configure = myrs_slave_configure,
+ .slave_destroy = myrs_slave_destroy,
+ .cmd_size = sizeof(struct myrs_cmdblk),
+ .shost_attrs = myrs_shost_attrs,
+ .sdev_attrs = myrs_sdev_attrs,
+ .this_id = -1,
+};
+
+static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
+ const struct pci_device_id *entry)
+{
+ struct Scsi_Host *shost;
+ struct myrs_hba *cs;
+
+ shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
+ if (!shost)
+ return NULL;
+
+ shost->max_cmd_len = 16;
+ shost->max_lun = 256;
+ cs = shost_priv(shost);
+ mutex_init(&cs->dcmd_mutex);
+ mutex_init(&cs->cinfo_mutex);
+ cs->host = shost;
+
+ return cs;
+}
+
+/*
+ * RAID template functions
+ */
+
+/**
+ * myrs_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int
+myrs_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+
+ return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
+}
+
+/**
+ * myrs_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void
+myrs_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+ u64 percent_complete = 0;
+ u8 status;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
+ return;
+ if (ldev_info->rbld_active) {
+ unsigned short ldev_num = ldev_info->ldev_num;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ percent_complete = ldev_info->rbld_lba * 100;
+ do_div(percent_complete, ldev_info->cfg_devsize);
+ }
+ raid_set_resync(myrs_raid_template, dev, percent_complete);
+}
+
+/**
+ * myrs_get_state - get raid volume status
+ * @dev: the device struct object
+ */
+static void
+myrs_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
+ state = RAID_STATE_UNKNOWN;
+ else {
+ switch (ldev_info->dev_state) {
+ case MYRS_DEVICE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MYRS_DEVICE_SUSPECTED_CRITICAL:
+ case MYRS_DEVICE_CRITICAL:
+ state = RAID_STATE_DEGRADED;
+ break;
+ case MYRS_DEVICE_REBUILD:
+ state = RAID_STATE_RESYNCING;
+ break;
+ case MYRS_DEVICE_UNCONFIGURED:
+ case MYRS_DEVICE_INVALID_STATE:
+ state = RAID_STATE_UNKNOWN;
+ break;
+ default:
+ state = RAID_STATE_OFFLINE;
+ }
+ }
+ raid_set_state(myrs_raid_template, dev, state);
+}
+
+struct raid_function_template myrs_raid_functions = {
+ .cookie = &myrs_template,
+ .is_raid = myrs_is_raid,
+ .get_resync = myrs_get_resync,
+ .get_state = myrs_get_state,
+};
+
+/*
+ * PCI interface functions
+ */
+void myrs_flush_cache(struct myrs_hba *cs)
+{
+ myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
+}
+
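+/*
+ * Complete a SCSI command: copy sense data on failure, release the
+ * per-command DMA buffers, and pass the result to the midlayer.
+ */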
+static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char status;
+
+ if (!cmd_blk)
+ return;
+
+ scsi_dma_unmap(scmd);
+ status = cmd_blk->status;
+ if (cmd_blk->sense) {
+ if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
+ unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ if (sense_len > cmd_blk->sense_len)
+ sense_len = cmd_blk->sense_len;
+ memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
+ }
+ dma_pool_free(cs->sense_pool, cmd_blk->sense,
+ cmd_blk->sense_addr);
+ cmd_blk->sense = NULL;
+ cmd_blk->sense_addr = 0;
+ }
+ if (cmd_blk->dcdb) {
+ dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
+ cmd_blk->dcdb_dma);
+ cmd_blk->dcdb = NULL;
+ cmd_blk->dcdb_dma = 0;
+ }
+ if (cmd_blk->sgl) {
+ dma_pool_free(cs->sg_pool, cmd_blk->sgl,
+ cmd_blk->sgl_addr);
+ cmd_blk->sgl = NULL;
+ cmd_blk->sgl_addr = 0;
+ }
+ if (cmd_blk->residual)
+ scsi_set_resid(scmd, cmd_blk->residual);
+ if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
+ status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
+ scmd->result = (DID_BAD_TARGET << 16);
+ else
+ scmd->result = (DID_OK << 16) | status;
+ scmd->scsi_done(scmd);
+}
+
+static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+{
+ if (!cmd_blk)
+ return;
+
+ if (cmd_blk->complete) {
+ complete(cmd_blk->complete);
+ cmd_blk->complete = NULL;
+ }
+}
+
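+/*
+ * Background monitor: poll the firmware status, drain queued events,
+ * and refresh logical device information while any background
+ * operation (init, rebuild, consistency check, expansion) is active.
+ */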
+static void myrs_monitor(struct work_struct *work)
+{
+ struct myrs_hba *cs = container_of(work, struct myrs_hba,
+ monitor_work.work);
+ struct Scsi_Host *shost = cs->host;
+ struct myrs_ctlr_info *info = cs->ctlr_info;
+ unsigned int epoch = cs->fwstat_buf->epoch;
+ unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
+ unsigned char status;
+
+ dev_dbg(&shost->shost_gendev, "monitor tick\n");
+
+ status = myrs_get_fwstatus(cs);
+
+ if (cs->needs_update) {
+ cs->needs_update = false;
+ mutex_lock(&cs->cinfo_mutex);
+ status = myrs_get_ctlr_info(cs);
+ mutex_unlock(&cs->cinfo_mutex);
+ }
+ if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
+ status = myrs_get_event(cs, cs->next_evseq,
+ cs->event_buf);
+ if (status == MYRS_STATUS_SUCCESS) {
+ myrs_log_event(cs, cs->event_buf);
+ cs->next_evseq++;
+ interval = 1;
+ }
+ }
+
+ if (time_after(jiffies, cs->secondary_monitor_time
+ + MYRS_SECONDARY_MONITOR_INTERVAL))
+ cs->secondary_monitor_time = jiffies;
+
+ if (info->bg_init_active +
+ info->ldev_init_active +
+ info->pdev_init_active +
+ info->cc_active +
+ info->rbld_active +
+ info->exp_active != 0) {
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, shost) {
+ struct myrs_ldev_info *ldev_info;
+ int ldev_num;
+
+ if (sdev->channel < info->physchan_present)
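+ /* The dcmd command block is shared, so serialize its users */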
+ continue;
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ continue;
+ ldev_num = ldev_info->ldev_num;
+ myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ }
+ cs->needs_update = true;
+ }
+ if (epoch == cs->epoch &&
+ cs->fwstat_buf->next_evseq == cs->next_evseq &&
+ (cs->needs_update == false ||
+ time_before(jiffies, cs->primary_monitor_time
+ + MYRS_PRIMARY_MONITOR_INTERVAL))) {
+ interval = MYRS_SECONDARY_MONITOR_INTERVAL;
+ }
+
+ if (interval > 1)
+ cs->primary_monitor_time = jiffies;
+ queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
+}
+
+static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
+{
+ struct Scsi_Host *shost = cs->host;
+ size_t elem_size, elem_align;
+
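+ /* Each SGL pool element must hold a full scatterlist for one command */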
+ elem_align = sizeof(struct myrs_sge);
+ elem_size = shost->sg_tablesize * elem_align;
+ cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
+ elem_size, elem_align, 0);
+ if (cs->sg_pool == NULL) {
+ shost_printk(KERN_ERR, shost,
+ "Failed to allocate SG pool\n");
+ return false;
+ }
+
+ cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
+ MYRS_SENSE_SIZE, sizeof(int), 0);
+ if (cs->sense_pool == NULL) {
+ dma_pool_destroy(cs->sg_pool);
+ cs->sg_pool = NULL;
+ shost_printk(KERN_ERR, shost,
+ "Failed to allocate sense data pool\n");
+ return false;
+ }
+
+ cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
+ MYRS_DCDB_SIZE,
+ sizeof(unsigned char), 0);
+ if (!cs->dcdb_pool) {
+ dma_pool_destroy(cs->sg_pool);
+ cs->sg_pool = NULL;
+ dma_pool_destroy(cs->sense_pool);
+ cs->sense_pool = NULL;
+ shost_printk(KERN_ERR, shost,
+ "Failed to allocate DCDB pool\n");
+ return false;
+ }
+
+ snprintf(cs->work_q_name, sizeof(cs->work_q_name),
+ "myrs_wq_%d", shost->host_no);
+ cs->work_q = create_singlethread_workqueue(cs->work_q_name);
+ if (!cs->work_q) {
+ dma_pool_destroy(cs->dcdb_pool);
+ cs->dcdb_pool = NULL;
+ dma_pool_destroy(cs->sg_pool);
+ cs->sg_pool = NULL;
+ dma_pool_destroy(cs->sense_pool);
+ cs->sense_pool = NULL;
+ shost_printk(KERN_ERR, shost,
+ "Failed to create workqueue\n");
+ return false;
+ }
+
+ /* Initialize the monitoring work and queue the first run. */
+ INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
+ queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
+
+ return true;
+}
+
+static void myrs_destroy_mempools(struct myrs_hba *cs)
+{
+ cancel_delayed_work_sync(&cs->monitor_work);
+ destroy_workqueue(cs->work_q);
+
+ dma_pool_destroy(cs->sg_pool);
+ dma_pool_destroy(cs->dcdb_pool);
+ dma_pool_destroy(cs->sense_pool);
+}
+
+static void myrs_unmap(struct myrs_hba *cs)
+{
+ kfree(cs->event_buf);
+ kfree(cs->ctlr_info);
+ if (cs->fwstat_buf) {
+ dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
+ cs->fwstat_buf, cs->fwstat_addr);
+ cs->fwstat_buf = NULL;
+ }
+ if (cs->first_stat_mbox) {
+ dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
+ cs->first_stat_mbox, cs->stat_mbox_addr);
+ cs->first_stat_mbox = NULL;
+ }
+ if (cs->first_cmd_mbox) {
+ dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
+ cs->first_cmd_mbox, cs->cmd_mbox_addr);
+ cs->first_cmd_mbox = NULL;
+ }
+}
+
+static void myrs_cleanup(struct myrs_hba *cs)
+{
+ struct pci_dev *pdev = cs->pdev;
+
+ /* Free the memory mailbox, status, and related structures */
+ myrs_unmap(cs);
+
+ if (cs->mmio_base) {
+ cs->disable_intr(cs);
+ iounmap(cs->mmio_base);
+ cs->mmio_base = NULL;
+ }
+ if (cs->irq)
+ free_irq(cs->irq, cs);
+ if (cs->io_addr)
+ release_region(cs->io_addr, 0x80);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ scsi_host_put(cs->host);
+}
+
+static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
+ const struct pci_device_id *entry)
+{
+ struct myrs_privdata *privdata =
+ (struct myrs_privdata *)entry->driver_data;
+ irq_handler_t irq_handler = privdata->irq_handler;
+ unsigned int mmio_size = privdata->mmio_size;
+ struct myrs_hba *cs = NULL;
+
+ cs = myrs_alloc_host(pdev, entry);
+ if (!cs) {
+ dev_err(&pdev->dev, "Unable to allocate Controller\n");
+ return NULL;
+ }
+ cs->pdev = pdev;
+
+ if (pci_enable_device(pdev))
+ goto Failure;
+
+ cs->pci_addr = pci_resource_start(pdev, 0);
+
+ pci_set_drvdata(pdev, cs);
+ spin_lock_init(&cs->queue_lock);
+ /* Map the Controller Register Window. */
+ if (mmio_size < PAGE_SIZE)
+ mmio_size = PAGE_SIZE;
+ cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
+ if (cs->mmio_base == NULL) {
+ dev_err(&pdev->dev,
+ "Unable to map Controller Register Window\n");
+ goto Failure;
+ }
+
+ cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
+ if (privdata->hw_init(pdev, cs, cs->io_base))
+ goto Failure;
+
+ /* Acquire shared access to the IRQ Channel. */
+ if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
+ dev_err(&pdev->dev,
+ "Unable to acquire IRQ Channel %d\n", pdev->irq);
+ goto Failure;
+ }
+ cs->irq = pdev->irq;
+ return cs;
+
+Failure:
+ dev_err(&pdev->dev,
+ "Failed to initialize Controller\n");
+ myrs_cleanup(cs);
+ return NULL;
+}
+
+/**
+ * myrs_err_status - reports controller BIOS messages
+ *
+ * Controller BIOS messages are passed through the Error Status Register
+ * when the driver performs the BIOS handshaking.
+ *
+ * Return: true for fatal errors and false otherwise.
+ */
+
+static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
+ unsigned char parm0, unsigned char parm1)
+{
+ struct pci_dev *pdev = cs->pdev;
+
+ switch (status) {
+ case 0x00:
+ dev_info(&pdev->dev,
+ "Physical Device %d:%d Not Responding\n",
+ parm1, parm0);
+ break;
+ case 0x08:
+ dev_notice(&pdev->dev, "Spinning Up Drives\n");
+ break;
+ case 0x30:
+ dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+ break;
+ case 0x60:
+ dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+ break;
+ case 0x70:
+ dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+ break;
+ case 0x90:
+ dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+ parm1, parm0);
+ break;
+ case 0xA0:
+ dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+ break;
+ case 0xB0:
+ dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+ break;
+ case 0xD0:
+ dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+ break;
+ case 0xF0:
+ dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+ return true;
+ default:
+ dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+ status);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Hardware-specific functions
+ */
+
+/*
+ * DAC960 GEM Series Controllers.
+ */
+
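+/*
+ * GEM registers are accessed as 32-bit words with the interesting
+ * bits in byte 3, hence the << 24 shifts below.
+ */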
+static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
+
+ writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_gen_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
+
+ writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);
+
+ writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
+ DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
+
+ writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_GEM_enable_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
+ DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
+ writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_disable_intr(void __iomem *base)
+{
+ __le32 val = 0;
+
+ writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
+}
+
+static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
+ return !((le32_to_cpu(val) >> 24) &
+ (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
+ DAC960_GEM_IRQMASK_MMBOX_IRQ));
+}
+
+static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+ union myrs_cmd_mbox *mbox)
+{
+ memcpy(&mem_mbox->words[1], &mbox->words[1],
+ sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+ /* Barrier to avoid reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
+ dma_addr_t cmd_mbox_addr)
+{
+ dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
+{
+ return readw(base + DAC960_GEM_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
+{
+ return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
+ if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
+ return false;
+ *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
+ *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
+ *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
+ writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
+ return true;
+}
+
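+/*
+ * Hardware mailbox handshake, used only during initialization: post
+ * the mailbox address, ring the doorbell, and busy-wait for status.
+ */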
+static inline unsigned char
+DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+ unsigned char status;
+
+ while (DAC960_GEM_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_GEM_write_hw_mbox(base, mbox_addr);
+ DAC960_GEM_hw_mbox_new_cmd(base);
+ while (!DAC960_GEM_hw_mbox_status_available(base))
+ udelay(1);
+ status = DAC960_GEM_read_cmd_status(base);
+ DAC960_GEM_ack_hw_mbox_intr(base);
+ DAC960_GEM_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_GEM_hw_init(struct pci_dev *pdev,
+ struct myrs_hba *cs, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char status, parm0, parm1;
+
+ DAC960_GEM_disable_intr(base);
+ DAC960_GEM_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_GEM_init_in_progress(base) &&
+ timeout < MYRS_MAILBOX_TIMEOUT) {
+ if (DAC960_GEM_read_error_status(base, &status,
+ &parm0, &parm1) &&
+ myrs_err_status(cs, status, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRS_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_GEM_reset_ctrl(base);
+ return -EAGAIN;
+ }
+ DAC960_GEM_enable_intr(base);
+ cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
+ cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
+ cs->disable_intr = DAC960_GEM_disable_intr;
+ cs->reset = DAC960_GEM_reset_ctrl;
+ return 0;
+}
+
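+/*
+ * Drain the status mailbox ring: an id of 0 marks an empty slot,
+ * ids 1 and 2 are driver-internal commands, everything else maps
+ * back to a SCSI command tag.
+ */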
+static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
+{
+ struct myrs_hba *cs = arg;
+ void __iomem *base = cs->io_base;
+ struct myrs_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ DAC960_GEM_ack_intr(base);
+ next_stat_mbox = cs->next_stat_mbox;
+ while (next_stat_mbox->id > 0) {
+ unsigned short id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrs_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRS_DCMD_TAG)
+ cmd_blk = &cs->dcmd_blk;
+ else if (id == MYRS_MCMD_TAG)
+ cmd_blk = &cs->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cs->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = next_stat_mbox->status;
+ cmd_blk->sense_len = next_stat_mbox->sense_len;
+ cmd_blk->residual = next_stat_mbox->residual;
+ } else
+ dev_err(&cs->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+ if (++next_stat_mbox > cs->last_stat_mbox)
+ next_stat_mbox = cs->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrs_handle_cmdblk(cs, cmd_blk);
+ else
+ myrs_handle_scsi(cs, cmd_blk, scmd);
+ }
+ }
+ cs->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_GEM_privdata = {
+ .hw_init = DAC960_GEM_hw_init,
+ .irq_handler = DAC960_GEM_intr_handler,
+ .mmio_size = DAC960_GEM_mmio_size,
+};
+
+/*
+ * DAC960 BA Series Controllers.
+ */
+
+static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_IDB_OFFSET);
+ return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
+}
+
+static inline bool DAC960_BA_init_in_progress(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_IDB_OFFSET);
+ return !(val & DAC960_BA_IDB_INIT_DONE);
+}
+
+static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_ODB_OFFSET);
+ return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_ODB_OFFSET);
+ return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_BA_enable_intr(void __iomem *base)
+{
+ writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_BA_disable_intr(void __iomem *base)
+{
+ writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_BA_intr_enabled(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_IRQMASK_OFFSET);
+ return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+ union myrs_cmd_mbox *mbox)
+{
+ memcpy(&mem_mbox->words[1], &mbox->words[1],
+ sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+ /* Barrier to avoid reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
+ dma_addr_t cmd_mbox_addr)
+{
+ dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
+{
+ return readw(base + DAC960_BA_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
+{
+ return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_ERRSTS_OFFSET);
+ if (!(val & DAC960_BA_ERRSTS_PENDING))
+ return false;
+ val &= ~DAC960_BA_ERRSTS_PENDING;
+ *error = val;
+ *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
+ *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
+ writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned char
+DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+ unsigned char status;
+
+ while (DAC960_BA_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_BA_write_hw_mbox(base, mbox_addr);
+ DAC960_BA_hw_mbox_new_cmd(base);
+ while (!DAC960_BA_hw_mbox_status_available(base))
+ udelay(1);
+ status = DAC960_BA_read_cmd_status(base);
+ DAC960_BA_ack_hw_mbox_intr(base);
+ DAC960_BA_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_BA_hw_init(struct pci_dev *pdev,
+ struct myrs_hba *cs, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char status, parm0, parm1;
+
+ DAC960_BA_disable_intr(base);
+ DAC960_BA_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_BA_init_in_progress(base) &&
+ timeout < MYRS_MAILBOX_TIMEOUT) {
+ if (DAC960_BA_read_error_status(base, &status,
+ &parm0, &parm1) &&
+ myrs_err_status(cs, status, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRS_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_BA_reset_ctrl(base);
+ return -EAGAIN;
+ }
+ DAC960_BA_enable_intr(base);
+ cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
+ cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
+ cs->disable_intr = DAC960_BA_disable_intr;
+ cs->reset = DAC960_BA_reset_ctrl;
+ return 0;
+}
+
+static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
+{
+ struct myrs_hba *cs = arg;
+ void __iomem *base = cs->io_base;
+ struct myrs_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ DAC960_BA_ack_intr(base);
+ next_stat_mbox = cs->next_stat_mbox;
+ while (next_stat_mbox->id > 0) {
+ unsigned short id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrs_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRS_DCMD_TAG)
+ cmd_blk = &cs->dcmd_blk;
+ else if (id == MYRS_MCMD_TAG)
+ cmd_blk = &cs->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cs->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = next_stat_mbox->status;
+ cmd_blk->sense_len = next_stat_mbox->sense_len;
+ cmd_blk->residual = next_stat_mbox->residual;
+ } else
+ dev_err(&cs->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+ if (++next_stat_mbox > cs->last_stat_mbox)
+ next_stat_mbox = cs->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrs_handle_cmdblk(cs, cmd_blk);
+ else
+ myrs_handle_scsi(cs, cmd_blk, scmd);
+ }
+ }
+ cs->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_BA_privdata = {
+ .hw_init = DAC960_BA_hw_init,
+ .irq_handler = DAC960_BA_intr_handler,
+ .mmio_size = DAC960_BA_mmio_size,
+};
+
+/*
+ * DAC960 LP Series Controllers.
+ */
+
+static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_IDB_OFFSET);
+ return val & DAC960_LP_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_LP_init_in_progress(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_IDB_OFFSET);
+ return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_ODB_OFFSET);
+ return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_ODB_OFFSET);
+ return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_LP_enable_intr(void __iomem *base)
+{
+ writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_LP_disable_intr(void __iomem *base)
+{
+ writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_LP_intr_enabled(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_IRQMASK_OFFSET);
+ return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+ union myrs_cmd_mbox *mbox)
+{
+ memcpy(&mem_mbox->words[1], &mbox->words[1],
+ sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+ /* Barrier to avoid reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
+ dma_addr_t cmd_mbox_addr)
+{
+ dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
+{
+ return readw(base + DAC960_LP_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
+{
+ return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_ERRSTS_OFFSET);
+ if (!(val & DAC960_LP_ERRSTS_PENDING))
+ return false;
+ val &= ~DAC960_LP_ERRSTS_PENDING;
+ *error = val;
+ *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
+ *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
+ writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned char
+DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+ unsigned char status;
+
+ while (DAC960_LP_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_LP_write_hw_mbox(base, mbox_addr);
+ DAC960_LP_hw_mbox_new_cmd(base);
+ while (!DAC960_LP_hw_mbox_status_available(base))
+ udelay(1);
+ status = DAC960_LP_read_cmd_status(base);
+ DAC960_LP_ack_hw_mbox_intr(base);
+ DAC960_LP_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_LP_hw_init(struct pci_dev *pdev,
+ struct myrs_hba *cs, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char status, parm0, parm1;
+
+ DAC960_LP_disable_intr(base);
+ DAC960_LP_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_LP_init_in_progress(base) &&
+ timeout < MYRS_MAILBOX_TIMEOUT) {
+ if (DAC960_LP_read_error_status(base, &status,
+ &parm0, &parm1) &&
+ myrs_err_status(cs, status, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRS_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_LP_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_LP_enable_intr(base);
+ cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
+ cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
+ cs->disable_intr = DAC960_LP_disable_intr;
+ cs->reset = DAC960_LP_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
+{
+ struct myrs_hba *cs = arg;
+ void __iomem *base = cs->io_base;
+ struct myrs_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ DAC960_LP_ack_intr(base);
+ next_stat_mbox = cs->next_stat_mbox;
+ while (next_stat_mbox->id > 0) {
+ unsigned short id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrs_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRS_DCMD_TAG)
+ cmd_blk = &cs->dcmd_blk;
+ else if (id == MYRS_MCMD_TAG)
+ cmd_blk = &cs->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cs->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = next_stat_mbox->status;
+ cmd_blk->sense_len = next_stat_mbox->sense_len;
+ cmd_blk->residual = next_stat_mbox->residual;
+ } else
+ dev_err(&cs->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+ if (++next_stat_mbox > cs->last_stat_mbox)
+ next_stat_mbox = cs->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrs_handle_cmdblk(cs, cmd_blk);
+ else
+ myrs_handle_scsi(cs, cmd_blk, scmd);
+ }
+ }
+ cs->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_LP_privdata = {
+ .hw_init = DAC960_LP_hw_init,
+ .irq_handler = DAC960_LP_intr_handler,
+ .mmio_size = DAC960_LP_mmio_size,
+};
+
+/*
+ * Module functions
+ */
+static int
+myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+ struct myrs_hba *cs;
+ int ret;
+
+ cs = myrs_detect(dev, entry);
+ if (!cs)
+ return -ENODEV;
+
+ ret = myrs_get_config(cs);
+ if (ret < 0) {
+ myrs_cleanup(cs);
+ return ret;
+ }
+
+ if (!myrs_create_mempools(dev, cs)) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ ret = scsi_add_host(cs->host, &dev->dev);
+ if (ret) {
+ dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+ myrs_destroy_mempools(cs);
+ goto failed;
+ }
+ scsi_scan_host(cs->host);
+ return 0;
+failed:
+ myrs_cleanup(cs);
+ return ret;
+}
+
+static void myrs_remove(struct pci_dev *pdev)
+{
+ struct myrs_hba *cs = pci_get_drvdata(pdev);
+
+ if (cs == NULL)
+ return;
+
+ shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...");
+ myrs_flush_cache(cs);
+ myrs_destroy_mempools(cs);
+ myrs_cleanup(cs);
+}
+
+static const struct pci_device_id myrs_id_table[] = {
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
+ PCI_DEVICE_ID_MYLEX_DAC960_GEM,
+ PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
+ .driver_data = (unsigned long) &DAC960_GEM_privdata,
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
+ },
+ {0, },
+};
+
+MODULE_DEVICE_TABLE(pci, myrs_id_table);
+
+static struct pci_driver myrs_pci_driver = {
+ .name = "myrs",
+ .id_table = myrs_id_table,
+ .probe = myrs_probe,
+ .remove = myrs_remove,
+};
+
+static int __init myrs_init_module(void)
+{
+ int ret;
+
+ myrs_raid_template = raid_class_attach(&myrs_raid_functions);
+ if (!myrs_raid_template)
+ return -ENODEV;
+
+ ret = pci_register_driver(&myrs_pci_driver);
+ if (ret)
+ raid_class_release(myrs_raid_template);
+
+ return ret;
+}
+
+static void __exit myrs_cleanup_module(void)
+{
+ pci_unregister_driver(&myrs_pci_driver);
+ raid_class_release(myrs_raid_template);
+}
+
+module_init(myrs_init_module);
+module_exit(myrs_cleanup_module);
+
+MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
new file mode 100644
index 000000000000..e6702ee85e9f
--- /dev/null
+++ b/drivers/scsi/myrs.h
@@ -0,0 +1,1134 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * This driver supports the newer, SCSI-based firmware interface only.
+ *
+ * Copyright 2018 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver, which has
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ */
+
+#ifndef _MYRS_H
+#define _MYRS_H
+
+#define MYRS_MAILBOX_TIMEOUT 1000000
+
+#define MYRS_DCMD_TAG 1
+#define MYRS_MCMD_TAG 2
+
+#define MYRS_LINE_BUFFER_SIZE 128
+
+#define MYRS_PRIMARY_MONITOR_INTERVAL (10 * HZ)
+#define MYRS_SECONDARY_MONITOR_INTERVAL (60 * HZ)
+
+/* Maximum number of Scatter/Gather Segments supported */
+#define MYRS_SG_LIMIT 128
+
+/*
+ * Number of Command and Status Mailboxes used by the
+ * DAC960 V2 Firmware Memory Mailbox Interface.
+ */
+#define MYRS_MAX_CMD_MBOX 512
+#define MYRS_MAX_STAT_MBOX 512
+
+#define MYRS_DCDB_SIZE 16
+#define MYRS_SENSE_SIZE 14
+
+/*
+ * DAC960 V2 Firmware Command Opcodes.
+ */
+enum myrs_cmd_opcode {
+ MYRS_CMD_OP_MEMCOPY = 0x01,
+ MYRS_CMD_OP_SCSI_10_PASSTHRU = 0x02,
+ MYRS_CMD_OP_SCSI_255_PASSTHRU = 0x03,
+ MYRS_CMD_OP_SCSI_10 = 0x04,
+ MYRS_CMD_OP_SCSI_256 = 0x05,
+ MYRS_CMD_OP_IOCTL = 0x20,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware IOCTL Opcodes.
+ */
+enum myrs_ioctl_opcode {
+ MYRS_IOCTL_GET_CTLR_INFO = 0x01,
+ MYRS_IOCTL_GET_LDEV_INFO_VALID = 0x03,
+ MYRS_IOCTL_GET_PDEV_INFO_VALID = 0x05,
+ MYRS_IOCTL_GET_HEALTH_STATUS = 0x11,
+ MYRS_IOCTL_GET_EVENT = 0x15,
+ MYRS_IOCTL_START_DISCOVERY = 0x81,
+ MYRS_IOCTL_SET_DEVICE_STATE = 0x82,
+ MYRS_IOCTL_INIT_PDEV_START = 0x84,
+ MYRS_IOCTL_INIT_PDEV_STOP = 0x85,
+ MYRS_IOCTL_INIT_LDEV_START = 0x86,
+ MYRS_IOCTL_INIT_LDEV_STOP = 0x87,
+ MYRS_IOCTL_RBLD_DEVICE_START = 0x88,
+ MYRS_IOCTL_RBLD_DEVICE_STOP = 0x89,
+ MYRS_IOCTL_MAKE_CONSISTENT_START = 0x8A,
+ MYRS_IOCTL_MAKE_CONSISTENT_STOP = 0x8B,
+ MYRS_IOCTL_CC_START = 0x8C,
+ MYRS_IOCTL_CC_STOP = 0x8D,
+ MYRS_IOCTL_SET_MEM_MBOX = 0x8E,
+ MYRS_IOCTL_RESET_DEVICE = 0x90,
+ MYRS_IOCTL_FLUSH_DEVICE_DATA = 0x91,
+ MYRS_IOCTL_PAUSE_DEVICE = 0x92,
+ MYRS_IOCTL_UNPAUSE_DEVICE = 0x93,
+ MYRS_IOCTL_LOCATE_DEVICE = 0x94,
+ MYRS_IOCTL_CREATE_CONFIGURATION = 0xC0,
+ MYRS_IOCTL_DELETE_LDEV = 0xC1,
+ MYRS_IOCTL_REPLACE_INTERNALDEVICE = 0xC2,
+ MYRS_IOCTL_RENAME_LDEV = 0xC3,
+ MYRS_IOCTL_ADD_CONFIGURATION = 0xC4,
+ MYRS_IOCTL_XLATE_PDEV_TO_LDEV = 0xC5,
+ MYRS_IOCTL_CLEAR_CONFIGURATION = 0xCA,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Command Status Codes.
+ */
+#define MYRS_STATUS_SUCCESS 0x00
+#define MYRS_STATUS_FAILED 0x02
+#define MYRS_STATUS_DEVICE_BUSY 0x08
+#define MYRS_STATUS_DEVICE_NON_RESPONSIVE 0x0E
+#define MYRS_STATUS_DEVICE_NON_RESPONSIVE2 0x0F
+#define MYRS_STATUS_RESERVATION_CONFLICT 0x18
+
+/*
+ * DAC960 V2 Firmware Memory Type structure.
+ */
+struct myrs_mem_type {
+ enum {
+ MYRS_MEMTYPE_RESERVED = 0x00,
+ MYRS_MEMTYPE_DRAM = 0x01,
+ MYRS_MEMTYPE_EDRAM = 0x02,
+ MYRS_MEMTYPE_EDO = 0x03,
+ MYRS_MEMTYPE_SDRAM = 0x04,
+ MYRS_MEMTYPE_LAST = 0x1F,
+ } __packed mem_type:5; /* Byte 0 Bits 0-4 */
+ unsigned rsvd:1; /* Byte 0 Bit 5 */
+ unsigned mem_parity:1; /* Byte 0 Bit 6 */
+ unsigned mem_ecc:1; /* Byte 0 Bit 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Processor Type structure.
+ */
+enum myrs_cpu_type {
+ MYRS_CPUTYPE_i960CA = 0x01,
+ MYRS_CPUTYPE_i960RD = 0x02,
+ MYRS_CPUTYPE_i960RN = 0x03,
+ MYRS_CPUTYPE_i960RP = 0x04,
+ MYRS_CPUTYPE_NorthBay = 0x05,
+ MYRS_CPUTYPE_StrongArm = 0x06,
+ MYRS_CPUTYPE_i960RM = 0x07,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Get Controller Info reply structure.
+ */
+struct myrs_ctlr_info {
+ unsigned char rsvd1; /* Byte 0 */
+ enum {
+ MYRS_SCSI_BUS = 0x00,
+ MYRS_Fibre_BUS = 0x01,
+ MYRS_PCI_BUS = 0x03
+ } __packed bus; /* Byte 1 */
+ enum {
+ MYRS_CTLR_DAC960E = 0x01,
+ MYRS_CTLR_DAC960M = 0x08,
+ MYRS_CTLR_DAC960PD = 0x10,
+ MYRS_CTLR_DAC960PL = 0x11,
+ MYRS_CTLR_DAC960PU = 0x12,
+ MYRS_CTLR_DAC960PE = 0x13,
+ MYRS_CTLR_DAC960PG = 0x14,
+ MYRS_CTLR_DAC960PJ = 0x15,
+ MYRS_CTLR_DAC960PTL0 = 0x16,
+ MYRS_CTLR_DAC960PR = 0x17,
+ MYRS_CTLR_DAC960PRL = 0x18,
+ MYRS_CTLR_DAC960PT = 0x19,
+ MYRS_CTLR_DAC1164P = 0x1A,
+ MYRS_CTLR_DAC960PTL1 = 0x1B,
+ MYRS_CTLR_EXR2000P = 0x1C,
+ MYRS_CTLR_EXR3000P = 0x1D,
+ MYRS_CTLR_ACCELERAID352 = 0x1E,
+ MYRS_CTLR_ACCELERAID170 = 0x1F,
+ MYRS_CTLR_ACCELERAID160 = 0x20,
+ MYRS_CTLR_DAC960S = 0x60,
+ MYRS_CTLR_DAC960SU = 0x61,
+ MYRS_CTLR_DAC960SX = 0x62,
+ MYRS_CTLR_DAC960SF = 0x63,
+ MYRS_CTLR_DAC960SS = 0x64,
+ MYRS_CTLR_DAC960FL = 0x65,
+ MYRS_CTLR_DAC960LL = 0x66,
+ MYRS_CTLR_DAC960FF = 0x67,
+ MYRS_CTLR_DAC960HP = 0x68,
+ MYRS_CTLR_RAIDBRICK = 0x69,
+ MYRS_CTLR_METEOR_FL = 0x6A,
+ MYRS_CTLR_METEOR_FF = 0x6B
+ } __packed ctlr_type; /* Byte 2 */
+ unsigned char rsvd2; /* Byte 3 */
+ unsigned short bus_speed_mhz; /* Bytes 4-5 */
+ unsigned char bus_width; /* Byte 6 */
+ unsigned char flash_code; /* Byte 7 */
+ unsigned char ports_present; /* Byte 8 */
+ unsigned char rsvd3[7]; /* Bytes 9-15 */
+ unsigned char bus_name[16]; /* Bytes 16-31 */
+ unsigned char ctlr_name[16]; /* Bytes 32-47 */
+ unsigned char rsvd4[16]; /* Bytes 48-63 */
+ /* Firmware Release Information */
+ unsigned char fw_major_version; /* Byte 64 */
+ unsigned char fw_minor_version; /* Byte 65 */
+ unsigned char fw_turn_number; /* Byte 66 */
+ unsigned char fw_build_number; /* Byte 67 */
+ unsigned char fw_release_day; /* Byte 68 */
+ unsigned char fw_release_month; /* Byte 69 */
+ unsigned char fw_release_year_hi; /* Byte 70 */
+ unsigned char fw_release_year_lo; /* Byte 71 */
+ /* Hardware Release Information */
+ unsigned char hw_rev; /* Byte 72 */
+ unsigned char rsvd5[3]; /* Bytes 73-75 */
+ unsigned char hw_release_day; /* Byte 76 */
+ unsigned char hw_release_month; /* Byte 77 */
+ unsigned char hw_release_year_hi; /* Byte 78 */
+ unsigned char hw_release_year_lo; /* Byte 79 */
+ /* Hardware Manufacturing Information */
+ unsigned char manuf_batch_num; /* Byte 80 */
+ unsigned char rsvd6; /* Byte 81 */
+ unsigned char manuf_plant_num; /* Byte 82 */
+ unsigned char rsvd7; /* Byte 83 */
+ unsigned char hw_manuf_day; /* Byte 84 */
+ unsigned char hw_manuf_month; /* Byte 85 */
+ unsigned char hw_manuf_year_hi; /* Byte 86 */
+ unsigned char hw_manuf_year_lo; /* Byte 87 */
+ unsigned char max_pd_per_xld; /* Byte 88 */
+ unsigned char max_ild_per_xld; /* Byte 89 */
+ unsigned short nvram_size_kb; /* Bytes 90-91 */
+ unsigned char max_xld; /* Byte 92 */
+ unsigned char rsvd8[3]; /* Bytes 93-95 */
+ /* Unique Information per Controller */
+ unsigned char serial_number[16]; /* Bytes 96-111 */
+ unsigned char rsvd9[16]; /* Bytes 112-127 */
+ /* Vendor Information */
+ unsigned char rsvd10[3]; /* Bytes 128-130 */
+ unsigned char oem_code; /* Byte 131 */
+ unsigned char vendor[16]; /* Bytes 132-147 */
+ /* Other Physical/Controller/Operation Information */
+ unsigned char bbu_present:1; /* Byte 148 Bit 0 */
+ unsigned char cluster_mode:1; /* Byte 148 Bit 1 */
+ unsigned char rsvd11:6; /* Byte 148 Bits 2-7 */
+ unsigned char rsvd12[3]; /* Bytes 149-151 */
+ /* Physical Device Scan Information */
+ unsigned char pscan_active:1; /* Byte 152 Bit 0 */
+ unsigned char rsvd13:7; /* Byte 152 Bits 1-7 */
+ unsigned char pscan_chan; /* Byte 153 */
+ unsigned char pscan_target; /* Byte 154 */
+ unsigned char pscan_lun; /* Byte 155 */
+ /* Maximum Command Data Transfer Sizes */
+ unsigned short max_transfer_size; /* Bytes 156-157 */
+ unsigned short max_sge; /* Bytes 158-159 */
+ /* Logical/Physical Device Counts */
+ unsigned short ldev_present; /* Bytes 160-161 */
+ unsigned short ldev_critical; /* Bytes 162-163 */
+ unsigned short ldev_offline; /* Bytes 164-165 */
+ unsigned short pdev_present; /* Bytes 166-167 */
+ unsigned short pdisk_present; /* Bytes 168-169 */
+ unsigned short pdisk_critical; /* Bytes 170-171 */
+ unsigned short pdisk_offline; /* Bytes 172-173 */
+ unsigned short max_tcq; /* Bytes 174-175 */
+ /* Channel and Target ID Information */
+ unsigned char physchan_present; /* Byte 176 */
+ unsigned char virtchan_present; /* Byte 177 */
+ unsigned char physchan_max; /* Byte 178 */
+ unsigned char virtchan_max; /* Byte 179 */
+ unsigned char max_targets[16]; /* Bytes 180-195 */
+ unsigned char rsvd14[12]; /* Bytes 196-207 */
+ /* Memory/Cache Information */
+ unsigned short mem_size_mb; /* Bytes 208-209 */
+ unsigned short cache_size_mb; /* Bytes 210-211 */
+ unsigned int valid_cache_bytes; /* Bytes 212-215 */
+ unsigned int dirty_cache_bytes; /* Bytes 216-219 */
+ unsigned short mem_speed_mhz; /* Bytes 220-221 */
+ unsigned char mem_data_width; /* Byte 222 */
+ struct myrs_mem_type mem_type; /* Byte 223 */
+ unsigned char cache_mem_type_name[16]; /* Bytes 224-239 */
+ /* Execution Memory Information */
+ unsigned short exec_mem_size_mb; /* Bytes 240-241 */
+ unsigned short exec_l2_cache_size_mb; /* Bytes 242-243 */
+ unsigned char rsvd15[8]; /* Bytes 244-251 */
+ unsigned short exec_mem_speed_mhz; /* Bytes 252-253 */
+ unsigned char exec_mem_data_width; /* Byte 254 */
+ struct myrs_mem_type exec_mem_type; /* Byte 255 */
+ unsigned char exec_mem_type_name[16]; /* Bytes 256-271 */
+ /* CPU Type Information */
+ struct { /* Bytes 272-335 */
+ unsigned short cpu_speed_mhz;
+ enum myrs_cpu_type cpu_type;
+ unsigned char cpu_count;
+ unsigned char rsvd16[12];
+ unsigned char cpu_name[16];
+ } __packed cpu[2];
+ /* Debugging/Profiling/Command Time Tracing Information */
+ unsigned short cur_prof_page_num; /* Bytes 336-337 */
+ unsigned short num_prof_waiters; /* Bytes 338-339 */
+ unsigned short cur_trace_page_num; /* Bytes 340-341 */
+ unsigned short num_trace_waiters; /* Bytes 342-343 */
+ unsigned char rsvd18[8]; /* Bytes 344-351 */
+ /* Error Counters on Physical Devices */
+ unsigned short pdev_bus_resets; /* Bytes 352-353 */
+ unsigned short pdev_parity_errors; /* Bytes 354-355 */
+ unsigned short pdev_soft_errors; /* Bytes 356-357 */
+ unsigned short pdev_cmds_failed; /* Bytes 358-359 */
+ unsigned short pdev_misc_errors; /* Bytes 360-361 */
+ unsigned short pdev_cmd_timeouts; /* Bytes 362-363 */
+ unsigned short pdev_sel_timeouts; /* Bytes 364-365 */
+ unsigned short pdev_retries_done; /* Bytes 366-367 */
+ unsigned short pdev_aborts_done; /* Bytes 368-369 */
+ unsigned short pdev_host_aborts_done; /* Bytes 370-371 */
+ unsigned short pdev_predicted_failures; /* Bytes 372-373 */
+ unsigned short pdev_host_cmds_failed; /* Bytes 374-375 */
+ unsigned short pdev_hard_errors; /* Bytes 376-377 */
+ unsigned char rsvd19[6]; /* Bytes 378-383 */
+ /* Error Counters on Logical Devices */
+ unsigned short ldev_soft_errors; /* Bytes 384-385 */
+ unsigned short ldev_cmds_failed; /* Bytes 386-387 */
+ unsigned short ldev_host_aborts_done; /* Bytes 388-389 */
+ unsigned char rsvd20[2]; /* Bytes 390-391 */
+ /* Error Counters on Controller */
+ unsigned short ctlr_mem_errors; /* Bytes 392-393 */
+ unsigned short ctlr_host_aborts_done; /* Bytes 394-395 */
+ unsigned char rsvd21[4]; /* Bytes 396-399 */
+ /* Long Duration Activity Information */
+ unsigned short bg_init_active; /* Bytes 400-401 */
+ unsigned short ldev_init_active; /* Bytes 402-403 */
+ unsigned short pdev_init_active; /* Bytes 404-405 */
+ unsigned short cc_active; /* Bytes 406-407 */
+ unsigned short rbld_active; /* Bytes 408-409 */
+ unsigned short exp_active; /* Bytes 410-411 */
+ unsigned short patrol_active; /* Bytes 412-413 */
+ unsigned char rsvd22[2]; /* Bytes 414-415 */
+ /* Flash ROM Information */
+ unsigned char flash_type; /* Byte 416 */
+ unsigned char rsvd23; /* Byte 417 */
+ unsigned short flash_size_mb; /* Bytes 418-419 */
+ unsigned int flash_limit; /* Bytes 420-423 */
+ unsigned int flash_count; /* Bytes 424-427 */
+ unsigned char rsvd24[4]; /* Bytes 428-431 */
+ unsigned char flash_type_name[16]; /* Bytes 432-447 */
+ /* Firmware Run Time Information */
+ unsigned char rbld_rate; /* Byte 448 */
+ unsigned char bg_init_rate; /* Byte 449 */
+ unsigned char fg_init_rate; /* Byte 450 */
+ unsigned char cc_rate; /* Byte 451 */
+ unsigned char rsvd25[4]; /* Bytes 452-455 */
+ unsigned int max_dp; /* Bytes 456-459 */
+ unsigned int free_dp; /* Bytes 460-463 */
+ unsigned int max_iop; /* Bytes 464-467 */
+ unsigned int free_iop; /* Bytes 468-471 */
+ unsigned short max_combined_len; /* Bytes 472-473 */
+ unsigned short num_cfg_groups; /* Bytes 474-475 */
+ unsigned installation_abort_status:1; /* Byte 476 Bit 0 */
+ unsigned maint_mode_status:1; /* Byte 476 Bit 1 */
+ unsigned rsvd26:6; /* Byte 476 Bits 2-7 */
+ unsigned char rsvd27[35]; /* Bytes 477-511 */
+ unsigned char rsvd28[512]; /* Bytes 512-1023 */
+};
+
+/*
+ * DAC960 V2 Firmware Device State type.
+ */
+enum myrs_devstate {
+ MYRS_DEVICE_UNCONFIGURED = 0x00,
+ MYRS_DEVICE_ONLINE = 0x01,
+ MYRS_DEVICE_REBUILD = 0x03,
+ MYRS_DEVICE_MISSING = 0x04,
+ MYRS_DEVICE_SUSPECTED_CRITICAL = 0x05,
+ MYRS_DEVICE_OFFLINE = 0x08,
+ MYRS_DEVICE_CRITICAL = 0x09,
+ MYRS_DEVICE_SUSPECTED_DEAD = 0x0C,
+ MYRS_DEVICE_COMMANDED_OFFLINE = 0x10,
+ MYRS_DEVICE_STANDBY = 0x21,
+ MYRS_DEVICE_INVALID_STATE = 0xFF,
+} __packed;
+
+/*
+ * DAC960 V2 RAID Levels
+ */
+enum myrs_raid_level {
+ MYRS_RAID_LEVEL0 = 0x0, /* RAID 0 */
+ MYRS_RAID_LEVEL1 = 0x1, /* RAID 1 */
+ MYRS_RAID_LEVEL3 = 0x3, /* RAID 3 right asymmetric parity */
+ MYRS_RAID_LEVEL5 = 0x5, /* RAID 5 right asymmetric parity */
+ MYRS_RAID_LEVEL6 = 0x6, /* RAID 6 (Mylex RAID 6) */
+ MYRS_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */
+ MYRS_RAID_NEWSPAN = 0x8, /* New Mylex SPAN */
+ MYRS_RAID_LEVEL3F = 0x9, /* RAID 3 fixed parity */
+ MYRS_RAID_LEVEL3L = 0xb, /* RAID 3 left symmetric parity */
+ MYRS_RAID_SPAN = 0xc, /* current spanning implementation */
+ MYRS_RAID_LEVEL5L = 0xd, /* RAID 5 left symmetric parity */
+ MYRS_RAID_LEVELE = 0xe, /* RAID E (concatenation) */
+ MYRS_RAID_PHYSICAL = 0xf, /* physical device */
+} __packed;
+
+enum myrs_stripe_size {
+ MYRS_STRIPE_SIZE_0 = 0x0, /* no stripe (RAID 1, RAID 7, etc) */
+ MYRS_STRIPE_SIZE_512B = 0x1,
+ MYRS_STRIPE_SIZE_1K = 0x2,
+ MYRS_STRIPE_SIZE_2K = 0x3,
+ MYRS_STRIPE_SIZE_4K = 0x4,
+ MYRS_STRIPE_SIZE_8K = 0x5,
+ MYRS_STRIPE_SIZE_16K = 0x6,
+ MYRS_STRIPE_SIZE_32K = 0x7,
+ MYRS_STRIPE_SIZE_64K = 0x8,
+ MYRS_STRIPE_SIZE_128K = 0x9,
+ MYRS_STRIPE_SIZE_256K = 0xa,
+ MYRS_STRIPE_SIZE_512K = 0xb,
+ MYRS_STRIPE_SIZE_1M = 0xc,
+} __packed;
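+
+/*
+ * Illustrative note: the non-zero encodings follow a power-of-two
+ * progression, i.e. stripe size = 512B << (value - 1); the cache
+ * line encodings below follow the same pattern.
+ */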
+
+enum myrs_cacheline_size {
+ MYRS_CACHELINE_ZERO = 0x0, /* caching cannot be enabled */
+ MYRS_CACHELINE_512B = 0x1,
+ MYRS_CACHELINE_1K = 0x2,
+ MYRS_CACHELINE_2K = 0x3,
+ MYRS_CACHELINE_4K = 0x4,
+ MYRS_CACHELINE_8K = 0x5,
+ MYRS_CACHELINE_16K = 0x6,
+ MYRS_CACHELINE_32K = 0x7,
+ MYRS_CACHELINE_64K = 0x8,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Get Logical Device Info reply structure.
+ */
+struct myrs_ldev_info {
+ unsigned char ctlr; /* Byte 0 */
+ unsigned char channel; /* Byte 1 */
+ unsigned char target; /* Byte 2 */
+ unsigned char lun; /* Byte 3 */
+ enum myrs_devstate dev_state; /* Byte 4 */
+ unsigned char raid_level; /* Byte 5 */
+ enum myrs_stripe_size stripe_size; /* Byte 6 */
+ enum myrs_cacheline_size cacheline_size; /* Byte 7 */
+ struct {
+ enum {
+ MYRS_READCACHE_DISABLED = 0x0,
+ MYRS_READCACHE_ENABLED = 0x1,
+ MYRS_READAHEAD_ENABLED = 0x2,
+ MYRS_INTELLIGENT_READAHEAD_ENABLED = 0x3,
+ MYRS_READCACHE_LAST = 0x7,
+ } __packed rce:3; /* Byte 8 Bits 0-2 */
+ enum {
+ MYRS_WRITECACHE_DISABLED = 0x0,
+ MYRS_LOGICALDEVICE_RO = 0x1,
+ MYRS_WRITECACHE_ENABLED = 0x2,
+ MYRS_INTELLIGENT_WRITECACHE_ENABLED = 0x3,
+ MYRS_WRITECACHE_LAST = 0x7,
+ } __packed wce:3; /* Byte 8 Bits 3-5 */
+ unsigned rsvd1:1; /* Byte 8 Bit 6 */
+ unsigned ldev_init_done:1; /* Byte 8 Bit 7 */
+ } ldev_control; /* Byte 8 */
+ /* Logical Device Operations Status */
+ unsigned char cc_active:1; /* Byte 9 Bit 0 */
+ unsigned char rbld_active:1; /* Byte 9 Bit 1 */
+ unsigned char bg_init_active:1; /* Byte 9 Bit 2 */
+ unsigned char fg_init_active:1; /* Byte 9 Bit 3 */
+ unsigned char migration_active:1; /* Byte 9 Bit 4 */
+ unsigned char patrol_active:1; /* Byte 9 Bit 5 */
+ unsigned char rsvd2:2; /* Byte 9 Bits 6-7 */
+ unsigned char raid5_writeupdate; /* Byte 10 */
+ unsigned char raid5_algo; /* Byte 11 */
+ unsigned short ldev_num; /* Bytes 12-13 */
+ /* BIOS Info */
+ unsigned char bios_disabled:1; /* Byte 14 Bit 0 */
+ unsigned char cdrom_boot:1; /* Byte 14 Bit 1 */
+ unsigned char drv_coercion:1; /* Byte 14 Bit 2 */
+ unsigned char write_same_disabled:1; /* Byte 14 Bit 3 */
+ unsigned char hba_mode:1; /* Byte 14 Bit 4 */
+ enum {
+ MYRS_GEOMETRY_128_32 = 0x0,
+ MYRS_GEOMETRY_255_63 = 0x1,
+ MYRS_GEOMETRY_RSVD1 = 0x2,
+ MYRS_GEOMETRY_RSVD2 = 0x3
+ } __packed drv_geom:2; /* Byte 14 Bits 5-6 */
+ unsigned char super_ra_enabled:1; /* Byte 14 Bit 7 */
+ unsigned char rsvd3; /* Byte 15 */
+ /* Error Counters */
+ unsigned short soft_errs; /* Bytes 16-17 */
+ unsigned short cmds_failed; /* Bytes 18-19 */
+ unsigned short cmds_aborted; /* Bytes 20-21 */
+ unsigned short deferred_write_errs; /* Bytes 22-23 */
+ unsigned int rsvd4; /* Bytes 24-27 */
+ unsigned int rsvd5; /* Bytes 28-31 */
+ /* Device Size Information */
+ unsigned short rsvd6; /* Bytes 32-33 */
+ unsigned short devsize_bytes; /* Bytes 34-35 */
+ unsigned int orig_devsize; /* Bytes 36-39 */
+ unsigned int cfg_devsize; /* Bytes 40-43 */
+ unsigned int rsvd7; /* Bytes 44-47 */
+ unsigned char ldev_name[32]; /* Bytes 48-79 */
+ unsigned char inquiry[36]; /* Bytes 80-115 */
+ unsigned char rsvd8[12]; /* Bytes 116-127 */
+ u64 last_read_lba; /* Bytes 128-135 */
+ u64 last_write_lba; /* Bytes 136-143 */
+ u64 cc_lba; /* Bytes 144-151 */
+ u64 rbld_lba; /* Bytes 152-159 */
+ u64 bg_init_lba; /* Bytes 160-167 */
+ u64 fg_init_lba; /* Bytes 168-175 */
+ u64 migration_lba; /* Bytes 176-183 */
+ u64 patrol_lba; /* Bytes 184-191 */
+ unsigned char rsvd9[64]; /* Bytes 192-255 */
+};
+
+/*
+ * DAC960 V2 Firmware Get Physical Device Info reply structure.
+ */
+struct myrs_pdev_info {
+ unsigned char rsvd1; /* Byte 0 */
+ unsigned char channel; /* Byte 1 */
+ unsigned char target; /* Byte 2 */
+ unsigned char lun; /* Byte 3 */
+ /* Configuration Status Bits */
+ unsigned char pdev_fault_tolerant:1; /* Byte 4 Bit 0 */
+ unsigned char pdev_connected:1; /* Byte 4 Bit 1 */
+ unsigned char pdev_local_to_ctlr:1; /* Byte 4 Bit 2 */
+ unsigned char rsvd2:5; /* Byte 4 Bits 3-7 */
+ /* Multiple Host/Controller Status Bits */
+ unsigned char remote_host_dead:1; /* Byte 5 Bit 0 */
+ unsigned char remote_ctlr_dead:1; /* Byte 5 Bit 1 */
+ unsigned char rsvd3:6; /* Byte 5 Bits 2-7 */
+ enum myrs_devstate dev_state; /* Byte 6 */
+ unsigned char nego_data_width; /* Byte 7 */
+ unsigned short nego_sync_rate; /* Bytes 8-9 */
+ /* Multiported Physical Device Information */
+ unsigned char num_ports; /* Byte 10 */
+ unsigned char drv_access_bitmap; /* Byte 11 */
+ unsigned int rsvd4; /* Bytes 12-15 */
+ unsigned char ip_address[16]; /* Bytes 16-31 */
+ unsigned short max_tags; /* Bytes 32-33 */
+ /* Physical Device Operations Status */
+ unsigned char cc_in_progress:1; /* Byte 34 Bit 0 */
+ unsigned char rbld_in_progress:1; /* Byte 34 Bit 1 */
+ unsigned char makecc_in_progress:1; /* Byte 34 Bit 2 */
+ unsigned char pdevinit_in_progress:1; /* Byte 34 Bit 3 */
+ unsigned char migration_in_progress:1; /* Byte 34 Bit 4 */
+ unsigned char patrol_in_progress:1; /* Byte 34 Bit 5 */
+ unsigned char rsvd5:2; /* Byte 34 Bits 6-7 */
+ unsigned char long_op_status; /* Byte 35 */
+ unsigned char parity_errs; /* Byte 36 */
+ unsigned char soft_errs; /* Byte 37 */
+ unsigned char hard_errs; /* Byte 38 */
+ unsigned char misc_errs; /* Byte 39 */
+ unsigned char cmd_timeouts; /* Byte 40 */
+ unsigned char retries; /* Byte 41 */
+ unsigned char aborts; /* Byte 42 */
+ unsigned char pred_failures; /* Byte 43 */
+ unsigned int rsvd6; /* Bytes 44-47 */
+ unsigned short rsvd7; /* Bytes 48-49 */
+ unsigned short devsize_bytes; /* Bytes 50-51 */
+ unsigned int orig_devsize; /* Bytes 52-55 */
+ unsigned int cfg_devsize; /* Bytes 56-59 */
+ unsigned int rsvd8; /* Bytes 60-63 */
+ unsigned char pdev_name[16]; /* Bytes 64-79 */
+ unsigned char rsvd9[16]; /* Bytes 80-95 */
+ unsigned char rsvd10[32]; /* Bytes 96-127 */
+ unsigned char inquiry[36]; /* Bytes 128-163 */
+ unsigned char rsvd11[20]; /* Bytes 164-183 */
+ unsigned char rsvd12[8]; /* Bytes 184-191 */
+ u64 last_read_lba; /* Bytes 192-199 */
+ u64 last_write_lba; /* Bytes 200-207 */
+ u64 cc_lba; /* Bytes 208-215 */
+ u64 rbld_lba; /* Bytes 216-223 */
+ u64 makecc_lba; /* Bytes 224-231 */
+ u64 devinit_lba; /* Bytes 232-239 */
+ u64 migration_lba; /* Bytes 240-247 */
+ u64 patrol_lba; /* Bytes 248-255 */
+ unsigned char rsvd13[256]; /* Bytes 256-511 */
+};
+
+/*
+ * DAC960 V2 Firmware Health Status Buffer structure.
+ */
+struct myrs_fwstat {
+ unsigned int uptime_usecs; /* Bytes 0-3 */
+ unsigned int uptime_msecs; /* Bytes 4-7 */
+ unsigned int seconds; /* Bytes 8-11 */
+ unsigned char rsvd1[4]; /* Bytes 12-15 */
+ unsigned int epoch; /* Bytes 16-19 */
+ unsigned char rsvd2[4]; /* Bytes 20-23 */
+ unsigned int dbg_msgbuf_idx; /* Bytes 24-27 */
+ unsigned int coded_msgbuf_idx; /* Bytes 28-31 */
+ unsigned int cur_timetrace_page; /* Bytes 32-35 */
+ unsigned int cur_prof_page; /* Bytes 36-39 */
+ unsigned int next_evseq; /* Bytes 40-43 */
+ unsigned char rsvd3[4]; /* Bytes 44-47 */
+ unsigned char rsvd4[16]; /* Bytes 48-63 */
+ unsigned char rsvd5[64]; /* Bytes 64-127 */
+};
+
+/*
+ * DAC960 V2 Firmware Get Event reply structure.
+ */
+struct myrs_event {
+ unsigned int ev_seq; /* Bytes 0-3 */
+ unsigned int ev_time; /* Bytes 4-7 */
+ unsigned int ev_code; /* Bytes 8-11 */
+ unsigned char rsvd1; /* Byte 12 */
+ unsigned char channel; /* Byte 13 */
+ unsigned char target; /* Byte 14 */
+ unsigned char lun; /* Byte 15 */
+ unsigned int rsvd2; /* Bytes 16-19 */
+ unsigned int ev_parm; /* Bytes 20-23 */
+ unsigned char sense_data[40]; /* Bytes 24-63 */
+};
+
+/*
+ * DAC960 V2 Firmware Command Control Bits structure.
+ */
+struct myrs_cmd_ctrl {
+ unsigned char fua:1; /* Byte 0 Bit 0 */
+ unsigned char disable_pgout:1; /* Byte 0 Bit 1 */
+ unsigned char rsvd1:1; /* Byte 0 Bit 2 */
+ unsigned char add_sge_mem:1; /* Byte 0 Bit 3 */
+ unsigned char dma_ctrl_to_host:1; /* Byte 0 Bit 4 */
+ unsigned char rsvd2:1; /* Byte 0 Bit 5 */
+ unsigned char no_autosense:1; /* Byte 0 Bit 6 */
+ unsigned char disc_prohibited:1; /* Byte 0 Bit 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Command Timeout structure.
+ */
+struct myrs_cmd_tmo {
+ unsigned char tmo_val:6; /* Byte 0 Bits 0-5 */
+ enum {
+ MYRS_TMO_SCALE_SECONDS = 0,
+ MYRS_TMO_SCALE_MINUTES = 1,
+ MYRS_TMO_SCALE_HOURS = 2,
+ MYRS_TMO_SCALE_RESERVED = 3
+ } __packed tmo_scale:2; /* Byte 0 Bits 6-7 */
+};
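+
+/*
+ * Encoding sketch (illustrative): a 30-second timeout would be
+ * tmo_val = 30, tmo_scale = MYRS_TMO_SCALE_SECONDS; a 2-hour
+ * timeout would be tmo_val = 2, tmo_scale = MYRS_TMO_SCALE_HOURS.
+ */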
+
+/*
+ * DAC960 V2 Firmware Physical Device structure.
+ */
+struct myrs_pdev {
+ unsigned char lun; /* Byte 0 */
+ unsigned char target; /* Byte 1 */
+ unsigned char channel:3; /* Byte 2 Bits 0-2 */
+ unsigned char ctlr:5; /* Byte 2 Bits 3-7 */
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Logical Device structure.
+ */
+struct myrs_ldev {
+ unsigned short ldev_num; /* Bytes 0-1 */
+ unsigned char rsvd:3; /* Byte 2 Bits 0-2 */
+ unsigned char ctlr:5; /* Byte 2 Bits 3-7 */
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Operation Device type.
+ */
+enum myrs_opdev {
+ MYRS_PHYSICAL_DEVICE = 0x00,
+ MYRS_RAID_DEVICE = 0x01,
+ MYRS_PHYSICAL_CHANNEL = 0x02,
+ MYRS_RAID_CHANNEL = 0x03,
+ MYRS_PHYSICAL_CONTROLLER = 0x04,
+ MYRS_RAID_CONTROLLER = 0x05,
+ MYRS_CONFIGURATION_GROUP = 0x10,
+ MYRS_ENCLOSURE = 0x11,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Translate Physical To Logical Device structure.
+ */
+struct myrs_devmap {
+ unsigned short ldev_num; /* Bytes 0-1 */
+ unsigned short rsvd; /* Bytes 2-3 */
+ unsigned char prev_boot_ctlr; /* Byte 4 */
+ unsigned char prev_boot_channel; /* Byte 5 */
+ unsigned char prev_boot_target; /* Byte 6 */
+ unsigned char prev_boot_lun; /* Byte 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Scatter/Gather List Entry structure.
+ */
+struct myrs_sge {
+ u64 sge_addr; /* Bytes 0-7 */
+ u64 sge_count; /* Bytes 8-15 */
+};
+
+/*
+ * DAC960 V2 Firmware Data Transfer Memory Address structure.
+ */
+union myrs_sgl {
+ struct myrs_sge sge[2]; /* Bytes 0-31 */
+ struct {
+ unsigned short sge0_len; /* Bytes 0-1 */
+ unsigned short sge1_len; /* Bytes 2-3 */
+ unsigned short sge2_len; /* Bytes 4-5 */
+ unsigned short rsvd; /* Bytes 6-7 */
+ u64 sge0_addr; /* Bytes 8-15 */
+ u64 sge1_addr; /* Bytes 16-23 */
+ u64 sge2_addr; /* Bytes 24-31 */
+ } ext;
+};
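+
+/*
+ * Illustrative note: transfers needing at most two segments fit the
+ * inline sge[2] form; larger transfers would use the ext form, with
+ * an sgeN_addr entry pointing at an external array of struct
+ * myrs_sge (up to MYRS_SG_LIMIT entries, per the define above).
+ */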
+
+/*
+ * 64 Byte DAC960 V2 Firmware Command Mailbox structure.
+ */
+union myrs_cmd_mbox {
+ unsigned int words[16]; /* Words 0-15 */
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned int rsvd1:24; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd2[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } common;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size; /* Bytes 4-7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char cdb_len; /* Byte 21 */
+ unsigned char cdb[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } SCSI_10;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size; /* Bytes 4-7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char cdb_len; /* Byte 21 */
+ unsigned short rsvd; /* Bytes 22-23 */
+ u64 cdb_addr; /* Bytes 24-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } SCSI_255;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned short rsvd1; /* Bytes 16-17 */
+ unsigned char ctlr_num; /* Byte 18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd2[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } ctlr_info;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_ldev ldev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } ldev_info;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } pdev_info;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned short evnum_upper; /* Bytes 16-17 */
+ unsigned char ctlr_num; /* Byte 18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned short evnum_lower; /* Bytes 22-23 */
+ unsigned char rsvd[8]; /* Bytes 24-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } get_event;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ union {
+ struct myrs_ldev ldev; /* Bytes 16-18 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ };
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ enum myrs_devstate state; /* Byte 22 */
+ unsigned char rsvd[9]; /* Bytes 23-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } set_devstate;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_ldev ldev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char restore_consistency:1; /* Byte 22 Bit 0 */
+ unsigned char initialized_area_only:1; /* Byte 22 Bit 1 */
+ unsigned char rsvd1:6; /* Byte 22 Bits 2-7 */
+ unsigned char rsvd2[9]; /* Bytes 23-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } cc;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ unsigned char first_cmd_mbox_size_kb; /* Byte 4 */
+ unsigned char first_stat_mbox_size_kb; /* Byte 5 */
+ unsigned char second_cmd_mbox_size_kb; /* Byte 6 */
+ unsigned char second_stat_mbox_size_kb; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned int rsvd1:24; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char fwstat_buf_size_kb; /* Byte 22 */
+ unsigned char rsvd2; /* Byte 23 */
+ u64 fwstat_buf_addr; /* Bytes 24-31 */
+ u64 first_cmd_mbox_addr; /* Bytes 32-39 */
+ u64 first_stat_mbox_addr; /* Bytes 40-47 */
+ u64 second_cmd_mbox_addr; /* Bytes 48-55 */
+ u64 second_stat_mbox_addr; /* Bytes 56-63 */
+ } set_mbox;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ enum myrs_opdev opdev; /* Byte 22 */
+ unsigned char rsvd[9]; /* Bytes 23-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } dev_op;
+};
+
+/*
+ * DAC960 V2 Firmware Controller Status Mailbox structure.
+ */
+struct myrs_stat_mbox {
+ unsigned short id; /* Bytes 0-1 */
+ unsigned char status; /* Byte 2 */
+ unsigned char sense_len; /* Byte 3 */
+ int residual; /* Bytes 4-7 */
+};
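+
+/*
+ * Illustrative note: the id field echoes the id written into union
+ * myrs_cmd_mbox, which is how a posted status mailbox is matched
+ * back to its originating command block (e.g. MYRS_DCMD_TAG and
+ * MYRS_MCMD_TAG for the driver's internal commands).
+ */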
+
+struct myrs_cmdblk {
+ union myrs_cmd_mbox mbox;
+ unsigned char status;
+ unsigned char sense_len;
+ int residual;
+ struct completion *complete;
+ struct myrs_sge *sgl;
+ dma_addr_t sgl_addr;
+ unsigned char *dcdb;
+ dma_addr_t dcdb_dma;
+ unsigned char *sense;
+ dma_addr_t sense_addr;
+};
+
+/*
+ * DAC960 Driver Controller structure.
+ */
+struct myrs_hba {
+ void __iomem *io_base;
+ void __iomem *mmio_base;
+ phys_addr_t io_addr;
+ phys_addr_t pci_addr;
+ unsigned int irq;
+
+ unsigned char model_name[28];
+ unsigned char fw_version[12];
+
+ struct Scsi_Host *host;
+ struct pci_dev *pdev;
+
+ unsigned int epoch;
+ unsigned int next_evseq;
+ /* Monitor flags */
+ bool needs_update;
+ bool disable_enc_msg;
+
+ struct workqueue_struct *work_q;
+ char work_q_name[20];
+ struct delayed_work monitor_work;
+ unsigned long primary_monitor_time;
+ unsigned long secondary_monitor_time;
+
+ spinlock_t queue_lock;
+
+ struct dma_pool *sg_pool;
+ struct dma_pool *sense_pool;
+ struct dma_pool *dcdb_pool;
+
+ void (*write_cmd_mbox)(union myrs_cmd_mbox *next_mbox,
+ union myrs_cmd_mbox *cmd_mbox);
+ void (*get_cmd_mbox)(void __iomem *base);
+ void (*disable_intr)(void __iomem *base);
+ void (*reset)(void __iomem *base);
+
+ dma_addr_t cmd_mbox_addr;
+ size_t cmd_mbox_size;
+ union myrs_cmd_mbox *first_cmd_mbox;
+ union myrs_cmd_mbox *last_cmd_mbox;
+ union myrs_cmd_mbox *next_cmd_mbox;
+ union myrs_cmd_mbox *prev_cmd_mbox1;
+ union myrs_cmd_mbox *prev_cmd_mbox2;
+
+ dma_addr_t stat_mbox_addr;
+ size_t stat_mbox_size;
+ struct myrs_stat_mbox *first_stat_mbox;
+ struct myrs_stat_mbox *last_stat_mbox;
+ struct myrs_stat_mbox *next_stat_mbox;
+
+ struct myrs_cmdblk dcmd_blk;
+ struct myrs_cmdblk mcmd_blk;
+ struct mutex dcmd_mutex;
+
+ struct myrs_fwstat *fwstat_buf;
+ dma_addr_t fwstat_addr;
+
+ struct myrs_ctlr_info *ctlr_info;
+ struct mutex cinfo_mutex;
+
+ struct myrs_event *event_buf;
+};
+
+typedef unsigned char (*enable_mbox_t)(void __iomem *base, dma_addr_t addr);
+typedef int (*myrs_hwinit_t)(struct pci_dev *pdev,
+ struct myrs_hba *c, void __iomem *base);
+
+struct myrs_privdata {
+ myrs_hwinit_t hw_init;
+ irq_handler_t irq_handler;
+ unsigned int mmio_size;
+};
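+
+/*
+ * Illustrative note: each entry in the PCI id table of myrs.c
+ * carries one of these in driver_data (e.g. &DAC960_GEM_privdata),
+ * selecting the per-family hardware init routine, interrupt handler
+ * and MMIO window size.
+ */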
+
+/*
+ * DAC960 GEM Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_GEM_mmio_size 0x600
+
+enum DAC960_GEM_reg_offset {
+ DAC960_GEM_IDB_READ_OFFSET = 0x214,
+ DAC960_GEM_IDB_CLEAR_OFFSET = 0x218,
+ DAC960_GEM_ODB_READ_OFFSET = 0x224,
+ DAC960_GEM_ODB_CLEAR_OFFSET = 0x228,
+ DAC960_GEM_IRQSTS_OFFSET = 0x208,
+ DAC960_GEM_IRQMASK_READ_OFFSET = 0x22C,
+ DAC960_GEM_IRQMASK_CLEAR_OFFSET = 0x230,
+ DAC960_GEM_CMDMBX_OFFSET = 0x510,
+ DAC960_GEM_CMDSTS_OFFSET = 0x518,
+ DAC960_GEM_ERRSTS_READ_OFFSET = 0x224,
+ DAC960_GEM_ERRSTS_CLEAR_OFFSET = 0x228,
+};
+
+/*
+ * DAC960 GEM Series Inbound Door Bell Register.
+ */
+#define DAC960_GEM_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_GEM_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_GEM_IDB_GEN_IRQ 0x04
+#define DAC960_GEM_IDB_CTRL_RESET 0x08
+#define DAC960_GEM_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_GEM_IDB_HWMBOX_FULL 0x01
+#define DAC960_GEM_IDB_INIT_IN_PROGRESS 0x02
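+
+/*
+ * Doorbell handshake sketch (assumed from the bit names): the driver
+ * rings DAC960_GEM_IDB_HWMBOX_NEW_CMD or DAC960_GEM_IDB_MMBOX_NEW_CMD
+ * after posting a command, and acknowledges completions through the
+ * outbound door bell bits below.
+ */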
+
+/*
+ * DAC960 GEM Series Outbound Door Bell Register.
+ */
+#define DAC960_GEM_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_GEM_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_GEM_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_GEM_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 GEM Series Interrupt Mask Register.
+ */
+#define DAC960_GEM_IRQMASK_HWMBOX_IRQ 0x01
+#define DAC960_GEM_IRQMASK_MMBOX_IRQ 0x02
+
+/*
+ * DAC960 GEM Series Error Status Register.
+ */
+#define DAC960_GEM_ERRSTS_PENDING 0x20
+
+/*
+ * dma_addr_writeql is provided to write dma_addr_t types
+ * to a 64-bit PCI address space register. The controller
+ * will accept having the register written as two 32-bit
+ * values.
+ *
+ * In HIGHMEM kernels, dma_addr_t is a 64-bit value;
+ * without HIGHMEM, dma_addr_t is a 32-bit value.
+ *
+ * The compiler should always fix up the assignment
+ * to u.wq appropriately, depending upon the size of
+ * dma_addr_t.
+ */
+static inline
+void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
+{
+ union {
+ u64 wq;
+ uint wl[2];
+ } u;
+
+ u.wq = addr;
+
+ writel(u.wl[0], write_address);
+ writel(u.wl[1], write_address + 4);
+}
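+
+/*
+ * Usage sketch (illustrative, using the GEM command mailbox offset
+ * defined above):
+ *
+ *	dma_addr_writeql(cs->cmd_mbox_addr,
+ *			 base + DAC960_GEM_CMDMBX_OFFSET);
+ *
+ * issues two 32-bit writes, to the register base and to base + 4.
+ */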
+
+/*
+ * DAC960 BA Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_BA_mmio_size 0x80
+
+enum DAC960_BA_reg_offset {
+ DAC960_BA_IRQSTS_OFFSET = 0x30,
+ DAC960_BA_IRQMASK_OFFSET = 0x34,
+ DAC960_BA_CMDMBX_OFFSET = 0x50,
+ DAC960_BA_CMDSTS_OFFSET = 0x58,
+ DAC960_BA_IDB_OFFSET = 0x60,
+ DAC960_BA_ODB_OFFSET = 0x61,
+ DAC960_BA_ERRSTS_OFFSET = 0x63,
+};
+
+/*
+ * DAC960 BA Series Inbound Door Bell Register.
+ */
+#define DAC960_BA_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_BA_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_BA_IDB_GEN_IRQ 0x04
+#define DAC960_BA_IDB_CTRL_RESET 0x08
+#define DAC960_BA_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_BA_IDB_HWMBOX_EMPTY 0x01
+#define DAC960_BA_IDB_INIT_DONE 0x02
+
+/*
+ * DAC960 BA Series Outbound Door Bell Register.
+ */
+#define DAC960_BA_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_BA_ODB_MMBOX_ACK_IRQ 0x02
+
+#define DAC960_BA_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_BA_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 BA Series Interrupt Mask Register.
+ */
+#define DAC960_BA_IRQMASK_DISABLE_IRQ 0x04
+#define DAC960_BA_IRQMASK_DISABLE_I2O 0x08
+
+/*
+ * DAC960 BA Series Error Status Register.
+ */
+#define DAC960_BA_ERRSTS_PENDING 0x04
+
+/*
+ * DAC960 LP Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_LP_mmio_size 0x80
+
+enum DAC960_LP_reg_offset {
+ DAC960_LP_CMDMBX_OFFSET = 0x10,
+ DAC960_LP_CMDSTS_OFFSET = 0x18,
+ DAC960_LP_IDB_OFFSET = 0x20,
+ DAC960_LP_ODB_OFFSET = 0x2C,
+ DAC960_LP_ERRSTS_OFFSET = 0x2E,
+ DAC960_LP_IRQSTS_OFFSET = 0x30,
+ DAC960_LP_IRQMASK_OFFSET = 0x34,
+};
+
+/*
+ * DAC960 LP Series Inbound Door Bell Register.
+ */
+#define DAC960_LP_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_LP_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_LP_IDB_GEN_IRQ 0x04
+#define DAC960_LP_IDB_CTRL_RESET 0x08
+#define DAC960_LP_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_LP_IDB_HWMBOX_FULL 0x01
+#define DAC960_LP_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 LP Series Outbound Door Bell Register.
+ */
+#define DAC960_LP_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_LP_ODB_MMBOX_ACK_IRQ 0x02
+
+#define DAC960_LP_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_LP_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 LP Series Interrupt Mask Register.
+ */
+#define DAC960_LP_IRQMASK_DISABLE_IRQ 0x04
+
+/*
+ * DAC960 LP Series Error Status Register.
+ */
+#define DAC960_LP_ERRSTS_PENDING 0x04
+
+#endif /* _MYRS_H */
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 8620ac5d6e41..5aac3e801903 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -2638,7 +2638,7 @@ static int nsp32_detect(struct pci_dev *pdev)
/*
* setup DMA
*/
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
goto scsi_unregister;
}
@@ -2646,7 +2646,9 @@ static int nsp32_detect(struct pci_dev *pdev)
/*
* allocate autoparam DMA resource.
*/
- data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
+ data->autoparam = dma_alloc_coherent(&pdev->dev,
+ sizeof(nsp32_autoparam), &(data->auto_paddr),
+ GFP_KERNEL);
if (data->autoparam == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
goto scsi_unregister;
@@ -2655,8 +2657,8 @@ static int nsp32_detect(struct pci_dev *pdev)
/*
* allocate scatter-gather DMA resource.
*/
- data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
- &(data->sg_paddr));
+ data->sg_list = dma_alloc_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
+ &data->sg_paddr, GFP_KERNEL);
if (data->sg_list == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
goto free_autoparam;
@@ -2761,11 +2763,11 @@ static int nsp32_detect(struct pci_dev *pdev)
free_irq(host->irq, data);
free_sg_list:
- pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
+ dma_free_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
data->sg_list, data->sg_paddr);
free_autoparam:
- pci_free_consistent(pdev, sizeof(nsp32_autoparam),
+ dma_free_coherent(&pdev->dev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
scsi_unregister:
@@ -2780,12 +2782,12 @@ static int nsp32_release(struct Scsi_Host *host)
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
if (data->autoparam) {
- pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
+ dma_free_coherent(&data->Pci->dev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
}
if (data->sg_list) {
- pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
+ dma_free_coherent(&data->Pci->dev, NSP32_SG_TABLE_SIZE,
data->sg_list, data->sg_paddr);
}
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 67b14576fff2..e19fa883376f 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -445,7 +445,7 @@ static void _put_request(struct request *rq)
* code paths.
*/
if (unlikely(rq->bio))
- blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq));
+ blk_mq_end_request(rq, BLK_STS_IOERR);
else
blk_put_request(rq);
}
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 199527dbaaa1..48e0624ecc68 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -132,4 +132,12 @@ enum pm8001_hba_info_flags {
PM8001F_RUN_TIME = (1U << 1),
};
+/*
+ * Phy Status
+ */
+#define PHY_LINK_DISABLE 0x00
+#define PHY_LINK_DOWN 0x01
+#define PHY_STATE_LINK_UP_SPCV 0x2
+#define PHY_STATE_LINK_UP_SPC 0x1
+
#endif
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 4dd6cad330e8..d0bb357034d8 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1479,6 +1479,12 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
} else {
u32 producer_index;
void *pi_virt = circularQ->pi_virt;
+ /* A spurious interrupt can occur during setup if
+ * kexec-ing and the driver does a doorbell access
+ * with the pre-kexec oq interrupt setup still live.
+ */
+ if (!pi_virt)
+ break;
/* Update the producer index from SPC */
producer_index = pm8001_read_32(pi_virt);
circularQ->producer_index = cpu_to_le32(producer_index);
@@ -2414,7 +2420,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
- t->data_dir == PCI_DMA_FROMDEVICE) {
+ t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("PIO read len = %d\n", len));
@@ -3810,7 +3816,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
" status = %x\n", status));
if (status == 0) {
phy->phy_state = 1;
- if (pm8001_ha->flags == PM8001F_RUN_TIME)
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL)
complete(phy->enable_completion);
}
break;
@@ -4196,12 +4203,12 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
return ret;
}
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
static const u8 data_dir_flags[] = {
- [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
- [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */
- [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
- [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
+ [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
+ [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
};
void
pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
@@ -4248,13 +4255,13 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
* DMA-map SMP request, response buffers
*/
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out;
@@ -4287,10 +4294,10 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
err_out_2:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
err_out:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return rc;
}
@@ -4369,7 +4376,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- if (task->data_dir == PCI_DMA_NONE) {
+ if (task->data_dir == DMA_NONE) {
ATAP = 0x04; /* no data*/
PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
} else if (likely(!task->ata_task.device_control_reg_update)) {
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index e4867e690c84..6d91e2446542 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,10 +131,6 @@
#define LINKRATE_30 (0x02 << 8)
#define LINKRATE_60 (0x04 << 8)
-/* for phy state */
-
-#define PHY_STATE_LINK_UP_SPC 0x1
-
/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
#define GSM_SM_BASE 0x4F0000
struct mpi_msg_hdr{
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 7a697ca68501..d71e7e4ec29c 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -121,7 +121,7 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
{
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- phy->phy_state = 0;
+ phy->phy_state = PHY_LINK_DISABLE;
phy->pm8001_ha = pm8001_ha;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
@@ -152,7 +152,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
- pci_free_consistent(pm8001_ha->pdev,
+ dma_free_coherent(&pm8001_ha->pdev->dev,
(pm8001_ha->memoryMap.region[i].total_len +
pm8001_ha->memoryMap.region[i].alignment),
pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -501,30 +501,12 @@ static int pci_go_44(struct pci_dev *pdev)
{
int rc;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "44-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
- return rc;
- }
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
}
return rc;
}
@@ -1067,6 +1049,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
if (rc)
goto err_out_shost;
scsi_scan_host(pm8001_ha->shost);
+ pm8001_ha->flags = PM8001F_RUN_TIME;
return 0;
err_out_shost:
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 947d6017d004..b3be49d41375 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
u64 align_offset = 0;
if (align)
align_offset = (dma_addr_t)align - 1;
- mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align,
- &mem_dma_handle);
+ mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align,
+ &mem_dma_handle, GFP_KERNEL);
if (!mem_virt_alloc) {
pm8001_printk("memory allocation error\n");
return -1;
@@ -157,9 +157,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
int rc = 0, phy_id = sas_phy->id;
struct pm8001_hba_info *pm8001_ha = NULL;
struct sas_phy_linkrates *rates;
+ struct sas_ha_struct *sas_ha;
+ struct pm8001_phy *phy;
DECLARE_COMPLETION_ONSTACK(completion);
unsigned long flags;
pm8001_ha = sas_phy->ha->lldd_ha;
+ phy = &pm8001_ha->phy[phy_id];
pm8001_ha->phy[phy_id].enable_completion = &completion;
switch (func) {
case PHY_FUNC_SET_LINK_RATE:
@@ -172,7 +175,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
pm8001_ha->phy[phy_id].maximum_linkrate =
rates->maximum_linkrate;
}
- if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -180,7 +183,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
PHY_LINK_RESET);
break;
case PHY_FUNC_HARD_RESET:
- if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -188,7 +191,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
PHY_HARD_RESET);
break;
case PHY_FUNC_LINK_RESET:
- if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -200,6 +203,25 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
PHY_LINK_RESET);
break;
case PHY_FUNC_DISABLE:
+ if (pm8001_ha->chip_id != chip_8001) {
+ if (pm8001_ha->phy[phy_id].phy_state ==
+ PHY_STATE_LINK_UP_SPCV) {
+ sas_ha = pm8001_ha->sas;
+ sas_phy_disconnected(&phy->sas_phy);
+ sas_ha->notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ }
+ } else {
+ if (pm8001_ha->phy[phy_id].phy_state ==
+ PHY_STATE_LINK_UP_SPC) {
+ sas_ha = pm8001_ha->sas;
+ sas_phy_disconnected(&phy->sas_phy);
+ sas_ha->notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ }
+ }
PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
break;
case PHY_FUNC_GET_EVENTS:
@@ -374,6 +396,13 @@ static int pm8001_task_exec(struct sas_task *task,
return 0;
}
pm8001_ha = pm8001_find_ha_by_dev(task->dev);
+ if (pm8001_ha->controller_fatal_error) {
+ struct task_status_struct *ts = &t->task_status;
+
+ ts->resp = SAS_TASK_UNDELIVERED;
+ t->task_done(t);
+ return 0;
+ }
PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
spin_lock_irqsave(&pm8001_ha->lock, flags);
do {
@@ -466,7 +495,7 @@ err_out:
dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
if (!sas_protocol_ata(t->task_proto))
if (n_elem)
- dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
+ dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
t->data_dir);
out_done:
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -504,9 +533,9 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
break;
case SAS_PROTOCOL_SATA:
@@ -1020,13 +1049,11 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
struct pm8001_device *pm8001_dev;
struct pm8001_hba_info *pm8001_ha;
struct sas_phy *phy;
- u32 device_id = 0;
if (!dev || !dev->lldd_dev)
return -1;
pm8001_dev = dev->lldd_dev;
- device_id = pm8001_dev->device_id;
pm8001_ha = pm8001_find_ha_by_dev(dev);
PM8001_EH_DBG(pm8001_ha,
@@ -1159,7 +1186,6 @@ int pm8001_abort_task(struct sas_task *task)
{
unsigned long flags;
u32 tag;
- u32 device_id;
struct domain_device *dev ;
struct pm8001_hba_info *pm8001_ha;
struct scsi_lun lun;
@@ -1173,7 +1199,6 @@ int pm8001_abort_task(struct sas_task *task)
dev = task->dev;
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
- device_id = pm8001_dev->device_id;
phy_id = pm8001_dev->attached_phy;
rc = pm8001_find_tag(task, &tag);
if (rc == 0) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 80b4dd6df0c2..f88b0d33c385 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -58,7 +58,7 @@
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
-#define DRV_VERSION "0.1.38"
+#define DRV_VERSION "0.1.39"
#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@@ -538,6 +538,7 @@ struct pm8001_hba_info {
u32 logging_level;
u32 fw_status;
u32 smp_exp_mode;
+ bool controller_fatal_error;
const struct firmware *fw_image;
struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
u32 reset_in_progress;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 42f0405601ad..63e4f7d34d6c 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -577,6 +577,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+ /* Update Fatal error interrupt vector */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ ((pm8001_ha->number_of_intr - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
@@ -1110,6 +1113,9 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
return -EBUSY;
}
+ /* Initialize the controller fatal error flag */
+ pm8001_ha->controller_fatal_error = false;
+
/* Initialize pci space address eg: mpi offset */
init_pci_device_addresses(pm8001_ha);
init_default_table_values(pm8001_ha);
@@ -1218,13 +1224,17 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
u32 bootloader_state;
u32 ibutton0, ibutton1;
- /* Check if MPI is in ready state to reset */
- if (mpi_uninit_check(pm8001_ha) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MPI state is not ready\n"));
- return -1;
+ /* Process MPI table uninitialization only if FW is ready */
+ if (!pm8001_ha->controller_fatal_error) {
+ /* Check if MPI is in ready state to reset */
+ if (mpi_uninit_check(pm8001_ha) != 0) {
+ regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "MPI state is not ready scratch1 :0x%x\n",
+ regval));
+ return -1;
+ }
}
-
/* checked for reset register normal state; 0x0 */
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
PM8001_INIT_DBG(pm8001_ha,
@@ -2123,7 +2133,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
- t->data_dir == PCI_DMA_FROMDEVICE) {
+ t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("PIO read len = %d\n", len));
@@ -3118,8 +3128,9 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
status, phy_id));
if (status == 0) {
- phy->phy_state = 1;
- if (pm8001_ha->flags == PM8001F_RUN_TIME)
+ phy->phy_state = PHY_LINK_DOWN;
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL)
complete(phy->enable_completion);
}
return 0;
@@ -3211,7 +3222,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
return 0;
}
phy->phy_attached = 0;
- phy->phy_state = 0;
+ phy->phy_state = PHY_LINK_DISABLE;
break;
case HW_EVENT_PORT_INVALID:
PM8001_MSG_DBG(pm8001_ha,
@@ -3384,13 +3395,14 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 status =
le32_to_cpu(pPayload->status);
u32 phyid =
- le32_to_cpu(pPayload->phyid);
+ le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("phy:0x%x status:0x%x\n",
phyid, status));
- if (status == 0)
- phy->phy_state = 0;
+ if (status == PHY_STOP_SUCCESS ||
+ status == PHY_STOP_ERR_DEVICE_ATTACHED)
+ phy->phy_state = PHY_LINK_DISABLE;
return 0;
}
@@ -3752,6 +3764,46 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
}
+static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha)
+{
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_1:0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7)));
+}
+
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
struct outbound_queue_table *circularQ;
@@ -3759,10 +3811,28 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
u8 uninitialized_var(bc);
u32 ret = MPI_IO_STATUS_FAIL;
unsigned long flags;
+ u32 regval;
+ if (vec == (pm8001_ha->number_of_intr - 1)) {
+ regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
+ SCRATCH_PAD_MIPSALL_READY) {
+ pm8001_ha->controller_fatal_error = true;
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "Firmware Fatal error! Regval:0x%x\n", regval));
+ print_scratchpad_registers(pm8001_ha);
+ return ret;
+ }
+ }
spin_lock_irqsave(&pm8001_ha->lock, flags);
circularQ = &pm8001_ha->outbnd_q_tbl[vec];
do {
+ /* A spurious interrupt can occur during setup if we are
+ * kexec-ing and the driver performs a doorbell access with
+ * the pre-kexec OQ interrupt setup still in place.
+ */
+ if (!circularQ->pi_virt)
+ break;
ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
if (MPI_IO_STATUS_SUCCESS == ret) {
/* process the outbound message */
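
The new check at the top of process_oq() treats the controller as dead when any of the RAAE/IOP0/IOP1 ready bits has dropped out of scratchpad register 1, and refuses to touch the outbound queues afterwards. A self-contained sketch of the mask logic; the IOP bit values are copied from the header hunk further down, while SCRATCH_PAD_RAAE_READY's value is not shown in this diff, so the 0x3 used here is an assumption:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SCRATCH_PAD_RAAE_READY 0x3        /* assumed; not in this diff */
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000
#define SCRATCH_PAD_MIPSALL_READY \
    (SCRATCH_PAD_IOP1_READY | SCRATCH_PAD_IOP0_READY | SCRATCH_PAD_RAAE_READY)

/* All ready bits must be set; a single dropped bit means firmware died. */
static bool fw_alive(uint32_t scratch_pad_1)
{
    return (scratch_pad_1 & SCRATCH_PAD_MIPSALL_READY) ==
        SCRATCH_PAD_MIPSALL_READY;
}

int main(void)
{
    printf("%d\n", fw_alive(0x3C03));          /* 1: all ready */
    printf("%d\n", fw_alive(0x3C03 & ~0x400)); /* 0: an IOP0 bit lost */
    return 0;
}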
@@ -3785,12 +3855,12 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
return ret;
}
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
static const u8 data_dir_flags[] = {
- [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
- [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */
- [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
- [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
+ [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
+ [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
};
static void build_smp_cmd(u32 deviceID, __le32 hTag,
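
The data_dir_flags conversion above swaps the legacy PCI_DMA_* indices for the generic enum dma_data_direction, whose enumerators carry the same numeric values (0 through 3), so the designated-initializer table keeps working unchanged. A compact user-space model of such a direction-to-hardware-code lookup; the hardware encodings below are placeholders, not the pm80xx values:

#include <stdio.h>

/* Mirrors enum dma_data_direction's 0..3 numbering. */
enum dir { BIDIRECTIONAL, TO_DEVICE, FROM_DEVICE, NONE };

/* Hardware encodings assumed for illustration only. */
enum { DATA_DIR_BYRECIPIENT = 0x0, DATA_DIR_OUT = 0x1,
       DATA_DIR_IN = 0x2, DATA_DIR_NONE = 0x3 };

static const unsigned char data_dir_flags[] = {
    [BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* unspecified */
    [TO_DEVICE]     = DATA_DIR_OUT,         /* outbound */
    [FROM_DEVICE]   = DATA_DIR_IN,          /* inbound */
    [NONE]          = DATA_DIR_NONE,        /* no transfer */
};

int main(void)
{
    printf("TO_DEVICE -> 0x%x\n", data_dir_flags[TO_DEVICE]);
    return 0;
}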
@@ -3832,13 +3902,13 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
* DMA-map SMP request, response buffers
*/
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out;
@@ -3929,10 +3999,10 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
err_out_2:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
err_out:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return rc;
}
@@ -4156,7 +4226,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
- if (task->data_dir == PCI_DMA_NONE) {
+ if (task->data_dir == DMA_NONE) {
ATAP = 0x04; /* no data*/
PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
} else if (likely(!task->ata_task.device_control_reg_update)) {
@@ -4606,9 +4676,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
u32 length, u8 *buf)
{
- u32 page_code, i;
+ u32 i;
- page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
mpi_set_phy_profile_req(pm8001_ha,
SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 889e69ce3689..84d7426441bf 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -170,6 +170,10 @@
#define LINKRATE_60 (0x04 << 8)
#define LINKRATE_120 (0x08 << 8)
+/*phy_stop*/
+#define PHY_STOP_SUCCESS 0x00
+#define PHY_STOP_ERR_DEVICE_ATTACHED 0x1046
+
/* phy_profile */
#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04
#define PHY_DWORD_LENGTH 0xC
@@ -216,8 +220,6 @@
#define SAS_DOPNRJT_RTRY_TMO 128
#define SAS_COPNRJT_RTRY_TMO 128
-/* for phy state */
-#define PHY_STATE_LINK_UP_SPCV 0x2
/*
Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
@@ -1384,6 +1386,9 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000
+#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \
+ SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_RAAE_READY)
/* boot loader state */
#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 0a5dd5595dd3..d5a4f17fce51 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2855,12 +2855,12 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
qedf->num_queues);
- qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
+ qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
qedf->num_queues * sizeof(struct qedf_glbl_q_params),
- &qedf->hw_p_cpuq);
+ &qedf->hw_p_cpuq, GFP_KERNEL);
if (!qedf->p_cpuq) {
- QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
+ QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
return 1;
}
@@ -2929,7 +2929,7 @@ static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
if (qedf->p_cpuq) {
size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
- pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
+ dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
qedf->hw_p_cpuq);
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e5bd035ebad0..105b0e4d7818 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -806,11 +806,11 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
memset(&qedi->pf_params.iscsi_pf_params, 0,
sizeof(qedi->pf_params.iscsi_pf_params));
- qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+ qedi->p_cpuq = dma_alloc_coherent(&qedi->pdev->dev,
qedi->num_queues * sizeof(struct qedi_glbl_q_params),
- &qedi->hw_p_cpuq);
+ &qedi->hw_p_cpuq, GFP_KERNEL);
if (!qedi->p_cpuq) {
- QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+ QEDI_ERR(&qedi->dbg_ctx, "dma_alloc_coherent fail\n");
rval = -1;
goto err_alloc_mem;
}
@@ -871,7 +871,7 @@ static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
if (qedi->p_cpuq) {
size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
- pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+ dma_free_coherent(&qedi->pdev->dev, size, qedi->p_cpuq,
qedi->hw_p_cpuq);
}
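
The qedf and qedi hunks are the same mechanical conversion: pci_alloc_consistent(pdev, size, &dma) becomes dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL), since the old wrapper was the coherent API hard-coded to an implicit allocation mode. A kernel-style fragment of the pattern, using only the calls that appear in this diff (a sketch, not a standalone compilable unit):

/* Before: the deprecated PCI wrapper. */
buf = pci_alloc_consistent(pdev, size, &dma_handle);

/* After: the generic DMA API; the GFP flag is now explicit, and
 * GFP_KERNEL is safe here because these probe/setup paths run in
 * process context. */
buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
/* ... */
dma_free_coherent(&pdev->dev, size, buf, dma_handle);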
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 390775d5c918..15a50cc7e4b3 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1750,7 +1750,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
uint8_t *sp, *tbuf;
dma_addr_t p_tbuf;
- tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
+ tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
if (!tbuf)
return -ENOMEM;
#endif
@@ -1841,7 +1841,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
out:
#if DUMP_IT_BACK
- pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
+ dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
#endif
return err;
}
@@ -4259,8 +4259,8 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->devnum = devnum; /* specifies microcode load address */
#ifdef QLA_64BIT_PTR
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);
error = -ENODEV;
@@ -4270,7 +4270,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
ha->host_no);
#else
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);
error = -ENODEV;
@@ -4278,17 +4278,17 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
#endif
- ha->request_ring = pci_alloc_consistent(ha->pdev,
+ ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
- &ha->request_dma);
+ &ha->request_dma, GFP_KERNEL);
if (!ha->request_ring) {
printk(KERN_INFO "qla1280: Failed to get request memory\n");
goto error_put_host;
}
- ha->response_ring = pci_alloc_consistent(ha->pdev,
+ ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
- &ha->response_dma);
+ &ha->response_dma, GFP_KERNEL);
if (!ha->response_ring) {
printk(KERN_INFO "qla1280: Failed to get response memory\n");
goto error_free_request_ring;
@@ -4370,11 +4370,11 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
release_region(host->io_port, 0xff);
#endif
error_free_response_ring:
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
ha->response_ring, ha->response_dma);
error_free_request_ring:
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
ha->request_ring, ha->request_dma);
error_put_host:
@@ -4404,10 +4404,10 @@ qla1280_remove_one(struct pci_dev *pdev)
release_region(host->io_port, 0xff);
#endif
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
ha->request_ring, ha->request_dma);
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
ha->response_ring, ha->response_dma);
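
qla1280's mask negotiation keeps the usual try-64-then-32 ladder, now expressed through dma_set_mask() on the underlying struct device. A self-contained model of that fallback, with a stub standing in for the kernel call:

#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK_32 0xFFFFFFFFULL
#define DMA_BIT_MASK_64 (~0ULL)

/* Stub: pretend the platform only supports 32-bit DMA addressing. */
static int dma_set_mask_stub(uint64_t mask)
{
    return mask > DMA_BIT_MASK_32 ? -5 /* -EIO */ : 0;
}

int main(void)
{
    if (dma_set_mask_stub(DMA_BIT_MASK_64) == 0)
        printf("64-bit DMA enabled\n");
    else if (dma_set_mask_stub(DMA_BIT_MASK_32) == 0)
        printf("falling back to 32-bit DMA\n"); /* taken here */
    else
        printf("no usable DMA mask, aborting probe\n");
    return 0;
}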
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4888b999e82f..b28f159fdaee 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -158,9 +158,17 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return 0;
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
+
if (IS_NOCACHE_VPD_TYPE(ha))
ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
ha->nvram_size);
+ mutex_unlock(&ha->optrom_mutex);
+
return memory_read_from_buffer(buf, count, &off, ha->nvram,
ha->nvram_size);
}
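
From here on, nearly every qla2xxx sysfs handler grows the same prologue: take optrom_mutex, re-check qla2x00_chip_is_down() under the lock, and unlock on every exit path. The point is that the chip-state test and the subsequent flash/NVRAM access must be atomic with respect to an ISP reset. A runnable user-space model of the guard, with a pthread mutex and a boolean standing in for the kernel primitives:

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

static pthread_mutex_t optrom_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool chip_is_down; /* flipped by the reset path under the lock */

/* The state check and the hardware access sit under one lock so a
 * reset cannot slip in between them. */
static ssize_t guarded_read(char *buf, size_t count)
{
    pthread_mutex_lock(&optrom_mutex);
    if (chip_is_down) {
        pthread_mutex_unlock(&optrom_mutex);
        return -EAGAIN; /* caller retries once the chip is back */
    }
    snprintf(buf, count, "flash contents");
    pthread_mutex_unlock(&optrom_mutex);
    return (ssize_t)strlen(buf);
}

int main(void)
{
    char buf[32];

    printf("%zd\n", guarded_read(buf, sizeof(buf)));
    chip_is_down = true;
    printf("%zd\n", guarded_read(buf, sizeof(buf))); /* -EAGAIN */
    return 0;
}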
@@ -208,10 +216,17 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
return -EAGAIN;
}
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
+
/* Write NVRAM. */
ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
- count);
+ count);
+ mutex_unlock(&ha->optrom_mutex);
ql_dbg(ql_dbg_user, vha, 0x7060,
"Setting ISP_ABORT_NEEDED\n");
@@ -322,6 +337,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
size = ha->optrom_size - start;
mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
switch (val) {
case 0:
if (ha->optrom_state != QLA_SREADING &&
@@ -499,8 +518,14 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
faddr = ha->flt_region_vpd_sec << 2;
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
ha->vpd_size);
+ mutex_unlock(&ha->optrom_mutex);
}
return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}
@@ -518,9 +543,6 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
if (unlikely(pci_channel_offline(ha->pdev)))
return 0;
- if (qla2x00_chip_is_down(vha))
- return 0;
-
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
!ha->isp_ops->write_nvram)
return 0;
@@ -531,16 +553,25 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
return -EAGAIN;
}
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
+
/* Write NVRAM. */
ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
/* Update flash version information for 4Gb & above. */
- if (!IS_FWI2_CAPABLE(ha))
+ if (!IS_FWI2_CAPABLE(ha)) {
+ mutex_unlock(&ha->optrom_mutex);
return -EINVAL;
+ }
tmp_data = vmalloc(256);
if (!tmp_data) {
+ mutex_unlock(&ha->optrom_mutex);
ql_log(ql_log_warn, vha, 0x706b,
"Unable to allocate memory for VPD information update.\n");
return -ENOMEM;
@@ -548,6 +579,8 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
ha->isp_ops->get_flash_version(vha, tmp_data);
vfree(tmp_data);
+ mutex_unlock(&ha->optrom_mutex);
+
return count;
}
@@ -573,10 +606,15 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
return 0;
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
return 0;
+ }
rval = qla2x00_read_sfp_dev(vha, buf, count);
+ mutex_unlock(&vha->hw->optrom_mutex);
+
if (rval)
return -EIO;
@@ -785,9 +823,11 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
if (unlikely(pci_channel_offline(ha->pdev)))
return 0;
-
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
return 0;
+ }
if (ha->xgmac_data)
goto do_read;
@@ -795,6 +835,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
&ha->xgmac_data_dma, GFP_KERNEL);
if (!ha->xgmac_data) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x7076,
"Unable to allocate memory for XGMAC read-data.\n");
return 0;
@@ -806,6 +847,8 @@ do_read:
rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
XGMAC_DATA_SIZE, &actual_size);
+
+ mutex_unlock(&vha->hw->optrom_mutex);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7077,
"Unable to read XGMAC data (%x).\n", rval);
@@ -842,13 +885,16 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
if (ha->dcbx_tlv)
goto do_read;
-
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
return 0;
+ }
ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
&ha->dcbx_tlv_dma, GFP_KERNEL);
if (!ha->dcbx_tlv) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x7078,
"Unable to allocate memory for DCBX TLV read-data.\n");
return -ENOMEM;
@@ -859,6 +905,9 @@ do_read:
rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
DCBX_TLV_DATA_SIZE);
+
+ mutex_unlock(&vha->hw->optrom_mutex);
+
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7079,
"Unable to read DCBX TLV (%x).\n", rval);
@@ -1159,6 +1208,34 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
+ vha->hw->last_zio_threshold);
+}
+
+static ssize_t
+qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
+ return -EINVAL;
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+ if (val < 0 || val > 256)
+ return -ERANGE;
+
+ atomic_set(&vha->hw->zio_threshold, val);
+ return strlen(buf);
+}
+
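
qla_zio_threshold_store() above follows the driver's store convention: reject the write unless ZIO mode 6 is active, parse with sscanf, range-check against 0..256, publish via atomic_set(), and return strlen(buf). The parse-and-bound core, as a runnable stand-alone sketch with plain sentinels modeling the errno values:

#include <stdio.h>

/* Parse a decimal sysfs write and bound it to [lo, hi]. */
static int parse_bounded(const char *buf, int lo, int hi, int *out)
{
    int val;

    if (sscanf(buf, "%d", &val) != 1)
        return -1;          /* -EINVAL in the driver */
    if (val < lo || val > hi)
        return -2;          /* -ERANGE in the driver */
    *out = val;
    return 0;
}

int main(void)
{
    int v;
    printf("%d\n", parse_bounded("128\n", 0, 256, &v)); /* 0, v = 128 */
    printf("%d\n", parse_bounded("900\n", 0, 256, &v)); /* -2 */
    printf("%d\n", parse_bounded("abc\n", 0, 256, &v)); /* -1 */
    return 0;
}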
+static ssize_t
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1184,15 +1261,17 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return -EPERM;
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&vha->hw->optrom_mutex);
if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x707a,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
}
- if (sscanf(buf, "%d", &val) != 1)
- return -EINVAL;
-
if (val)
rval = ha->isp_ops->beacon_on(vha);
else
@@ -1201,6 +1280,8 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
if (rval != QLA_SUCCESS)
count = 0;
+ mutex_unlock(&vha->hw->optrom_mutex);
+
return count;
}
@@ -1370,18 +1451,24 @@ qla2x00_thermal_temp_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
uint16_t temp = 0;
+ int rc;
+ mutex_lock(&vha->hw->optrom_mutex);
if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
goto done;
}
if (vha->hw->flags.eeh_busy) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
goto done;
}
- if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+ rc = qla2x00_get_thermal_temp(vha, &temp);
+ mutex_unlock(&vha->hw->optrom_mutex);
+ if (rc == QLA_SUCCESS)
return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
done:
@@ -1402,13 +1489,24 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
}
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x707c,
"ISP reset active.\n");
- else if (!vha->hw->flags.eeh_busy)
- rval = qla2x00_get_firmware_state(vha, state);
- if (rval != QLA_SUCCESS)
+ goto out;
+ } else if (vha->hw->flags.eeh_busy) {
+ mutex_unlock(&vha->hw->optrom_mutex);
+ goto out;
+ }
+
+ rval = qla2x00_get_firmware_state(vha, state);
+ mutex_unlock(&vha->hw->optrom_mutex);
+out:
+ if (rval != QLA_SUCCESS) {
memset(state, -1, sizeof(state));
+ rval = qla2x00_get_firmware_state(vha, state);
+ }
return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
state[0], state[1], state[2], state[3], state[4], state[5]);
@@ -1534,6 +1632,433 @@ qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
ha->max_speed_sup ? "32Gps" : "16Gps");
}
+/* ----- */
+
+static ssize_t
+qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "Supported options: enabled | disabled | dual | exclusive\n");
+
+ /* --- */
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
+
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_EXCLUSIVE);
+ break;
+ case QLA2XXX_INI_MODE_DISABLED:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_DISABLED);
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_ENABLED);
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_DUAL);
+ break;
+ }
+ len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
+
+ return len;
+}
+
+static char *mode_to_str[] = {
+ "exclusive",
+ "disabled",
+ "enabled",
+ "dual",
+};
+
+#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
+static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
+{
+ int rc = 0;
+ enum {
+ NO_ACTION,
+ MODE_CHANGE_ACCEPT,
+ MODE_CHANGE_NO_ACTION,
+ TARGET_STILL_ACTIVE,
+ };
+ int action = NO_ACTION;
+ int set_mode = 0;
+ u8 eo_toggle = 0; /* exchange offload flipped */
+
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ switch (op) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle) {
+ /*
+ * The number of exchanges to be
+ * offloaded was tweaked, or the
+ * offload option was flipped.
+ */
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ break;
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle) {
+ /*
+ * The number of exchanges to be
+ * offloaded was tweaked, or the
+ * offload option was flipped.
+ */
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ } else {
+ action = MODE_CHANGE_ACCEPT;
+ }
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ /* active_mode is target only, reset it to dual */
+ if (qla_tgt_mode_enabled(vha)) {
+ set_mode = 1;
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (qla_tgt_mode_enabled(vha))
+ action = TARGET_STILL_ACTIVE;
+ else {
+ action = MODE_CHANGE_ACCEPT;
+ set_mode = 1;
+ }
+ break;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ switch (op) {
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle)
+ /*
+ * The number of exchanges to be
+ * offloaded was tweaked, or the
+ * offload option was flipped.
+ */
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = NO_ACTION;
+ } else
+ action = NO_ACTION;
+
+ break;
+
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle)
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = MODE_CHANGE_NO_ACTION;
+ } else
+ action = MODE_CHANGE_NO_ACTION;
+ break;
+
+ case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
+ if (qla_tgt_mode_enabled(vha)) {
+ action = MODE_CHANGE_ACCEPT;
+ set_mode = 1;
+ } else
+ action = MODE_CHANGE_ACCEPT;
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (qla_tgt_mode_enabled(vha))
+ action = TARGET_STILL_ACTIVE;
+ else {
+ if (vha->hw->flags.fw_started)
+ action = MODE_CHANGE_NO_ACTION;
+ else
+ action = MODE_CHANGE_ACCEPT;
+ }
+ break;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ switch (op) {
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
+ eo_toggle)
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = NO_ACTION;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ case QLA2XXX_INI_MODE_DISABLED:
+ action = MODE_CHANGE_ACCEPT;
+ break;
+ default:
+ action = MODE_CHANGE_NO_ACTION;
+ break;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_DUAL:
+ switch (op) {
+ case QLA2XXX_INI_MODE_DUAL:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
+ vha->u_ql2xiniexchg) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+
+ if ((((vha->ql2xexchoffld +
+ vha->ql2xiniexchg) !=
+ (vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) || eo_toggle)
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = NO_ACTION;
+ } else {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
+ vha->u_ql2xiniexchg) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+
+ if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
+ != (vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) || eo_toggle)
+ action = MODE_CHANGE_NO_ACTION;
+ else
+ action = NO_ACTION;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ /* turning off initiator mode */
+ set_mode = 1;
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ set_mode = 1;
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_ACCEPT;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ action = TARGET_STILL_ACTIVE;
+ } else {
+ action = MODE_CHANGE_ACCEPT;
+ }
+ }
+ break;
+ }
+
+ switch (action) {
+ case MODE_CHANGE_ACCEPT:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
+ mode_to_str[vha->qlini_mode], mode_to_str[op],
+ vha->ql2xexchoffld, vha->u_ql2xexchoffld,
+ vha->ql2xiniexchg, vha->u_ql2xiniexchg);
+
+ vha->qlini_mode = op;
+ vha->ql2xexchoffld = vha->u_ql2xexchoffld;
+ vha->ql2xiniexchg = vha->u_ql2xiniexchg;
+ if (set_mode)
+ qlt_set_mode(vha);
+ vha->flags.online = 1;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MODE_CHANGE_NO_ACTION:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
+ mode_to_str[vha->qlini_mode], mode_to_str[op],
+ vha->ql2xexchoffld, vha->u_ql2xexchoffld,
+ vha->ql2xiniexchg, vha->u_ql2xiniexchg);
+ vha->qlini_mode = op;
+ vha->ql2xexchoffld = vha->u_ql2xexchoffld;
+ vha->ql2xiniexchg = vha->u_ql2xiniexchg;
+ break;
+
+ case TARGET_STILL_ACTIVE:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Target Mode is active. Unable to change Mode.\n");
+ break;
+
+ case NO_ACTION:
+ default:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
+ vha->qlini_mode, op,
+ vha->ql2xexchoffld, vha->u_ql2xexchoffld);
+ break;
+ }
+
+ return rc;
+}
+
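
qla_set_ini_mode() is a 4x4 transition matrix written out as nested switches, with most cells differing only in whether the exchange-offload parameters changed. Were it ever reworked, the same decisions could live in a lookup table. A deliberately simplified sketch of that alternative: the exchange-offload re-check is collapsed away and only the target-mode-active case is tabulated, so this is an illustration of the shape, not a drop-in replacement:

#include <stdio.h>

enum mode { EXCLUSIVE, DISABLED, ENABLED, DUAL, NMODES };
enum action { NO_ACTION, ACCEPT, NO_ACTION_SYNC, TGT_ACTIVE };

/* Approximate decision per (current, requested) pair while target
 * mode is active; the real code layers exchange-offload deltas on
 * top of this. Column order follows the mode enum. */
static const enum action base[NMODES][NMODES] = {
    /*           to: EXCL            DISABLED        ENABLED     DUAL   */
    [EXCLUSIVE] = { NO_ACTION,      NO_ACTION_SYNC, TGT_ACTIVE, ACCEPT },
    [DISABLED]  = { NO_ACTION_SYNC, NO_ACTION_SYNC, TGT_ACTIVE, ACCEPT },
    [ENABLED]   = { NO_ACTION_SYNC, ACCEPT,         NO_ACTION,  ACCEPT },
    [DUAL]      = { ACCEPT,         ACCEPT,         TGT_ACTIVE, NO_ACTION },
};

int main(void)
{
    printf("dual -> disabled: %d (ACCEPT=1)\n", base[DUAL][DISABLED]);
    return 0;
}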
+static ssize_t
+qlini_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int ini;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
+ strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
+ ini = QLA2XXX_INI_MODE_EXCLUSIVE;
+ else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
+ strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
+ ini = QLA2XXX_INI_MODE_DISABLED;
+ else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
+ strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
+ ini = QLA2XXX_INI_MODE_ENABLED;
+ else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
+ strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
+ ini = QLA2XXX_INI_MODE_DUAL;
+ else
+ return -EINVAL;
+
+ qla_set_ini_mode(vha, ini);
+ return strlen(buf);
+}
+
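
qlini_mode_store() matches the buffer against four literal strings with a chain of strncasecmp calls. A table of name/value pairs keeps that flat; a small runnable sketch, with the string constants shortened from the driver's QLA2XXX_INI_MODE_STR_* macros:

#include <stdio.h>
#include <string.h>
#include <strings.h>

enum { EXCLUSIVE, DISABLED, ENABLED, DUAL };

static const struct { const char *name; int mode; } tbl[] = {
    { "exclusive", EXCLUSIVE }, { "disabled", DISABLED },
    { "enabled",   ENABLED   }, { "dual",     DUAL     },
};

static int parse_mode(const char *buf)
{
    for (unsigned i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
        if (strncasecmp(tbl[i].name, buf, strlen(tbl[i].name)) == 0)
            return tbl[i].mode;
    return -1; /* -EINVAL in the driver */
}

int main(void)
{
    printf("%d\n", parse_mode("DUAL\n"));  /* 3 */
    printf("%d\n", parse_mode("bogus\n")); /* -1 */
    return 0;
}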
+static ssize_t
+ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "target exchange: new %d : current: %d\n\n",
+ vha->u_ql2xexchoffld, vha->ql2xexchoffld);
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
+ vha->host_no);
+
+ return len;
+}
+
+static ssize_t
+ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val > FW_MAX_EXCHANGES_CNT)
+ val = FW_MAX_EXCHANGES_CNT;
+ else if (val < 0)
+ val = 0;
+
+ vha->u_ql2xexchoffld = val;
+ return strlen(buf);
+}
+
+static ssize_t
+ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "target exchange: new %d : current: %d\n\n",
+ vha->u_ql2xiniexchg, vha->ql2xiniexchg);
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
+ vha->host_no);
+
+ return len;
+}
+
+static ssize_t
+ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val > FW_MAX_EXCHANGES_CNT)
+ val = FW_MAX_EXCHANGES_CNT;
+ else if (val < 0)
+ val = 0;
+
+ vha->u_ql2xiniexchg = val;
+ return strlen(buf);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1581,6 +2106,13 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
+static DEVICE_ATTR(zio_threshold, 0644,
+ qla_zio_threshold_show,
+ qla_zio_threshold_store);
+static DEVICE_ATTR_RW(qlini_mode);
+static DEVICE_ATTR_RW(ql2xexchoffld);
+static DEVICE_ATTR_RW(ql2xiniexchg);
+
struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -1617,9 +2149,28 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_pep_version,
&dev_attr_min_link_speed,
&dev_attr_max_speed_sup,
+ &dev_attr_zio_threshold,
+ NULL, /* reserve for qlini_mode */
+ NULL, /* reserve for ql2xiniexchg */
+ NULL, /* reserve for ql2xexchoffld */
NULL,
};
+void qla_insert_tgt_attrs(void)
+{
+ struct device_attribute **attr;
+
+ /* advance to empty slot */
+ for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
+ continue;
+
+ *attr = &dev_attr_qlini_mode;
+ attr++;
+ *attr = &dev_attr_ql2xiniexchg;
+ attr++;
+ *attr = &dev_attr_ql2xexchoffld;
+}
+
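
qla_insert_tgt_attrs() fills the three NULL placeholders reserved in qla2x00_host_attrs[] above, so target-mode builds can splice their attributes in before the SCSI host is registered while the array keeps a static NULL terminator. A standalone model of appending into pre-reserved slots; names here are illustrative:

#include <stdio.h>

static const char *attrs[] = {
    "driver_version", "fw_version",
    NULL, NULL, NULL,   /* reserved for late-registered attributes */
    NULL,               /* terminator */
};

static void insert_late(const char *extra[], int n)
{
    const char **slot = attrs;

    while (*slot)               /* advance to the first empty slot */
        slot++;
    for (int i = 0; i < n; i++) /* must fit within the reserved gap */
        *slot++ = extra[i];
}

int main(void)
{
    const char *tgt[] = { "qlini_mode", "ql2xiniexchg", "ql2xexchoffld" };

    insert_late(tgt, 3);
    for (const char **a = attrs; *a; a++)
        printf("%s\n", *a);
    return 0;
}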
/* Host attributes. */
static void
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c11a89be292c..4a9fd8d944d6 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2487,7 +2487,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
vha = shost_priv(host);
}
- if (qla2x00_reset_active(vha)) {
+ if (qla2x00_chip_is_down(vha)) {
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
bsg_request->msgcode);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a9dc9c4a6382..26b93c563f92 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -262,8 +262,8 @@ struct name_list_extended {
struct get_name_list_extended *l;
dma_addr_t ldma;
struct list_head fcports;
- spinlock_t fcports_lock;
u32 size;
+ u8 sent;
};
/*
* Timeout timer counts in seconds
@@ -519,6 +519,7 @@ struct srb_iocb {
enum {
TYPE_SRB,
TYPE_TGT_CMD,
+ TYPE_TGT_TMCMD, /* task management */
};
typedef struct srb {
@@ -2280,7 +2281,6 @@ struct ct_sns_desc {
enum discovery_state {
DSC_DELETED,
DSC_GNN_ID,
- DSC_GID_PN,
DSC_GNL,
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
@@ -2305,7 +2305,6 @@ enum login_state { /* FW control Target side */
enum fcport_mgt_event {
FCME_RELOGIN = 1,
FCME_RSCN,
- FCME_GIDPN_DONE,
FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */
FCME_PRLI_DONE,
FCME_GNL_DONE,
@@ -2351,7 +2350,7 @@ typedef struct fc_port {
unsigned int login_succ:1;
unsigned int query:1;
unsigned int id_changed:1;
- unsigned int rscn_rcvd:1;
+ unsigned int scan_needed:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2375,11 +2374,13 @@ typedef struct fc_port {
unsigned long expires;
struct list_head del_list_entry;
struct work_struct free_work;
-
+ struct work_struct reg_work;
+ uint64_t jiffies_at_registration;
struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
uint16_t tgt_id;
uint16_t old_tgt_id;
+ uint16_t sec_since_registration;
uint8_t fcp_prio;
@@ -2412,6 +2413,7 @@ typedef struct fc_port {
struct qla_tgt_sess *tgt_session;
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
+ enum discovery_state next_disc_state;
enum login_state fw_login_state;
unsigned long dm_login_expire;
unsigned long plogi_nack_done_deadline;
@@ -3212,17 +3214,14 @@ enum qla_work_type {
QLA_EVT_ASYNC_LOGOUT,
QLA_EVT_ASYNC_LOGOUT_DONE,
QLA_EVT_ASYNC_ADISC,
- QLA_EVT_ASYNC_ADISC_DONE,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
- QLA_EVT_GIDPN,
QLA_EVT_GPNID,
QLA_EVT_UNMAP,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
QLA_EVT_PRLI,
QLA_EVT_GPSC,
- QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
QLA_EVT_NACK,
QLA_EVT_RELOGIN,
@@ -3483,6 +3482,9 @@ struct qla_qpair {
struct list_head qp_list_elem; /* vha->qp_list */
struct list_head hints_list;
uint16_t cpuid;
+ uint16_t retry_term_cnt;
+ uint32_t retry_term_exchg_addr;
+ uint64_t retry_term_jiff;
struct qla_tgt_counters tgt_counters;
};
@@ -4184,6 +4186,10 @@ struct qla_hw_data {
atomic_t nvme_active_aen_cnt;
uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
+
+ atomic_t zio_threshold;
+ uint16_t last_zio_threshold;
+#define DEFAULT_ZIO_THRESHOLD 64
};
#define FW_ABILITY_MAX_SPEED_MASK 0xFUL
@@ -4263,10 +4269,11 @@ typedef struct scsi_qla_host {
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
#define QPAIR_ONLINE_CHECK_NEEDED 27
-#define SET_ZIO_THRESHOLD_NEEDED 28
+#define SET_NVME_ZIO_THRESHOLD_NEEDED 28
#define DETECT_SFP_CHANGE 29
#define N2N_LOGIN_NEEDED 30
#define IOCB_WORK_ACTIVE 31
+#define SET_ZIO_THRESHOLD_NEEDED 32
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4369,6 +4376,13 @@ typedef struct scsi_qla_host {
atomic_t vref_count;
struct qla8044_reset_template reset_tmplt;
uint16_t bbcr;
+
+ uint16_t u_ql2xexchoffld;
+ uint16_t u_ql2xiniexchg;
+ uint16_t qlini_mode;
+ uint16_t ql2xexchoffld;
+ uint16_t ql2xiniexchg;
+
struct name_list_extended gnl;
/* Count of active session/fcport */
int fcport_count;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 178974896b5c..3673fcdb033a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -54,7 +54,7 @@ extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
extern void qla2x00_quiesce_io(scsi_qla_host_t *);
extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
-
+void qla_register_fcport_fn(struct work_struct *);
extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
@@ -73,8 +73,6 @@ extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -109,6 +107,7 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
int qla24xx_detect_sfp(scsi_qla_host_t *vha);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+
void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
@@ -118,6 +117,8 @@ extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
+void qla_rscn_replay(fc_port_t *fcport);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -158,6 +159,7 @@ extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
+extern int ql2xexlogins;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -208,7 +210,7 @@ extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
extern void qla2x00_sp_compl(void *, int);
extern void qla2xxx_qpair_sp_free_dma(void *);
extern void qla2xxx_qpair_sp_compl(void *, int);
-extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
+extern void qla24xx_sched_upd_fcport(fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
@@ -644,9 +646,6 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
extern void qla2x00_async_iocb_timeout(void *data);
-extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *);
-int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *);
-void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *);
extern void qla2x00_free_fcport(fc_port_t *);
@@ -677,6 +676,7 @@ void qla_scan_work_fn(struct work_struct *);
*/
struct device_attribute;
extern struct device_attribute *qla2x00_host_attrs[];
+extern struct device_attribute *qla2x00_host_attrs_dm[];
struct fc_function_template;
extern struct fc_function_template qla2xxx_transport_functions;
extern struct fc_function_template qla2xxx_transport_vport_functions;
@@ -690,7 +690,7 @@ extern int qla2x00_echo_test(scsi_qla_host_t *,
extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
struct qla_fcp_prio_cfg *, uint8_t);
-
+void qla_insert_tgt_attrs(void);
/*
* Global Function Prototypes in qla_dfs.c source file.
*/
@@ -897,5 +897,6 @@ void qlt_unknown_atio_work_fn(struct work_struct *);
void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
void qlt_remove_target_resources(struct qla_hw_data *);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
+void qlt_set_mode(struct scsi_qla_host *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index a0038d879b9d..90cfa394f942 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2973,237 +2973,6 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
}
}
-/* GID_PN completion processing. */
-void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- ql_dbg(ql_dbg_disc, vha, 0x201d,
- "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
- __func__, fcport->port_name, fcport->disc_state,
- fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
- fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
-
- if (fcport->disc_state == DSC_DELETE_PEND)
- return;
-
- if (ea->sp->gen2 != fcport->login_gen) {
- /* PLOGI/PRLI/LOGO came in while cmd was out.*/
- ql_dbg(ql_dbg_disc, vha, 0x201e,
- "%s %8phC generation changed rscn %d|%d n",
- __func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen);
- return;
- }
-
- if (!ea->rc) {
- if (ea->sp->gen1 == fcport->rscn_gen) {
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->flags |= FCF_FABRIC_DEVICE;
-
- if (fcport->d_id.b24 == ea->id.b24) {
- /* cable plugged into the same place */
- switch (vha->host->active_mode) {
- case MODE_TARGET:
- if (fcport->fw_login_state ==
- DSC_LS_PRLI_COMP) {
- u16 data[2];
- /*
- * Late RSCN was delivered.
- * Remote port already login'ed.
- */
- ql_dbg(ql_dbg_disc, vha, 0x201f,
- "%s %d %8phC post adisc\n",
- __func__, __LINE__,
- fcport->port_name);
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(
- vha, fcport, data);
- }
- break;
- case MODE_INITIATOR:
- case MODE_DUAL:
- default:
- ql_dbg(ql_dbg_disc, vha, 0x201f,
- "%s %d %8phC post %s\n", __func__,
- __LINE__, fcport->port_name,
- (atomic_read(&fcport->state) ==
- FCS_ONLINE) ? "adisc" : "gnl");
-
- if (atomic_read(&fcport->state) ==
- FCS_ONLINE) {
- u16 data[2];
-
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(
- vha, fcport, data);
- } else {
- qla24xx_post_gnl_work(vha,
- fcport);
- }
- break;
- }
- } else { /* fcport->d_id.b24 != ea->id.b24 */
- fcport->d_id.b24 = ea->id.b24;
- fcport->id_changed = 1;
- if (fcport->deleted != QLA_SESS_DELETED) {
- ql_dbg(ql_dbg_disc, vha, 0x2021,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- }
- }
- } else { /* ea->sp->gen1 != fcport->rscn_gen */
- ql_dbg(ql_dbg_disc, vha, 0x2022,
- "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- /* rscn came in while cmd was out */
- qla24xx_post_gidpn_work(vha, fcport);
- }
- } else { /* ea->rc */
- /* cable pulled */
- if (ea->sp->gen1 == fcport->rscn_gen) {
- if (ea->sp->gen2 == fcport->login_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x2042,
- "%s %d %8phC post del sess\n", __func__,
- __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2045,
- "%s %d %8phC login\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- }
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2049,
- "%s %d %8phC post gidpn\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
- }
- }
-} /* gidpn_event */
-
-static void qla2x00_async_gidpn_sp_done(void *s, int res)
-{
- struct srb *sp = s;
- struct scsi_qla_host *vha = sp->vha;
- fc_port_t *fcport = sp->fcport;
- u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
- struct event_arg ea;
-
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-
- memset(&ea, 0, sizeof(ea));
- ea.fcport = fcport;
- ea.id.b.domain = id[0];
- ea.id.b.area = id[1];
- ea.id.b.al_pa = id[2];
- ea.sp = sp;
- ea.rc = res;
- ea.event = FCME_GIDPN_DONE;
-
- if (res == QLA_FUNCTION_TIMEOUT) {
- ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
- "Async done-%s WWPN %8phC timed out.\n",
- sp->name, fcport->port_name);
- qla24xx_post_gidpn_work(sp->vha, fcport);
- sp->free(sp);
- return;
- } else if (res) {
- ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
- "Async done-%s fail res %x, WWPN %8phC\n",
- sp->name, res, fcport->port_name);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x204f,
- "Async done-%s good WWPN %8phC ID %3phC\n",
- sp->name, fcport->port_name, id);
- }
-
- qla2x00_fcport_event_handler(vha, &ea);
-
- sp->free(sp);
-}
-
-int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
-{
- int rval = QLA_FUNCTION_FAILED;
- struct ct_sns_req *ct_req;
- srb_t *sp;
-
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
- return rval;
-
- fcport->disc_state = DSC_GID_PN;
- fcport->scan_state = QLA_FCPORT_SCAN;
- sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
- if (!sp)
- goto done;
-
- fcport->flags |= FCF_ASYNC_SENT;
- sp->type = SRB_CT_PTHRU_CMD;
- sp->name = "gidpn";
- sp->gen1 = fcport->rscn_gen;
- sp->gen2 = fcport->login_gen;
-
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- /* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
- GID_PN_RSP_SIZE);
-
- /* GIDPN req */
- memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
- WWN_SIZE);
-
- /* req & rsp use the same buffer */
- sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
- sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
- sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_gidpn_sp_done;
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a4,
- "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
- sp->name, fcport->port_name,
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
- return rval;
-
-done_free_sp:
- sp->free(sp);
-done:
- fcport->flags &= ~FCF_ASYNC_ACTIVE;
- return rval;
-}
-
-int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
-{
- struct qla_work_evt *e;
- int ls;
-
- ls = atomic_read(&vha->loop_state);
- if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
- test_bit(UNLOADING, &vha->dpc_flags))
- return 0;
-
- e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
- if (!e)
- return QLA_FUNCTION_FAILED;
-
- e->u.fcport.fcport = fcport;
- fcport->flags |= FCF_ASYNC_ACTIVE;
- return qla2x00_post_work(vha, e);
-}
-
int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_work_evt *e;
@@ -3237,9 +3006,6 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__func__, fcport->port_name);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -3261,6 +3027,9 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
+ if (res == QLA_FUNCTION_TIMEOUT)
+ return;
+
if (res == (DID_ERROR << 16)) {
/* entry status error */
goto done;
@@ -3272,7 +3041,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
ql_dbg(ql_dbg_disc, vha, 0x2019,
"GPSC command unsupported, disabling query.\n");
ha->flags.gpsc_supported = 0;
- res = QLA_SUCCESS;
+ goto done;
}
} else {
switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
@@ -3305,7 +3074,6 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
-done:
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPSC_DONE;
ea.rc = res;
@@ -3313,6 +3081,7 @@ done:
ea.sp = sp;
qla2x00_fcport_event_handler(vha, &ea);
+done:
sp->free(sp);
}
@@ -3355,15 +3124,15 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla24xx_async_gpsc_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_disc, vha, 0x205e,
"Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
sp->name, fcport->port_name, sp->handle,
fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
done_free_sp:
@@ -3442,26 +3211,10 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (ea->rc) {
/* cable is disconnected */
list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
- if (fcport->d_id.b24 == ea->id.b24) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC DS %d\n",
- __func__, __LINE__,
- fcport->port_name,
- fcport->disc_state);
+ if (fcport->d_id.b24 == ea->id.b24)
fcport->scan_state = QLA_FCPORT_SCAN;
- switch (fcport->disc_state) {
- case DSC_DELETED:
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- break;
- }
- }
+
+ qlt_schedule_sess_for_deletion(fcport);
}
} else {
/* cable is connected */
@@ -3470,34 +3223,19 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
list) {
if ((conflict->d_id.b24 == ea->id.b24) &&
- (fcport != conflict)) {
- /* 2 fcports with conflict Nport ID or
+ (fcport != conflict))
+ /*
+ * 2 fcports with conflict Nport ID or
* an existing fcport is having nport ID
* conflict with new fcport.
*/
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC DS %d\n",
- __func__, __LINE__,
- conflict->port_name,
- conflict->disc_state);
conflict->scan_state = QLA_FCPORT_SCAN;
- switch (conflict->disc_state) {
- case DSC_DELETED:
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict->port_name);
- qlt_schedule_sess_for_deletion
- (conflict);
- break;
- }
- }
+
+ qlt_schedule_sess_for_deletion(conflict);
}
+ fcport->scan_needed = 0;
fcport->rscn_gen++;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
@@ -3548,19 +3286,7 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
conflict->disc_state);
conflict->scan_state = QLA_FCPORT_SCAN;
- switch (conflict->disc_state) {
- case DSC_DELETED:
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict->port_name);
- qlt_schedule_sess_for_deletion
- (conflict);
- break;
- }
+ qlt_schedule_sess_for_deletion(conflict);
}
}
@@ -3724,13 +3450,14 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gpnid_sp_done;
+ ql_dbg(ql_dbg_disc, vha, 0x2067,
+ "Async-%s hdl=%x ID %3phC.\n", sp->name,
+ sp->handle, ct_req->req.port_id.port_id);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0x2067,
- "Async-%s hdl=%x ID %3phC.\n", sp->name,
- sp->handle, ct_req->req.port_id.port_id);
return rval;
done_free_sp:
@@ -3896,9 +3623,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
fc_port_t *fcport;
u32 i, rc;
bool found;
- struct fab_scan_rp *rp;
+ struct fab_scan_rp *rp, *trp;
unsigned long flags;
u8 recheck = 0;
+ u16 dup = 0, dup_cnt = 0;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s enter\n", __func__);
@@ -3929,6 +3657,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
for (i = 0; i < vha->hw->max_fibre_devices; i++) {
u64 wwn;
+ int k;
rp = &vha->scan.l[i];
found = false;
@@ -3937,6 +3666,20 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
if (wwn == 0)
continue;
+ /* Remove duplicate NPORT ID entries from the switch database */
+ for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
+ trp = &vha->scan.l[k];
+ if (rp->id.b24 == trp->id.b24) {
+ dup = 1;
+ dup_cnt++;
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ vha, 0xffff,
+ "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
+ rp->id.b24, rp->port_name, trp->port_name);
+ memset(trp, 0, sizeof(*trp));
+ }
+ }
+
if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
continue;
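
The new loop in qla24xx_async_gnnft_done() walks the remainder of the switch response for every entry and zeroes any later record carrying the same 24-bit port ID, an O(n^2) pass that is acceptable for the bounded number of entries a fabric scan returns. The core of it, runnable stand-alone (the struct is a reduced stand-in for fab_scan_rp):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct scan_rp { uint32_t id_b24; char port_name[8]; };

/* Zero every later entry that repeats an earlier entry's port ID. */
static unsigned dedup(struct scan_rp *l, unsigned n)
{
    unsigned dup_cnt = 0;

    for (unsigned i = 0; i < n; i++) {
        if (l[i].id_b24 == 0)   /* analog of the wwn == 0 skip */
            continue;
        for (unsigned k = i + 1; k < n; k++) {
            if (l[i].id_b24 == l[k].id_b24) {
                dup_cnt++;
                memset(&l[k], 0, sizeof(l[k]));
            }
        }
    }
    return dup_cnt;
}

int main(void)
{
    struct scan_rp l[3] = {
        { 0x010203, "wwpn-a" }, { 0x010203, "wwpn-b" },
        { 0x0a0b0c, "wwpn-c" },
    };
    printf("%u duplicates\n", dedup(l, 3)); /* 1 */
    return 0;
}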
@@ -3951,7 +3694,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
continue;
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
fcport->scan_state = QLA_FCPORT_FOUND;
found = true;
/*
@@ -3976,25 +3719,30 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
}
}
+ if (dup) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Detected %d duplicate NPORT ID(s) from switch data base\n",
+ dup_cnt);
+ }
+
/*
* Logout all previous fabric dev marked lost, except FCP2 devices.
*/
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
continue;
}
if (fcport->scan_state != QLA_FCPORT_FOUND) {
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
if ((qla_dual_mode_enabled(vha) ||
qla_ini_mode_enabled(vha)) &&
atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID) {
+ if (fcport->flags & FCF_FCP2_DEVICE)
+ fcport->logout_on_delete = 0;
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_FCP2_DEVICE) == 0) {
ql_dbg(ql_dbg_disc, vha, 0x20f0,
"%s %d %8phC post del sess\n",
__func__, __LINE__,
@@ -4005,7 +3753,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
}
}
} else {
- if (fcport->rscn_rcvd ||
+ if (fcport->scan_needed ||
fcport->disc_state != DSC_LOGIN_COMPLETE) {
if (fcport->login_retry == 0) {
fcport->login_retry =
@@ -4015,7 +3763,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
fcport->port_name, fcport->loop_id,
fcport->login_retry);
}
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
qla24xx_fcport_handle_login(vha, fcport);
}
}
@@ -4030,7 +3778,7 @@ out:
if (recheck) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->rscn_rcvd) {
+ if (fcport->scan_needed) {
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
break;
@@ -4039,6 +3787,41 @@ out:
}
}
+static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
+ srb_t *sp, int cmd)
+{
+ struct qla_work_evt *e;
+
+ if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
+ return QLA_PARAMETER_ERROR;
+
+ e = qla2x00_alloc_work(vha, cmd);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.iosb.sp = sp;
+
+ return qla2x00_post_work(vha, e);
+}
+
+static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
+ srb_t *sp, int cmd)
+{
+ struct qla_work_evt *e;
+
+ if (cmd != QLA_EVT_GPNFT)
+ return QLA_PARAMETER_ERROR;
+
+ e = qla2x00_alloc_work(vha, cmd);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.gpnft.fc4_type = FC4_TYPE_NVME;
+ e->u.gpnft.sp = sp;
+
+ return qla2x00_post_work(vha, e);
+}
+
static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
struct srb *sp)
{
@@ -4139,120 +3922,85 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
{
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
- struct qla_work_evt *e;
struct ct_sns_req *ct_req =
(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
u16 cmd = be16_to_cpu(ct_req->command);
u8 fc4_type = sp->gen2;
unsigned long flags;
+ int rc;
/* gen2 field is holding the fc4type */
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async done-%s res %x FC4Type %x\n",
sp->name, res, sp->gen2);
+ del_timer(&sp->u.iocb_cmd.timer);
+ sp->rc = res;
if (res) {
unsigned long flags;
+ const char *name = sp->name;
- sp->free(sp);
- spin_lock_irqsave(&vha->work_lock, flags);
- vha->scan.scan_flags &= ~SF_SCANNING;
- vha->scan.scan_retry++;
- spin_unlock_irqrestore(&vha->work_lock, flags);
+ /*
+ * We are in an Interrupt context, queue up this
+ * sp for GNNFT_DONE work. This will allow all
+ * the resource to get freed up.
+ */
+ rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+ QLA_EVT_GNNFT_DONE);
+ if (rc) {
+ /* Cleanup here to prevent memory leak */
+ qla24xx_sp_unmap(vha, sp);
- if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- } else {
- ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
- "Async done-%s rescan failed on all retries\n",
- sp->name);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s rescan failed on all retries.\n",
+ name);
+ }
}
return;
}
- if (!res)
- qla2x00_find_free_fcp_nvme_slot(vha, sp);
+ qla2x00_find_free_fcp_nvme_slot(vha, sp);
if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
cmd == GNN_FT_CMD) {
- del_timer(&sp->u.iocb_cmd.timer);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
- if (!e) {
- /*
- * please ignore kernel warning. Otherwise,
- * we have mem leak.
- */
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- }
-
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async done-%s unable to alloc work element\n",
- sp->name);
- sp->free(sp);
+ sp->rc = res;
+ rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
+ if (rc) {
+ qla24xx_sp_unmap(vha, sp);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- return;
}
- e->u.gpnft.fc4_type = FC4_TYPE_NVME;
- sp->rc = res;
- e->u.gpnft.sp = sp;
-
- qla2x00_post_work(vha, e);
return;
}
- if (cmd == GPN_FT_CMD)
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
- else
- e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
- if (!e) {
- /* please ignore kernel warning. Otherwise, we have mem leak. */
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- }
+ if (cmd == GPN_FT_CMD) {
+ rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+ QLA_EVT_GPNFT_DONE);
+ } else {
+ rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+ QLA_EVT_GNNFT_DONE);
+ }
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async done-%s unable to alloc work element\n",
- sp->name);
- sp->free(sp);
+ if (rc) {
+ qla24xx_sp_unmap(vha, sp);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
return;
}
-
- sp->rc = res;
- e->u.iosb.sp = sp;
-
- qla2x00_post_work(vha, e);
}
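
A minimal sketch of the defer-to-work pattern used in this completion handler, with hypothetical my_sp/my_sp_unmap names standing in for the driver's own; as in the error paths above, a failed post means the caller must reclaim resources inline:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_sp;                           /* hypothetical stand-ins */
    void my_sp_unmap(struct my_sp *sp);

    struct done_evt {
            struct work_struct work;
            struct my_sp *sp;
    };

    static void done_evt_fn(struct work_struct *w)
    {
            struct done_evt *e = container_of(w, struct done_evt, work);

            my_sp_unmap(e->sp);     /* process context: DMA frees are safe */
            kfree(e);
    }

    /* Called from interrupt context; defers all freeing to a worker. */
    static int post_done_work(struct my_sp *sp)
    {
            struct done_evt *e = kzalloc(sizeof(*e), GFP_ATOMIC);

            if (!e)
                    return -ENOMEM; /* caller unmaps/frees inline */
            e->sp = sp;
            INIT_WORK(&e->work, done_evt_fn);
            schedule_work(&e->work);
            return 0;
    }
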
/*
@@ -4285,11 +4033,13 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
WARN_ON(1);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
goto done_free_sp;
}
ql_dbg(ql_dbg_disc, vha, 0xfffff,
- "%s: FC4Type %x, CT-PASSTRHU %s command ctarg rsp size %d, ctarg req size %d\n",
+ "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
__func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
sp->u.iocb_cmd.u.ctarg.req_size);
@@ -4318,8 +4068,12 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->done = qla2x00_async_gpnft_gnnft_sp_done;
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+ if (rval != QLA_SUCCESS) {
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
goto done_free_sp;
+ }
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
@@ -4351,7 +4105,6 @@ void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s enter\n", __func__);
- del_timer(&sp->u.iocb_cmd.timer);
qla24xx_async_gnnft(vha, sp, sp->gen2);
}
@@ -4444,9 +4197,9 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- rspsz = sizeof(struct ct_sns_gpnft_rsp) +
- ((vha->hw->max_fibre_devices - 1) *
- sizeof(struct ct_sns_gpn_ft_data));
+ rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
+ memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+ memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* CT_IU preamble */
@@ -4644,9 +4397,6 @@ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
__func__, fcport->port_name);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
return;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b934977c5c26..c72d8012fe2a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -52,12 +52,14 @@ qla2x00_sp_timeout(struct timer_list *t)
struct srb_iocb *iocb;
struct req_que *req;
unsigned long flags;
+ struct qla_hw_data *ha = sp->vha->hw;
- spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ WARN_ON_ONCE(irqs_disabled());
+ spin_lock_irqsave(&ha->hardware_lock, flags);
req = sp->qpair->req;
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
- spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
iocb->timeout(sp);
}
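
This hunk moves the timeout handler onto hardware_lock and asserts it runs with interrupts enabled. The timer-callback pattern, sketched with a hypothetical my_sp layout:

    #include <linux/spinlock.h>
    #include <linux/timer.h>

    struct my_hw { spinlock_t hardware_lock; };
    struct my_sp {
            struct timer_list timer;
            struct my_hw *hw;
            void (*timeout_fn)(struct my_sp *);
    };
    void claim_outstanding_slot(struct my_sp *sp);  /* hypothetical */

    static void my_sp_timeout(struct timer_list *t)
    {
            struct my_sp *sp = from_timer(sp, t, timer);
            unsigned long flags;

            /* Timer callbacks run in softirq context with IRQs enabled. */
            WARN_ON_ONCE(irqs_disabled());

            spin_lock_irqsave(&sp->hw->hardware_lock, flags);
            claim_outstanding_slot(sp);     /* detach sp from the ring */
            spin_unlock_irqrestore(&sp->hw->hardware_lock, flags);

            sp->timeout_fn(sp);             /* handler runs outside the lock */
    }
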
@@ -245,6 +247,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
}
+ ql_dbg(ql_dbg_disc, vha, 0x2072,
+ "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
+ "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->login_retry);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
fcport->flags |= FCF_LOGIN_NEEDED;
@@ -252,11 +260,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
}
- ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
- "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->login_retry);
return rval;
done_free_sp:
@@ -301,15 +304,16 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_logout_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2070,
"Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
fcport->port_name);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
done_free_sp:
@@ -396,6 +400,9 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2066,
"%s %8phC: adisc fail: post delete\n",
__func__, ea->fcport->port_name);
+ /* deleted = 0 and logout_on_delete = 1 force fw cleanup */
+ fcport->deleted = 0;
+ fcport->logout_on_delete = 1;
qlt_schedule_sess_for_deletion(ea->fcport);
return;
}
@@ -410,9 +417,8 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__func__, ea->fcport->port_name);
return;
} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, ea->fcport->port_name);
- qla24xx_post_gidpn_work(vha, ea->fcport);
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -487,13 +493,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
sp->done = qla2x00_async_adisc_sp_done;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
"Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
return rval;
done_free_sp:
@@ -536,11 +544,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
}
if (fcport->last_rscn_gen != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20df,
- "%s %8phC rscn gen changed rscn %d|%d \n",
- __func__, fcport->port_name,
- fcport->last_rscn_gen, fcport->rscn_gen);
- qla24xx_post_gidpn_work(vha, fcport);
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
return;
} else if (fcport->last_login_gen != fcport->login_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e0,
@@ -787,6 +792,10 @@ qla24xx_async_gnl_sp_done(void *s, int res)
sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
sp->u.iocb_cmd.u.mbx.in_mb[2]);
+ if (res == QLA_FUNCTION_TIMEOUT)
+ return;
+
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.sp = sp;
ea.rc = res;
@@ -814,25 +823,24 @@ qla24xx_async_gnl_sp_done(void *s, int res)
(loop_id & 0x7fff));
}
- spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
INIT_LIST_HEAD(&h);
fcport = tf = NULL;
if (!list_empty(&vha->gnl.fcports))
list_splice_init(&vha->gnl.fcports, &h);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
list_del_init(&fcport->gnl_entry);
- spin_lock(&vha->hw->tgt.sess_lock);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
- spin_unlock(&vha->hw->tgt.sess_lock);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
}
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* create new fcport if fw has knowledge of new sessions */
for (i = 0; i < n; i++) {
port_id_t id;
@@ -865,6 +873,8 @@ qla24xx_async_gnl_sp_done(void *s, int res)
}
}
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ vha->gnl.sent = 0;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -884,27 +894,24 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
- spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
- if (!list_empty(&fcport->gnl_entry)) {
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
- rval = QLA_SUCCESS;
- goto done;
- }
-
- spin_lock(&vha->hw->tgt.sess_lock);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GNL;
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
- spin_unlock(&vha->hw->tgt.sess_lock);
list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
+ if (vha->gnl.sent) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ return QLA_SUCCESS;
+ }
+ vha->gnl.sent = 1;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
@@ -970,8 +977,13 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
+ sp->u.iocb_cmd.u.mbx.in_dma);
+ return;
+ }
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPDB_DONE;
ea.fcport = fcport;
@@ -1147,14 +1159,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
sp->done = qla24xx_async_gpdb_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_disc, vha, 0x20dc,
"Async-%s %8phC hndl %x opt %x\n",
sp->name, fcport->port_name, sp->handle, opt);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
done_free_sp:
@@ -1182,11 +1193,9 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
vha->fcport_count++;
ea->fcport->login_succ = 1;
- ql_dbg(ql_dbg_disc, vha, 0x20d6,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, ea->fcport->port_name,
- vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ qla24xx_sched_upd_fcport(ea->fcport);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
} else if (ea->fcport->login_succ) {
/*
* We have an existing session. A late RSCN delivery
@@ -1226,6 +1235,19 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
else
ls = pd->current_login_state & 0xf;
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+
switch (ls) {
case PDS_PRLI_COMPLETE:
__qla24xx_parse_gpdb(vha, fcport, pd);
@@ -1280,7 +1302,8 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
login = 1;
}
- if (login) {
+ if (login && fcport->login_retry) {
+ fcport->login_retry--;
if (fcport->loop_id == FC_NO_LOOP_ID) {
fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
rc = qla2x00_find_new_loop_id(vha, fcport);
@@ -1304,14 +1327,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
u16 data[2];
u64 wwn;
+ u16 sec;
- ql_dbg(ql_dbg_disc, vha, 0x20d8,
- "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8,
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->login_gen, fcport->login_retry,
- fcport->loop_id, fcport->scan_state);
+ fcport->login_gen, fcport->loop_id, fcport->scan_state);
if (fcport->scan_state != QLA_FCPORT_FOUND)
return 0;
@@ -1410,22 +1433,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
case DSC_LOGIN_FAILED:
- fcport->login_retry--;
- ql_dbg(ql_dbg_disc, vha, 0x20d0,
- "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
if (N2N_TOPO(vha->hw))
qla_chk_n2n_b4_login(vha, fcport);
else
- qla24xx_post_gidpn_work(vha, fcport);
+ qlt_schedule_sess_for_deletion(fcport);
break;
case DSC_LOGIN_COMPLETE:
/* recheck login state */
- ql_dbg(ql_dbg_disc, vha, 0x20d1,
- "%s %d %8phC post adisc\n",
- __func__, __LINE__, fcport->port_name);
- fcport->login_retry--;
data[0] = data[1] = 0;
qla2x00_post_async_adisc_work(vha, fcport, data);
break;
@@ -1435,6 +1450,22 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
qla24xx_post_prli_work(vha, fcport);
break;
+ case DSC_UPD_FCPORT:
+ sec = jiffies_to_msecs(jiffies -
+ fcport->jiffies_at_registration)/1000;
+ if (fcport->sec_since_registration < sec && sec &&
+ !(sec % 60)) {
+ fcport->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phC - Slow Rport registration(%d Sec)\n",
+ __func__, fcport->port_name, sec);
+ }
+
+ if (fcport->next_disc_state != DSC_DELETE_PEND)
+ fcport->next_disc_state = DSC_ADISC;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+
default:
break;
}
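
The new DSC_UPD_FCPORT case rate-limits its "slow registration" message to once per minute: at 120 seconds, sec = 120 and 120 % 60 == 0, so the warning fires; at 121 seconds it stays quiet. The arithmetic, sketched with hypothetical names:

    #include <linux/jiffies.h>
    #include <linux/printk.h>

    struct my_port {
            unsigned long jiffies_at_registration;
            u16 sec_since_registration;
    };

    static u16 secs_since(unsigned long start)
    {
            return jiffies_to_msecs(jiffies - start) / 1000;
    }

    static void maybe_warn_slow(struct my_port *p)
    {
            u16 sec = secs_since(p->jiffies_at_registration);

            /* Fire at most once per minute, never at sec == 0. */
            if (p->sec_since_registration < sec && sec && !(sec % 60)) {
                    p->sec_since_registration = sec;
                    pr_warn("slow rport registration: %u sec\n", sec);
            }
    }
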
@@ -1513,7 +1544,6 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -1533,7 +1563,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
fc_port_t *f, *tf;
uint32_t id = 0, mask, rid;
- unsigned long flags;
fc_port_t *fcport;
switch (ea->event) {
@@ -1548,10 +1577,16 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
return;
switch (ea->id.b.rsvd_1) {
case RSCN_PORT_ADDR:
+#define BIGSCAN 1
+#if defined(BIGSCAN) && BIGSCAN > 0
+ {
+ unsigned long flags;
fcport = qla2x00_find_fcport_by_nportid
(vha, &ea->id, 1);
- if (fcport)
- fcport->rscn_rcvd = 1;
+ if (fcport) {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
spin_lock_irqsave(&vha->work_lock, flags);
if (vha->scan.scan_flags == 0) {
@@ -1561,7 +1596,26 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
schedule_delayed_work(&vha->scan.scan_work, 5);
}
spin_unlock_irqrestore(&vha->work_lock, flags);
-
+ }
+#else
+ {
+ int rc;
+ fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+ if (!fcport) {
+ /* cable moved */
+ rc = qla24xx_post_gpnid_work(vha, &ea->id);
+ if (rc) {
+ ql_log(ql_log_warn, vha, 0xd044,
+ "RSCN GPNID work failed %06x\n",
+ ea->id.b24);
+ }
+ } else {
+ ea->fcport = fcport;
+ fcport->scan_needed = 1;
+ qla24xx_handle_rscn_event(fcport, ea);
+ }
+ }
+#endif
break;
case RSCN_AREA_ADDR:
case RSCN_DOM_ADDR:
@@ -1597,9 +1651,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
}
break;
- case FCME_GIDPN_DONE:
- qla24xx_handle_gidpn_event(vha, ea);
- break;
case FCME_GNL_DONE:
qla24xx_handle_gnl_done_event(vha, ea);
break;
@@ -1639,6 +1690,34 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
}
}
+/*
+ * RSCN(s) came in for this fcport, but they could not
+ * be consumed by the fcport at the time
+ */
+void qla_rscn_replay(fc_port_t *fcport)
+{
+ struct event_arg ea;
+
+ switch (fcport->disc_state) {
+ case DSC_DELETE_PEND:
+ return;
+ default:
+ break;
+ }
+
+ if (fcport->scan_needed) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RSCN;
+ ea.id = fcport->d_id;
+ ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
+#if defined(BIGSCAN) && BIGSCAN > 0
+ qla2x00_fcport_event_handler(fcport->vha, &ea);
+#else
+ qla24xx_post_gpnid_work(fcport->vha, &ea.id);
+#endif
+ }
+}
+
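
A sketch of the generation-counter idiom that qla_rscn_replay() pairs with: commands snapshot rscn_gen at issue time, each incoming RSCN bumps it, and a mismatch at completion means the result is stale and must be replayed rather than consumed (hypothetical names):

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct my_port {
            u32 rscn_gen;   /* bumped on every RSCN for this port */
    };

    static bool result_is_stale(u32 gen_at_issue, const struct my_port *p)
    {
            /* RSCN(s) arrived while the command was in flight. */
            return gen_at_issue != READ_ONCE(p->rscn_gen);
    }
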
static void
qla2x00_tmf_iocb_timeout(void *data)
{
@@ -1684,15 +1763,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
tm_iocb->u.tmf.data = tag;
sp->done = qla2x00_tmf_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_taskm, vha, 0x802f,
"Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
wait_for_completion(&tm_iocb->u.tmf.comp);
rval = tm_iocb->u.tmf.data;
@@ -1747,47 +1825,46 @@ int
qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
scsi_qla_host_t *vha = cmd_sp->vha;
- fc_port_t *fcport = cmd_sp->fcport;
struct srb_iocb *abt_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
+ GFP_KERNEL);
if (!sp)
goto done;
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
+ sp->qpair = cmd_sp->qpair;
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+ /* FW can send up to 2 ABTS's, each with a 20s timeout */
+ qla2x00_init_timer(sp, 42);
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
-
- if (vha->flags.qpairs_available && cmd_sp->qpair)
- abt_iocb->u.abt.req_que_no =
- cpu_to_le16(cmd_sp->qpair->req->id);
- else
- abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
+ abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
sp->done = qla24xx_abort_sp_done;
+ ql_dbg(ql_dbg_async, vha, 0x507c,
+ "Abort command issued - hdl=%x, type=%x\n",
+ cmd_sp->handle, cmd_sp->type);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_async, vha, 0x507c,
- "Abort command issued - hdl=%x, target_id=%x\n",
- cmd_sp->handle, fcport->tgt_id);
-
if (wait) {
wait_for_completion(&abt_iocb->u.abt.comp);
rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ } else {
+ goto done;
}
done_free_sp:
@@ -1803,19 +1880,17 @@ qla24xx_async_abort_command(srb_t *sp)
uint32_t handle;
fc_port_t *fcport = sp->fcport;
+ struct qla_qpair *qpair = sp->qpair;
struct scsi_qla_host *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
-
- if (vha->flags.qpairs_available && sp->qpair)
- req = sp->qpair->req;
+ struct req_que *req = qpair->req;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
if (handle == req->num_outstanding_cmds) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
@@ -1876,7 +1951,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
"%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
- ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
+ ea->sp->gen1, fcport->rscn_gen,
ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
@@ -1898,9 +1973,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC RSCN generation changed\n",
+ __func__, fcport->port_name);
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -1952,25 +2029,15 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
cid.b.rsvd_1 = 0;
ql_dbg(ql_dbg_disc, vha, 0x20ec,
- "%s %d %8phC LoopID 0x%x in use post gnl\n",
+ "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
__func__, __LINE__, ea->fcport->port_name,
- ea->fcport->loop_id);
+ ea->fcport->loop_id, cid.b24);
- if (IS_SW_RESV_ADDR(cid)) {
- set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
- ea->fcport->loop_id = FC_NO_LOOP_ID;
- } else {
- qla2x00_clear_loop_id(ea->fcport);
- }
+ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+ ea->fcport->loop_id = FC_NO_LOOP_ID;
qla24xx_post_gnl_work(vha, ea->fcport);
break;
case MBS_PORT_ID_USED:
- ql_dbg(ql_dbg_disc, vha, 0x20ed,
- "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
- __func__, __LINE__, ea->fcport->port_name,
- ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
- ea->fcport->d_id.b.al_pa);
-
lid = ea->iop[1] & 0xffff;
qlt_find_sess_invalidate_other(vha,
wwn_to_u64(ea->fcport->port_name),
@@ -1989,8 +2056,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
"%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
__func__, __LINE__, ea->fcport->port_name,
ea->fcport->d_id.b24, lid);
- qla2x00_clear_loop_id(ea->fcport);
- qla24xx_post_gidpn_work(vha, ea->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ed,
"%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
@@ -2018,26 +2083,6 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
return;
}
-void
-qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
- uint16_t *data)
-{
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
- if (data[0] == MBS_COMMAND_COMPLETE) {
- qla2x00_update_fcport(vha, fcport);
-
- return;
- }
-
- /* Retry login. */
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- else
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
-
- return;
-}
-
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
@@ -3527,6 +3572,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (rval == QLA_SUCCESS) {
qla24xx_detect_sfp(vha);
+ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
+ (ha->zio_mode == QLA_ZIO_MODE_6))
+ qla27xx_set_zio_threshold(vha,
+ ha->last_zio_threshold);
+
rval = qla2x00_set_exlogins_buffer(vha);
if (rval != QLA_SUCCESS)
goto failed;
@@ -4015,6 +4065,7 @@ next_check:
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
QLA_FW_STARTED(ha);
+ vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
}
return (rval);
@@ -4728,6 +4779,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport = NULL;
}
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+ INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
@@ -4853,19 +4905,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
*/
if (qla_tgt_mode_enabled(vha) ||
qla_dual_mode_enabled(vha)) {
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
- spin_lock_irqsave(&ha->tgt.atio_lock,
- flags);
- qlt_24xx_process_atio_queue(vha, 0);
- spin_unlock_irqrestore(
- &ha->tgt.atio_lock, flags);
- } else {
- spin_lock_irqsave(&ha->hardware_lock,
- flags);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- }
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock,
+ flags);
}
}
}
@@ -4958,6 +5001,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
(uint8_t *)ha->gid_list,
entries * sizeof(struct gid_list_info));
+ if (entries == 0) {
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ }
+ } else {
+ vha->scan.scan_retry = 0;
+ }
+
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = QLA_FCPORT_SCAN;
}
@@ -5223,20 +5279,20 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- fcport->vha = vha;
-
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
+ ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
+ __func__, fcport->port_name);
+
+ fcport->disc_state = DSC_UPD_FCPORT;
+ fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
- fcport->disc_state = DSC_LOGIN_COMPLETE;
fcport->deleted = 0;
fcport->logout_on_delete = 1;
fcport->login_retry = vha->hw->login_retry_count;
fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
- qla2x00_iidma_fcport(vha, fcport);
-
switch (vha->hw->current_topology) {
case ISP_CFG_N:
case ISP_CFG_NL:
@@ -5246,6 +5302,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ qla2x00_iidma_fcport(vha, fcport);
+
if (fcport->fc4f_nvme) {
qla_nvme_register_remote(vha, fcport);
fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -5274,6 +5332,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
if (fcport->id_changed) {
fcport->id_changed = 0;
@@ -5290,7 +5350,36 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla24xx_post_gpsc_work(vha, fcport);
}
}
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+}
+
+void qla_register_fcport_fn(struct work_struct *work)
+{
+ fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
+ u32 rscn_gen = fcport->rscn_gen;
+ u16 data[2];
+
+ if (IS_SW_RESV_ADDR(fcport->d_id))
+ return;
+
+ qla2x00_update_fcport(fcport->vha, fcport);
+
+ if (rscn_gen != fcport->rscn_gen) {
+ /* RSCN(s) came in while registration was in progress */
+ switch (fcport->next_disc_state) {
+ case DSC_DELETE_PEND:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ case DSC_ADISC:
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(fcport->vha, fcport,
+ data);
+ break;
+ default:
+ break;
+ }
+ }
}
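
qla_register_fcport_fn() is a standard work handler: container_of() recovers the port from its embedded work_struct, and RSCNs that raced with the slow registration are re-dispatched afterwards via the snapshot comparison. The wiring, sketched with hypothetical names:

    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct my_port {
            struct work_struct reg_work;
            u32 rscn_gen;
    };
    void register_port(struct my_port *p); /* hypothetical helpers */
    void replay_rscn(struct my_port *p);

    static void my_reg_fn(struct work_struct *work)
    {
            struct my_port *p = container_of(work, struct my_port, reg_work);
            u32 gen = p->rscn_gen;

            register_port(p);       /* slow, process-context work */

            if (gen != p->rscn_gen)
                    replay_rscn(p); /* RSCNs raced with registration */
    }

    static void my_port_init(struct my_port *p)
    {
            INIT_WORK(&p->reg_work, my_reg_fn);
    }
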
/*
@@ -6494,6 +6583,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ ha->link_data_rate = PORT_SPEED_UNKNOWN;
SAVE_TOPO(ha);
ha->flags.rida_fmt2 = 0;
ha->flags.n2n_ae = 0;
@@ -6622,6 +6712,20 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
return status;
}
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (!qla_tgt_mode_enabled(vha))
+ return 0;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ if (!qla_dual_mode_enabled(vha))
+ return 0;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ default:
+ break;
+ }
+
ha->isp_ops->get_flash_version(vha, req->ring);
ha->isp_ops->nvram_config(vha);
@@ -6682,7 +6786,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
* The next call disables the board
* completely.
*/
- ha->isp_ops->reset_adapter(vha);
+ qla2x00_abort_isp_cleanup(vha);
vha->flags.online = 0;
clear_bit(ISP_ABORT_RETRY,
&vha->dpc_flags);
@@ -7142,7 +7246,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
icb->firmware_options_2 &= cpu_to_le32(
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
- vha->flags.process_response_queue = 0;
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
@@ -7153,7 +7256,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
icb->firmware_options_2 |= cpu_to_le32(
(uint32_t)ha->zio_mode);
icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
- vha->flags.process_response_queue = 1;
}
if (rval) {
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4351736b2426..512c3c37b447 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -209,7 +209,8 @@ qla2x00_chip_is_down(scsi_qla_host_t *vha)
}
static inline srb_t *
-qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
+ fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
uint8_t bail;
@@ -225,7 +226,9 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->iocbs = 1;
- sp->vha = qpair->vha;
+ sp->vha = vha;
+ sp->qpair = qpair;
+ sp->cmd_type = TYPE_SRB;
INIT_LIST_HEAD(&sp->elem);
done:
@@ -246,19 +249,17 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
uint8_t bail;
+ struct qla_qpair *qpair;
QLA_VHA_MARK_BUSY(vha, bail);
if (unlikely(bail))
return NULL;
- sp = mempool_alloc(vha->hw->srb_mempool, flag);
+ qpair = vha->hw->base_qpair;
+ sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
if (!sp)
goto done;
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->cmd_type = TYPE_SRB;
- sp->iocbs = 1;
sp->vha = vha;
done:
if (!sp)
@@ -270,7 +271,7 @@ static inline void
qla2x00_rel_sp(srb_t *sp)
{
QLA_VHA_MARK_NOT_BUSY(sp->vha);
- mempool_free(sp, sp->vha->hw->srb_mempool);
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
static inline void
@@ -317,13 +318,13 @@ static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
if (qla_ini_mode_enabled(vha) &&
- (ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
+ (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
return true;
else if (qla_tgt_mode_enabled(vha) &&
- (ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
+ (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
return true;
else if (qla_dual_mode_enabled(vha) &&
- ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
+ ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
return true;
else
return false;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ac8e097419..86fb8b21aa71 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1526,12 +1526,6 @@ qla24xx_start_scsi(srb_t *sp)
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
-
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -1725,12 +1719,6 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
-
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1880,11 +1868,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
-
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_SUCCESS;
@@ -2287,8 +2270,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
- if (!sp->fcport->se_sess ||
- !sp->fcport->keep_nport_handle)
+ if (!sp->fcport->keep_nport_handle)
logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2659,7 +2641,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
struct qla_hw_data *ha = vha->hw;
int rval = QLA_SUCCESS;
void *ptr, *resp_ptr;
- dma_addr_t ptr_dma;
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -2691,7 +2672,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ptr = elsio->u.els_plogi.els_plogi_pyld =
dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
- ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
if (!elsio->u.els_plogi.els_plogi_pyld) {
rval = QLA_FUNCTION_FAILED;
@@ -3314,19 +3294,21 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
struct srb_iocb *aio = &sp->u.iocb_cmd;
scsi_qla_host_t *vha = sp->vha;
- struct req_que *req = vha->req;
+ struct req_que *req = sp->qpair->req;
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
- abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ if (sp->fcport) {
+ abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ }
abt_iocb->handle_to_abort =
cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
aio->u.abt.cmd_hndl));
- abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
- abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
- abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
abt_iocb->vp_index = vha->vp_idx;
abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
/* Send the command to the firmware */
@@ -3455,12 +3437,13 @@ qla2x00_start_sp(srb_t *sp)
int rval;
scsi_qla_host_t *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qp = sp->qpair;
void *pkt;
unsigned long flags;
rval = QLA_FUNCTION_FAILED;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- pkt = qla2x00_alloc_iocbs(vha, sp);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
if (!pkt) {
ql_log(ql_log_warn, vha, 0x700c,
"qla2x00_alloc_iocbs failed.\n");
@@ -3538,9 +3521,9 @@ qla2x00_start_sp(srb_t *sp)
}
wmb();
- qla2x00_start_iocbs(vha, ha->req_q_map[0]);
+ qla2x00_start_iocbs(vha, qp->req);
done:
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 36cbb29c84f6..d73b04e40590 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1850,11 +1850,12 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
uint16_t state_flags;
struct nvmefc_fcp_req *fd;
- uint16_t ret = 0;
+ uint16_t ret = QLA_SUCCESS;
+ uint16_t comp_status = le16_to_cpu(sts->comp_status);
iocb = &sp->u.iocb_cmd;
fcport = sp->fcport;
- iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
+ iocb->u.nvme.comp_status = comp_status;
state_flags = le16_to_cpu(sts->state_flags);
fd = iocb->u.nvme.desc;
@@ -1892,28 +1893,35 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fd->transferred_length = fd->payload_length -
le32_to_cpu(sts->residual_len);
- switch (le16_to_cpu(sts->comp_status)) {
+ if (unlikely(comp_status != CS_COMPLETE))
+ ql_log(ql_log_warn, fcport->vha, 0x5060,
+ "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
+ sp->name, sp->handle, comp_status,
+ fd->transferred_length, le32_to_cpu(sts->residual_len),
+ sts->ox_id);
+
+ /*
+ * On a transport error, fail the request (the HBA rejected
+ * it); otherwise the transport layer will handle it.
+ */
+ switch (comp_status) {
case CS_COMPLETE:
- ret = QLA_SUCCESS;
break;
- case CS_ABORTED:
+
case CS_RESET:
case CS_PORT_UNAVAILABLE:
case CS_PORT_LOGGED_OUT:
+ fcport->nvme_flag |= NVME_FLAG_RESETTING;
+ /* fall through */
+ case CS_ABORTED:
case CS_PORT_BUSY:
- ql_log(ql_log_warn, fcport->vha, 0x5060,
- "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
- sp->name, sp->handle, sts->comp_status,
- le32_to_cpu(sts->residual_len), sts->ox_id);
fd->transferred_length = 0;
iocb->u.nvme.rsp_pyld_len = 0;
ret = QLA_ABORTED;
break;
+ case CS_DATA_UNDERRUN:
+ break;
default:
- ql_log(ql_log_warn, fcport->vha, 0x5060,
- "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
- sp->name, sp->handle, sts->comp_status,
- le32_to_cpu(sts->residual_len), sts->ox_id);
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2837,6 +2845,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
case ELS_IOCB_TYPE:
case ABORT_IOCB_TYPE:
case MBX_IOCB_TYPE:
+ default:
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
sp->done(sp, res);
@@ -2847,7 +2856,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
- default:
return 1;
}
fatal:
@@ -3121,6 +3129,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
uint16_t mb[8];
struct rsp_que *rsp;
unsigned long flags;
+ bool process_atio = false;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -3181,22 +3190,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
case INTR_ATIO_QUE_UPDATE_27XX:
- case INTR_ATIO_QUE_UPDATE:{
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+ case INTR_ATIO_QUE_UPDATE:
+ process_atio = true;
break;
- }
- case INTR_ATIO_RSP_QUE_UPDATE: {
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
-
+ case INTR_ATIO_RSP_QUE_UPDATE:
+ process_atio = true;
qla24xx_process_response_queue(vha, rsp);
break;
- }
default:
ql_dbg(ql_dbg_async, vha, 0x504f,
"Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -3210,6 +3210,12 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (process_atio) {
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+ }
+
return IRQ_HANDLED;
}
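
This handler (and qla24xx_msix_default below) now only latches a flag while hardware_lock is held and drains the ATIO queue afterwards under the dedicated atio_lock, so the two locks are never nested. The shape, sketched with hypothetical names:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct my_hw {
            spinlock_t hardware_lock;
            spinlock_t atio_lock;
    };
    void process_atio_queue(struct my_hw *ha);

    static irqreturn_t my_intr(int irq, void *dev_id)
    {
            struct my_hw *ha = dev_id;
            unsigned long flags;
            bool process_atio = false;

            spin_lock_irqsave(&ha->hardware_lock, flags);
            /* ...ring processing; ATIO-type interrupts set process_atio... */
            spin_unlock_irqrestore(&ha->hardware_lock, flags);

            if (process_atio) {
                    spin_lock_irqsave(&ha->atio_lock, flags);
                    process_atio_queue(ha);
                    spin_unlock_irqrestore(&ha->atio_lock, flags);
            }
            return IRQ_HANDLED;
    }
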
@@ -3256,6 +3262,7 @@ qla24xx_msix_default(int irq, void *dev_id)
uint32_t hccr;
uint16_t mb[8];
unsigned long flags;
+ bool process_atio = false;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -3312,22 +3319,13 @@ qla24xx_msix_default(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
case INTR_ATIO_QUE_UPDATE_27XX:
- case INTR_ATIO_QUE_UPDATE:{
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+ case INTR_ATIO_QUE_UPDATE:
+ process_atio = true;
break;
- }
- case INTR_ATIO_RSP_QUE_UPDATE: {
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
-
+ case INTR_ATIO_RSP_QUE_UPDATE:
+ process_atio = true;
qla24xx_process_response_queue(vha, rsp);
break;
- }
default:
ql_dbg(ql_dbg_async, vha, 0x5051,
"Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -3338,6 +3336,12 @@ qla24xx_msix_default(int irq, void *dev_id)
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (process_atio) {
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2c6c2cd5a0d0..2f3e5075ae76 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -60,6 +60,7 @@ static struct rom_cmd {
{ MBC_GET_ADAPTER_LOOP_ID },
{ MBC_READ_SFP },
{ MBC_GET_RNID_PARAMS },
+ { MBC_GET_SET_ZIO_THRESHOLD },
};
static int is_rom_cmd(uint16_t cmd)
@@ -189,7 +190,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
goto premature_exit;
}
- ha->flags.mbox_busy = 1;
+
/* Save mailbox command for debug */
ha->mcp = mcp;
@@ -198,12 +199,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+ ha->flags.mbox_busy) {
rval = QLA_ABORTED;
- ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
goto premature_exit;
}
+ ha->flags.mbox_busy = 1;
/* Load mailbox registers. */
if (IS_P3P_TYPE(ha))
@@ -254,9 +256,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
+ ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
- ha->flags.mbox_busy = 0;
+
atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
@@ -274,6 +277,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
+ if (chip_reset != ha->chip_reset) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ atomic_dec(&ha->num_pend_mbx_stage2);
+ atomic_dec(&ha->num_pend_mbx_stage3);
+ rval = QLA_ABORTED;
+ goto premature_exit;
+ }
ql_dbg(ql_dbg_mbx, vha, 0x117a,
"cmd=%x Timeout.\n", command);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -282,7 +295,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
atomic_dec(&ha->num_pend_mbx_stage2);
atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
@@ -300,9 +315,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
+ ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
- ha->flags.mbox_busy = 0;
atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
@@ -320,7 +335,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
while (!ha->flags.mbox_int) {
if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
atomic_dec(&ha->num_pend_mbx_stage2);
rval = QLA_ABORTED;
goto premature_exit;
@@ -363,7 +381,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ha->mcp = NULL;
@@ -436,7 +457,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
* then only PCI ERR flag would be set.
* we will do premature exit for above case.
*/
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
@@ -451,8 +475,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_FUNCTION_TIMEOUT;
}
}
-
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Clean up */
ha->mcp = NULL;
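
These hunks move every transition of mbox_busy under hardware_lock, so claiming and releasing the single shared mailbox is serialized with the purge/chip-reset checks. The claim side, sketched with hypothetical fields:

    #include <linux/spinlock.h>

    struct my_hw {
            spinlock_t hardware_lock;
            unsigned int purge_mbox:1;
            unsigned int mbox_busy:1;
    };

    static int claim_mbox(struct my_hw *hw)
    {
            unsigned long flags;

            spin_lock_irqsave(&hw->hardware_lock, flags);
            if (hw->purge_mbox || hw->mbox_busy) {
                    spin_unlock_irqrestore(&hw->hardware_lock, flags);
                    return -EBUSY;          /* caller bails out early */
            }
            hw->mbox_busy = 1;              /* cleared under the same lock */
            spin_unlock_irqrestore(&hw->hardware_lock, flags);
            return 0;
    }
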
@@ -493,7 +518,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- } else if (!abort_active) {
+ } else if (current == ha->dpc_thread) {
/* call abort directly since we are in the DPC thread */
ql_dbg(ql_dbg_mbx, vha, 0x101d,
"Timeout, calling abort_isp.\n");
@@ -1486,7 +1511,6 @@ qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
struct req_que *req;
struct rsp_que *rsp;
- l = l;
vha = fcport->vha;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
@@ -3072,22 +3096,25 @@ qla24xx_abort_command(srb_t *sp)
struct scsi_qla_host *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
+ struct qla_qpair *qpair = sp->qpair;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
if (vha->flags.qpairs_available && sp->qpair)
req = sp->qpair->req;
+ else
+ return QLA_FUNCTION_FAILED;
if (ql2xasynctmfenable)
return qla24xx_async_abort_command(sp);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
if (handle == req->num_outstanding_cmds) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
@@ -3762,10 +3789,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- if (IS_CNA_CAPABLE(vha->hw))
- mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
- else
- mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
+ mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_3|MBX_1|MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 20d9dc39f0fb..7e78e7eff783 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -506,7 +506,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return -EBUSY;
/* Alloc SRB structure */
- sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
if (!sp)
return -EBUSY;
@@ -607,7 +607,7 @@ void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
int rval;
- if (!test_bit(ABORT_ISP_ACTIVE, &sp->vha->dpc_flags)) {
+ if (ha->flags.fw_started) {
rval = ha->isp_ops->abort_command(sp);
if (!rval && !qla_nvme_wait_on_command(sp))
ql_log(ql_log_warn, NULL, 0x2112,
@@ -660,9 +660,6 @@ void qla_nvme_delete(struct scsi_qla_host *vha)
__func__, fcport);
nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
- init_completion(&fcport->nvme_del_done);
- nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
- wait_for_completion(&fcport->nvme_del_done);
}
if (vha->nvme_local_port) {
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de2bc78449e7..121e18b3b9f8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3699,8 +3699,8 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for pending cmds (physical and virtual) to complete */
- if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
- WAIT_HOST) == QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ WAIT_HOST)) {
ql_dbg(ql_dbg_init, vha, 0x00b3,
"Done wait for "
"pending commands.\n");
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 42b8f0d3e580..8794e54f43a9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -14,6 +14,8 @@
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
+#include <linux/refcount.h>
+
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -204,7 +206,7 @@ int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
- "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
+ "Default is 1 - Issue TM IOCBs via mailbox mechanism.");
int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
@@ -391,12 +393,14 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
struct qla_hw_data *ha = vha->hw;
rsp->qpair = ha->base_qpair;
rsp->req = req;
+ ha->base_qpair->hw = ha;
ha->base_qpair->req = req;
ha->base_qpair->rsp = rsp;
ha->base_qpair->vha = vha;
ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
+ ha->base_qpair->srb_mempool = ha->srb_mempool;
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
@@ -1012,7 +1016,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
else
goto qc24_target_busy;
- sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
if (!sp)
goto qc24_host_busy;
@@ -1212,10 +1216,14 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-static void
+static int
sp_get(struct srb *sp)
{
- atomic_inc(&sp->ref_count);
+ if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
+ /* ref_count has already dropped to zero; sp is being freed */
+ return ENXIO;
+ else
+ return 0;
}
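
sp_get() is now a conditional reference: refcount_inc_not_zero() refuses to resurrect an object whose count has already dropped to zero. The idiom, sketched with kernel-style negative errno (the driver code above deliberately returns a positive value, since callers only test for non-zero):

    #include <linux/refcount.h>

    struct my_sp {
            refcount_t ref_count;
    };

    static int my_sp_get(struct my_sp *sp)
    {
            /* Succeed only while another reference keeps sp alive. */
            if (!refcount_inc_not_zero(&sp->ref_count))
                    return -ENXIO;  /* already headed for free */
            return 0;
    }
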
#define ISP_REG_DISCONNECT 0xffffffffU
@@ -1273,38 +1281,51 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int rval, wait = 0;
struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
"PCI/Register disconnect, exiting.\n");
return FAILED;
}
- if (!CMD_SP(cmd))
- return SUCCESS;
ret = fc_block_scsi_eh(cmd);
if (ret != 0)
return ret;
ret = SUCCESS;
- id = cmd->device->id;
- lun = cmd->device->lun;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
sp = (srb_t *) CMD_SP(cmd);
- if (!sp) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (!sp)
+ return SUCCESS;
+
+ qpair = sp->qpair;
+ if (!qpair)
+ return SUCCESS;
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ if (!CMD_SP(cmd)) {
+ /* There is a chance an interrupt could clear
+ * the pointer as part of done & free. */
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return SUCCESS;
}
+ if (sp_get(sp)) {
+ /* ref_count is already 0 */
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return SUCCESS;
+ }
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ id = cmd->device->id;
+ lun = cmd->device->lun;
+
ql_dbg(ql_dbg_taskm, vha, 0x8002,
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
vha->host_no, id, lun, sp, cmd, sp->handle);
/* Get a reference to the sp and drop the lock.*/
- sp_get(sp);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = ha->isp_ops->abort_command(sp);
if (rval) {
if (rval == QLA_FUNCTION_PARAMETER_ERROR)
@@ -1320,14 +1341,29 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
wait = 1;
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
- sp->done(sp, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ /*
+ * Clear the slot in the outstanding_cmds array if we can't find the
+ * command to reclaim the resources.
+ */
+ if (rval == QLA_FUNCTION_PARAMETER_ERROR)
+ vha->req->outstanding_cmds[sp->handle] = NULL;
+
+ /*
+ * sp->done() will drop the ref_count;
+ * sp_get() took an extra reference above.
+ */
+ sp->done(sp, DID_RESET << 16);
/* Did the command return during mailbox execution? */
if (ret == FAILED && !CMD_SP(cmd))
ret = SUCCESS;
+ if (!CMD_SP(cmd))
+ wait = 0;
+
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
/* Wait for the command to be returned. */
if (wait) {
if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
@@ -1721,7 +1757,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
struct req_que *req;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
- uint8_t trace = 0;
if (!ha->req_q_map)
return;
@@ -1731,64 +1766,68 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
- if (sp->cmd_type == TYPE_SRB) {
+ switch (sp->cmd_type) {
+ case TYPE_SRB:
if (sp->type == SRB_NVME_CMD ||
sp->type == SRB_NVME_LS) {
- sp_get(sp);
- spin_unlock_irqrestore(qp->qp_lock_ptr,
- flags);
- qla_nvme_abort(ha, sp, res);
- spin_lock_irqsave(qp->qp_lock_ptr,
- flags);
+ if (!sp_get(sp)) {
+ /* got sp */
+ spin_unlock_irqrestore
+ (qp->qp_lock_ptr,
+ flags);
+ qla_nvme_abort(ha, sp, res);
+ spin_lock_irqsave
+ (qp->qp_lock_ptr, flags);
+ }
} else if (GET_CMD_SP(sp) &&
!ha->flags.eeh_busy &&
(!test_bit(ABORT_ISP_ACTIVE,
&vha->dpc_flags)) &&
+ !qla2x00_isp_reg_stat(ha) &&
(sp->type == SRB_SCSI_CMD)) {
/*
- * Don't abort commands in
- * adapter during EEH
- * recovery as it's not
+ * Don't abort commands in adapter
+ * during EEH recovery as it's not
* accessible/responding.
*
- * Get a reference to the sp
- * and drop the lock. The
- * reference ensures this
- * sp->done() call and not the
- * call in qla2xxx_eh_abort()
- * ends the SCSI command (with
- * result 'res').
- */
- sp_get(sp);
- spin_unlock_irqrestore(qp->qp_lock_ptr,
- flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
- spin_lock_irqsave(qp->qp_lock_ptr,
- flags);
- /*
- * Get rid of extra reference
- * if immediate exit from
- * ql2xxx_eh_abort
+ * Get a reference to the sp and drop
+ * the lock. The reference ensures this
+ * sp->done() call and not the call in
+ * qla2xxx_eh_abort() ends the SCSI cmd
+ * (with result 'res').
*/
- if (status == FAILED &&
- (qla2x00_isp_reg_stat(ha)))
- atomic_dec(
- &sp->ref_count);
+ if (!sp_get(sp)) {
+ spin_unlock_irqrestore
+ (qp->qp_lock_ptr, flags);
+ status = qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
+ spin_lock_irqsave
+ (qp->qp_lock_ptr, flags);
+ }
}
sp->done(sp, res);
- } else {
+ break;
+ case TYPE_TGT_CMD:
if (!vha->hw->tgt.tgt_ops || !tgt ||
qla_ini_mode_enabled(vha)) {
- if (!trace)
- ql_dbg(ql_dbg_tgt_mgt,
- vha, 0xf003,
- "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
- vha->dpc_flags);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+ vha->dpc_flags);
continue;
}
cmd = (struct qla_tgt_cmd *)sp;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ break;
+ case TYPE_TGT_TMCMD:
+ /*
+ * Currently, only the ABTS response goes on
+ * the outstanding_cmds[] array.
+ */
+ ha->tgt.tgt_ops->free_mcmd(
+ (struct qla_tgt_mgmt_cmd *)sp);
+ break;
+ default:
+ break;
}
}
}
@@ -2708,7 +2747,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
struct scsi_qla_host, iocb_work);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- int i = 20;
+ int i = 2;
unsigned long flags;
if (test_bit(UNLOADING, &base_vha->dpc_flags))
@@ -2819,6 +2858,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
atomic_set(&ha->num_pend_mbx_stage1, 0);
atomic_set(&ha->num_pend_mbx_stage2, 0);
atomic_set(&ha->num_pend_mbx_stage3, 0);
+ atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
+ ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -4249,29 +4290,34 @@ static void
qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
{
u32 temp;
+ struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
*ret_cnt = FW_DEF_EXCHANGES_CNT;
if (max_cnt > vha->hw->max_exchg)
max_cnt = vha->hw->max_exchg;
if (qla_ini_mode_enabled(vha)) {
- if (ql2xiniexchg > max_cnt)
- ql2xiniexchg = max_cnt;
+ if (vha->ql2xiniexchg > max_cnt)
+ vha->ql2xiniexchg = max_cnt;
+
+ if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = vha->ql2xiniexchg;
- if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
- *ret_cnt = ql2xiniexchg;
} else if (qla_tgt_mode_enabled(vha)) {
- if (ql2xexchoffld > max_cnt)
- ql2xexchoffld = max_cnt;
+ if (vha->ql2xexchoffld > max_cnt) {
+ vha->ql2xexchoffld = max_cnt;
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
+ }
- if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
- *ret_cnt = ql2xexchoffld;
+ if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = vha->ql2xexchoffld;
} else if (qla_dual_mode_enabled(vha)) {
- temp = ql2xiniexchg + ql2xexchoffld;
+ temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
if (temp > max_cnt) {
- ql2xiniexchg -= (temp - max_cnt)/2;
- ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
+ vha->ql2xiniexchg -= (temp - max_cnt)/2;
+ vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
temp = max_cnt;
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
}
if (temp > FW_DEF_EXCHANGES_CNT)
@@ -4309,6 +4355,12 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
if (totsz != ha->exchoffld_size) {
qla2x00_free_exchoffld_buffer(ha);
+ if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
+ ha->exchoffld_size = 0;
+ ha->flags.exchoffld_enabled = 0;
+ return QLA_SUCCESS;
+ }
+
ha->exchoffld_size = totsz;
ql_log(ql_log_info, vha, 0xd016,
@@ -4341,6 +4393,15 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
return -ENOMEM;
}
+ } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
+ /* pathological case */
+ qla2x00_free_exchoffld_buffer(ha);
+ ha->exchoffld_size = 0;
+ ha->flags.exchoffld_enabled = 0;
+ ql_log(ql_log_info, vha, 0xd016,
+ "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
+ ha->exchoffld_size, actual_cnt, size, totsz);
+ return 0;
}
/* Now configure the dma buffer */
@@ -4356,7 +4417,7 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
if (qla_ini_mode_enabled(vha))
icb->exchange_count = 0;
else
- icb->exchange_count = cpu_to_le16(ql2xexchoffld);
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
}
return rval;
@@ -4564,6 +4625,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
vha->host_no = host->host_no;
vha->hw = ha;
+ vha->qlini_mode = ql2x_ini_mode;
+ vha->ql2xexchoffld = ql2xexchoffld;
+ vha->ql2xiniexchg = ql2xiniexchg;
+
INIT_LIST_HEAD(&vha->vp_fcports);
INIT_LIST_HEAD(&vha->work_list);
INIT_LIST_HEAD(&vha->list);
@@ -4579,7 +4644,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
- spin_lock_init(&vha->gnl.fcports_lock);
init_waitqueue_head(&vha->fcport_waitQ);
init_waitqueue_head(&vha->vref_waitq);
@@ -4710,7 +4774,6 @@ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
-qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
@@ -4761,16 +4824,25 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
return qla2x00_post_work(vha, e);
}
-int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+void qla24xx_sched_upd_fcport(fc_port_t *fcport)
{
- struct qla_work_evt *e;
+ unsigned long flags;
- e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT);
- if (!e)
- return QLA_FUNCTION_FAILED;
+ if (IS_SW_RESV_ADDR(fcport->d_id))
+ return;
- e->u.fcport.fcport = fcport;
- return qla2x00_post_work(vha, e);
+ spin_lock_irqsave(&fcport->vha->work_lock, flags);
+ if (fcport->disc_state == DSC_UPD_FCPORT) {
+ spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+ return;
+ }
+ fcport->jiffies_at_registration = jiffies;
+ fcport->sec_since_registration = 0;
+ fcport->next_disc_state = DSC_DELETED;
+ fcport->disc_state = DSC_UPD_FCPORT;
+ spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+
+ queue_work(system_unbound_wq, &fcport->reg_work);
}
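
qla24xx_sched_upd_fcport() above guarantees the registration work is queued at most once: the DSC_UPD_FCPORT state is tested and set under work_lock before queue_work() runs. The same shape in a self-contained userspace sketch, where a pthread mutex stands in for the spinlock and every name is illustrative rather than driver code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
	pthread_mutex_t lock;
	bool update_pending;	/* plays the role of disc_state == DSC_UPD_FCPORT */
};

static void sched_update(struct port *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->update_pending) {	/* already queued: drop the request */
		pthread_mutex_unlock(&p->lock);
		return;
	}
	p->update_pending = true;	/* claim the slot before unlocking */
	pthread_mutex_unlock(&p->lock);

	printf("queue registration work\n");	/* stands in for queue_work() */
}

int main(void)
{
	struct port p = { .lock = PTHREAD_MUTEX_INITIALIZER };

	sched_update(&p);	/* queues the work */
	sched_update(&p);	/* deduplicated: prints nothing */
	return 0;
}
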
static
@@ -4808,10 +4880,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
+ if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
fcport->fc4_type = FC4_TYPE_FCP_SCSI;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
+ if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
fcport->fc4_type = FC4_TYPE_OTHER;
fcport->fc4f_nvme = FC4_TYPE_NVME;
}
@@ -4990,19 +5062,12 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla2x00_async_adisc(vha, e->u.logio.fcport,
e->u.logio.data);
break;
- case QLA_EVT_ASYNC_ADISC_DONE:
- qla2x00_async_adisc_done(vha, e->u.logio.fcport,
- e->u.logio.data);
- break;
case QLA_EVT_UEVENT:
qla2x00_uevent_emit(vha, e->u.uevent.code);
break;
case QLA_EVT_AENFX:
qlafx00_process_aen(vha, e);
break;
- case QLA_EVT_GIDPN:
- qla24xx_async_gidpn(vha, e->u.fcport.fcport);
- break;
case QLA_EVT_GPNID:
qla24xx_async_gpnid(vha, &e->u.gpnid.id);
break;
@@ -5025,9 +5090,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GPSC:
qla24xx_async_gpsc(vha, e->u.fcport.fcport);
break;
- case QLA_EVT_UPD_FCPORT:
- qla2x00_update_fcport(vha, e->u.fcport.fcport);
- break;
case QLA_EVT_GNL:
qla24xx_async_gnl(vha, e->u.fcport.fcport);
break;
@@ -6041,12 +6103,29 @@ qla2x00_do_dpc(void *data)
if (test_and_clear_bit
(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
!test_bit(UNLOADING, &base_vha->dpc_flags)) {
+ bool do_reset = true;
+
+ switch (base_vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_ENABLED:
+ break;
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (!qla_tgt_mode_enabled(base_vha) &&
+ !ha->flags.fw_started)
+ do_reset = false;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ if (!qla_dual_mode_enabled(base_vha) &&
+ !ha->flags.fw_started)
+ do_reset = false;
+ break;
+ default:
+ break;
+ }
- ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
- "ISP abort scheduled.\n");
- if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+ if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
-
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+ "ISP abort scheduled.\n");
if (ha->isp_ops->abort_isp(base_vha)) {
/* failed. retry later */
set_bit(ISP_ABORT_NEEDED,
@@ -6054,10 +6133,9 @@ qla2x00_do_dpc(void *data)
}
clear_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
+ "ISP abort end.\n");
}
-
- ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
- "ISP abort end.\n");
}
if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
@@ -6183,17 +6261,28 @@ intr_on_check:
mutex_unlock(&ha->mq_lock);
}
- if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) {
+ if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
+ &base_vha->dpc_flags)) {
ql_log(ql_log_info, base_vha, 0xffffff,
"nvme: SET ZIO Activity exchange threshold to %d.\n",
ha->nvme_last_rptd_aen);
- if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) {
+ if (qla27xx_set_zio_threshold(base_vha,
+ ha->nvme_last_rptd_aen)) {
ql_log(ql_log_info, base_vha, 0xffffff,
- "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
- ha->nvme_last_rptd_aen);
+ "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
+ ha->nvme_last_rptd_aen);
}
}
+ if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
+ &base_vha->dpc_flags)) {
+ ql_log(ql_log_info, base_vha, 0xffffff,
+ "SET ZIO Activity exchange threshold to %d.\n",
+ ha->last_zio_threshold);
+ qla27xx_set_zio_threshold(base_vha,
+ ha->last_zio_threshold);
+ }
+
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -6406,13 +6495,24 @@ qla2x00_timer(struct timer_list *t)
* FC-NVME
* see if the active AEN count has changed from what was last reported.
*/
- if (!vha->vp_idx &&
- atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
- ha->zio_mode == QLA_ZIO_MODE_6) {
+ if (!vha->vp_idx && (atomic_read(&ha->nvme_active_aen_cnt) !=
+ ha->nvme_last_rptd_aen) && ha->zio_mode == QLA_ZIO_MODE_6) {
ql_log(ql_log_info, vha, 0x3002,
- "nvme: Sched: Set ZIO exchange threshold to %d.\n",
- ha->nvme_last_rptd_aen);
+ "nvme: Sched: Set ZIO exchange threshold to %d.\n",
+ ha->nvme_last_rptd_aen);
ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
+ set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+ start_dpc++;
+ }
+
+ if (!vha->vp_idx &&
+ (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
+ (ha->zio_mode == QLA_ZIO_MODE_6) &&
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
+ ql_log(ql_log_info, vha, 0x3002,
+ "Sched: Set ZIO exchange threshold to %d.\n",
+ ha->last_zio_threshold);
+ ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
start_dpc++;
}
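
Both threshold updates above use the same producer/consumer handoff: the timer records the new value, set_bit()s a flag in dpc_flags and bumps start_dpc, while the DPC thread consumes the event with test_and_clear_bit() so each change is applied exactly once. The idiom in isolation, as a kernel-style sketch with an illustrative flag name:

#include <linux/bitops.h>

#define EXAMPLE_UPDATE_NEEDED	0	/* bit number, like SET_ZIO_THRESHOLD_NEEDED */

static unsigned long example_flags;

static void example_timer(void)		/* producer */
{
	set_bit(EXAMPLE_UPDATE_NEEDED, &example_flags);
	/* ...then wake the worker, as qla2xxx_wake_dpc() does */
}

static void example_dpc(void)		/* consumer */
{
	if (test_and_clear_bit(EXAMPLE_UPDATE_NEEDED, &example_flags)) {
		/* apply the latest value exactly once per event */
	}
}
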
@@ -6839,8 +6939,6 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
"The device failed to resume I/O from slot/link_reset.\n");
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
ha->flags.eeh_busy = 0;
}
@@ -6946,6 +7044,9 @@ qla2x00_module_init(void)
if (ql2xextended_error_logging == 1)
ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+ if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
+ qla_insert_tgt_attrs();
+
qla2xxx_transport_template =
fc_attach_transport(&qla2xxx_transport_functions);
if (!qla2xxx_transport_template) {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8c811b251d42..39828207bc1d 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -141,6 +141,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *,
struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
uint16_t);
+static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
+static inline uint32_t qlt_make_handle(struct qla_qpair *);
/*
* Global Variables
@@ -541,7 +543,6 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
qlt_response_pkt(host, rsp, pkt);
break;
}
-
default:
qlt_response_pkt(vha, rsp, pkt);
break;
@@ -600,14 +601,9 @@ void qla2x00_async_nack_sp_done(void *s, int res)
sp->fcport->login_succ = 1;
vha->fcport_count++;
-
- ql_dbg(ql_dbg_disc, vha, 0x20f3,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__,
- sp->fcport->port_name,
- vha->fcport_count);
- sp->fcport->disc_state = DSC_UPD_FCPORT;
- qla24xx_post_upd_fcport_work(vha, sp->fcport);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ qla24xx_sched_upd_fcport(sp->fcport);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
} else {
sp->fcport->login_retry = 0;
sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -1230,11 +1226,12 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
unsigned long flags;
+ u16 sec;
- if (sess->disc_state == DSC_DELETE_PEND)
+ switch (sess->disc_state) {
+ case DSC_DELETE_PEND:
return;
-
- if (sess->disc_state == DSC_DELETED) {
+ case DSC_DELETED:
if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
wake_up_all(&tgt->waitQ);
if (sess->vha->fcport_count == 0)
@@ -1243,11 +1240,26 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
return;
+ break;
+ case DSC_UPD_FCPORT:
+ /*
+		 * This port is not done reporting to the upper
+		 * layer; let it finish.
+ */
+ sess->next_disc_state = DSC_DELETE_PEND;
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration)/1000;
+ if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ "%s %8phC : Slow Rport registration(%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+ return;
+ default:
+ break;
}
- if (sess->deleted == QLA_SESS_DELETED)
- sess->logout_on_delete = 0;
-
spin_lock_irqsave(&sess->vha->work_lock, flags);
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
@@ -1261,7 +1273,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
qla24xx_chk_fcp_state(sess);
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
- "Scheduling sess %p for deletion\n", sess);
+ "Scheduling sess %p for deletion %8phC\n",
+ sess, sess->port_name);
INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
@@ -1479,27 +1492,14 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
struct qla_hw_data *ha = tgt->ha;
unsigned long flags;
+ mutex_lock(&ha->optrom_mutex);
mutex_lock(&qla_tgt_mutex);
- if (!vha->fc_vport) {
- struct Scsi_Host *sh = vha->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
- bool npiv_vports;
-
- spin_lock_irqsave(sh->host_lock, flags);
- npiv_vports = (fc_host->npiv_vports_inuse);
- spin_unlock_irqrestore(sh->host_lock, flags);
-
- if (npiv_vports) {
- mutex_unlock(&qla_tgt_mutex);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
- "NPIV is in use. Can not stop target\n");
- return -EPERM;
- }
- }
+
if (tgt->tgt_stop || tgt->tgt_stopped) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
"Already in tgt->tgt_stop or tgt_stopped state\n");
mutex_unlock(&qla_tgt_mutex);
+ mutex_unlock(&ha->optrom_mutex);
return -EPERM;
}
@@ -1537,6 +1537,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
/* Wait for sessions to clear out (just in case) */
wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
+ mutex_unlock(&ha->optrom_mutex);
+
return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
@@ -1566,6 +1568,15 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
tgt);
+
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->flags.online = 1;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+ default:
+ break;
+ }
}
EXPORT_SYMBOL(qlt_stop_phase2);
@@ -1715,6 +1726,94 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
qla2x00_start_iocbs(vha, qpair->req);
}
+static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ struct scsi_qla_host *vha = mcmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct abts_resp_to_24xx *resp;
+ uint32_t f_ctl, h;
+ uint8_t *p;
+ int rc;
+ struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
+ struct qla_qpair *qpair = mcmd->qpair;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe006,
+ "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
+ ha, mcmd->fc_tm_rsp);
+
+ rc = qlt_check_reserve_free_req(qpair, 1);
+ if (rc) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+ "qla_target(%d): %s failed: unable to allocate request packet\n",
+ vha->vp_idx, __func__);
+ return -EAGAIN;
+ }
+
+ resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
+ memset(resp, 0, sizeof(*resp));
+
+ h = qlt_make_handle(qpair);
+ if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+ /*
+		 * CTIO type 7 from the firmware doesn't provide a way to
+		 * know the initiator's LOOP ID, hence we can't find the
+		 * session and, therefore, the command.
+ */
+ return -EAGAIN;
+ } else {
+ qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
+ }
+
+ resp->handle = MAKE_HANDLE(qpair->req->id, h);
+ resp->entry_type = ABTS_RESP_24XX;
+ resp->entry_count = 1;
+ resp->nport_handle = abts->nport_handle;
+ resp->vp_index = vha->vp_idx;
+ resp->sof_type = abts->sof_type;
+ resp->exchange_address = abts->exchange_address;
+ resp->fcp_hdr_le = abts->fcp_hdr_le;
+ f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+ F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+ F_CTL_SEQ_INITIATIVE);
+ p = (uint8_t *)&f_ctl;
+ resp->fcp_hdr_le.f_ctl[0] = *p++;
+ resp->fcp_hdr_le.f_ctl[1] = *p++;
+ resp->fcp_hdr_le.f_ctl[2] = *p;
+
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+
+ resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+ if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+ resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+ resp->payload.ba_acct.low_seq_cnt = 0x0000;
+ resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+ resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+ } else {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+ resp->payload.ba_rjt.reason_code =
+ BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+ /* Other bytes are zero */
+ }
+
+ vha->vha_tgt.qla_tgt->abts_resp_expected++;
+
+ /* Memory Barrier */
+ wmb();
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+
+ return rc;
+}
+
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
@@ -1742,6 +1841,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
}
resp->entry_type = ABTS_RESP_24XX;
+ resp->handle = QLA_TGT_SKIP_HANDLE;
resp->entry_count = 1;
resp->nport_handle = abts->nport_handle;
resp->vp_index = vha->vp_idx;
@@ -1799,15 +1899,13 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
- struct abts_resp_from_24xx_fw *entry)
+ struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
struct ctio7_to_24xx *ctio;
+ u16 tmp;
+ struct abts_recv_from_24xx *entry;
- ql_dbg(ql_dbg_tgt, vha, 0xe007,
- "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
-
- ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
- vha->hw->base_qpair, NULL);
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
if (ctio == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe04b,
"qla_target(%d): %s failed: unable to allocate "
@@ -1815,6 +1913,13 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
return;
}
+ if (mcmd)
+ /* abts from remote port */
+ entry = &mcmd->orig_iocb.abts;
+ else
+ /* abts from this driver. */
+ entry = (struct abts_recv_from_24xx *)pkt;
+
/*
	 * On entry we have the firmware's response to an ABTS response
	 * we generated earlier, so the ID fields in it are reversed.
@@ -1826,56 +1931,48 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = vha->vp_idx;
- ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
- ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
- ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
ctio->exchange_addr = entry->exchange_addr_to_abort;
- ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
- CTIO7_FLAGS_TERMINATE);
- ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
+ tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
- /* Memory Barrier */
- wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (mcmd) {
+ ctio->initiator_id[0] = entry->fcp_hdr_le.s_id[0];
+ ctio->initiator_id[1] = entry->fcp_hdr_le.s_id[1];
+ ctio->initiator_id[2] = entry->fcp_hdr_le.s_id[2];
- qlt_24xx_send_abts_resp(vha->hw->base_qpair,
- (struct abts_recv_from_24xx *)entry,
- FCP_TMF_CMPL, true);
-}
-
-static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
-{
- struct qla_tgt_sess_op *op;
- struct qla_tgt_cmd *cmd;
- unsigned long flags;
+ if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
+ tmp |= (mcmd->abort_io_attr << 9);
+ else if (qpair->retry_term_cnt & 1)
+ tmp |= (0x4 << 9);
+ } else {
+ ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+ ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+ ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
- if (tag == op->atio.u.isp24.exchange_addr) {
- op->aborted = true;
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- return 1;
- }
+ if (qpair->retry_term_cnt & 1)
+ tmp |= (0x4 << 9);
}
+ ctio->u.status1.flags = cpu_to_le16(tmp);
+ ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
- list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
- if (tag == op->atio.u.isp24.exchange_addr) {
- op->aborted = true;
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- return 1;
- }
- }
+ ql_dbg(ql_dbg_tgt, vha, 0xe007,
+ "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
+ le16_to_cpu(ctio->u.status1.flags),
+ le16_to_cpu(ctio->u.status1.ox_id),
+ (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
- list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
- if (tag == cmd->atio.u.isp24.exchange_addr) {
- cmd->aborted = 1;
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- return 1;
- }
- }
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+ /* Memory Barrier */
+ wmb();
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+
+ if (mcmd)
+ qlt_build_abts_resp_iocb(mcmd);
+ else
+ qlt_24xx_send_abts_resp(qpair,
+ (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
- return 0;
}
/* drop cmds for the given lun
@@ -1970,9 +2067,8 @@ static void qlt_do_tmr_work(struct work_struct *work)
spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
switch (mcmd->tmr_func) {
case QLA_TGT_ABTS:
- qlt_24xx_send_abts_resp(mcmd->qpair,
- &mcmd->orig_iocb.abts,
- FCP_TMF_REJECTED, false);
+ mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
+ qlt_build_abts_resp_iocb(mcmd);
break;
case QLA_TGT_LUN_RESET:
case QLA_TGT_CLEAR_TS:
@@ -2007,12 +2103,6 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_tgt_mgmt_cmd *mcmd;
struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
- if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
- /* send TASK_ABORT response immediately */
- qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
- return 0;
- }
-
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
vha->vp_idx, abts->exchange_addr_to_abort);
@@ -2025,7 +2115,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
return -ENOMEM;
}
memset(mcmd, 0, sizeof(*mcmd));
-
+ mcmd->cmd_type = TYPE_TGT_TMCMD;
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
mcmd->reset_count = ha->base_qpair->chip_reset;
@@ -2047,6 +2137,8 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
if (abort_cmd && abort_cmd->qpair) {
mcmd->qpair = abort_cmd->qpair;
mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
+ mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
+ mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
}
}
@@ -2264,6 +2356,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
struct qla_qpair *qpair = mcmd->qpair;
+ bool free_mcmd = true;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
"TM response mcmd (%p) status %#x state %#x",
@@ -2302,10 +2395,10 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
&mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
}
} else {
- if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
- qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
- mcmd->fc_tm_rsp, false);
- else
+ if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
+ qlt_build_abts_resp_iocb(mcmd);
+ free_mcmd = false;
+ } else
qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
mcmd->fc_tm_rsp);
}
@@ -2317,7 +2410,9 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
* descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
* qlt_xmit_tm_rsp() returns here..
*/
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ if (free_mcmd)
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
@@ -2330,7 +2425,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
BUG_ON(cmd->sg_cnt == 0);
prm->sg = (struct scatterlist *)cmd->sg;
- prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
+ prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
cmd->sg_cnt, cmd->dma_data_direction);
if (unlikely(prm->seg_cnt == 0))
goto out_err;
@@ -2357,7 +2452,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
if (cmd->prot_sg_cnt) {
prm->prot_sg = cmd->prot_sg;
- prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
+ prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (unlikely(prm->prot_seg_cnt == 0))
@@ -2392,12 +2487,12 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
qpair = cmd->qpair;
- pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
+ dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
cmd->dma_data_direction);
cmd->sg_mapped = 0;
if (cmd->prot_sg_cnt)
- pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+ dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (!cmd->ctx)
@@ -3289,7 +3384,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+ spin_lock(&cmd->cmd_lock);
cmd->cmd_sent_to_fw = 1;
+ spin_unlock(&cmd->cmd_lock);
+ cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
/* Memory Barrier */
wmb();
@@ -3367,7 +3465,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
qlt_load_data_segments(&prm);
cmd->state = QLA_TGT_STATE_NEED_DATA;
+ spin_lock(&cmd->cmd_lock);
cmd->cmd_sent_to_fw = 1;
+ spin_unlock(&cmd->cmd_lock);
+ cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
/* Memory Barrier */
wmb();
@@ -3825,10 +3926,10 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
/* ha->hardware_lock supposed to be held on entry */
-static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
struct rsp_que *rsp, uint32_t handle, void *ctio)
{
- struct qla_tgt_cmd *cmd = NULL;
+ void *cmd = NULL;
struct req_que *req;
int qid = GET_QID(handle);
uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
@@ -3857,7 +3958,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
- cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
+ cmd = (void *) req->outstanding_cmds[h];
if (unlikely(cmd == NULL)) {
ql_dbg(ql_dbg_async, vha, 0xe053,
"qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
@@ -3930,7 +4031,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
return;
}
- cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
+ cmd = (struct qla_tgt_cmd *)qlt_ctio_to_cmd(vha, rsp, handle, ctio);
if (cmd == NULL)
return;
@@ -3941,12 +4042,20 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
if (unlikely(status != CTIO_SUCCESS)) {
switch (status & 0xFFFF) {
+ case CTIO_INVALID_RX_ID:
+ if (printk_ratelimit())
+ dev_info(&vha->hw->pdev->dev,
+ "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
+ vha->vp_idx, cmd->atio.u.isp24.attr,
+ ((cmd->ctio_flags >> 9) & 0xf),
+ cmd->ctio_flags);
+
+ break;
case CTIO_LIP_RESET:
case CTIO_TARGET_RESET:
case CTIO_ABORTED:
/* driver request abort via Terminate exchange */
case CTIO_TIMEOUT:
- case CTIO_INVALID_RX_ID:
/* They are OK */
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
"qla_target(%d): CTIO with "
@@ -3973,7 +4082,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
* Session is already logged out, but we need
* to notify initiator, who's not aware of this
*/
- cmd->sess->logout_on_delete = 0;
cmd->sess->send_els_logo = 1;
ql_dbg(ql_dbg_disc, vha, 0x20f8,
"%s %d %8phC post del sess\n",
@@ -4711,6 +4819,12 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
sess = qlt_find_sess_invalidate_other(vha, wwn,
port_id, loop_id, &conflict_sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
}
if (IS_SW_RESV_ADDR(port_id)) {
@@ -4752,6 +4866,32 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
goto out;
}
+ if (sess->disc_state == DSC_UPD_FCPORT) {
+ u16 sec;
+
+ /*
+		 * Remote port registration is still going on from a
+		 * previous login. Allow it to finish before we
+ * accept the new login.
+ */
+ sess->next_disc_state = DSC_DELETE_PEND;
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration) / 1000;
+ if (sess->sec_since_registration < sec && sec &&
+ !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC - Slow Rport registration (%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+
+ if (!conflict_sess)
+ kmem_cache_free(qla_tgt_plogi_cachep, pla);
+
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
sess->d_id = port_id;
sess->login_gen++;
@@ -4910,6 +5050,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
if (sess != NULL) {
bool delete = false;
+ int sec;
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
switch (sess->fw_login_state) {
case DSC_LS_PLOGI_PEND:
@@ -4922,9 +5063,24 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
switch (sess->disc_state) {
+ case DSC_UPD_FCPORT:
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+ flags);
+
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration)/1000;
+ if (sess->sec_since_registration < sec && sec &&
+ !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ "%s %8phC : Slow Rport registration(%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ return 0;
+
case DSC_LOGIN_PEND:
case DSC_GPDB:
- case DSC_UPD_FCPORT:
case DSC_LOGIN_COMPLETE:
case DSC_ADISC:
delete = false;
@@ -5608,6 +5764,101 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
tgt->atio_irq_cmd_count--;
}
+/*
+ * qpair lock is assumed to be held
+ * rc = 0 : send terminate & ABTS response
+ * rc != 0: do not send terminate & ABTS response
+ */
+static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
+ struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rc = 0;
+
+ /*
+	 * Detect an unresolved exchange. If the same ABTS is unable
+	 * to terminate an existing command and keeps looping between
+	 * FW & driver, then force a FW dump. Within one jiffy we
+	 * should see multiple loops.
+ */
+ if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
+ qpair->retry_term_jiff == jiffies) {
+ /* found existing exchange */
+ qpair->retry_term_cnt++;
+ if (qpair->retry_term_cnt >= 5) {
+ rc = EIO;
+ qpair->retry_term_cnt = 0;
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to send ABTS Respond. Dumping firmware.\n");
+ ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
+ vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
+
+ if (qpair == ha->base_qpair)
+ ha->isp_ops->fw_dump(vha, 1);
+ else
+ ha->isp_ops->fw_dump(vha, 0);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else if (qpair->retry_term_jiff != jiffies) {
+ qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
+ qpair->retry_term_cnt = 0;
+ qpair->retry_term_jiff = jiffies;
+ }
+
+ return rc;
+}
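
qlt_chk_unresolv_exchg() above spots a firmware/driver ping-pong by relying on the fact that legitimate retries do not repeat within one jiffy, so several hits on the same exchange address in a single tick mean the exchange is stuck. A runnable userspace approximation of the detector, with time() replacing jiffies and illustrative names throughout:

#include <stdint.h>
#include <time.h>

struct retry_state {
	uint32_t last_key;	/* stands in for retry_term_exchg_addr */
	time_t last_tick;	/* stands in for retry_term_jiff */
	int count;		/* stands in for retry_term_cnt */
};

/* Returns nonzero when the caller should give up and dump state. */
static int check_loop(struct retry_state *st, uint32_t key)
{
	time_t now = time(NULL);

	if (st->last_key == key && st->last_tick == now) {
		if (++st->count >= 5) {
			st->count = 0;
			return 1;	/* same request looping rapidly */
		}
	} else if (st->last_tick != now) {
		st->last_key = key;
		st->count = 0;
		st->last_tick = now;
	}
	return 0;
}
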
+
+
+static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, response_t *pkt)
+{
+ struct abts_resp_from_24xx_fw *entry =
+ (struct abts_resp_from_24xx_fw *)pkt;
+ u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ struct qla_hw_data *ha = vha->hw;
+
+ mcmd = (struct qla_tgt_mgmt_cmd *)qlt_ctio_to_cmd(vha, rsp,
+ pkt->handle, pkt);
+ if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
+ ql_dbg(ql_dbg_async, vha, 0xe064,
+ "qla_target(%d): ABTS Comp without mcmd\n",
+ vha->vp_idx);
+ return;
+ }
+
+ if (mcmd)
+ vha = mcmd->vha;
+ vha->vha_tgt.qla_tgt->abts_resp_expected--;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe038,
+ "ABTS_RESP_24XX: compl_status %x\n",
+ entry->compl_status);
+
+ if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
+ if ((entry->error_subcode1 == 0x1E) &&
+ (entry->error_subcode2 == 0)) {
+ if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ return;
+ }
+ qlt_24xx_retry_term_exchange(vha, rsp->qpair,
+ pkt, mcmd);
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe063,
+ "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
+ vha->vp_idx, entry->compl_status,
+ entry->error_subcode1,
+ entry->error_subcode2);
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ }
+ } else {
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ }
+}
+
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha,
@@ -5740,41 +5991,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
case ABTS_RESP_24XX:
if (tgt->abts_resp_expected > 0) {
- struct abts_resp_from_24xx_fw *entry =
- (struct abts_resp_from_24xx_fw *)pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe038,
- "ABTS_RESP_24XX: compl_status %x\n",
- entry->compl_status);
- tgt->abts_resp_expected--;
- if (le16_to_cpu(entry->compl_status) !=
- ABTS_RESP_COMPL_SUCCESS) {
- if ((entry->error_subcode1 == 0x1E) &&
- (entry->error_subcode2 == 0)) {
- /*
- * We've got a race here: aborted
- * exchange not terminated, i.e.
- * response for the aborted command was
- * sent between the abort request was
- * received and processed.
- * Unfortunately, the firmware has a
- * silly requirement that all aborted
- * exchanges must be explicitely
- * terminated, otherwise it refuses to
- * send responses for the abort
- * requests. So, we have to
- * (re)terminate the exchange and retry
- * the abort response.
- */
- qlt_24xx_retry_term_exchange(vha,
- entry);
- } else
- ql_dbg(ql_dbg_tgt, vha, 0xe063,
- "qla_target(%d): ABTS_RESP_24XX "
- "failed %x (subcode %x:%x)",
- vha->vp_idx, entry->compl_status,
- entry->error_subcode1,
- entry->error_subcode2);
- }
+ qlt_handle_abts_completion(vha, rsp, pkt);
} else {
ql_dbg(ql_dbg_tgt, vha, 0xe064,
"qla_target(%d): Unexpected ABTS_RESP_24XX "
@@ -5964,10 +6181,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
case MODE_DUAL:
if (newfcport) {
if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0x20fe,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name, vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, fcport);
+ qla24xx_sched_upd_fcport(fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ff,
"%s %d %8phC post gpsc fcp_cnt %d\n",
@@ -6413,6 +6627,9 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
if (!(host->hostt->supported_mode & MODE_TARGET))
continue;
+ if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
+ continue;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
@@ -6475,15 +6692,15 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
EXPORT_SYMBOL(qlt_lport_deregister);
/* Must be called under HW lock */
-static void qlt_set_mode(struct scsi_qla_host *vha)
+void qlt_set_mode(struct scsi_qla_host *vha)
{
- switch (ql2x_ini_mode) {
+ switch (vha->qlini_mode) {
case QLA2XXX_INI_MODE_DISABLED:
case QLA2XXX_INI_MODE_EXCLUSIVE:
vha->host->active_mode = MODE_TARGET;
break;
case QLA2XXX_INI_MODE_ENABLED:
- vha->host->active_mode = MODE_UNKNOWN;
+ vha->host->active_mode = MODE_INITIATOR;
break;
case QLA2XXX_INI_MODE_DUAL:
vha->host->active_mode = MODE_DUAL;
@@ -6496,7 +6713,7 @@ static void qlt_set_mode(struct scsi_qla_host *vha)
/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
- switch (ql2x_ini_mode) {
+ switch (vha->qlini_mode) {
case QLA2XXX_INI_MODE_DISABLED:
vha->host->active_mode = MODE_UNKNOWN;
break;
@@ -6532,12 +6749,17 @@ qlt_enable_vha(struct scsi_qla_host *vha)
dump_stack();
return;
}
+ if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
+ return;
spin_lock_irqsave(&ha->hardware_lock, flags);
tgt->tgt_stopped = 0;
qlt_set_mode(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_lock(&ha->optrom_mutex);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+ "%s.\n", __func__);
if (vha->vp_idx) {
qla24xx_disable_vp(vha);
qla24xx_enable_vp(vha);
@@ -6546,6 +6768,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
qla2xxx_wake_dpc(base_vha);
qla2x00_wait_for_hba_online(base_vha);
}
+ mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);
@@ -6767,7 +6990,7 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
if (qla_tgt_mode_enabled(vha))
nv->exchange_count = cpu_to_le16(0xFFFF);
else /* dual */
- nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+ nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6846,14 +7069,6 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
-
- /* disable ZIO at start time. */
- if (!vha->flags.init_done) {
- uint32_t tmp;
- tmp = le32_to_cpu(icb->firmware_options_2);
- tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
- icb->firmware_options_2 = cpu_to_le32(tmp);
- }
}
void
@@ -6881,7 +7096,7 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
if (qla_tgt_mode_enabled(vha))
nv->exchange_count = cpu_to_le16(0xFFFF);
else /* dual */
- nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+ nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6957,15 +7172,6 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
-
- /* disable ZIO at start time. */
- if (!vha->flags.init_done) {
- uint32_t tmp;
- tmp = le32_to_cpu(icb->firmware_options_2);
- tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
- icb->firmware_options_2 = cpu_to_le32(tmp);
- }
-
}
void
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 199d3ba1916d..721da593b1bc 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -900,6 +900,7 @@ struct qla_tgt_cmd {
unsigned int aborted:1;
unsigned int data_work:1;
unsigned int data_work_free:1;
+ unsigned int released:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@@ -908,6 +909,7 @@ struct qla_tgt_cmd {
u64 unpacked_lun;
enum dma_data_direction dma_data_direction;
+ uint16_t ctio_flags;
uint16_t vp_idx;
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
@@ -956,16 +958,20 @@ struct qla_tgt_sess_work_param {
};
struct qla_tgt_mgmt_cmd {
+ uint8_t cmd_type;
+ uint8_t pad[3];
uint16_t tmr_func;
uint8_t fc_tm_rsp;
+ uint8_t abort_io_attr;
struct fc_port *sess;
struct qla_qpair *qpair;
struct scsi_qla_host *vha;
struct se_cmd se_cmd;
struct work_struct free_work;
unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK BIT_0
+#define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1
uint32_t reset_count;
-#define QLA24XX_MGMT_SEND_NACK 1
struct work_struct work;
uint64_t unpacked_lun;
union {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3850b28518e5..12bafff71a1a 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.08-k"
+#define QLA2XXX_VERSION "10.00.00.11-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index e03d12a5f986..65053c066680 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -277,14 +277,25 @@ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ bool released = false;
+ unsigned long flags;
cmd->cmd_in_wq = 0;
WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
cmd->trc_flags |= TRC_CMD_FREE;
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ cmd->cmd_sent_to_fw = 0;
+ if (cmd->released)
+ released = true;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+ if (released)
+ qlt_free_cmd(cmd);
+ else
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
}
/*
@@ -325,6 +336,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd;
+ unsigned long flags;
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
@@ -332,9 +344,16 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
qlt_free_mcmd(mcmd);
return;
}
-
cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
- qlt_free_cmd(cmd);
+
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->cmd_sent_to_fw) {
+ cmd->released = 1;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ qlt_free_cmd(cmd);
+ }
}
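
tcm_qla2xxx_release_cmd() above cannot free a command the firmware still owns, so it records the release under cmd_lock and lets whichever completion path clears cmd_sent_to_fw perform the actual free (as tcm_qla2xxx_complete_free() and the data-work handler do). The handoff reduced to a userspace sketch, with a pthread mutex for cmd_lock and illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct cmd {
	pthread_mutex_t lock;
	bool sent_to_fw;	/* set while the hardware owns the command */
	bool released;		/* upper layer already asked for the free */
};

static void release_cmd(struct cmd *c)		/* upper-layer release path */
{
	pthread_mutex_lock(&c->lock);
	if (c->sent_to_fw) {
		c->released = true;	/* defer: completion will free it */
		pthread_mutex_unlock(&c->lock);
		return;
	}
	pthread_mutex_unlock(&c->lock);
	free(c);
}

static void complete_cmd(struct cmd *c)		/* firmware completion path */
{
	bool do_free;

	pthread_mutex_lock(&c->lock);
	c->sent_to_fw = false;
	do_free = c->released;
	pthread_mutex_unlock(&c->lock);
	if (do_free)
		free(c);
}
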
static void tcm_qla2xxx_release_session(struct kref *kref)
@@ -405,7 +424,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
se_cmd->pi_err = 0;
/*
- * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+ * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup
* the SGL mappings into PCIe memory for incoming FCP WRITE data.
*/
return qlt_rdy_to_xfer(cmd);
@@ -499,6 +518,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
@@ -506,6 +526,25 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
cmd->cmd_in_wq = 0;
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ cmd->cmd_sent_to_fw = 0;
+
+ if (cmd->released) {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ qlt_free_cmd(cmd);
+ return;
+ }
+
+ cmd->data_work = 1;
+ if (cmd->aborted) {
+ cmd->data_work_free = 1;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+ tcm_qla2xxx_free_cmd(cmd);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
@@ -718,10 +757,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->sg_cnt = 0;
cmd->offset = 0;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
- if (cmd->trc_flags & TRC_XMIT_STATUS) {
- pr_crit("Multiple calls for status = %p.\n", cmd);
- dump_stack();
- }
cmd->trc_flags |= TRC_XMIT_STATUS;
if (se_cmd->data_direction == DMA_FROM_DEVICE) {
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 52b1a0bc93c9..1ef74aa2d00a 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -766,12 +766,10 @@ int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
while (drvr_wait) {
if (ql4xxx_lock_drvr(a) == 0) {
ssleep(QL4_LOCK_DRVR_SLEEP);
- if (drvr_wait) {
- DEBUG2(printk("scsi%ld: %s: Waiting for "
- "Global Init Semaphore(%d)...\n",
- a->host_no,
- __func__, drvr_wait));
- }
+ DEBUG2(printk("scsi%ld: %s: Waiting for "
+ "Global Init Semaphore(%d)...\n",
+ a->host_no,
+ __func__, drvr_wait));
drvr_wait -= QL4_LOCK_DRVR_SLEEP;
} else {
DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0e13349dce57..051164f755a4 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3382,7 +3382,7 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (task->data_count) {
task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
task->data_count,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -3437,7 +3437,7 @@ static void qla4xxx_task_cleanup(struct iscsi_task *task)
if (task->data_count) {
dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
- task->data_count, PCI_DMA_TODEVICE);
+ task->data_count, DMA_TO_DEVICE);
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -9020,25 +9020,16 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)
/**
* qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
- *
- * At exit, the @ha's flags.enable_64bit_addressing set to indicated
- * supported addressing method.
*/
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
{
- int retval;
-
/* Update our PCI device dma_mask for full 64 bit mask */
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
- if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
- dev_dbg(&ha->pdev->dev,
- "Failed to set 64 bit PCI consistent mask; "
- "using 32 bit.\n");
- retval = pci_set_consistent_dma_mask(ha->pdev,
- DMA_BIT_MASK(32));
- }
- } else
- retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+ if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(&ha->pdev->dev,
+ "Failed to set 64 bit PCI consistent mask; "
+ "using 32 bit.\n");
+ dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
+ }
}
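
The conversion above replaces the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pair with dma_set_mask_and_coherent() from linux/dma-mapping.h, which sets the streaming and coherent masks in one call. The usual probe-time idiom in isolation; a sketch, not code from this driver:

#include <linux/dma-mapping.h>
#include <linux/device.h>

static int example_set_dma_masks(struct device *dev)
{
	/* Prefer full 64-bit addressing... */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;
	/* ...and fall back to 32 bits when the platform can't do it. */
	dev_dbg(dev, "falling back to 32-bit DMA mask\n");
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
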
static int qla4xxx_slave_alloc(struct scsi_device *sdev)
@@ -9824,7 +9815,6 @@ qla4xxx_pci_resume(struct pci_dev *pdev)
__func__);
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
clear_bit(AF_EEH_BUSY, &ha->flags);
}
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index ea88906d2cc5..5c3d6e1e0145 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -63,8 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
* emulated RAID devices, so start with SCSI */
struct raid_internal *i = ac_to_raid_internal(cont);
-#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
- if (scsi_is_sdev_device(dev)) {
+ if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
if (i->f->cookie != sdev->host->hostt)
@@ -72,7 +71,6 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
return i->f->is_raid(dev);
}
-#endif
/* FIXME: look at other subsystems too */
return 0;
}
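
The raid_match() change above swaps an #if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE) block for IS_ENABLED(CONFIG_SCSI), which is true for both built-in (=y) and modular (=m) configurations and keeps the guarded code visible to the compiler even when disabled. The general shape of the idiom, where example_handle_dev() is a hypothetical helper:

#include <linux/kconfig.h>
#include <linux/device.h>
#include <scsi/scsi_device.h>

static int example_handle_dev(struct device *dev);	/* hypothetical helper */

static int example_match(struct device *dev)
{
	/* Compiled and typechecked in every config; folded away when =n. */
	if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev))
		return example_handle_dev(dev);
	return 0;
}
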
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b7a8fdfeb2f4..c736d61b1648 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -338,9 +338,6 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
online = scsi_device_online(sdev);
- SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
- "%s: rtn: %d\n", __func__, online));
-
return online;
}
EXPORT_SYMBOL(scsi_block_when_processing_errors);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62348412ed1b..c7fccbb8f554 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1201,8 +1201,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
count = blk_rq_map_integrity_sg(rq->q, rq->bio,
prot_sdb->table.sgl);
- BUG_ON(unlikely(count > ivecs));
- BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
+ BUG_ON(count > ivecs);
+ BUG_ON(count > queue_max_integrity_segments(rq->q));
cmd->prot_sdb = prot_sdb;
cmd->prot_sdb->table.nents = count;
@@ -2753,6 +2753,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (oldstate) {
case SDEV_RUNNING:
case SDEV_CREATED_BLOCK:
+ case SDEV_OFFLINE:
break;
default:
goto illegal;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 0cd16e80b019..0a165b2b3e81 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -612,7 +612,6 @@ sas_phy_protocol_attr(identify.target_port_protocols,
sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
unsigned long long);
sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
-//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
sas_phy_linkspeed_attr(negotiated_linkrate);
sas_phy_linkspeed_attr(minimum_linkrate_hw);
sas_phy_linkspeed_rw_attr(minimum_linkrate);
@@ -1802,7 +1801,6 @@ sas_attach_transport(struct sas_function_template *ft)
SETUP_PHY_ATTRIBUTE(device_type);
SETUP_PHY_ATTRIBUTE(sas_address);
SETUP_PHY_ATTRIBUTE(phy_identifier);
- //SETUP_PHY_ATTRIBUTE(port_identifier);
SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b762d0fd773c..3bb2b3351e35 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1272,8 +1272,6 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_READ:
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
- case REQ_OP_ZONE_REPORT:
- return sd_zbc_setup_report_cmnd(cmd);
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_reset_cmnd(cmd);
default:
@@ -1802,6 +1800,7 @@ static const struct block_device_operations sd_fops = {
.check_events = sd_check_events,
.revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
+ .report_zones = sd_zbc_report_zones,
.pr_ops = &sd_pr_ops,
};
@@ -1953,16 +1952,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
scsi_set_resid(SCpnt, blk_rq_bytes(req));
}
break;
- case REQ_OP_ZONE_REPORT:
- if (!result) {
- good_bytes = scsi_bufflen(SCpnt)
- - scsi_get_resid(SCpnt);
- scsi_set_resid(SCpnt, 0);
- } else {
- good_bytes = 0;
- scsi_set_resid(SCpnt, blk_rq_bytes(req));
- }
- break;
default:
/*
* In case of bogus fw or device, we could end up having
@@ -3425,8 +3414,6 @@ static int sd_remove(struct device *dev)
del_gendisk(sdkp->disk);
sd_shutdown(dev);
- sd_zbc_remove(sdkp);
-
free_opal_dev(sdkp->opal_dev);
blk_register_region(devt, SD_MINORS, NULL,
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index a7d4f50b67d4..1d63f3a23ffb 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -76,7 +76,6 @@ struct scsi_disk {
#ifdef CONFIG_BLK_DEV_ZONED
u32 nr_zones;
u32 zone_blocks;
- u32 zone_shift;
u32 zones_optimal_open;
u32 zones_optimal_nonseq;
u32 zones_max_open;
@@ -271,12 +270,13 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
-extern void sd_zbc_remove(struct scsi_disk *sdkp);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr);
+extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask);
#else /* CONFIG_BLK_DEV_ZONED */
@@ -286,15 +286,8 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
return 0;
}
-static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
-
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
-static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
-{
- return BLKPREP_INVALID;
-}
-
static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
return BLKPREP_INVALID;
@@ -304,6 +297,8 @@ static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
unsigned int good_bytes,
struct scsi_sense_hdr *sshdr) {}
+#define sd_zbc_report_zones NULL
+
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* _SCSI_DISK_H */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 412c1787dcd9..e06c48c866e4 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -62,16 +62,22 @@ static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
}
/**
- * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
+ * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
* @sdkp: The target disk
* @buf: Buffer to use for the reply
* @buflen: the buffer size
* @lba: Start LBA of the report
+ * @partial: Do partial report
*
* For internal use during device validation.
+ * Using partial=true can significantly speed up execution of a report zones
+ * command because the disk does not have to count all matching zones; it
+ * only reports the number of zones that fit in the command reply buffer.
*/
-static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
- unsigned int buflen, sector_t lba)
+static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ unsigned int buflen, sector_t lba,
+ bool partial)
{
struct scsi_device *sdp = sdkp->device;
const int timeout = sdp->request_queue->rq_timeout;
@@ -85,6 +91,8 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
cmd[1] = ZI_REPORT_ZONES;
put_unaligned_be64(lba, &cmd[2]);
put_unaligned_be32(buflen, &cmd[10]);
+ if (partial)
+ cmd[14] = ZBC_REPORT_ZONE_PARTIAL;
memset(buf, 0, buflen);
result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
@@ -110,108 +118,56 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
}
/**
- * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
- * @cmd: The command to setup
+ * sd_zbc_report_zones - Disk report zones operation.
+ * @disk: The target disk
+ * @sector: Start 512B sector of the report
+ * @zones: Array of zone descriptors
+ * @nr_zones: Number of descriptors in the array
+ * @gfp_mask: Memory allocation mask
*
- * Call in sd_init_command() for a REQ_OP_ZONE_REPORT request.
+ * Execute a report zones command on the target disk.
*/
-int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
{
- struct request *rq = cmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- sector_t lba, sector = blk_rq_pos(rq);
- unsigned int nr_bytes = blk_rq_bytes(rq);
- int ret;
-
- WARN_ON(nr_bytes == 0);
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ unsigned int i, buflen, nrz = *nr_zones;
+ unsigned char *buf;
+ size_t offset = 0;
+ int ret = 0;
if (!sd_is_zoned(sdkp))
/* Not a zoned device */
- return BLKPREP_KILL;
-
- ret = scsi_init_io(cmd);
- if (ret != BLKPREP_OK)
- return ret;
-
- cmd->cmd_len = 16;
- memset(cmd->cmnd, 0, cmd->cmd_len);
- cmd->cmnd[0] = ZBC_IN;
- cmd->cmnd[1] = ZI_REPORT_ZONES;
- lba = sectors_to_logical(sdkp->device, sector);
- put_unaligned_be64(lba, &cmd->cmnd[2]);
- put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
- /* Do partial report for speeding things up */
- cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;
-
- cmd->sc_data_direction = DMA_FROM_DEVICE;
- cmd->sdb.length = nr_bytes;
- cmd->transfersize = sdkp->device->sector_size;
- cmd->allowed = 0;
-
- return BLKPREP_OK;
-}
-
-/**
- * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
- * @scmd: The completed report zones command
- * @good_bytes: reply size in bytes
- *
- * Convert all reported zone descriptors to struct blk_zone. The conversion
- * is done in-place, directly in the request specified sg buffer.
- */
-static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
- unsigned int good_bytes)
-{
- struct request *rq = scmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- struct sg_mapping_iter miter;
- struct blk_zone_report_hdr hdr;
- struct blk_zone zone;
- unsigned int offset, bytes = 0;
- unsigned long flags;
- u8 *buf;
-
- if (good_bytes < 64)
- return;
+ return -EOPNOTSUPP;
- memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));
-
- sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
- SG_MITER_TO_SG | SG_MITER_ATOMIC);
-
- local_irq_save(flags);
- while (sg_miter_next(&miter) && bytes < good_bytes) {
+ /*
+ * Get a reply buffer for the number of requested zones plus a header.
+ * For ATA, buffers must be aligned to 512B.
+ */
+ buflen = roundup((nrz + 1) * 64, 512);
+ buf = kmalloc(buflen, gfp_mask);
+ if (!buf)
+ return -ENOMEM;
- buf = miter.addr;
- offset = 0;
+ ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
+ sectors_to_logical(sdkp->device, sector), true);
+ if (ret)
+ goto out_free_buf;
- if (bytes == 0) {
- /* Set the report header */
- hdr.nr_zones = min_t(unsigned int,
- (good_bytes - 64) / 64,
- get_unaligned_be32(&buf[0]) / 64);
- memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
- offset += 64;
- bytes += 64;
- }
+ nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64);
+ for (i = 0; i < nrz; i++) {
+ offset += 64;
+ sd_zbc_parse_report(sdkp, buf + offset, zones);
+ zones++;
+ }
- /* Parse zone descriptors */
- while (offset < miter.length && hdr.nr_zones) {
- WARN_ON(offset > miter.length);
- buf = miter.addr + offset;
- sd_zbc_parse_report(sdkp, buf, &zone);
- memcpy(buf, &zone, sizeof(struct blk_zone));
- offset += 64;
- bytes += 64;
- hdr.nr_zones--;
- }
+ *nr_zones = nrz;
- if (!hdr.nr_zones)
- break;
+out_free_buf:
+ kfree(buf);
- }
- sg_miter_stop(&miter);
- local_irq_restore(flags);
+ return ret;
}
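
The buffer sizing in sd_zbc_report_zones() above follows the REPORT ZONES reply layout: one 64-byte header followed by one 64-byte descriptor per zone, with the total rounded up to a 512-byte multiple for the ATA transport. As a stand-alone computation, using an illustrative macro rather than the kernel's roundup():

#include <stddef.h>

/* Same result as the kernel's roundup() for positive values. */
#define EXAMPLE_ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))

static size_t report_zones_buflen(unsigned int nr_zones)
{
	/* 64B header + 64B per zone descriptor, padded for ATA (512B) */
	return EXAMPLE_ROUNDUP(((size_t)nr_zones + 1) * 64, 512);
}
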
/**
@@ -294,30 +250,23 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE_SAME:
break;
-
- case REQ_OP_ZONE_REPORT:
-
- if (!result)
- sd_zbc_report_zones_complete(cmd, good_bytes);
- break;
-
}
}
/**
- * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
+ * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
* @sdkp: Target disk
* @buf: Buffer where to store the VPD page data
*
- * Read VPD page B6.
+ * Read VPD page B6, get information and check that reads are unconstrained.
*/
-static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
- unsigned char *buf)
+static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
+ unsigned char *buf)
{
if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
sd_printk(KERN_NOTICE, sdkp,
- "Unconstrained-read check failed\n");
+ "Read zoned characteristics VPD page failed\n");
return -ENODEV;
}
@@ -335,43 +284,17 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
}
- return 0;
-}
-
-/**
- * sd_zbc_check_capacity - Check reported capacity.
- * @sdkp: Target disk
- * @buf: Buffer to use for commands
- *
- * ZBC drive may report only the capacity of the first conventional zones at
- * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply.
- * Check this here. If the disk reported only its conventional zones capacity,
- * get the total capacity by doing a report zones.
- */
-static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
-{
- sector_t lba;
- int ret;
-
- if (sdkp->rc_basis != 0)
- return 0;
-
- /* Do a report zone to get the maximum LBA to check capacity */
- ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
- if (ret)
- return ret;
-
- /* The max_lba field is the capacity of this device */
- lba = get_unaligned_be64(&buf[8]);
- if (lba + 1 == sdkp->capacity)
- return 0;
-
- if (sdkp->first_scan)
- sd_printk(KERN_WARNING, sdkp,
- "Changing capacity from %llu to max LBA+1 %llu\n",
- (unsigned long long)sdkp->capacity,
- (unsigned long long)lba + 1);
- sdkp->capacity = lba + 1;
+ /*
+ * Check for unconstrained reads: host-managed devices with
+	 * constrained reads (drives that fail reads past the write pointer)
+ * are not supported.
+ */
+ if (!sdkp->urswrz) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+				  "constrained-read devices are not supported\n");
+ return -ENODEV;
+ }
return 0;
}
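For orientation, the Zoned Block Device Characteristics VPD page (B6h) fields consumed by this function lay out as follows (offsets per the ZBC specification; the buf[4] and buf[8] reads happen in code elided from this hunk):

	/*
	 * buf[4] & 0x01   URSWRZ: unrestricted reads in sequential write
	 *                 required zones (host-managed devices)
	 * buf[8..11]      optimal number of open zones (host-aware)
	 * buf[16..19]     maximum number of open zones (host-managed),
	 *                 stored in sdkp->zones_max_open above
	 */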
@@ -379,24 +302,27 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
#define SD_ZBC_BUF_SIZE 131072U
/**
- * sd_zbc_check_zone_size - Check the device zone sizes
+ * sd_zbc_check_zones - Check the device capacity and zone sizes
* @sdkp: Target disk
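+ * @zblocks: Zone size in number of blocks, set upon success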
*
- * Check that all zones of the device are equal. The last zone can however
- * be smaller. The zone size must also be a power of two number of LBAs.
+ * Check that the device capacity as reported by READ CAPACITY matches the
+ * max_lba value (plus one) of the report zones command reply. Also check that
+ * all zones of the device have an equal size, only allowing the last zone of
+ * the disk to have a smaller size (runt zone). The zone size must also be a
+ * power of two.
*
- * Returns the zone size in number of blocks upon success or an error code
- * upon failure.
+ * Returns 0 upon success, with the zone size in number of blocks stored in
+ * @zblocks, or an error code upon failure.
*/
-static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
{
u64 zone_blocks = 0;
- sector_t block = 0;
+ sector_t max_lba, block = 0;
unsigned char *buf;
unsigned char *rec;
unsigned int buf_len;
unsigned int list_length;
- s64 ret;
+ int ret;
u8 same;
/* Get a buffer */
@@ -404,11 +330,28 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
if (!buf)
return -ENOMEM;
- /* Do a report zone to get the same field */
- ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
+ /* Do a report zone to get max_lba and the same field */
+ ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0, false);
if (ret)
goto out_free;
+ if (sdkp->rc_basis == 0) {
+ /* The max_lba field is the capacity of this device */
+ max_lba = get_unaligned_be64(&buf[8]);
+ if (sdkp->capacity != max_lba + 1) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_WARNING, sdkp,
+ "Changing capacity from %llu to max LBA+1 %llu\n",
+ (unsigned long long)sdkp->capacity,
+ (unsigned long long)max_lba + 1);
+ sdkp->capacity = max_lba + 1;
+ }
+ }
+
+ /*
+ * Check same field: for any value other than 0, we know that all zones
+ * have the same size.
+ */
same = buf[4] & 0x0f;
if (same > 0) {
rec = &buf[64];
@@ -445,8 +388,8 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
}
if (block < sdkp->capacity) {
- ret = sd_zbc_report_zones(sdkp, buf,
- SD_ZBC_BUF_SIZE, block);
+ ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
+ block, true);
if (ret)
goto out_free;
}
@@ -470,9 +413,10 @@ out:
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Zone size too large\n");
- ret = -ENODEV;
+ ret = -EFBIG;
} else {
- ret = zone_blocks;
+ *zblocks = zone_blocks;
+ ret = 0;
}
out_free:
@@ -481,191 +425,11 @@ out_free:
return ret;
}
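The constraint enforced by sd_zbc_check_zones() boils down to a simple predicate (a hypothetical helper for illustration only, not patch content; is_power_of_2() comes from linux/log2.h): all zones share one size that is a power of two and fits the u32 out-parameter, with only the last zone of the disk allowed to be a smaller runt:

	static bool zbc_zone_size_valid(u64 zone_blocks)
	{
		/* must fit the u32 out-parameter and be a power of two */
		return zone_blocks && zone_blocks <= UINT_MAX &&
		       is_power_of_2(zone_blocks);
	}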
-/**
- * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
- * @nr_zones: Number of zones to allocate space for.
- * @numa_node: NUMA node to allocate the memory from.
- */
-static inline unsigned long *
-sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
-{
- return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
- GFP_KERNEL, numa_node);
-}
-
-/**
- * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
- * @sdkp: disk used
- * @buf: report reply buffer
- * @buflen: length of @buf
- * @zone_shift: logarithm base 2 of the number of blocks in a zone
- * @seq_zones_bitmap: bitmap of sequential zones to set
- *
- * Parse reported zone descriptors in @buf to identify sequential zones and
- * set the reported zone bit in @seq_zones_bitmap accordingly.
- * Since read-only and offline zones cannot be written, do not
- * mark them as sequential in the bitmap.
- * Return the LBA after the last zone reported.
- */
-static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
- unsigned int buflen, u32 zone_shift,
- unsigned long *seq_zones_bitmap)
-{
- sector_t lba, next_lba = sdkp->capacity;
- unsigned int buf_len, list_length;
- unsigned char *rec;
- u8 type, cond;
-
- list_length = get_unaligned_be32(&buf[0]) + 64;
- buf_len = min(list_length, buflen);
- rec = buf + 64;
-
- while (rec < buf + buf_len) {
- type = rec[0] & 0x0f;
- cond = (rec[1] >> 4) & 0xf;
- lba = get_unaligned_be64(&rec[16]);
- if (type != ZBC_ZONE_TYPE_CONV &&
- cond != ZBC_ZONE_COND_READONLY &&
- cond != ZBC_ZONE_COND_OFFLINE)
- set_bit(lba >> zone_shift, seq_zones_bitmap);
- next_lba = lba + get_unaligned_be64(&rec[8]);
- rec += 64;
- }
-
- return next_lba;
-}
-
-/**
- * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
- * @sdkp: target disk
- * @zone_shift: logarithm base 2 of the number of blocks in a zone
- * @nr_zones: number of zones to set up a seq zone bitmap for
- *
- * Allocate a zone bitmap and initialize it by identifying sequential zones.
- */
-static unsigned long *
-sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
- u32 nr_zones)
-{
- struct request_queue *q = sdkp->disk->queue;
- unsigned long *seq_zones_bitmap;
- sector_t lba = 0;
- unsigned char *buf;
- int ret = -ENOMEM;
-
- seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
- if (!seq_zones_bitmap)
- return ERR_PTR(-ENOMEM);
-
- buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- goto out;
-
- while (lba < sdkp->capacity) {
- ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
- if (ret)
- goto out;
- lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
- zone_shift, seq_zones_bitmap);
- }
-
- if (lba != sdkp->capacity) {
- /* Something went wrong */
- ret = -EIO;
- }
-
-out:
- kfree(buf);
- if (ret) {
- kfree(seq_zones_bitmap);
- return ERR_PTR(ret);
- }
- return seq_zones_bitmap;
-}
-
-static void sd_zbc_cleanup(struct scsi_disk *sdkp)
-{
- struct request_queue *q = sdkp->disk->queue;
-
- kfree(q->seq_zones_bitmap);
- q->seq_zones_bitmap = NULL;
-
- kfree(q->seq_zones_wlock);
- q->seq_zones_wlock = NULL;
-
- q->nr_zones = 0;
-}
-
-static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
-{
- struct request_queue *q = sdkp->disk->queue;
- u32 zone_shift = ilog2(zone_blocks);
- u32 nr_zones;
- int ret;
-
- /* chunk_sectors indicates the zone size */
- blk_queue_chunk_sectors(q,
- logical_to_sectors(sdkp->device, zone_blocks));
- nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
-
- /*
- * Initialize the device request queue information if the number
- * of zones changed.
- */
- if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
- unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
- size_t zone_bitmap_size;
-
- if (nr_zones) {
- seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
- q->node);
- if (!seq_zones_wlock) {
- ret = -ENOMEM;
- goto err;
- }
-
- seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
- zone_shift, nr_zones);
- if (IS_ERR(seq_zones_bitmap)) {
- ret = PTR_ERR(seq_zones_bitmap);
- kfree(seq_zones_wlock);
- goto err;
- }
- }
- zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
- sizeof(unsigned long);
- blk_mq_freeze_queue(q);
- if (q->nr_zones != nr_zones) {
- /* READ16/WRITE16 is mandatory for ZBC disks */
- sdkp->device->use_16_for_rw = 1;
- sdkp->device->use_10_for_rw = 0;
-
- sdkp->zone_blocks = zone_blocks;
- sdkp->zone_shift = zone_shift;
- sdkp->nr_zones = nr_zones;
- q->nr_zones = nr_zones;
- swap(q->seq_zones_wlock, seq_zones_wlock);
- swap(q->seq_zones_bitmap, seq_zones_bitmap);
- } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
- zone_bitmap_size) != 0) {
- memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
- zone_bitmap_size);
- }
- blk_mq_unfreeze_queue(q);
- kfree(seq_zones_wlock);
- kfree(seq_zones_bitmap);
- }
-
- return 0;
-
-err:
- sd_zbc_cleanup(sdkp);
- return ret;
-}
-
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
- int64_t zone_blocks;
+ struct gendisk *disk = sdkp->disk;
+ unsigned int nr_zones;
+ u32 zone_blocks;
int ret;
if (!sd_is_zoned(sdkp))
@@ -675,26 +439,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
*/
return 0;
- /* Get zoned block device characteristics */
- ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
- if (ret)
- goto err;
-
- /*
- * Check for unconstrained reads: host-managed devices with
- * constrained reads (drives failing read after write pointer)
- * are not supported.
- */
- if (!sdkp->urswrz) {
- if (sdkp->first_scan)
- sd_printk(KERN_NOTICE, sdkp,
- "constrained reads devices are not supported\n");
- ret = -ENODEV;
- goto err;
- }
-
- /* Check capacity */
- ret = sd_zbc_check_capacity(sdkp, buf);
+ /* Check zoned block device characteristics (unconstrained reads) */
+ ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
if (ret)
goto err;
@@ -702,33 +448,44 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
* Check zone size: only devices with a constant zone size (except
* an eventual last runt zone) that is a power of 2 are supported.
*/
- zone_blocks = sd_zbc_check_zone_size(sdkp);
- ret = -EFBIG;
- if (zone_blocks != (u32)zone_blocks)
- goto err;
- ret = zone_blocks;
- if (ret < 0)
+ ret = sd_zbc_check_zones(sdkp, &zone_blocks);
+ if (ret != 0)
goto err;
/* The drive satisfies the kernel restrictions: set it up */
- ret = sd_zbc_setup(sdkp, zone_blocks);
- if (ret)
- goto err;
+ blk_queue_chunk_sectors(sdkp->disk->queue,
+ logical_to_sectors(sdkp->device, zone_blocks));
+ nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
+
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
+ /*
+ * If something changed, revalidate the disk zone bitmaps once we have
+	 * the capacity, that is, on the second revalidate execution during
+	 * disk scan and always during a normal revalidate.
+ */
+ if (sdkp->first_scan)
+ return 0;
+ if (sdkp->zone_blocks != zone_blocks ||
+ sdkp->nr_zones != nr_zones ||
+ disk->queue->nr_zones != nr_zones) {
+ ret = blk_revalidate_disk_zones(disk);
+ if (ret != 0)
+ goto err;
+ sdkp->zone_blocks = zone_blocks;
+ sdkp->nr_zones = nr_zones;
+ }
return 0;
err:
sdkp->capacity = 0;
- sd_zbc_cleanup(sdkp);
return ret;
}
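A worked example of the nr_zones computation in sd_zbc_read_zones() above, with illustrative numbers only: for zone_blocks = 65536 logical blocks and sdkp->capacity = 1000000 blocks, round_up(1000000, 65536) = 1048576, and 1048576 >> ilog2(65536) = 1048576 >> 16 = 16 zones; the sixteenth is a runt zone of 1000000 - 15 * 65536 = 16960 blocks.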
-void sd_zbc_remove(struct scsi_disk *sdkp)
-{
- sd_zbc_cleanup(sdkp);
-}
-
void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
if (!sd_is_zoned(sdkp) || !sdkp->capacity)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8a254bb46a9b..c6ad00703c5b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -822,7 +822,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
scsi_req_free_cmd(scsi_req(srp->rq));
- blk_end_request_all(srp->rq, BLK_STS_IOERR);
+ blk_put_request(srp->rq);
srp->rq = NULL;
}
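The request on this path was allocated but never dispatched, so when the device is detaching it has to be handed back with blk_put_request(); blk_end_request_all() was the legacy-queue completion call and is not a valid way to dispose of a never-started request (nor does it exist for blk-mq). A lifetime sketch, not patch content:

	rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);	/* allocated only */
	/* ... device detaches before the request is started ... */
	blk_put_request(rq);				/* return, don't complete */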
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 2112ea6723c6..a25a07a0b7f0 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -349,16 +349,16 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
static int pqi_map_single(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *sg_descriptor, void *buffer,
- size_t buffer_length, int data_direction)
+ size_t buffer_length, enum dma_data_direction data_direction)
{
dma_addr_t bus_address;
- if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
+ if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
return 0;
- bus_address = pci_map_single(pci_dev, buffer, buffer_length,
+ bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
data_direction);
- if (pci_dma_mapping_error(pci_dev, bus_address))
+ if (dma_mapping_error(&pci_dev->dev, bus_address))
return -ENOMEM;
put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
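The smartpqi hunks in this file are a mechanical move from the legacy PCI DMA wrappers to the generic DMA API; the recurring pattern is (a sketch, assuming a struct pci_dev *pdev):

	#include <linux/dma-mapping.h>

	dma_addr_t addr;

	/* was: pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE) */
	addr = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	/* was: pci_dma_mapping_error(pdev, addr) */
	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;
	/* ... use the mapping ... */
	/* was: pci_unmap_single(pdev, addr, len, PCI_DMA_FROMDEVICE) */
	dma_unmap_single(&pdev->dev, addr, len, DMA_FROM_DEVICE);

The direction values map one to one: PCI_DMA_TODEVICE becomes DMA_TO_DEVICE, PCI_DMA_FROMDEVICE becomes DMA_FROM_DEVICE, PCI_DMA_BIDIRECTIONAL becomes DMA_BIDIRECTIONAL, and PCI_DMA_NONE becomes DMA_NONE.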
@@ -370,15 +370,15 @@ static int pqi_map_single(struct pci_dev *pci_dev,
static void pqi_pci_unmap(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *descriptors, int num_descriptors,
- int data_direction)
+ enum dma_data_direction data_direction)
{
int i;
- if (data_direction == PCI_DMA_NONE)
+ if (data_direction == DMA_NONE)
return;
for (i = 0; i < num_descriptors; i++)
- pci_unmap_single(pci_dev,
+ dma_unmap_single(&pci_dev->dev,
(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
get_unaligned_le32(&descriptors[i].length),
data_direction);
@@ -387,10 +387,9 @@ static void pqi_pci_unmap(struct pci_dev *pci_dev,
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
struct pqi_raid_path_request *request, u8 cmd,
u8 *scsi3addr, void *buffer, size_t buffer_length,
- u16 vpd_page, int *pci_direction)
+ u16 vpd_page, enum dma_data_direction *dir)
{
u8 *cdb;
- int pci_dir;
memset(request, 0, sizeof(*request));
@@ -458,23 +457,21 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
switch (request->data_direction) {
case SOP_READ_FLAG:
- pci_dir = PCI_DMA_FROMDEVICE;
+ *dir = DMA_FROM_DEVICE;
break;
case SOP_WRITE_FLAG:
- pci_dir = PCI_DMA_TODEVICE;
+ *dir = DMA_TO_DEVICE;
break;
case SOP_NO_DIRECTION_FLAG:
- pci_dir = PCI_DMA_NONE;
+ *dir = DMA_NONE;
break;
default:
- pci_dir = PCI_DMA_BIDIRECTIONAL;
+ *dir = DMA_BIDIRECTIONAL;
break;
}
- *pci_direction = pci_dir;
-
return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
- buffer, buffer_length, pci_dir);
+ buffer, buffer_length, *dir);
}
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
@@ -516,21 +513,19 @@ static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
struct bmic_identify_controller *buffer)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
- sizeof(*buffer), 0, &pci_direction);
+ sizeof(*buffer), 0, &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -538,21 +533,19 @@ static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
- &pci_direction);
+ &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -562,13 +555,13 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
size_t buffer_length)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
u16 bmic_device_index;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &pci_direction);
+ buffer_length, 0, &dir);
if (rc)
return rc;
@@ -579,9 +572,7 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
0, NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -590,8 +581,8 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
{
int rc;
struct pqi_raid_path_request request;
- int pci_direction;
struct bmic_flush_cache *flush_cache;
+ enum dma_data_direction dir;
/*
* Don't bother trying to flush the cache if the controller is
@@ -608,16 +599,14 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
rc = pqi_build_raid_path_request(ctrl_info, &request,
SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
- sizeof(*flush_cache), 0, &pci_direction);
+ sizeof(*flush_cache), 0, &dir);
if (rc)
goto out;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
0, NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
out:
kfree(flush_cache);
@@ -629,20 +618,18 @@ static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
{
int rc;
struct pqi_raid_path_request request;
- int pci_direction;
+ enum dma_data_direction dir;
rc = pqi_build_raid_path_request(ctrl_info, &request,
BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &pci_direction);
+ buffer_length, 0, &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
0, NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -793,20 +780,18 @@ static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
void *buffer, size_t buffer_length)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
- cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
+ cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -1089,7 +1074,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
struct raid_map *raid_map;
@@ -1099,15 +1084,14 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
rc = pqi_build_raid_path_request(ctrl_info, &request,
CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
- sizeof(*raid_map), 0, &pci_direction);
+ sizeof(*raid_map), 0, &dir);
if (rc)
goto error;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
if (rc)
goto error;
@@ -3822,7 +3806,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
rc = pqi_map_single(ctrl_info->pci_dev,
&request.data.report_device_capability.sg_descriptor,
capability, sizeof(*capability),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -3831,7 +3815,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
pqi_pci_unmap(ctrl_info->pci_dev,
&request.data.report_device_capability.sg_descriptor, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -4158,7 +4142,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
rc = pqi_map_single(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors,
event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -4167,7 +4151,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -4194,7 +4178,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
rc = pqi_map_single(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors,
event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (rc)
goto out;
@@ -4203,7 +4187,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
out:
kfree(event_config);
@@ -5534,7 +5518,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
rc = pqi_map_single(ctrl_info->pci_dev,
&request.sg_descriptors[0], kernel_buffer,
- iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ iocommand.buf_size, DMA_BIDIRECTIONAL);
if (rc)
goto out;
@@ -5548,7 +5532,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
if (iocommand.buf_size > 0)
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 5141bd4c9f06..ea91658c7060 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -316,9 +316,9 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
put_unaligned_le32(ctrl_info->max_io_slots,
&base_struct->error_buffer_num_elements);
- bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
- sizeof(*base_struct), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
+ bus_address = dma_map_single(&ctrl_info->pci_dev->dev, base_struct,
+ sizeof(*base_struct), DMA_TO_DEVICE);
+ if (dma_mapping_error(&ctrl_info->pci_dev->dev, bus_address)) {
rc = -ENOMEM;
goto out;
}
@@ -331,9 +331,8 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
&params);
- pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
- PCI_DMA_TODEVICE);
-
+ dma_unmap_single(&ctrl_info->pci_dev->dev, bus_address,
+ sizeof(*base_struct), DMA_TO_DEVICE);
out:
kfree(base_struct_unaligned);
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index b106596cc0cf..e9ccfb97773f 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -111,8 +111,8 @@ snic_queue_report_tgt_req(struct snic *snic)
SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
- pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(snic->pdev, pa)) {
+ pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&snic->pdev->dev, pa)) {
SNIC_HOST_ERR(snic->shost,
"Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
buf);
@@ -138,7 +138,8 @@ snic_queue_report_tgt_req(struct snic *snic)
ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
if (ret) {
- pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&snic->pdev->dev, pa, buf_len,
+ DMA_FROM_DEVICE);
kfree(buf);
rqi->sge_va = 0;
snic_release_untagged_req(snic, rqi);
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 8e69548395b9..159ee94d2a55 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -102,7 +102,8 @@ snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
struct snic_req_info *rqi = NULL;
unsigned long flags;
- pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
rqi = req_to_rqi(req);
spin_lock_irqsave(&snic->spl_cmd_lock, flags);
@@ -172,8 +173,8 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
snic_print_desc(__func__, os_buf, len);
/* Map request buffer */
- pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(snic->pdev, pa)) {
+ pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&snic->pdev->dev, pa)) {
SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
return -ENOMEM;
@@ -186,7 +187,7 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
spin_lock_irqsave(&snic->wq_lock[q_num], flags);
desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
if (desc_avail <= 0) {
- pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
req->req_pa = 0;
spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
@@ -350,29 +351,29 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
if (rqi->abort_req) {
if (rqi->abort_req->req_pa)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
rqi->abort_req->req_pa,
sizeof(struct snic_host_req),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
}
if (rqi->dr_req) {
if (rqi->dr_req->req_pa)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
rqi->dr_req->req_pa,
sizeof(struct snic_host_req),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
}
if (rqi->req->req_pa)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
rqi->req->req_pa,
rqi->req_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}
@@ -384,10 +385,10 @@ snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
sgd = req_to_sgl(rqi_to_req(rqi));
SNIC_BUG_ON(sgd[0].addr == 0);
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
le64_to_cpu(sgd[0].addr),
le32_to_cpu(sgd[0].len),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/*
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 7cf70aaec0ba..5295277d6325 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -435,37 +435,17 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* limitation for the device. Try 43-bit first, and
	 * fall back to 32-bit.
*/
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
if (ret) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
SNIC_HOST_ERR(shost,
"No Usable DMA Configuration, aborting %d\n",
ret);
-
- goto err_rel_regions;
- }
-
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret) {
- SNIC_HOST_ERR(shost,
- "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
- ret);
-
- goto err_rel_regions;
- }
- } else {
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
- if (ret) {
- SNIC_HOST_ERR(shost,
- "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
- ret);
-
goto err_rel_regions;
}
}
-
/* Map vNIC resources from BAR0 */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index d9b2e46424aa..b3650c989ed4 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -146,10 +146,10 @@ snic_release_req_buf(struct snic *snic,
CMD_FLAGS(sc));
if (req->u.icmnd.sense_addr)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
le64_to_cpu(req->u.icmnd.sense_addr),
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
scsi_dma_unmap(sc);
@@ -185,12 +185,11 @@ snic_queue_icmnd_req(struct snic *snic,
}
}
- pa = pci_map_single(snic->pdev,
+ pa = dma_map_single(&snic->pdev->dev,
sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
-
- if (pci_dma_mapping_error(snic->pdev, pa)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&snic->pdev->dev, pa)) {
SNIC_HOST_ERR(snic->shost,
"QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
sc->sense_buffer, snic_cmd_tag(sc));
@@ -2001,7 +2000,7 @@ snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
}
dr_failed:
- SNIC_BUG_ON(!spin_is_locked(io_lock));
+ lockdep_assert_held(io_lock);
if (rqi)
CMD_SP(sc) = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -2604,7 +2603,7 @@ snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
ret = SUCCESS;
skip_internal_abts:
- SNIC_BUG_ON(!spin_is_locked(io_lock));
+ lockdep_assert_held(io_lock);
spin_unlock_irqrestore(io_lock, flags);
return ret;
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index dad5fc66effb..05e374f80946 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -225,10 +225,9 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
{
svnic_dev_desc_ring_size(ring, desc_count, desc_size);
- ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
- ring->size_unaligned,
- &ring->base_addr_unaligned);
-
+ ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
+ ring->size_unaligned, &ring->base_addr_unaligned,
+ GFP_KERNEL);
if (!ring->descs_unaligned) {
pr_err("Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
@@ -251,7 +250,7 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
ring->size_unaligned,
ring->descs_unaligned,
ring->base_addr_unaligned);
@@ -470,9 +469,9 @@ int svnic_dev_fw_info(struct vnic_dev *vdev,
int err = 0;
if (!vdev->fw_info) {
- vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+ vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
- &vdev->fw_info_pa);
+ &vdev->fw_info_pa, GFP_KERNEL);
if (!vdev->fw_info)
return -ENOMEM;
@@ -534,8 +533,8 @@ int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
int wait = VNIC_DVCMD_TMO;
if (!vdev->stats) {
- vdev->stats = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_stats), &vdev->stats_pa);
+ vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
if (!vdev->stats)
return -ENOMEM;
}
@@ -607,9 +606,9 @@ int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
int wait = VNIC_DVCMD_TMO;
if (!vdev->notify) {
- vdev->notify = pci_alloc_consistent(vdev->pdev,
+ vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
- &vdev->notify_pa);
+ &vdev->notify_pa, GFP_KERNEL);
if (!vdev->notify)
return -ENOMEM;
}
@@ -697,21 +696,21 @@ void svnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->linkstatus)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(u32),
vdev->linkstatus,
vdev->linkstatus_pa);
if (vdev->stats)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
if (vdev->devcmd2)
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 0b1421cdf8a0..c9a55d0f076d 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -60,30 +60,6 @@ static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
return readb(esp->regs + (reg * 4UL));
}
-static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int sun3x_esp_irq_pending(struct esp *esp)
{
if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -182,10 +158,6 @@ static int sun3x_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops sun3x_esp_ops = {
.esp_write8 = sun3x_esp_write8,
.esp_read8 = sun3x_esp_read8,
- .map_single = sun3x_esp_map_single,
- .map_sg = sun3x_esp_map_sg,
- .unmap_single = sun3x_esp_unmap_single,
- .unmap_sg = sun3x_esp_unmap_sg,
.irq_pending = sun3x_esp_irq_pending,
.reset_dma = sun3x_esp_reset_dma,
.dma_drain = sun3x_esp_dma_drain,
@@ -246,7 +218,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
dev_set_drvdata(&dev->dev, esp);
- err = scsi_esp_register(esp, &dev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
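Both ESP driver diffs here follow from a core change in the same series: esp->dev becomes a plain struct device on which the esp core calls the generic DMA API itself, so the per-driver map/unmap ops are redundant and scsi_esp_register() no longer needs a separate device argument. A sketch of what the core now does (assuming that core change, which is outside this excerpt):

	addr = dma_map_single(esp->dev, buf, sz, dir);

Drivers that still need the platform_device recover it with to_platform_device(esp->dev), as the sun_esp.c hunks below show.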
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 747ee64a78e1..a11efbcb7f8b 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -80,7 +80,7 @@ static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
static int esp_sbus_map_regs(struct esp *esp, int hme)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct resource *res;
/* On HME, two reg sets exist, first is DVMA,
@@ -100,11 +100,9 @@ static int esp_sbus_map_regs(struct esp *esp, int hme)
static int esp_sbus_map_command_block(struct esp *esp)
{
- struct platform_device *op = esp->dev;
-
- esp->command_block = dma_alloc_coherent(&op->dev, 16,
+ esp->command_block = dma_alloc_coherent(esp->dev, 16,
&esp->command_block_dma,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!esp->command_block)
return -ENOMEM;
return 0;
@@ -113,7 +111,7 @@ static int esp_sbus_map_command_block(struct esp *esp)
static int esp_sbus_register_irq(struct esp *esp)
{
struct Scsi_Host *host = esp->host;
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
host->irq = op->archdata.irqs[0];
return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
@@ -121,7 +119,7 @@ static int esp_sbus_register_irq(struct esp *esp)
static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *dp;
dp = op->dev.of_node;
@@ -143,7 +141,7 @@ done:
static void esp_get_differential(struct esp *esp)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *dp;
dp = op->dev.of_node;
@@ -155,7 +153,7 @@ static void esp_get_differential(struct esp *esp)
static void esp_get_clock_params(struct esp *esp)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *bus_dp, *dp;
int fmhz;
@@ -172,7 +170,7 @@ static void esp_get_clock_params(struct esp *esp)
static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
struct device_node *dma_dp = dma_of->dev.of_node;
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *dp;
u8 bursts, val;
@@ -212,38 +210,6 @@ static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
return sbus_readb(esp->regs + (reg * 4UL));
}
-static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- struct platform_device *op = esp->dev;
-
- return dma_map_single(&op->dev, buf, sz, dir);
-}
-
-static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- struct platform_device *op = esp->dev;
-
- return dma_map_sg(&op->dev, sg, num_sg, dir);
-}
-
-static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- struct platform_device *op = esp->dev;
-
- dma_unmap_single(&op->dev, addr, sz, dir);
-}
-
-static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- struct platform_device *op = esp->dev;
-
- dma_unmap_sg(&op->dev, sg, num_sg, dir);
-}
-
static int sbus_esp_irq_pending(struct esp *esp)
{
if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -255,14 +221,13 @@ static void sbus_esp_reset_dma(struct esp *esp)
{
int can_do_burst16, can_do_burst32, can_do_burst64;
int can_do_sbus64, lim;
- struct platform_device *op;
+ struct platform_device *op = to_platform_device(esp->dev);
u32 val;
can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
can_do_burst64 = 0;
can_do_sbus64 = 0;
- op = esp->dev;
if (sbus_can_dma_64bit())
can_do_sbus64 = 1;
if (sbus_can_burst64())
@@ -474,10 +439,6 @@ static int sbus_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops sbus_esp_ops = {
.esp_write8 = sbus_esp_write8,
.esp_read8 = sbus_esp_read8,
- .map_single = sbus_esp_map_single,
- .map_sg = sbus_esp_map_sg,
- .unmap_single = sbus_esp_unmap_single,
- .unmap_sg = sbus_esp_unmap_sg,
.irq_pending = sbus_esp_irq_pending,
.reset_dma = sbus_esp_reset_dma,
.dma_drain = sbus_esp_dma_drain,
@@ -504,7 +465,7 @@ static int esp_sbus_probe_one(struct platform_device *op,
esp = shost_priv(host);
esp->host = host;
- esp->dev = op;
+ esp->dev = &op->dev;
esp->ops = &sbus_esp_ops;
if (hme)
@@ -540,7 +501,7 @@ static int esp_sbus_probe_one(struct platform_device *op,
dev_set_drvdata(&op->dev, esp);
- err = scsi_esp_register(esp, &op->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index bd3f6e2d6834..0a2a54517b15 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -4370,6 +4370,13 @@ static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym
OUTB(np, HS_PRT, HS_BUSY);
}
+#define sym_printk(lvl, tp, cp, fmt, v...) do { \
+ if (cp) \
+ scmd_printk(lvl, cp->cmd, fmt, ##v); \
+ else \
+ starget_printk(lvl, tp->starget, fmt, ##v); \
+} while (0)
+
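The helper guards against cp being NULL: several of the cases below (bad reselection, missing IDENTIFY) can fire with no command attached, and the old scmd_printk(..., cp->cmd, ...) calls would then dereference a NULL pointer; sym_printk() falls back to naming the target instead. The rewritten call sites read:

	/* cp may be NULL here; attribute the message to the target */
	sym_printk(KERN_WARNING, tp, cp,
		   "No MSG IN phase after reselection\n");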
/*
* chip exception handler for programmed interrupts.
*/
@@ -4415,7 +4422,7 @@ static void sym_int_sir(struct sym_hcb *np)
* been selected with ATN. We do not want to handle that.
*/
case SIR_SEL_ATN_NO_MSG_OUT:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No MSG OUT phase after selection with ATN\n");
goto out_stuck;
/*
@@ -4423,7 +4430,7 @@ static void sym_int_sir(struct sym_hcb *np)
* having reselected the initiator.
*/
case SIR_RESEL_NO_MSG_IN:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No MSG IN phase after reselection\n");
goto out_stuck;
/*
@@ -4431,7 +4438,7 @@ static void sym_int_sir(struct sym_hcb *np)
* an IDENTIFY.
*/
case SIR_RESEL_NO_IDENTIFY:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No IDENTIFY after reselection\n");
goto out_stuck;
/*
@@ -4460,7 +4467,7 @@ static void sym_int_sir(struct sym_hcb *np)
case SIR_RESEL_ABORTED:
np->lastmsg = np->msgout[0];
np->msgout[0] = M_NOOP;
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"message %x sent on bad reselection\n", np->lastmsg);
goto out;
/*
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index e09fe6ab3572..2ddd426323e9 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -109,3 +109,22 @@ config SCSI_UFS_HISI
Select this if you have UFS controller on Hisilicon chipset.
If unsure, say N.
+
+config SCSI_UFS_BSG
+ bool "Universal Flash Storage BSG device node"
+ depends on SCSI_UFSHCD
+ select BLK_DEV_BSGLIB
+ help
+	  Universal Flash Storage (UFS) is a SCSI transport specification for
+ accessing flash storage on digital cameras, mobile phones and
+ consumer electronic devices.
+ A UFS controller communicates with a UFS device by exchanging
+ UFS Protocol Information Units (UPIUs).
+ UPIUs can not only be used as a transport layer for the SCSI protocol
+ but are also used by the UFS native command set.
+ This transport driver supports exchanging UFS protocol information units
+ with a UFS device. See also the ufshcd driver, which is a SCSI driver
+ that supports UFS devices.
+
+ Select this if you need a bsg device node for your UFS controller.
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 2c50f03d8c4a..aca481329828 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -4,7 +4,8 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-objs := ufshcd.o ufs-sysfs.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 75ee5906b966..3aeadb14aae1 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -16,7 +16,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/phy/phy-qcom-ufs.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
@@ -70,20 +69,27 @@ static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
}
static int ufs_qcom_host_clk_get(struct device *dev,
- const char *name, struct clk **clk_out)
+ const char *name, struct clk **clk_out, bool optional)
{
struct clk *clk;
int err = 0;
clk = devm_clk_get(dev, name);
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- dev_err(dev, "%s: failed to get %s err %d",
- __func__, name, err);
- } else {
+ if (!IS_ERR(clk)) {
*clk_out = clk;
+ return 0;
}
+ err = PTR_ERR(clk);
+
+ if (optional && err == -ENOENT) {
+ *clk_out = NULL;
+ return 0;
+ }
+
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s err %d\n", name, err);
+
return err;
}
@@ -104,11 +110,9 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
if (!host->is_lane_clks_enabled)
return;
- if (host->hba->lanes_per_direction > 1)
- clk_disable_unprepare(host->tx_l1_sync_clk);
+ clk_disable_unprepare(host->tx_l1_sync_clk);
clk_disable_unprepare(host->tx_l0_sync_clk);
- if (host->hba->lanes_per_direction > 1)
- clk_disable_unprepare(host->rx_l1_sync_clk);
+ clk_disable_unprepare(host->rx_l1_sync_clk);
clk_disable_unprepare(host->rx_l0_sync_clk);
host->is_lane_clks_enabled = false;
@@ -132,24 +136,21 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
if (err)
goto disable_rx_l0;
- if (host->hba->lanes_per_direction > 1) {
- err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
host->rx_l1_sync_clk);
- if (err)
- goto disable_tx_l0;
+ if (err)
+ goto disable_tx_l0;
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+ err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
- }
+ if (err)
+ goto disable_rx_l1;
host->is_lane_clks_enabled = true;
goto out;
disable_rx_l1:
- if (host->hba->lanes_per_direction > 1)
- clk_disable_unprepare(host->rx_l1_sync_clk);
+ clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
@@ -163,25 +164,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
int err = 0;
struct device *dev = host->hba->dev;
- err = ufs_qcom_host_clk_get(dev,
- "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
+ err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
+ &host->rx_l0_sync_clk, false);
if (err)
goto out;
- err = ufs_qcom_host_clk_get(dev,
- "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
+ err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
+ &host->tx_l0_sync_clk, false);
if (err)
goto out;
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
- &host->rx_l1_sync_clk);
+ &host->rx_l1_sync_clk, false);
if (err)
goto out;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk);
+ &host->tx_l1_sync_clk, true);
}
out:
return err;
@@ -189,22 +190,9 @@ out:
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
u32 tx_lanes;
- int err = 0;
-
- err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
- if (err)
- goto out;
-
- err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
- __func__);
-out:
- return err;
+ return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
@@ -932,10 +920,8 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
{
u32 val;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
struct ufs_qcom_dev_params ufs_qcom_cap;
int ret = 0;
- int res = 0;
if (!dev_req_params) {
pr_err("%s: incoming dev_req_params is NULL\n", __func__);
@@ -1002,12 +988,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
}
val = ~(MAX_U32 << dev_req_params->lane_tx);
- res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
- if (res) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
- __func__, res);
- ret = res;
- }
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
@@ -1264,10 +1244,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
}
}
- /* update phy revision information before calling phy_init() */
- ufs_qcom_phy_save_controller_version(host->generic_phy,
- host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
-
err = ufs_qcom_init_lane_clks(host);
if (err)
goto out_variant_clear;
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 295f4bef6a0e..c114826316eb 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -129,11 +129,6 @@ enum {
MASK_CLK_NS_REG = 0xFFFC00,
};
-enum ufs_qcom_phy_init_type {
- UFS_PHY_INIT_FULL,
- UFS_PHY_INIT_CFG_RESTORE,
-};
-
/* QCOM UFS debug print bit mask */
#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0)
#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1)
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 14e5bf7af0bb..58087d3916d0 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -38,9 +38,9 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include <uapi/scsi/scsi_bsg_ufs.h>
-#define MAX_CDB_SIZE 16
-#define GENERAL_UPIU_REQUEST_SIZE 32
+#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
#define QUERY_DESC_HDR_SIZE 2
@@ -414,6 +414,7 @@ enum {
MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
MASK_RSP_EXCEPTION_EVENT = 0x10000,
MASK_TM_SERVICE_RESP = 0xFF,
+ MASK_TM_FUNC = 0xFF,
};
/* Task management service response */
@@ -433,65 +434,6 @@ enum ufs_dev_pwr_mode {
};
/**
- * struct utp_upiu_header - UPIU header structure
- * @dword_0: UPIU header DW-0
- * @dword_1: UPIU header DW-1
- * @dword_2: UPIU header DW-2
- */
-struct utp_upiu_header {
- __be32 dword_0;
- __be32 dword_1;
- __be32 dword_2;
-};
-
-/**
- * struct utp_upiu_cmd - Command UPIU structure
- * @data_transfer_len: Data Transfer Length DW-3
- * @cdb: Command Descriptor Block CDB DW-4 to DW-7
- */
-struct utp_upiu_cmd {
- __be32 exp_data_transfer_len;
- u8 cdb[MAX_CDB_SIZE];
-};
-
-/**
- * struct utp_upiu_query - upiu request buffer structure for
- * query request.
- * @opcode: command to perform B-0
- * @idn: a value that indicates the particular type of data B-1
- * @index: Index to further identify data B-2
- * @selector: Index to further identify data B-3
- * @reserved_osf: spec reserved field B-4,5
- * @length: number of descriptor bytes to read/write B-6,7
- * @value: Attribute value to be written DW-5
- * @reserved: spec reserved DW-6,7
- */
-struct utp_upiu_query {
- u8 opcode;
- u8 idn;
- u8 index;
- u8 selector;
- __be16 reserved_osf;
- __be16 length;
- __be32 value;
- __be32 reserved[2];
-};
-
-/**
- * struct utp_upiu_req - general upiu request structure
- * @header:UPIU header structure DW-0 to DW-2
- * @sc: fields structure for scsi command DW-3 to DW-7
- * @qr: fields structure for query request DW-3 to DW-7
- */
-struct utp_upiu_req {
- struct utp_upiu_header header;
- union {
- struct utp_upiu_cmd sc;
- struct utp_upiu_query qr;
- };
-};
-
-/**
* struct utp_cmd_rsp - Response UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
* @reserved: Reserved double words DW-4 to DW-7
@@ -520,36 +462,6 @@ struct utp_upiu_rsp {
};
/**
- * struct utp_upiu_task_req - Task request UPIU structure
- * @header - UPIU header structure DW0 to DW-2
- * @input_param1: Input parameter 1 DW-3
- * @input_param2: Input parameter 2 DW-4
- * @input_param3: Input parameter 3 DW-5
- * @reserved: Reserved double words DW-6 to DW-7
- */
-struct utp_upiu_task_req {
- struct utp_upiu_header header;
- __be32 input_param1;
- __be32 input_param2;
- __be32 input_param3;
- __be32 reserved[2];
-};
-
-/**
- * struct utp_upiu_task_rsp - Task Management Response UPIU structure
- * @header: UPIU header structure DW0-DW-2
- * @output_param1: Ouput parameter 1 DW3
- * @output_param2: Output parameter 2 DW4
- * @reserved: Reserved double words DW-5 to DW-7
- */
-struct utp_upiu_task_rsp {
- struct utp_upiu_header header;
- __be32 output_param1;
- __be32 output_param2;
- __be32 reserved[3];
-};
-
-/**
* struct ufs_query_req - parameters for building a query request
* @query_func: UPIU header query function
* @upiu_req: the query request data
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
new file mode 100644
index 000000000000..e5f8e54bf644
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bsg endpoint that supports UPIUs
+ *
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#include "ufs_bsg.h"
+
+static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
+ struct utp_upiu_query *qr)
+{
+ int desc_size = be16_to_cpu(qr->length);
+ int desc_id = qr->idn;
+ int ret;
+
+ if (desc_size <= 0)
+ return -EINVAL;
+
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
+ if (ret || !*desc_len)
+ return -EINVAL;
+
+ *desc_len = min_t(int, *desc_len, desc_size);
+
+ return 0;
+}
+
+static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
+ unsigned int request_len,
+ unsigned int reply_len,
+ int desc_len, enum query_opcode desc_op)
+{
+ int min_req_len = sizeof(struct ufs_bsg_request);
+ int min_rsp_len = sizeof(struct ufs_bsg_reply);
+
+ if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
+ min_req_len += desc_len;
+
+ if (min_req_len > request_len || min_rsp_len > reply_len) {
+ dev_err(hba->dev, "not enough space assigned\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ufs_bsg_verify_query_params(struct ufs_hba *hba,
+ struct ufs_bsg_request *bsg_request,
+ unsigned int request_len,
+ unsigned int reply_len,
+				       uint8_t **desc_buff, int *desc_len,
+ enum query_opcode desc_op)
+{
+ struct utp_upiu_query *qr;
+
+ if (desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
+ dev_err(hba->dev, "unsupported opcode %d\n", desc_op);
+ return -ENOTSUPP;
+ }
+
+ if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC)
+ goto out;
+
+ qr = &bsg_request->upiu_req.qr;
+ if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
+ dev_err(hba->dev, "Illegal desc size\n");
+ return -EINVAL;
+ }
+
+ if (ufs_bsg_verify_query_size(hba, request_len, reply_len, *desc_len,
+ desc_op))
+ return -EINVAL;
+
+	*desc_buff = (uint8_t *)(bsg_request + 1);
+
+out:
+ return 0;
+}
+
+static int ufs_bsg_request(struct bsg_job *job)
+{
+ struct ufs_bsg_request *bsg_request = job->request;
+ struct ufs_bsg_reply *bsg_reply = job->reply;
+ struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
+ unsigned int req_len = job->request_len;
+ unsigned int reply_len = job->reply_len;
+ struct uic_command uc = {};
+ int msgcode;
+ uint8_t *desc_buff = NULL;
+ int desc_len = 0;
+ enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
+ int ret;
+
+ ret = ufs_bsg_verify_query_size(hba, req_len, reply_len, 0, desc_op);
+ if (ret)
+ goto out;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ msgcode = bsg_request->msgcode;
+ switch (msgcode) {
+ case UPIU_TRANSACTION_QUERY_REQ:
+ desc_op = bsg_request->upiu_req.qr.opcode;
+ ret = ufs_bsg_verify_query_params(hba, bsg_request, req_len,
+						    reply_len, &desc_buff,
+ &desc_len, desc_op);
+ if (ret)
+ goto out;
+
+ /* fall through */
+ case UPIU_TRANSACTION_NOP_OUT:
+ case UPIU_TRANSACTION_TASK_REQ:
+ ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
+ &bsg_reply->upiu_rsp, msgcode,
+ desc_buff, &desc_len, desc_op);
+ if (ret)
+ dev_err(hba->dev,
+				"exec raw upiu: error code %d\n", ret);
+
+ break;
+ case UPIU_TRANSACTION_UIC_CMD:
+ memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
+ ret = ufshcd_send_uic_cmd(hba, &uc);
+ if (ret)
+ dev_dbg(hba->dev,
+ "send uic cmd: error code %d\n", ret);
+
+ memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
+
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
+
+ break;
+ }
+
+out:
+ bsg_reply->result = ret;
+ job->reply_len = sizeof(struct ufs_bsg_reply) +
+ bsg_reply->reply_payload_rcv_len;
+
+ bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+
+ return ret;
+}
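For context, userspace reaches this handler through the generic bsg sg_io_v4 ioctl on the node that ufs_bsg_probe() registers below. A minimal sketch of a caller (assumptions: the uapi header scsi/scsi_bsg_ufs.h added by this series, a /dev/bsg/ufs-bsg node, and the NOP OUT msgcode value 0x00 from the UFS spec; error handling trimmed):

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/bsg.h>
	#include <scsi/sg.h>
	#include <scsi/scsi_bsg_ufs.h>

	static int ufs_bsg_send_nop(const char *node)
	{
		struct ufs_bsg_request req = { .msgcode = 0x00 /* NOP OUT */ };
		struct ufs_bsg_reply rsp = { 0 };
		struct sg_io_v4 io = {
			.guard            = 'Q',
			.protocol         = BSG_PROTOCOL_SCSI,
			.subprotocol      = BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
			.request_len      = sizeof(req),
			.request          = (uint64_t)(uintptr_t)&req,
			.max_response_len = sizeof(rsp),
			.response         = (uint64_t)(uintptr_t)&rsp,
		};
		int fd = open(node, O_RDWR);	/* e.g. /dev/bsg/ufs-bsg */

		if (fd < 0 || ioctl(fd, SG_IO, &io) < 0)
			return -1;
		return (int)rsp.result;
	}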
+
+/**
+ * ufs_bsg_remove - detach and remove the added ufs-bsg node
+ *
+ * Should be called when unloading the driver.
+ */
+void ufs_bsg_remove(struct ufs_hba *hba)
+{
+ struct device *bsg_dev = &hba->bsg_dev;
+
+ if (!hba->bsg_queue)
+ return;
+
+ bsg_unregister_queue(hba->bsg_queue);
+
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+}
+
+static inline void ufs_bsg_node_release(struct device *dev)
+{
+ put_device(dev->parent);
+}
+
+/**
+ * ufs_bsg_probe - Add ufs bsg device node
+ * @hba: per adapter object
+ *
+ * Called during initial loading of the driver, and before scsi_scan_host.
+ */
+int ufs_bsg_probe(struct ufs_hba *hba)
+{
+ struct device *bsg_dev = &hba->bsg_dev;
+ struct Scsi_Host *shost = hba->host;
+ struct device *parent = &shost->shost_gendev;
+ struct request_queue *q;
+ int ret;
+
+ device_initialize(bsg_dev);
+
+ bsg_dev->parent = get_device(parent);
+ bsg_dev->release = ufs_bsg_node_release;
+
+ dev_set_name(bsg_dev, "ufs-bsg");
+
+ ret = device_add(bsg_dev);
+ if (ret)
+ goto out;
+
+ q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, 0);
+ if (IS_ERR(q)) {
+ ret = PTR_ERR(q);
+ goto out;
+ }
+
+ hba->bsg_queue = q;
+
+ return 0;
+
+out:
+	dev_err(bsg_dev, "failed to initialize bsg dev %d\n", shost->host_no);
+ put_device(bsg_dev);
+ return ret;
+}
diff --git a/drivers/scsi/ufs/ufs_bsg.h b/drivers/scsi/ufs/ufs_bsg.h
new file mode 100644
index 000000000000..d09918758631
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_bsg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef UFS_BSG_H
+#define UFS_BSG_H
+
+#include <linux/bsg-lib.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include "ufshcd.h"
+#include "ufs.h"
+
+#ifdef CONFIG_SCSI_UFS_BSG
+void ufs_bsg_remove(struct ufs_hba *hba);
+int ufs_bsg_probe(struct ufs_hba *hba);
+#else
+static inline void ufs_bsg_remove(struct ufs_hba *hba) {}
+static inline int ufs_bsg_probe(struct ufs_hba *hba) { return 0; }
+#endif
+
+#endif /* UFS_BSG_H */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c55f38ec391c..23d7cca36ff0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -46,6 +46,7 @@
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
+#include "ufs_bsg.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -326,14 +327,11 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
const char *str)
{
- struct utp_task_req_desc *descp;
- struct utp_upiu_task_req *task_req;
int off = (int)tag - hba->nutrs;
+ struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
- descp = &hba->utmrdl_base_addr[off];
- task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
- trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
- &task_req->input_param1);
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
+ &descp->input_param1);
}
static void ufshcd_add_command_trace(struct ufs_hba *hba,
@@ -475,22 +473,13 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
- struct utp_task_req_desc *tmrdp;
int tag;
for_each_set_bit(tag, &bitmap, hba->nutmrs) {
- tmrdp = &hba->utmrdl_base_addr[tag];
+ struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
+
dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
- ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
- sizeof(struct request_desc_header));
- dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
- tag);
- ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
- sizeof(struct utp_upiu_req));
- dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
- tag);
- ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
- sizeof(struct utp_task_req_desc));
+ ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
}
}
@@ -646,19 +635,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
- * @task_req_descp: pointer to utp_task_req_desc structure
- *
- * This function is used to get the OCS field from UTMRD
- * Returns the OCS field in the UTMRD
- */
-static inline int
-ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
-{
- return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
-}
-
-/**
* ufshcd_get_tm_free_slot - get a free slot for task management request
* @hba: per adapter instance
* @free_slot: pointer to variable with available slot value
@@ -1691,8 +1667,9 @@ static void __ufshcd_release(struct ufs_hba *hba)
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- schedule_delayed_work(&hba->clk_gating.gate_work,
- msecs_to_jiffies(hba->clk_gating.delay_ms));
+ queue_delayed_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.gate_work,
+ msecs_to_jiffies(hba->clk_gating.delay_ms));
}
void ufshcd_release(struct ufs_hba *hba)
@@ -1763,6 +1740,34 @@ out:
return count;
}
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+{
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+ hba->host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ ufshcd_clkscaling_init_sysfs(hba);
+}
+
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
+}
+
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
char wq_name[sizeof("ufs_clk_gating_00")];
@@ -2055,8 +2060,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
*
* Returns 0 only if success.
*/
-static int
-ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
unsigned long flags;
@@ -2238,8 +2242,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
ucd_req_ptr->sc.exp_data_transfer_len =
cpu_to_be32(lrbp->cmd->sdb.length);
- cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
- memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
+ cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
+ memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
@@ -2258,7 +2262,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
struct ufs_query *query = &hba->dev_cmd.query;
u16 len = be16_to_cpu(query->request.upiu_req.length);
- u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
/* Query request header */
ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -2280,7 +2283,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
- memcpy(descp, query->descriptor, len);
+ memcpy(ucd_req_ptr + 1, query->descriptor, len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
@@ -4601,46 +4604,6 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
}
/**
- * ufshcd_task_req_compl - handle task management request completion
- * @hba: per adapter instance
- * @index: index of the completed request
- * @resp: task management service response
- *
- * Returns non-zero value on error, zero on success
- */
-static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
-{
- struct utp_task_req_desc *task_req_descp;
- struct utp_upiu_task_rsp *task_rsp_upiup;
- unsigned long flags;
- int ocs_value;
- int task_result;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
-
- /* Clear completed tasks from outstanding_tasks */
- __clear_bit(index, &hba->outstanding_tasks);
-
- task_req_descp = hba->utmrdl_base_addr;
- ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
-
- if (ocs_value == OCS_SUCCESS) {
- task_rsp_upiup = (struct utp_upiu_task_rsp *)
- task_req_descp[index].task_rsp_upiu;
- task_result = be32_to_cpu(task_rsp_upiup->output_param1);
- task_result = task_result & MASK_TM_SERVICE_RESP;
- if (resp)
- *resp = (u8)task_result;
- } else {
- dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
- __func__, ocs_value);
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- return ocs_value;
-}
-
-/**
* ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
* @lrbp: pointer to local reference block of completed command
* @scsi_status: SCSI command status
@@ -5597,28 +5560,12 @@ out:
return err;
}
-/**
- * ufshcd_issue_tm_cmd - issues task management commands to controller
- * @hba: per adapter instance
- * @lun_id: LUN ID to which TM command is sent
- * @task_id: task ID to which the TM command is applicable
- * @tm_function: task management function opcode
- * @tm_response: task management service response return value
- *
- * Returns non-zero value on error, zero on success.
- */
-static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
- u8 tm_function, u8 *tm_response)
+static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+ struct utp_task_req_desc *treq, u8 tm_function)
{
- struct utp_task_req_desc *task_req_descp;
- struct utp_upiu_task_req *task_req_upiup;
- struct Scsi_Host *host;
+ struct Scsi_Host *host = hba->host;
unsigned long flags;
- int free_slot;
- int err;
- int task_tag;
-
- host = hba->host;
+ int free_slot, task_tag, err;
/*
* Get free slot, sleep if slots are unavailable.
@@ -5629,30 +5576,11 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags);
- task_req_descp = hba->utmrdl_base_addr;
- task_req_descp += free_slot;
-
- /* Configure task request descriptor */
- task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- task_req_descp->header.dword_2 =
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
- /* Configure task request UPIU */
- task_req_upiup =
- (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
task_tag = hba->nutrs + free_slot;
- task_req_upiup->header.dword_0 =
- UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
- lun_id, task_tag);
- task_req_upiup->header.dword_1 =
- UPIU_HEADER_DWORD(0, tm_function, 0, 0);
- /*
- * The host shall provide the same value for LUN field in the basic
- * header and for Input Parameter.
- */
- task_req_upiup->input_param1 = cpu_to_be32(lun_id);
- task_req_upiup->input_param2 = cpu_to_be32(task_id);
+ treq->req_header.dword_0 |= cpu_to_be32(task_tag);
+
+ memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
/* send command to the controller */
@@ -5682,8 +5610,15 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
__func__, free_slot);
err = -ETIMEDOUT;
} else {
- err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+ err = 0;
+ memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __clear_bit(free_slot, &hba->outstanding_tasks);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
}
clear_bit(free_slot, &hba->tm_condition);
@@ -5695,6 +5630,228 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
}
/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
+ *
+ * Returns non-zero value on error, zero on success.
+ */
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+ u8 tm_function, u8 *tm_response)
+{
+ struct utp_task_req_desc treq = { { 0 }, };
+ int ocs_value, err;
+
+ /* Configure task request descriptor */
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ /* Configure task request UPIU */
+ treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
+ cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
+ treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
+
+ /*
+ * The host shall provide the same value for LUN field in the basic
+ * header and for Input Parameter.
+ */
+ treq.input_param1 = cpu_to_be32(lun_id);
+ treq.input_param2 = cpu_to_be32(task_id);
+
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
+ if (err == -ETIMEDOUT)
+ return err;
+
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ if (ocs_value != OCS_SUCCESS)
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+ __func__, ocs_value);
+ else if (tm_response)
+ *tm_response = be32_to_cpu(treq.output_param1) &
+ MASK_TM_SERVICE_RESP;
+ return err;
+}
+
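A note on the open-coded header packing above: UPIU header DW0 carries the transaction type, flags, LUN and task tag in bytes 0-3 (big-endian), and the task tag itself is OR-ed in later by __ufshcd_issue_tm_cmd(). The two forms below therefore build the same descriptor header, as the code removed earlier in this hunk confirms:

/* UPIU header DW0, big-endian byte order:
 *   byte 0: transaction type    byte 1: flags
 *   byte 2: LUN                 byte 3: task tag
 */
treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
			  cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
/* ...is equivalent to the UPIU_HEADER_DWORD() helper form: */
treq.req_header.dword_0 = UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ,
					    0, lun_id, 0);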
+/**
+ * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
+ * @hba: per-adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply
+ * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
+ * @buff_len: descriptor size, 0 if NA
+ * @desc_op: descriptor operation
+ *
+ * These types of requests use the UTP Transfer Request Descriptor (UTRD).
+ * Therefore, they "ride" the device management infrastructure: they use its
+ * tag and tasks work queues.
+ *
+ * Since there is only one available tag for device management commands,
+ * the caller is expected to hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ u8 *desc_buff, int *buff_len,
+ int cmd_type,
+ enum query_opcode desc_op)
+{
+ struct ufshcd_lrb *lrbp;
+ int err = 0;
+ int tag;
+ struct completion wait;
+ unsigned long flags;
+ u32 upiu_flags;
+
+ down_read(&hba->clk_scaling_lock);
+
+ wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+
+ lrbp->cmd = NULL;
+ lrbp->sense_bufflen = 0;
+ lrbp->sense_buffer = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = 0;
+ lrbp->intr_cmd = true;
+ hba->dev_cmd.type = cmd_type;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ case UFSHCI_VERSION_11:
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ break;
+ default:
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ break;
+ }
+
+ /* update the task tag in the request upiu */
+ req_upiu->header.dword_0 |= cpu_to_be32(tag);
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+
+ /* just copy the upiu request as it is */
+ memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
+ if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
+ /* The Data Segment Area is optional, depending upon the query
+ * function value. For WRITE DESCRIPTOR, the data segment
+ * follows right after the TSF (Transaction Specific Fields).
+ */
+ memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
+ *buff_len = 0;
+ }
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+ hba->dev_cmd.complete = &wait;
+
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * Ignore the return value here - ufshcd_check_query_response() is
+ * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
+ * Read the response directly, ignoring all errors.
+ */
+ ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
+
+ /* just copy the upiu response as it is */
+ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+
+ ufshcd_put_dev_cmd_tag(hba, tag);
+ wake_up(&hba->dev_cmd.tag_wq);
+ up_read(&hba->clk_scaling_lock);
+ return err;
+}
+
+/**
+ * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
+ * @hba: per-adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply - only 8 DWs, as we do not support SCSI commands
+ * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
+ * @buff_len: descriptor size, 0 if NA
+ * @desc_op: descriptor operation
+ *
+ * Supports UTP Transfer requests (NOP and query) and UTP Task
+ * Management requests.
+ * It is up to the caller to fill the UPIU content properly, as it will
+ * be copied without any further input validation.
+ */
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ int msgcode,
+ u8 *desc_buff, int *buff_len,
+ enum query_opcode desc_op)
+{
+ int err;
+ int cmd_type = DEV_CMD_TYPE_QUERY;
+ struct utp_task_req_desc treq = { { 0 }, };
+ int ocs_value;
+ u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
+
+ if (desc_buff && desc_op != UPIU_QUERY_OPCODE_WRITE_DESC) {
+ err = -ENOTSUPP;
+ goto out;
+ }
+
+ switch (msgcode) {
+ case UPIU_TRANSACTION_NOP_OUT:
+ cmd_type = DEV_CMD_TYPE_NOP;
+ /* fall through */
+ case UPIU_TRANSACTION_QUERY_REQ:
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
+ desc_buff, buff_len,
+ cmd_type, desc_op);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ break;
+ case UPIU_TRANSACTION_TASK_REQ:
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
+
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
+ if (err == -ETIMEDOUT)
+ break;
+
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ if (ocs_value != OCS_SUCCESS) {
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
+ ocs_value);
+ break;
+ }
+
+ memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
+
+ break;
+ default:
+ err = -EINVAL;
+
+ break;
+ }
+
+out:
+ return err;
+}
+
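For illustration, a hypothetical in-kernel caller of this API (the real consumer is the ufs-bsg request handler above) could issue a raw NOP OUT as sketched below; the function name is an assumption, not part of this series:

/* Hedged sketch: issue a raw NOP OUT through the new API. */
static int ufs_example_raw_nop(struct ufs_hba *hba)
{
	struct utp_upiu_req req = {};
	struct utp_upiu_req rsp = {};
	int buff_len = 0;

	/* The task tag is filled in by ufshcd_issue_devman_upiu_cmd() */
	req.header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0);

	return ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
					UPIU_TRANSACTION_NOP_OUT,
					NULL, &buff_len,
					UPIU_QUERY_OPCODE_NOP);
}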
+/**
* ufshcd_eh_device_reset_handler - device reset handler registered to
* scsi layer.
* @cmd: SCSI command pointer
@@ -6652,6 +6809,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
hba->clk_scaling.is_allowed = true;
}
+ ufs_bsg_probe(hba);
+
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
}
@@ -6666,6 +6825,7 @@ out:
*/
if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
pm_runtime_put_sync(hba->dev);
+ ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
}
@@ -7201,12 +7361,9 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
ufshcd_suspend_clkscaling(hba);
- if (ufshcd_is_clkscaling_supported(hba)) {
+ if (ufshcd_is_clkscaling_supported(hba))
if (hba->devfreq)
ufshcd_suspend_clkscaling(hba);
- destroy_workqueue(hba->clk_scaling.workq);
- ufshcd_devfreq_remove(hba);
- }
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -7875,12 +8032,14 @@ EXPORT_SYMBOL(ufshcd_shutdown);
*/
void ufshcd_remove(struct ufs_hba *hba)
{
+ ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
+ ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
if (ufshcd_is_clkscaling_supported(hba))
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
@@ -8027,7 +8186,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
host->unique_id = host->host_no;
- host->max_cmd_len = MAX_CDB_SIZE;
+ host->max_cmd_len = UFS_CDB_SIZE;
hba->max_pwr_info.is_valid = false;
@@ -8052,6 +8211,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_init_clk_gating(hba);
+ ufshcd_init_clk_scaling(hba);
+
/*
* In order to avoid any spurious interrupt immediately after
* registering UFS controller interrupt handler, clear any pending UFS
@@ -8090,21 +8251,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_remove_scsi_host;
}
- if (ufshcd_is_clkscaling_supported(hba)) {
- char wq_name[sizeof("ufs_clkscaling_00")];
-
- INIT_WORK(&hba->clk_scaling.suspend_work,
- ufshcd_clk_scaling_suspend_work);
- INIT_WORK(&hba->clk_scaling.resume_work,
- ufshcd_clk_scaling_resume_work);
-
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
-
- ufshcd_clkscaling_init_sysfs(hba);
- }
-
/*
* Set the default power management level for runtime and system PM.
* Default power saving mode is to keep UFS link in Hibern8 state
@@ -8142,6 +8288,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
+ ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
out_disable:
hba->is_irq_enabled = false;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 33fdd3f281ae..1a1c2b487a4e 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -702,6 +702,9 @@ struct ufs_hba {
struct rw_semaphore clk_scaling_lock;
struct ufs_desc_size desc_size;
atomic_t scsi_block_reqs_cnt;
+
+ struct device bsg_dev;
+ struct request_queue *bsg_queue;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -892,6 +895,15 @@ int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ int msgcode,
+ u8 *desc_buff, int *buff_len,
+ enum query_opcode desc_op);
+
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index bb5d9c7f3353..6fa889de5ee5 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -433,22 +433,25 @@ struct utp_transfer_req_desc {
__le16 prd_table_offset;
};
-/**
- * struct utp_task_req_desc - UTMRD structure
- * @header: UTMRD header DW-0 to DW-3
- * @task_req_upiu: Pointer to task request UPIU DW-4 to DW-11
- * @task_rsp_upiu: Pointer to task response UPIU DW12 to DW-19
+/*
+ * UTMRD - UTP Task Management Request Descriptor structure.
*/
struct utp_task_req_desc {
-
/* DW 0-3 */
struct request_desc_header header;
- /* DW 4-11 */
- __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
-
- /* DW 12-19 */
- __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+ /* DW 4-11 - Task request UPIU structure */
+ struct utp_upiu_header req_header;
+ __be32 input_param1;
+ __be32 input_param2;
+ __be32 input_param3;
+ __be32 __reserved1[2];
+
+ /* DW 12-19 - Task Management Response UPIU structure */
+ struct utp_upiu_header rsp_header;
+ __be32 output_param1;
+ __be32 output_param2;
+ __be32 __reserved2[3];
};
#endif /* End of Header */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 0cd947f78b5b..6e491023fdd8 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -372,9 +372,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
pvscsi_create_sg(ctx, sg, segs);
e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
- ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
- SGL_SIZE, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) {
+ ctx->sglPA = dma_map_single(&adapter->dev->dev,
+ ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
scmd_printk(KERN_ERR, cmd,
"vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
scsi_dma_unmap(cmd);
@@ -389,9 +389,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
* In case there is no S/G list, scsi_sglist points
* directly to the buffer.
*/
- ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+ ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
cmd->sc_data_direction);
- if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) {
+ if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
scmd_printk(KERN_ERR, cmd,
"vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
return -ENOMEM;
@@ -417,23 +417,23 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
if (count != 0) {
scsi_dma_unmap(cmd);
if (ctx->sglPA) {
- pci_unmap_single(adapter->dev, ctx->sglPA,
- SGL_SIZE, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
+ SGL_SIZE, DMA_TO_DEVICE);
ctx->sglPA = 0;
}
} else
- pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
- cmd->sc_data_direction);
+ dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
+ bufflen, cmd->sc_data_direction);
}
if (cmd->sense_buffer)
- pci_unmap_single(adapter->dev, ctx->sensePA,
- SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
- adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
- &adapter->ringStatePA);
+ adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+ &adapter->ringStatePA, GFP_KERNEL);
if (!adapter->rings_state)
return -ENOMEM;
@@ -441,17 +441,17 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
pvscsi_ring_pages);
adapter->req_depth = adapter->req_pages
* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
- adapter->req_ring = pci_alloc_consistent(adapter->dev,
- adapter->req_pages * PAGE_SIZE,
- &adapter->reqRingPA);
+ adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
+ adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
+ GFP_KERNEL);
if (!adapter->req_ring)
return -ENOMEM;
adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
pvscsi_ring_pages);
- adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
- adapter->cmp_pages * PAGE_SIZE,
- &adapter->cmpRingPA);
+ adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
+ adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
+ GFP_KERNEL);
if (!adapter->cmp_ring)
return -ENOMEM;
@@ -464,9 +464,9 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
pvscsi_msg_ring_pages);
- adapter->msg_ring = pci_alloc_consistent(adapter->dev,
- adapter->msg_pages * PAGE_SIZE,
- &adapter->msgRingPA);
+ adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
+ adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
+ GFP_KERNEL);
if (!adapter->msg_ring)
return -ENOMEM;
BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
@@ -708,10 +708,10 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
e->lun[1] = sdev->lun;
if (cmd->sense_buffer) {
- ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) {
+ ctx->sensePA = dma_map_single(&adapter->dev->dev,
+ cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
scmd_printk(KERN_ERR, cmd,
"vmw_pvscsi: Failed to map sense buffer for DMA.\n");
ctx->sensePA = 0;
@@ -740,9 +740,9 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
if (cmd->sense_buffer) {
- pci_unmap_single(adapter->dev, ctx->sensePA,
+ dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ctx->sensePA = 0;
}
return -ENOMEM;
@@ -1218,21 +1218,21 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
}
if (adapter->rings_state)
- pci_free_consistent(adapter->dev, PAGE_SIZE,
+ dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
adapter->rings_state, adapter->ringStatePA);
if (adapter->req_ring)
- pci_free_consistent(adapter->dev,
+ dma_free_coherent(&adapter->dev->dev,
adapter->req_pages * PAGE_SIZE,
adapter->req_ring, adapter->reqRingPA);
if (adapter->cmp_ring)
- pci_free_consistent(adapter->dev,
+ dma_free_coherent(&adapter->dev->dev,
adapter->cmp_pages * PAGE_SIZE,
adapter->cmp_ring, adapter->cmpRingPA);
if (adapter->msg_ring)
- pci_free_consistent(adapter->dev,
+ dma_free_coherent(&adapter->dev->dev,
adapter->msg_pages * PAGE_SIZE,
adapter->msg_ring, adapter->msgRingPA);
}
@@ -1291,8 +1291,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
u32 numPhys = 16;
dev = pvscsi_dev(adapter);
- config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
- &configPagePA);
+ config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+ &configPagePA, GFP_KERNEL);
if (!config_page) {
dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
goto exit;
@@ -1326,7 +1326,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
} else
dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
header->hostStatus, header->scsiStatus);
- pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+ dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
+ configPagePA);
exit:
return numPhys;
}
@@ -1346,11 +1347,9 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_enable_device(pdev))
return error;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
- } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+ } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
} else {
printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index bb70882e6b56..ca8e3abeb2c7 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -9,8 +9,6 @@
*
* Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
* Blizzard 1230 DMA and probe function fixes
- *
- * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
*/
/*
* ZORRO bus code from:
@@ -159,7 +157,6 @@ struct fastlane_dma_registers {
struct zorro_esp_priv {
struct esp *esp; /* our ESP instance - for Scsi_host* */
void __iomem *board_base; /* virtual address (Zorro III board) */
- int error; /* PIO error flag */
int zorro3; /* board is Zorro III */
unsigned char ctrl_data; /* shadow copy of ctrl_reg */
};
@@ -182,30 +179,6 @@ static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
return readb(esp->regs + (reg * 4UL));
}
-static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int zorro_esp_irq_pending(struct esp *esp)
{
/* check ESP status register; DMA has no status reg. */
@@ -245,7 +218,7 @@ static int fastlane_esp_irq_pending(struct esp *esp)
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
u32 dma_len)
{
- return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len;
+ return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}
static void zorro_esp_reset_dma(struct esp *esp)
@@ -274,192 +247,29 @@ static void fastlane_esp_dma_invalidate(struct esp *esp)
z_writel(0, zep->board_base);
}
-/*
- * Programmed IO routines follow.
- */
-
-static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
-{
- int i = 500000;
-
- do {
- unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
- & ESP_FF_FBYTES;
-
- if (fbytes)
- return fbytes;
-
- udelay(2);
- } while (--i);
-
- pr_err("FIFO is empty (sreg %02x)\n",
- zorro_esp_read8(esp, ESP_STATUS));
- return 0;
-}
-
-static inline int zorro_esp_wait_for_intr(struct esp *esp)
-{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
- int i = 500000;
-
- do {
- esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
- if (esp->sreg & ESP_STAT_INTR)
- return 0;
-
- udelay(2);
- } while (--i);
-
- pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
- zep->error = 1;
- return 1;
-}
-
-/*
- * PIO macros as used in mac_esp.c.
- * Note that addr and fifo arguments are local-scope variables declared
- * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
- * and addr and fifo are referenced in each use of the macros so there
- * is no need to pass them as macro parameters.
- */
-#define ZORRO_ESP_PIO_LOOP(operands, reg1) \
- asm volatile ( \
- "1: moveb " operands "\n" \
- " subqw #1,%1 \n" \
- " jbne 1b \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo));
-
-#define ZORRO_ESP_PIO_FILL(operands, reg1) \
- asm volatile ( \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " subqw #8,%1 \n" \
- " subqw #8,%1 \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo));
-
-#define ZORRO_ESP_FIFO_SIZE 16
-
-static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
- u32 dma_count, int write, u8 cmd)
-{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
- u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
- u8 phase = esp->sreg & ESP_STAT_PMASK;
-
- cmd &= ~ESP_CMD_DMA;
-
- if (write) {
- u8 *dst = (u8 *)addr;
- u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
-
- scsi_esp_cmd(esp, cmd);
-
- while (1) {
- if (!zorro_esp_wait_for_fifo(esp))
- break;
-
- *dst++ = zorro_esp_read8(esp, ESP_FDATA);
- --esp_count;
-
- if (!esp_count)
- break;
-
- if (zorro_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
- if (esp->ireg & mask) {
- zep->error = 1;
- break;
- }
-
- if (phase == ESP_MIP)
- scsi_esp_cmd(esp, ESP_CMD_MOK);
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- } else { /* unused, as long as we only handle MIP here */
- scsi_esp_cmd(esp, ESP_CMD_FLUSH);
-
- if (esp_count >= ZORRO_ESP_FIFO_SIZE)
- ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
- else
- ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)
-
- scsi_esp_cmd(esp, cmd);
-
- while (esp_count) {
- unsigned int n;
-
- if (zorro_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
- if (esp->ireg & ~ESP_INTR_BSERV) {
- zep->error = 1;
- break;
- }
-
- n = ZORRO_ESP_FIFO_SIZE -
- (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
- if (n > esp_count)
- n = esp_count;
-
- if (n == ZORRO_ESP_FIFO_SIZE)
- ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
- else {
- esp_count -= n;
- ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
- }
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- }
-}
-
/* Blizzard 1230/60 SCSI-IV DMA */
static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/*
* Use PIO if transferring message bytes to esp->command_block_dma.
* PIO requires a virtual address, so substitute esp->command_block
* for addr.
*/
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ /* Clear the results of a possible prior esp->ops->send_dma_cmd() */
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
if (write)
/* DMA receive */
dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -484,7 +294,6 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
scsi_esp_cmd(esp, ESP_CMD_DMA);
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
scsi_esp_cmd(esp, cmd);
}
@@ -494,18 +303,19 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
if (write)
/* DMA receive */
dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -529,7 +339,6 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
scsi_esp_cmd(esp, ESP_CMD_DMA);
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
scsi_esp_cmd(esp, cmd);
}
@@ -539,18 +348,19 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
if (write)
/* DMA receive */
dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -574,7 +384,6 @@ static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
scsi_esp_cmd(esp, ESP_CMD_DMA);
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
scsi_esp_cmd(esp, cmd);
}
@@ -589,17 +398,18 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
u8 phase = esp->sreg & ESP_STAT_PMASK;
unsigned char *ctrl_data = &zep->ctrl_data;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
if (write) {
/* DMA receive */
@@ -635,21 +445,21 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
if (write) {
/* DMA receive */
@@ -681,17 +491,18 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
u8 phase = esp->sreg & ESP_STAT_PMASK;
unsigned char *ctrl_data = &zep->ctrl_data;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
if (write) {
/* DMA receive */
@@ -724,14 +535,7 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
static int zorro_esp_dma_error(struct esp *esp)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
-
- /* check for error in case we've been doing PIO */
- if (zep->error == 1)
- return 1;
-
- /* do nothing - there seems to be no way to check for DMA errors */
- return 0;
+ return esp->send_cmd_error;
}
/* per-board ESP driver ops */
@@ -739,10 +543,6 @@ static int zorro_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops blz1230_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -755,10 +555,6 @@ static const struct esp_driver_ops blz1230_esp_ops = {
static const struct esp_driver_ops blz1230II_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -771,10 +567,6 @@ static const struct esp_driver_ops blz1230II_esp_ops = {
static const struct esp_driver_ops blz2060_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -787,10 +579,6 @@ static const struct esp_driver_ops blz2060_esp_ops = {
static const struct esp_driver_ops cyber_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = cyber_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -803,10 +591,6 @@ static const struct esp_driver_ops cyber_esp_ops = {
static const struct esp_driver_ops cyberII_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -819,10 +603,6 @@ static const struct esp_driver_ops cyberII_esp_ops = {
static const struct esp_driver_ops fastlane_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = fastlane_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -1039,6 +819,8 @@ static int zorro_esp_probe(struct zorro_dev *z,
goto fail_unmap_fastlane;
}
+ esp->fifo_reg = esp->regs + ESP_FDATA * 4;
+
/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
if (zdd->scsi_option) {
zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
@@ -1082,7 +864,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
}
/* register the chip */
- err = scsi_esp_register(esp, &z->dev);
+ err = scsi_esp_register(esp);
if (err) {
err = -ENOMEM;
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 95b00d28ad6e..55eda5863a6b 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slimbus.h>
#include "slimbus.h"
@@ -32,6 +33,10 @@ static int slim_device_match(struct device *dev, struct device_driver *drv)
struct slim_device *sbdev = to_slim_device(dev);
struct slim_driver *sbdrv = to_slim_driver(drv);
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
return !!slim_match(sbdrv->id_table, sbdev);
}
@@ -39,8 +44,23 @@ static int slim_device_probe(struct device *dev)
{
struct slim_device *sbdev = to_slim_device(dev);
struct slim_driver *sbdrv = to_slim_driver(dev->driver);
+ int ret;
- return sbdrv->probe(sbdev);
+ ret = sbdrv->probe(sbdev);
+ if (ret)
+ return ret;
+
+ /* try getting the logical address after probe */
+ ret = slim_get_logical_addr(sbdev);
+ if (!ret) {
+ if (sbdrv->device_status)
+ sbdrv->device_status(sbdev, sbdev->status);
+ } else {
+ dev_err(&sbdev->dev, "Failed to get logical address\n");
+ ret = -EPROBE_DEFER;
+ }
+
+ return ret;
}
static int slim_device_remove(struct device *dev)
@@ -57,11 +77,24 @@ static int slim_device_remove(struct device *dev)
return 0;
}
+static int slim_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct slim_device *sbdev = to_slim_device(dev);
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return add_uevent_var(env, "MODALIAS=slim:%s", dev_name(&sbdev->dev));
+}
+
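With OF-style matching and modalias uevents in place, a slimbus function driver can now bind by compatible string alone, without an id_table. A hedged registration sketch (all identifiers hypothetical):

static const struct of_device_id foo_slim_of_match[] = {
	{ .compatible = "acme,foo-codec" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_slim_of_match);

static struct slim_driver foo_slim_driver = {
	.probe		= foo_probe,		/* hypothetical callbacks */
	.device_status	= foo_device_status,
	.driver = {
		.name		= "foo-codec",
		.of_match_table	= foo_slim_of_match,
	},
};
module_slim_driver(foo_slim_driver);

Note that device_status matters here: slim_device_probe() above calls it once the device's logical address is known.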
struct bus_type slimbus_bus = {
.name = "slimbus",
.match = slim_device_match,
.probe = slim_device_probe,
.remove = slim_device_remove,
+ .uevent = slim_device_uevent,
};
EXPORT_SYMBOL_GPL(slimbus_bus);
@@ -77,7 +110,7 @@ EXPORT_SYMBOL_GPL(slimbus_bus);
int __slim_driver_register(struct slim_driver *drv, struct module *owner)
{
/* ID table and probe are mandatory */
- if (!drv->id_table || !drv->probe)
+ if (!(drv->driver.of_match_table || drv->id_table) || !drv->probe)
return -EINVAL;
drv->driver.bus = &slimbus_bus;
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 8be4d6786c61..7218fb963d0a 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1004,6 +1004,7 @@ static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
struct slim_eaddr *ea, u8 *laddr)
{
struct slim_val_inf msg = {0};
+ u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
struct slim_msg_txn txn;
u8 wbuf[10] = {0};
u8 rbuf[10] = {0};
@@ -1034,6 +1035,9 @@ static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
return ret;
}
+ if (!memcmp(rbuf, failed_ea, 6))
+ return -ENXIO;
+
*laddr = rbuf[6];
return ret;
@@ -1234,8 +1238,17 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
pm_runtime_resume(ctrl->dev);
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_put(ctrl->dev);
+
+ ret = slim_register_controller(&ctrl->ctrl);
+ if (ret) {
+ dev_err(ctrl->dev, "error adding slim controller\n");
+ return ret;
+ }
+
+ dev_info(ctrl->dev, "SLIM controller Registered\n");
} else {
qcom_slim_qmi_exit(ctrl);
+ slim_unregister_controller(&ctrl->ctrl);
}
return 0;
@@ -1342,7 +1355,6 @@ static int of_qcom_slim_ngd_register(struct device *parent,
ngd->base = ctrl->base + ngd->id * data->offset +
(ngd->id - 1) * data->size;
ctrl->ngd = ngd;
- platform_driver_register(&qcom_slim_ngd_driver);
return 0;
}
@@ -1357,11 +1369,6 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev)
int ret;
ctrl->ctrl.dev = dev;
- ret = slim_register_controller(&ctrl->ctrl);
- if (ret) {
- dev_err(dev, "error adding slim controller\n");
- return ret;
- }
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
@@ -1371,7 +1378,7 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev)
ret = qcom_slim_ngd_qmi_svc_event_init(ctrl);
if (ret) {
dev_err(&pdev->dev, "QMI service registration failed:%d", ret);
- goto err;
+ return ret;
}
INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker);
@@ -1383,14 +1390,12 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev)
}
return 0;
-err:
- slim_unregister_controller(&ctrl->ctrl);
wq_err:
qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
if (ctrl->mwq)
destroy_workqueue(ctrl->mwq);
- return 0;
+ return ret;
}
static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
@@ -1441,6 +1446,7 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
init_completion(&ctrl->reconf);
init_completion(&ctrl->qmi.qmi_comp);
+ platform_driver_register(&qcom_slim_ngd_driver);
return of_qcom_slim_ngd_register(dev, ctrl);
}
@@ -1456,7 +1462,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev)
struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
- slim_unregister_controller(&ctrl->ctrl);
+ qcom_slim_ngd_enable(ctrl, false);
qcom_slim_ngd_exit_dma(ctrl);
qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
if (ctrl->mwq)
@@ -1467,7 +1473,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev)
return 0;
}
-static int qcom_slim_ngd_runtime_idle(struct device *dev)
+static int __maybe_unused qcom_slim_ngd_runtime_idle(struct device *dev)
{
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
@@ -1477,8 +1483,7 @@ static int qcom_slim_ngd_runtime_idle(struct device *dev)
return -EAGAIN;
}
-#ifdef CONFIG_PM
-static int qcom_slim_ngd_runtime_suspend(struct device *dev)
+static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
{
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
int ret = 0;
@@ -1491,7 +1496,6 @@ static int qcom_slim_ngd_runtime_suspend(struct device *dev)
return ret;
}
-#endif
static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
index 5abb08ffb74d..ffc5311c0ed8 100644
--- a/drivers/soc/dove/pmu.c
+++ b/drivers/soc/dove/pmu.c
@@ -383,7 +383,7 @@ int __init dove_init_pmu(void)
domains_node = of_get_child_by_name(np_pmu, "domains");
if (!domains_node) {
- pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
+ pr_err("%pOFn: failed to find domains sub-node\n", np_pmu);
return 0;
}
@@ -396,7 +396,7 @@ int __init dove_init_pmu(void)
pmu->pmc_base = of_iomap(pmu->of_node, 0);
pmu->pmu_base = of_iomap(pmu->of_node, 1);
if (!pmu->pmc_base || !pmu->pmu_base) {
- pr_err("%s: failed to map PMU\n", np_pmu->name);
+ pr_err("%pOFn: failed to map PMU\n", np_pmu);
iounmap(pmu->pmu_base);
iounmap(pmu->pmc_base);
kfree(pmu);
@@ -414,7 +414,7 @@ int __init dove_init_pmu(void)
break;
domain->pmu = pmu;
- domain->base.name = kstrdup(np->name, GFP_KERNEL);
+ domain->base.name = kasprintf(GFP_KERNEL, "%pOFn", np);
if (!domain->base.name) {
kfree(domain);
break;
@@ -444,7 +444,7 @@ int __init dove_init_pmu(void)
/* Loss of the interrupt controller is not a fatal error. */
parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
if (!parent_irq) {
- pr_err("%s: no interrupt specified\n", np_pmu->name);
+ pr_err("%pOFn: no interrupt specified\n", np_pmu);
} else {
ret = dove_init_pmu_irq(pmu, parent_irq);
if (ret)
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 9b17f72349ed..321a92613a7e 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -310,6 +310,37 @@ int dpaa2_io_service_rearm(struct dpaa2_io *d,
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
/**
+ * dpaa2_io_service_pull_fq() - issue a pull dequeue command on a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_fq(&pd, fqid);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
+
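A hedged caller sketch for this pull-dequeue path, assuming a store obtained from dpaa2_io_store_create() and drained with the existing dpaa2_io_store_next() accessor; the consumer hook is hypothetical:

static void example_drain_fq(struct dpaa2_io *io, u32 fqid,
			     struct dpaa2_io_store *s)
{
	const struct dpaa2_dq *dq;
	int is_last = 0;

	if (dpaa2_io_service_pull_fq(io, fqid, s))
		return;

	do {
		/* NULL means the response is not yet DMA'd; keep polling */
		dq = dpaa2_io_store_next(s, &is_last);
		if (dq)
			example_consume_fd(dpaa2_dq_fd(dq));
	} while (!is_last);
}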
+/**
* dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
* @d: the given DPIO service.
* @channelid: the given channel id.
@@ -342,6 +373,33 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
/**
+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
+
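And a matching sketch for the enqueue side, retrying on the transient -EBUSY documented above (the helper name is hypothetical; a bounded retry or backoff may be preferable in practice):

static int example_xmit_fd(struct dpaa2_io *io, u32 fqid,
			   const struct dpaa2_fd *fd)
{
	int err;

	/* The enqueue ring may be momentarily full; retry on -EBUSY */
	do {
		err = dpaa2_io_service_enqueue_fq(io, fqid, fd);
	} while (err == -EBUSY);

	return err;
}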
+/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
* @d: the given DPIO service.
* @qdid: the given queuing destination id.
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 6fd5fef5f39b..109b38de3176 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -419,7 +419,7 @@ static size_t fqd_sz, pfdr_sz;
static int zero_priv_mem(phys_addr_t addr, size_t sz)
{
/* map as cacheable, non-guarded */
- void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+ void __iomem *tmpp = ioremap_cache(addr, sz);
if (!tmpp)
return -ENOMEM;
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index f744c214f680..f78c34647ca2 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -131,7 +131,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
pdev = of_find_device_by_node(np2);
if (!pdev) {
- pr_err("%s: failed to lookup pdev\n", np2->name);
+ pr_err("%pOFn: failed to lookup pdev\n", np2);
of_node_put(np2);
return -EINVAL;
}
@@ -153,7 +153,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
pdev = of_find_device_by_node(np2);
if (!pdev) {
ret = -EINVAL;
- pr_err("%s: failed to lookup pdev\n", np2->name);
+ pr_err("%pOFn: failed to lookup pdev\n", np2);
of_node_put(np2);
goto err_miss_siram_property;
}
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 57af8a537332..4bda793ba6ae 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -219,7 +219,7 @@ static int apr_add_device(struct device *dev, struct device_node *np,
adev->domain_id = id->domain_id;
adev->version = id->svc_version;
if (np)
- strncpy(adev->name, np->name, APR_NAME_SIZE);
+ snprintf(adev->name, APR_NAME_SIZE, "%pOFn", np);
else
strncpy(adev->name, id->name, APR_NAME_SIZE);
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 6dff8682155f..6f86a726bb45 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -392,21 +392,21 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
error = of_property_read_u32(node, "reg", &id);
if (error) {
dev_err(pmu->dev,
- "%s: failed to retrieve domain id (reg): %d\n",
- node->name, error);
+ "%pOFn: failed to retrieve domain id (reg): %d\n",
+ node, error);
return -EINVAL;
}
if (id >= pmu->info->num_domains) {
- dev_err(pmu->dev, "%s: invalid domain id %d\n",
- node->name, id);
+ dev_err(pmu->dev, "%pOFn: invalid domain id %d\n",
+ node, id);
return -EINVAL;
}
pd_info = &pmu->info->domain_info[id];
if (!pd_info) {
- dev_err(pmu->dev, "%s: undefined domain id %d\n",
- node->name, id);
+ dev_err(pmu->dev, "%pOFn: undefined domain id %d\n",
+ node, id);
return -EINVAL;
}
@@ -424,8 +424,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
if (!pd->clks)
return -ENOMEM;
} else {
- dev_dbg(pmu->dev, "%s: doesn't have clocks: %d\n",
- node->name, pd->num_clks);
+ dev_dbg(pmu->dev, "%pOFn: doesn't have clocks: %d\n",
+ node, pd->num_clks);
pd->num_clks = 0;
}
@@ -434,8 +434,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
if (IS_ERR(pd->clks[i].clk)) {
error = PTR_ERR(pd->clks[i].clk);
dev_err(pmu->dev,
- "%s: failed to get clk at index %d: %d\n",
- node->name, i, error);
+ "%pOFn: failed to get clk at index %d: %d\n",
+ node, i, error);
return error;
}
}
@@ -486,8 +486,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
error = rockchip_pd_power(pd, true);
if (error) {
dev_err(pmu->dev,
- "failed to power on domain '%s': %d\n",
- node->name, error);
+ "failed to power on domain '%pOFn': %d\n",
+ node, error);
goto err_unprepare_clocks;
}
@@ -575,24 +575,24 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
error = of_property_read_u32(parent, "reg", &idx);
if (error) {
dev_err(pmu->dev,
- "%s: failed to retrieve domain id (reg): %d\n",
- parent->name, error);
+ "%pOFn: failed to retrieve domain id (reg): %d\n",
+ parent, error);
goto err_out;
}
parent_domain = pmu->genpd_data.domains[idx];
error = rockchip_pm_add_one_domain(pmu, np);
if (error) {
- dev_err(pmu->dev, "failed to handle node %s: %d\n",
- np->name, error);
+ dev_err(pmu->dev, "failed to handle node %pOFn: %d\n",
+ np, error);
goto err_out;
}
error = of_property_read_u32(np, "reg", &idx);
if (error) {
dev_err(pmu->dev,
- "%s: failed to retrieve domain id (reg): %d\n",
- np->name, error);
+ "%pOFn: failed to retrieve domain id (reg): %d\n",
+ np, error);
goto err_out;
}
child_domain = pmu->genpd_data.domains[idx];
@@ -683,16 +683,16 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
for_each_available_child_of_node(np, node) {
error = rockchip_pm_add_one_domain(pmu, node);
if (error) {
- dev_err(dev, "failed to handle node %s: %d\n",
- node->name, error);
+ dev_err(dev, "failed to handle node %pOFn: %d\n",
+ node, error);
of_node_put(node);
goto err_out;
}
error = rockchip_pm_add_subdomain(pmu, node);
if (error < 0) {
- dev_err(dev, "failed to handle subdomain node %s: %d\n",
- node->name, error);
+ dev_err(dev, "failed to handle subdomain node %pOFn: %d\n",
+ node, error);
of_node_put(node);
goto err_out;
}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 2d6f3fcf3211..acbe63e925d5 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -796,7 +796,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
- pr_err("powergate lookup failed for %s: %d\n", np->name, id);
+ pr_err("powergate lookup failed for %pOFn: %d\n", np, id);
goto free_mem;
}
@@ -816,13 +816,13 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = tegra_powergate_of_get_clks(pg, np);
if (err < 0) {
- pr_err("failed to get clocks for %s: %d\n", np->name, err);
+ pr_err("failed to get clocks for %pOFn: %d\n", np, err);
goto set_available;
}
err = tegra_powergate_of_get_resets(pg, np, off);
if (err < 0) {
- pr_err("failed to get resets for %s: %d\n", np->name, err);
+ pr_err("failed to get resets for %pOFn: %d\n", np, err);
goto remove_clks;
}
@@ -851,15 +851,15 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = pm_genpd_init(&pg->genpd, NULL, off);
if (err < 0) {
- pr_err("failed to initialise PM domain %s: %d\n", np->name,
+ pr_err("failed to initialise PM domain %pOFn: %d\n", np,
err);
goto remove_resets;
}
err = of_genpd_add_provider_simple(np, &pg->genpd);
if (err < 0) {
- pr_err("failed to add PM domain provider for %s: %d\n",
- np->name, err);
+ pr_err("failed to add PM domain provider for %pOFn: %d\n",
+ np, err);
goto remove_genpd;
}
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 224d7ddeeb76..bbd4e5bc8707 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -544,15 +544,15 @@ static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
ret = of_address_to_resource(node, index, &res);
if (ret) {
- dev_err(dev, "Can't translate of node(%s) address for index(%d)\n",
- node->name, index);
+ dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n",
+ node, index);
return ERR_PTR(ret);
}
regs = devm_ioremap_resource(kdev->dev, &res);
if (IS_ERR(regs))
- dev_err(dev, "Failed to map register base for index(%d) node(%s)\n",
- index, node->name);
+ dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n",
+ index, node);
if (_size)
*_size = resource_size(&res);
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 6755f2af5619..b5d5673c255c 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1382,15 +1382,15 @@ static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
ret = of_address_to_resource(node, index, &res);
if (ret) {
- dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
- node->name, index);
+ dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
+ node, index);
return ERR_PTR(ret);
}
regs = devm_ioremap_resource(kdev->dev, &res);
if (IS_ERR(regs))
- dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
- index, node->name);
+ dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
+ index, node);
return regs;
}
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index dcc0ff9f0c22..1cbfedfc20ef 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -35,6 +35,11 @@ int sdw_add_bus_master(struct sdw_bus *bus)
INIT_LIST_HEAD(&bus->slaves);
INIT_LIST_HEAD(&bus->m_rt_list);
+ /*
+ * Initialize multi_link flag
+ * TODO: populate this flag by reading property from FW node
+ */
+ bus->multi_link = false;
if (bus->ops->read_prop) {
ret = bus->ops->read_prop(bus);
if (ret < 0) {
@@ -175,6 +180,7 @@ static inline int do_transfer_defer(struct sdw_bus *bus,
defer->msg = msg;
defer->length = msg->len;
+ init_completion(&defer->complete);
for (i = 0; i <= retry; i++) {
resp = bus->ops->xfer_msg_defer(bus, msg, defer);
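
The init_completion() added above arms the completion before the message is
queued; the consumer side then blocks on it with a timeout. A hedged sketch
of the pairing (the msecs_to_jiffies() conversion is an assumption here; the
stream code below passes bank_switch_timeout directly):

	init_completion(&defer->complete);
	resp = bus->ops->xfer_msg_defer(bus, msg, defer);

	/* later, in the waiter (cf. sdw_ml_sync_bank_switch()) */
	if (!wait_for_completion_timeout(&defer->complete,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;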
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index 3b15c4e25a3a..c77de05b8100 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -4,6 +4,8 @@
#ifndef __SDW_BUS_H
#define __SDW_BUS_H
+#define DEFAULT_BANK_SWITCH_TIMEOUT 3000
+
#if IS_ENABLED(CONFIG_ACPI)
int sdw_acpi_find_slaves(struct sdw_bus *bus);
#else
@@ -99,6 +101,7 @@ struct sdw_slave_runtime {
* this stream, can be zero.
* @slave_rt_list: Slave runtime list
* @port_list: List of Master Ports configured for this stream, can be zero.
+ * @stream_node: sdw_stream_runtime master_list node
* @bus_node: sdw_bus m_rt_list node
*/
struct sdw_master_runtime {
@@ -108,6 +111,7 @@ struct sdw_master_runtime {
unsigned int ch_count;
struct list_head slave_rt_list;
struct list_head port_list;
+ struct list_head stream_node;
struct list_head bus_node;
};
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 0a8990e758f9..c5ee97ee7886 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -398,6 +398,69 @@ static int intel_config_stream(struct sdw_intel *sdw,
}
/*
+ * bank switch routines
+ */
+
+static int intel_pre_bank_switch(struct sdw_bus *bus)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ void __iomem *shim = sdw->res->shim;
+ int sync_reg;
+
+ /* Write to register only for multi-link */
+ if (!bus->multi_link)
+ return 0;
+
+ /* Read SYNC register */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+ sync_reg |= SDW_SHIM_SYNC_CMDSYNC << sdw->instance;
+ intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
+
+ return 0;
+}
+
+static int intel_post_bank_switch(struct sdw_bus *bus)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ void __iomem *shim = sdw->res->shim;
+ int sync_reg, ret;
+
+ /* Write to register only for multi-link */
+ if (!bus->multi_link)
+ return 0;
+
+ /* Read SYNC register */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+
+ /*
+ * post_bank_switch() ops is called from the bus in a loop for
+ * all the Masters in the stream, with the expectation that
+ * we trigger the bank switch only for the first Master in the list
+ * and do nothing for the other Masters
+ *
+ * So, set the SYNCGO bit only if the CMDSYNC bit is set for any Master.
+ */
+ if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK))
+ return 0;
+
+ /*
+ * Set the SYNCGO bit to synchronously trigger a bank switch for
+ * all the Masters. A write to the SYNCGO bit clears the CMDSYNC bit
+ * for all the Masters.
+ */
+ sync_reg |= SDW_SHIM_SYNC_SYNCGO;
+
+ ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
+ SDW_SHIM_SYNC_SYNCGO);
+ if (ret < 0)
+ dev_err(sdw->cdns.dev, "Post bank switch failed: %d", ret);
+
+ return ret;
+}
+
+/*
* DAI routines
*/
@@ -750,6 +813,8 @@ static struct sdw_master_ops sdw_intel_ops = {
.xfer_msg_defer = cdns_xfer_msg_defer,
.reset_page_addr = cdns_reset_page_addr,
.set_bus_conf = cdns_bus_conf,
+ .pre_bank_switch = intel_pre_bank_switch,
+ .post_bank_switch = intel_post_bank_switch,
};
/*
@@ -780,9 +845,6 @@ static int intel_probe(struct platform_device *pdev)
sdw_intel_ops.read_prop = intel_prop_read;
sdw->cdns.bus.ops = &sdw_intel_ops;
- sdw_intel_ops.read_prop = intel_prop_read;
- sdw->cdns.bus.ops = &sdw_intel_ops;
-
platform_set_drvdata(pdev, sdw);
ret = sdw_add_bus_master(&sdw->cdns.bus);
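
Taken together, the two new ops implement the arm-then-fire handshake the
comments describe. A condensed sketch (intel_writel() here stands in for the
intel_clear_bit() polling helper the driver actually uses for the SYNCGO
write):

	/* per Master, in pre_bank_switch(): arm this link */
	sync = intel_readl(shim, SDW_SHIM_SYNC);
	sync |= SDW_SHIM_SYNC_CMDSYNC << instance;
	intel_writel(shim, SDW_SHIM_SYNC, sync);

	/* once, in the first post_bank_switch(): fire if any link is
	 * armed; the SYNCGO write clears CMDSYNC for all Masters
	 */
	sync = intel_readl(shim, SDW_SHIM_SYNC);
	if (sync & SDW_SHIM_SYNC_CMDSYNC_MASK)
		intel_writel(shim, SDW_SHIM_SYNC,
			     sync | SDW_SHIM_SYNC_SYNCGO);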
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index d1ea6b4d0ad3..5c8a20d99878 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -151,7 +151,7 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
struct acpi_device *adev;
if (acpi_bus_get_device(handle, &adev)) {
- dev_err(&adev->dev, "Couldn't find ACPI handle\n");
+ pr_err("%s: Couldn't find ACPI handle\n", __func__);
return AE_NOT_FOUND;
}
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index e5c7e1ef6318..bd879b1a76c8 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -626,9 +626,10 @@ static int sdw_program_params(struct sdw_bus *bus)
return ret;
}
-static int sdw_bank_switch(struct sdw_bus *bus)
+static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
{
int col_index, row_index;
+ bool multi_link;
struct sdw_msg *wr_msg;
u8 *wbuf = NULL;
int ret = 0;
@@ -638,6 +639,8 @@ static int sdw_bank_switch(struct sdw_bus *bus)
if (!wr_msg)
return -ENOMEM;
+ bus->defer_msg.msg = wr_msg;
+
wbuf = kzalloc(sizeof(*wbuf), GFP_KERNEL);
if (!wbuf) {
ret = -ENOMEM;
@@ -658,17 +661,29 @@ static int sdw_bank_switch(struct sdw_bus *bus)
SDW_MSG_FLAG_WRITE, wbuf);
wr_msg->ssp_sync = true;
- ret = sdw_transfer(bus, wr_msg);
+ /*
+ * Set the multi_link flag only when the hardware supports it
+ * and the stream is handled by multiple Masters
+ */
+ multi_link = bus->multi_link && (m_rt_count > 1);
+
+ if (multi_link)
+ ret = sdw_transfer_defer(bus, wr_msg, &bus->defer_msg);
+ else
+ ret = sdw_transfer(bus, wr_msg);
+
if (ret < 0) {
dev_err(bus->dev, "Slave frame_ctrl reg write failed");
goto error;
}
- kfree(wr_msg);
- kfree(wbuf);
- bus->defer_msg.msg = NULL;
- bus->params.curr_bank = !bus->params.curr_bank;
- bus->params.next_bank = !bus->params.next_bank;
+ if (!multi_link) {
+ kfree(wr_msg);
+ kfree(wbuf);
+ bus->defer_msg.msg = NULL;
+ bus->params.curr_bank = !bus->params.curr_bank;
+ bus->params.next_bank = !bus->params.next_bank;
+ }
return 0;
@@ -679,37 +694,138 @@ error_1:
return ret;
}
+/**
+ * sdw_ml_sync_bank_switch() - Multi-link register bank switch
+ *
+ * @bus: SDW bus instance
+ *
+ * The caller should free the buffers on error
+ */
+static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
+{
+ unsigned long time_left;
+
+ if (!bus->multi_link)
+ return 0;
+
+ /* Wait for completion of transfer */
+ time_left = wait_for_completion_timeout(&bus->defer_msg.complete,
+ bus->bank_switch_timeout);
+
+ if (!time_left) {
+ dev_err(bus->dev, "Controller Timed out on bank switch");
+ return -ETIMEDOUT;
+ }
+
+ bus->params.curr_bank = !bus->params.curr_bank;
+ bus->params.next_bank = !bus->params.next_bank;
+
+ if (bus->defer_msg.msg) {
+ kfree(bus->defer_msg.msg->buf);
+ kfree(bus->defer_msg.msg);
+ }
+
+ return 0;
+}
+
static int do_bank_switch(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
+ struct sdw_master_runtime *m_rt = NULL;
const struct sdw_master_ops *ops;
- struct sdw_bus *bus = m_rt->bus;
+ struct sdw_bus *bus = NULL;
+ bool multi_link = false;
int ret = 0;
- ops = bus->ops;
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ ops = bus->ops;
+
+ if (bus->multi_link) {
+ multi_link = true;
+ mutex_lock(&bus->msg_lock);
+ }
+
+ /* Pre-bank switch */
+ if (ops->pre_bank_switch) {
+ ret = ops->pre_bank_switch(bus);
+ if (ret < 0) {
+ dev_err(bus->dev,
+ "Pre bank switch op failed: %d", ret);
+ goto msg_unlock;
+ }
+ }
- /* Pre-bank switch */
- if (ops->pre_bank_switch) {
- ret = ops->pre_bank_switch(bus);
+ /*
+ * Perform the bank switch operation.
+ * For multi-link cases, the actual bank switch is
+ * synchronized across all Masters and happens later as
+ * part of the post_bank_switch ops.
+ */
+ ret = sdw_bank_switch(bus, stream->m_rt_count);
if (ret < 0) {
- dev_err(bus->dev, "Pre bank switch op failed: %d", ret);
- return ret;
+ dev_err(bus->dev, "Bank switch failed: %d", ret);
+ goto error;
+
}
}
- /* Bank switch */
- ret = sdw_bank_switch(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Bank switch failed: %d", ret);
- return ret;
- }
+ /*
+ * For multi-link cases, the bank switch is expected to be
+ * triggered by post_bank_switch() for the first Master in the
+ * list; for the other Masters, post_bank_switch() should return
+ * without doing anything.
+ */
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ ops = bus->ops;
+
+ /* Post-bank switch */
+ if (ops->post_bank_switch) {
+ ret = ops->post_bank_switch(bus);
+ if (ret < 0) {
+ dev_err(bus->dev,
+ "Post bank switch op failed: %d", ret);
+ goto error;
+ }
+ } else if (bus->multi_link && stream->m_rt_count > 1) {
+ dev_err(bus->dev,
+ "Post bank switch ops not implemented");
+ goto error;
+ }
+
+ /* Set the bank switch timeout to default, if not set */
+ if (!bus->bank_switch_timeout)
+ bus->bank_switch_timeout = DEFAULT_BANK_SWITCH_TIMEOUT;
- /* Post-bank switch */
- if (ops->post_bank_switch) {
- ret = ops->post_bank_switch(bus);
+ /* Check if bank switch was successful */
+ ret = sdw_ml_sync_bank_switch(bus);
if (ret < 0) {
dev_err(bus->dev,
- "Post bank switch op failed: %d", ret);
+ "multi link bank switch failed: %d", ret);
+ goto error;
+ }
+
+ mutex_unlock(&bus->msg_lock);
+ }
+
+ return ret;
+
+error:
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+
+ bus = m_rt->bus;
+
+ kfree(bus->defer_msg.msg->buf);
+ kfree(bus->defer_msg.msg);
+ }
+
+msg_unlock:
+
+ if (multi_link) {
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ if (mutex_is_locked(&bus->msg_lock))
+ mutex_unlock(&bus->msg_lock);
}
}
@@ -747,12 +863,29 @@ struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name)
return NULL;
stream->name = stream_name;
+ INIT_LIST_HEAD(&stream->master_list);
stream->state = SDW_STREAM_ALLOCATED;
+ stream->m_rt_count = 0;
return stream;
}
EXPORT_SYMBOL(sdw_alloc_stream);
+static struct sdw_master_runtime
+*sdw_find_master_rt(struct sdw_bus *bus,
+ struct sdw_stream_runtime *stream)
+{
+ struct sdw_master_runtime *m_rt = NULL;
+
+ /* Return the Master runtime handle if one already exists for this bus */
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ if (m_rt->bus == bus)
+ return m_rt;
+ }
+
+ return NULL;
+}
+
/**
 * sdw_alloc_master_rt() - Allocates and initializes Master runtime handle
*
@@ -769,12 +902,11 @@ static struct sdw_master_runtime
{
struct sdw_master_runtime *m_rt;
- m_rt = stream->m_rt;
-
/*
* check if Master is already allocated (as a result of Slave adding
* it first), if so skip allocation and go to configure
*/
+ m_rt = sdw_find_master_rt(bus, stream);
if (m_rt)
goto stream_config;
@@ -785,7 +917,7 @@ static struct sdw_master_runtime
/* Initialization of Master runtime handle */
INIT_LIST_HEAD(&m_rt->port_list);
INIT_LIST_HEAD(&m_rt->slave_rt_list);
- stream->m_rt = m_rt;
+ list_add_tail(&m_rt->stream_node, &stream->master_list);
list_add_tail(&m_rt->bus_node, &bus->m_rt_list);
@@ -843,17 +975,21 @@ static void sdw_slave_port_release(struct sdw_bus *bus,
struct sdw_stream_runtime *stream)
{
struct sdw_port_runtime *p_rt, *_p_rt;
- struct sdw_master_runtime *m_rt = stream->m_rt;
+ struct sdw_master_runtime *m_rt;
struct sdw_slave_runtime *s_rt;
- list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
- if (s_rt->slave != slave)
- continue;
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
- list_for_each_entry_safe(p_rt, _p_rt,
- &s_rt->port_list, port_node) {
- list_del(&p_rt->port_node);
- kfree(p_rt);
+ if (s_rt->slave != slave)
+ continue;
+
+ list_for_each_entry_safe(p_rt, _p_rt,
+ &s_rt->port_list, port_node) {
+
+ list_del(&p_rt->port_node);
+ kfree(p_rt);
+ }
}
}
}
@@ -870,16 +1006,18 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
struct sdw_stream_runtime *stream)
{
struct sdw_slave_runtime *s_rt, *_s_rt;
- struct sdw_master_runtime *m_rt = stream->m_rt;
+ struct sdw_master_runtime *m_rt;
- /* Retrieve Slave runtime handle */
- list_for_each_entry_safe(s_rt, _s_rt,
- &m_rt->slave_rt_list, m_rt_node) {
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ /* Retrieve Slave runtime handle */
+ list_for_each_entry_safe(s_rt, _s_rt,
+ &m_rt->slave_rt_list, m_rt_node) {
- if (s_rt->slave == slave) {
- list_del(&s_rt->m_rt_node);
- kfree(s_rt);
- return;
+ if (s_rt->slave == slave) {
+ list_del(&s_rt->m_rt_node);
+ kfree(s_rt);
+ return;
+ }
}
}
}
@@ -887,6 +1025,7 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
/**
* sdw_release_master_stream() - Free Master runtime handle
*
+ * @m_rt: Master runtime node
* @stream: Stream runtime handle.
*
* This function is to be called with bus_lock held
@@ -894,9 +1033,9 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
* handle. If this is called first then sdw_release_slave_stream() will have
* no effect as Slave(s) runtime handle would already be freed up.
*/
-static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
+static void sdw_release_master_stream(struct sdw_master_runtime *m_rt,
+ struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
struct sdw_slave_runtime *s_rt, *_s_rt;
list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
@@ -904,7 +1043,9 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
sdw_release_slave_stream(s_rt->slave, stream);
}
+ list_del(&m_rt->stream_node);
list_del(&m_rt->bus_node);
+ kfree(m_rt);
}
/**
@@ -918,13 +1059,23 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
int sdw_stream_remove_master(struct sdw_bus *bus,
struct sdw_stream_runtime *stream)
{
+ struct sdw_master_runtime *m_rt, *_m_rt;
+
mutex_lock(&bus->bus_lock);
- sdw_release_master_stream(stream);
- sdw_master_port_release(bus, stream->m_rt);
- stream->state = SDW_STREAM_RELEASED;
- kfree(stream->m_rt);
- stream->m_rt = NULL;
+ list_for_each_entry_safe(m_rt, _m_rt,
+ &stream->master_list, stream_node) {
+
+ if (m_rt->bus != bus)
+ continue;
+
+ sdw_master_port_release(bus, m_rt);
+ sdw_release_master_stream(m_rt, stream);
+ stream->m_rt_count--;
+ }
+
+ if (list_empty(&stream->master_list))
+ stream->state = SDW_STREAM_RELEASED;
mutex_unlock(&bus->bus_lock);
@@ -1107,6 +1258,18 @@ int sdw_stream_add_master(struct sdw_bus *bus,
mutex_lock(&bus->bus_lock);
+ /*
+ * For multi-link streams, add a second Master only if
+ * the bus supports it, i.e. only if bus->multi_link is set
+ */
+ if (!bus->multi_link && stream->m_rt_count > 0) {
+ dev_err(bus->dev,
+ "Multilink not supported, link %d", bus->link_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
m_rt = sdw_alloc_master_rt(bus, stream_config, stream);
if (!m_rt) {
dev_err(bus->dev,
@@ -1124,10 +1287,12 @@ int sdw_stream_add_master(struct sdw_bus *bus,
if (ret)
goto stream_error;
+ stream->m_rt_count++;
+
goto unlock;
stream_error:
- sdw_release_master_stream(stream);
+ sdw_release_master_stream(m_rt, stream);
unlock:
mutex_unlock(&bus->bus_lock);
return ret;
@@ -1205,7 +1370,7 @@ stream_error:
* we hit error so cleanup the stream, release all Slave(s) and
* Master runtime
*/
- sdw_release_master_stream(stream);
+ sdw_release_master_stream(m_rt, stream);
error:
mutex_unlock(&slave->bus->bus_lock);
return ret;
@@ -1245,33 +1410,82 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
return NULL;
}
+/**
+ * sdw_acquire_bus_lock() - Acquire bus lock for all Master runtime(s)
+ *
+ * @stream: SoundWire stream
+ *
+ * Acquire the bus_lock of each Master runtime (m_rt) that is part of
+ * this stream, so the bus can be reconfigured.
+ * NOTE: This function is called from SoundWire stream ops, and it is
+ * expected that a global lock is held before acquiring bus_lock.
+ */
+static void sdw_acquire_bus_lock(struct sdw_stream_runtime *stream)
+{
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
+
+ /* Iterate for all Master(s) in Master list */
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+
+ mutex_lock(&bus->bus_lock);
+ }
+}
+
+/**
+ * sdw_release_bus_lock() - Release bus lock for all Master runtime(s)
+ *
+ * @stream: SoundWire stream
+ *
+ * Release the previously held bus_lock after reconfiguring the bus.
+ * NOTE: This function is called from SoundWire stream ops, and it is
+ * expected that a global lock is held before releasing bus_lock.
+ */
+static void sdw_release_bus_lock(struct sdw_stream_runtime *stream)
+{
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
+
+ /* Iterate for all Master(s) in Master list */
+ list_for_each_entry_reverse(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ mutex_unlock(&bus->bus_lock);
+ }
+}
+
static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
- struct sdw_bus *bus = m_rt->bus;
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
struct sdw_master_prop *prop = NULL;
struct sdw_bus_params params;
int ret;
- prop = &bus->prop;
- memcpy(&params, &bus->params, sizeof(params));
+ /* Prepare Master(s) and Slave(s) port(s) associated with stream */
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ prop = &bus->prop;
+ memcpy(&params, &bus->params, sizeof(params));
- /* TODO: Support Asynchronous mode */
- if ((prop->max_freq % stream->params.rate) != 0) {
- dev_err(bus->dev, "Async mode not supported");
- return -EINVAL;
- }
+ /* TODO: Support Asynchronous mode */
+ if ((prop->max_freq % stream->params.rate) != 0) {
+ dev_err(bus->dev, "Async mode not supported");
+ return -EINVAL;
+ }
- /* Increment cumulative bus bandwidth */
- /* TODO: Update this during Device-Device support */
- bus->params.bandwidth += m_rt->stream->params.rate *
- m_rt->ch_count * m_rt->stream->params.bps;
+ /* Increment cumulative bus bandwidth */
+ /* TODO: Update this during Device-Device support */
+ bus->params.bandwidth += m_rt->stream->params.rate *
+ m_rt->ch_count * m_rt->stream->params.bps;
+
+ /* Program params */
+ ret = sdw_program_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Program params failed: %d", ret);
+ goto restore_params;
+ }
- /* Program params */
- ret = sdw_program_params(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
- goto restore_params;
}
ret = do_bank_switch(stream);
@@ -1280,12 +1494,16 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
goto restore_params;
}
- /* Prepare port(s) on the new clock configuration */
- ret = sdw_prep_deprep_ports(m_rt, true);
- if (ret < 0) {
- dev_err(bus->dev, "Prepare port(s) failed ret = %d",
- ret);
- return ret;
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+
+ /* Prepare port(s) on the new clock configuration */
+ ret = sdw_prep_deprep_ports(m_rt, true);
+ if (ret < 0) {
+ dev_err(bus->dev, "Prepare port(s) failed ret = %d",
+ ret);
+ return ret;
+ }
}
stream->state = SDW_STREAM_PREPARED;
@@ -1313,35 +1531,40 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
return -EINVAL;
}
- mutex_lock(&stream->m_rt->bus->bus_lock);
+ sdw_acquire_bus_lock(stream);
ret = _sdw_prepare_stream(stream);
if (ret < 0)
pr_err("Prepare for stream:%s failed: %d", stream->name, ret);
- mutex_unlock(&stream->m_rt->bus->bus_lock);
+ sdw_release_bus_lock(stream);
return ret;
}
EXPORT_SYMBOL(sdw_prepare_stream);
static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
- struct sdw_bus *bus = m_rt->bus;
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
int ret;
- /* Program params */
- ret = sdw_program_params(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
- return ret;
- }
+ /* Enable Master(s) and Slave(s) port(s) associated with stream */
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
- /* Enable port(s) */
- ret = sdw_enable_disable_ports(m_rt, true);
- if (ret < 0) {
- dev_err(bus->dev, "Enable port(s) failed ret: %d", ret);
- return ret;
+ /* Program params */
+ ret = sdw_program_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Program params failed: %d", ret);
+ return ret;
+ }
+
+ /* Enable port(s) */
+ ret = sdw_enable_disable_ports(m_rt, true);
+ if (ret < 0) {
+ dev_err(bus->dev, "Enable port(s) failed ret: %d", ret);
+ return ret;
+ }
}
ret = do_bank_switch(stream);
@@ -1370,37 +1593,42 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
return -EINVAL;
}
- mutex_lock(&stream->m_rt->bus->bus_lock);
+ sdw_acquire_bus_lock(stream);
ret = _sdw_enable_stream(stream);
if (ret < 0)
pr_err("Enable for stream:%s failed: %d", stream->name, ret);
- mutex_unlock(&stream->m_rt->bus->bus_lock);
+ sdw_release_bus_lock(stream);
return ret;
}
EXPORT_SYMBOL(sdw_enable_stream);
static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
- struct sdw_bus *bus = m_rt->bus;
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
int ret;
- /* Disable port(s) */
- ret = sdw_enable_disable_ports(m_rt, false);
- if (ret < 0) {
- dev_err(bus->dev, "Disable port(s) failed: %d", ret);
- return ret;
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ /* Disable port(s) */
+ ret = sdw_enable_disable_ports(m_rt, false);
+ if (ret < 0) {
+ dev_err(bus->dev, "Disable port(s) failed: %d", ret);
+ return ret;
+ }
}
-
stream->state = SDW_STREAM_DISABLED;
- /* Program params */
- ret = sdw_program_params(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
- return ret;
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ /* Program params */
+ ret = sdw_program_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Program params failed: %d", ret);
+ return ret;
+ }
}
return do_bank_switch(stream);
@@ -1422,43 +1650,46 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
return -EINVAL;
}
- mutex_lock(&stream->m_rt->bus->bus_lock);
+ sdw_acquire_bus_lock(stream);
ret = _sdw_disable_stream(stream);
if (ret < 0)
pr_err("Disable for stream:%s failed: %d", stream->name, ret);
- mutex_unlock(&stream->m_rt->bus->bus_lock);
+ sdw_release_bus_lock(stream);
return ret;
}
EXPORT_SYMBOL(sdw_disable_stream);
static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
- struct sdw_bus *bus = m_rt->bus;
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
int ret = 0;
- /* De-prepare port(s) */
- ret = sdw_prep_deprep_ports(m_rt, false);
- if (ret < 0) {
- dev_err(bus->dev, "De-prepare port(s) failed: %d", ret);
- return ret;
- }
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ /* De-prepare port(s) */
+ ret = sdw_prep_deprep_ports(m_rt, false);
+ if (ret < 0) {
+ dev_err(bus->dev, "De-prepare port(s) failed: %d", ret);
+ return ret;
+ }
- stream->state = SDW_STREAM_DEPREPARED;
+ /* TODO: Update this during Device-Device support */
+ bus->params.bandwidth -= m_rt->stream->params.rate *
+ m_rt->ch_count * m_rt->stream->params.bps;
- /* TODO: Update this during Device-Device support */
- bus->params.bandwidth -= m_rt->stream->params.rate *
- m_rt->ch_count * m_rt->stream->params.bps;
+ /* Program params */
+ ret = sdw_program_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Program params failed: %d", ret);
+ return ret;
+ }
- /* Program params */
- ret = sdw_program_params(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
- return ret;
}
+ stream->state = SDW_STREAM_DEPREPARED;
return do_bank_switch(stream);
}
@@ -1478,13 +1709,12 @@ int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
return -EINVAL;
}
- mutex_lock(&stream->m_rt->bus->bus_lock);
-
+ sdw_acquire_bus_lock(stream);
ret = _sdw_deprepare_stream(stream);
if (ret < 0)
pr_err("De-prepare for stream:%d failed: %d", ret, ret);
- mutex_unlock(&stream->m_rt->bus->bus_lock);
+ sdw_release_bus_lock(stream);
return ret;
}
EXPORT_SYMBOL(sdw_deprepare_stream);
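
The acquire/release helpers introduced above give the multi-master paths a
simple discipline: every bus_lock is taken in master_list order and dropped
in reverse, so lock-order inversion is avoided as long as streams see their
shared buses in a consistent order. A minimal sketch of the convention:

	list_for_each_entry(m_rt, &stream->master_list, stream_node)
		mutex_lock(&m_rt->bus->bus_lock);

	/* ... reconfigure the stream across all buses ... */

	list_for_each_entry_reverse(m_rt, &stream->master_list, stream_node)
		mutex_unlock(&m_rt->bus->bus_lock);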
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f756450a8914..7d3a5c94727e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -83,6 +83,14 @@ config SPI_ATMEL
This selects a driver for the Atmel SPI Controller, present on
many AT91 ARM chips.
+config SPI_AT91_USART
+ tristate "Atmel USART Controller SPI driver"
+ depends on (ARCH_AT91 || COMPILE_TEST)
+ depends on MFD_AT91_USART
+ help
+ This selects a driver for the AT91 USART Controller as SPI Master,
+ present on AT91 and SAMA5 SoC series.
+
config SPI_AU1550
tristate "Au1550/Au1200/Au1300 SPI Controller"
depends on MIPS_ALCHEMY
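
For reference, a hedged .config fragment that builds the new driver as a
module (the MFD parent is required by the dependency above):

	CONFIG_MFD_AT91_USART=y
	CONFIG_SPI_AT91_USART=m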
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index df04dfbe7d70..3575205c5c27 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
+obj-$(CONFIG_SPI_AT91_USART) += spi-at91-usart.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_AXI_SPI_ENGINE) += spi-axi-spi-engine.o
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
new file mode 100644
index 000000000000..a924657642fa
--- /dev/null
+++ b/drivers/spi/spi-at91-usart.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for AT91 USART Controllers as SPI
+//
+// Copyright (C) 2018 Microchip Technology Inc.
+//
+// Author: Radu Pirea <radu.pirea@microchip.com>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+
+#define US_CR 0x00
+#define US_MR 0x04
+#define US_IER 0x08
+#define US_IDR 0x0C
+#define US_CSR 0x14
+#define US_RHR 0x18
+#define US_THR 0x1C
+#define US_BRGR 0x20
+#define US_VERSION 0xFC
+
+#define US_CR_RSTRX BIT(2)
+#define US_CR_RSTTX BIT(3)
+#define US_CR_RXEN BIT(4)
+#define US_CR_RXDIS BIT(5)
+#define US_CR_TXEN BIT(6)
+#define US_CR_TXDIS BIT(7)
+
+#define US_MR_SPI_MASTER 0x0E
+#define US_MR_CHRL GENMASK(7, 6)
+#define US_MR_CPHA BIT(8)
+#define US_MR_CPOL BIT(16)
+#define US_MR_CLKO BIT(18)
+#define US_MR_WRDBT BIT(20)
+#define US_MR_LOOP BIT(15)
+
+#define US_IR_RXRDY BIT(0)
+#define US_IR_TXRDY BIT(1)
+#define US_IR_OVRE BIT(5)
+
+#define US_BRGR_SIZE BIT(16)
+
+#define US_MIN_CLK_DIV 0x06
+#define US_MAX_CLK_DIV BIT(16)
+
+#define US_RESET (US_CR_RSTRX | US_CR_RSTTX)
+#define US_DISABLE (US_CR_RXDIS | US_CR_TXDIS)
+#define US_ENABLE (US_CR_RXEN | US_CR_TXEN)
+#define US_OVRE_RXRDY_IRQS (US_IR_OVRE | US_IR_RXRDY)
+
+#define US_INIT \
+ (US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
+
+/* Register access macros */
+#define at91_usart_spi_readl(port, reg) \
+ readl_relaxed((port)->regs + US_##reg)
+#define at91_usart_spi_writel(port, reg, value) \
+ writel_relaxed((value), (port)->regs + US_##reg)
+
+#define at91_usart_spi_readb(port, reg) \
+ readb_relaxed((port)->regs + US_##reg)
+#define at91_usart_spi_writeb(port, reg, value) \
+ writeb_relaxed((value), (port)->regs + US_##reg)
+
+struct at91_usart_spi {
+ struct spi_transfer *current_transfer;
+ void __iomem *regs;
+ struct device *dev;
+ struct clk *clk;
+
+ /* used in interrupt to protect data reading */
+ spinlock_t lock;
+
+ int irq;
+ unsigned int current_tx_remaining_bytes;
+ unsigned int current_rx_remaining_bytes;
+
+ u32 spi_clk;
+ u32 status;
+
+ bool xfer_failed;
+};
+
+static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_TXRDY;
+}
+
+static inline u32 at91_usart_spi_rx_ready(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_RXRDY;
+}
+
+static inline u32 at91_usart_spi_check_overrun(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_OVRE;
+}
+
+static inline u32 at91_usart_spi_read_status(struct at91_usart_spi *aus)
+{
+ aus->status = at91_usart_spi_readl(aus, CSR);
+ return aus->status;
+}
+
+static inline void at91_usart_spi_tx(struct at91_usart_spi *aus)
+{
+ unsigned int len = aus->current_transfer->len;
+ unsigned int remaining = aus->current_tx_remaining_bytes;
+ const u8 *tx_buf = aus->current_transfer->tx_buf;
+
+ if (!remaining)
+ return;
+
+ if (at91_usart_spi_tx_ready(aus)) {
+ at91_usart_spi_writeb(aus, THR, tx_buf[len - remaining]);
+ aus->current_tx_remaining_bytes--;
+ }
+}
+
+static inline void at91_usart_spi_rx(struct at91_usart_spi *aus)
+{
+ int len = aus->current_transfer->len;
+ int remaining = aus->current_rx_remaining_bytes;
+ u8 *rx_buf = aus->current_transfer->rx_buf;
+
+ if (!remaining)
+ return;
+
+ rx_buf[len - remaining] = at91_usart_spi_readb(aus, RHR);
+ aus->current_rx_remaining_bytes--;
+}
+
+static inline void
+at91_usart_spi_set_xfer_speed(struct at91_usart_spi *aus,
+ struct spi_transfer *xfer)
+{
+ at91_usart_spi_writel(aus, BRGR,
+ DIV_ROUND_UP(aus->spi_clk, xfer->speed_hz));
+}
+
+static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_controller *controller = dev_id;
+ struct at91_usart_spi *aus = spi_master_get_devdata(controller);
+
+ spin_lock(&aus->lock);
+ at91_usart_spi_read_status(aus);
+
+ if (at91_usart_spi_check_overrun(aus)) {
+ aus->xfer_failed = true;
+ at91_usart_spi_writel(aus, IDR, US_IR_OVRE | US_IR_RXRDY);
+ spin_unlock(&aus->lock);
+ return IRQ_HANDLED;
+ }
+
+ if (at91_usart_spi_rx_ready(aus)) {
+ at91_usart_spi_rx(aus);
+ spin_unlock(&aus->lock);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock(&aus->lock);
+
+ return IRQ_NONE;
+}
+
+static int at91_usart_spi_setup(struct spi_device *spi)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
+ u32 *ausd = spi->controller_state;
+ unsigned int mr = at91_usart_spi_readl(aus, MR);
+ u8 bits = spi->bits_per_word;
+
+ if (bits != 8) {
+ dev_dbg(&spi->dev, "Only 8 bits per word are supported\n");
+ return -EINVAL;
+ }
+
+ if (spi->mode & SPI_CPOL)
+ mr |= US_MR_CPOL;
+ else
+ mr &= ~US_MR_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ mr |= US_MR_CPHA;
+ else
+ mr &= ~US_MR_CPHA;
+
+ if (spi->mode & SPI_LOOP)
+ mr |= US_MR_LOOP;
+ else
+ mr &= ~US_MR_LOOP;
+
+ if (!ausd) {
+ ausd = kzalloc(sizeof(*ausd), GFP_KERNEL);
+ if (!ausd)
+ return -ENOMEM;
+
+ spi->controller_state = ausd;
+ }
+
+ *ausd = mr;
+
+ dev_dbg(&spi->dev,
+ "setup: bpw %u mode 0x%x -> mr %d %08x\n",
+ bits, spi->mode, spi->chip_select, mr);
+
+ return 0;
+}
+
+static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_set_xfer_speed(aus, xfer);
+ aus->xfer_failed = false;
+ aus->current_transfer = xfer;
+ aus->current_tx_remaining_bytes = xfer->len;
+ aus->current_rx_remaining_bytes = xfer->len;
+
+ while ((aus->current_tx_remaining_bytes ||
+ aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
+ at91_usart_spi_read_status(aus);
+ at91_usart_spi_tx(aus);
+ cpu_relax();
+ }
+
+ if (aus->xfer_failed) {
+ dev_err(aus->dev, "Overrun!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct spi_device *spi = message->spi;
+ u32 *ausd = spi->controller_state;
+
+ at91_usart_spi_writel(aus, CR, US_ENABLE);
+ at91_usart_spi_writel(aus, IER, US_OVRE_RXRDY_IRQS);
+ at91_usart_spi_writel(aus, MR, *ausd);
+
+ return 0;
+}
+
+static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
+ at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);
+
+ return 0;
+}
+
+static void at91_usart_spi_cleanup(struct spi_device *spi)
+{
+ struct at91_usart_spi_device *ausd = spi->controller_state;
+
+ spi->controller_state = NULL;
+ kfree(ausd);
+}
+
+static void at91_usart_spi_init(struct at91_usart_spi *aus)
+{
+ at91_usart_spi_writel(aus, MR, US_INIT);
+ at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
+}
+
+static int at91_usart_gpio_setup(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.parent->of_node;
+ int i;
+ int ret;
+ int nb;
+
+ if (!np)
+ return -EINVAL;
+
+ nb = of_gpio_named_count(np, "cs-gpios");
+ for (i = 0; i < nb; i++) {
+ int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+
+ if (cs_gpio < 0)
+ return cs_gpio;
+
+ if (gpio_is_valid(cs_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, cs_gpio,
+ GPIOF_DIR_OUT,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int at91_usart_spi_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct spi_controller *controller;
+ struct at91_usart_spi *aus;
+ struct clk *clk;
+ int irq;
+ int ret;
+
+ regs = platform_get_resource(to_platform_device(pdev->dev.parent),
+ IORESOURCE_MEM, 0);
+ if (!regs)
+ return -EINVAL;
+
+ irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
+ if (irq < 0)
+ return irq;
+
+ clk = devm_clk_get(pdev->dev.parent, "usart");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = -ENOMEM;
+ controller = spi_alloc_master(&pdev->dev, sizeof(*aus));
+ if (!controller)
+ goto at91_usart_spi_probe_fail;
+
+ ret = at91_usart_gpio_setup(pdev);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
+ controller->dev.of_node = pdev->dev.parent->of_node;
+ controller->bits_per_word_mask = SPI_BPW_MASK(8);
+ controller->setup = at91_usart_spi_setup;
+ controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->transfer_one = at91_usart_spi_transfer_one;
+ controller->prepare_message = at91_usart_spi_prepare_message;
+ controller->unprepare_message = at91_usart_spi_unprepare_message;
+ controller->cleanup = at91_usart_spi_cleanup;
+ controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
+ US_MIN_CLK_DIV);
+ controller->min_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
+ US_MAX_CLK_DIV);
+ platform_set_drvdata(pdev, controller);
+
+ aus = spi_master_get_devdata(controller);
+
+ aus->dev = &pdev->dev;
+ aus->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(aus->regs)) {
+ ret = PTR_ERR(aus->regs);
+ goto at91_usart_spi_probe_fail;
+ }
+
+ aus->irq = irq;
+ aus->clk = clk;
+
+ ret = devm_request_irq(&pdev->dev, irq, at91_usart_spi_interrupt, 0,
+ dev_name(&pdev->dev), controller);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ aus->spi_clk = clk_get_rate(clk);
+ at91_usart_spi_init(aus);
+
+ spin_lock_init(&aus->lock);
+ ret = devm_spi_register_master(&pdev->dev, controller);
+ if (ret)
+ goto at91_usart_fail_register_master;
+
+ dev_info(&pdev->dev,
+ "AT91 USART SPI Controller version 0x%x at %pa (irq %d)\n",
+ at91_usart_spi_readl(aus, VERSION),
+ &regs->start, irq);
+
+ return 0;
+
+at91_usart_fail_register_master:
+ clk_disable_unprepare(clk);
+at91_usart_spi_probe_fail:
+ spi_master_put(controller);
+ return ret;
+}
+
+static int at91_usart_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ clk_disable_unprepare(aus->clk);
+
+ return 0;
+}
+
+static const struct of_device_id at91_usart_spi_dt_ids[] = {
+ { .compatible = "microchip,at91sam9g45-usart-spi"},
+ { /* sentinel */}
+};
+
+MODULE_DEVICE_TABLE(of, at91_usart_spi_dt_ids);
+
+static struct platform_driver at91_usart_spi_driver = {
+ .driver = {
+ .name = "at91_usart_spi",
+ },
+ .probe = at91_usart_spi_probe,
+ .remove = at91_usart_spi_remove,
+};
+
+module_platform_driver(at91_usart_spi_driver);
+
+MODULE_DESCRIPTION("Microchip AT91 USART SPI Controller driver");
+MODULE_AUTHOR("Radu Pirea <radu.pirea@microchip.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:at91_usart_spi");
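
The probe path derives the controller's advertised speed range from the
USART input clock and the divider bounds, and
at91_usart_spi_set_xfer_speed() programs BRGR per transfer. A worked sketch
with an assumed 132 MHz peripheral clock:

	u32 clk_hz = 132000000;	/* illustrative rate */

	u32 max_hz = DIV_ROUND_UP(clk_hz, US_MIN_CLK_DIV);	/* /6     = 22 MHz   */
	u32 min_hz = DIV_ROUND_UP(clk_hz, US_MAX_CLK_DIV);	/* /65536 ~= 2015 Hz */

	/* per transfer: BRGR = ceil(clk / speed_hz), e.g. 1 MHz -> 132 */
	u32 brgr = DIV_ROUND_UP(clk_hz, 1000000);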
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
index 546a47156101..8cf0617d4ea0 100644
--- a/drivers/staging/erofs/namei.c
+++ b/drivers/staging/erofs/namei.c
@@ -223,18 +223,13 @@ static struct dentry *erofs_lookup(struct inode *dir,
if (err == -ENOENT) {
/* negative dentry */
inode = NULL;
- goto negative_out;
- } else if (unlikely(err))
- return ERR_PTR(err);
-
- debugln("%s, %s (nid %llu) found, d_type %u", __func__,
- dentry->d_name.name, nid, d_type);
-
- inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
-
-negative_out:
+ } else if (unlikely(err)) {
+ inode = ERR_PTR(err);
+ } else {
+ debugln("%s, %s (nid %llu) found, d_type %u", __func__,
+ dentry->d_name.name, nid, d_type);
+ inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
+ }
return d_splice_alias(inode, dentry);
}
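
The rewrite can funnel every outcome into a single exit because
d_splice_alias() already accepts all three results: a valid inode, NULL for
a negative dentry, or an ERR_PTR, which it turns into the matching error
return. A hedged recap:

	/* inode is one of: valid inode, NULL (negative), ERR_PTR(err) */
	return d_splice_alias(inode, dentry);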
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 9f18be14dda6..f38f1f74fcd6 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -49,9 +49,9 @@ struct rtllib_tkip_data {
u32 dot11RSNAStatsTKIPLocalMICFailures;
int key_idx;
- struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_sync_skcipher *rx_tfm_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_sync_skcipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16];
@@ -66,8 +66,7 @@ static void *rtllib_tkip_init(int key_idx)
if (priv == NULL)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm_arc4)) {
pr_debug("Could not allocate crypto API arc4\n");
priv->tx_tfm_arc4 = NULL;
@@ -81,8 +80,7 @@ static void *rtllib_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm_arc4)) {
pr_debug("Could not allocate crypto API arc4\n");
priv->rx_tfm_arc4 = NULL;
@@ -100,9 +98,9 @@ static void *rtllib_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_skcipher(priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -116,9 +114,9 @@ static void rtllib_tkip_deinit(void *priv)
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_skcipher(_priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -337,7 +335,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
@@ -349,8 +347,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
sg_init_one(&sg, pos, len+4);
- crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+ crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
ret = crypto_skcipher_encrypt(req);
@@ -420,7 +418,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
if ((iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
@@ -447,8 +445,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
sg_init_one(&sg, pos, plen+4);
- crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+ crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
err = crypto_skcipher_decrypt(req);
@@ -664,9 +662,9 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
struct rtllib_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
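
This file and the three that follow apply the same mechanical conversion to
the synchronous skcipher API, which is what SKCIPHER_REQUEST_ON_STACK-style
users are expected to move to. A self-contained sketch of the converted
pattern (key, scatterlist and length mirror the call sites; error handling
elided):

	struct crypto_sync_skcipher *tfm =
		crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);

	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	crypto_sync_skcipher_setkey(tfm, rc4key, 16);
	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
	err = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	crypto_free_sync_skcipher(tfm);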
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index b3343a5d0fd6..d11ec39171d5 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -27,8 +27,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_skcipher *tx_tfm;
- struct crypto_skcipher *rx_tfm;
+ struct crypto_sync_skcipher *tx_tfm;
+ struct crypto_sync_skcipher *rx_tfm;
};
@@ -41,13 +41,13 @@ static void *prism2_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm)) {
pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
priv->tx_tfm = NULL;
goto fail;
}
- priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm)) {
pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
priv->rx_tfm = NULL;
@@ -61,8 +61,8 @@ static void *prism2_wep_init(int keyidx)
fail:
if (priv) {
- crypto_free_skcipher(priv->tx_tfm);
- crypto_free_skcipher(priv->rx_tfm);
+ crypto_free_sync_skcipher(priv->tx_tfm);
+ crypto_free_sync_skcipher(priv->rx_tfm);
kfree(priv);
}
return NULL;
@@ -74,8 +74,8 @@ static void prism2_wep_deinit(void *priv)
struct prism2_wep_data *_priv = priv;
if (_priv) {
- crypto_free_skcipher(_priv->tx_tfm);
- crypto_free_skcipher(_priv->rx_tfm);
+ crypto_free_sync_skcipher(_priv->tx_tfm);
+ crypto_free_sync_skcipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -135,7 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
@@ -146,8 +146,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[3] = crc >> 24;
sg_init_one(&sg, pos, len+4);
- crypto_skcipher_setkey(wep->tx_tfm, key, klen);
- skcipher_request_set_tfm(req, wep->tx_tfm);
+ crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
+ skcipher_request_set_sync_tfm(req, wep->tx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
err = crypto_skcipher_encrypt(req);
@@ -199,11 +199,11 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
sg_init_one(&sg, pos, plen+4);
- crypto_skcipher_setkey(wep->rx_tfm, key, klen);
- skcipher_request_set_tfm(req, wep->rx_tfm);
+ crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
+ skcipher_request_set_sync_tfm(req, wep->rx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
err = crypto_skcipher_decrypt(req);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 1088fa0aee0e..829fa4bd253c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -53,9 +53,9 @@ struct ieee80211_tkip_data {
int key_idx;
- struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_sync_skcipher *rx_tfm_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_sync_skcipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
@@ -71,8 +71,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
@@ -88,8 +87,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
@@ -110,9 +108,9 @@ static void *ieee80211_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_skcipher(priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -126,9 +124,9 @@ static void ieee80211_tkip_deinit(void *priv)
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_skcipher(_priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -340,7 +338,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
@@ -348,9 +346,9 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, len+4);
- skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+ skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
ret = crypto_skcipher_encrypt(req);
@@ -418,7 +416,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
if (iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
@@ -440,10 +438,10 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
- crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, plen+4);
- skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+ skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
@@ -663,9 +661,9 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index b9f86be9e52b..d4a1bf0caa7a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -32,8 +32,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_skcipher *tx_tfm;
- struct crypto_skcipher *rx_tfm;
+ struct crypto_sync_skcipher *tx_tfm;
+ struct crypto_sync_skcipher *rx_tfm;
};
@@ -46,10 +46,10 @@ static void *prism2_wep_init(int keyidx)
return NULL;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm))
goto free_priv;
- priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm))
goto free_tx;
@@ -58,7 +58,7 @@ static void *prism2_wep_init(int keyidx)
return priv;
free_tx:
- crypto_free_skcipher(priv->tx_tfm);
+ crypto_free_sync_skcipher(priv->tx_tfm);
free_priv:
kfree(priv);
return NULL;
@@ -70,8 +70,8 @@ static void prism2_wep_deinit(void *priv)
struct prism2_wep_data *_priv = priv;
if (_priv) {
- crypto_free_skcipher(_priv->tx_tfm);
- crypto_free_skcipher(_priv->rx_tfm);
+ crypto_free_sync_skcipher(_priv->tx_tfm);
+ crypto_free_sync_skcipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -128,7 +128,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
@@ -138,10 +138,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(wep->tx_tfm, key, klen);
+ crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
sg_init_one(&sg, pos, len+4);
- skcipher_request_set_tfm(req, wep->tx_tfm);
+ skcipher_request_set_sync_tfm(req, wep->tx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
@@ -193,12 +193,12 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
- crypto_skcipher_setkey(wep->rx_tfm, key, klen);
+ crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
sg_init_one(&sg, pos, plen+4);
- skcipher_request_set_tfm(req, wep->rx_tfm);
+ skcipher_request_set_sync_tfm(req, wep->rx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 8de16016b6de..71888b979ab5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -598,9 +598,12 @@ out:
mutex_unlock(&cdev_list_lock);
}
+static void __cxgbit_free_conn(struct cxgbit_sock *csk);
+
void cxgbit_free_np(struct iscsi_np *np)
{
struct cxgbit_np *cnp = np->np_context;
+ struct cxgbit_sock *csk, *tmp;
cnp->com.state = CSK_STATE_DEAD;
if (cnp->com.cdev)
@@ -608,6 +611,13 @@ void cxgbit_free_np(struct iscsi_np *np)
else
cxgbit_free_all_np(cnp);
+ spin_lock_bh(&cnp->np_accept_lock);
+ list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
+ list_del_init(&csk->accept_node);
+ __cxgbit_free_conn(csk);
+ }
+ spin_unlock_bh(&cnp->np_accept_lock);
+
np->np_context = NULL;
cxgbit_put_cnp(cnp);
}
@@ -705,9 +715,9 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk)
csk->tid, 600, __func__);
}
-void cxgbit_free_conn(struct iscsi_conn *conn)
+static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
- struct cxgbit_sock *csk = conn->context;
+ struct iscsi_conn *conn = csk->conn;
bool release = false;
pr_debug("%s: state %d\n",
@@ -716,7 +726,7 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
spin_lock_bh(&csk->lock);
switch (csk->com.state) {
case CSK_STATE_ESTABLISHED:
- if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
csk->com.state = CSK_STATE_CLOSING;
cxgbit_send_halfclose(csk);
} else {
@@ -741,6 +751,11 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
cxgbit_put_csk(csk);
}
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+ __cxgbit_free_conn(conn->context);
+}
+
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
@@ -803,6 +818,7 @@ void _cxgbit_free_csk(struct kref *kref)
spin_unlock_bh(&cdev->cskq.lock);
cxgbit_free_skb(csk);
+ cxgbit_put_cnp(csk->cnp);
cxgbit_put_cdev(cdev);
kfree(csk);
@@ -1351,6 +1367,7 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
goto rel_skb;
}
+ cxgbit_get_cnp(cnp);
cxgbit_get_cdev(cdev);
spin_lock(&cdev->cskq.lock);
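
The new cxgbit_get_cnp() taken in cxgbit_pass_accept_req() pairs with the cxgbit_put_cnp() added to _cxgbit_free_csk(), so the listening context cannot be freed while an accepted-but-unclaimed socket still points at it. The underlying pattern, expressed with a bare kref (hypothetical types):

	struct parent {
		struct kref ref;
	};

	static void parent_release(struct kref *ref)
	{
		kfree(container_of(ref, struct parent, ref));
	}

	/* Child starts referencing the parent: take a reference. */
	static void child_attach(struct parent *p)
	{
		kref_get(&p->ref);
	}

	/* Child's final teardown: drop it, possibly freeing the parent. */
	static void child_detach(struct parent *p)
	{
		kref_put(&p->ref, parent_release);
	}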
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index cc756a123fd8..c1d5a173553d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4355,7 +4355,7 @@ int iscsit_close_session(struct iscsi_session *sess)
transport_deregister_session(sess->se_sess);
if (sess->sess_ops->ErrorRecoveryLevel == 2)
- iscsit_free_connection_recovery_entires(sess);
+ iscsit_free_connection_recovery_entries(sess);
iscsit_free_all_ooo_cmdsns(sess);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 718fe9a1b709..1193cf884a28 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -770,21 +770,8 @@ void iscsit_handle_time2retain_timeout(struct timer_list *t)
pr_err("Time2Retain timer expired for SID: %u, cleaning up"
" iSCSI session.\n", sess->sid);
- {
- struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
- if (tiqn) {
- spin_lock(&tiqn->sess_err_stats.lock);
- strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
- (void *)sess->sess_ops->InitiatorName);
- tiqn->sess_err_stats.last_sess_failure_type =
- ISCSI_SESS_ERR_CXN_TIMEOUT;
- tiqn->sess_err_stats.cxn_timeout_errors++;
- atomic_long_inc(&sess->conn_timeout_errors);
- spin_unlock(&tiqn->sess_err_stats.lock);
- }
- }
+ iscsit_fill_cxn_timeout_err_stats(sess);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_close_session(sess);
}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 5efa42b939a1..a211e8154f4c 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -1169,15 +1169,21 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
na = iscsit_tpg_get_node_attrib(sess);
if (!sess->sess_ops->ErrorRecoveryLevel) {
- pr_debug("Unable to recover from DataOut timeout while"
- " in ERL=0.\n");
+ pr_err("Unable to recover from DataOut timeout while"
+ " in ERL=0, closing iSCSI connection for I_T Nexus"
+ " %s,i,0x%6phN,%s,t,0x%02x\n",
+ sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
goto failure;
}
if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
- pr_debug("Command ITT: 0x%08x exceeded max retries"
- " for DataOUT timeout %u, closing iSCSI connection.\n",
- cmd->init_task_tag, na->dataout_timeout_retries);
+ pr_err("Command ITT: 0x%08x exceeded max retries"
+ " for DataOUT timeout %u, closing iSCSI connection for"
+ " I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+ cmd->init_task_tag, na->dataout_timeout_retries,
+ sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
goto failure;
}
@@ -1224,6 +1230,7 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
failure:
spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_fill_cxn_timeout_err_stats(sess);
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
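
The new messages rely on the "%*phN" printk extension: it dumps the given number of bytes as contiguous hex with no separators, so the 6-byte ISID prints as twelve hex digits. A tiny illustration with a hypothetical ISID value:

	u8 isid[6] = { 0x00, 0x02, 0x3d, 0x01, 0x00, 0x00 };

	/* Prints: I_T Nexus isid=0x00023d010000 */
	pr_info("I_T Nexus isid=0x%6phN\n", isid);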
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 8df9c90f3db3..b08b620b1bf0 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -125,7 +125,7 @@ struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
return NULL;
}
-void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+void iscsit_free_connection_recovery_entries(struct iscsi_session *sess)
{
struct iscsi_cmd *cmd, *cmd_tmp;
struct iscsi_conn_recovery *cr, *cr_tmp;
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 93e180d68d07..a39b0caf2337 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -13,7 +13,7 @@ extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32
extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
struct iscsi_session *, u16);
-extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern void iscsit_free_connection_recovery_entries(struct iscsi_session *);
extern int iscsit_remove_active_connection_recovery_entry(
struct iscsi_conn_recovery *, struct iscsi_session *);
extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bb90c80ff388..ae3209efd0e0 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -578,7 +578,7 @@ int iscsi_login_post_auth_non_zero_tsih(
}
/*
- * Check for any connection recovery entires containing CID.
+ * Check for any connection recovery entries containing CID.
* We use the original ExpStatSN sent in the first login request
* to acknowledge commands for the failed connection.
*
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index df0a39811dc2..bb98882bdaa7 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -328,10 +328,10 @@ static ssize_t iscsi_stat_tgt_attr_fail_intr_name_show(struct config_item *item,
{
struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
- unsigned char buf[224];
+ unsigned char buf[ISCSI_IQN_LEN];
spin_lock(&lstat->lock);
- snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+ snprintf(buf, ISCSI_IQN_LEN, "%s", lstat->last_intr_fail_name[0] ?
lstat->last_intr_fail_name : NONE);
spin_unlock(&lstat->lock);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 49be1e41290c..1227872227dc 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -915,6 +915,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);
+ struct iscsi_session *sess = conn->sess;
iscsit_inc_conn_usage_count(conn);
@@ -925,28 +926,14 @@ void iscsit_handle_nopin_response_timeout(struct timer_list *t)
return;
}
- pr_debug("Did not receive response to NOPIN on CID: %hu on"
- " SID: %u, failing connection.\n", conn->cid,
- conn->sess->sid);
+ pr_err("Did not receive response to NOPIN on CID: %hu, failing"
+ " connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+ conn->cid, sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
- {
- struct iscsi_portal_group *tpg = conn->sess->tpg;
- struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
- if (tiqn) {
- spin_lock_bh(&tiqn->sess_err_stats.lock);
- strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
- conn->sess->sess_ops->InitiatorName);
- tiqn->sess_err_stats.last_sess_failure_type =
- ISCSI_SESS_ERR_CXN_TIMEOUT;
- tiqn->sess_err_stats.cxn_timeout_errors++;
- atomic_long_inc(&conn->sess->conn_timeout_errors);
- spin_unlock_bh(&tiqn->sess_err_stats.lock);
- }
- }
-
+ iscsit_fill_cxn_timeout_err_stats(sess);
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
@@ -1405,3 +1392,22 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
return tpg->tpg_tiqn;
}
+
+void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *sess)
+{
+ struct iscsi_portal_group *tpg = sess->tpg;
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ if (!tiqn)
+ return;
+
+ spin_lock_bh(&tiqn->sess_err_stats.lock);
+ strlcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ sess->sess_ops->InitiatorName,
+ sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
+ tiqn->sess_err_stats.last_sess_failure_type =
+ ISCSI_SESS_ERR_CXN_TIMEOUT;
+ tiqn->sess_err_stats.cxn_timeout_errors++;
+ atomic_long_inc(&sess->conn_timeout_errors);
+ spin_unlock_bh(&tiqn->sess_err_stats.lock);
+}
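
Besides factoring out the duplicated block, the helper replaces the open-coded strcpy() with a strlcpy() bounded by the destination field, so an oversized InitiatorName can no longer overrun last_sess_fail_rem_name. The bounded-copy idiom in isolation (hypothetical buffer, src assumed NUL-terminated):

	char dst[224];

	/* Truncates to 223 chars + NUL; never overruns dst. */
	strlcpy(dst, src, sizeof(dst));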
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index d66dfc212624..68e84803b0a1 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -67,5 +67,6 @@ extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+extern void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *);
#endif /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index ce1321a5cb7b..b5ed9c377060 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -514,7 +514,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
}
/* Always in 512 byte units for Linux/Block */
- block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ block_lba += sg->length >> SECTOR_SHIFT;
sectors -= 1;
}
@@ -635,14 +635,15 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
}
static int
-iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
+ struct sg_mapping_iter *miter)
{
struct se_device *dev = cmd->se_dev;
struct blk_integrity *bi;
struct bio_integrity_payload *bip;
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- struct scatterlist *sg;
- int i, rc;
+ int rc;
+ size_t resid, len;
bi = bdev_get_integrity(ib_dev->ibd_bd);
if (!bi) {
@@ -650,31 +651,39 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
return -ENODEV;
}
- bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+ bip = bio_integrity_alloc(bio, GFP_NOIO,
+ min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
if (IS_ERR(bip)) {
pr_err("Unable to allocate bio_integrity_payload\n");
return PTR_ERR(bip);
}
- bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
- dev->prot_length;
- bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+ bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+ bip_set_seed(bip, bio->bi_iter.bi_sector);
pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
(unsigned long long)bip->bip_iter.bi_sector);
- for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+ resid = bip->bip_iter.bi_size;
+ while (resid > 0 && sg_miter_next(miter)) {
- rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
- sg->offset);
- if (rc != sg->length) {
+ len = min_t(size_t, miter->length, resid);
+ rc = bio_integrity_add_page(bio, miter->page, len,
+ offset_in_page(miter->addr));
+ if (rc != len) {
pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ sg_miter_stop(miter);
return -ENOMEM;
}
- pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
- sg_page(sg), sg->length, sg->offset);
+ pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
+ miter->page, len, offset_in_page(miter->addr));
+
+ resid -= len;
+ if (len < miter->length)
+ miter->consumed -= miter->length - len;
}
+ sg_miter_stop(miter);
return 0;
}
@@ -686,12 +695,13 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct se_device *dev = cmd->se_dev;
sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
struct iblock_req *ibr;
- struct bio *bio, *bio_start;
+ struct bio *bio;
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
unsigned bio_cnt;
- int i, op, op_flags = 0;
+ int i, rc, op, op_flags = 0;
+ struct sg_mapping_iter prot_miter;
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -726,13 +736,17 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!bio)
goto fail_free_ibr;
- bio_start = bio;
bio_list_init(&list);
bio_list_add(&list, bio);
refcount_set(&ibr->pending, 2);
bio_cnt = 1;
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
+ sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
+ op == REQ_OP_READ ? SG_MITER_FROM_SG :
+ SG_MITER_TO_SG);
+
for_each_sg(sgl, sg, sgl_nents, i) {
/*
* XXX: if the length the device accepts is shorter than the
@@ -741,6 +755,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
*/
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+ rc = iblock_alloc_bip(cmd, bio, &prot_miter);
+ if (rc)
+ goto fail_put_bios;
+ }
+
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
iblock_submit_bios(&list);
bio_cnt = 0;
@@ -757,12 +777,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
/* Always in 512 byte units for Linux/Block */
- block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ block_lba += sg->length >> SECTOR_SHIFT;
sg_num--;
}
if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
- int rc = iblock_alloc_bip(cmd, bio_start);
+ rc = iblock_alloc_bip(cmd, bio, &prot_miter);
if (rc)
goto fail_put_bios;
}
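
The reworked PI path walks the protection scatterlist through an sg mapping iterator so each bio attaches only as many integrity bytes as its data sectors require, handing any unused remainder of a chunk back via miter->consumed. A minimal sg_miter walk under the same rules (hypothetical consume() callback):

	static void walk_prot_sgl(struct scatterlist *sgl, unsigned int nents,
				  size_t total)
	{
		struct sg_mapping_iter miter;
		size_t done = 0;

		sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
		while (done < total && sg_miter_next(&miter)) {
			size_t len = min(miter.length, total - done);

			consume(miter.addr, len);	/* hypothetical */
			done += len;
			/* Partially used chunk: rewind the unused tail. */
			if (len < miter.length)
				miter.consumed -= miter.length - len;
		}
		sg_miter_stop(&miter);	/* required before leaving */
	}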
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 9cc3843404d4..cefc641145b3 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -9,7 +9,6 @@
#define IBLOCK_VERSION "4.0"
#define IBLOCK_MAX_CDBS 16
-#define IBLOCK_LBA_SHIFT 9
struct iblock_req {
refcount_t pending;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index ebac2b49b9c6..1ac1f7d2e6c9 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -360,6 +360,10 @@ static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
unsigned int offset;
sense_reason_t ret = TCM_NO_SENSE;
int i, count;
+
+ if (!success)
+ return 0;
+
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
@@ -425,14 +429,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
struct se_device *dev = cmd->se_dev;
sense_reason_t ret = TCM_NO_SENSE;
- /*
- * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
- * within target_complete_ok_work() if the command was successfully
- * sent to the backend driver.
- */
spin_lock_irq(&cmd->t_state_lock);
- if (cmd->transport_state & CMD_T_SENT) {
- cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+ if (success) {
*post_ret = 1;
if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
@@ -453,7 +451,8 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
int *post_ret)
{
struct se_device *dev = cmd->se_dev;
- struct scatterlist *write_sg = NULL, *sg;
+ struct sg_table write_tbl = { };
+ struct scatterlist *write_sg, *sg;
unsigned char *buf = NULL, *addr;
struct sg_mapping_iter m;
unsigned int offset = 0, len;
@@ -494,14 +493,12 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
goto out;
}
- write_sg = kmalloc_array(cmd->t_data_nents, sizeof(*write_sg),
- GFP_KERNEL);
- if (!write_sg) {
+ if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
pr_err("Unable to allocate compare_and_write sg\n");
ret = TCM_OUT_OF_RESOURCES;
goto out;
}
- sg_init_table(write_sg, cmd->t_data_nents);
+ write_sg = write_tbl.sgl;
/*
* Setup verify and write data payloads from total NumberLBAs.
*/
@@ -597,7 +594,7 @@ out:
* sbc_compare_and_write() before the original READ I/O submission.
*/
up(&dev->caw_sem);
- kfree(write_sg);
+ sg_free_table(&write_tbl);
kfree(buf);
return ret;
}
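
Moving from a bare kmalloc_array() to struct sg_table simplifies the unwind: a zero-initialized table is safe to pass to sg_free_table() even on paths where the allocation never ran. The pattern in isolation (hypothetical wrapper):

	static int build_write_sg(unsigned int nents)
	{
		struct sg_table tbl = { };
		int ret;

		ret = sg_alloc_table(&tbl, nents, GFP_KERNEL);
		if (ret < 0)
			return ret;
		/* ... use tbl.sgl like any scatterlist ... */
		sg_free_table(&tbl);	/* no-op while tbl.sgl == NULL */
		return 0;
	}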
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 86c0156e6c88..4cf33e2cc705 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(target_submit_tmr);
void transport_generic_request_failure(struct se_cmd *cmd,
sense_reason_t sense_reason)
{
- int ret = 0, post_ret = 0;
+ int ret = 0;
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
@@ -1789,13 +1789,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
*/
transport_complete_task_attr(cmd);
- /*
- * Handle special case for COMPARE_AND_WRITE failure, where the
- * callback is expected to drop the per device ->caw_sem.
- */
- if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
- cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd, false, &post_ret);
+ if (cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd, false, NULL);
if (transport_check_aborted_status(cmd, 1))
return;
@@ -2012,7 +2007,7 @@ void target_execute_cmd(struct se_cmd *cmd)
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*
- * If the received CDB has aleady been aborted stop processing it here.
+ * If the received CDB has already been aborted stop processing it here.
*/
spin_lock_irq(&cmd->t_state_lock);
if (__transport_check_aborted_status(cmd, 1)) {
@@ -2516,7 +2511,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
}
/*
- * Determine is the TCM fabric module has already allocated physical
+ * Determine if the TCM fabric module has already allocated physical
* memory, and is directly calling transport_generic_map_mem_to_cmd()
* beforehand.
*/
@@ -2754,7 +2749,7 @@ static void target_release_cmd_kref(struct kref *kref)
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_del_init(&se_cmd->se_cmd_list);
- if (list_empty(&se_sess->sess_cmd_list))
+ if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
wake_up(&se_sess->cmd_list_wq);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
@@ -2907,7 +2902,7 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
spin_lock_irq(&se_sess->sess_cmd_lock);
do {
- ret = wait_event_interruptible_lock_irq_timeout(
+ ret = wait_event_lock_irq_timeout(
se_sess->cmd_list_wq,
list_empty(&se_sess->sess_cmd_list),
se_sess->sess_cmd_lock, 180 * HZ);
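
Dropping the interruptible variant means a pending signal can no longer end the wait early; wait_event_lock_irq_timeout() returns only when the condition holds or the 180 s timeout lapses, reacquiring the spinlock either way. Usage in isolation (hypothetical wrapper):

	static int drain_cmds(wait_queue_head_t *wq, struct list_head *list,
			      spinlock_t *lock)
	{
		long left;

		spin_lock_irq(lock);
		left = wait_event_lock_irq_timeout(*wq, list_empty(list),
						   *lock, 180 * HZ);
		spin_unlock_irq(lock);

		return left ? 0 : -ETIMEDOUT;	/* 0 jiffies => timed out */
	}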
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 2718a933c0c6..70adcfdca8d1 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -391,7 +391,6 @@ out:
struct xcopy_pt_cmd {
bool remote_port;
struct se_cmd se_cmd;
- struct xcopy_op *xcopy_op;
struct completion xpt_passthrough_sem;
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};
@@ -596,8 +595,6 @@ static int target_xcopy_setup_pt_cmd(
* X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
*/
target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
-
- xpt_cmd->xcopy_op = xop;
target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
cmd->tag = 0;
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index 3be9519654e5..cf3fad2cb871 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -2,7 +2,7 @@
* TURBOchannel bus services.
*
* Copyright (c) Harald Koerfgen, 1998
- * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki
+ * Copyright (c) 2001, 2003, 2005, 2006, 2018 Maciej W. Rozycki
* Copyright (c) 2005 James Simmons
*
* This file is subject to the terms and conditions of the GNU
@@ -10,6 +10,7 @@
* directory of this archive for more details.
*/
#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -92,6 +93,11 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
tdev->dev.bus = &tc_bus_type;
tdev->slot = slot;
+ /* TURBOchannel has 34-bit DMA addressing (16GiB space). */
+ tdev->dma_mask = DMA_BIT_MASK(34);
+ tdev->dev.dma_mask = &tdev->dma_mask;
+ tdev->dev.coherent_dma_mask = DMA_BIT_MASK(34);
+
for (i = 0; i < 8; i++) {
tdev->firmware[i] =
readb(module + offset + TC_FIRM_VER + 4 * i);
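
With the bus core now publishing a 34-bit mask for every TURBOchannel device, drivers can negotiate DMA capability through the generic helpers; a hypothetical driver-probe fragment (assumes <linux/dma-mapping.h>):

	static int example_tc_probe(struct tc_dev *tdev)
	{
		int ret;

		/* Validated against the bus-provided tdev->dma_mask. */
		ret = dma_set_mask_and_coherent(&tdev->dev, DMA_BIT_MASK(34));
		if (ret)
			return ret;

		/* dma_map_single()/dma_alloc_coherent() now usable. */
		return 0;
	}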
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 0e69edc77d18..5422523c03f8 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -432,7 +432,7 @@ source "drivers/thermal/samsung/Kconfig"
endmenu
menu "STMicroelectronics thermal drivers"
-depends on ARCH_STI && OF
+depends on (ARCH_STI || ARCH_STM32) && OF
source "drivers/thermal/st/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 610344eb3e03..82bb50dc6423 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -53,7 +53,7 @@ obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
-obj-$(CONFIG_ST_THERMAL) += st/
+obj-y += st/
obj-$(CONFIG_QCOM_TSENS) += qcom/
obj-y += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 2c2f6d93034e..92f67d40f2e9 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -526,8 +526,8 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev,
/* First memory region points towards the status register */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
+ if (!res)
+ return -EIO;
/*
* Edit the resource start address and length to map over all the
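
The bug fixed here is a mismatch of failure conventions: platform_get_resource() signals failure with NULL and never returns an ERR_PTR, so IS_ERR() on its result can never trigger. The correct pairing of checks inside a probe:

	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)			/* NULL on failure, never ERR_PTR */
		return -EIO;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))		/* ERR_PTR on failure, never NULL */
		return PTR_ERR(base);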
diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
index dd8dd947b7f0..01b0cb994457 100644
--- a/drivers/thermal/da9062-thermal.c
+++ b/drivers/thermal/da9062-thermal.c
@@ -106,7 +106,7 @@ static void da9062_thermal_poll_on(struct work_struct *work)
THERMAL_EVENT_UNSPECIFIED);
delay = msecs_to_jiffies(thermal->zone->passive_delay);
- schedule_delayed_work(&thermal->work, delay);
+ queue_delayed_work(system_freezable_wq, &thermal->work, delay);
return;
}
@@ -125,7 +125,7 @@ static irqreturn_t da9062_thermal_irq_handler(int irq, void *data)
struct da9062_thermal *thermal = data;
disable_irq_nosync(thermal->irq);
- schedule_delayed_work(&thermal->work, 0);
+ queue_delayed_work(system_freezable_wq, &thermal->work, 0);
return IRQ_HANDLED;
}
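
Queuing on system_freezable_wq rather than the default queue makes the poll work freeze with userspace during suspend, presumably so the zone is not polled while the PMIC is being powered down. The API is otherwise identical to schedule_delayed_work():

	INIT_DELAYED_WORK(&thermal->work, da9062_thermal_poll_on);

	/* Runs on the freezable queue; parked across suspend/resume. */
	queue_delayed_work(system_freezable_wq, &thermal->work, delay);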
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 761d0559c268..c4111a98f1a7 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -55,25 +55,39 @@
#define HI3660_TEMP_STEP (205)
#define HI3660_TEMP_LAG (4000)
-#define HI6220_DEFAULT_SENSOR 2
-#define HI3660_DEFAULT_SENSOR 1
+#define HI6220_CLUSTER0_SENSOR 2
+#define HI6220_CLUSTER1_SENSOR 1
+
+#define HI3660_LITTLE_SENSOR 0
+#define HI3660_BIG_SENSOR 1
+#define HI3660_G3D_SENSOR 2
+#define HI3660_MODEM_SENSOR 3
+
+struct hisi_thermal_data;
struct hisi_thermal_sensor {
+ struct hisi_thermal_data *data;
struct thermal_zone_device *tzd;
+ const char *irq_name;
uint32_t id;
uint32_t thres_temp;
};
+struct hisi_thermal_ops {
+ int (*get_temp)(struct hisi_thermal_sensor *sensor);
+ int (*enable_sensor)(struct hisi_thermal_sensor *sensor);
+ int (*disable_sensor)(struct hisi_thermal_sensor *sensor);
+ int (*irq_handler)(struct hisi_thermal_sensor *sensor);
+ int (*probe)(struct hisi_thermal_data *data);
+};
+
struct hisi_thermal_data {
- int (*get_temp)(struct hisi_thermal_data *data);
- int (*enable_sensor)(struct hisi_thermal_data *data);
- int (*disable_sensor)(struct hisi_thermal_data *data);
- int (*irq_handler)(struct hisi_thermal_data *data);
+ const struct hisi_thermal_ops *ops;
+ struct hisi_thermal_sensor *sensor;
struct platform_device *pdev;
struct clk *clk;
- struct hisi_thermal_sensor sensor;
void __iomem *regs;
- int irq;
+ int nr_sensors;
};
/*
@@ -266,30 +280,40 @@ static inline void hi6220_thermal_hdak_set(void __iomem *addr, int value)
(value << 4), addr + HI6220_TEMP0_CFG);
}
-static int hi6220_thermal_irq_handler(struct hisi_thermal_data *data)
+static int hi6220_thermal_irq_handler(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
hi6220_thermal_alarm_clear(data->regs, 1);
return 0;
}
-static int hi3660_thermal_irq_handler(struct hisi_thermal_data *data)
+static int hi3660_thermal_irq_handler(struct hisi_thermal_sensor *sensor)
{
- hi3660_thermal_alarm_clear(data->regs, data->sensor.id, 1);
+ struct hisi_thermal_data *data = sensor->data;
+
+ hi3660_thermal_alarm_clear(data->regs, sensor->id, 1);
return 0;
}
-static int hi6220_thermal_get_temp(struct hisi_thermal_data *data)
+static int hi6220_thermal_get_temp(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
return hi6220_thermal_get_temperature(data->regs);
}
-static int hi3660_thermal_get_temp(struct hisi_thermal_data *data)
+static int hi3660_thermal_get_temp(struct hisi_thermal_sensor *sensor)
{
- return hi3660_thermal_get_temperature(data->regs, data->sensor.id);
+ struct hisi_thermal_data *data = sensor->data;
+
+ return hi3660_thermal_get_temperature(data->regs, sensor->id);
}
-static int hi6220_thermal_disable_sensor(struct hisi_thermal_data *data)
+static int hi6220_thermal_disable_sensor(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
/* disable sensor module */
hi6220_thermal_enable(data->regs, 0);
hi6220_thermal_alarm_enable(data->regs, 0);
@@ -300,16 +324,18 @@ static int hi6220_thermal_disable_sensor(struct hisi_thermal_data *data)
return 0;
}
-static int hi3660_thermal_disable_sensor(struct hisi_thermal_data *data)
+static int hi3660_thermal_disable_sensor(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
/* disable sensor module */
- hi3660_thermal_alarm_enable(data->regs, data->sensor.id, 0);
+ hi3660_thermal_alarm_enable(data->regs, sensor->id, 0);
return 0;
}
-static int hi6220_thermal_enable_sensor(struct hisi_thermal_data *data)
+static int hi6220_thermal_enable_sensor(struct hisi_thermal_sensor *sensor)
{
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_data *data = sensor->data;
int ret;
/* enable clock for tsensor */
@@ -345,10 +371,10 @@ static int hi6220_thermal_enable_sensor(struct hisi_thermal_data *data)
return 0;
}
-static int hi3660_thermal_enable_sensor(struct hisi_thermal_data *data)
+static int hi3660_thermal_enable_sensor(struct hisi_thermal_sensor *sensor)
{
unsigned int value;
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_data *data = sensor->data;
/* disable interrupt */
hi3660_thermal_alarm_enable(data->regs, sensor->id, 0);
@@ -371,21 +397,8 @@ static int hi6220_thermal_probe(struct hisi_thermal_data *data)
{
struct platform_device *pdev = data->pdev;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
- data->get_temp = hi6220_thermal_get_temp;
- data->enable_sensor = hi6220_thermal_enable_sensor;
- data->disable_sensor = hi6220_thermal_disable_sensor;
- data->irq_handler = hi6220_thermal_irq_handler;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- dev_err(dev, "failed to get io address\n");
- return PTR_ERR(data->regs);
- }
-
data->clk = devm_clk_get(dev, "thermal_clk");
if (IS_ERR(data->clk)) {
ret = PTR_ERR(data->clk);
@@ -394,11 +407,14 @@ static int hi6220_thermal_probe(struct hisi_thermal_data *data)
return ret;
}
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0)
- return data->irq;
+ data->sensor = devm_kzalloc(dev, sizeof(*data->sensor), GFP_KERNEL);
+ if (!data->sensor)
+ return -ENOMEM;
- data->sensor.id = HI6220_DEFAULT_SENSOR;
+ data->sensor[0].id = HI6220_CLUSTER0_SENSOR;
+ data->sensor[0].irq_name = "tsensor_intr";
+ data->sensor[0].data = data;
+ data->nr_sensors = 1;
return 0;
}
@@ -407,38 +423,34 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
{
struct platform_device *pdev = data->pdev;
struct device *dev = &pdev->dev;
- struct resource *res;
- data->get_temp = hi3660_thermal_get_temp;
- data->enable_sensor = hi3660_thermal_enable_sensor;
- data->disable_sensor = hi3660_thermal_disable_sensor;
- data->irq_handler = hi3660_thermal_irq_handler;
+ data->nr_sensors = 2;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- dev_err(dev, "failed to get io address\n");
- return PTR_ERR(data->regs);
- }
+ data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
+ data->nr_sensors, GFP_KERNEL);
+ if (!data->sensor)
+ return -ENOMEM;
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0)
- return data->irq;
+ data->sensor[0].id = HI3660_BIG_SENSOR;
+ data->sensor[0].irq_name = "tsensor_a73";
+ data->sensor[0].data = data;
- data->sensor.id = HI3660_DEFAULT_SENSOR;
+ data->sensor[1].id = HI3660_LITTLE_SENSOR;
+ data->sensor[1].irq_name = "tsensor_a53";
+ data->sensor[1].data = data;
return 0;
}
static int hisi_thermal_get_temp(void *__data, int *temp)
{
- struct hisi_thermal_data *data = __data;
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_sensor *sensor = __data;
+ struct hisi_thermal_data *data = sensor->data;
- *temp = data->get_temp(data);
+ *temp = data->ops->get_temp(sensor);
- dev_dbg(&data->pdev->dev, "id=%d, temp=%d, thres=%d\n",
- sensor->id, *temp, sensor->thres_temp);
+ dev_dbg(&data->pdev->dev, "tzd=%p, id=%d, temp=%d, thres=%d\n",
+ sensor->tzd, sensor->id, *temp, sensor->thres_temp);
return 0;
}
@@ -449,38 +461,39 @@ static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
{
- struct hisi_thermal_data *data = dev;
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_sensor *sensor = dev;
+ struct hisi_thermal_data *data = sensor->data;
int temp = 0;
- data->irq_handler(data);
+ data->ops->irq_handler(sensor);
- hisi_thermal_get_temp(data, &temp);
+ hisi_thermal_get_temp(sensor, &temp);
if (temp >= sensor->thres_temp) {
- dev_crit(&data->pdev->dev, "THERMAL ALARM: %d > %d\n",
- temp, sensor->thres_temp);
+ dev_crit(&data->pdev->dev,
+ "sensor <%d> THERMAL ALARM: %d > %d\n",
+ sensor->id, temp, sensor->thres_temp);
- thermal_zone_device_update(data->sensor.tzd,
+ thermal_zone_device_update(sensor->tzd,
THERMAL_EVENT_UNSPECIFIED);
} else {
- dev_crit(&data->pdev->dev, "THERMAL ALARM stopped: %d < %d\n",
- temp, sensor->thres_temp);
+ dev_crit(&data->pdev->dev,
+ "sensor <%d> THERMAL ALARM stopped: %d < %d\n",
+ sensor->id, temp, sensor->thres_temp);
}
return IRQ_HANDLED;
}
static int hisi_thermal_register_sensor(struct platform_device *pdev,
- struct hisi_thermal_data *data,
struct hisi_thermal_sensor *sensor)
{
int ret, i;
const struct thermal_trip *trip;
sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->id, data,
+ sensor->id, sensor,
&hisi_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
ret = PTR_ERR(sensor->tzd);
@@ -502,14 +515,30 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
return 0;
}
+static const struct hisi_thermal_ops hi6220_ops = {
+ .get_temp = hi6220_thermal_get_temp,
+ .enable_sensor = hi6220_thermal_enable_sensor,
+ .disable_sensor = hi6220_thermal_disable_sensor,
+ .irq_handler = hi6220_thermal_irq_handler,
+ .probe = hi6220_thermal_probe,
+};
+
+static const struct hisi_thermal_ops hi3660_ops = {
+ .get_temp = hi3660_thermal_get_temp,
+ .enable_sensor = hi3660_thermal_enable_sensor,
+ .disable_sensor = hi3660_thermal_disable_sensor,
+ .irq_handler = hi3660_thermal_irq_handler,
+ .probe = hi3660_thermal_probe,
+};
+
static const struct of_device_id of_hisi_thermal_match[] = {
{
.compatible = "hisilicon,tsensor",
- .data = hi6220_thermal_probe
+ .data = &hi6220_ops,
},
{
.compatible = "hisilicon,hi3660-tsensor",
- .data = hi3660_thermal_probe
+ .data = &hi3660_ops,
},
{ /* end */ }
};
@@ -527,9 +556,9 @@ static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor,
static int hisi_thermal_probe(struct platform_device *pdev)
{
struct hisi_thermal_data *data;
- int (*platform_probe)(struct hisi_thermal_data *);
struct device *dev = &pdev->dev;
- int ret;
+ struct resource *res;
+ int i, ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -537,41 +566,50 @@ static int hisi_thermal_probe(struct platform_device *pdev)
data->pdev = pdev;
platform_set_drvdata(pdev, data);
+ data->ops = of_device_get_match_data(dev);
- platform_probe = of_device_get_match_data(dev);
- if (!platform_probe) {
- dev_err(dev, "failed to get probe func\n");
- return -EINVAL;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ dev_err(dev, "failed to get io address\n");
+ return PTR_ERR(data->regs);
}
- ret = platform_probe(data);
+ ret = data->ops->probe(data);
if (ret)
return ret;
- ret = hisi_thermal_register_sensor(pdev, data,
- &data->sensor);
- if (ret) {
- dev_err(dev, "failed to register thermal sensor: %d\n", ret);
- return ret;
- }
+ for (i = 0; i < data->nr_sensors; i++) {
+ struct hisi_thermal_sensor *sensor = &data->sensor[i];
- ret = data->enable_sensor(data);
- if (ret) {
- dev_err(dev, "Failed to setup the sensor: %d\n", ret);
- return ret;
- }
+ ret = hisi_thermal_register_sensor(pdev, sensor);
+ if (ret) {
+ dev_err(dev, "failed to register thermal sensor: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = platform_get_irq_byname(pdev, sensor->irq_name);
+ if (ret < 0)
+ return ret;
- if (data->irq) {
- ret = devm_request_threaded_irq(dev, data->irq, NULL,
- hisi_thermal_alarm_irq_thread,
- IRQF_ONESHOT, "hisi_thermal", data);
+ ret = devm_request_threaded_irq(dev, ret, NULL,
+ hisi_thermal_alarm_irq_thread,
+ IRQF_ONESHOT, sensor->irq_name,
+ sensor);
if (ret < 0) {
- dev_err(dev, "failed to request alarm irq: %d\n", ret);
+ dev_err(dev, "Failed to request alarm irq: %d\n", ret);
return ret;
}
- }
- hisi_thermal_toggle_sensor(&data->sensor, true);
+ ret = data->ops->enable_sensor(sensor);
+ if (ret) {
+ dev_err(dev, "Failed to setup the sensor: %d\n", ret);
+ return ret;
+ }
+
+ hisi_thermal_toggle_sensor(sensor, true);
+ }
return 0;
}
@@ -579,11 +617,14 @@ static int hisi_thermal_probe(struct platform_device *pdev)
static int hisi_thermal_remove(struct platform_device *pdev)
{
struct hisi_thermal_data *data = platform_get_drvdata(pdev);
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ int i;
- hisi_thermal_toggle_sensor(sensor, false);
+ for (i = 0; i < data->nr_sensors; i++) {
+ struct hisi_thermal_sensor *sensor = &data->sensor[i];
- data->disable_sensor(data);
+ hisi_thermal_toggle_sensor(sensor, false);
+ data->ops->disable_sensor(sensor);
+ }
return 0;
}
@@ -592,8 +633,10 @@ static int hisi_thermal_remove(struct platform_device *pdev)
static int hisi_thermal_suspend(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
+ int i;
- data->disable_sensor(data);
+ for (i = 0; i < data->nr_sensors; i++)
+ data->ops->disable_sensor(&data->sensor[i]);
return 0;
}
@@ -601,8 +644,12 @@ static int hisi_thermal_suspend(struct device *dev)
static int hisi_thermal_resume(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
+ int i, ret = 0;
+
+ for (i = 0; i < data->nr_sensors; i++)
+ ret |= data->ops->enable_sensor(&data->sensor[i]);
- return data->enable_sensor(data);
+ return ret;
}
#endif
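
The refactor replaces per-instance function pointers with a shared const ops table selected by compatible string via of_device_get_match_data(), which also lets the MMIO setup be written once in the common probe. The pattern in miniature (hypothetical soc_ops driver):

	struct soc_ops {
		int (*probe)(struct platform_device *pdev);
	};

	static int soc_a_setup(struct platform_device *pdev)
	{
		return 0;	/* SoC-specific initialisation */
	}

	static const struct soc_ops soc_a_ops = { .probe = soc_a_setup };

	static const struct of_device_id my_match[] = {
		{ .compatible = "vendor,soc-a", .data = &soc_a_ops },
		{ /* sentinel */ }
	};

	static int my_probe(struct platform_device *pdev)
	{
		const struct soc_ops *ops = of_device_get_match_data(&pdev->dev);

		return ops->probe(pdev);
	}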
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index aa452acb60b6..15661549eb67 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -725,7 +725,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
} else {
ret = imx_init_from_tempmon_data(pdev);
if (ret) {
- dev_err(&pdev->dev, "failed to init from from fsl,tempmon-data\n");
+ dev_err(&pdev->dev, "failed to init from fsl,tempmon-data\n");
return ret;
}
}
@@ -762,9 +762,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"failed to get thermal clk: %d\n", ret);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto cpufreq_put;
}
/*
@@ -777,9 +775,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
ret = clk_prepare_enable(data->thermal_clk);
if (ret) {
dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto cpufreq_put;
}
data->tz = thermal_zone_device_register("imx_thermal_zone",
@@ -792,10 +788,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
ret = PTR_ERR(data->tz);
dev_err(&pdev->dev,
"failed to register thermal zone device %d\n", ret);
- clk_disable_unprepare(data->thermal_clk);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto clk_disable;
}
dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC"
@@ -827,14 +820,20 @@ static int imx_thermal_probe(struct platform_device *pdev)
0, "imx_thermal", data);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
- clk_disable_unprepare(data->thermal_clk);
- thermal_zone_device_unregister(data->tz);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto thermal_zone_unregister;
}
return 0;
+
+thermal_zone_unregister:
+ thermal_zone_device_unregister(data->tz);
+clk_disable:
+ clk_disable_unprepare(data->thermal_clk);
+cpufreq_put:
+ cpufreq_cooling_unregister(data->cdev);
+ cpufreq_cpu_put(data->policy);
+
+ return ret;
}
static int imx_thermal_remove(struct platform_device *pdev)
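
The probe error handling collapses into a single reverse-order unwind ladder at the end of the function, so each newly acquired resource adds one label instead of repeating every release at every failure site. The shape of the idiom (hypothetical get/put pairs):

	static int example_probe(void)
	{
		int ret;

		ret = get_a();
		if (ret)
			return ret;
		ret = get_b();
		if (ret)
			goto put_a;
		ret = get_c();
		if (ret)
			goto put_b;

		return 0;

	put_b:
		put_b_res();
	put_a:
		put_a_res();

		return ret;
	}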
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 4f2816559205..4bfdb4a1e47d 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -19,23 +19,34 @@
/*** Private data structures to represent thermal device tree data ***/
/**
- * struct __thermal_bind_param - a match between trip and cooling device
+ * struct __thermal_cooling_bind_param - a cooling device for a trip point
* @cooling_device: a pointer to identify the referred cooling device
- * @trip_id: the trip point index
- * @usage: the percentage (from 0 to 100) of cooling contribution
* @min: minimum cooling state used at this trip point
* @max: maximum cooling state used at this trip point
*/
-struct __thermal_bind_params {
+struct __thermal_cooling_bind_param {
struct device_node *cooling_device;
- unsigned int trip_id;
- unsigned int usage;
unsigned long min;
unsigned long max;
};
/**
+ * struct __thermal_bind_param - a match between trip and cooling device
+ * @tcbp: a pointer to an array of cooling devices
+ * @count: number of elements in array
+ * @trip_id: the trip point index
+ * @usage: the percentage (from 0 to 100) of cooling contribution
+ */
+
+struct __thermal_bind_params {
+ struct __thermal_cooling_bind_param *tcbp;
+ unsigned int count;
+ unsigned int trip_id;
+ unsigned int usage;
+};
+
+/**
* struct __thermal_zone - internal representation of a thermal zone
* @mode: current thermal zone device mode (enabled/disabled)
* @passive_delay: polling interval while passive cooling is activated
@@ -192,25 +203,31 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
{
struct __thermal_zone *data = thermal->devdata;
- int i;
+ struct __thermal_bind_params *tbp;
+ struct __thermal_cooling_bind_param *tcbp;
+ int i, j;
if (!data || IS_ERR(data))
return -ENODEV;
/* find where to bind */
for (i = 0; i < data->num_tbps; i++) {
- struct __thermal_bind_params *tbp = data->tbps + i;
+ tbp = data->tbps + i;
- if (tbp->cooling_device == cdev->np) {
- int ret;
+ for (j = 0; j < tbp->count; j++) {
+ tcbp = tbp->tcbp + j;
- ret = thermal_zone_bind_cooling_device(thermal,
+ if (tcbp->cooling_device == cdev->np) {
+ int ret;
+
+ ret = thermal_zone_bind_cooling_device(thermal,
tbp->trip_id, cdev,
- tbp->max,
- tbp->min,
+ tcbp->max,
+ tcbp->min,
tbp->usage);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
}
}
@@ -221,22 +238,28 @@ static int of_thermal_unbind(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
{
struct __thermal_zone *data = thermal->devdata;
- int i;
+ struct __thermal_bind_params *tbp;
+ struct __thermal_cooling_bind_param *tcbp;
+ int i, j;
if (!data || IS_ERR(data))
return -ENODEV;
/* find where to unbind */
for (i = 0; i < data->num_tbps; i++) {
- struct __thermal_bind_params *tbp = data->tbps + i;
+ tbp = data->tbps + i;
+
+ for (j = 0; j < tbp->count; j++) {
+ tcbp = tbp->tcbp + j;
- if (tbp->cooling_device == cdev->np) {
- int ret;
+ if (tcbp->cooling_device == cdev->np) {
+ int ret;
- ret = thermal_zone_unbind_cooling_device(thermal,
- tbp->trip_id, cdev);
- if (ret)
- return ret;
+ ret = thermal_zone_unbind_cooling_device(thermal,
+ tbp->trip_id, cdev);
+ if (ret)
+ return ret;
+ }
}
}
@@ -486,8 +509,8 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
if (sensor_specs.args_count >= 1) {
id = sensor_specs.args[0];
WARN(sensor_specs.args_count > 1,
- "%s: too many cells in sensor specifier %d\n",
- sensor_specs.np->name, sensor_specs.args_count);
+ "%pOFn: too many cells in sensor specifier %d\n",
+ sensor_specs.np, sensor_specs.args_count);
} else {
id = 0;
}
@@ -655,8 +678,9 @@ static int thermal_of_populate_bind_params(struct device_node *np,
int ntrips)
{
struct of_phandle_args cooling_spec;
+ struct __thermal_cooling_bind_param *__tcbp;
struct device_node *trip;
- int ret, i;
+ int ret, i, count;
u32 prop;
/* Default weight. Usage is optional */
@@ -683,20 +707,44 @@ static int thermal_of_populate_bind_params(struct device_node *np,
goto end;
}
- ret = of_parse_phandle_with_args(np, "cooling-device", "#cooling-cells",
- 0, &cooling_spec);
- if (ret < 0) {
- pr_err("missing cooling_device property\n");
+ count = of_count_phandle_with_args(np, "cooling-device",
+ "#cooling-cells");
+ if (!count) {
+ pr_err("Add a cooling_device property with at least one device\n");
goto end;
}
- __tbp->cooling_device = cooling_spec.np;
- if (cooling_spec.args_count >= 2) { /* at least min and max */
- __tbp->min = cooling_spec.args[0];
- __tbp->max = cooling_spec.args[1];
- } else {
- pr_err("wrong reference to cooling device, missing limits\n");
+
+ __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
+ if (!__tcbp)
+ goto end;
+
+ for (i = 0; i < count; i++) {
+ ret = of_parse_phandle_with_args(np, "cooling-device",
+ "#cooling-cells", i, &cooling_spec);
+ if (ret < 0) {
+ pr_err("Invalid cooling-device entry\n");
+ goto free_tcbp;
+ }
+
+ __tcbp[i].cooling_device = cooling_spec.np;
+
+ if (cooling_spec.args_count >= 2) { /* at least min and max */
+ __tcbp[i].min = cooling_spec.args[0];
+ __tcbp[i].max = cooling_spec.args[1];
+ } else {
+ pr_err("wrong reference to cooling device, missing limits\n");
+ }
}
+ __tbp->tcbp = __tcbp;
+ __tbp->count = count;
+
+ goto end;
+
+free_tcbp:
+ for (i = i - 1; i >= 0; i--)
+ of_node_put(__tcbp[i].cooling_device);
+ kfree(__tcbp);
end:
of_node_put(trip);
@@ -903,8 +951,16 @@ finish:
return tz;
free_tbps:
- for (i = i - 1; i >= 0; i--)
- of_node_put(tz->tbps[i].cooling_device);
+ for (i = i - 1; i >= 0; i--) {
+ struct __thermal_bind_params *tbp = tz->tbps + i;
+ int j;
+
+ for (j = 0; j < tbp->count; j++)
+ of_node_put(tbp->tcbp[j].cooling_device);
+
+ kfree(tbp->tcbp);
+ }
+
kfree(tz->tbps);
free_trips:
for (i = 0; i < tz->ntrips; i++)
@@ -920,10 +976,18 @@ free_tz:
static inline void of_thermal_free_zone(struct __thermal_zone *tz)
{
- int i;
+ struct __thermal_bind_params *tbp;
+ int i, j;
+
+ for (i = 0; i < tz->num_tbps; i++) {
+ tbp = tz->tbps + i;
+
+ for (j = 0; j < tbp->count; j++)
+ of_node_put(tbp->tcbp[j].cooling_device);
+
+ kfree(tbp->tcbp);
+ }
- for (i = 0; i < tz->num_tbps; i++)
- of_node_put(tz->tbps[i].cooling_device);
kfree(tz->tbps);
for (i = 0; i < tz->ntrips; i++)
of_node_put(tz->trips[i].np);
@@ -963,8 +1027,8 @@ int __init of_parse_thermal_zones(void)
tz = thermal_of_build_thermal_zone(child);
if (IS_ERR(tz)) {
- pr_err("failed to build thermal zone %s: %ld\n",
- child->name,
+ pr_err("failed to build thermal zone %pOFn: %ld\n",
+ child,
PTR_ERR(tz));
continue;
}
@@ -998,7 +1062,7 @@ int __init of_parse_thermal_zones(void)
tz->passive_delay,
tz->polling_delay);
if (IS_ERR(zone)) {
- pr_err("Failed to build %s zone %ld\n", child->name,
+ pr_err("Failed to build %pOFn zone %ld\n", child,
PTR_ERR(zone));
kfree(tzp);
kfree(ops);
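
The parser now sizes the cooling-device array with of_count_phandle_with_args() and walks the phandle list by index; every successful parse takes a reference on the target node, which must eventually be dropped with of_node_put(), as the new free paths above do. The general iteration pattern (immediate put shown for brevity):

	struct of_phandle_args spec;
	int i, count;

	count = of_count_phandle_with_args(np, "cooling-device",
					   "#cooling-cells");
	if (count <= 0)			/* empty list or -errno */
		return count ? count : -ENOENT;

	for (i = 0; i < count; i++) {
		if (of_parse_phandle_with_args(np, "cooling-device",
					       "#cooling-cells", i, &spec))
			break;
		/* ... use spec.np and spec.args[] ... */
		of_node_put(spec.np);
	}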
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index ad4f3a8d6560..b2d5d5bf4a9b 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -23,6 +23,8 @@
#include <linux/regmap.h>
#include <linux/thermal.h>
+#include "thermal_core.h"
+
#define QPNP_TM_REG_TYPE 0x04
#define QPNP_TM_REG_SUBTYPE 0x05
#define QPNP_TM_REG_STATUS 0x08
@@ -37,9 +39,11 @@
#define STATUS_GEN2_STATE_MASK GENMASK(6, 4)
#define STATUS_GEN2_STATE_SHIFT 4
-#define SHUTDOWN_CTRL1_OVERRIDE_MASK GENMASK(7, 6)
+#define SHUTDOWN_CTRL1_OVERRIDE_S2 BIT(6)
#define SHUTDOWN_CTRL1_THRESHOLD_MASK GENMASK(1, 0)
+#define SHUTDOWN_CTRL1_RATE_25HZ BIT(3)
+
#define ALARM_CTRL_FORCE_ENABLE BIT(7)
/*
@@ -56,12 +60,19 @@
#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
#define THRESH_MIN 0
+#define THRESH_MAX 3
+
+/* Stage 2 Threshold Min: 125 C */
+#define STAGE2_THRESHOLD_MIN 125000
+/* Stage 2 Threshold Max: 140 C */
+#define STAGE2_THRESHOLD_MAX 140000
/* Temperature in Milli Celsius reported during stage 0 if no ADC is present */
#define DEFAULT_TEMP 37000
struct qpnp_tm_chip {
struct regmap *map;
+ struct device *dev;
struct thermal_zone_device *tz_dev;
unsigned int subtype;
long temp;
@@ -69,6 +80,10 @@ struct qpnp_tm_chip {
unsigned int stage;
unsigned int prev_stage;
unsigned int base;
+ /* protects .thresh, .stage and chip registers */
+ struct mutex lock;
+ bool initialized;
+
struct iio_channel *adc;
};
@@ -125,6 +140,8 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
unsigned int stage, stage_new, stage_old;
int ret;
+ WARN_ON(!mutex_is_locked(&chip->lock));
+
ret = qpnp_tm_get_temp_stage(chip);
if (ret < 0)
return ret;
@@ -163,8 +180,15 @@ static int qpnp_tm_get_temp(void *data, int *temp)
if (!temp)
return -EINVAL;
+ if (!chip->initialized) {
+ *temp = DEFAULT_TEMP;
+ return 0;
+ }
+
if (!chip->adc) {
+ mutex_lock(&chip->lock);
ret = qpnp_tm_update_temp_no_adc(chip);
+ mutex_unlock(&chip->lock);
if (ret < 0)
return ret;
} else {
@@ -180,8 +204,72 @@ static int qpnp_tm_get_temp(void *data, int *temp)
return 0;
}
+static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
+ int temp)
+{
+ u8 reg;
+ bool disable_s2_shutdown = false;
+
+ WARN_ON(!mutex_is_locked(&chip->lock));
+
+ /*
+ * Default: S2 and S3 shutdown enabled, thresholds at
+ * 105C/125C/145C, monitoring at 25Hz
+ */
+ reg = SHUTDOWN_CTRL1_RATE_25HZ;
+
+ if (temp == THERMAL_TEMP_INVALID ||
+ temp < STAGE2_THRESHOLD_MIN) {
+ chip->thresh = THRESH_MIN;
+ goto skip;
+ }
+
+ if (temp <= STAGE2_THRESHOLD_MAX) {
+ chip->thresh = THRESH_MAX -
+ ((STAGE2_THRESHOLD_MAX - temp) /
+ TEMP_THRESH_STEP);
+ disable_s2_shutdown = true;
+ } else {
+ chip->thresh = THRESH_MAX;
+
+ if (chip->adc)
+ disable_s2_shutdown = true;
+ else
+ dev_warn(chip->dev,
+ "No ADC is configured and critical temperature is above the maximum stage 2 threshold of 140 C! Configuring stage 2 shutdown at 140 C.\n");
+ }
+
+skip:
+ reg |= chip->thresh;
+ if (disable_s2_shutdown)
+ reg |= SHUTDOWN_CTRL1_OVERRIDE_S2;
+
+ return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
+}
+
+static int qpnp_tm_set_trip_temp(void *data, int trip, int temp)
+{
+ struct qpnp_tm_chip *chip = data;
+ const struct thermal_trip *trip_points;
+ int ret;
+
+ trip_points = of_thermal_get_trip_points(chip->tz_dev);
+ if (!trip_points)
+ return -EINVAL;
+
+ if (trip_points[trip].type != THERMAL_TRIP_CRITICAL)
+ return 0;
+
+ mutex_lock(&chip->lock);
+ ret = qpnp_tm_update_critical_trip_temp(chip, temp);
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
static const struct thermal_zone_of_device_ops qpnp_tm_sensor_ops = {
.get_temp = qpnp_tm_get_temp,
+ .set_trip_temp = qpnp_tm_set_trip_temp,
};
static irqreturn_t qpnp_tm_isr(int irq, void *data)
@@ -193,6 +281,29 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static int qpnp_tm_get_critical_trip_temp(struct qpnp_tm_chip *chip)
+{
+ int ntrips;
+ const struct thermal_trip *trips;
+ int i;
+
+ ntrips = of_thermal_get_ntrips(chip->tz_dev);
+ if (ntrips <= 0)
+ return THERMAL_TEMP_INVALID;
+
+ trips = of_thermal_get_trip_points(chip->tz_dev);
+ if (!trips)
+ return THERMAL_TEMP_INVALID;
+
+ for (i = 0; i < ntrips; i++) {
+ if (of_thermal_is_trip_valid(chip->tz_dev, i) &&
+ trips[i].type == THERMAL_TRIP_CRITICAL)
+ return trips[i].temperature;
+ }
+
+ return THERMAL_TEMP_INVALID;
+}
+
/*
* This function initializes the internal temp value based on only the
* current thermal stage and threshold. Setup threshold control and
@@ -203,17 +314,20 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
unsigned int stage;
int ret;
u8 reg = 0;
+ int crit_temp;
+
+ mutex_lock(&chip->lock);
ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg);
if (ret < 0)
- return ret;
+ goto out;
chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
chip->temp = DEFAULT_TEMP;
ret = qpnp_tm_get_temp_stage(chip);
if (ret < 0)
- return ret;
+ goto out;
chip->stage = ret;
stage = chip->subtype == QPNP_TM_SUBTYPE_GEN1
@@ -224,21 +338,19 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
(stage - 1) * TEMP_STAGE_STEP +
TEMP_THRESH_MIN;
- /*
- * Set threshold and disable software override of stage 2 and 3
- * shutdowns.
- */
- chip->thresh = THRESH_MIN;
- reg &= ~(SHUTDOWN_CTRL1_OVERRIDE_MASK | SHUTDOWN_CTRL1_THRESHOLD_MASK);
- reg |= chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
- ret = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
+ crit_temp = qpnp_tm_get_critical_trip_temp(chip);
+ ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp);
if (ret < 0)
- return ret;
+ goto out;
/* Enable the thermal alarm PMIC module in always-on mode. */
reg = ALARM_CTRL_FORCE_ENABLE;
ret = qpnp_tm_write(chip, QPNP_TM_REG_ALARM_CTRL, reg);
+ chip->initialized = true;
+
+out:
+ mutex_unlock(&chip->lock);
return ret;
}
@@ -257,6 +369,9 @@ static int qpnp_tm_probe(struct platform_device *pdev)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, chip);
+ chip->dev = &pdev->dev;
+
+ mutex_init(&chip->lock);
chip->map = dev_get_regmap(pdev->dev.parent, NULL);
if (!chip->map)
@@ -302,6 +417,18 @@ static int qpnp_tm_probe(struct platform_device *pdev)
chip->subtype = subtype;
+ /*
+ * Register the sensor before initializing the hardware to be able to
+ * read the trip points. get_temp() returns the default temperature
+ * before the hardware initialization is completed.
+ */
+ chip->tz_dev = devm_thermal_zone_of_sensor_register(
+ &pdev->dev, 0, chip, &qpnp_tm_sensor_ops);
+ if (IS_ERR(chip->tz_dev)) {
+ dev_err(&pdev->dev, "failed to register sensor\n");
+ return PTR_ERR(chip->tz_dev);
+ }
+
ret = qpnp_tm_init(chip);
if (ret < 0) {
dev_err(&pdev->dev, "init failed\n");
@@ -313,12 +440,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- chip->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
- &qpnp_tm_sensor_ops);
- if (IS_ERR(chip->tz_dev)) {
- dev_err(&pdev->dev, "failed to register sensor\n");
- return PTR_ERR(chip->tz_dev);
- }
+ thermal_zone_device_update(chip->tz_dev, THERMAL_EVENT_UNSPECIFIED);
return 0;
}
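
The new stage-2 handling maps a critical trip between 125 C and 140 C onto one of four 5 C register steps. A worked example of the computation above, using the driver's constants (THRESH_MAX = 3, STAGE2_THRESHOLD_MAX = 140000, TEMP_THRESH_STEP = 5000):

	/* Critical trip at 125 C (125000 mC):
	 *   thresh = 3 - (140000 - 125000) / 5000 = 3 - 3 = 0
	 * so stage 2 trips at 125 C; a 140 C trip yields thresh = 3.
	 */
	chip->thresh = THRESH_MAX -
		       ((STAGE2_THRESHOLD_MAX - temp) / TEMP_THRESH_STEP);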
diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c
index fdf561b8b81d..c6dd620ac029 100644
--- a/drivers/thermal/qcom/tsens-8916.c
+++ b/drivers/thermal/qcom/tsens-8916.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/platform_device.h>
@@ -109,5 +100,6 @@ static const struct tsens_ops ops_8916 = {
const struct tsens_data data_8916 = {
.num_sensors = 5,
.ops = &ops_8916,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
.hw_ids = (unsigned int []){0, 1, 2, 4, 5 },
};
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index 0451277d3a8f..0f0adb302a7b 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/platform_device.h>
@@ -69,7 +60,7 @@ static int suspend_8960(struct tsens_device *tmdev)
{
int ret;
unsigned int mask;
- struct regmap *map = tmdev->map;
+ struct regmap *map = tmdev->tm_map;
ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold);
if (ret)
@@ -94,7 +85,7 @@ static int suspend_8960(struct tsens_device *tmdev)
static int resume_8960(struct tsens_device *tmdev)
{
int ret;
- struct regmap *map = tmdev->map;
+ struct regmap *map = tmdev->tm_map;
ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
if (ret)
@@ -126,12 +117,12 @@ static int enable_8960(struct tsens_device *tmdev, int id)
int ret;
u32 reg, mask;
- ret = regmap_read(tmdev->map, CNTL_ADDR, &reg);
+ ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg);
if (ret)
return ret;
mask = BIT(id + SENSOR0_SHIFT);
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg | SW_RST);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg | SW_RST);
if (ret)
return ret;
@@ -140,7 +131,7 @@ static int enable_8960(struct tsens_device *tmdev, int id)
else
reg |= mask | SLP_CLK_ENA_8660 | EN;
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg);
if (ret)
return ret;
@@ -157,7 +148,7 @@ static void disable_8960(struct tsens_device *tmdev)
mask <<= SENSOR0_SHIFT;
mask |= EN;
- ret = regmap_read(tmdev->map, CNTL_ADDR, &reg_cntl);
+ ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg_cntl);
if (ret)
return;
@@ -168,7 +159,7 @@ static void disable_8960(struct tsens_device *tmdev)
else
reg_cntl &= ~SLP_CLK_ENA_8660;
- regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
}
static int init_8960(struct tsens_device *tmdev)
@@ -176,8 +167,8 @@ static int init_8960(struct tsens_device *tmdev)
int ret, i;
u32 reg_cntl;
- tmdev->map = dev_get_regmap(tmdev->dev, NULL);
- if (!tmdev->map)
+ tmdev->tm_map = dev_get_regmap(tmdev->dev, NULL);
+ if (!tmdev->tm_map)
return -ENODEV;
/*
@@ -193,14 +184,14 @@ static int init_8960(struct tsens_device *tmdev)
}
reg_cntl = SW_RST;
- ret = regmap_update_bits(tmdev->map, CNTL_ADDR, SW_RST, reg_cntl);
+ ret = regmap_update_bits(tmdev->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
if (ret)
return ret;
if (tmdev->num_sensors > 1) {
reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
reg_cntl &= ~SW_RST;
- ret = regmap_update_bits(tmdev->map, CONFIG_ADDR,
+ ret = regmap_update_bits(tmdev->tm_map, CONFIG_ADDR,
CONFIG_MASK, CONFIG);
} else {
reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
@@ -209,12 +200,12 @@ static int init_8960(struct tsens_device *tmdev)
}
reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
reg_cntl |= EN;
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
@@ -261,12 +252,12 @@ static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp)
timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
do {
- ret = regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy);
+ ret = regmap_read(tmdev->tm_map, INT_STATUS_ADDR, &trdy);
if (ret)
return ret;
if (!(trdy & TRDY_MASK))
continue;
- ret = regmap_read(tmdev->map, s->status, &code);
+ ret = regmap_read(tmdev->tm_map, s->status, &code);
if (ret)
return ret;
*temp = code_to_mdegC(code, s);
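
The do/while above open-codes a ready-bit poll against jiffies. A minimal sketch of the same sequence using regmap's polling helper, illustrative only and not part of this patch (the 100 us sleep interval is an assumption):

	static int get_code_when_ready(struct tsens_device *tmdev,
				       const struct tsens_sensor *s, u32 *code)
	{
		u32 trdy;
		int ret;

		/* Wait for TRDY to assert, give up after TIMEOUT_US */
		ret = regmap_read_poll_timeout(tmdev->tm_map, INT_STATUS_ADDR,
					       trdy, trdy & TRDY_MASK,
					       100, TIMEOUT_US);
		if (ret)
			return ret;

		/* Conversion done: fetch the raw code for this sensor */
		return regmap_read(tmdev->tm_map, s->status, code);
	}
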
diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-8974.c
index 9baf77e8cbe3..3d3fda3d731b 100644
--- a/drivers/thermal/qcom/tsens-8974.c
+++ b/drivers/thermal/qcom/tsens-8974.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/platform_device.h>
@@ -241,4 +232,5 @@ static const struct tsens_ops ops_8974 = {
const struct tsens_data data_8974 = {
.num_sensors = 11,
.ops = &ops_8974,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index 6207d8d92351..3be4be2e0465 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/err.h>
@@ -21,7 +12,11 @@
#include <linux/regmap.h>
#include "tsens.h"
-#define S0_ST_ADDR 0x1030
+/* SROT */
+#define TSENS_EN BIT(0)
+
+/* TM */
+#define STATUS_OFFSET 0x30
#define SN_ADDR_OFFSET 0x4
#define SN_ST_TEMP_MASK 0x3ff
#define CAL_DEGC_PT1 30
@@ -107,8 +102,8 @@ int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
unsigned int status_reg;
int last_temp = 0, ret;
- status_reg = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
- ret = regmap_read(tmdev->map, status_reg, &code);
+ status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * SN_ADDR_OFFSET;
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
last_temp = code & SN_ST_TEMP_MASK;
@@ -126,29 +121,52 @@ static const struct regmap_config tsens_config = {
int __init init_common(struct tsens_device *tmdev)
{
- void __iomem *base;
+ void __iomem *tm_base, *srot_base;
struct resource *res;
+ u32 code;
+ int ret;
struct platform_device *op = of_find_device_by_node(tmdev->dev->of_node);
+ u16 ctrl_offset = tmdev->reg_offsets[SROT_CTRL_OFFSET];
if (!op)
return -EINVAL;
- /* The driver only uses the TM register address space for now */
if (op->num_resources > 1) {
+ /* DT with separate SROT and TM address space */
tmdev->tm_offset = 0;
+ res = platform_get_resource(op, IORESOURCE_MEM, 1);
+ srot_base = devm_ioremap_resource(&op->dev, res);
+ if (IS_ERR(srot_base))
+ return PTR_ERR(srot_base);
+
+ tmdev->srot_map = devm_regmap_init_mmio(tmdev->dev,
+ srot_base, &tsens_config);
+ if (IS_ERR(tmdev->srot_map))
+ return PTR_ERR(tmdev->srot_map);
+
} else {
/* old DTs where SROT and TM were in a contiguous 2K block */
tmdev->tm_offset = 0x1000;
}
res = platform_get_resource(op, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&op->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- tmdev->map = devm_regmap_init_mmio(tmdev->dev, base, &tsens_config);
- if (IS_ERR(tmdev->map))
- return PTR_ERR(tmdev->map);
+ tm_base = devm_ioremap_resource(&op->dev, res);
+ if (IS_ERR(tm_base))
+ return PTR_ERR(tm_base);
+
+ tmdev->tm_map = devm_regmap_init_mmio(tmdev->dev, tm_base, &tsens_config);
+ if (IS_ERR(tmdev->tm_map))
+ return PTR_ERR(tmdev->tm_map);
+
+ if (tmdev->srot_map) {
+ ret = regmap_read(tmdev->srot_map, ctrl_offset, &code);
+ if (ret)
+ return ret;
+ if (!(code & TSENS_EN)) {
+ dev_err(tmdev->dev, "tsens device is not enabled\n");
+ return -ENODEV;
+ }
+ }
return 0;
}
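
With SROT and TM now behind separate regmaps, common code reaches SROT registers through the per-platform reg_offsets[] table rather than hard-coded addresses. A minimal sketch of a consumer, assuming only names introduced by this patch:

	/* Illustrative: check the hardware enable bit via the SROT map */
	static int tsens_hw_enabled(struct tsens_device *tmdev, bool *enabled)
	{
		u16 ctrl_offset = tmdev->reg_offsets[SROT_CTRL_OFFSET];
		u32 ctrl;
		int ret;

		ret = regmap_read(tmdev->srot_map, ctrl_offset, &ctrl);
		if (ret)
			return ret;

		*enabled = !!(ctrl & TSENS_EN);
		return 0;
	}
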
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 44da02f594ac..381a212872bf 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -21,7 +21,7 @@ static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
int ret;
status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * 4;
- ret = regmap_read(tmdev->map, status_reg, &code);
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
last_temp = code & LAST_TEMP_MASK;
@@ -29,7 +29,7 @@ static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
goto done;
/* Try a second time */
- ret = regmap_read(tmdev->map, status_reg, &code);
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
if (code & STATUS_VALID_BIT) {
@@ -40,7 +40,7 @@ static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
}
/* Try a third/last time */
- ret = regmap_read(tmdev->map, status_reg, &code);
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
if (code & STATUS_VALID_BIT) {
@@ -68,10 +68,12 @@ static const struct tsens_ops ops_generic_v2 = {
const struct tsens_data data_tsens_v2 = {
.ops = &ops_generic_v2,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
};
/* Kept around for backward compatibility with old msm8996.dtsi */
const struct tsens_data data_8996 = {
.num_sensors = 13,
.ops = &ops_generic_v2,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
};
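
get_temp_tsens_v2() reads the status register up to three times before trusting a sample. A simplified sketch of that policy as a bounded loop; the in-tree code keeps the reads unrolled so consecutive samples can be compared against each other:

	int try;

	for (try = 0; try < 3; try++) {
		ret = regmap_read(tmdev->tm_map, status_reg, &code);
		if (ret)
			return ret;
		if (code & STATUS_VALID_BIT)
			break;	/* sample is stable enough to use */
	}
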
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index a2c9bfae3d86..f1ec9bbe4717 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/err.h>
@@ -89,11 +80,6 @@ static int tsens_register(struct tsens_device *tmdev)
{
int i;
struct thermal_zone_device *tzd;
- u32 *hw_id, n = tmdev->num_sensors;
-
- hw_id = devm_kcalloc(tmdev->dev, n, sizeof(u32), GFP_KERNEL);
- if (!hw_id)
- return -ENOMEM;
for (i = 0; i < tmdev->num_sensors; i++) {
tmdev->sensor[i].tmdev = tmdev;
@@ -158,6 +144,9 @@ static int tsens_probe(struct platform_device *pdev)
else
tmdev->sensor[i].hw_id = i;
}
+ for (i = 0; i < REG_ARRAY_SIZE; i++) {
+ tmdev->reg_offsets[i] = data->reg_offsets[i];
+ }
if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->get_temp)
return -EINVAL;
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 14331eb45a86..7b7feee5dc46 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -1,15 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+
#ifndef __QCOM_TSENS_H__
#define __QCOM_TSENS_H__
@@ -55,15 +48,23 @@ struct tsens_ops {
int (*get_trend)(struct tsens_device *, int, enum thermal_trend *);
};
+enum reg_list {
+ SROT_CTRL_OFFSET,
+
+ REG_ARRAY_SIZE,
+};
+
/**
* struct tsens_data - tsens instance specific data
* @num_sensors: Max number of sensors supported by platform
* @ops: operations the tsens instance supports
* @hw_ids: Subset of sensors ids supported by platform, if not the first n
+ * @reg_offsets: Register offsets for commonly used registers
*/
struct tsens_data {
const u32 num_sensors;
const struct tsens_ops *ops;
+ const u16 reg_offsets[REG_ARRAY_SIZE];
unsigned int *hw_ids;
};
@@ -76,8 +77,10 @@ struct tsens_context {
struct tsens_device {
struct device *dev;
u32 num_sensors;
- struct regmap *map;
+ struct regmap *tm_map;
+ struct regmap *srot_map;
u32 tm_offset;
+ u16 reg_offsets[REG_ARRAY_SIZE];
struct tsens_context ctx;
const struct tsens_ops *ops;
struct tsens_sensor sensor[0];
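
struct tsens_device still ends in a zero-length sensor[0] array, so the device and its per-sensor slots come from a single allocation. A hedged sketch of that allocation using the struct_size() helper from linux/overflow.h (the in-tree probe may open-code the size arithmetic):

	tmdev = devm_kzalloc(&pdev->dev,
			     struct_size(tmdev, sensor, num_sensors),
			     GFP_KERNEL);
	if (!tmdev)
		return -ENOMEM;
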
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 450ed66edf58..18c711b19514 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -119,8 +119,8 @@ static int qoriq_tmu_get_sensor_id(void)
if (sensor_specs.args_count >= 1) {
id = sensor_specs.args[0];
WARN(sensor_specs.args_count > 1,
- "%s: too many cells in sensor specifier %d\n",
- sensor_specs.np->name, sensor_specs.args_count);
+ "%pOFn: too many cells in sensor specifier %d\n",
+ sensor_specs.np, sensor_specs.args_count);
} else {
id = 0;
}
@@ -294,6 +294,7 @@ static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
static const struct of_device_id qoriq_tmu_match[] = {
{ .compatible = "fsl,qoriq-tmu", },
+ { .compatible = "fsl,imx8mq-tmu", },
{},
};
MODULE_DEVICE_TABLE(of, qoriq_tmu_match);
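
The WARN() change swaps a raw np->name dereference for the %pOFn printf extension, which has vsprintf resolve the node name itself and keeps working once of_node->name goes away. The before/after shape in general:

	/* before: trusts the cached ->name pointer */
	pr_warn("%s: too many cells\n", np->name);

	/* after: vsprintf derives the name from the device_node */
	pr_warn("%pOFn: too many cells\n", np);
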
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 7aed5337bdd3..75786cc8e2f9 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -318,9 +318,11 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
}
static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
+ { .compatible = "renesas,r8a774a1-thermal", },
{ .compatible = "renesas,r8a7795-thermal", },
{ .compatible = "renesas,r8a7796-thermal", },
{ .compatible = "renesas,r8a77965-thermal", },
+ { .compatible = "renesas,r8a77980-thermal", },
{},
};
MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 78f932822d38..8014a207d8d9 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -113,6 +113,10 @@ static const struct of_device_id rcar_thermal_dt_ids[] = {
.data = &rcar_gen2_thermal,
},
{
+ .compatible = "renesas,thermal-r8a77970",
+ .data = &rcar_gen3_thermal,
+ },
+ {
.compatible = "renesas,thermal-r8a77995",
.data = &rcar_gen3_thermal,
},
@@ -434,8 +438,8 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
rcar_thermal_for_each_priv(priv, common) {
if (rcar_thermal_had_changed(priv, status)) {
rcar_thermal_irq_disable(priv);
- schedule_delayed_work(&priv->work,
- msecs_to_jiffies(300));
+ queue_delayed_work(system_freezable_wq, &priv->work,
+ msecs_to_jiffies(300));
}
}
@@ -453,6 +457,7 @@ static int rcar_thermal_remove(struct platform_device *pdev)
rcar_thermal_for_each_priv(priv, common) {
rcar_thermal_irq_disable(priv);
+ cancel_delayed_work_sync(&priv->work);
if (priv->chip->use_of_thermal)
thermal_remove_hwmon_sysfs(priv->zone);
else
@@ -492,7 +497,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
pm_runtime_get_sync(dev);
for (i = 0; i < chip->nirqs; i++) {
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, i);
if (!irq)
continue;
if (!common->base) {
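
Besides the new r8a77970 compatible, this diff pairs two fixes that only work together: IRQ work now lands on system_freezable_wq so it cannot run mid-suspend, and remove() cancels it before the zone goes away. It also makes the probe loop request IRQ resource i instead of resource 0 on every iteration. A sketch of the workqueue pattern, with hypothetical demo_* names:

	struct demo_priv {
		struct delayed_work work;
	};

	static irqreturn_t demo_irq(int irq, void *data)
	{
		struct demo_priv *priv = data;

		/* freezable wq: work is held back while tasks are frozen */
		queue_delayed_work(system_freezable_wq, &priv->work,
				   msecs_to_jiffies(300));
		return IRQ_HANDLED;
	}

	static int demo_remove(struct platform_device *pdev)
	{
		struct demo_priv *priv = platform_get_drvdata(pdev);

		/* nothing may remain queued once the zone is gone */
		cancel_delayed_work_sync(&priv->work);
		return 0;
	}
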
diff --git a/drivers/thermal/st/Kconfig b/drivers/thermal/st/Kconfig
index 490fdbe22eea..b80f9a9e4f8f 100644
--- a/drivers/thermal/st/Kconfig
+++ b/drivers/thermal/st/Kconfig
@@ -1,3 +1,7 @@
+#
+# STMicroelectronics thermal drivers configuration
+#
+
config ST_THERMAL
tristate "Thermal sensors on STMicroelectronics STi series of SoCs"
help
@@ -10,3 +14,13 @@ config ST_THERMAL_SYSCFG
config ST_THERMAL_MEMMAP
select ST_THERMAL
tristate "STi series memory mapped access based thermal sensors"
+
+config STM32_THERMAL
+ tristate "Thermal framework support on STMicroelectronics STM32 series of SoCs"
+ depends on MACH_STM32MP157
+ default y
+ help
+	  Support for the thermal framework on STMicroelectronics STM32 series
+	  of SoCs. This thermal driver provides access to the general thermal
+	  framework functionality and to the SoC sensor functionality. This
+	  configuration is fully dependent on MACH_STM32MP157.
diff --git a/drivers/thermal/st/Makefile b/drivers/thermal/st/Makefile
index b38878977bd8..b2b9e9b96296 100644
--- a/drivers/thermal/st/Makefile
+++ b/drivers/thermal/st/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_ST_THERMAL) := st_thermal.o
obj-$(CONFIG_ST_THERMAL_SYSCFG) += st_thermal_syscfg.o
obj-$(CONFIG_ST_THERMAL_MEMMAP) += st_thermal_memmap.o
+obj-$(CONFIG_STM32_THERMAL)	:= stm_thermal.o
\ No newline at end of file
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
new file mode 100644
index 000000000000..47623da0f91b
--- /dev/null
+++ b/drivers/thermal/st/stm_thermal.c
@@ -0,0 +1,760 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: David Hernandez Sanchez <david.hernandezsanchez@st.com> for
+ * STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#include "../thermal_core.h"
+#include "../thermal_hwmon.h"
+
+/* DTS register offsets */
+#define DTS_CFGR1_OFFSET 0x0
+#define DTS_T0VALR1_OFFSET 0x8
+#define DTS_RAMPVALR_OFFSET	0x10
+#define DTS_ITR1_OFFSET 0x14
+#define DTS_DR_OFFSET 0x1C
+#define DTS_SR_OFFSET 0x20
+#define DTS_ITENR_OFFSET 0x24
+#define DTS_CIFR_OFFSET 0x28
+
+/* DTS_CFGR1 register mask definitions */
+#define HSREF_CLK_DIV_MASK GENMASK(30, 24)
+#define TS1_SMP_TIME_MASK GENMASK(19, 16)
+#define TS1_INTRIG_SEL_MASK GENMASK(11, 8)
+
+/* DTS_T0VALR1 register mask definitions */
+#define TS1_T0_MASK GENMASK(17, 16)
+#define TS1_FMT0_MASK GENMASK(15, 0)
+
+/* DTS_RAMPVALR register mask definitions */
+#define TS1_RAMP_COEFF_MASK GENMASK(15, 0)
+
+/* DTS_ITR1 register mask definitions */
+#define TS1_HITTHD_MASK GENMASK(31, 16)
+#define TS1_LITTHD_MASK GENMASK(15, 0)
+
+/* DTS_DR register mask definitions */
+#define TS1_MFREQ_MASK GENMASK(15, 0)
+
+/* Least significant bit position definitions */
+#define TS1_T0_POS 16
+#define TS1_SMP_TIME_POS 16
+#define TS1_HITTHD_POS 16
+#define HSREF_CLK_DIV_POS 24
+
+/* DTS_CFGR1 bit definitions */
+#define TS1_EN BIT(0)
+#define TS1_START BIT(4)
+#define REFCLK_SEL BIT(20)
+#define REFCLK_LSE REFCLK_SEL
+#define Q_MEAS_OPT BIT(21)
+#define CALIBRATION_CONTROL Q_MEAS_OPT
+
+/* DTS_SR bit definitions */
+#define TS_RDY BIT(15)
+/* Bit definitions below are common for DTS_SR, DTS_ITENR and DTS_CIFR */
+#define HIGH_THRESHOLD BIT(2)
+#define LOW_THRESHOLD BIT(1)
+
+/* Constants */
+#define ADJUST 100
+#define ONE_MHZ 1000000
+#define POLL_TIMEOUT 5000
+#define STARTUP_TIME 40
+#define TS1_T0_VAL0 30
+#define TS1_T0_VAL1 130
+#define NO_HW_TRIG 0
+
+/* The Thermal Framework expects millidegrees */
+#define mcelsius(temp) ((temp) * 1000)
+
+/* The sensor expects degrees Celsius */
+#define celsius(temp) ((temp) / 1000)
+
+struct stm_thermal_sensor {
+ struct device *dev;
+ struct thermal_zone_device *th_dev;
+ enum thermal_device_mode mode;
+ struct clk *clk;
+ int high_temp;
+ int low_temp;
+ int temp_critical;
+ int temp_passive;
+ unsigned int low_temp_enabled;
+ int num_trips;
+ int irq;
+ unsigned int irq_enabled;
+ void __iomem *base;
+ int t0, fmt0, ramp_coeff;
+};
+
+static irqreturn_t stm_thermal_alarm_irq(int irq, void *sdata)
+{
+ struct stm_thermal_sensor *sensor = sdata;
+
+ disable_irq_nosync(irq);
+ sensor->irq_enabled = false;
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t stm_thermal_alarm_irq_thread(int irq, void *sdata)
+{
+ u32 value;
+ struct stm_thermal_sensor *sensor = sdata;
+
+ /* read IT reason in SR and clear flags */
+ value = readl_relaxed(sensor->base + DTS_SR_OFFSET);
+
+ if ((value & LOW_THRESHOLD) == LOW_THRESHOLD)
+ writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ if ((value & HIGH_THRESHOLD) == HIGH_THRESHOLD)
+ writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
+
+ return IRQ_HANDLED;
+}
+
+static int stm_sensor_power_on(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+ u32 value;
+
+ /* Enable sensor */
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+ value |= TS1_EN;
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ /*
+ * The DTS block can be enabled by setting TSx_EN bit in
+ * DTS_CFGRx register. It requires a startup time of
+ * 40μs. Use 5 ms as arbitrary timeout.
+ */
+ ret = readl_poll_timeout(sensor->base + DTS_SR_OFFSET,
+ value, (value & TS_RDY),
+ STARTUP_TIME, POLL_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Start continuous measuring */
+ value = readl_relaxed(sensor->base +
+ DTS_CFGR1_OFFSET);
+ value |= TS1_START;
+ writel_relaxed(value, sensor->base +
+ DTS_CFGR1_OFFSET);
+
+ return 0;
+}
+
+static int stm_sensor_power_off(struct stm_thermal_sensor *sensor)
+{
+ u32 value;
+
+ /* Stop measuring */
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+ value &= ~TS1_START;
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ /* Ensure stop is taken into account */
+ usleep_range(STARTUP_TIME, POLL_TIMEOUT);
+
+ /* Disable sensor */
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+ value &= ~TS1_EN;
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ /* Ensure disable is taken into account */
+ return readl_poll_timeout(sensor->base + DTS_SR_OFFSET, value,
+ !(value & TS_RDY),
+ STARTUP_TIME, POLL_TIMEOUT);
+}
+
+static int stm_thermal_calibration(struct stm_thermal_sensor *sensor)
+{
+ u32 value, clk_freq;
+ u32 prescaler;
+
+ /* Figure out prescaler value for PCLK during calibration */
+ clk_freq = clk_get_rate(sensor->clk);
+ if (!clk_freq)
+ return -EINVAL;
+
+ prescaler = 0;
+ clk_freq /= ONE_MHZ;
+ if (clk_freq) {
+ while (prescaler <= clk_freq)
+ prescaler++;
+ }
+
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+
+ /* Clear prescaler */
+ value &= ~HSREF_CLK_DIV_MASK;
+
+ /* Set prescaler. pclk_freq/prescaler < 1MHz */
+ value |= (prescaler << HSREF_CLK_DIV_POS);
+
+ /* Select PCLK as reference clock */
+ value &= ~REFCLK_SEL;
+
+ /* Set maximal sampling time for better precision */
+ value |= TS1_SMP_TIME_MASK;
+
+ /* Measure with calibration */
+ value &= ~CALIBRATION_CONTROL;
+
+ /* select trigger */
+ value &= ~TS1_INTRIG_SEL_MASK;
+ value |= NO_HW_TRIG;
+
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ return 0;
+}
+
+/* Fill in DTS structure with factory sensor values */
+static int stm_thermal_read_factory_settings(struct stm_thermal_sensor *sensor)
+{
+ /* Retrieve engineering calibration temperature */
+ sensor->t0 = readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET) &
+ TS1_T0_MASK;
+ if (!sensor->t0)
+ sensor->t0 = TS1_T0_VAL0;
+ else
+ sensor->t0 = TS1_T0_VAL1;
+
+	/* Retrieve fmt0, mask it out and convert it to Hz */
+	sensor->fmt0 = ADJUST * (readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET)
+				 & TS1_FMT0_MASK);
+
+ /* Retrieve ramp coefficient */
+ sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) &
+ TS1_RAMP_COEFF_MASK;
+
+ if (!sensor->fmt0 || !sensor->ramp_coeff) {
+ dev_err(sensor->dev, "%s: wrong setting\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(sensor->dev, "%s: T0 = %doC, FMT0 = %dHz, RAMP_COEFF = %dHz/oC",
+ __func__, sensor->t0, sensor->fmt0, sensor->ramp_coeff);
+
+ return 0;
+}
+
+static int stm_thermal_calculate_threshold(struct stm_thermal_sensor *sensor,
+ int temp, u32 *th)
+{
+ int freqM;
+ u32 sampling_time;
+
+ /* Retrieve the number of periods to sample */
+ sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
+ TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
+
+ /* Figure out the CLK_PTAT frequency for a given temperature */
+ freqM = ((temp - sensor->t0) * sensor->ramp_coeff)
+ + sensor->fmt0;
+
+ dev_dbg(sensor->dev, "%s: freqM for threshold = %d Hz",
+ __func__, freqM);
+
+ /* Figure out the threshold sample number */
+ *th = clk_get_rate(sensor->clk);
+ if (!*th)
+ return -EINVAL;
+
+ *th = *th / freqM;
+
+ *th *= sampling_time;
+
+ return 0;
+}
+
+static int stm_thermal_set_threshold(struct stm_thermal_sensor *sensor)
+{
+ u32 value, th;
+ int ret;
+
+ value = readl_relaxed(sensor->base + DTS_ITR1_OFFSET);
+
+ /* Erase threshold content */
+ value &= ~(TS1_LITTHD_MASK | TS1_HITTHD_MASK);
+
+ /* Retrieve the sample threshold number th for a given temperature */
+ ret = stm_thermal_calculate_threshold(sensor, sensor->high_temp, &th);
+ if (ret)
+ return ret;
+
+ value |= th & TS1_LITTHD_MASK;
+
+ if (sensor->low_temp_enabled) {
+ /* Retrieve the sample threshold */
+ ret = stm_thermal_calculate_threshold(sensor, sensor->low_temp,
+ &th);
+ if (ret)
+ return ret;
+
+ value |= (TS1_HITTHD_MASK & (th << TS1_HITTHD_POS));
+ }
+
+ /* Write value on the Low interrupt threshold */
+ writel_relaxed(value, sensor->base + DTS_ITR1_OFFSET);
+
+ return 0;
+}
+
+/* Disable temperature interrupt */
+static int stm_disable_irq(struct stm_thermal_sensor *sensor)
+{
+ u32 value;
+
+ /* Disable IT generation for low and high thresholds */
+ value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
+ writel_relaxed(value & ~(LOW_THRESHOLD | HIGH_THRESHOLD),
+ sensor->base + DTS_ITENR_OFFSET);
+
+ dev_dbg(sensor->dev, "%s: IT disabled on sensor side", __func__);
+
+ return 0;
+}
+
+/* Enable temperature interrupt */
+static int stm_enable_irq(struct stm_thermal_sensor *sensor)
+{
+ u32 value;
+
+	/*
+	 * The high temperature alarm uses the low threshold interrupt:
+	 * thresholds are sample counts, which decrease as temperature rises
+	 */
+
+ /* Make sure LOW_THRESHOLD IT is clear before enabling */
+ writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ /* Enable IT generation for low threshold */
+ value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
+ value |= LOW_THRESHOLD;
+
+ /* Enable the low temperature threshold if needed */
+ if (sensor->low_temp_enabled) {
+ /* Make sure HIGH_THRESHOLD IT is clear before enabling */
+ writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ /* Enable IT generation for high threshold */
+ value |= HIGH_THRESHOLD;
+ }
+
+ /* Enable thresholds */
+ writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
+
+ dev_dbg(sensor->dev, "%s: IT enabled on sensor side", __func__);
+
+ return 0;
+}
+
+static int stm_thermal_update_threshold(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+
+ sensor->mode = THERMAL_DEVICE_DISABLED;
+
+ ret = stm_sensor_power_off(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_disable_irq(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_thermal_set_threshold(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_enable_irq(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_sensor_power_on(sensor);
+ if (ret)
+ return ret;
+
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
+ return 0;
+}
+
+/* Callback to get temperature from HW */
+static int stm_thermal_get_temp(void *data, int *temp)
+{
+ struct stm_thermal_sensor *sensor = data;
+ u32 sampling_time;
+ int freqM, ret;
+
+ if (sensor->mode != THERMAL_DEVICE_ENABLED)
+ return -EAGAIN;
+
+ /* Retrieve the number of samples */
+ ret = readl_poll_timeout(sensor->base + DTS_DR_OFFSET, freqM,
+ (freqM & TS1_MFREQ_MASK), STARTUP_TIME,
+ POLL_TIMEOUT);
+
+ if (ret)
+ return ret;
+
+ if (!freqM)
+ return -ENODATA;
+
+ /* Retrieve the number of periods sampled */
+ sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
+ TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
+
+ /* Figure out the number of samples per period */
+ freqM /= sampling_time;
+
+ /* Figure out the CLK_PTAT frequency */
+ freqM = clk_get_rate(sensor->clk) / freqM;
+ if (!freqM)
+ return -EINVAL;
+
+ dev_dbg(sensor->dev, "%s: freqM=%d\n", __func__, freqM);
+
+	/* Figure out the temperature in millidegrees Celsius */
+ *temp = mcelsius(sensor->t0 + ((freqM - sensor->fmt0) /
+ sensor->ramp_coeff));
+
+ dev_dbg(sensor->dev, "%s: temperature = %d millicelsius",
+ __func__, *temp);
+
+ /* Update thresholds */
+ if (sensor->num_trips > 1) {
+ /* Update alarm threshold value to next higher trip point */
+ if (sensor->high_temp == sensor->temp_passive &&
+ celsius(*temp) >= sensor->temp_passive) {
+ sensor->high_temp = sensor->temp_critical;
+ sensor->low_temp = sensor->temp_passive;
+ sensor->low_temp_enabled = true;
+ ret = stm_thermal_update_threshold(sensor);
+ if (ret)
+ return ret;
+ }
+
+ if (sensor->high_temp == sensor->temp_critical &&
+ celsius(*temp) < sensor->temp_passive) {
+ sensor->high_temp = sensor->temp_passive;
+ sensor->low_temp_enabled = false;
+ ret = stm_thermal_update_threshold(sensor);
+ if (ret)
+ return ret;
+ }
+
+	/*
+	 * Re-enable the alarm IRQ if the temperature is below the
+	 * critical temperature
+	 */
+ if (!sensor->irq_enabled &&
+ (celsius(*temp) < sensor->temp_critical)) {
+ sensor->irq_enabled = true;
+ enable_irq(sensor->irq);
+ }
+ }
+
+ return 0;
+}
+
+/* Register the DTS irq so it is visible to the GIC */
+static int stm_register_irq(struct stm_thermal_sensor *sensor)
+{
+ struct device *dev = sensor->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ sensor->irq = platform_get_irq(pdev, 0);
+ if (sensor->irq < 0) {
+ dev_err(dev, "%s: Unable to find IRQ\n", __func__);
+ return sensor->irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, sensor->irq,
+ stm_thermal_alarm_irq,
+ stm_thermal_alarm_irq_thread,
+ IRQF_ONESHOT,
+ dev->driver->name, sensor);
+ if (ret) {
+ dev_err(dev, "%s: Failed to register IRQ %d\n", __func__,
+ sensor->irq);
+ return ret;
+ }
+
+ sensor->irq_enabled = true;
+
+ dev_dbg(dev, "%s: thermal IRQ registered", __func__);
+
+ return 0;
+}
+
+static int stm_thermal_sensor_off(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+
+ ret = stm_sensor_power_off(sensor);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(sensor->clk);
+
+ return 0;
+}
+
+static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+ struct device *dev = sensor->dev;
+
+ ret = clk_prepare_enable(sensor->clk);
+ if (ret)
+ return ret;
+
+ ret = stm_thermal_calibration(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
+ /* Set threshold(s) for IRQ */
+ ret = stm_thermal_set_threshold(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
+ ret = stm_enable_irq(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
+ ret = stm_sensor_power_on(sensor);
+ if (ret) {
+ dev_err(dev, "%s: failed to power on sensor\n", __func__);
+ goto irq_disable;
+ }
+
+ return 0;
+
+irq_disable:
+ stm_disable_irq(sensor);
+
+thermal_unprepare:
+ clk_disable_unprepare(sensor->clk);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stm_thermal_suspend(struct device *dev)
+{
+ int ret;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ ret = stm_thermal_sensor_off(sensor);
+ if (ret)
+ return ret;
+
+ sensor->mode = THERMAL_DEVICE_DISABLED;
+
+ return 0;
+}
+
+static int stm_thermal_resume(struct device *dev)
+{
+ int ret;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ ret = stm_thermal_prepare(sensor);
+ if (ret)
+ return ret;
+
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume);
+
+static const struct thermal_zone_of_device_ops stm_tz_ops = {
+ .get_temp = stm_thermal_get_temp,
+};
+
+static const struct of_device_id stm_thermal_of_match[] = {
+ { .compatible = "st,stm32-thermal"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stm_thermal_of_match);
+
+static int stm_thermal_probe(struct platform_device *pdev)
+{
+ struct stm_thermal_sensor *sensor;
+ struct resource *res;
+ const struct thermal_trip *trip;
+ void __iomem *base;
+ int ret, i;
+
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "%s: device tree node not found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sensor);
+
+ sensor->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* Populate sensor */
+ sensor->base = base;
+
+ ret = stm_thermal_read_factory_settings(sensor);
+ if (ret)
+ return ret;
+
+ sensor->clk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(sensor->clk)) {
+ dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n",
+ __func__);
+ return PTR_ERR(sensor->clk);
+ }
+
+ /* Register IRQ into GIC */
+ ret = stm_register_irq(sensor);
+ if (ret)
+ return ret;
+
+ sensor->th_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
+ sensor,
+ &stm_tz_ops);
+
+ if (IS_ERR(sensor->th_dev)) {
+ dev_err(&pdev->dev, "%s: thermal zone sensor registering KO\n",
+ __func__);
+ ret = PTR_ERR(sensor->th_dev);
+ return ret;
+ }
+
+ if (!sensor->th_dev->ops->get_crit_temp) {
+ /* Critical point must be provided */
+ ret = -EINVAL;
+ goto err_tz;
+ }
+
+ ret = sensor->th_dev->ops->get_crit_temp(sensor->th_dev,
+ &sensor->temp_critical);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Not able to read critical_temp: %d\n", ret);
+ goto err_tz;
+ }
+
+ sensor->temp_critical = celsius(sensor->temp_critical);
+
+ /* Set thresholds for IRQ */
+ sensor->high_temp = sensor->temp_critical;
+
+ trip = of_thermal_get_trip_points(sensor->th_dev);
+ sensor->num_trips = of_thermal_get_ntrips(sensor->th_dev);
+
+ /* Find out passive temperature if it exists */
+ for (i = (sensor->num_trips - 1); i >= 0; i--) {
+ if (trip[i].type == THERMAL_TRIP_PASSIVE) {
+ sensor->temp_passive = celsius(trip[i].temperature);
+ /* Update high temperature threshold */
+ sensor->high_temp = sensor->temp_passive;
+ }
+ }
+
+	/*
+	 * Ensure the low_temp_enabled flag is cleared.
+	 * With low_temp_enabled cleared, the low threshold IT is neither
+	 * configured nor enabled; it is not needed because the high
+	 * threshold is set on the lowest temperature trip point after
+	 * probe.
+	 */
+ sensor->low_temp_enabled = false;
+
+ /* Configure and enable HW sensor */
+ ret = stm_thermal_prepare(sensor);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Not able to enable sensor: %d\n", ret);
+ goto err_tz;
+ }
+
+	/*
+	 * The thermal zone core does not enable hwmon by default,
+	 * so enable it here
+	 */
+ sensor->th_dev->tzp->no_hwmon = false;
+ ret = thermal_add_hwmon_sysfs(sensor->th_dev);
+ if (ret)
+ goto err_tz;
+
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
+ dev_info(&pdev->dev, "%s: Driver initialized successfully\n",
+ __func__);
+
+ return 0;
+
+err_tz:
+ thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev);
+ return ret;
+}
+
+static int stm_thermal_remove(struct platform_device *pdev)
+{
+ struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ stm_thermal_sensor_off(sensor);
+ thermal_remove_hwmon_sysfs(sensor->th_dev);
+ thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev);
+
+ return 0;
+}
+
+static struct platform_driver stm_thermal_driver = {
+ .driver = {
+ .name = "stm_thermal",
+ .pm = &stm_thermal_pm_ops,
+ .of_match_table = stm_thermal_of_match,
+ },
+ .probe = stm_thermal_probe,
+ .remove = stm_thermal_remove,
+};
+module_platform_driver(stm_thermal_driver);
+
+MODULE_DESCRIPTION("STMicroelectronics STM32 Thermal Sensor Driver");
+MODULE_AUTHOR("David Hernandez Sanchez <david.hernandezsanchez@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm_thermal");
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index c2277b8ee88d..9553305c63ea 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Thunderbolt Cactus Ridge driver - capabilities lookup
+ * Thunderbolt driver - capabilities lookup
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#include <linux/slab.h>
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 37a7f4c735d0..73b386de4d15 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Thunderbolt Cactus Ridge driver - control channel and configuration commands
+ * Thunderbolt driver - control channel and configuration commands
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#include <linux/crc32.h>
@@ -631,7 +632,7 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
}
- tb_ctl_info(ctl, "control channel created\n");
+ tb_ctl_dbg(ctl, "control channel created\n");
return ctl;
err:
tb_ctl_free(ctl);
@@ -662,8 +663,7 @@ void tb_ctl_free(struct tb_ctl *ctl)
tb_ctl_pkg_free(ctl->rx_packets[i]);
- if (ctl->frame_pool)
- dma_pool_destroy(ctl->frame_pool);
+ dma_pool_destroy(ctl->frame_pool);
kfree(ctl);
}
@@ -673,7 +673,7 @@ void tb_ctl_free(struct tb_ctl *ctl)
void tb_ctl_start(struct tb_ctl *ctl)
{
int i;
- tb_ctl_info(ctl, "control channel starting...\n");
+ tb_ctl_dbg(ctl, "control channel starting...\n");
tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
tb_ring_start(ctl->rx);
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
@@ -702,7 +702,7 @@ void tb_ctl_stop(struct tb_ctl *ctl)
if (!list_empty(&ctl->request_queue))
tb_ctl_WARN(ctl, "dangling request in request_queue\n");
INIT_LIST_HEAD(&ctl->request_queue);
- tb_ctl_info(ctl, "control channel stopped\n");
+ tb_ctl_dbg(ctl, "control channel stopped\n");
}
/* public interface, commands */
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 3062e0b5f71e..2f1a1e111110 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Thunderbolt Cactus Ridge driver - control channel and configuration commands
+ * Thunderbolt driver - control channel and configuration commands
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#ifndef _TB_CFG
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
index f2701194f810..847dd07a7b17 100644
--- a/drivers/thunderbolt/dma_port.c
+++ b/drivers/thunderbolt/dma_port.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt DMA configuration based mailbox support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/thunderbolt/dma_port.h b/drivers/thunderbolt/dma_port.h
index c4a69e0fbff7..7deadd97ce31 100644
--- a/drivers/thunderbolt/dma_port.h
+++ b/drivers/thunderbolt/dma_port.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt DMA configuration based mailbox support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef DMA_PORT_H_
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 092381e2accf..93e562f18d40 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt bus support
*
* Copyright (C) 2017, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/device.h>
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 3e8caf22c294..81e8ac4c5805 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Thunderbolt Cactus Ridge driver - eeprom access
+ * Thunderbolt driver - eeprom access
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#include <linux/crc32.h>
@@ -540,7 +541,7 @@ int tb_drom_read(struct tb_switch *sw)
return res;
size &= 0x3ff;
size += TB_DROM_DATA_START;
- tb_sw_info(sw, "reading drom (length: %#x)\n", size);
+ tb_sw_dbg(sw, "reading drom (length: %#x)\n", size);
if (size < sizeof(*header)) {
tb_sw_warn(sw, "drom too small, aborting\n");
return -EIO;
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 28fc4ce75edb..e3fc920af682 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Internal Thunderbolt Connection Manager. This is a firmware running on
* the Thunderbolt host controller performing most of the low-level
@@ -6,10 +7,6 @@
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 5cd6bdfa068f..9aa44f9762a3 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1,10 +1,11 @@
/*
- * Thunderbolt Cactus Ridge driver - NHI driver
+ * Thunderbolt driver - NHI driver
*
* The NHI (native host interface) is the pci device that allows us to send and
* receive frames from the thunderbolt bus.
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#include <linux/pm_runtime.h>
@@ -95,9 +96,9 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
else
new = old & ~mask;
- dev_info(&ring->nhi->pdev->dev,
- "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
- active ? "enabling" : "disabling", reg, bit, old, new);
+ dev_dbg(&ring->nhi->pdev->dev,
+ "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
+ active ? "enabling" : "disabling", reg, bit, old, new);
if (new == old)
dev_WARN(&ring->nhi->pdev->dev,
@@ -476,8 +477,9 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
void *poll_data)
{
struct tb_ring *ring = NULL;
- dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
- transmit ? "TX" : "RX", hop, size);
+
+ dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
+ transmit ? "TX" : "RX", hop, size);
/* Tx Ring 2 is reserved for E2E workaround */
if (transmit && hop == RING_E2E_UNUSED_HOPID)
@@ -585,8 +587,8 @@ void tb_ring_start(struct tb_ring *ring)
dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
goto err;
}
- dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
- RING_TYPE(ring), ring->hop);
+ dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
+ RING_TYPE(ring), ring->hop);
if (ring->flags & RING_FLAG_FRAME) {
/* Means 4096 */
@@ -647,8 +649,8 @@ void tb_ring_stop(struct tb_ring *ring)
{
spin_lock_irq(&ring->nhi->lock);
spin_lock(&ring->lock);
- dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
- RING_TYPE(ring), ring->hop);
+ dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
+ RING_TYPE(ring), ring->hop);
if (ring->nhi->going_away)
goto err;
if (!ring->running) {
@@ -716,10 +718,8 @@ void tb_ring_free(struct tb_ring *ring)
ring->descriptors_dma = 0;
- dev_info(&ring->nhi->pdev->dev,
- "freeing %s %d\n",
- RING_TYPE(ring),
- ring->hop);
+ dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
+ ring->hop);
/**
* ring->work can no longer be scheduled (it is scheduled only
@@ -931,7 +931,8 @@ static int nhi_runtime_resume(struct device *dev)
static void nhi_shutdown(struct tb_nhi *nhi)
{
int i;
- dev_info(&nhi->pdev->dev, "shutdown\n");
+
+ dev_dbg(&nhi->pdev->dev, "shutdown\n");
for (i = 0; i < nhi->hop_count; i++) {
if (nhi->tx_rings[i])
@@ -1059,7 +1060,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENODEV;
}
- dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+ dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
res = tb_domain_add(tb);
if (res) {
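
Demoting the ring and shutdown chatter from dev_info() to dev_dbg() silences it by default; on a kernel built with CONFIG_DYNAMIC_DEBUG it can be turned back on at runtime, for example:

	echo 'file nhi.c +p' > /sys/kernel/debug/dynamic_debug/control
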
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 1696a4560948..1b5d47ecd3ed 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Thunderbolt Cactus Ridge driver - NHI driver
+ * Thunderbolt driver - NHI driver
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#ifndef DSL3510_H_
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index b3e49d19c01e..a60bd98c1d04 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -3,6 +3,7 @@
* Thunderbolt driver - NHI registers
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#ifndef NHI_REGS_H_
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index ff49ad880bfd..a11956522bac 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -13,19 +13,19 @@
static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
{
- tb_port_info(port, " Hop through port %d to hop %d (%s)\n",
- hop->out_port, hop->next_hop,
- hop->enable ? "enabled" : "disabled");
- tb_port_info(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
- hop->weight, hop->priority,
- hop->initial_credits, hop->drop_packages);
- tb_port_info(port, " Counter enabled: %d Counter index: %d\n",
- hop->counter_enable, hop->counter);
- tb_port_info(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
- hop->ingress_fc, hop->egress_fc,
- hop->ingress_shared_buffer, hop->egress_shared_buffer);
- tb_port_info(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
- hop->unknown1, hop->unknown2, hop->unknown3);
+ tb_port_dbg(port, " Hop through port %d to hop %d (%s)\n",
+ hop->out_port, hop->next_hop,
+ hop->enable ? "enabled" : "disabled");
+ tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
+ hop->weight, hop->priority,
+ hop->initial_credits, hop->drop_packages);
+ tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
+ hop->counter_enable, hop->counter);
+ tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
+ hop->ingress_fc, hop->egress_fc,
+ hop->ingress_shared_buffer, hop->egress_shared_buffer);
+ tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
+ hop->unknown1, hop->unknown2, hop->unknown3);
}
/**
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
index 8fe913a95b4a..b2f0d6386cee 100644
--- a/drivers/thunderbolt/property.c
+++ b/drivers/thunderbolt/property.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt XDomain property support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/err.h>
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 7442bc4c6433..52ff854f0d6c 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Thunderbolt Cactus Ridge driver - switch/port utility functions
+ * Thunderbolt driver - switch/port utility functions
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#include <linux/delay.h>
@@ -436,15 +437,15 @@ static const char *tb_port_type(struct tb_regs_port_header *port)
static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
- tb_info(tb,
- " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
- port->port_number, port->vendor_id, port->device_id,
- port->revision, port->thunderbolt_version, tb_port_type(port),
- port->type);
- tb_info(tb, " Max hop id (in/out): %d/%d\n",
- port->max_in_hop_id, port->max_out_hop_id);
- tb_info(tb, " Max counters: %d\n", port->max_counters);
- tb_info(tb, " NFC Credits: %#x\n", port->nfc_credits);
+ tb_dbg(tb,
+ " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
+ port->port_number, port->vendor_id, port->device_id,
+ port->revision, port->thunderbolt_version, tb_port_type(port),
+ port->type);
+ tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
+ port->max_in_hop_id, port->max_out_hop_id);
+ tb_dbg(tb, " Max counters: %d\n", port->max_counters);
+ tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
}
/**
@@ -605,20 +606,18 @@ static int tb_init_port(struct tb_port *port)
static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
- tb_info(tb,
- " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
- sw->vendor_id, sw->device_id, sw->revision,
- sw->thunderbolt_version);
- tb_info(tb, " Max Port Number: %d\n", sw->max_port_number);
- tb_info(tb, " Config:\n");
- tb_info(tb,
+ tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+ sw->vendor_id, sw->device_id, sw->revision,
+ sw->thunderbolt_version);
+ tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number);
+ tb_dbg(tb, " Config:\n");
+ tb_dbg(tb,
" Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
- sw->upstream_port_number, sw->depth,
- (((u64) sw->route_hi) << 32) | sw->route_lo,
- sw->enabled, sw->plug_events_delay);
- tb_info(tb,
- " unknown1: %#x unknown4: %#x\n",
- sw->__unknown1, sw->__unknown4);
+ sw->upstream_port_number, sw->depth,
+ (((u64) sw->route_hi) << 32) | sw->route_lo,
+ sw->enabled, sw->plug_events_delay);
+ tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
+ sw->__unknown1, sw->__unknown4);
}
/**
@@ -634,7 +633,7 @@ int tb_switch_reset(struct tb *tb, u64 route)
header.route_lo = route,
header.enabled = true,
};
- tb_info(tb, "resetting switch at %llx\n", route);
+ tb_dbg(tb, "resetting switch at %llx\n", route);
res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
0, 2, 2, 2);
if (res.err)
@@ -1139,7 +1138,7 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
goto err_free_sw_ports;
- tb_info(tb, "current switch config:\n");
+ tb_dbg(tb, "current switch config:\n");
tb_dump_switch(tb, &sw->config);
/* configure switch */
@@ -1246,9 +1245,8 @@ int tb_switch_configure(struct tb_switch *sw)
int ret;
route = tb_route(sw);
- tb_info(tb,
- "initializing Switch at %#llx (depth: %d, up port: %d)\n",
- route, tb_route_length(route), sw->config.upstream_port_number);
+ tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
+ route, tb_route_length(route), sw->config.upstream_port_number);
if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
tb_sw_warn(sw, "unknown switch vendor id %#x\n",
@@ -1386,13 +1384,13 @@ int tb_switch_add(struct tb_switch *sw)
tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
return ret;
}
- tb_sw_info(sw, "uid: %#llx\n", sw->uid);
+ tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
tb_switch_set_uuid(sw);
for (i = 0; i <= sw->config.max_port_number; i++) {
if (sw->ports[i].disabled) {
- tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+ tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
continue;
}
ret = tb_init_port(&sw->ports[i]);
@@ -1405,6 +1403,14 @@ int tb_switch_add(struct tb_switch *sw)
if (ret)
return ret;
+ if (tb_route(sw)) {
+ dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
+ sw->vendor, sw->device);
+ if (sw->vendor_name && sw->device_name)
+ dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
+ sw->device_name);
+ }
+
ret = tb_switch_nvm_add(sw);
if (ret) {
device_del(&sw->dev);
@@ -1456,6 +1462,9 @@ void tb_switch_remove(struct tb_switch *sw)
tb_plug_events_active(sw, false);
tb_switch_nvm_remove(sw);
+
+ if (tb_route(sw))
+ dev_info(&sw->dev, "device disconnected\n");
device_unregister(&sw->dev);
}
@@ -1483,7 +1492,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
int tb_switch_resume(struct tb_switch *sw)
{
int i, err;
- tb_sw_info(sw, "resuming switch\n");
+ tb_sw_dbg(sw, "resuming switch\n");
/*
* Check for UID of the connected switches except for root
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1424581fd9af..30e02c716f6c 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -404,10 +404,10 @@ static int tb_suspend_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
- tb_info(tb, "suspending...\n");
+ tb_dbg(tb, "suspending...\n");
tb_switch_suspend(tb->root_switch);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
- tb_info(tb, "suspend finished\n");
+ tb_dbg(tb, "suspend finished\n");
return 0;
}
@@ -417,7 +417,7 @@ static int tb_resume_noirq(struct tb *tb)
struct tb_cm *tcm = tb_priv(tb);
struct tb_pci_tunnel *tunnel, *n;
- tb_info(tb, "resuming...\n");
+ tb_dbg(tb, "resuming...\n");
/* remove any pci devices the firmware might have setup */
tb_switch_reset(tb, 0);
@@ -432,12 +432,12 @@ static int tb_resume_noirq(struct tb *tb)
* the pcie links need some time to get going.
* 100ms works for me...
*/
- tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
+ tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
msleep(100);
}
/* Allow tb_handle_hotplug to progress events */
tcm->hotplug_active = true;
- tb_info(tb, "resume finished\n");
+ tb_dbg(tb, "resume finished\n");
return 0;
}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 5067d69d0501..52584c4003e3 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
+ * Thunderbolt driver - bus logic (NHI independent)
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#ifndef TB_H_
@@ -327,7 +328,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
-
+#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define __TB_SW_PRINT(level, sw, fmt, arg...) \
do { \
@@ -338,7 +339,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
-
+#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)
#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
do { \
@@ -352,6 +353,8 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
#define tb_port_info(port, fmt, arg...) \
__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
+#define tb_port_dbg(port, fmt, arg...) \
+ __TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);
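
The new tb_dbg()/tb_sw_dbg()/tb_port_dbg() macros slot into the existing __TB_SW_PRINT and __TB_PORT_PRINT cascades, so debug output keeps the same route-string and port prefixes as the info variants. Usage mirrors the conversions made elsewhere in this series:

	tb_dbg(tb, "current switch config:\n");
	tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
	tb_port_dbg(port, "disabled by eeprom\n");
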
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 2487e162c885..02c84aa3d018 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt control channel messages
*
* Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
* Copyright (C) 2017, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _TB_MSGS
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 693b0353c3fe..6f1ff04ee195 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Thunderbolt Cactus Ridge driver - Port/Switch config area registers
+ * Thunderbolt driver - Port/Switch config area registers
*
* Every thunderbolt device consists (logically) of a switch with multiple
* ports. Every port contains up to four config regions (HOPS, PORT, SWITCH,
* COUNTERS) which are used to configure the device.
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#ifndef _TB_REGS
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index db8bece63327..e27dd8beb94b 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt XDomain discovery protocol support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index df8bd0c7b97d..32886c304641 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -118,6 +118,7 @@ config SERIAL_ATMEL
depends on ARCH_AT91 || COMPILE_TEST
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
+ select MFD_AT91_USART
help
This enables the driver for the on-chip UARTs of the Atmel
AT91 processors.
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 8e4428725848..267d4d1de3f8 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -193,8 +193,7 @@ static struct console atmel_console;
#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
- { .compatible = "atmel,at91rm9200-usart" },
- { .compatible = "atmel,at91sam9260-usart" },
+ { .compatible = "atmel,at91rm9200-usart-serial" },
{ /* sentinel */ }
};
#endif
@@ -915,6 +914,7 @@ static void atmel_tx_dma(struct uart_port *port)
static int atmel_prepare_tx_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct device *mfd_dev = port->dev->parent;
dma_cap_mask_t mask;
struct dma_slave_config config;
int ret, nent;
@@ -922,7 +922,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
+ atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
if (atmel_port->chan_tx == NULL)
goto chan_err;
dev_info(port->dev, "using %s for tx DMA transfers\n",
@@ -1093,6 +1093,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
static int atmel_prepare_rx_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct device *mfd_dev = port->dev->parent;
struct dma_async_tx_descriptor *desc;
dma_cap_mask_t mask;
struct dma_slave_config config;
@@ -1104,7 +1105,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
dma_cap_zero(mask);
dma_cap_set(DMA_CYCLIC, mask);
- atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
+ atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
if (atmel_port->chan_rx == NULL)
goto chan_err;
dev_info(port->dev, "using %s for rx DMA transfers\n",
@@ -2222,8 +2223,8 @@ static const char *atmel_type(struct uart_port *port)
*/
static void atmel_release_port(struct uart_port *port)
{
- struct platform_device *pdev = to_platform_device(port->dev);
- int size = pdev->resource[0].end - pdev->resource[0].start + 1;
+ struct platform_device *mpdev = to_platform_device(port->dev->parent);
+ int size = resource_size(mpdev->resource);
release_mem_region(port->mapbase, size);
@@ -2238,8 +2239,8 @@ static void atmel_release_port(struct uart_port *port)
*/
static int atmel_request_port(struct uart_port *port)
{
- struct platform_device *pdev = to_platform_device(port->dev);
- int size = pdev->resource[0].end - pdev->resource[0].start + 1;
+ struct platform_device *mpdev = to_platform_device(port->dev->parent);
+ int size = resource_size(mpdev->resource);
if (!request_mem_region(port->mapbase, size, "atmel_serial"))
return -EBUSY;
@@ -2341,27 +2342,28 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
{
int ret;
struct uart_port *port = &atmel_port->uart;
+ struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
atmel_init_property(atmel_port, pdev);
atmel_set_ops(port);
- uart_get_rs485_mode(&pdev->dev, &port->rs485);
+ uart_get_rs485_mode(&mpdev->dev, &port->rs485);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
port->ops = &atmel_pops;
port->fifosize = 1;
port->dev = &pdev->dev;
- port->mapbase = pdev->resource[0].start;
- port->irq = pdev->resource[1].start;
+ port->mapbase = mpdev->resource[0].start;
+ port->irq = mpdev->resource[1].start;
port->rs485_config = atmel_config_rs485;
- port->membase = NULL;
+ port->membase = NULL;
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
/* for console, the clock could already be configured */
if (!atmel_port->clk) {
- atmel_port->clk = clk_get(&pdev->dev, "usart");
+ atmel_port->clk = clk_get(&mpdev->dev, "usart");
if (IS_ERR(atmel_port->clk)) {
ret = PTR_ERR(atmel_port->clk);
atmel_port->clk = NULL;
@@ -2694,13 +2696,22 @@ static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
static int atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *atmel_port;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.parent->of_node;
void *data;
int ret = -ENODEV;
bool rs485_enabled;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
+ /*
+ * In the device tree there is no node with "atmel,at91rm9200-usart-serial"
+ * as the compatible string. This driver is probed by the at91-usart MFD
+ * driver, which is just a wrapper over the atmel_serial and
+ * spi-at91-usart drivers. All attributes needed by this driver are
+ * found in the of_node of the parent.
+ */
+ pdev->dev.of_node = np;
+
ret = of_alias_get_id(np, "serial");
if (ret < 0)
/* port id not found in platform data nor device-tree aliases:
@@ -2836,6 +2847,7 @@ static int atmel_serial_remove(struct platform_device *pdev)
clk_put(atmel_port->clk);
atmel_port->clk = NULL;
+ pdev->dev.of_node = NULL;
return ret;
}
@@ -2846,7 +2858,7 @@ static struct platform_driver atmel_serial_driver = {
.suspend = atmel_serial_suspend,
.resume = atmel_serial_resume,
.driver = {
- .name = "atmel_usart",
+ .name = "atmel_usart_serial",
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
},
};
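
The atmel_release_port()/atmel_request_port() hunks above replace the open-coded inclusive-bounds arithmetic with resource_size(), besides resolving the resource through the MFD parent's platform device. A stand-alone sketch of what the helper encapsulates, with struct resource reduced to the two fields involved (approximation of the kernel definition):

#include <stdio.h>

struct resource { unsigned long start, end; };

/* kernel semantics: resource bounds are inclusive */
static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource r = { .start = 0xf8020000, .end = 0xf8023fff };

	/* old style: r.end - r.start + 1; the helper states the intent */
	printf("size = %#lx\n", resource_size(&r)); /* prints 0x4000 */
	return 0;
}
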
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 70a7981b94b3..85644669fbe7 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -274,6 +274,8 @@ static struct class uio_class = {
.dev_groups = uio_groups,
};
+static bool uio_class_registered;
+
/*
* device functions
*/
@@ -668,7 +670,7 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
struct page *page;
unsigned long offset;
void *addr;
- int ret = 0;
+ vm_fault_t ret = 0;
int mi;
mutex_lock(&idev->info_lock);
@@ -736,7 +738,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
return -EINVAL;
vma->vm_ops = &uio_physical_vm_ops;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (idev->info->mem[mi].memtype == UIO_MEM_PHYS)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/*
* We cannot use the vm_iomap_memory() helper here,
@@ -793,18 +796,19 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
}
switch (idev->info->mem[mi].memtype) {
- case UIO_MEM_PHYS:
- ret = uio_mmap_physical(vma);
- break;
- case UIO_MEM_LOGICAL:
- case UIO_MEM_VIRTUAL:
- ret = uio_mmap_logical(vma);
- break;
- default:
- ret = -EINVAL;
+ case UIO_MEM_IOVA:
+ case UIO_MEM_PHYS:
+ ret = uio_mmap_physical(vma);
+ break;
+ case UIO_MEM_LOGICAL:
+ case UIO_MEM_VIRTUAL:
+ ret = uio_mmap_logical(vma);
+ break;
+ default:
+ ret = -EINVAL;
}
-out:
+ out:
mutex_unlock(&idev->info_lock);
return ret;
}
@@ -876,6 +880,9 @@ static int init_uio_class(void)
printk(KERN_ERR "class_register failed for uio\n");
goto err_class_register;
}
+
+ uio_class_registered = true;
+
return 0;
err_class_register:
@@ -886,6 +893,7 @@ exit:
static void release_uio_class(void)
{
+ uio_class_registered = false;
class_unregister(&uio_class);
uio_major_cleanup();
}
@@ -912,6 +920,9 @@ int __uio_register_device(struct module *owner,
struct uio_device *idev;
int ret = 0;
+ if (!uio_class_registered)
+ return -EPROBE_DEFER;
+
if (!parent || !info || !info->name || !info->version)
return -EINVAL;
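
__uio_register_device() now refuses to run before init_uio_class() has finished, and it does so with -EPROBE_DEFER rather than a hard error, so the driver core retries the caller's probe once the class exists. A compact userspace model of that gate; -EPROBE_DEFER is the kernel's internal errno value, and the retry loop the driver core provides is reduced to a second call:

#include <stdio.h>
#include <stdbool.h>

#define EPROBE_DEFER 517 /* kernel-internal errno: retry probe later */

static bool uio_class_registered;

static int register_device(const char *name)
{
	if (!uio_class_registered)
		return -EPROBE_DEFER; /* not a failure, a "try again" */
	printf("%s registered\n", name);
	return 0;
}

int main(void)
{
	if (register_device("uio0") == -EPROBE_DEFER)
		puts("deferred: uio class not ready yet");
	uio_class_registered = true; /* init_uio_class() has run */
	return register_device("uio0");
}
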
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index e1134a4d97f3..003badaef5f3 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -163,7 +163,8 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to kmalloc\n");
goto bad2;
}
- uioinfo->name = pdev->dev.of_node->name;
+ uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
+ pdev->dev.of_node);
uioinfo->version = "devicetree";
/* Multiple IRQs are not supported */
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index bbc17effae5e..9cc37fe07d35 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -382,8 +382,7 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
}
/* set all UIO data */
- if (node->name)
- info->mem[0].name = kstrdup(node->name, GFP_KERNEL);
+ info->mem[0].name = kasprintf(GFP_KERNEL, "%pOFn", node);
info->mem[0].addr = res.start;
info->mem[0].size = resource_size(&res);
info->mem[0].memtype = UIO_MEM_PHYS;
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index e401be8321ab..c2493d011225 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -17,7 +17,6 @@
* # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
* > /sys/bus/vmbus/drivers/uio_hv_generic/bind
*/
-#define DEBUG 1
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
@@ -33,13 +32,13 @@
#include "../hv/hyperv_vmbus.h"
-#define DRIVER_VERSION "0.02.0"
+#define DRIVER_VERSION "0.02.1"
#define DRIVER_AUTHOR "Stephen Hemminger <sthemmin at microsoft.com>"
#define DRIVER_DESC "Generic UIO driver for VMBus devices"
#define HV_RING_SIZE 512 /* pages */
-#define SEND_BUFFER_SIZE (15 * 1024 * 1024)
-#define RECV_BUFFER_SIZE (15 * 1024 * 1024)
+#define SEND_BUFFER_SIZE (16 * 1024 * 1024)
+#define RECV_BUFFER_SIZE (31 * 1024 * 1024)
/*
* List of resources to be mapped to user space
@@ -56,6 +55,7 @@ enum hv_uio_map {
struct hv_uio_private_data {
struct uio_info info;
struct hv_device *device;
+ atomic_t refcnt;
void *recv_buf;
u32 recv_gpadl;
@@ -129,13 +129,12 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
{
struct vmbus_channel *channel
= container_of(kobj, struct vmbus_channel, kobj);
- struct hv_device *dev = channel->primary_channel->device_obj;
- u16 q_idx = channel->offermsg.offer.sub_channel_index;
+ void *ring_buffer = page_address(channel->ringbuffer_page);
- dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
- q_idx, vma_pages(vma), vma->vm_pgoff);
+ if (channel->state != CHANNEL_OPENED_STATE)
+ return -ENODEV;
- return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
+ return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
channel->ringbuffer_pagecount << PAGE_SHIFT);
}
@@ -176,58 +175,104 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
}
}
+/* free the reserved buffers for send and receive */
static void
hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
{
- if (pdata->send_gpadl)
+ if (pdata->send_gpadl) {
vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl);
- vfree(pdata->send_buf);
+ pdata->send_gpadl = 0;
+ vfree(pdata->send_buf);
+ }
- if (pdata->recv_gpadl)
+ if (pdata->recv_gpadl) {
vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl);
- vfree(pdata->recv_buf);
+ pdata->recv_gpadl = 0;
+ vfree(pdata->recv_buf);
+ }
+}
+
+/* VMBus primary channel is opened on first use */
+static int
+hv_uio_open(struct uio_info *info, struct inode *inode)
+{
+ struct hv_uio_private_data *pdata
+ = container_of(info, struct hv_uio_private_data, info);
+ struct hv_device *dev = pdata->device;
+ int ret;
+
+ if (atomic_inc_return(&pdata->refcnt) != 1)
+ return 0;
+
+ ret = vmbus_connect_ring(dev->channel,
+ hv_uio_channel_cb, dev->channel);
+
+ if (ret == 0)
+ dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+ else
+ atomic_dec(&pdata->refcnt);
+
+ return ret;
+}
+
+/* VMBus primary channel is closed on last close */
+static int
+hv_uio_release(struct uio_info *info, struct inode *inode)
+{
+ struct hv_uio_private_data *pdata
+ = container_of(info, struct hv_uio_private_data, info);
+ struct hv_device *dev = pdata->device;
+ int ret = 0;
+
+ if (atomic_dec_and_test(&pdata->refcnt))
+ ret = vmbus_disconnect_ring(dev->channel);
+
+ return ret;
}
static int
hv_uio_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
+ struct vmbus_channel *channel = dev->channel;
struct hv_uio_private_data *pdata;
+ void *ring_buffer;
int ret;
+ /* Communicating with the host has to be via shared memory, not hypercall */
+ if (!channel->offermsg.monitor_allocated) {
+ dev_err(&dev->device, "vmbus channel requires hypercall\n");
+ return -ENOTSUPP;
+ }
+
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
- HV_RING_SIZE * PAGE_SIZE, NULL, 0,
- hv_uio_channel_cb, dev->channel);
+ ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE,
+ HV_RING_SIZE * PAGE_SIZE);
if (ret)
goto fail;
- /* Communicating with host has to be via shared memory not hypercall */
- if (!dev->channel->offermsg.monitor_allocated) {
- dev_err(&dev->device, "vmbus channel requires hypercall\n");
- ret = -ENOTSUPP;
- goto fail_close;
- }
-
- dev->channel->inbound.ring_buffer->interrupt_mask = 1;
- set_channel_read_mode(dev->channel, HV_CALL_ISR);
+ set_channel_read_mode(channel, HV_CALL_ISR);
/* Fill general uio info */
pdata->info.name = "uio_hv_generic";
pdata->info.version = DRIVER_VERSION;
pdata->info.irqcontrol = hv_uio_irqcontrol;
+ pdata->info.open = hv_uio_open;
+ pdata->info.release = hv_uio_release;
pdata->info.irq = UIO_IRQ_CUSTOM;
+ atomic_set(&pdata->refcnt, 0);
/* mem resources */
pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
+ ring_buffer = page_address(channel->ringbuffer_page);
pdata->info.mem[TXRX_RING_MAP].addr
- = (uintptr_t)dev->channel->ringbuffer_pages;
+ = (uintptr_t)virt_to_phys(ring_buffer);
pdata->info.mem[TXRX_RING_MAP].size
- = dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
- pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
+ = channel->ringbuffer_pagecount << PAGE_SHIFT;
+ pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_IOVA;
pdata->info.mem[INT_PAGE_MAP].name = "int_page";
pdata->info.mem[INT_PAGE_MAP].addr
@@ -247,7 +292,7 @@ hv_uio_probe(struct hv_device *dev,
goto fail_close;
}
- ret = vmbus_establish_gpadl(dev->channel, pdata->recv_buf,
+ ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
RECV_BUFFER_SIZE, &pdata->recv_gpadl);
if (ret)
goto fail_close;
@@ -261,14 +306,13 @@ hv_uio_probe(struct hv_device *dev,
pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
-
pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
if (pdata->send_buf == NULL) {
ret = -ENOMEM;
goto fail_close;
}
- ret = vmbus_establish_gpadl(dev->channel, pdata->send_buf,
+ ret = vmbus_establish_gpadl(channel, pdata->send_buf,
SEND_BUFFER_SIZE, &pdata->send_gpadl);
if (ret)
goto fail_close;
@@ -290,10 +334,10 @@ hv_uio_probe(struct hv_device *dev,
goto fail_close;
}
- vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
- vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
+ vmbus_set_chn_rescind_callback(channel, hv_uio_rescind);
+ vmbus_set_sc_create_callback(channel, hv_uio_new_channel);
- ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
+ ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
if (ret)
dev_notice(&dev->device,
"sysfs create ring bin file failed; %d\n", ret);
@@ -304,7 +348,6 @@ hv_uio_probe(struct hv_device *dev,
fail_close:
hv_uio_cleanup(dev, pdata);
- vmbus_close(dev->channel);
fail:
kfree(pdata);
@@ -322,7 +365,8 @@ hv_uio_remove(struct hv_device *dev)
uio_unregister_device(&pdata->info);
hv_uio_cleanup(dev, pdata);
hv_set_drvdata(dev, NULL);
- vmbus_close(dev->channel);
+
+ vmbus_free_ring(dev->channel);
kfree(pdata);
return 0;
}
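
hv_uio_open()/hv_uio_release() above implement a first-user/last-user pattern: only the open that takes the refcount from 0 to 1 connects the VMBus ring, and only the close that drops it back to 0 disconnects. A userspace model with C11 atomics; the connect/disconnect stubs stand in for vmbus_connect_ring()/vmbus_disconnect_ring():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcnt;

static int dev_open(void)
{
	if (atomic_fetch_add(&refcnt, 1) + 1 != 1) /* atomic_inc_return() */
		return 0; /* ring already connected */
	puts("connect ring"); /* first opener does the work */
	return 0;
}

static void dev_close(void)
{
	if (atomic_fetch_sub(&refcnt, 1) - 1 == 0) /* atomic_dec_and_test() */
		puts("disconnect ring"); /* last closer tears down */
}

int main(void)
{
	dev_open(); dev_open(); /* second open is a no-op */
	dev_close(); dev_close(); /* second close disconnects */
	return 0;
}
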
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index f598ecddc8a7..6c759934bff3 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -118,7 +118,8 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to kmalloc\n");
return -ENOMEM;
}
- uioinfo->name = pdev->dev.of_node->name;
+ uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
+ pdev->dev.of_node);
uioinfo->version = "devicetree";
/* Multiple IRQs are not supported */
}
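
The uio probe fixes above replace a raw pointer into the of_node's name with a freshly formatted copy ("%pOFn" prints the device-tree node name), so the string's lifetime follows the uio device rather than the node. A userspace sketch of why owning a copy matters, with asprintf() standing in for devm_kasprintf()/kasprintf() and the node reduced to a name field:

#define _GNU_SOURCE /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node { char *name; };

int main(void)
{
	struct node *np = malloc(sizeof(*np));
	char *uio_name;

	np->name = strdup("uio_pdrv_genirq");
	if (!np || !np->name || asprintf(&uio_name, "%s", np->name) < 0)
		return 1; /* models devm_kasprintf("%pOFn", node) */

	free(np->name); /* node goes away, e.g. on overlay removal */
	free(np);
	puts(uio_name); /* still valid: we own the copy */
	free(uio_name);
	return 0;
}
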
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 19f5f5f2a48a..09b37c0d075d 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -364,8 +364,7 @@ static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
ci_hdrc_imx_remove(pdev);
}
-#ifdef CONFIG_PM
-static int imx_controller_suspend(struct device *dev)
+static int __maybe_unused imx_controller_suspend(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
@@ -377,7 +376,7 @@ static int imx_controller_suspend(struct device *dev)
return 0;
}
-static int imx_controller_resume(struct device *dev)
+static int __maybe_unused imx_controller_resume(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret = 0;
@@ -408,8 +407,7 @@ clk_disable:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
-static int ci_hdrc_imx_suspend(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_suspend(struct device *dev)
{
int ret;
@@ -431,7 +429,7 @@ static int ci_hdrc_imx_suspend(struct device *dev)
return imx_controller_suspend(dev);
}
-static int ci_hdrc_imx_resume(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_resume(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret;
@@ -445,9 +443,8 @@ static int ci_hdrc_imx_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM_SLEEP */
-static int ci_hdrc_imx_runtime_suspend(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_runtime_suspend(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret;
@@ -466,13 +463,11 @@ static int ci_hdrc_imx_runtime_suspend(struct device *dev)
return imx_controller_suspend(dev);
}
-static int ci_hdrc_imx_runtime_resume(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_runtime_resume(struct device *dev)
{
return imx_controller_resume(dev);
}
-#endif /* CONFIG_PM */
-
static const struct dev_pm_ops ci_hdrc_imx_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ci_hdrc_imx_suspend, ci_hdrc_imx_resume)
SET_RUNTIME_PM_OPS(ci_hdrc_imx_runtime_suspend,
@@ -492,7 +487,7 @@ static struct platform_driver ci_hdrc_imx_driver = {
module_platform_driver(ci_hdrc_imx_driver);
MODULE_ALIAS("platform:imx-usb");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CI HDRC i.MX USB binding");
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 85fc6db48e44..7bfcbb23c2a4 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -53,6 +53,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
@@ -723,6 +724,24 @@ static int ci_get_platdata(struct device *dev,
else
cable->connected = false;
}
+
+ platdata->pctl = devm_pinctrl_get(dev);
+ if (!IS_ERR(platdata->pctl)) {
+ struct pinctrl_state *p;
+
+ p = pinctrl_lookup_state(platdata->pctl, "default");
+ if (!IS_ERR(p))
+ platdata->pins_default = p;
+
+ p = pinctrl_lookup_state(platdata->pctl, "host");
+ if (!IS_ERR(p))
+ platdata->pins_host = p;
+
+ p = pinctrl_lookup_state(platdata->pctl, "device");
+ if (!IS_ERR(p))
+ platdata->pins_device = p;
+ }
+
return 0;
}
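
ci_get_platdata() treats every pinctrl state as optional: a lookup that fails leaves the corresponding pins_* pointer NULL, and the later pinctrl_select_state() calls in host.c and udc.c are guarded by NULL checks. A sketch of that IS_ERR-as-absent pattern, with ERR_PTR()/IS_ERR() reduced to userspace stand-ins for the kernel helpers:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static int have_host_state; /* toggle to simulate the DT contents */

static void *lookup_state(const char *name)
{
	static int dummy;

	(void)name;
	if (!have_host_state)
		return ERR_PTR(-ENODEV); /* state not described in DT */
	return &dummy;
}

int main(void)
{
	void *pins_host = NULL;
	void *p = lookup_state("host");

	if (!IS_ERR(p))
		pins_host = p; /* keep only valid handles */
	if (pins_host)
		puts("select host pinctrl state");
	else
		puts("no host state; keep current pins");
	return 0;
}
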
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 4638d9b066be..d858a82c4f44 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -13,6 +13,7 @@
#include <linux/usb/hcd.h>
#include <linux/usb/chipidea.h>
#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
#include "../host/ehci.h"
@@ -153,6 +154,10 @@ static int host_start(struct ci_hdrc *ci)
}
}
+ if (ci->platdata->pins_host)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_host);
+
ret = usb_add_hcd(hcd, 0, 0);
if (ret) {
goto disable_reg;
@@ -197,6 +202,10 @@ static void host_stop(struct ci_hdrc *ci)
}
ci->hcd = NULL;
ci->otg.host = NULL;
+
+ if (ci->platdata->pins_host && ci->platdata->pins_default)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_default);
}
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index db4ceffcf2a6..f25d4827fd49 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -203,14 +203,17 @@ static void ci_otg_work(struct work_struct *work)
}
pm_runtime_get_sync(ci->dev);
+
if (ci->id_event) {
ci->id_event = false;
ci_handle_id_switch(ci);
- } else if (ci->b_sess_valid_event) {
+ }
+
+ if (ci->b_sess_valid_event) {
ci->b_sess_valid_event = false;
ci_handle_vbus_change(ci);
- } else
- dev_err(ci->dev, "unexpected event occurs at %s\n", __func__);
+ }
+
pm_runtime_put_sync(ci->dev);
enable_irq(ci->irq);
diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
index 7e7428e48bfa..4f8b8179ec96 100644
--- a/drivers/usb/chipidea/otg.h
+++ b/drivers/usb/chipidea/otg.h
@@ -17,7 +17,8 @@ void ci_handle_vbus_change(struct ci_hdrc *ci);
static inline void ci_otg_queue_work(struct ci_hdrc *ci)
{
disable_irq_nosync(ci->irq);
- queue_work(ci->wq, &ci->work);
+ if (queue_work(ci->wq, &ci->work) == false)
+ enable_irq(ci->irq);
}
#endif /* __DRIVERS_USB_CHIPIDEA_OTG_H */
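
The otg.h fix matters because disable_irq_nosync() and enable_irq() nest through a depth counter. queue_work() returns false when the work item is already pending, and an already-pending item will issue only one enable_irq() from ci_otg_work(), so the caller has to undo its own disable immediately or the depth never returns to zero and the interrupt stays masked. A userspace approximation of the balancing:

#include <stdio.h>
#include <stdbool.h>

static int irq_depth; /* >0 means the IRQ line is masked */
static bool work_pending;

static void disable_irq(void) { irq_depth++; }
static void enable_irq(void)  { irq_depth--; }

static bool queue_work(void)
{
	if (work_pending)
		return false; /* already queued: no extra completion */
	work_pending = true;
	return true;
}

static void ci_otg_queue_work(void)
{
	disable_irq();
	if (!queue_work())
		enable_irq(); /* rebalance, as in the patch */
}

int main(void)
{
	ci_otg_queue_work(); /* queues the work, depth is 1 */
	ci_otg_queue_work(); /* duplicate: depth must not grow */
	work_pending = false; /* the work handler ran ... */
	enable_irq(); /* ... and re-enabled the IRQ */
	printf("irq depth = %d (expect 0)\n", irq_depth);
	return 0;
}
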
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 9852ec5e6e01..829e947cabf5 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg-fsm.h>
@@ -1965,6 +1966,10 @@ void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
static int udc_id_switch_for_device(struct ci_hdrc *ci)
{
+ if (ci->platdata->pins_device)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_device);
+
if (ci->is_otg)
/* Clear and enable BSV irq */
hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
@@ -1983,6 +1988,10 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
ci->vbus_active = 0;
+
+ if (ci->platdata->pins_device && ci->platdata->pins_default)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_default);
}
/**
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 34ad5bf8acd8..def80ff547e4 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -343,6 +343,8 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
} else if (data->oc_polarity == 1) {
/* High active */
reg &= ~(MX6_BM_OVER_CUR_DIS | MX6_BM_OVER_CUR_POLARITY);
+ } else {
+ reg &= ~(MX6_BM_OVER_CUR_DIS);
}
writel(reg, usbmisc->base + data->index * 4);
@@ -633,6 +635,6 @@ static struct platform_driver usbmisc_imx_driver = {
module_platform_driver(usbmisc_imx_driver);
MODULE_ALIAS("platform:usbmisc-imx");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("driver for imx usb non-core registers");
MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 83ffa5a14c3d..4942122b2346 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -5,6 +5,7 @@
* Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
* Copyright (C) 2008 Novell, Inc.
* Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (C) 2018 IVI Foundation, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -21,21 +22,24 @@
#include <linux/compat.h>
#include <linux/usb/tmc.h>
+/* Increment the API version when adding new flags or ioctls to tmc.h,
+ * or when changing significant driver behavior.
+ */
+#define USBTMC_API_VERSION (2)
#define USBTMC_HEADER_SIZE 12
#define USBTMC_MINOR_BASE 176
-/*
- * Size of driver internal IO buffer. Must be multiple of 4 and at least as
- * large as wMaxPacketSize (which is usually 512 bytes).
- */
-#define USBTMC_SIZE_IOBUFFER 2048
-
/* Minimum USB timeout (in milliseconds) */
#define USBTMC_MIN_TIMEOUT 100
/* Default USB timeout (in milliseconds) */
#define USBTMC_TIMEOUT 5000
+/* Max number of urbs used in write transfers */
+#define MAX_URBS_IN_FLIGHT 16
+/* I/O buffer size used in generic read/write functions */
+#define USBTMC_BUFSIZE (4096)
+
/*
* Maximum number of read cycles to empty bulk in endpoint during CLEAR and
* ABORT_BULK_IN requests. Ends the loop if (for whatever reason) a short
@@ -79,6 +83,9 @@ struct usbtmc_device_data {
u8 bTag_last_write; /* needed for abort */
u8 bTag_last_read; /* needed for abort */
+ /* packet size of IN bulk */
+ u16 wMaxPacketSize;
+
/* data for interrupt in endpoint handling */
u8 bNotify1;
u8 bNotify2;
@@ -95,11 +102,6 @@ struct usbtmc_device_data {
/* coalesced usb488_caps from usbtmc_dev_capabilities */
__u8 usb488_caps;
- /* attributes from the USB TMC spec for this device */
- u8 TermChar;
- bool TermCharEnabled;
- bool auto_abort;
-
bool zombie; /* fd of disconnected device */
struct usbtmc_dev_capabilities capabilities;
@@ -121,13 +123,34 @@ struct usbtmc_file_data {
u32 timeout;
u8 srq_byte;
atomic_t srq_asserted;
+ atomic_t closing;
+ u8 bmTransferAttributes; /* member of DEV_DEP_MSG_IN */
+
u8 eom_val;
u8 term_char;
bool term_char_enabled;
+ bool auto_abort;
+
+ spinlock_t err_lock; /* lock for errors */
+
+ struct usb_anchor submitted;
+
+ /* data for generic_write */
+ struct semaphore limit_write_sem;
+ u32 out_transfer_size;
+ int out_status;
+
+ /* data for generic_read */
+ u32 in_transfer_size;
+ int in_status;
+ int in_urbs_used;
+ struct usb_anchor in_anchor;
+ wait_queue_head_t wait_bulk_in;
};
/* Forward declarations */
static struct usb_driver usbtmc_driver;
+static void usbtmc_draw_down(struct usbtmc_file_data *file_data);
static void usbtmc_delete(struct kref *kref)
{
@@ -153,6 +176,12 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
if (!file_data)
return -ENOMEM;
+ spin_lock_init(&file_data->err_lock);
+ sema_init(&file_data->limit_write_sem, MAX_URBS_IN_FLIGHT);
+ init_usb_anchor(&file_data->submitted);
+ init_usb_anchor(&file_data->in_anchor);
+ init_waitqueue_head(&file_data->wait_bulk_in);
+
data = usb_get_intfdata(intf);
/* Protect reference to data from file structure until release */
kref_get(&data->kref);
@@ -160,10 +189,12 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
mutex_lock(&data->io_mutex);
file_data->data = data;
- /* copy default values from device settings */
+ atomic_set(&file_data->closing, 0);
+
file_data->timeout = USBTMC_TIMEOUT;
- file_data->term_char = data->TermChar;
- file_data->term_char_enabled = data->TermCharEnabled;
+ file_data->term_char = '\n';
+ file_data->term_char_enabled = 0;
+ file_data->auto_abort = 0;
file_data->eom_val = 1;
INIT_LIST_HEAD(&file_data->file_elem);
@@ -178,6 +209,40 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
return 0;
}
+/*
+ * usbtmc_flush - called before file handle is closed
+ */
+static int usbtmc_flush(struct file *file, fl_owner_t id)
+{
+ struct usbtmc_file_data *file_data;
+ struct usbtmc_device_data *data;
+
+ file_data = file->private_data;
+ if (file_data == NULL)
+ return -ENODEV;
+
+ atomic_set(&file_data->closing, 1);
+ data = file_data->data;
+
+ /* wait for io to stop */
+ mutex_lock(&data->io_mutex);
+
+ usbtmc_draw_down(file_data);
+
+ spin_lock_irq(&file_data->err_lock);
+ file_data->in_status = 0;
+ file_data->in_transfer_size = 0;
+ file_data->in_urbs_used = 0;
+ file_data->out_status = 0;
+ file_data->out_transfer_size = 0;
+ spin_unlock_irq(&file_data->err_lock);
+
+ wake_up_interruptible_all(&data->waitq);
+ mutex_unlock(&data->io_mutex);
+
+ return 0;
+}
+
static int usbtmc_release(struct inode *inode, struct file *file)
{
struct usbtmc_file_data *file_data = file->private_data;
@@ -197,18 +262,17 @@ static int usbtmc_release(struct inode *inode, struct file *file)
return 0;
}
-static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
+static int usbtmc_ioctl_abort_bulk_in_tag(struct usbtmc_device_data *data,
+ u8 tag)
{
u8 *buffer;
struct device *dev;
int rv;
int n;
int actual;
- struct usb_host_interface *current_setting;
- int max_size;
dev = &data->intf->dev;
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+ buffer = kmalloc(USBTMC_BUFSIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -216,86 +280,88 @@ static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INITIATE_ABORT_BULK_IN,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
- data->bTag_last_read, data->bulk_in,
- buffer, 2, USBTMC_TIMEOUT);
+ tag, data->bulk_in,
+ buffer, 2, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
}
- dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+ dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x with tag %02x\n",
+ buffer[0], buffer[1]);
if (buffer[0] == USBTMC_STATUS_FAILED) {
+ /* No transfer in progress and the Bulk-OUT FIFO is empty. */
rv = 0;
goto exit;
}
- if (buffer[0] != USBTMC_STATUS_SUCCESS) {
- dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n",
- buffer[0]);
- rv = -EPERM;
+ if (buffer[0] == USBTMC_STATUS_TRANSFER_NOT_IN_PROGRESS) {
+ /* The device returns this status if either:
+ * - There is a transfer in progress, but the specified bTag
+ * does not match.
+ * - There is no transfer in progress, but the Bulk-OUT FIFO
+ * is not empty.
+ */
+ rv = -ENOMSG;
goto exit;
}
- max_size = 0;
- current_setting = data->intf->cur_altsetting;
- for (n = 0; n < current_setting->desc.bNumEndpoints; n++)
- if (current_setting->endpoint[n].desc.bEndpointAddress ==
- data->bulk_in)
- max_size = usb_endpoint_maxp(&current_setting->endpoint[n].desc);
-
- if (max_size == 0) {
- dev_err(dev, "Couldn't get wMaxPacketSize\n");
+ if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+ dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n",
+ buffer[0]);
rv = -EPERM;
goto exit;
}
- dev_dbg(&data->intf->dev, "wMaxPacketSize is %d\n", max_size);
-
n = 0;
- do {
- dev_dbg(dev, "Reading from bulk in EP\n");
+usbtmc_abort_bulk_in_status:
+ dev_dbg(dev, "Reading from bulk in EP\n");
- rv = usb_bulk_msg(data->usb_dev,
- usb_rcvbulkpipe(data->usb_dev,
- data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER,
- &actual, USBTMC_TIMEOUT);
+ /* Data must be present, so use a low timeout of 300 ms */
+ actual = 0;
+ rv = usb_bulk_msg(data->usb_dev,
+ usb_rcvbulkpipe(data->usb_dev,
+ data->bulk_in),
+ buffer, USBTMC_BUFSIZE,
+ &actual, 300);
- n++;
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE, 16, 1,
+ buffer, actual, true);
- if (rv < 0) {
- dev_err(dev, "usb_bulk_msg returned %d\n", rv);
+ n++;
+
+ if (rv < 0) {
+ dev_err(dev, "usb_bulk_msg returned %d\n", rv);
+ if (rv != -ETIMEDOUT)
goto exit;
- }
- } while ((actual == max_size) &&
- (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
+ }
- if (actual == max_size) {
+ if (actual == USBTMC_BUFSIZE)
+ goto usbtmc_abort_bulk_in_status;
+
+ if (n >= USBTMC_MAX_READS_TO_CLEAR_BULK_IN) {
dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
rv = -EPERM;
goto exit;
}
- n = 0;
-
-usbtmc_abort_bulk_in_status:
rv = usb_control_msg(data->usb_dev,
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
0, data->bulk_in, buffer, 0x08,
- USBTMC_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
}
- dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+ dev_dbg(dev, "CHECK_ABORT_BULK_IN returned %x\n", buffer[0]);
if (buffer[0] == USBTMC_STATUS_SUCCESS) {
rv = 0;
@@ -303,46 +369,30 @@ usbtmc_abort_bulk_in_status:
}
if (buffer[0] != USBTMC_STATUS_PENDING) {
- dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+ dev_err(dev, "CHECK_ABORT_BULK_IN returned %x\n", buffer[0]);
rv = -EPERM;
goto exit;
}
- if (buffer[1] == 1)
- do {
- dev_dbg(dev, "Reading from bulk in EP\n");
-
- rv = usb_bulk_msg(data->usb_dev,
- usb_rcvbulkpipe(data->usb_dev,
- data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER,
- &actual, USBTMC_TIMEOUT);
-
- n++;
-
- if (rv < 0) {
- dev_err(dev, "usb_bulk_msg returned %d\n", rv);
- goto exit;
- }
- } while ((actual == max_size) &&
- (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
-
- if (actual == max_size) {
- dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
- USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
- rv = -EPERM;
- goto exit;
+ if ((buffer[1] & 1) > 0) {
+ /* The device has 1 or more queued packets the Host can read */
+ goto usbtmc_abort_bulk_in_status;
}
- goto usbtmc_abort_bulk_in_status;
-
+ /* The Host must send CHECK_ABORT_BULK_IN_STATUS at a later time. */
+ rv = -EAGAIN;
exit:
kfree(buffer);
return rv;
+}
+static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
+{
+ return usbtmc_ioctl_abort_bulk_in_tag(data, data->bTag_last_read);
}
-static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
+static int usbtmc_ioctl_abort_bulk_out_tag(struct usbtmc_device_data *data,
+ u8 tag)
{
struct device *dev;
u8 *buffer;
@@ -359,8 +409,8 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
- data->bTag_last_write, data->bulk_out,
- buffer, 2, USBTMC_TIMEOUT);
+ tag, data->bulk_out,
+ buffer, 2, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -379,12 +429,14 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
n = 0;
usbtmc_abort_bulk_out_check_status:
+ /* do not stress the device with back-to-back status requests */
+ msleep(50);
rv = usb_control_msg(data->usb_dev,
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
0, data->bulk_out, buffer, 0x08,
- USBTMC_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
n++;
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -418,6 +470,11 @@ exit:
return rv;
}
+static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
+{
+ return usbtmc_ioctl_abort_bulk_out_tag(data, data->bTag_last_write);
+}
+
static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
void __user *arg)
{
@@ -457,7 +514,7 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
data->iin_bTag,
data->ifnum,
- buffer, 0x03, USBTMC_TIMEOUT);
+ buffer, 0x03, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "stb usb_control_msg returned %d\n", rv);
goto exit;
@@ -510,6 +567,54 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
return rv;
}
+static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
+ __u32 __user *arg)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev = &data->intf->dev;
+ int rv;
+ u32 timeout;
+ unsigned long expire;
+
+ if (!data->iin_ep_present) {
+ dev_dbg(dev, "no interrupt endpoint present\n");
+ return -EFAULT;
+ }
+
+ if (get_user(timeout, arg))
+ return -EFAULT;
+
+ expire = msecs_to_jiffies(timeout);
+
+ mutex_unlock(&data->io_mutex);
+
+ rv = wait_event_interruptible_timeout(
+ data->waitq,
+ atomic_read(&file_data->srq_asserted) != 0 ||
+ atomic_read(&file_data->closing),
+ expire);
+
+ mutex_lock(&data->io_mutex);
+
+ /* Note! disconnect or close could be called in the meantime */
+ if (atomic_read(&file_data->closing) || data->zombie)
+ rv = -ENODEV;
+
+ if (rv < 0) {
+ /* dev can be invalid now! */
+ pr_debug("%s - wait interrupted %d\n", __func__, rv);
+ return rv;
+ }
+
+ if (rv == 0) {
+ dev_dbg(dev, "%s - wait timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "%s - srq asserted\n", __func__);
+ return 0;
+}
+
static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
void __user *arg, unsigned int cmd)
{
@@ -543,7 +648,7 @@ static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
wValue,
data->ifnum,
- buffer, 0x01, USBTMC_TIMEOUT);
+ buffer, 0x01, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "simple usb_control_msg failed %d\n", rv);
goto exit;
@@ -610,6 +715,559 @@ static int usbtmc488_ioctl_trigger(struct usbtmc_file_data *file_data)
return 0;
}
+static struct urb *usbtmc_create_urb(void)
+{
+ const size_t bufsize = USBTMC_BUFSIZE;
+ u8 *dmabuf = NULL;
+ struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (!urb)
+ return NULL;
+
+ dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!dmabuf) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ urb->transfer_buffer = dmabuf;
+ urb->transfer_buffer_length = bufsize;
+ urb->transfer_flags |= URB_FREE_BUFFER;
+ return urb;
+}
+
+static void usbtmc_read_bulk_cb(struct urb *urb)
+{
+ struct usbtmc_file_data *file_data = urb->context;
+ int status = urb->status;
+ unsigned long flags;
+
+ /* sync/async unlink faults aren't errors */
+ if (status) {
+ if (!(/* status == -ENOENT || */
+ status == -ECONNRESET ||
+ status == -EREMOTEIO || /* Short packet */
+ status == -ESHUTDOWN))
+ dev_err(&file_data->data->intf->dev,
+ "%s - nonzero read bulk status received: %d\n",
+ __func__, status);
+
+ spin_lock_irqsave(&file_data->err_lock, flags);
+ if (!file_data->in_status)
+ file_data->in_status = status;
+ spin_unlock_irqrestore(&file_data->err_lock, flags);
+ }
+
+ spin_lock_irqsave(&file_data->err_lock, flags);
+ file_data->in_transfer_size += urb->actual_length;
+ dev_dbg(&file_data->data->intf->dev,
+ "%s - total size: %u current: %d status: %d\n",
+ __func__, file_data->in_transfer_size,
+ urb->actual_length, status);
+ spin_unlock_irqrestore(&file_data->err_lock, flags);
+ usb_anchor_urb(urb, &file_data->in_anchor);
+
+ wake_up_interruptible(&file_data->wait_bulk_in);
+ wake_up_interruptible(&file_data->data->waitq);
+}
+
+static inline bool usbtmc_do_transfer(struct usbtmc_file_data *file_data)
+{
+ bool data_or_error;
+
+ spin_lock_irq(&file_data->err_lock);
+ data_or_error = !usb_anchor_empty(&file_data->in_anchor)
+ || file_data->in_status;
+ spin_unlock_irq(&file_data->err_lock);
+ dev_dbg(&file_data->data->intf->dev, "%s: returns %d\n", __func__,
+ data_or_error);
+ return data_or_error;
+}
+
+static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
+ void __user *user_buffer,
+ u32 transfer_size,
+ u32 *transferred,
+ u32 flags)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev = &data->intf->dev;
+ u32 done = 0;
+ u32 remaining;
+ const u32 bufsize = USBTMC_BUFSIZE;
+ int retval = 0;
+ u32 max_transfer_size;
+ unsigned long expire;
+ int bufcount = 1;
+ int again = 0;
+
+ /* mutex already locked */
+
+ *transferred = done;
+
+ max_transfer_size = transfer_size;
+
+ if (flags & USBTMC_FLAG_IGNORE_TRAILER) {
+ /* The device may send extra alignment bytes (up to
+ * wMaxPacketSize - 1) to avoid sending a zero-length
+ * packet
+ */
+ remaining = transfer_size;
+ if ((max_transfer_size % data->wMaxPacketSize) == 0)
+ max_transfer_size += (data->wMaxPacketSize - 1);
+ } else {
+ /* round down to a multiple of bufsize to avoid leaving truncated data */
+ if (max_transfer_size > bufsize) {
+ max_transfer_size =
+ roundup(max_transfer_size + 1 - bufsize,
+ bufsize);
+ }
+ remaining = max_transfer_size;
+ }
+
+ spin_lock_irq(&file_data->err_lock);
+
+ if (file_data->in_status) {
+ /* return the very first error */
+ retval = file_data->in_status;
+ spin_unlock_irq(&file_data->err_lock);
+ goto error;
+ }
+
+ if (flags & USBTMC_FLAG_ASYNC) {
+ if (usb_anchor_empty(&file_data->in_anchor))
+ again = 1;
+
+ if (file_data->in_urbs_used == 0) {
+ file_data->in_transfer_size = 0;
+ file_data->in_status = 0;
+ }
+ } else {
+ file_data->in_transfer_size = 0;
+ file_data->in_status = 0;
+ }
+
+ if (max_transfer_size == 0) {
+ bufcount = 0;
+ } else {
+ bufcount = roundup(max_transfer_size, bufsize) / bufsize;
+ if (bufcount > file_data->in_urbs_used)
+ bufcount -= file_data->in_urbs_used;
+ else
+ bufcount = 0;
+
+ if (bufcount + file_data->in_urbs_used > MAX_URBS_IN_FLIGHT) {
+ bufcount = MAX_URBS_IN_FLIGHT -
+ file_data->in_urbs_used;
+ }
+ }
+ spin_unlock_irq(&file_data->err_lock);
+
+ dev_dbg(dev, "%s: requested=%u flags=0x%X size=%u bufs=%d used=%d\n",
+ __func__, transfer_size, flags,
+ max_transfer_size, bufcount, file_data->in_urbs_used);
+
+ while (bufcount > 0) {
+ u8 *dmabuf = NULL;
+ struct urb *urb = usbtmc_create_urb();
+
+ if (!urb) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ dmabuf = urb->transfer_buffer;
+
+ usb_fill_bulk_urb(urb, data->usb_dev,
+ usb_rcvbulkpipe(data->usb_dev, data->bulk_in),
+ dmabuf, bufsize,
+ usbtmc_read_bulk_cb, file_data);
+
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ /* urb is anchored. We can release our reference. */
+ usb_free_urb(urb);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ goto error;
+ }
+ file_data->in_urbs_used++;
+ bufcount--;
+ }
+
+ if (again) {
+ dev_dbg(dev, "%s: ret=again\n", __func__);
+ return -EAGAIN;
+ }
+
+ if (user_buffer == NULL)
+ return -EINVAL;
+
+ expire = msecs_to_jiffies(file_data->timeout);
+
+ while (max_transfer_size > 0) {
+ u32 this_part;
+ struct urb *urb = NULL;
+
+ if (!(flags & USBTMC_FLAG_ASYNC)) {
+ dev_dbg(dev, "%s: before wait time %lu\n",
+ __func__, expire);
+ retval = wait_event_interruptible_timeout(
+ file_data->wait_bulk_in,
+ usbtmc_do_transfer(file_data),
+ expire);
+
+ dev_dbg(dev, "%s: wait returned %d\n",
+ __func__, retval);
+
+ if (retval <= 0) {
+ if (retval == 0)
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ urb = usb_get_from_anchor(&file_data->in_anchor);
+ if (!urb) {
+ if (!(flags & USBTMC_FLAG_ASYNC)) {
+ /* synchronous case: must not happen */
+ retval = -EFAULT;
+ goto error;
+ }
+
+ /* asynchronous case: ready, do not block or wait */
+ *transferred = done;
+ dev_dbg(dev, "%s: (async) done=%u ret=0\n",
+ __func__, done);
+ return 0;
+ }
+
+ file_data->in_urbs_used--;
+
+ if (max_transfer_size > urb->actual_length)
+ max_transfer_size -= urb->actual_length;
+ else
+ max_transfer_size = 0;
+
+ if (remaining > urb->actual_length)
+ this_part = urb->actual_length;
+ else
+ this_part = remaining;
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE, 16, 1,
+ urb->transfer_buffer, urb->actual_length, true);
+
+ if (copy_to_user(user_buffer + done,
+ urb->transfer_buffer, this_part)) {
+ usb_free_urb(urb);
+ retval = -EFAULT;
+ goto error;
+ }
+
+ remaining -= this_part;
+ done += this_part;
+
+ spin_lock_irq(&file_data->err_lock);
+ if (urb->status) {
+ /* return the very first error */
+ retval = file_data->in_status;
+ spin_unlock_irq(&file_data->err_lock);
+ usb_free_urb(urb);
+ goto error;
+ }
+ spin_unlock_irq(&file_data->err_lock);
+
+ if (urb->actual_length < bufsize) {
+ /* short packet or ZLP received => ready */
+ usb_free_urb(urb);
+ retval = 1;
+ break;
+ }
+
+ if (!(flags & USBTMC_FLAG_ASYNC) &&
+ max_transfer_size > (bufsize * file_data->in_urbs_used)) {
+ /* resubmit, since the URBs still in flight are not enough */
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ goto error;
+ }
+ file_data->in_urbs_used++;
+ }
+ usb_free_urb(urb);
+ retval = 0;
+ }
+
+error:
+ *transferred = done;
+
+ dev_dbg(dev, "%s: before kill\n", __func__);
+ /* Attention: killing urbs can take long time (2 ms) */
+ usb_kill_anchored_urbs(&file_data->submitted);
+ dev_dbg(dev, "%s: after kill\n", __func__);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+ file_data->in_urbs_used = 0;
+ file_data->in_status = 0; /* no spinlock needed here */
+ dev_dbg(dev, "%s: done=%u ret=%d\n", __func__, done, retval);
+
+ return retval;
+}
+
+static ssize_t usbtmc_ioctl_generic_read(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ struct usbtmc_message msg;
+ ssize_t retval = 0;
+
+ /* mutex already locked */
+
+ if (copy_from_user(&msg, arg, sizeof(struct usbtmc_message)))
+ return -EFAULT;
+
+ retval = usbtmc_generic_read(file_data, msg.message,
+ msg.transfer_size, &msg.transferred,
+ msg.flags);
+
+ if (put_user(msg.transferred,
+ &((struct usbtmc_message __user *)arg)->transferred))
+ return -EFAULT;
+
+ return retval;
+}
+
+static void usbtmc_write_bulk_cb(struct urb *urb)
+{
+ struct usbtmc_file_data *file_data = urb->context;
+ int wakeup = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&file_data->err_lock, flags);
+ file_data->out_transfer_size += urb->actual_length;
+
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ dev_err(&file_data->data->intf->dev,
+ "%s - nonzero write bulk status received: %d\n",
+ __func__, urb->status);
+
+ if (!file_data->out_status) {
+ file_data->out_status = urb->status;
+ wakeup = 1;
+ }
+ }
+ spin_unlock_irqrestore(&file_data->err_lock, flags);
+
+ dev_dbg(&file_data->data->intf->dev,
+ "%s - write bulk total size: %u\n",
+ __func__, file_data->out_transfer_size);
+
+ up(&file_data->limit_write_sem);
+ if (usb_anchor_empty(&file_data->submitted) || wakeup)
+ wake_up_interruptible(&file_data->data->waitq);
+}
+
+static ssize_t usbtmc_generic_write(struct usbtmc_file_data *file_data,
+ const void __user *user_buffer,
+ u32 transfer_size,
+ u32 *transferred,
+ u32 flags)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev;
+ u32 done = 0;
+ u32 remaining;
+ unsigned long expire;
+ const u32 bufsize = USBTMC_BUFSIZE;
+ struct urb *urb = NULL;
+ int retval = 0;
+ u32 timeout;
+
+ *transferred = 0;
+
+ /* Get pointer to private data structure */
+ dev = &data->intf->dev;
+
+ dev_dbg(dev, "%s: size=%u flags=0x%X sema=%u\n",
+ __func__, transfer_size, flags,
+ file_data->limit_write_sem.count);
+
+ if (flags & USBTMC_FLAG_APPEND) {
+ spin_lock_irq(&file_data->err_lock);
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+ if (retval < 0)
+ return retval;
+ } else {
+ spin_lock_irq(&file_data->err_lock);
+ file_data->out_transfer_size = 0;
+ file_data->out_status = 0;
+ spin_unlock_irq(&file_data->err_lock);
+ }
+
+ remaining = transfer_size;
+ if (remaining > INT_MAX)
+ remaining = INT_MAX;
+
+ timeout = file_data->timeout;
+ expire = msecs_to_jiffies(timeout);
+
+ while (remaining > 0) {
+ u32 this_part, aligned;
+ u8 *buffer = NULL;
+
+ if (flags & USBTMC_FLAG_ASYNC) {
+ if (down_trylock(&file_data->limit_write_sem)) {
+ retval = (done)?(0):(-EAGAIN);
+ goto exit;
+ }
+ } else {
+ retval = down_timeout(&file_data->limit_write_sem,
+ expire);
+ if (retval < 0) {
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ spin_lock_irq(&file_data->err_lock);
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+ if (retval < 0) {
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+
+ /* prepare next urb to send */
+ urb = usbtmc_create_urb();
+ if (!urb) {
+ retval = -ENOMEM;
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+ buffer = urb->transfer_buffer;
+
+ if (remaining > bufsize)
+ this_part = bufsize;
+ else
+ this_part = remaining;
+
+ if (copy_from_user(buffer, user_buffer + done, this_part)) {
+ retval = -EFAULT;
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, this_part, true);
+
+ /* pad the bulk transfer to 32-bit alignment to meet the USBTMC
+ * specification; ((size + 3) & ~3) rounds up and simplifies user code
+ */
+ aligned = (this_part + 3) & ~3;
+ dev_dbg(dev, "write(size:%u align:%u done:%u)\n",
+ (unsigned int)this_part,
+ (unsigned int)aligned,
+ (unsigned int)done);
+
+ usb_fill_bulk_urb(urb, data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev, data->bulk_out),
+ urb->transfer_buffer, aligned,
+ usbtmc_write_bulk_cb, file_data);
+
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+
+ usb_free_urb(urb);
+ urb = NULL; /* urb will be finally released by usb driver */
+
+ remaining -= this_part;
+ done += this_part;
+ }
+
+ /* All urbs are on the fly */
+ if (!(flags & USBTMC_FLAG_ASYNC)) {
+ if (!usb_wait_anchor_empty_timeout(&file_data->submitted,
+ timeout)) {
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ retval = 0;
+ goto exit;
+
+error:
+ usb_kill_anchored_urbs(&file_data->submitted);
+exit:
+ usb_free_urb(urb);
+
+ spin_lock_irq(&file_data->err_lock);
+ if (!(flags & USBTMC_FLAG_ASYNC))
+ done = file_data->out_transfer_size;
+ if (!retval && file_data->out_status)
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+
+ *transferred = done;
+
+ dev_dbg(dev, "%s: done=%u, retval=%d, urbstat=%d\n",
+ __func__, done, retval, file_data->out_status);
+
+ return retval;
+}
+
+static ssize_t usbtmc_ioctl_generic_write(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ struct usbtmc_message msg;
+ ssize_t retval = 0;
+
+ /* mutex already locked */
+
+ if (copy_from_user(&msg, arg, sizeof(struct usbtmc_message)))
+ return -EFAULT;
+
+ retval = usbtmc_generic_write(file_data, msg.message,
+ msg.transfer_size, &msg.transferred,
+ msg.flags);
+
+ if (put_user(msg.transferred,
+ &((struct usbtmc_message __user *)arg)->transferred))
+ return -EFAULT;
+
+ return retval;
+}
+
+/*
+ * Get the generic write result
+ */
+static ssize_t usbtmc_ioctl_write_result(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ u32 transferred;
+ int retval;
+
+ spin_lock_irq(&file_data->err_lock);
+ transferred = file_data->out_transfer_size;
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+
+ if (put_user(transferred, (__u32 __user *)arg))
+ return -EFAULT;
+
+ return retval;
+}
+
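
The generic read/write machinery added above is driven from user space through struct usbtmc_message (transfer_size, transferred, flags, message): USBTMC_FLAG_ASYNC submits URBs without blocking, and usbtmc_ioctl_write_result() reports accumulated progress. A hedged usage sketch; the USBTMC_IOCTL_WRITE and USBTMC_IOCTL_WRITE_RESULT ioctl names are assumed from this series' additions to include/uapi/linux/usb/tmc.h and should be checked against the installed header:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usb/tmc.h> /* struct usbtmc_message, USBTMC_FLAG_* */

int main(void)
{
	char cmd[] = "*IDN?\n";
	struct usbtmc_message msg = {
		.transfer_size = sizeof(cmd) - 1,
		.flags = USBTMC_FLAG_ASYNC, /* queue URBs, do not block */
		.message = cmd,
	};
	__u32 written = 0;
	int fd = open("/dev/usbtmc0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, USBTMC_IOCTL_WRITE, &msg) < 0) /* assumed name */
		perror("USBTMC_IOCTL_WRITE");
	/* later: how much has the URB machinery pushed out so far? */
	if (ioctl(fd, USBTMC_IOCTL_WRITE_RESULT, &written) < 0)
		perror("USBTMC_IOCTL_WRITE_RESULT");
	printf("transferred %u of %u bytes\n", written, msg.transfer_size);
	close(fd);
	return 0;
}
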
/*
* Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-OUT endpoint.
* @transfer_size: number of bytes to request from the device.
@@ -619,7 +1277,7 @@ static int usbtmc488_ioctl_trigger(struct usbtmc_file_data *file_data)
* Also updates bTag_last_write.
*/
static int send_request_dev_dep_msg_in(struct usbtmc_file_data *file_data,
- size_t transfer_size)
+ u32 transfer_size)
{
struct usbtmc_device_data *data = file_data->data;
int retval;
@@ -662,12 +1320,11 @@ static int send_request_dev_dep_msg_in(struct usbtmc_file_data *file_data,
data->bTag++;
kfree(buffer);
- if (retval < 0) {
- dev_err(&data->intf->dev, "usb_bulk_msg in send_request_dev_dep_msg_in() returned %d\n", retval);
- return retval;
- }
+ if (retval < 0)
+ dev_err(&data->intf->dev, "%s returned %d\n",
+ __func__, retval);
- return 0;
+ return retval;
}
static ssize_t usbtmc_read(struct file *filp, char __user *buf,
@@ -676,20 +1333,20 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
struct device *dev;
+ const u32 bufsize = USBTMC_BUFSIZE;
u32 n_characters;
u8 *buffer;
int actual;
- size_t done;
- size_t remaining;
+ u32 done = 0;
+ u32 remaining;
int retval;
- size_t this_part;
/* Get pointer to private data structure */
file_data = filp->private_data;
data = file_data->data;
dev = &data->intf->dev;
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+ buffer = kmalloc(bufsize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -699,124 +1356,116 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
goto exit;
}
- dev_dbg(dev, "usb_bulk_msg_in: count(%zu)\n", count);
+ if (count > INT_MAX)
+ count = INT_MAX;
+
+ dev_dbg(dev, "%s(count:%zu)\n", __func__, count);
retval = send_request_dev_dep_msg_in(file_data, count);
if (retval < 0) {
- if (data->auto_abort)
+ if (file_data->auto_abort)
usbtmc_ioctl_abort_bulk_out(data);
goto exit;
}
/* Loop until we have fetched everything we requested */
remaining = count;
- this_part = remaining;
- done = 0;
+ actual = 0;
- while (remaining > 0) {
- /* Send bulk URB */
- retval = usb_bulk_msg(data->usb_dev,
- usb_rcvbulkpipe(data->usb_dev,
- data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER, &actual,
- file_data->timeout);
-
- dev_dbg(dev, "usb_bulk_msg: retval(%u), done(%zu), remaining(%zu), actual(%d)\n", retval, done, remaining, actual);
+ /* Send bulk URB */
+ retval = usb_bulk_msg(data->usb_dev,
+ usb_rcvbulkpipe(data->usb_dev,
+ data->bulk_in),
+ buffer, bufsize, &actual,
+ file_data->timeout);
- /* Store bTag (in case we need to abort) */
- data->bTag_last_read = data->bTag;
+ dev_dbg(dev, "%s: bulk_msg retval(%u), actual(%d)\n",
+ __func__, retval, actual);
- if (retval < 0) {
- dev_dbg(dev, "Unable to read data, error %d\n", retval);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
-
- /* Parse header in first packet */
- if (done == 0) {
- /* Sanity checks for the header */
- if (actual < USBTMC_HEADER_SIZE) {
- dev_err(dev, "Device sent too small first packet: %u < %u\n", actual, USBTMC_HEADER_SIZE);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ /* Store bTag (in case we need to abort) */
+ data->bTag_last_read = data->bTag;
- if (buffer[0] != 2) {
- dev_err(dev, "Device sent reply with wrong MsgID: %u != 2\n", buffer[0]);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ if (retval < 0) {
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- if (buffer[1] != data->bTag_last_write) {
- dev_err(dev, "Device sent reply with wrong bTag: %u != %u\n", buffer[1], data->bTag_last_write);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ /* Sanity checks for the header */
+ if (actual < USBTMC_HEADER_SIZE) {
+ dev_err(dev, "Device sent too small first packet: %u < %u\n",
+ actual, USBTMC_HEADER_SIZE);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- /* How many characters did the instrument send? */
- n_characters = buffer[4] +
- (buffer[5] << 8) +
- (buffer[6] << 16) +
- (buffer[7] << 24);
+ if (buffer[0] != 2) {
+ dev_err(dev, "Device sent reply with wrong MsgID: %u != 2\n",
+ buffer[0]);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- if (n_characters > this_part) {
- dev_err(dev, "Device wants to return more data than requested: %u > %zu\n", n_characters, count);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ if (buffer[1] != data->bTag_last_write) {
+ dev_err(dev, "Device sent reply with wrong bTag: %u != %u\n",
+ buffer[1], data->bTag_last_write);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- /* Remove the USBTMC header */
- actual -= USBTMC_HEADER_SIZE;
+ /* How many characters did the instrument send? */
+ n_characters = buffer[4] +
+ (buffer[5] << 8) +
+ (buffer[6] << 16) +
+ (buffer[7] << 24);
- /* Check if the message is smaller than requested */
- if (remaining > n_characters)
- remaining = n_characters;
- /* Remove padding if it exists */
- if (actual > remaining)
- actual = remaining;
+ file_data->bmTransferAttributes = buffer[8];
- dev_dbg(dev, "Bulk-IN header: N_characters(%u), bTransAttr(%u)\n", n_characters, buffer[8]);
+ dev_dbg(dev, "Bulk-IN header: N_characters(%u), bTransAttr(%u)\n",
+ n_characters, buffer[8]);
- remaining -= actual;
+ if (n_characters > remaining) {
+ dev_err(dev, "Device wants to return more data than requested: %u > %zu\n",
+ n_characters, count);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- /* Terminate if end-of-message bit received from device */
- if ((buffer[8] & 0x01) && (actual >= n_characters))
- remaining = 0;
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, actual, true);
- dev_dbg(dev, "Bulk-IN header: remaining(%zu), buf(%p), buffer(%p) done(%zu)\n", remaining,buf,buffer,done);
+ remaining = n_characters;
+ /* Remove the USBTMC header */
+ actual -= USBTMC_HEADER_SIZE;
- /* Copy buffer to user space */
- if (copy_to_user(buf + done, &buffer[USBTMC_HEADER_SIZE], actual)) {
- /* There must have been an addressing problem */
- retval = -EFAULT;
- goto exit;
- }
- done += actual;
- }
- else {
- if (actual > remaining)
- actual = remaining;
+ /* Remove padding if it exists */
+ if (actual > remaining)
+ actual = remaining;
- remaining -= actual;
+ remaining -= actual;
- dev_dbg(dev, "Bulk-IN header cont: actual(%u), done(%zu), remaining(%zu), buf(%p), buffer(%p)\n", actual, done, remaining,buf,buffer);
+ /* Copy buffer to user space */
+ if (copy_to_user(buf, &buffer[USBTMC_HEADER_SIZE], actual)) {
+ /* There must have been an addressing problem */
+ retval = -EFAULT;
+ goto exit;
+ }
- /* Copy buffer to user space */
- if (copy_to_user(buf + done, buffer, actual)) {
- /* There must have been an addressing problem */
- retval = -EFAULT;
- goto exit;
- }
- done += actual;
- }
+ if ((actual + USBTMC_HEADER_SIZE) == bufsize) {
+ retval = usbtmc_generic_read(file_data, buf + actual,
+ remaining,
+ &done,
+ USBTMC_FLAG_IGNORE_TRAILER);
+ if (retval < 0)
+ goto exit;
}
+ done += actual;
/* Update file position value */
*f_pos = *f_pos + done;
@@ -833,113 +1482,152 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
{
struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
+ struct urb *urb = NULL;
+ ssize_t retval = 0;
u8 *buffer;
- int retval;
- int actual;
- unsigned long int n_bytes;
- int remaining;
- int done;
- int this_part;
+ u32 remaining, done;
+ u32 transfersize, aligned, buflen;
file_data = filp->private_data;
data = file_data->data;
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
mutex_lock(&data->io_mutex);
+
if (data->zombie) {
retval = -ENODEV;
goto exit;
}
- remaining = count;
done = 0;
- while (remaining > 0) {
- if (remaining > USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE) {
- this_part = USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE;
- buffer[8] = 0;
- } else {
- this_part = remaining;
- buffer[8] = file_data->eom_val;
- }
+ spin_lock_irq(&file_data->err_lock);
+ file_data->out_transfer_size = 0;
+ file_data->out_status = 0;
+ spin_unlock_irq(&file_data->err_lock);
- /* Setup IO buffer for DEV_DEP_MSG_OUT message */
- buffer[0] = 1;
- buffer[1] = data->bTag;
- buffer[2] = ~data->bTag;
- buffer[3] = 0; /* Reserved */
- buffer[4] = this_part >> 0;
- buffer[5] = this_part >> 8;
- buffer[6] = this_part >> 16;
- buffer[7] = this_part >> 24;
- /* buffer[8] is set above... */
- buffer[9] = 0; /* Reserved */
- buffer[10] = 0; /* Reserved */
- buffer[11] = 0; /* Reserved */
-
- if (copy_from_user(&buffer[USBTMC_HEADER_SIZE], buf + done, this_part)) {
- retval = -EFAULT;
- goto exit;
- }
+ if (!count)
+ goto exit;
- n_bytes = roundup(USBTMC_HEADER_SIZE + this_part, 4);
- memset(buffer + USBTMC_HEADER_SIZE + this_part, 0, n_bytes - (USBTMC_HEADER_SIZE + this_part));
+ if (down_trylock(&file_data->limit_write_sem)) {
+ /* previous calls were async */
+ retval = -EBUSY;
+ goto exit;
+ }
- do {
- retval = usb_bulk_msg(data->usb_dev,
- usb_sndbulkpipe(data->usb_dev,
- data->bulk_out),
- buffer, n_bytes,
- &actual, file_data->timeout);
- if (retval != 0)
- break;
- n_bytes -= actual;
- } while (n_bytes);
-
- data->bTag_last_write = data->bTag;
+ urb = usbtmc_create_urb();
+ if (!urb) {
+ retval = -ENOMEM;
+ up(&file_data->limit_write_sem);
+ goto exit;
+ }
+
+ buffer = urb->transfer_buffer;
+ buflen = urb->transfer_buffer_length;
+
+ if (count > INT_MAX) {
+ transfersize = INT_MAX;
+ buffer[8] = 0;
+ } else {
+ transfersize = count;
+ buffer[8] = file_data->eom_val;
+ }
+
+ /* Setup IO buffer for DEV_DEP_MSG_OUT message */
+ buffer[0] = 1;
+ buffer[1] = data->bTag;
+ buffer[2] = ~data->bTag;
+ buffer[3] = 0; /* Reserved */
+ buffer[4] = transfersize >> 0;
+ buffer[5] = transfersize >> 8;
+ buffer[6] = transfersize >> 16;
+ buffer[7] = transfersize >> 24;
+ /* buffer[8] is set above... */
+ buffer[9] = 0; /* Reserved */
+ buffer[10] = 0; /* Reserved */
+ buffer[11] = 0; /* Reserved */
+
+ remaining = transfersize;
+
+ if (transfersize + USBTMC_HEADER_SIZE > buflen) {
+ transfersize = buflen - USBTMC_HEADER_SIZE;
+ aligned = buflen;
+ } else {
+ aligned = (transfersize + (USBTMC_HEADER_SIZE + 3)) & ~3;
+ }
+
+ if (copy_from_user(&buffer[USBTMC_HEADER_SIZE], buf, transfersize)) {
+ retval = -EFAULT;
+ up(&file_data->limit_write_sem);
+ goto exit;
+ }
+
+ dev_dbg(&data->intf->dev, "%s(size:%u align:%u)\n", __func__,
+ (unsigned int)transfersize, (unsigned int)aligned);
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, aligned, true);
+
+ usb_fill_bulk_urb(urb, data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev, data->bulk_out),
+ urb->transfer_buffer, aligned,
+ usbtmc_write_bulk_cb, file_data);
+
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ up(&file_data->limit_write_sem);
+ goto exit;
+ }
+
+ remaining -= transfersize;
+
+ data->bTag_last_write = data->bTag;
+ data->bTag++;
+
+ if (!data->bTag)
data->bTag++;
- if (!data->bTag)
- data->bTag++;
+ /* call generic_write even when remaining = 0 */
+ retval = usbtmc_generic_write(file_data, buf + transfersize, remaining,
+ &done, USBTMC_FLAG_APPEND);
+ /* truncate alignment bytes */
+ if (done > remaining)
+ done = remaining;
- if (retval < 0) {
- dev_err(&data->intf->dev,
- "Unable to send data, error %d\n", retval);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_out(data);
- goto exit;
- }
+ /* add size of first URB */
+ done += transfersize;
- remaining -= this_part;
- done += this_part;
+ if (retval < 0) {
+ usb_kill_anchored_urbs(&file_data->submitted);
+
+ dev_err(&data->intf->dev,
+ "Unable to send data, error %d\n", (int)retval);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_out(data);
+ goto exit;
}
- retval = count;
+ retval = done;
exit:
+ usb_free_urb(urb);
mutex_unlock(&data->io_mutex);
- kfree(buffer);
return retval;
}
static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
{
- struct usb_host_interface *current_setting;
- struct usb_endpoint_descriptor *desc;
struct device *dev;
u8 *buffer;
int rv;
int n;
int actual = 0;
- int max_size;
dev = &data->intf->dev;
dev_dbg(dev, "Sending INITIATE_CLEAR request\n");
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+ buffer = kmalloc(USBTMC_BUFSIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -947,7 +1635,7 @@ static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INITIATE_CLEAR,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 1, USBTMC_TIMEOUT);
+ 0, 0, buffer, 1, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
@@ -961,22 +1649,6 @@ static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
goto exit;
}
- max_size = 0;
- current_setting = data->intf->cur_altsetting;
- for (n = 0; n < current_setting->desc.bNumEndpoints; n++) {
- desc = &current_setting->endpoint[n].desc;
- if (desc->bEndpointAddress == data->bulk_in)
- max_size = usb_endpoint_maxp(desc);
- }
-
- if (max_size == 0) {
- dev_err(dev, "Couldn't get wMaxPacketSize\n");
- rv = -EPERM;
- goto exit;
- }
-
- dev_dbg(dev, "wMaxPacketSize is %d\n", max_size);
-
n = 0;
usbtmc_clear_check_status:
@@ -987,7 +1659,7 @@ usbtmc_clear_check_status:
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_CHECK_CLEAR_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 2, USBTMC_TIMEOUT);
+ 0, 0, buffer, 2, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
@@ -1004,15 +1676,20 @@ usbtmc_clear_check_status:
goto exit;
}
- if (buffer[1] == 1)
+ if ((buffer[1] & 1) != 0) {
do {
dev_dbg(dev, "Reading from bulk in EP\n");
+ actual = 0;
rv = usb_bulk_msg(data->usb_dev,
usb_rcvbulkpipe(data->usb_dev,
data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER,
- &actual, USBTMC_TIMEOUT);
+ buffer, USBTMC_BUFSIZE,
+ &actual, USB_CTRL_GET_TIMEOUT);
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, actual, true);
+
n++;
if (rv < 0) {
@@ -1020,10 +1697,15 @@ usbtmc_clear_check_status:
rv);
goto exit;
}
- } while ((actual == max_size) &&
+ } while ((actual == USBTMC_BUFSIZE) &&
(n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
+ } else {
+ /* do not stress device with subsequent requests */
+ msleep(50);
+ n++;
+ }
- if (actual == max_size) {
+ if (n >= USBTMC_MAX_READS_TO_CLEAR_BULK_IN) {
dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
rv = -EPERM;
@@ -1037,7 +1719,7 @@ usbtmc_clear_bulk_out_halt:
rv = usb_clear_halt(data->usb_dev,
usb_sndbulkpipe(data->usb_dev, data->bulk_out));
if (rv < 0) {
- dev_err(dev, "usb_control_msg returned %d\n", rv);
+ dev_err(dev, "usb_clear_halt returned %d\n", rv);
goto exit;
}
rv = 0;
@@ -1054,12 +1736,9 @@ static int usbtmc_ioctl_clear_out_halt(struct usbtmc_device_data *data)
rv = usb_clear_halt(data->usb_dev,
usb_sndbulkpipe(data->usb_dev, data->bulk_out));
- if (rv < 0) {
- dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
- rv);
- return rv;
- }
- return 0;
+ if (rv < 0)
+ dev_err(&data->usb_dev->dev, "%s returned %d\n", __func__, rv);
+ return rv;
}
static int usbtmc_ioctl_clear_in_halt(struct usbtmc_device_data *data)
@@ -1069,11 +1748,33 @@ static int usbtmc_ioctl_clear_in_halt(struct usbtmc_device_data *data)
rv = usb_clear_halt(data->usb_dev,
usb_rcvbulkpipe(data->usb_dev, data->bulk_in));
- if (rv < 0) {
- dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
- rv);
- return rv;
- }
+ if (rv < 0)
+ dev_err(&data->usb_dev->dev, "%s returned %d\n", __func__, rv);
+ return rv;
+}
+
+static int usbtmc_ioctl_cancel_io(struct usbtmc_file_data *file_data)
+{
+ spin_lock_irq(&file_data->err_lock);
+ file_data->in_status = -ECANCELED;
+ file_data->out_status = -ECANCELED;
+ spin_unlock_irq(&file_data->err_lock);
+ usb_kill_anchored_urbs(&file_data->submitted);
+ return 0;
+}
+
+static int usbtmc_ioctl_cleanup_io(struct usbtmc_file_data *file_data)
+{
+ usb_kill_anchored_urbs(&file_data->submitted);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+ spin_lock_irq(&file_data->err_lock);
+ file_data->in_status = 0;
+ file_data->in_transfer_size = 0;
+ file_data->out_status = 0;
+ file_data->out_transfer_size = 0;
+ spin_unlock_irq(&file_data->err_lock);
+
+ file_data->in_urbs_used = 0;
return 0;
}
@@ -1090,7 +1791,7 @@ static int get_capabilities(struct usbtmc_device_data *data)
rv = usb_control_msg(data->usb_dev, usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_GET_CAPABILITIES,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 0x18, USBTMC_TIMEOUT);
+ 0, 0, buffer, 0x18, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto err_out;
@@ -1147,72 +1848,6 @@ static const struct attribute_group capability_attr_grp = {
.attrs = capability_attrs,
};
-static ssize_t TermChar_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbtmc_device_data *data = usb_get_intfdata(intf);
-
- return sprintf(buf, "%c\n", data->TermChar);
-}
-
-static ssize_t TermChar_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbtmc_device_data *data = usb_get_intfdata(intf);
-
- if (count < 1)
- return -EINVAL;
- data->TermChar = buf[0];
- return count;
-}
-static DEVICE_ATTR_RW(TermChar);
-
-#define data_attribute(name) \
-static ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct usb_interface *intf = to_usb_interface(dev); \
- struct usbtmc_device_data *data = usb_get_intfdata(intf); \
- \
- return sprintf(buf, "%d\n", data->name); \
-} \
-static ssize_t name##_store(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct usb_interface *intf = to_usb_interface(dev); \
- struct usbtmc_device_data *data = usb_get_intfdata(intf); \
- ssize_t result; \
- unsigned val; \
- \
- result = sscanf(buf, "%u\n", &val); \
- if (result != 1) \
- result = -EINVAL; \
- data->name = val; \
- if (result < 0) \
- return result; \
- else \
- return count; \
-} \
-static DEVICE_ATTR_RW(name)
-
-data_attribute(TermCharEnabled);
-data_attribute(auto_abort);
-
-static struct attribute *data_attrs[] = {
- &dev_attr_TermChar.attr,
- &dev_attr_TermCharEnabled.attr,
- &dev_attr_auto_abort.attr,
- NULL,
-};
-
-static const struct attribute_group data_attr_grp = {
- .attrs = data_attrs,
-};
-
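The TermChar, TermCharEnabled and auto_abort sysfs attributes are dropped here; auto_abort becomes per-open-file state set through USBTMC_IOCTL_AUTO_ABORT (see the ioctl handler below). A hypothetical userspace sketch, assuming the UAPI definitions added by this series in <linux/usb/tmc.h>:

	#include <sys/ioctl.h>
	#include <linux/usb/tmc.h>

	static int usbtmc_set_auto_abort(int fd, int enable)
	{
		unsigned char on = enable ? 1 : 0;

		return ioctl(fd, USBTMC_IOCTL_AUTO_ABORT, &on);
	}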
static int usbtmc_ioctl_indicator_pulse(struct usbtmc_device_data *data)
{
struct device *dev;
@@ -1229,7 +1864,7 @@ static int usbtmc_ioctl_indicator_pulse(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INDICATOR_PULSE,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 0x01, USBTMC_TIMEOUT);
+ 0, 0, buffer, 0x01, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -1250,6 +1885,63 @@ exit:
return rv;
}
+static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
+ void __user *arg)
+{
+ struct device *dev = &data->intf->dev;
+ struct usbtmc_ctrlrequest request;
+ u8 *buffer = NULL;
+ int rv;
+ unsigned long res;
+
+ res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest));
+ if (res)
+ return -EFAULT;
+
+ if (request.req.wLength > USBTMC_BUFSIZE)
+ return -EMSGSIZE;
+
+ if (request.req.wLength) {
+ buffer = kmalloc(request.req.wLength, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ if ((request.req.bRequestType & USB_DIR_IN) == 0) {
+ /* Send control data to device */
+ res = copy_from_user(buffer, request.data,
+ request.req.wLength);
+ if (res) {
+ rv = -EFAULT;
+ goto exit;
+ }
+ }
+ }
+
+ rv = usb_control_msg(data->usb_dev,
+ usb_rcvctrlpipe(data->usb_dev, 0),
+ request.req.bRequest,
+ request.req.bRequestType,
+ request.req.wValue,
+ request.req.wIndex,
+ buffer, request.req.wLength, USB_CTRL_GET_TIMEOUT);
+
+ if (rv < 0) {
+ dev_err(dev, "%s failed %d\n", __func__, rv);
+ goto exit;
+ }
+
+ if (rv && (request.req.bRequestType & USB_DIR_IN)) {
+ /* Read control data from device */
+ res = copy_to_user(request.data, buffer, rv);
+ if (res)
+ rv = -EFAULT;
+ }
+
+ exit:
+ kfree(buffer);
+ return rv;
+}
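A hypothetical userspace sketch of the new generic control request path, assuming struct usbtmc_ctrlrequest from the UAPI header added by this series (here issuing the class GET_CAPABILITIES request, bRequest 7, as an example):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/usb/tmc.h>

	static int usbtmc_get_caps(int fd, unsigned char caps[24])
	{
		struct usbtmc_ctrlrequest req;

		memset(&req, 0, sizeof(req));
		req.req.bRequestType = 0xA1;	/* IN | CLASS | INTERFACE */
		req.req.bRequest = 7;		/* USBTMC GET_CAPABILITIES */
		req.req.wLength = 24;
		req.data = caps;
		return ioctl(fd, USBTMC_IOCTL_CTRL_REQUEST, &req);
	}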
+
/*
* Get the usb timeout value
*/
@@ -1331,6 +2023,7 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
int retval = -EBADRQC;
+ __u8 tmp_byte;
file_data = file->private_data;
data = file_data->data;
@@ -1366,6 +2059,10 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
retval = usbtmc_ioctl_abort_bulk_in(data);
break;
+ case USBTMC_IOCTL_CTRL_REQUEST:
+ retval = usbtmc_ioctl_request(data, (void __user *)arg);
+ break;
+
case USBTMC_IOCTL_GET_TIMEOUT:
retval = usbtmc_ioctl_get_timeout(file_data,
(void __user *)arg);
@@ -1386,12 +2083,29 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
(void __user *)arg);
break;
+ case USBTMC_IOCTL_WRITE:
+ retval = usbtmc_ioctl_generic_write(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_READ:
+ retval = usbtmc_ioctl_generic_read(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_WRITE_RESULT:
+ retval = usbtmc_ioctl_write_result(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_API_VERSION:
+ retval = put_user(USBTMC_API_VERSION,
+ (__u32 __user *)arg);
+ break;
+
case USBTMC488_IOCTL_GET_CAPS:
- retval = copy_to_user((void __user *)arg,
- &data->usb488_caps,
- sizeof(data->usb488_caps));
- if (retval)
- retval = -EFAULT;
+ retval = put_user(data->usb488_caps,
+ (unsigned char __user *)arg);
break;
case USBTMC488_IOCTL_READ_STB:
@@ -1417,6 +2131,30 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case USBTMC488_IOCTL_TRIGGER:
retval = usbtmc488_ioctl_trigger(file_data);
break;
+
+ case USBTMC488_IOCTL_WAIT_SRQ:
+ retval = usbtmc488_ioctl_wait_srq(file_data,
+ (__u32 __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_MSG_IN_ATTR:
+ retval = put_user(file_data->bmTransferAttributes,
+ (__u8 __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_AUTO_ABORT:
+ retval = get_user(tmp_byte, (unsigned char __user *)arg);
+ if (retval == 0)
+ file_data->auto_abort = !!tmp_byte;
+ break;
+
+ case USBTMC_IOCTL_CANCEL_IO:
+ retval = usbtmc_ioctl_cancel_io(file_data);
+ break;
+
+ case USBTMC_IOCTL_CLEANUP_IO:
+ retval = usbtmc_ioctl_cleanup_io(file_data);
+ break;
}
skip_io_on_zombie:
@@ -1446,7 +2184,28 @@ static __poll_t usbtmc_poll(struct file *file, poll_table *wait)
poll_wait(file, &data->waitq, wait);
- mask = (atomic_read(&file_data->srq_asserted)) ? EPOLLPRI : 0;
+ /* Note that EPOLLPRI is now assigned to SRQ, and
+ * EPOLLIN|EPOLLRDNORM to normal read data.
+ */
+ mask = 0;
+ if (atomic_read(&file_data->srq_asserted))
+ mask |= EPOLLPRI;
+
+ /* Note that the 'submitted' anchor includes all URBs for BULK IN
+ * and OUT, so EPOLLOUT is signaled when BULK OUT is empty and
+ * all BULK IN URBs have completed and moved to in_anchor.
+ */
+ if (usb_anchor_empty(&file_data->submitted))
+ mask |= (EPOLLOUT | EPOLLWRNORM);
+ if (!usb_anchor_empty(&file_data->in_anchor))
+ mask |= (EPOLLIN | EPOLLRDNORM);
+
+ spin_lock_irq(&file_data->err_lock);
+ if (file_data->in_status || file_data->out_status)
+ mask |= EPOLLERR;
+ spin_unlock_irq(&file_data->err_lock);
+
+ dev_dbg(&data->intf->dev, "poll mask = %x\n", mask);
no_poll:
mutex_unlock(&data->io_mutex);
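A minimal userspace sketch of the extended poll() semantics described in the comments above (POLLPRI for SRQ, POLLIN/POLLOUT for async transfer state, POLLERR on a stored transfer error):

	#include <poll.h>
	#include <stdio.h>

	static void usbtmc_wait_event(int fd, int timeout_ms)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT | POLLPRI };

		if (poll(&pfd, 1, timeout_ms) <= 0)
			return;
		if (pfd.revents & POLLPRI)
			printf("SRQ asserted: read the status byte\n");
		if (pfd.revents & POLLIN)
			printf("completed bulk IN data ready to collect\n");
		if (pfd.revents & POLLERR)
			printf("async transfer failed: cancel/cleanup I/O\n");
	}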
@@ -1459,6 +2218,7 @@ static const struct file_operations fops = {
.write = usbtmc_write,
.open = usbtmc_open,
.release = usbtmc_release,
+ .flush = usbtmc_flush,
.unlocked_ioctl = usbtmc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = usbtmc_ioctl,
@@ -1552,7 +2312,9 @@ static void usbtmc_free_int(struct usbtmc_device_data *data)
return;
usb_kill_urb(data->iin_urb);
kfree(data->iin_buffer);
+ data->iin_buffer = NULL;
usb_free_urb(data->iin_urb);
+ data->iin_urb = NULL;
kref_put(&data->kref, usbtmc_delete);
}
@@ -1585,8 +2347,6 @@ static int usbtmc_probe(struct usb_interface *intf,
/* Initialize USBTMC bTag and other fields */
data->bTag = 1;
- data->TermCharEnabled = 0;
- data->TermChar = '\n';
/* 2 <= bTag <= 127 USBTMC-USB488 subclass specification 4.3.1 */
data->iin_bTag = 2;
@@ -1603,6 +2363,7 @@ static int usbtmc_probe(struct usb_interface *intf,
}
data->bulk_in = bulk_in->bEndpointAddress;
+ data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
data->bulk_out = bulk_out->bEndpointAddress;
@@ -1659,12 +2420,10 @@ static int usbtmc_probe(struct usb_interface *intf,
}
}
- retcode = sysfs_create_group(&intf->dev.kobj, &data_attr_grp);
-
retcode = usb_register_dev(intf, &usbtmc_class);
if (retcode) {
- dev_err(&intf->dev, "Not able to get a minor"
- " (base %u, slice default): %d\n", USBTMC_MINOR_BASE,
+ dev_err(&intf->dev, "Not able to get a minor (base %u, slice default): %d\n",
+ USBTMC_MINOR_BASE,
retcode);
goto error_register;
}
@@ -1674,7 +2433,6 @@ static int usbtmc_probe(struct usb_interface *intf,
error_register:
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
- sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
usbtmc_free_int(data);
err_put:
kref_put(&data->kref, usbtmc_delete);
@@ -1684,26 +2442,103 @@ err_put:
static void usbtmc_disconnect(struct usb_interface *intf)
{
struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ struct list_head *elem;
usb_deregister_dev(intf, &usbtmc_class);
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
- sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
mutex_lock(&data->io_mutex);
data->zombie = 1;
wake_up_interruptible_all(&data->waitq);
+ list_for_each(elem, &data->file_list) {
+ struct usbtmc_file_data *file_data;
+
+ file_data = list_entry(elem,
+ struct usbtmc_file_data,
+ file_elem);
+ usb_kill_anchored_urbs(&file_data->submitted);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+ }
mutex_unlock(&data->io_mutex);
usbtmc_free_int(data);
kref_put(&data->kref, usbtmc_delete);
}
+static void usbtmc_draw_down(struct usbtmc_file_data *file_data)
+{
+ int time;
+
+ time = usb_wait_anchor_empty_timeout(&file_data->submitted, 1000);
+ if (!time)
+ usb_kill_anchored_urbs(&file_data->submitted);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+}
+
static int usbtmc_suspend(struct usb_interface *intf, pm_message_t message)
{
- /* this driver does not have pending URBs */
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ struct list_head *elem;
+
+ if (!data)
+ return 0;
+
+ mutex_lock(&data->io_mutex);
+ list_for_each(elem, &data->file_list) {
+ struct usbtmc_file_data *file_data;
+
+ file_data = list_entry(elem,
+ struct usbtmc_file_data,
+ file_elem);
+ usbtmc_draw_down(file_data);
+ }
+
+ if (data->iin_ep_present && data->iin_urb)
+ usb_kill_urb(data->iin_urb);
+
+ mutex_unlock(&data->io_mutex);
return 0;
}
static int usbtmc_resume(struct usb_interface *intf)
{
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ int retcode = 0;
+
+ if (data->iin_ep_present && data->iin_urb)
+ retcode = usb_submit_urb(data->iin_urb, GFP_KERNEL);
+ if (retcode)
+ dev_err(&intf->dev, "Failed to submit iin_urb\n");
+
+ return retcode;
+}
+
+static int usbtmc_pre_reset(struct usb_interface *intf)
+{
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ struct list_head *elem;
+
+ if (!data)
+ return 0;
+
+ mutex_lock(&data->io_mutex);
+
+ list_for_each(elem, &data->file_list) {
+ struct usbtmc_file_data *file_data;
+
+ file_data = list_entry(elem,
+ struct usbtmc_file_data,
+ file_elem);
+ usbtmc_ioctl_cancel_io(file_data);
+ }
+
+ return 0;
+}
+
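+/*
+ * Note: the io_mutex taken in usbtmc_pre_reset() is deliberately held
+ * across the reset and only released in usbtmc_post_reset() below, so
+ * no file I/O can run while the device is being reset.
+ */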
+static int usbtmc_post_reset(struct usb_interface *intf)
+{
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+
+ mutex_unlock(&data->io_mutex);
+
return 0;
}
@@ -1714,6 +2549,8 @@ static struct usb_driver usbtmc_driver = {
.disconnect = usbtmc_disconnect,
.suspend = usbtmc_suspend,
.resume = usbtmc_resume,
+ .pre_reset = usbtmc_pre_reset,
+ .post_reset = usbtmc_post_reset,
};
module_usb_driver(usbtmc_driver);
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 77eef8acff94..f641342cdec0 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -101,12 +101,8 @@ void hcd_buffer_destroy(struct usb_hcd *hcd)
return;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
- struct dma_pool *pool = hcd->pool[i];
-
- if (pool) {
- dma_pool_destroy(pool);
- hcd->pool[i] = NULL;
- }
+ dma_pool_destroy(hcd->pool[i]);
+ hcd->pool[i] = NULL;
}
}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index a1f225f077cd..53564386ed57 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -510,7 +510,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *priv)
{
struct device *dev;
- struct usb_device *udev;
int retval = 0;
if (!iface)
@@ -524,8 +523,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
if (!iface->authorized)
return -ENODEV;
- udev = interface_to_usbdev(iface);
-
dev->driver = &driver->drvwrap.driver;
usb_set_intfdata(iface, priv);
iface->needs_binding = 0;
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index bc8242bc4564..356b05c82dbc 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -21,6 +21,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <uapi/linux/usb/audio.h>
#include "usb.h"
static inline const char *plural(int n)
@@ -42,6 +43,16 @@ static int is_activesync(struct usb_interface_descriptor *desc)
&& desc->bInterfaceProtocol == 1;
}
+static bool is_audio(struct usb_interface_descriptor *desc)
+{
+ return desc->bInterfaceClass == USB_CLASS_AUDIO;
+}
+
+static bool is_uac3_config(struct usb_interface_descriptor *desc)
+{
+ return desc->bInterfaceProtocol == UAC_VERSION_3;
+}
+
int usb_choose_configuration(struct usb_device *udev)
{
int i;
@@ -121,6 +132,22 @@ int usb_choose_configuration(struct usb_device *udev)
#endif
}
+ /*
+ * Select the first configuration as default for audio so that
+ * devices that don't comply with the UAC3 protocol are still
+ * supported. But still iterate through the other configurations
+ * and select a UAC3-compliant config if present.
+ */
+ if (i == 0 && num_configs > 1 && desc && is_audio(desc)) {
+ best = c;
+ continue;
+ }
+
+ if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) {
+ best = c;
+ break;
+ }
+
/* From the remaining configs, choose the first one whose
* first interface is for a non-vendor-specific class.
* Reason: Linux is more likely to have a class driver
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 1c21955fe7c0..487025d31d44 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1738,7 +1738,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
- unsigned long flags;
urb->hcpriv = NULL;
if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
@@ -1755,20 +1754,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
-
- /*
- * We disable local IRQs here avoid possible deadlock because
- * drivers may call spin_lock() to hold lock which might be
- * acquired in one hard interrupt handler.
- *
- * The local_irq_save()/local_irq_restore() around complete()
- * will be removed if current USB drivers have been cleaned up
- * and no one may trigger the above deadlock situation when
- * running complete() in tasklet.
- */
- local_irq_save(flags);
urb->complete(urb);
- local_irq_restore(flags);
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 462ce49f683a..c6077d582d29 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -28,6 +28,7 @@
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/pm_qos.h>
+#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
@@ -2660,11 +2661,13 @@ static bool use_new_scheme(struct usb_device *udev, int retry,
{
int old_scheme_first_port =
port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME;
+ int quick_enumeration = (udev->speed == USB_SPEED_HIGH);
if (udev->speed >= USB_SPEED_SUPER)
return false;
- return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first);
+ return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first
+ || quick_enumeration);
}
/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
@@ -5147,6 +5150,42 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
usb_lock_port(port_dev);
}
+/* Handle notifying userspace about hub over-current events */
+static void port_over_current_notify(struct usb_port *port_dev)
+{
+ static char *envp[] = { NULL, NULL, NULL };
+ struct device *hub_dev;
+ char *port_dev_path;
+
+ sysfs_notify(&port_dev->dev.kobj, NULL, "over_current_count");
+
+ hub_dev = port_dev->dev.parent;
+
+ if (!hub_dev)
+ return;
+
+ port_dev_path = kobject_get_path(&port_dev->dev.kobj, GFP_KERNEL);
+ if (!port_dev_path)
+ return;
+
+ envp[0] = kasprintf(GFP_KERNEL, "OVER_CURRENT_PORT=%s", port_dev_path);
+ if (!envp[0])
+ goto exit_path;
+
+ envp[1] = kasprintf(GFP_KERNEL, "OVER_CURRENT_COUNT=%u",
+ port_dev->over_current_count);
+ if (!envp[1])
+ goto exit;
+
+ kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
+
+ kfree(envp[1]);
+exit:
+ kfree(envp[0]);
+exit_path:
+ kfree(port_dev_path);
+}
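The resulting KOBJ_CHANGE uevent on the hub carries two variables, for example (hypothetical path) OVER_CURRENT_PORT=/devices/.../usb1/1-1/1-1:1.0/1-1-port2 and OVER_CURRENT_COUNT=1, in addition to the sysfs_notify() on the port's over_current_count attribute.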
+
static void port_event(struct usb_hub *hub, int port1)
__must_hold(&port_dev->status_lock)
{
@@ -5189,6 +5228,7 @@ static void port_event(struct usb_hub *hub, int port1)
if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
u16 status = 0, unused;
port_dev->over_current_count++;
+ port_over_current_notify(port_dev);
dev_dbg(&port_dev->dev, "over-current change #%u\n",
port_dev->over_current_count);
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index 9879767452a2..38b2c776c4b4 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -23,10 +23,11 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
struct list_head *list)
{
struct usb_phy_roothub *roothub_entry;
- struct phy *phy = devm_of_phy_get_by_index(dev, dev->of_node, index);
+ struct phy *phy;
- if (IS_ERR_OR_NULL(phy)) {
- if (!phy || PTR_ERR(phy) == -ENODEV)
+ phy = devm_of_phy_get_by_index(dev, dev->of_node, index);
+ if (IS_ERR(phy)) {
+ if (PTR_ERR(phy) == -ENODEV)
return 0;
else
return PTR_ERR(phy);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 4a2143195395..1a06a4b5fbb1 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -16,6 +16,15 @@ static int usb_port_block_power_off;
static const struct attribute_group *port_dev_group[];
+static ssize_t location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+
+ return sprintf(buf, "0x%08x\n", port_dev->location);
+}
+static DEVICE_ATTR_RO(location);
+
static ssize_t connect_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -140,6 +149,7 @@ static DEVICE_ATTR_RW(usb3_lpm_permit);
static struct attribute *port_dev_attrs[] = {
&dev_attr_connect_type.attr,
+ &dev_attr_location.attr,
&dev_attr_quirks.attr,
&dev_attr_over_current_count.attr,
NULL,
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index cc9c93affa14..30bab8463c96 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -393,6 +393,20 @@ enum dwc2_ep0_state {
* 0 - No
* 1 - Yes
* @hird_threshold: Value of BESL or HIRD Threshold.
+ * @ref_clk_per: Indicates the period of ref_clk in picoseconds.
+ * 62500 - 16MHz
+ * 58823 - 17MHz
+ * 52083 - 19.2MHz
+ * 50000 - 20MHz
+ * 41666 - 24MHz
+ * 33333 - 30MHz (default)
+ * 25000 - 40MHz
+ * @sof_cnt_wkup_alert: Indicates the number of SOFs after which the
+ * controller should generate an interrupt if the device has stayed
+ * in L1 state for that whole period. Software uses this to initiate
+ * remote wakeup in the controller so as to sync to the (micro)frame
+ * number from the host.
* @activate_stm_fs_transceiver: Activate internal transceiver using GGPIO
* register.
* 0 - Deactivate the transceiver (default)
@@ -416,6 +430,9 @@ enum dwc2_ep0_state {
* back to DWC2_SPEED_PARAM_HIGH while device is gone.
* 0 - No (default)
* 1 - Yes
+ * @service_interval: Enable service interval based scheduling.
+ * 0 - No
+ * 1 - Yes
*
* The following parameters may be specified when starting the module. These
* parameters define how the DWC_otg controller should be configured. A
@@ -461,6 +478,7 @@ struct dwc2_core_params {
bool lpm_clock_gating;
bool besl;
bool hird_threshold_en;
+ bool service_interval;
u8 hird_threshold;
bool activate_stm_fs_transceiver;
bool ipg_isoc_en;
@@ -468,6 +486,10 @@ struct dwc2_core_params {
u32 max_transfer_size;
u32 ahbcfg;
+ /* GREFCLK parameters */
+ u32 ref_clk_per;
+ u16 sof_cnt_wkup_alert;
+
/* Host parameters */
bool host_dma;
bool dma_desc_enable;
@@ -605,6 +627,10 @@ struct dwc2_core_params {
* FIFO sizing is enabled 16 to 32768
* Actual maximum value is autodetected and also
* the default.
+ * @service_interval_mode: For enabling service interval based scheduling in the
+ * controller.
+ * 0 - Disable
+ * 1 - Enable
*/
struct dwc2_hw_params {
unsigned op_mode:3;
@@ -635,6 +661,7 @@ struct dwc2_hw_params {
unsigned utmi_phy_data_width:2;
unsigned lpm_mode:1;
unsigned ipg_isoc_en:1;
+ unsigned service_interval_mode:1;
u32 snpsid;
u32 dev_ep_dirs;
u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
@@ -1354,6 +1381,7 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg);
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg);
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg);
void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg);
+void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg);
#else
static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2)
{ return 0; }
@@ -1388,6 +1416,7 @@ static inline int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
static inline int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg) {}
#endif
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 22d015b0424f..7f62f4cdc265 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -701,6 +701,7 @@ static int params_show(struct seq_file *seq, void *v)
print_param(seq, p, besl);
print_param(seq, p, hird_threshold_en);
print_param(seq, p, hird_threshold);
+ print_param(seq, p, service_interval);
print_param(seq, p, host_dma);
print_param(seq, p, g_dma);
print_param(seq, p, g_dma_desc);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 220c0f9b89b0..2d6d2c8244de 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -123,6 +123,24 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
}
/**
+ * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
+ * by one.
+ * @hs_ep: The endpoint.
+ *
+ * This function is used in the service interval based scheduling flow to
+ * calculate the descriptor frame number field value. In service interval
+ * mode the frame number in the descriptor should point to the last (u)frame
+ * of the interval.
+ *
+ */
+static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
+{
+ if (hs_ep->target_frame)
+ hs_ep->target_frame -= 1;
+ else
+ hs_ep->target_frame = DSTS_SOFFN_LIMIT;
+}
+
+/**
* dwc2_hsotg_en_gsint - enable one or more of the general interrupt
* @hsotg: The device state
* @ints: A bitmask of the interrupts to enable
@@ -228,6 +246,27 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ */
+static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts2;
+ u32 gintmsk2;
+
+ gintsts2 = dwc2_readl(hsotg, GINTSTS2);
+ gintmsk2 = dwc2_readl(hsotg, GINTMSK2);
+
+ if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
+ dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
+ dwc2_clear_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
+ dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
+ }
+}
+
+/**
* dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
* TX FIFOs
*
@@ -2812,6 +2851,23 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
if (using_desc_dma(hsotg)) {
hs_ep->target_frame = hsotg->frame_number;
dwc2_gadget_incr_frame_num(hs_ep);
+
+ /* In service interval mode target_frame must
+ * be set to the last (u)frame of the service interval.
+ */
+ if (hsotg->params.service_interval) {
+ /* Set target_frame to the first (u)frame of
+ * the service interval; ~interval + 1 equals
+ * -interval, so masking with it rounds down to
+ * a multiple of the power-of-two interval.
+ */
+ hs_ep->target_frame &= ~hs_ep->interval + 1;
+
+ /* Set target_frame to the last (u)frame of
+ * the service interval
+ */
+ dwc2_gadget_incr_frame_num(hs_ep);
+ dwc2_gadget_dec_frame_num_by_one(hs_ep);
+ }
+
dwc2_gadget_start_isoc_ddma(hs_ep);
return;
}
@@ -3109,6 +3165,8 @@ static void kill_all_requests(struct dwc2_hsotg *hsotg,
dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
}
+static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
+
/**
* dwc2_hsotg_disconnect - disconnect service
* @hsotg: The device state.
@@ -3127,13 +3185,12 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
hsotg->connected = 0;
hsotg->test_mode = 0;
+ /* all endpoints should be shut down */
for (ep = 0; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
- kill_all_requests(hsotg, hsotg->eps_in[ep],
- -ESHUTDOWN);
+ dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
- kill_all_requests(hsotg, hsotg->eps_out[ep],
- -ESHUTDOWN);
+ dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
}
call_gadget(hsotg, disconnect);
@@ -3191,13 +3248,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
u32 val;
u32 usbcfg;
u32 dcfg = 0;
+ int ep;
/* Kill any ep0 requests as controller will be reinitialized */
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
- if (!is_usb_reset)
+ if (!is_usb_reset) {
if (dwc2_core_reset(hsotg, true))
return;
+ } else {
+ /* all endpoints should be shut down */
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+ if (hsotg->eps_out[ep])
+ dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+ }
+ }
/*
* we must now enable ep0 ready for host detection and then
@@ -3312,6 +3379,10 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
}
+ /* Enable Service Interval mode if supported */
+ if (using_desc_dma(hsotg) && hsotg->params.service_interval)
+ dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);
+
dwc2_writel(hsotg, 0, DAINTMSK);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
@@ -3368,6 +3439,10 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
/* configure the core to support LPM */
dwc2_gadget_init_lpm(hsotg);
+ /* program GREFCLK register if needed */
+ if (using_desc_dma(hsotg) && hsotg->params.service_interval)
+ dwc2_gadget_program_ref_clk(hsotg);
+
/* must be at-least 3ms to allow bus to see disconnect */
mdelay(3);
@@ -3676,6 +3751,10 @@ irq_retry:
if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
goto irq_retry;
+ /* Check WKUP_ALERT interrupt */
+ if (hsotg->params.service_interval)
+ dwc2_gadget_wkup_alert_handler(hsotg);
+
spin_unlock(&hsotg->lock);
return IRQ_HANDLED;
@@ -3993,6 +4072,7 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
unsigned long flags;
u32 epctrl_reg;
u32 ctrl;
+ int locked;
dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
@@ -4008,7 +4088,9 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
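+ /*
+ * This function may be entered with hsotg->lock already held, e.g.
+ * when called from dwc2_hsotg_disconnect(), so the lock is only
+ * taken here when it is free.
+ */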
- spin_lock_irqsave(&hsotg->lock, flags);
+ locked = spin_is_locked(&hsotg->lock);
+ if (!locked)
+ spin_lock_irqsave(&hsotg->lock, flags);
ctrl = dwc2_readl(hsotg, epctrl_reg);
@@ -4032,7 +4114,9 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
hs_ep->fifo_index = 0;
hs_ep->fifo_size = 0;
- spin_unlock_irqrestore(&hsotg->lock, flags);
+ if (!locked)
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
return 0;
}
@@ -4944,6 +5028,29 @@ void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
dwc2_writel(hsotg, val, GLPMCFG);
dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
+
+ /* Unmask WKUP_ALERT Interrupt */
+ if (hsotg->params.service_interval)
+ dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
+}
+
+/**
+ * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ */
+void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
+{
+ u32 val = 0;
+
+ val |= GREFCLK_REF_CLK_MODE;
+ val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
+ val |= hsotg->params.sof_cnt_wkup_alert <<
+ GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;
+
+ dwc2_writel(hsotg, val, GREFCLK);
+ dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
}
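The GREFCLK period value is simply the ref_clk period expressed in picoseconds; a worked sketch (an assumption, not part of the patch) that reproduces the table documented in core.h:

	/* 10^12 / rate_hz, e.g. 10^12 / 30000000 = 33333 for the 30 MHz default */
	static u32 dwc2_ref_clk_period_ps(unsigned long rate_hz)
	{
		return (u32)(1000000000000ULL / rate_hz);
	}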
/**
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 2bd6e6bfc241..dd82fa516f3f 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -358,16 +358,10 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
{
- int ret;
-
- hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
- if (IS_ERR(hsotg->vbus_supply)) {
- ret = PTR_ERR(hsotg->vbus_supply);
- hsotg->vbus_supply = NULL;
- return ret == -ENODEV ? 0 : ret;
- }
+ if (hsotg->vbus_supply)
+ return regulator_enable(hsotg->vbus_supply);
- return regulator_enable(hsotg->vbus_supply);
+ return 0;
}
static int dwc2_vbus_supply_exit(struct dwc2_hsotg *hsotg)
@@ -1328,14 +1322,11 @@ static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
u32 remaining_count;
u32 byte_count;
u32 dword_count;
- u32 __iomem *data_fifo;
u32 *data_buf = (u32 *)chan->xfer_buf;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
- data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
-
remaining_count = chan->xfer_len - chan->xfer_count;
if (remaining_count > chan->max_packet)
byte_count = chan->max_packet;
@@ -3564,6 +3555,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
u32 port_status;
u32 speed;
u32 pcgctl;
+ u32 pwr;
switch (typereq) {
case ClearHubFeature:
@@ -3612,8 +3604,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
hprt0 &= ~HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
+ if (pwr)
+ dwc2_vbus_supply_exit(hsotg);
break;
case USB_PORT_FEAT_INDICATOR:
@@ -3823,8 +3818,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"SetPortFeature - USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
hprt0 |= HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
+ if (!pwr)
+ dwc2_vbus_supply_init(hsotg);
break;
case USB_PORT_FEAT_RESET:
@@ -3841,6 +3839,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dwc2_writel(hsotg, 0, PCGCTL);
hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
/* Clear suspend bit if resetting from suspend state */
hprt0 &= ~HPRT0_SUSP;
@@ -3854,6 +3853,8 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"In host mode, hprt0=%08x\n", hprt0);
dwc2_writel(hsotg, hprt0, HPRT0);
+ if (!pwr)
+ dwc2_vbus_supply_init(hsotg);
}
/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
@@ -4393,6 +4394,8 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
struct usb_bus *bus = hcd_to_bus(hcd);
unsigned long flags;
+ u32 hprt0;
+ int ret;
dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
@@ -4408,6 +4411,17 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
dwc2_hcd_reinit(hsotg);
+ hprt0 = dwc2_read_hprt0(hsotg);
+ /* Has vbus power been turned on in dwc2_core_host_init ? */
+ if (hprt0 & HPRT0_PWR) {
+ /* Enable external vbus supply before resuming root hub */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ ret = dwc2_vbus_supply_init(hsotg);
+ if (ret)
+ return ret;
+ spin_lock_irqsave(&hsotg->lock, flags);
+ }
+
/* Initialize and connect root hub if one is not already attached */
if (bus->root_hub) {
dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
@@ -4417,7 +4431,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
spin_unlock_irqrestore(&hsotg->lock, flags);
- return dwc2_vbus_supply_init(hsotg);
+ return 0;
}
/*
@@ -4428,6 +4442,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
unsigned long flags;
+ u32 hprt0;
/* Turn off all host-specific interrupts */
dwc2_disable_host_interrupts(hsotg);
@@ -4436,6 +4451,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
synchronize_irq(hcd->irq);
spin_lock_irqsave(&hsotg->lock, flags);
+ hprt0 = dwc2_read_hprt0(hsotg);
/* Ensure hcd is disconnected */
dwc2_hcd_disconnect(hsotg, true);
dwc2_hcd_stop(hsotg);
@@ -4444,7 +4460,9 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irqrestore(&hsotg->lock, flags);
- dwc2_vbus_supply_exit(hsotg);
+ /* keep balanced supply init/exit by checking HPRT0_PWR */
+ if (hprt0 & HPRT0_PWR)
+ dwc2_vbus_supply_exit(hsotg);
usleep_range(1000, 3000);
}
@@ -4482,7 +4500,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
hprt0 |= HPRT0_SUSP;
hprt0 &= ~HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_vbus_supply_exit(hsotg);
+ spin_lock_irqsave(&hsotg->lock, flags);
}
/* Enter partial_power_down */
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 0ca8e7bc7aaf..2b1ea441b7d4 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -312,6 +312,7 @@
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
#define GHWCFG4_ACG_SUPPORTED BIT(12)
#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11)
+#define GHWCFG4_SERVICE_INTERVAL_SUPPORTED BIT(10)
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
@@ -404,6 +405,19 @@
#define ADPCTL_PRB_DSCHRG_MASK (0x3 << 0)
#define ADPCTL_PRB_DSCHRG_SHIFT 0
+#define GREFCLK HSOTG_REG(0x0064)
+#define GREFCLK_REFCLKPER_MASK (0x1ffff << 15)
+#define GREFCLK_REFCLKPER_SHIFT 15
+#define GREFCLK_REF_CLK_MODE BIT(14)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_MASK (0x3ff)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT 0
+
+#define GINTMSK2 HSOTG_REG(0x0068)
+#define GINTMSK2_WKUP_ALERT_INT_MSK BIT(0)
+
+#define GINTSTS2 HSOTG_REG(0x006c)
+#define GINTSTS2_WKUP_ALERT_INT BIT(0)
+
#define HPTXFSIZ HSOTG_REG(0x100)
/* Use FIFOSIZE_* constants to access this register */
@@ -443,6 +457,7 @@
#define DCFG_DEVSPD_FS48 3
#define DCTL HSOTG_REG(0x804)
+#define DCTL_SERVICE_INTERVAL_SUPPORTED BIT(19)
#define DCTL_PWRONPRGDONE BIT(11)
#define DCTL_CGOUTNAK BIT(10)
#define DCTL_SGOUTNAK BIT(9)
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index bf7052e037d6..7c1b6938f212 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -81,6 +81,7 @@ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
p->host_perio_tx_fifo_size = 256;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
GAHBCFG_HBSTLEN_SHIFT;
+ p->power_down = 0;
}
static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg)
@@ -299,9 +300,12 @@ static void dwc2_set_default_params(struct dwc2_hsotg *hsotg)
p->hird_threshold_en = true;
p->hird_threshold = 4;
p->ipg_isoc_en = false;
+ p->service_interval = false;
p->max_packet_count = hw->max_packet_count;
p->max_transfer_size = hw->max_transfer_size;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR << GAHBCFG_HBSTLEN_SHIFT;
+ p->ref_clk_per = 33333;
+ p->sof_cnt_wkup_alert = 100;
if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
(hsotg->dr_mode == USB_DR_MODE_OTG)) {
@@ -592,6 +596,7 @@ static void dwc2_check_params(struct dwc2_hsotg *hsotg)
CHECK_BOOL(besl, (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a));
CHECK_BOOL(hird_threshold_en, hsotg->params.lpm);
CHECK_RANGE(hird_threshold, 0, hsotg->params.besl ? 12 : 7, 0);
+ CHECK_BOOL(service_interval, hw->service_interval_mode);
CHECK_RANGE(max_packet_count,
15, hw->max_packet_count,
hw->max_packet_count);
@@ -780,6 +785,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
hw->acg_enable = !!(hwcfg4 & GHWCFG4_ACG_SUPPORTED);
hw->ipg_isoc_en = !!(hwcfg4 & GHWCFG4_IPG_ISOC_SUPPORTED);
+ hw->service_interval_mode = !!(hwcfg4 &
+ GHWCFG4_SERVICE_INTERVAL_SUPPORTED);
/* fifo sizes */
hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 577642895b57..c0b64d483552 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -432,6 +432,14 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
return retval;
+ hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
+ if (IS_ERR(hsotg->vbus_supply)) {
+ retval = PTR_ERR(hsotg->vbus_supply);
+ hsotg->vbus_supply = NULL;
+ if (retval != -ENODEV)
+ return retval;
+ }
+
retval = dwc2_lowlevel_hw_enable(hsotg);
if (retval)
return retval;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 518ead12458d..1a0404fda596 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -113,7 +113,7 @@ config USB_DWC3_ST
config USB_DWC3_QCOM
tristate "Qualcomm Platform"
- depends on ARCH_QCOM || COMPILE_TEST
+ depends on EXTCON && (ARCH_QCOM || COMPILE_TEST)
depends on OF
default USB_DWC3
help
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 88c80fcc39f5..becfbb87f791 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -756,7 +756,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
/* check if current dwc3 is on simulation board */
if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
- dev_info(dwc->dev, "Running with FPGA optmizations\n");
+ dev_info(dwc->dev, "Running with FPGA optimizations\n");
dwc->is_fpga = true;
}
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index a94fb1ba8f2c..cb7fcd7c0ad8 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -13,80 +13,30 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/usb_phy_generic.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regulator/consumer.h>
+#define DWC3_EXYNOS_MAX_CLOCKS 4
+
+struct dwc3_exynos_driverdata {
+ const char *clk_names[DWC3_EXYNOS_MAX_CLOCKS];
+ int num_clks;
+ int suspend_clk_idx;
+};
+
struct dwc3_exynos {
- struct platform_device *usb2_phy;
- struct platform_device *usb3_phy;
struct device *dev;
- struct clk *clk;
- struct clk *susp_clk;
- struct clk *axius_clk;
+ const char **clk_names;
+ struct clk *clks[DWC3_EXYNOS_MAX_CLOCKS];
+ int num_clks;
+ int suspend_clk_idx;
struct regulator *vdd33;
struct regulator *vdd10;
};
-static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos)
-{
- struct usb_phy_generic_platform_data pdata;
- struct platform_device *pdev;
- int ret;
-
- memset(&pdata, 0x00, sizeof(pdata));
-
- pdev = platform_device_alloc("usb_phy_generic", PLATFORM_DEVID_AUTO);
- if (!pdev)
- return -ENOMEM;
-
- exynos->usb2_phy = pdev;
- pdata.type = USB_PHY_TYPE_USB2;
- pdata.gpio_reset = -1;
-
- ret = platform_device_add_data(exynos->usb2_phy, &pdata, sizeof(pdata));
- if (ret)
- goto err1;
-
- pdev = platform_device_alloc("usb_phy_generic", PLATFORM_DEVID_AUTO);
- if (!pdev) {
- ret = -ENOMEM;
- goto err1;
- }
-
- exynos->usb3_phy = pdev;
- pdata.type = USB_PHY_TYPE_USB3;
-
- ret = platform_device_add_data(exynos->usb3_phy, &pdata, sizeof(pdata));
- if (ret)
- goto err2;
-
- ret = platform_device_add(exynos->usb2_phy);
- if (ret)
- goto err2;
-
- ret = platform_device_add(exynos->usb3_phy);
- if (ret)
- goto err3;
-
- return 0;
-
-err3:
- platform_device_del(exynos->usb2_phy);
-
-err2:
- platform_device_put(exynos->usb3_phy);
-
-err1:
- platform_device_put(exynos->usb2_phy);
-
- return ret;
-}
-
static int dwc3_exynos_remove_child(struct device *dev, void *unused)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -101,47 +51,42 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
struct dwc3_exynos *exynos;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
-
- int ret;
+ const struct dwc3_exynos_driverdata *driver_data;
+ int i, ret;
exynos = devm_kzalloc(dev, sizeof(*exynos), GFP_KERNEL);
if (!exynos)
return -ENOMEM;
- platform_set_drvdata(pdev, exynos);
+ driver_data = of_device_get_match_data(dev);
+ exynos->dev = dev;
+ exynos->num_clks = driver_data->num_clks;
+ exynos->clk_names = (const char **)driver_data->clk_names;
+ exynos->suspend_clk_idx = driver_data->suspend_clk_idx;
- exynos->dev = dev;
+ platform_set_drvdata(pdev, exynos);
- exynos->clk = devm_clk_get(dev, "usbdrd30");
- if (IS_ERR(exynos->clk)) {
- dev_err(dev, "couldn't get clock\n");
- return -EINVAL;
+ for (i = 0; i < exynos->num_clks; i++) {
+ exynos->clks[i] = devm_clk_get(dev, exynos->clk_names[i]);
+ if (IS_ERR(exynos->clks[i])) {
+ dev_err(dev, "failed to get clock: %s\n",
+ exynos->clk_names[i]);
+ return PTR_ERR(exynos->clks[i]);
+ }
}
- ret = clk_prepare_enable(exynos->clk);
- if (ret)
- return ret;
- exynos->susp_clk = devm_clk_get(dev, "usbdrd30_susp_clk");
- if (IS_ERR(exynos->susp_clk))
- exynos->susp_clk = NULL;
- ret = clk_prepare_enable(exynos->susp_clk);
- if (ret)
- goto susp_clk_err;
-
- if (of_device_is_compatible(node, "samsung,exynos7-dwusb3")) {
- exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
- if (IS_ERR(exynos->axius_clk)) {
- dev_err(dev, "no AXI UpScaler clk specified\n");
- ret = -ENODEV;
- goto axius_clk_err;
+ for (i = 0; i < exynos->num_clks; i++) {
+ ret = clk_prepare_enable(exynos->clks[i]);
+ if (ret) {
+ while (i-- > 0)
+ clk_disable_unprepare(exynos->clks[i]);
+ return ret;
}
- ret = clk_prepare_enable(exynos->axius_clk);
- if (ret)
- goto axius_clk_err;
- } else {
- exynos->axius_clk = NULL;
}
+ if (exynos->suspend_clk_idx >= 0)
+ clk_prepare_enable(exynos->clks[exynos->suspend_clk_idx]);
+
exynos->vdd33 = devm_regulator_get(dev, "vdd33");
if (IS_ERR(exynos->vdd33)) {
ret = PTR_ERR(exynos->vdd33);
@@ -164,12 +109,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
goto vdd10_err;
}
- ret = dwc3_exynos_register_phys(exynos);
- if (ret) {
- dev_err(dev, "couldn't register PHYs\n");
- goto phys_err;
- }
-
if (node) {
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
@@ -185,32 +124,31 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
return 0;
populate_err:
- platform_device_unregister(exynos->usb2_phy);
- platform_device_unregister(exynos->usb3_phy);
-phys_err:
regulator_disable(exynos->vdd10);
vdd10_err:
regulator_disable(exynos->vdd33);
vdd33_err:
- clk_disable_unprepare(exynos->axius_clk);
-axius_clk_err:
- clk_disable_unprepare(exynos->susp_clk);
-susp_clk_err:
- clk_disable_unprepare(exynos->clk);
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
+
+ if (exynos->suspend_clk_idx >= 0)
+ clk_disable_unprepare(exynos->clks[exynos->suspend_clk_idx]);
+
return ret;
}
static int dwc3_exynos_remove(struct platform_device *pdev)
{
struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
+ int i;
device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
- platform_device_unregister(exynos->usb2_phy);
- platform_device_unregister(exynos->usb3_phy);
- clk_disable_unprepare(exynos->axius_clk);
- clk_disable_unprepare(exynos->susp_clk);
- clk_disable_unprepare(exynos->clk);
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
+
+ if (exynos->suspend_clk_idx >= 0)
+ clk_disable_unprepare(exynos->clks[exynos->suspend_clk_idx]);
regulator_disable(exynos->vdd33);
regulator_disable(exynos->vdd10);
@@ -218,10 +156,36 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
return 0;
}
+static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
+ .clk_names = { "usbdrd30" },
+ .num_clks = 1,
+ .suspend_clk_idx = -1,
+};
+
+static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
+ .clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
+ .num_clks = 4,
+ .suspend_clk_idx = 1,
+};
+
+static const struct dwc3_exynos_driverdata exynos7_drvdata = {
+ .clk_names = { "usbdrd30", "usbdrd30_susp_clk", "usbdrd30_axius_clk" },
+ .num_clks = 3,
+ .suspend_clk_idx = 1,
+};
+
static const struct of_device_id exynos_dwc3_match[] = {
- { .compatible = "samsung,exynos5250-dwusb3" },
- { .compatible = "samsung,exynos7-dwusb3" },
- {},
+ {
+ .compatible = "samsung,exynos5250-dwusb3",
+ .data = &exynos5250_drvdata,
+ }, {
+ .compatible = "samsung,exynos5433-dwusb3",
+ .data = &exynos5433_drvdata,
+ }, {
+ .compatible = "samsung,exynos7-dwusb3",
+ .data = &exynos7_drvdata,
+ }, {
+ }
};
MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
@@ -229,9 +193,10 @@ MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
static int dwc3_exynos_suspend(struct device *dev)
{
struct dwc3_exynos *exynos = dev_get_drvdata(dev);
+ int i;
- clk_disable(exynos->axius_clk);
- clk_disable(exynos->clk);
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
regulator_disable(exynos->vdd33);
regulator_disable(exynos->vdd10);
@@ -242,7 +207,7 @@ static int dwc3_exynos_suspend(struct device *dev)
static int dwc3_exynos_resume(struct device *dev)
{
struct dwc3_exynos *exynos = dev_get_drvdata(dev);
- int ret;
+ int i, ret;
ret = regulator_enable(exynos->vdd33);
if (ret) {
@@ -255,13 +220,14 @@ static int dwc3_exynos_resume(struct device *dev)
return ret;
}
- clk_enable(exynos->clk);
- clk_enable(exynos->axius_clk);
-
- /* runtime set active to reflect active state. */
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ for (i = 0; i < exynos->num_clks; i++) {
+ ret = clk_prepare_enable(exynos->clks[i]);
+ if (ret) {
+ while (i-- > 0)
+ clk_disable_unprepare(exynos->clks[i]);
+ return ret;
+ }
+ }
return 0;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2b53194081ba..679c12e14522 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -270,27 +270,36 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
u32 timeout = 1000;
+ u32 saved_config = 0;
u32 reg;
int cmd_status = 0;
- int susphy = false;
int ret = -EINVAL;
/*
- * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
- * we're issuing an endpoint command, we must check if
- * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
+ * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
+ * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
+ * endpoint command.
*
- * We will also set SUSPHY bit to what it was before returning as stated
- * by the same section on Synopsys databook.
+ * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
+ * settings. Restore them after the command is completed.
+ *
+ * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
*/
if (dwc->gadget.speed <= USB_SPEED_HIGH) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
- susphy = true;
+ saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
+
+ if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
+ saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+ }
+
+ if (saved_config)
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
@@ -389,9 +398,9 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
}
}
- if (unlikely(susphy)) {
+ if (saved_config) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ reg |= saved_config;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
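
Replacing the single susphy flag with a saved_config mask lets the same code handle any number of bits: each bit is cleared only if it was set, and exactly the cleared bits are OR-ed back afterwards. A condensed sketch, with reg_read()/reg_write() as placeholders for dwc3_readl()/dwc3_writel() on DWC3_GUSB2PHYCFG(0); the bit masks are the real ones from drivers/usb/dwc3/core.h:

static u32 reg_read(void);		/* placeholder accessors */
static void reg_write(u32 val);

static void ep_cmd_phy_workaround(void)
{
	u32 reg, saved_config = 0;

	reg = reg_read();
	if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
		saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
	}
	if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
		saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
		reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
	}
	if (saved_config)
		reg_write(reg);	/* write once, only when something changed */

	/* ... issue the endpoint command here ... */

	if (saved_config)
		reg_write(reg_read() | saved_config);	/* restore saved bits */
}
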
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index e15e896f356c..165653a5e45d 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -717,17 +717,14 @@ static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
- size_t remain_length;
u32 comp_code;
int ep_id;
comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
- remain_length = EVENT_TRB_LEN(le32_to_cpu(evt_trb->field[2]));
ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));
switch (comp_code) {
case COMP_SUCCESS:
- remain_length = 0;
case COMP_SHORT_PACKET:
break;
case COMP_TRB_ERROR:
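
With the dead remain_length store gone, COMP_SUCCESS simply shares the empty COMP_SHORT_PACKET body; adjacent case labels with no statements between them are idiomatic C and raise no fallthrough warning. In outline:

switch (comp_code) {
case COMP_SUCCESS:		/* nothing left to do on success */
case COMP_SHORT_PACKET:
	break;
default:
	/* error handling continues as before */
	break;
}
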
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index d582921f7257..db2d4980cb35 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -22,12 +22,8 @@
* controlled by two clock sources:
* CLK_5 := c_srate, and CLK_6 := p_srate
*/
-#define USB_OUT_IT_ID 1
-#define IO_IN_IT_ID 2
-#define IO_OUT_OT_ID 3
-#define USB_IN_OT_ID 4
-#define USB_OUT_CLK_ID 5
-#define USB_IN_CLK_ID 6
+#define USB_OUT_CLK_ID (out_clk_src_desc.bClockID)
+#define USB_IN_CLK_ID (in_clk_src_desc.bClockID)
#define CONTROL_ABSENT 0
#define CONTROL_RDONLY 1
@@ -43,6 +39,9 @@
#define UNFLW_CTRL 8
#define OVFLW_CTRL 10
+#define EPIN_EN(_opts) ((_opts)->p_chmask != 0)
+#define EPOUT_EN(_opts) ((_opts)->c_chmask != 0)
+
struct f_uac2 {
struct g_audio g_audio;
u8 ac_intf, as_in_intf, as_out_intf;
@@ -135,7 +134,7 @@ static struct uac_clock_source_descriptor in_clk_src_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC2_CLOCK_SOURCE,
- .bClockID = USB_IN_CLK_ID,
+ /* .bClockID = DYNAMIC */
.bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED,
.bmControls = (CONTROL_RDONLY << CLK_FREQ_CTRL),
.bAssocTerminal = 0,
@@ -147,7 +146,7 @@ static struct uac_clock_source_descriptor out_clk_src_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC2_CLOCK_SOURCE,
- .bClockID = USB_OUT_CLK_ID,
+ /* .bClockID = DYNAMIC */
.bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED,
.bmControls = (CONTROL_RDONLY << CLK_FREQ_CTRL),
.bAssocTerminal = 0,
@@ -159,10 +158,10 @@ static struct uac2_input_terminal_descriptor usb_out_it_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
- .bTerminalID = USB_OUT_IT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
- .bCSourceID = USB_OUT_CLK_ID,
+ /* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -173,10 +172,10 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
- .bTerminalID = IO_IN_IT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED),
.bAssocTerminal = 0,
- .bCSourceID = USB_IN_CLK_ID,
+ /* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -187,11 +186,11 @@ static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
- .bTerminalID = USB_IN_OT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
- .bSourceID = IO_IN_IT_ID,
- .bCSourceID = USB_IN_CLK_ID,
+ /* .bSourceID = DYNAMIC */
+ /* .bCSourceID = DYNAMIC */
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -201,11 +200,11 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
- .bTerminalID = IO_OUT_OT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED),
.bAssocTerminal = 0,
- .bSourceID = USB_OUT_IT_ID,
- .bCSourceID = USB_OUT_CLK_ID,
+ /* .bSourceID = DYNAMIC */
+ /* .bCSourceID = DYNAMIC */
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -253,7 +252,7 @@ static struct uac2_as_header_descriptor as_out_hdr_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
- .bTerminalLink = USB_OUT_IT_ID,
+ /* .bTerminalLink = DYNAMIC */
.bmControls = 0,
.bFormatType = UAC_FORMAT_TYPE_I,
.bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM),
@@ -330,7 +329,7 @@ static struct uac2_as_header_descriptor as_in_hdr_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
- .bTerminalLink = USB_IN_OT_ID,
+ /* .bTerminalLink = DYNAMIC */
.bmControls = 0,
.bFormatType = UAC_FORMAT_TYPE_I,
.bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM),
@@ -471,6 +470,125 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
le16_to_cpu(ep_desc->wMaxPacketSize)));
}
+/* Use a macro to overcome the line length limitation */
+#define USBDHDR(p) (struct usb_descriptor_header *)(p)
+
+static void setup_descriptor(struct f_uac2_opts *opts)
+{
+ /* patch descriptors */
+ int i = 1; /* IDs start at 1 */
+
+ if (EPOUT_EN(opts))
+ usb_out_it_desc.bTerminalID = i++;
+ if (EPIN_EN(opts))
+ io_in_it_desc.bTerminalID = i++;
+ if (EPOUT_EN(opts))
+ io_out_ot_desc.bTerminalID = i++;
+ if (EPIN_EN(opts))
+ usb_in_ot_desc.bTerminalID = i++;
+ if (EPOUT_EN(opts))
+ out_clk_src_desc.bClockID = i++;
+ if (EPIN_EN(opts))
+ in_clk_src_desc.bClockID = i++;
+
+ usb_out_it_desc.bCSourceID = out_clk_src_desc.bClockID;
+ usb_in_ot_desc.bSourceID = io_in_it_desc.bTerminalID;
+ usb_in_ot_desc.bCSourceID = in_clk_src_desc.bClockID;
+ io_in_it_desc.bCSourceID = in_clk_src_desc.bClockID;
+ io_out_ot_desc.bCSourceID = out_clk_src_desc.bClockID;
+ io_out_ot_desc.bSourceID = usb_out_it_desc.bTerminalID;
+ as_out_hdr_desc.bTerminalLink = usb_out_it_desc.bTerminalID;
+ as_in_hdr_desc.bTerminalLink = usb_in_ot_desc.bTerminalID;
+
+ iad_desc.bInterfaceCount = 1;
+ ac_hdr_desc.wTotalLength = 0;
+
+ if (EPIN_EN(opts)) {
+ u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength);
+
+ len += sizeof(in_clk_src_desc);
+ len += sizeof(usb_in_ot_desc);
+ len += sizeof(io_in_it_desc);
+ ac_hdr_desc.wTotalLength = cpu_to_le16(len);
+ iad_desc.bInterfaceCount++;
+ }
+ if (EPOUT_EN(opts)) {
+ u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength);
+
+ len += sizeof(out_clk_src_desc);
+ len += sizeof(usb_out_it_desc);
+ len += sizeof(io_out_ot_desc);
+ ac_hdr_desc.wTotalLength = cpu_to_le16(len);
+ iad_desc.bInterfaceCount++;
+ }
+
+ i = 0;
+ fs_audio_desc[i++] = USBDHDR(&iad_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_ac_if_desc);
+ fs_audio_desc[i++] = USBDHDR(&ac_hdr_desc);
+ if (EPIN_EN(opts))
+ fs_audio_desc[i++] = USBDHDR(&in_clk_src_desc);
+ if (EPOUT_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&out_clk_src_desc);
+ fs_audio_desc[i++] = USBDHDR(&usb_out_it_desc);
+ }
+ if (EPIN_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&io_in_it_desc);
+ fs_audio_desc[i++] = USBDHDR(&usb_in_ot_desc);
+ }
+ if (EPOUT_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&io_out_ot_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_as_out_if0_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_as_out_if1_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_out_hdr_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_out_fmt1_desc);
+ fs_audio_desc[i++] = USBDHDR(&fs_epout_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_iso_out_desc);
+ }
+ if (EPIN_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&std_as_in_if0_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_as_in_if1_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_in_hdr_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_in_fmt1_desc);
+ fs_audio_desc[i++] = USBDHDR(&fs_epin_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_iso_in_desc);
+ }
+ fs_audio_desc[i] = NULL;
+
+ i = 0;
+ hs_audio_desc[i++] = USBDHDR(&iad_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_ac_if_desc);
+ hs_audio_desc[i++] = USBDHDR(&ac_hdr_desc);
+ if (EPIN_EN(opts))
+ hs_audio_desc[i++] = USBDHDR(&in_clk_src_desc);
+ if (EPOUT_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&out_clk_src_desc);
+ hs_audio_desc[i++] = USBDHDR(&usb_out_it_desc);
+ }
+ if (EPIN_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&io_in_it_desc);
+ hs_audio_desc[i++] = USBDHDR(&usb_in_ot_desc);
+ }
+ if (EPOUT_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&io_out_ot_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_as_out_if0_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_as_out_if1_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_out_hdr_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_out_fmt1_desc);
+ hs_audio_desc[i++] = USBDHDR(&hs_epout_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_iso_out_desc);
+ }
+ if (EPIN_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&std_as_in_if0_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_as_in_if1_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_in_hdr_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_in_fmt1_desc);
+ hs_audio_desc[i++] = USBDHDR(&hs_epin_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_iso_in_desc);
+ }
+ hs_audio_desc[i] = NULL;
+}
+
static int
afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
{
@@ -530,25 +648,29 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
uac2->ac_intf = ret;
uac2->ac_alt = 0;
- ret = usb_interface_id(cfg, fn);
- if (ret < 0) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return ret;
+ if (EPOUT_EN(uac2_opts)) {
+ ret = usb_interface_id(cfg, fn);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+ std_as_out_if0_desc.bInterfaceNumber = ret;
+ std_as_out_if1_desc.bInterfaceNumber = ret;
+ uac2->as_out_intf = ret;
+ uac2->as_out_alt = 0;
}
- std_as_out_if0_desc.bInterfaceNumber = ret;
- std_as_out_if1_desc.bInterfaceNumber = ret;
- uac2->as_out_intf = ret;
- uac2->as_out_alt = 0;
- ret = usb_interface_id(cfg, fn);
- if (ret < 0) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return ret;
+ if (EPIN_EN(uac2_opts)) {
+ ret = usb_interface_id(cfg, fn);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+ std_as_in_if0_desc.bInterfaceNumber = ret;
+ std_as_in_if1_desc.bInterfaceNumber = ret;
+ uac2->as_in_intf = ret;
+ uac2->as_in_alt = 0;
}
- std_as_in_if0_desc.bInterfaceNumber = ret;
- std_as_in_if1_desc.bInterfaceNumber = ret;
- uac2->as_in_intf = ret;
- uac2->as_in_alt = 0;
/* Calculate wMaxPacketSize according to audio bandwidth */
set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
@@ -556,16 +678,20 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
- agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
- if (!agdev->out_ep) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return -ENODEV;
+ if (EPOUT_EN(uac2_opts)) {
+ agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
+ if (!agdev->out_ep) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -ENODEV;
+ }
}
- agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
- if (!agdev->in_ep) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return -ENODEV;
+ if (EPIN_EN(uac2_opts)) {
+ agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
+ if (!agdev->in_ep) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -ENODEV;
+ }
}
agdev->in_ep_maxpsize = max_t(u16,
@@ -578,6 +704,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
+ setup_descriptor(uac2_opts);
+
ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL,
NULL);
if (ret)
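
setup_descriptor() is the heart of this change: entity IDs are handed out sequentially only to the entities that exist in the current configuration, every descriptor cross-reference is then patched from the assigned values, and the fs/hs descriptor arrays are rebuilt to contain only the active entries. A reduced sketch of the ID pass (the function name is illustrative; the real function covers all six UAC2 entities and both speeds):

static void assign_entity_ids(bool out_en, bool in_en)
{
	int id = 1;	/* UAC2 entity IDs start at 1; 0 is reserved */

	if (out_en)
		usb_out_it_desc.bTerminalID = id++;
	if (in_en)
		io_in_it_desc.bTerminalID = id++;
	if (out_en)
		out_clk_src_desc.bClockID = id++;
	if (in_en)
		in_clk_src_desc.bClockID = id++;

	/* patch cross-references only after every ID is known */
	usb_out_it_desc.bCSourceID = out_clk_src_desc.bClockID;
	io_in_it_desc.bCSourceID = in_clk_src_desc.bClockID;
}
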
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index d8ce7868fe22..8c99392df593 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -197,12 +197,6 @@ static const struct usb_descriptor_header * const uvc_ss_streaming[] = {
NULL,
};
-void uvc_set_trace_param(unsigned int trace)
-{
- uvc_gadget_trace_param = trace;
-}
-EXPORT_SYMBOL(uvc_set_trace_param);
-
/* --------------------------------------------------------------------------
* Control requests
*/
@@ -232,13 +226,8 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
struct v4l2_event v4l2_event;
struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
- /* printk(KERN_INFO "setup request %02x %02x value %04x index %04x %04x\n",
- * ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue),
- * le16_to_cpu(ctrl->wIndex), le16_to_cpu(ctrl->wLength));
- */
-
if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) {
- INFO(f->config->cdev, "invalid request type\n");
+ uvcg_info(f, "invalid request type\n");
return -EINVAL;
}
@@ -272,7 +261,7 @@ uvc_function_get_alt(struct usb_function *f, unsigned interface)
{
struct uvc_device *uvc = to_uvc(f);
- INFO(f->config->cdev, "uvc_function_get_alt(%u)\n", interface);
+ uvcg_info(f, "%s(%u)\n", __func__, interface);
if (interface == uvc->control_intf)
return 0;
@@ -291,13 +280,13 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
int ret;
- INFO(cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt);
+ uvcg_info(f, "%s(%u, %u)\n", __func__, interface, alt);
if (interface == uvc->control_intf) {
if (alt)
return -EINVAL;
- INFO(cdev, "reset UVC Control\n");
+ uvcg_info(f, "reset UVC Control\n");
usb_ep_disable(uvc->control_ep);
if (!uvc->control_ep->desc)
@@ -348,7 +337,7 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
if (!uvc->video.ep)
return -EINVAL;
- INFO(cdev, "reset UVC\n");
+ uvcg_info(f, "reset UVC\n");
usb_ep_disable(uvc->video.ep);
ret = config_ep_by_speed(f->config->cdev->gadget,
@@ -373,7 +362,7 @@ uvc_function_disable(struct usb_function *f)
struct uvc_device *uvc = to_uvc(f);
struct v4l2_event v4l2_event;
- INFO(f->config->cdev, "uvc_function_disable\n");
+ uvcg_info(f, "%s()\n", __func__);
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_DISCONNECT;
@@ -392,21 +381,19 @@ uvc_function_disable(struct usb_function *f)
void
uvc_function_connect(struct uvc_device *uvc)
{
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
int ret;
if ((ret = usb_function_activate(&uvc->func)) < 0)
- INFO(cdev, "UVC connect failed with %d\n", ret);
+ uvcg_info(&uvc->func, "UVC connect failed with %d\n", ret);
}
void
uvc_function_disconnect(struct uvc_device *uvc)
{
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
int ret;
if ((ret = usb_function_deactivate(&uvc->func)) < 0)
- INFO(cdev, "UVC disconnect failed with %d\n", ret);
+ uvcg_info(&uvc->func, "UVC disconnect failed with %d\n", ret);
}
/* --------------------------------------------------------------------------
@@ -605,7 +592,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
struct f_uvc_opts *opts;
int ret = -EINVAL;
- INFO(cdev, "uvc_function_bind\n");
+ uvcg_info(f, "%s()\n", __func__);
opts = fi_to_f_uvc_opts(f->fi);
/* Sanity check the streaming endpoint module parameters.
@@ -618,8 +605,8 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
if (opts->streaming_maxburst &&
(opts->streaming_maxpacket % 1024) != 0) {
opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
- INFO(cdev, "overriding streaming_maxpacket to %d\n",
- opts->streaming_maxpacket);
+ uvcg_info(f, "overriding streaming_maxpacket to %d\n",
+ opts->streaming_maxpacket);
}
/* Fill in the FS/HS/SS Video Streaming specific descriptors from the
@@ -658,7 +645,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
/* Allocate endpoints. */
ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
if (!ep) {
- INFO(cdev, "Unable to allocate control EP\n");
+ uvcg_info(f, "Unable to allocate control EP\n");
goto error;
}
uvc->control_ep = ep;
@@ -672,7 +659,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
if (!ep) {
- INFO(cdev, "Unable to allocate streaming EP\n");
+ uvcg_info(f, "Unable to allocate streaming EP\n");
goto error;
}
uvc->video.ep = ep;
@@ -699,12 +686,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc_iad.bFirstInterface = ret;
uvc_control_intf.bInterfaceNumber = ret;
uvc->control_intf = ret;
+ opts->control_interface = ret;
if ((ret = usb_interface_id(c, f)) < 0)
goto error;
uvc_streaming_intf_alt0.bInterfaceNumber = ret;
uvc_streaming_intf_alt1.bInterfaceNumber = ret;
uvc->streaming_intf = ret;
+ opts->streaming_interface = ret;
/* Copy descriptors */
f->fs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
@@ -743,19 +732,19 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc->control_req->context = uvc;
if (v4l2_device_register(&cdev->gadget->dev, &uvc->v4l2_dev)) {
- printk(KERN_INFO "v4l2_device_register failed\n");
+ uvcg_err(f, "failed to register V4L2 device\n");
goto error;
}
/* Initialise video. */
- ret = uvcg_video_init(&uvc->video);
+ ret = uvcg_video_init(&uvc->video, uvc);
if (ret < 0)
goto error;
/* Register a V4L2 device. */
ret = uvc_register_video(uvc);
if (ret < 0) {
- printk(KERN_INFO "Unable to register video device\n");
+ uvcg_err(f, "failed to register video device\n");
goto error;
}
@@ -792,6 +781,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
struct uvc_output_terminal_descriptor *od;
struct uvc_color_matching_descriptor *md;
struct uvc_descriptor_header **ctl_cls;
+ int ret;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
@@ -868,7 +858,12 @@ static struct usb_function_instance *uvc_alloc_inst(void)
opts->streaming_interval = 1;
opts->streaming_maxpacket = 1024;
- uvcg_attach_configfs(opts);
+ ret = uvcg_attach_configfs(opts);
+ if (ret < 0) {
+ kfree(opts);
+ return ERR_PTR(ret);
+ }
+
return &opts->func_inst;
}
@@ -886,7 +881,7 @@ static void uvc_unbind(struct usb_configuration *c, struct usb_function *f)
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
- INFO(cdev, "%s\n", __func__);
+ uvcg_info(f, "%s\n", __func__);
device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
video_unregister_device(&uvc->vdev);
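
uvc_alloc_inst() now propagates the uvcg_attach_configfs() result instead of ignoring it; the shape of the error path matters more than the details. A condensed sketch, assuming only that the attach helper returns a negative errno on failure, as in the hunk above:

#include <linux/err.h>
#include <linux/slab.h>

static struct usb_function_instance *example_alloc_inst(void)
{
	struct f_uvc_opts *opts;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	/* ... descriptor initialization as in the real function ... */

	ret = uvcg_attach_configfs(opts);
	if (ret < 0) {
		kfree(opts);		/* undo the allocation on failure */
		return ERR_PTR(ret);	/* propagate the errno, never NULL */
	}

	return &opts->func_inst;
}
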
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 2ed292e94fbc..5242d489e20a 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -25,6 +25,9 @@ struct f_uvc_opts {
unsigned int streaming_maxpacket;
unsigned int streaming_maxburst;
+ unsigned int control_interface;
+ unsigned int streaming_interface;
+
/*
* Control descriptors array pointers for full-/high-speed and
* super-speed. They point by default to the uvc_fs_control_cls and
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 93cf78b420fe..099d650082e5 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -24,6 +24,7 @@
struct usb_ep;
struct usb_request;
struct uvc_descriptor_header;
+struct uvc_device;
/* ------------------------------------------------------------------------
* Debugging, printing and logging
@@ -51,14 +52,12 @@ extern unsigned int uvc_gadget_trace_param;
printk(KERN_DEBUG "uvcvideo: " msg); \
} while (0)
-#define uvc_warn_once(dev, warn, msg...) \
- do { \
- if (!test_and_set_bit(warn, &dev->warnings)) \
- printk(KERN_INFO "uvcvideo: " msg); \
- } while (0)
-
-#define uvc_printk(level, msg...) \
- printk(level "uvcvideo: " msg)
+#define uvcg_dbg(f, fmt, args...) \
+ dev_dbg(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
+#define uvcg_info(f, fmt, args...) \
+ dev_info(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
+#define uvcg_err(f, fmt, args...) \
+ dev_err(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
/* ------------------------------------------------------------------------
* Driver specific constants
@@ -73,6 +72,7 @@ extern unsigned int uvc_gadget_trace_param;
*/
struct uvc_video {
+ struct uvc_device *uvc;
struct usb_ep *ep;
/* Frame parameters */
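
The new uvcg_dbg/uvcg_info/uvcg_err helpers route through dev_*() on the gadget's device and prefix every message with the function name, so output is attributable without a hand-rolled "uvcvideo:" prefix. Usage matches the call sites converted above; f must be a bound usb_function (f->config valid):

uvcg_info(f, "overriding streaming_maxpacket to %d\n",
	  opts->streaming_maxpacket);
uvcg_err(f, "failed to register video device\n");
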
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index b51f0d278826..bc1e2af566c3 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -9,9 +9,16 @@
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
*/
+
+#include <linux/sort.h>
+
#include "u_uvc.h"
#include "uvc_configfs.h"
+/* -----------------------------------------------------------------------------
+ * Global Utility Structures and Macros
+ */
+
#define UVCG_STREAMING_CONTROL_SIZE 1
#define UVC_ATTR(prefix, cname, aname) \
@@ -31,13 +38,93 @@ static struct configfs_attribute prefix##attr_##cname = { \
.show = prefix##cname##_show, \
}
+#define le8_to_cpu(x) (x)
+#define cpu_to_le8(x) (x)
+
+static int uvcg_config_compare_u32(const void *l, const void *r)
+{
+ u32 li = *(const u32 *)l;
+ u32 ri = *(const u32 *)r;
+
+ return li < ri ? -1 : li == ri ? 0 : 1;
+}
+
static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_uvc_opts,
func_inst.group);
}
-/* control/header/<NAME> */
+struct uvcg_config_group_type {
+ struct config_item_type type;
+ const char *name;
+ const struct uvcg_config_group_type **children;
+ int (*create_children)(struct config_group *group);
+};
+
+static void uvcg_config_item_release(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ kfree(group);
+}
+
+static struct configfs_item_operations uvcg_config_item_ops = {
+ .release = uvcg_config_item_release,
+};
+
+static int uvcg_config_create_group(struct config_group *parent,
+ const struct uvcg_config_group_type *type);
+
+static int uvcg_config_create_children(struct config_group *group,
+ const struct uvcg_config_group_type *type)
+{
+ const struct uvcg_config_group_type **child;
+ int ret;
+
+ if (type->create_children)
+ return type->create_children(group);
+
+ for (child = type->children; child && *child; ++child) {
+ ret = uvcg_config_create_group(group, *child);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uvcg_config_create_group(struct config_group *parent,
+ const struct uvcg_config_group_type *type)
+{
+ struct config_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ config_group_init_type_name(group, type->name, &type->type);
+ configfs_add_default_group(group, parent);
+
+ return uvcg_config_create_children(group, type);
+}
+
+static void uvcg_config_remove_children(struct config_group *group)
+{
+ struct config_group *child, *n;
+
+ list_for_each_entry_safe(child, n, &group->default_groups, group_entry) {
+ list_del(&child->group_entry);
+ uvcg_config_remove_children(child);
+ config_item_put(&child->cg_item);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * control/header/<NAME>
+ * control/header
+ */
+
DECLARE_UVC_HEADER_DESCRIPTOR(1);
struct uvcg_control_header {
@@ -51,9 +138,9 @@ static struct uvcg_control_header *to_uvcg_control_header(struct config_item *it
return container_of(item, struct uvcg_control_header, item);
}
-#define UVCG_CTRL_HDR_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \
+#define UVCG_CTRL_HDR_ATTR(cname, aname, bits, limit) \
static ssize_t uvcg_control_header_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
struct uvcg_control_header *ch = to_uvcg_control_header(item); \
struct f_uvc_opts *opts; \
@@ -67,7 +154,7 @@ static ssize_t uvcg_control_header_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(ch->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(ch->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -83,7 +170,7 @@ uvcg_control_header_##cname##_store(struct config_item *item, \
struct config_item *opts_item; \
struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\
int ret; \
- uxx num; \
+ u##bits num; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
@@ -96,7 +183,7 @@ uvcg_control_header_##cname##_store(struct config_item *item, \
goto end; \
} \
\
- ret = str2u(page, 0, &num); \
+ ret = kstrtou##bits(page, 0, &num); \
if (ret) \
goto end; \
\
@@ -104,7 +191,7 @@ uvcg_control_header_##cname##_store(struct config_item *item, \
ret = -EINVAL; \
goto end; \
} \
- ch->desc.aname = vnoc(num); \
+ ch->desc.aname = cpu_to_le##bits(num); \
ret = len; \
end: \
mutex_unlock(&opts->lock); \
@@ -114,11 +201,9 @@ end: \
\
UVC_ATTR(uvcg_control_header_, cname, aname)
-UVCG_CTRL_HDR_ATTR(bcd_uvc, bcdUVC, le16_to_cpu, kstrtou16, u16, cpu_to_le16,
- 0xffff);
+UVCG_CTRL_HDR_ATTR(bcd_uvc, bcdUVC, 16, 0xffff);
-UVCG_CTRL_HDR_ATTR(dw_clock_frequency, dwClockFrequency, le32_to_cpu, kstrtou32,
- u32, cpu_to_le32, 0x7fffffff);
+UVCG_CTRL_HDR_ATTR(dw_clock_frequency, dwClockFrequency, 32, 0x7fffffff);
#undef UVCG_CTRL_HDR_ATTR
@@ -129,6 +214,7 @@ static struct configfs_attribute *uvcg_control_header_attrs[] = {
};
static const struct config_item_type uvcg_control_header_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_attrs = uvcg_control_header_attrs,
.ct_owner = THIS_MODULE,
};
@@ -153,60 +239,42 @@ static struct config_item *uvcg_control_header_make(struct config_group *group,
return &h->item;
}
-static void uvcg_control_header_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_control_header *h = to_uvcg_control_header(item);
-
- kfree(h);
-}
-
-/* control/header */
-static struct uvcg_control_header_grp {
- struct config_group group;
-} uvcg_control_header_grp;
-
static struct configfs_group_operations uvcg_control_header_grp_ops = {
.make_item = uvcg_control_header_make,
- .drop_item = uvcg_control_header_drop,
};
-static const struct config_item_type uvcg_control_header_grp_type = {
- .ct_group_ops = &uvcg_control_header_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_control_header_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_control_header_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "header",
};
-/* control/processing/default */
-static struct uvcg_default_processing {
- struct config_group group;
-} uvcg_default_processing;
-
-static inline struct uvcg_default_processing
-*to_uvcg_default_processing(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_processing, group);
-}
+/* -----------------------------------------------------------------------------
+ * control/processing/default
+ */
-#define UVCG_DEFAULT_PROCESSING_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_PROCESSING_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_processing_##cname##_show( \
struct config_item *item, char *page) \
{ \
- struct uvcg_default_processing *dp = to_uvcg_default_processing(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_processing_unit_descriptor *pd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent; \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
pd = &opts->uvc_processing; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(pd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(pd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -215,37 +283,33 @@ static ssize_t uvcg_default_processing_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_processing_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_PROCESSING_ATTR(b_unit_id, bUnitID, identity_conv);
-UVCG_DEFAULT_PROCESSING_ATTR(b_source_id, bSourceID, identity_conv);
-UVCG_DEFAULT_PROCESSING_ATTR(w_max_multiplier, wMaxMultiplier, le16_to_cpu);
-UVCG_DEFAULT_PROCESSING_ATTR(i_processing, iProcessing, identity_conv);
-
-#undef identity_conv
+UVCG_DEFAULT_PROCESSING_ATTR(b_unit_id, bUnitID, 8);
+UVCG_DEFAULT_PROCESSING_ATTR(b_source_id, bSourceID, 8);
+UVCG_DEFAULT_PROCESSING_ATTR(w_max_multiplier, wMaxMultiplier, 16);
+UVCG_DEFAULT_PROCESSING_ATTR(i_processing, iProcessing, 8);
#undef UVCG_DEFAULT_PROCESSING_ATTR
static ssize_t uvcg_default_processing_bm_controls_show(
struct config_item *item, char *page)
{
- struct uvcg_default_processing *dp = to_uvcg_default_processing(item);
+ struct config_group *group = to_config_group(item);
struct f_uvc_opts *opts;
struct config_item *opts_item;
- struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex;
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
struct uvc_processing_unit_descriptor *pd;
int result, i;
char *pg = page;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
- opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent;
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
opts = to_f_uvc_opts(opts_item);
pd = &opts->uvc_processing;
mutex_lock(&opts->lock);
for (result = 0, i = 0; i < pd->bControlSize; ++i) {
- result += sprintf(pg, "%d\n", pd->bmControls[i]);
+ result += sprintf(pg, "%u\n", pd->bmControls[i]);
pg = page + result;
}
mutex_unlock(&opts->lock);
@@ -266,54 +330,55 @@ static struct configfs_attribute *uvcg_default_processing_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_processing_type = {
- .ct_attrs = uvcg_default_processing_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_processing_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_processing_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_processing {}; */
-
-/* control/processing */
-static struct uvcg_processing_grp {
- struct config_group group;
-} uvcg_processing_grp;
+/* -----------------------------------------------------------------------------
+ * control/processing
+ */
-static const struct config_item_type uvcg_processing_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_processing_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "processing",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_processing_type,
+ NULL,
+ },
};
-/* control/terminal/camera/default */
-static struct uvcg_default_camera {
- struct config_group group;
-} uvcg_default_camera;
-
-static inline struct uvcg_default_camera
-*to_uvcg_default_camera(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_camera, group);
-}
+/* -----------------------------------------------------------------------------
+ * control/terminal/camera/default
+ */
-#define UVCG_DEFAULT_CAMERA_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_CAMERA_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_camera_##cname##_show( \
struct config_item *item, char *page) \
{ \
- struct uvcg_default_camera *dc = to_uvcg_default_camera(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_camera_terminal_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent-> \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent-> \
ci_parent; \
opts = to_f_uvc_opts(opts_item); \
cd = &opts->uvc_camera_terminal; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(cd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -323,44 +388,40 @@ static ssize_t uvcg_default_camera_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_camera_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_CAMERA_ATTR(b_terminal_id, bTerminalID, identity_conv);
-UVCG_DEFAULT_CAMERA_ATTR(w_terminal_type, wTerminalType, le16_to_cpu);
-UVCG_DEFAULT_CAMERA_ATTR(b_assoc_terminal, bAssocTerminal, identity_conv);
-UVCG_DEFAULT_CAMERA_ATTR(i_terminal, iTerminal, identity_conv);
+UVCG_DEFAULT_CAMERA_ATTR(b_terminal_id, bTerminalID, 8);
+UVCG_DEFAULT_CAMERA_ATTR(w_terminal_type, wTerminalType, 16);
+UVCG_DEFAULT_CAMERA_ATTR(b_assoc_terminal, bAssocTerminal, 8);
+UVCG_DEFAULT_CAMERA_ATTR(i_terminal, iTerminal, 8);
UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_min, wObjectiveFocalLengthMin,
- le16_to_cpu);
+ 16);
UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_max, wObjectiveFocalLengthMax,
- le16_to_cpu);
+ 16);
UVCG_DEFAULT_CAMERA_ATTR(w_ocular_focal_length, wOcularFocalLength,
- le16_to_cpu);
-
-#undef identity_conv
+ 16);
#undef UVCG_DEFAULT_CAMERA_ATTR
static ssize_t uvcg_default_camera_bm_controls_show(
struct config_item *item, char *page)
{
- struct uvcg_default_camera *dc = to_uvcg_default_camera(item);
+ struct config_group *group = to_config_group(item);
struct f_uvc_opts *opts;
struct config_item *opts_item;
- struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex;
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
struct uvc_camera_terminal_descriptor *cd;
int result, i;
char *pg = page;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
- opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent->
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent->
ci_parent;
opts = to_f_uvc_opts(opts_item);
cd = &opts->uvc_camera_terminal;
mutex_lock(&opts->lock);
for (result = 0, i = 0; i < cd->bControlSize; ++i) {
- result += sprintf(pg, "%d\n", cd->bmControls[i]);
+ result += sprintf(pg, "%u\n", cd->bmControls[i]);
pg = page + result;
}
mutex_unlock(&opts->lock);
@@ -383,54 +444,55 @@ static struct configfs_attribute *uvcg_default_camera_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_camera_type = {
- .ct_attrs = uvcg_default_camera_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_camera_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_camera_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_camera {}; */
-
-/* control/terminal/camera */
-static struct uvcg_camera_grp {
- struct config_group group;
-} uvcg_camera_grp;
+/* -----------------------------------------------------------------------------
+ * control/terminal/camera
+ */
-static const struct config_item_type uvcg_camera_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_camera_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "camera",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_camera_type,
+ NULL,
+ },
};
-/* control/terminal/output/default */
-static struct uvcg_default_output {
- struct config_group group;
-} uvcg_default_output;
-
-static inline struct uvcg_default_output
-*to_uvcg_default_output(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_output, group);
-}
+/* -----------------------------------------------------------------------------
+ * control/terminal/output/default
+ */
-#define UVCG_DEFAULT_OUTPUT_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_OUTPUT_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_output_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
- struct uvcg_default_output *dout = to_uvcg_default_output(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dout->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_output_terminal_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dout->group.cg_item.ci_parent->ci_parent-> \
+ opts_item = group->cg_item.ci_parent->ci_parent-> \
ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
cd = &opts->uvc_output_terminal; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(cd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -440,15 +502,11 @@ static ssize_t uvcg_default_output_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_output_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, identity_conv);
-UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, le16_to_cpu);
-UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, identity_conv);
-UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, identity_conv);
-UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, identity_conv);
-
-#undef identity_conv
+UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8);
+UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16);
+UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8);
+UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, 8);
+UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8);
#undef UVCG_DEFAULT_OUTPUT_ATTR
@@ -461,47 +519,68 @@ static struct configfs_attribute *uvcg_default_output_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_output_type = {
- .ct_attrs = uvcg_default_output_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_output_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_output_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_output {}; */
-
-/* control/terminal/output */
-static struct uvcg_output_grp {
- struct config_group group;
-} uvcg_output_grp;
+/* -----------------------------------------------------------------------------
+ * control/terminal/output
+ */
-static const struct config_item_type uvcg_output_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_output_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "output",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_output_type,
+ NULL,
+ },
};
-/* control/terminal */
-static struct uvcg_terminal_grp {
- struct config_group group;
-} uvcg_terminal_grp;
+/* -----------------------------------------------------------------------------
+ * control/terminal
+ */
-static const struct config_item_type uvcg_terminal_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_terminal_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "terminal",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_camera_grp_type,
+ &uvcg_output_grp_type,
+ NULL,
+ },
};
-/* control/class/{fs} */
-static struct uvcg_control_class {
- struct config_group group;
-} uvcg_control_class_fs, uvcg_control_class_ss;
+/* -----------------------------------------------------------------------------
+ * control/class/{fs|ss}
+ */
+struct uvcg_control_class_group {
+ struct config_group group;
+ const char *name;
+};
static inline struct uvc_descriptor_header
**uvcg_get_ctl_class_arr(struct config_item *i, struct f_uvc_opts *o)
{
- struct uvcg_control_class *cl = container_of(to_config_group(i),
- struct uvcg_control_class, group);
+ struct uvcg_control_class_group *group =
+ container_of(i, struct uvcg_control_class_group,
+ group.cg_item);
- if (cl == &uvcg_control_class_fs)
+ if (!strcmp(group->name, "fs"))
return o->uvc_fs_control_cls;
- if (cl == &uvcg_control_class_ss)
+ if (!strcmp(group->name, "ss"))
return o->uvc_ss_control_cls;
return NULL;
@@ -544,6 +623,7 @@ static int uvcg_control_class_allow_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
return ret;
}
@@ -579,10 +659,12 @@ static void uvcg_control_class_drop_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
}
static struct configfs_item_operations uvcg_control_class_item_ops = {
+ .release = uvcg_config_item_release,
.allow_link = uvcg_control_class_allow_link,
.drop_link = uvcg_control_class_drop_link,
};
@@ -592,37 +674,99 @@ static const struct config_item_type uvcg_control_class_type = {
.ct_owner = THIS_MODULE,
};
-/* control/class */
-static struct uvcg_control_class_grp {
- struct config_group group;
-} uvcg_control_class_grp;
+/* -----------------------------------------------------------------------------
+ * control/class
+ */
+
+static int uvcg_control_class_create_children(struct config_group *parent)
+{
+ static const char * const names[] = { "fs", "ss" };
+ unsigned int i;
-static const struct config_item_type uvcg_control_class_grp_type = {
- .ct_owner = THIS_MODULE,
+ for (i = 0; i < ARRAY_SIZE(names); ++i) {
+ struct uvcg_control_class_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ group->name = names[i];
+
+ config_group_init_type_name(&group->group, group->name,
+ &uvcg_control_class_type);
+ configfs_add_default_group(&group->group, parent);
+ }
+
+ return 0;
+}
+
+static const struct uvcg_config_group_type uvcg_control_class_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "class",
+ .create_children = uvcg_control_class_create_children,
};
-/* control */
-static struct uvcg_control_grp {
- struct config_group group;
-} uvcg_control_grp;
+/* -----------------------------------------------------------------------------
+ * control
+ */
+
+static ssize_t uvcg_default_control_b_interface_number_show(
+ struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int result = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = item->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result += sprintf(page, "%u\n", opts->control_interface);
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+UVC_ATTR_RO(uvcg_default_control_, b_interface_number, bInterfaceNumber);
-static const struct config_item_type uvcg_control_grp_type = {
- .ct_owner = THIS_MODULE,
+static struct configfs_attribute *uvcg_default_control_attrs[] = {
+ &uvcg_default_control_attr_b_interface_number,
+ NULL,
};
-/* streaming/uncompressed */
-static struct uvcg_uncompressed_grp {
- struct config_group group;
-} uvcg_uncompressed_grp;
+static const struct uvcg_config_group_type uvcg_control_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_control_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "control",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_control_header_grp_type,
+ &uvcg_processing_grp_type,
+ &uvcg_terminal_grp_type,
+ &uvcg_control_class_grp_type,
+ NULL,
+ },
+};
-/* streaming/mjpeg */
-static struct uvcg_mjpeg_grp {
- struct config_group group;
-} uvcg_mjpeg_grp;
+/* -----------------------------------------------------------------------------
+ * streaming/uncompressed
+ * streaming/mjpeg
+ */
-static struct config_item *fmt_parent[] = {
- &uvcg_uncompressed_grp.group.cg_item,
- &uvcg_mjpeg_grp.group.cg_item,
+static const char * const uvcg_format_names[] = {
+ "uncompressed",
+ "mjpeg",
};
enum uvcg_format_type {
@@ -706,7 +850,11 @@ struct uvcg_format_ptr {
struct list_head entry;
};
-/* streaming/header/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/header/<NAME>
+ * streaming/header
+ */
+
struct uvcg_streaming_header {
struct config_item item;
struct uvc_input_header_descriptor desc;
@@ -720,6 +868,8 @@ static struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item
return container_of(item, struct uvcg_streaming_header, item);
}
+static void uvcg_format_set_indices(struct config_group *fmt);
+
static int uvcg_streaming_header_allow_link(struct config_item *src,
struct config_item *target)
{
@@ -744,10 +894,22 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
goto out;
}
- for (i = 0; i < ARRAY_SIZE(fmt_parent); ++i)
- if (target->ci_parent == fmt_parent[i])
+ /*
+ * Linking is only allowed to direct children of the format nodes
+ * (streaming/uncompressed or streaming/mjpeg nodes). First check that
+ * the grand-parent of the target matches the grand-parent of the source
+ * (the streaming node), and then verify that the target parent is a
+ * format node.
+ */
+ if (src->ci_parent->ci_parent != target->ci_parent->ci_parent)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(uvcg_format_names); ++i) {
+ if (!strcmp(target->ci_parent->ci_name, uvcg_format_names[i]))
break;
- if (i == ARRAY_SIZE(fmt_parent))
+ }
+
+ if (i == ARRAY_SIZE(uvcg_format_names))
goto out;
target_fmt = container_of(to_config_group(target), struct uvcg_format,
@@ -755,6 +917,8 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
if (!target_fmt)
goto out;
+ uvcg_format_set_indices(to_config_group(target));
+
format_ptr = kzalloc(sizeof(*format_ptr), GFP_KERNEL);
if (!format_ptr) {
ret = -ENOMEM;
@@ -764,6 +928,7 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
format_ptr->fmt = target_fmt;
list_add_tail(&format_ptr->entry, &src_hdr->formats);
++src_hdr->num_fmt;
+ ++target_fmt->linked;
out:
mutex_unlock(&opts->lock);
@@ -801,19 +966,22 @@ static void uvcg_streaming_header_drop_link(struct config_item *src,
break;
}
+ --target_fmt->linked;
+
out:
mutex_unlock(&opts->lock);
mutex_unlock(su_mutex);
}
static struct configfs_item_operations uvcg_streaming_header_item_ops = {
- .allow_link = uvcg_streaming_header_allow_link,
- .drop_link = uvcg_streaming_header_drop_link,
+ .release = uvcg_config_item_release,
+ .allow_link = uvcg_streaming_header_allow_link,
+ .drop_link = uvcg_streaming_header_drop_link,
};
-#define UVCG_STREAMING_HEADER_ATTR(cname, aname, conv) \
+#define UVCG_STREAMING_HEADER_ATTR(cname, aname, bits) \
static ssize_t uvcg_streaming_header_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
struct uvcg_streaming_header *sh = to_uvcg_streaming_header(item); \
struct f_uvc_opts *opts; \
@@ -827,7 +995,7 @@ static ssize_t uvcg_streaming_header_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(sh->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(sh->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -836,16 +1004,11 @@ static ssize_t uvcg_streaming_header_##cname##_show( \
\
UVC_ATTR_RO(uvcg_streaming_header_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_STREAMING_HEADER_ATTR(bm_info, bmInfo, identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_terminal_link, bTerminalLink, identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_still_capture_method, bStillCaptureMethod,
- identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_trigger_support, bTriggerSupport, identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_trigger_usage, bTriggerUsage, identity_conv);
-
-#undef identity_conv
+UVCG_STREAMING_HEADER_ATTR(bm_info, bmInfo, 8);
+UVCG_STREAMING_HEADER_ATTR(b_terminal_link, bTerminalLink, 8);
+UVCG_STREAMING_HEADER_ATTR(b_still_capture_method, bStillCaptureMethod, 8);
+UVCG_STREAMING_HEADER_ATTR(b_trigger_support, bTriggerSupport, 8);
+UVCG_STREAMING_HEADER_ATTR(b_trigger_usage, bTriggerUsage, 8);
#undef UVCG_STREAMING_HEADER_ATTR
@@ -884,31 +1047,26 @@ static struct config_item
return &h->item;
}
-static void uvcg_streaming_header_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_streaming_header *h = to_uvcg_streaming_header(item);
-
- kfree(h);
-}
-
-/* streaming/header */
-static struct uvcg_streaming_header_grp {
- struct config_group group;
-} uvcg_streaming_header_grp;
-
static struct configfs_group_operations uvcg_streaming_header_grp_ops = {
.make_item = uvcg_streaming_header_make,
- .drop_item = uvcg_streaming_header_drop,
};
-static const struct config_item_type uvcg_streaming_header_grp_type = {
- .ct_group_ops = &uvcg_streaming_header_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_streaming_header_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_streaming_header_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "header",
};
-/* streaming/<mode>/<format>/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/<mode>/<format>/<NAME>
+ */
+
struct uvcg_frame {
+ struct config_item item;
+ enum uvcg_format_type fmt_type;
struct {
u8 b_length;
u8 b_descriptor_type;
@@ -924,8 +1082,6 @@ struct uvcg_frame {
u8 b_frame_interval_type;
} __attribute__((packed)) frame;
u32 *dw_frame_interval;
- enum uvcg_format_type fmt_type;
- struct config_item item;
};
static struct uvcg_frame *to_uvcg_frame(struct config_item *item)
@@ -933,7 +1089,7 @@ static struct uvcg_frame *to_uvcg_frame(struct config_item *item)
return container_of(item, struct uvcg_frame, item);
}
-#define UVCG_FRAME_ATTR(cname, aname, to_cpu_endian, to_little_endian, bits) \
+#define UVCG_FRAME_ATTR(cname, aname, bits) \
static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
{ \
struct uvcg_frame *f = to_uvcg_frame(item); \
@@ -948,7 +1104,7 @@ static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", to_cpu_endian(f->frame.cname)); \
+ result = sprintf(page, "%u\n", f->frame.cname); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -963,8 +1119,8 @@ static ssize_t uvcg_frame_##cname##_store(struct config_item *item, \
struct config_item *opts_item; \
struct uvcg_format *fmt; \
struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;\
+ typeof(f->frame.cname) num; \
int ret; \
- u##bits num; \
\
ret = kstrtou##bits(page, 0, &num); \
if (ret) \
@@ -982,7 +1138,7 @@ static ssize_t uvcg_frame_##cname##_store(struct config_item *item, \
goto end; \
} \
\
- f->frame.cname = to_little_endian(num); \
+ f->frame.cname = num; \
ret = len; \
end: \
mutex_unlock(&opts->lock); \
@@ -992,20 +1148,48 @@ end: \
\
UVC_ATTR(uvcg_frame_, cname, aname);
-#define noop_conversion(x) (x)
+static ssize_t uvcg_frame_b_frame_index_show(struct config_item *item,
+ char *page)
+{
+ struct uvcg_frame *f = to_uvcg_frame(item);
+ struct uvcg_format *fmt;
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct config_item *fmt_item;
+ struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;
+ int result;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ fmt_item = f->item.ci_parent;
+ fmt = to_uvcg_format(fmt_item);
-UVCG_FRAME_ATTR(bm_capabilities, bmCapabilities, noop_conversion,
- noop_conversion, 8);
-UVCG_FRAME_ATTR(w_width, wWidth, le16_to_cpu, cpu_to_le16, 16);
-UVCG_FRAME_ATTR(w_height, wHeight, le16_to_cpu, cpu_to_le16, 16);
-UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, le32_to_cpu, cpu_to_le32, 32);
-UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, le32_to_cpu, cpu_to_le32, 32);
-UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize,
- le32_to_cpu, cpu_to_le32, 32);
-UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval,
- le32_to_cpu, cpu_to_le32, 32);
+ if (!fmt->linked) {
+ result = -EBUSY;
+ goto out;
+ }
-#undef noop_conversion
+ opts_item = fmt_item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%u\n", f->frame.b_frame_index);
+ mutex_unlock(&opts->lock);
+
+out:
+ mutex_unlock(su_mutex);
+ return result;
+}
+
+UVC_ATTR_RO(uvcg_frame_, b_frame_index, bFrameIndex);
+
+UVCG_FRAME_ATTR(bm_capabilities, bmCapabilities, 8);
+UVCG_FRAME_ATTR(w_width, wWidth, 16);
+UVCG_FRAME_ATTR(w_height, wHeight, 16);
+UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, 32);
+UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, 32);
+UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize, 32);
+UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval, 32);
#undef UVCG_FRAME_ATTR
@@ -1026,8 +1210,7 @@ static ssize_t uvcg_frame_dw_frame_interval_show(struct config_item *item,
mutex_lock(&opts->lock);
for (result = 0, i = 0; i < frm->frame.b_frame_interval_type; ++i) {
- result += sprintf(pg, "%d\n",
- le32_to_cpu(frm->dw_frame_interval[i]));
+ result += sprintf(pg, "%u\n", frm->dw_frame_interval[i]);
pg = page + result;
}
mutex_unlock(&opts->lock);
@@ -1052,7 +1235,7 @@ static inline int __uvcg_fill_frm_intrv(char *buf, void *priv)
return ret;
interv = priv;
- **interv = cpu_to_le32(num);
+ **interv = num;
++*interv;
return 0;
@@ -1129,6 +1312,8 @@ static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
kfree(ch->dw_frame_interval);
ch->dw_frame_interval = frm_intrv;
ch->frame.b_frame_interval_type = n;
+ sort(ch->dw_frame_interval, n, sizeof(*ch->dw_frame_interval),
+ uvcg_config_compare_u32, NULL);
ret = len;
end:
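
Sorting the table on store keeps dwFrameInterval ascending regardless of input order, which UVC expects for discrete frame intervals. The kernel's sort() helper with the u32 comparator defined at the top of this patch is enough; a small usage sketch:

#include <linux/sort.h>

u32 intervals[] = { 666666, 333333, 1000000 };

sort(intervals, ARRAY_SIZE(intervals), sizeof(*intervals),
     uvcg_config_compare_u32, NULL);
/* intervals[] is now { 333333, 666666, 1000000 } */
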
@@ -1140,6 +1325,7 @@ end:
UVC_ATTR(uvcg_frame_, dw_frame_interval, dwFrameInterval);
static struct configfs_attribute *uvcg_frame_attrs[] = {
+ &uvcg_frame_attr_b_frame_index,
&uvcg_frame_attr_bm_capabilities,
&uvcg_frame_attr_w_width,
&uvcg_frame_attr_w_height,
@@ -1152,6 +1338,7 @@ static struct configfs_attribute *uvcg_frame_attrs[] = {
};
static const struct config_item_type uvcg_frame_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_attrs = uvcg_frame_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1170,12 +1357,12 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
h->frame.b_descriptor_type = USB_DT_CS_INTERFACE;
h->frame.b_frame_index = 1;
- h->frame.w_width = cpu_to_le16(640);
- h->frame.w_height = cpu_to_le16(360);
- h->frame.dw_min_bit_rate = cpu_to_le32(18432000);
- h->frame.dw_max_bit_rate = cpu_to_le32(55296000);
- h->frame.dw_max_video_frame_buffer_size = cpu_to_le32(460800);
- h->frame.dw_default_frame_interval = cpu_to_le32(666666);
+ h->frame.w_width = 640;
+ h->frame.w_height = 360;
+ h->frame.dw_min_bit_rate = 18432000;
+ h->frame.dw_max_bit_rate = 55296000;
+ h->frame.dw_max_video_frame_buffer_size = 460800;
+ h->frame.dw_default_frame_interval = 666666;
opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
opts = to_f_uvc_opts(opts_item);
@@ -1203,7 +1390,6 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
static void uvcg_frame_drop(struct config_group *group, struct config_item *item)
{
- struct uvcg_frame *h = to_uvcg_frame(item);
struct uvcg_format *fmt;
struct f_uvc_opts *opts;
struct config_item *opts_item;
@@ -1214,11 +1400,31 @@ static void uvcg_frame_drop(struct config_group *group, struct config_item *item
mutex_lock(&opts->lock);
fmt = to_uvcg_format(&group->cg_item);
--fmt->num_frames;
- kfree(h);
mutex_unlock(&opts->lock);
+
+ config_item_put(item);
+}
+
+static void uvcg_format_set_indices(struct config_group *fmt)
+{
+ struct config_item *ci;
+ unsigned int i = 1;
+
+ list_for_each_entry(ci, &fmt->cg_children, ci_entry) {
+ struct uvcg_frame *frm;
+
+ if (ci->ci_type != &uvcg_frame_type)
+ continue;
+
+ frm = to_uvcg_frame(ci);
+ frm->frame.b_frame_index = i++;
+ }
}
-/* streaming/uncompressed/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/uncompressed/<NAME>
+ */
+
struct uvcg_uncompressed {
struct uvcg_format fmt;
struct uvc_format_uncompressed desc;
@@ -1290,7 +1496,7 @@ end:
UVC_ATTR(uvcg_uncompressed_, guid_format, guidFormat);
-#define UVCG_UNCOMPRESSED_ATTR_RO(cname, aname, conv) \
+#define UVCG_UNCOMPRESSED_ATTR_RO(cname, aname, bits) \
static ssize_t uvcg_uncompressed_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -1306,7 +1512,7 @@ static ssize_t uvcg_uncompressed_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1315,7 +1521,7 @@ static ssize_t uvcg_uncompressed_##cname##_show( \
\
UVC_ATTR_RO(uvcg_uncompressed_, cname, aname);
-#define UVCG_UNCOMPRESSED_ATTR(cname, aname, conv) \
+#define UVCG_UNCOMPRESSED_ATTR(cname, aname, bits) \
static ssize_t uvcg_uncompressed_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -1331,7 +1537,7 @@ static ssize_t uvcg_uncompressed_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1378,16 +1584,12 @@ end: \
\
UVC_ATTR(uvcg_uncompressed_, cname, aname);
-#define identity_conv(x) (x)
-
-UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, identity_conv);
-UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex,
- identity_conv);
-UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, identity_conv);
-UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, identity_conv);
-UVCG_UNCOMPRESSED_ATTR_RO(bm_interface_flags, bmInterfaceFlags, identity_conv);
-
-#undef identity_conv
+UVCG_UNCOMPRESSED_ATTR_RO(b_format_index, bFormatIndex, 8);
+UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, 8);
+UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex, 8);
+UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8);
+UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8);
+UVCG_UNCOMPRESSED_ATTR_RO(bm_interface_flags, bmInterfaceFlags, 8);
#undef UVCG_UNCOMPRESSED_ATTR
#undef UVCG_UNCOMPRESSED_ATTR_RO
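The reworked show macros take the field width in bits and convert from the descriptor's little-endian storage with le<bits>_to_cpu() at display time. As a hedged userspace analogue of that boundary conversion — the struct and values merely mirror fields seen in this file and are not patch code — glibc's htole16()/le16toh() family from <endian.h> does the same job:

#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct frame_desc {
	uint16_t w_width;         /* kept little-endian, as on the wire */
	uint32_t dw_min_bit_rate; /* kept little-endian, as on the wire */
};

int main(void)
{
	struct frame_desc d = {
		.w_width = htole16(640),
		.dw_min_bit_rate = htole32(18432000),
	};

	/* Convert only when presenting the value, as the macros now do. */
	printf("%" PRIu16 "\n", le16toh(d.w_width));
	printf("%" PRIu32 "\n", le32toh(d.dw_min_bit_rate));
	return 0;
}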
@@ -1410,6 +1612,7 @@ uvcg_uncompressed_bma_controls_store(struct config_item *item,
UVC_ATTR(uvcg_uncompressed_, bma_controls, bmaControls);
static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
+ &uvcg_uncompressed_attr_b_format_index,
&uvcg_uncompressed_attr_guid_format,
&uvcg_uncompressed_attr_b_bits_per_pixel,
&uvcg_uncompressed_attr_b_default_frame_index,
@@ -1421,6 +1624,7 @@ static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
};
static const struct config_item_type uvcg_uncompressed_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_group_ops = &uvcg_uncompressed_group_ops,
.ct_attrs = uvcg_uncompressed_attrs,
.ct_owner = THIS_MODULE,
@@ -1457,25 +1661,23 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group,
return &h->fmt.group;
}
-static void uvcg_uncompressed_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_uncompressed *h = to_uvcg_uncompressed(item);
-
- kfree(h);
-}
-
static struct configfs_group_operations uvcg_uncompressed_grp_ops = {
.make_group = uvcg_uncompressed_make,
- .drop_item = uvcg_uncompressed_drop,
};
-static const struct config_item_type uvcg_uncompressed_grp_type = {
- .ct_group_ops = &uvcg_uncompressed_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_uncompressed_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_uncompressed_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "uncompressed",
};
-/* streaming/mjpeg/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/mjpeg/<NAME>
+ */
+
struct uvcg_mjpeg {
struct uvcg_format fmt;
struct uvc_format_mjpeg desc;
@@ -1493,7 +1695,7 @@ static struct configfs_group_operations uvcg_mjpeg_group_ops = {
.drop_item = uvcg_frame_drop,
};
-#define UVCG_MJPEG_ATTR_RO(cname, aname, conv) \
+#define UVCG_MJPEG_ATTR_RO(cname, aname, bits) \
static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
{ \
struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); \
@@ -1508,7 +1710,7 @@ static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1517,7 +1719,7 @@ static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
\
UVC_ATTR_RO(uvcg_mjpeg_, cname, aname)
-#define UVCG_MJPEG_ATTR(cname, aname, conv) \
+#define UVCG_MJPEG_ATTR(cname, aname, bits) \
static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
{ \
struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); \
@@ -1532,7 +1734,7 @@ static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1579,16 +1781,12 @@ end: \
\
UVC_ATTR(uvcg_mjpeg_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex,
- identity_conv);
-UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, identity_conv);
-UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, identity_conv);
-UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, identity_conv);
-UVCG_MJPEG_ATTR_RO(bm_interface_flags, bmInterfaceFlags, identity_conv);
-
-#undef identity_conv
+UVCG_MJPEG_ATTR_RO(b_format_index, bFormatIndex, 8);
+UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex, 8);
+UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, 8);
+UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8);
+UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8);
+UVCG_MJPEG_ATTR_RO(bm_interface_flags, bmInterfaceFlags, 8);
#undef UVCG_MJPEG_ATTR
#undef UVCG_MJPEG_ATTR_RO
@@ -1611,6 +1809,7 @@ uvcg_mjpeg_bma_controls_store(struct config_item *item,
UVC_ATTR(uvcg_mjpeg_, bma_controls, bmaControls);
static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
+ &uvcg_mjpeg_attr_b_format_index,
&uvcg_mjpeg_attr_b_default_frame_index,
&uvcg_mjpeg_attr_bm_flags,
&uvcg_mjpeg_attr_b_aspect_ratio_x,
@@ -1621,6 +1820,7 @@ static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
};
static const struct config_item_type uvcg_mjpeg_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_group_ops = &uvcg_mjpeg_group_ops,
.ct_attrs = uvcg_mjpeg_attrs,
.ct_owner = THIS_MODULE,
@@ -1651,56 +1851,42 @@ static struct config_group *uvcg_mjpeg_make(struct config_group *group,
return &h->fmt.group;
}
-static void uvcg_mjpeg_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_mjpeg *h = to_uvcg_mjpeg(item);
-
- kfree(h);
-}
-
static struct configfs_group_operations uvcg_mjpeg_grp_ops = {
.make_group = uvcg_mjpeg_make,
- .drop_item = uvcg_mjpeg_drop,
};
-static const struct config_item_type uvcg_mjpeg_grp_type = {
- .ct_group_ops = &uvcg_mjpeg_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_mjpeg_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_mjpeg_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "mjpeg",
};
-/* streaming/color_matching/default */
-static struct uvcg_default_color_matching {
- struct config_group group;
-} uvcg_default_color_matching;
-
-static inline struct uvcg_default_color_matching
-*to_uvcg_default_color_matching(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_color_matching, group);
-}
+/* -----------------------------------------------------------------------------
+ * streaming/color_matching/default
+ */
-#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_color_matching_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
- struct uvcg_default_color_matching *dc = \
- to_uvcg_default_color_matching(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_color_matching_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent; \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
cd = &opts->uvc_color_matching; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(cd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1709,16 +1895,10 @@ static ssize_t uvcg_default_color_matching_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_color_matching_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries,
- identity_conv);
+UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries, 8);
UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_transfer_characteristics,
- bTransferCharacteristics, identity_conv);
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients,
- identity_conv);
-
-#undef identity_conv
+ bTransferCharacteristics, 8);
+UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients, 8);
#undef UVCG_DEFAULT_COLOR_MATCHING_ATTR
@@ -1729,41 +1909,54 @@ static struct configfs_attribute *uvcg_default_color_matching_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_color_matching_type = {
- .ct_attrs = uvcg_default_color_matching_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_color_matching_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_color_matching_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_color_matching {}; */
-
-/* streaming/color_matching */
-static struct uvcg_color_matching_grp {
- struct config_group group;
-} uvcg_color_matching_grp;
+/* -----------------------------------------------------------------------------
+ * streaming/color_matching
+ */
-static const struct config_item_type uvcg_color_matching_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_color_matching_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "color_matching",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_color_matching_type,
+ NULL,
+ },
};
-/* streaming/class/{fs|hs|ss} */
-static struct uvcg_streaming_class {
- struct config_group group;
-} uvcg_streaming_class_fs, uvcg_streaming_class_hs, uvcg_streaming_class_ss;
+/* -----------------------------------------------------------------------------
+ * streaming/class/{fs|hs|ss}
+ */
+struct uvcg_streaming_class_group {
+ struct config_group group;
+ const char *name;
+};
static inline struct uvc_descriptor_header
***__uvcg_get_stream_class_arr(struct config_item *i, struct f_uvc_opts *o)
{
- struct uvcg_streaming_class *cl = container_of(to_config_group(i),
- struct uvcg_streaming_class, group);
+ struct uvcg_streaming_class_group *group =
+ container_of(i, struct uvcg_streaming_class_group,
+ group.cg_item);
- if (cl == &uvcg_streaming_class_fs)
+ if (!strcmp(group->name, "fs"))
return &o->uvc_fs_streaming_cls;
- if (cl == &uvcg_streaming_class_hs)
+ if (!strcmp(group->name, "hs"))
return &o->uvc_hs_streaming_cls;
- if (cl == &uvcg_streaming_class_ss)
+ if (!strcmp(group->name, "ss"))
return &o->uvc_ss_streaming_cls;
return NULL;
@@ -1922,24 +2115,22 @@ static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n,
struct uvcg_format *fmt = priv1;
if (fmt->type == UVCG_UNCOMPRESSED) {
- struct uvc_format_uncompressed *unc = *dest;
struct uvcg_uncompressed *u =
container_of(fmt, struct uvcg_uncompressed,
fmt);
+ u->desc.bFormatIndex = n + 1;
+ u->desc.bNumFrameDescriptors = fmt->num_frames;
memcpy(*dest, &u->desc, sizeof(u->desc));
*dest += sizeof(u->desc);
- unc->bNumFrameDescriptors = fmt->num_frames;
- unc->bFormatIndex = n + 1;
} else if (fmt->type == UVCG_MJPEG) {
- struct uvc_format_mjpeg *mjp = *dest;
struct uvcg_mjpeg *m =
container_of(fmt, struct uvcg_mjpeg, fmt);
+ m->desc.bFormatIndex = n + 1;
+ m->desc.bNumFrameDescriptors = fmt->num_frames;
memcpy(*dest, &m->desc, sizeof(m->desc));
*dest += sizeof(m->desc);
- mjp->bNumFrameDescriptors = fmt->num_frames;
- mjp->bFormatIndex = n + 1;
} else {
return -EINVAL;
}
@@ -2038,6 +2229,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
return ret;
}
@@ -2078,10 +2270,12 @@ static void uvcg_streaming_class_drop_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
}
static struct configfs_item_operations uvcg_streaming_class_item_ops = {
+ .release = uvcg_config_item_release,
.allow_link = uvcg_streaming_class_allow_link,
.drop_link = uvcg_streaming_class_drop_link,
};
@@ -2091,36 +2285,109 @@ static const struct config_item_type uvcg_streaming_class_type = {
.ct_owner = THIS_MODULE,
};
-/* streaming/class */
-static struct uvcg_streaming_class_grp {
- struct config_group group;
-} uvcg_streaming_class_grp;
+/* -----------------------------------------------------------------------------
+ * streaming/class
+ */
+
+static int uvcg_streaming_class_create_children(struct config_group *parent)
+{
+ static const char * const names[] = { "fs", "hs", "ss" };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(names); ++i) {
+ struct uvcg_streaming_class_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
-static const struct config_item_type uvcg_streaming_class_grp_type = {
- .ct_owner = THIS_MODULE,
+ group->name = names[i];
+
+ config_group_init_type_name(&group->group, group->name,
+ &uvcg_streaming_class_type);
+ configfs_add_default_group(&group->group, parent);
+ }
+
+ return 0;
+}
+
+static const struct uvcg_config_group_type uvcg_streaming_class_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "class",
+ .create_children = uvcg_streaming_class_create_children,
};
-/* streaming */
-static struct uvcg_streaming_grp {
- struct config_group group;
-} uvcg_streaming_grp;
+/* -----------------------------------------------------------------------------
+ * streaming
+ */
-static const struct config_item_type uvcg_streaming_grp_type = {
- .ct_owner = THIS_MODULE,
+static ssize_t uvcg_default_streaming_b_interface_number_show(
+ struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int result = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = item->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result += sprintf(page, "%u\n", opts->streaming_interface);
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+UVC_ATTR_RO(uvcg_default_streaming_, b_interface_number, bInterfaceNumber);
+
+static struct configfs_attribute *uvcg_default_streaming_attrs[] = {
+ &uvcg_default_streaming_attr_b_interface_number,
+ NULL,
+};
+
+static const struct uvcg_config_group_type uvcg_streaming_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_streaming_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "streaming",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_streaming_header_grp_type,
+ &uvcg_uncompressed_grp_type,
+ &uvcg_mjpeg_grp_type,
+ &uvcg_color_matching_grp_type,
+ &uvcg_streaming_class_grp_type,
+ NULL,
+ },
};
-static void uvc_attr_release(struct config_item *item)
+/* -----------------------------------------------------------------------------
+ * UVC function
+ */
+
+static void uvc_func_item_release(struct config_item *item)
{
struct f_uvc_opts *opts = to_f_uvc_opts(item);
+ uvcg_config_remove_children(to_config_group(item));
usb_put_function_instance(&opts->func_inst);
}
-static struct configfs_item_operations uvc_item_ops = {
- .release = uvc_attr_release,
+static struct configfs_item_operations uvc_func_item_ops = {
+ .release = uvc_func_item_release,
};
-#define UVCG_OPTS_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \
+#define UVCG_OPTS_ATTR(cname, aname, limit) \
static ssize_t f_uvc_opts_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -2128,7 +2395,7 @@ static ssize_t f_uvc_opts_##cname##_show( \
int result; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(opts->cname)); \
+ result = sprintf(page, "%u\n", opts->cname); \
mutex_unlock(&opts->lock); \
\
return result; \
@@ -2139,8 +2406,8 @@ f_uvc_opts_##cname##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uvc_opts *opts = to_f_uvc_opts(item); \
+ unsigned int num; \
int ret; \
- uxx num; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
@@ -2148,7 +2415,7 @@ f_uvc_opts_##cname##_store(struct config_item *item, \
goto end; \
} \
\
- ret = str2u(page, 0, &num); \
+ ret = kstrtouint(page, 0, &num); \
if (ret) \
goto end; \
\
@@ -2156,7 +2423,7 @@ f_uvc_opts_##cname##_store(struct config_item *item, \
ret = -EINVAL; \
goto end; \
} \
- opts->cname = vnoc(num); \
+ opts->cname = num; \
ret = len; \
end: \
mutex_unlock(&opts->lock); \
@@ -2165,16 +2432,9 @@ end: \
\
UVC_ATTR(f_uvc_opts_, cname, cname)
-#define identity_conv(x) (x)
-
-UVCG_OPTS_ATTR(streaming_interval, streaming_interval, identity_conv,
- kstrtou8, u8, identity_conv, 16);
-UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, le16_to_cpu,
- kstrtou16, u16, le16_to_cpu, 3072);
-UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, identity_conv,
- kstrtou8, u8, identity_conv, 15);
-
-#undef identity_conv
+UVCG_OPTS_ATTR(streaming_interval, streaming_interval, 16);
+UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, 3072);
+UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, 15);
#undef UVCG_OPTS_ATTR
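With the conversion helpers gone, every option is parsed by kstrtouint() and checked against a single numeric limit. Below is a minimal userspace analogue of that parse-and-bound step; strtoul() stands in for kstrtouint() (which additionally tolerates one trailing newline), and the names are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Return 0 and store the value, or -1 on malformed or out-of-range input. */
static int parse_bounded_uint(const char *s, unsigned int limit,
			      unsigned int *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, 0);
	if (errno || end == s || *end != '\0' || val > limit)
		return -1;
	*out = (unsigned int)val;
	return 0;
}

int main(void)
{
	unsigned int maxpacket;

	if (parse_bounded_uint("3072", 3072, &maxpacket) == 0)
		printf("streaming_maxpacket = %u\n", maxpacket);
	return 0;
}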
@@ -2185,123 +2445,31 @@ static struct configfs_attribute *uvc_attrs[] = {
NULL,
};
-static const struct config_item_type uvc_func_type = {
- .ct_item_ops = &uvc_item_ops,
- .ct_attrs = uvc_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvc_func_type = {
+ .type = {
+ .ct_item_ops = &uvc_func_item_ops,
+ .ct_attrs = uvc_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_control_grp_type,
+ &uvcg_streaming_grp_type,
+ NULL,
+ },
};
int uvcg_attach_configfs(struct f_uvc_opts *opts)
{
- config_group_init_type_name(&uvcg_control_header_grp.group,
- "header",
- &uvcg_control_header_grp_type);
-
- config_group_init_type_name(&uvcg_default_processing.group,
- "default", &uvcg_default_processing_type);
- config_group_init_type_name(&uvcg_processing_grp.group,
- "processing", &uvcg_processing_grp_type);
- configfs_add_default_group(&uvcg_default_processing.group,
- &uvcg_processing_grp.group);
-
- config_group_init_type_name(&uvcg_default_camera.group,
- "default", &uvcg_default_camera_type);
- config_group_init_type_name(&uvcg_camera_grp.group,
- "camera", &uvcg_camera_grp_type);
- configfs_add_default_group(&uvcg_default_camera.group,
- &uvcg_camera_grp.group);
-
- config_group_init_type_name(&uvcg_default_output.group,
- "default", &uvcg_default_output_type);
- config_group_init_type_name(&uvcg_output_grp.group,
- "output", &uvcg_output_grp_type);
- configfs_add_default_group(&uvcg_default_output.group,
- &uvcg_output_grp.group);
-
- config_group_init_type_name(&uvcg_terminal_grp.group,
- "terminal", &uvcg_terminal_grp_type);
- configfs_add_default_group(&uvcg_camera_grp.group,
- &uvcg_terminal_grp.group);
- configfs_add_default_group(&uvcg_output_grp.group,
- &uvcg_terminal_grp.group);
-
- config_group_init_type_name(&uvcg_control_class_fs.group,
- "fs", &uvcg_control_class_type);
- config_group_init_type_name(&uvcg_control_class_ss.group,
- "ss", &uvcg_control_class_type);
- config_group_init_type_name(&uvcg_control_class_grp.group,
- "class",
- &uvcg_control_class_grp_type);
- configfs_add_default_group(&uvcg_control_class_fs.group,
- &uvcg_control_class_grp.group);
- configfs_add_default_group(&uvcg_control_class_ss.group,
- &uvcg_control_class_grp.group);
-
- config_group_init_type_name(&uvcg_control_grp.group,
- "control",
- &uvcg_control_grp_type);
- configfs_add_default_group(&uvcg_control_header_grp.group,
- &uvcg_control_grp.group);
- configfs_add_default_group(&uvcg_processing_grp.group,
- &uvcg_control_grp.group);
- configfs_add_default_group(&uvcg_terminal_grp.group,
- &uvcg_control_grp.group);
- configfs_add_default_group(&uvcg_control_class_grp.group,
- &uvcg_control_grp.group);
-
- config_group_init_type_name(&uvcg_streaming_header_grp.group,
- "header",
- &uvcg_streaming_header_grp_type);
- config_group_init_type_name(&uvcg_uncompressed_grp.group,
- "uncompressed",
- &uvcg_uncompressed_grp_type);
- config_group_init_type_name(&uvcg_mjpeg_grp.group,
- "mjpeg",
- &uvcg_mjpeg_grp_type);
- config_group_init_type_name(&uvcg_default_color_matching.group,
- "default",
- &uvcg_default_color_matching_type);
- config_group_init_type_name(&uvcg_color_matching_grp.group,
- "color_matching",
- &uvcg_color_matching_grp_type);
- configfs_add_default_group(&uvcg_default_color_matching.group,
- &uvcg_color_matching_grp.group);
-
- config_group_init_type_name(&uvcg_streaming_class_fs.group,
- "fs", &uvcg_streaming_class_type);
- config_group_init_type_name(&uvcg_streaming_class_hs.group,
- "hs", &uvcg_streaming_class_type);
- config_group_init_type_name(&uvcg_streaming_class_ss.group,
- "ss", &uvcg_streaming_class_type);
- config_group_init_type_name(&uvcg_streaming_class_grp.group,
- "class", &uvcg_streaming_class_grp_type);
- configfs_add_default_group(&uvcg_streaming_class_fs.group,
- &uvcg_streaming_class_grp.group);
- configfs_add_default_group(&uvcg_streaming_class_hs.group,
- &uvcg_streaming_class_grp.group);
- configfs_add_default_group(&uvcg_streaming_class_ss.group,
- &uvcg_streaming_class_grp.group);
-
- config_group_init_type_name(&uvcg_streaming_grp.group,
- "streaming", &uvcg_streaming_grp_type);
- configfs_add_default_group(&uvcg_streaming_header_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_uncompressed_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_mjpeg_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_color_matching_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_streaming_class_grp.group,
- &uvcg_streaming_grp.group);
-
- config_group_init_type_name(&opts->func_inst.group,
- "",
- &uvc_func_type);
- configfs_add_default_group(&uvcg_control_grp.group,
- &opts->func_inst.group);
- configfs_add_default_group(&uvcg_streaming_grp.group,
- &opts->func_inst.group);
+ int ret;
- return 0;
+ config_group_init_type_name(&opts->func_inst.group, uvc_func_type.name,
+ &uvc_func_type.type);
+
+ ret = uvcg_config_create_children(&opts->func_inst.group,
+ &uvc_func_type);
+ if (ret < 0)
+ config_group_put(&opts->func_inst.group);
+
+ return ret;
}
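uvcg_attach_configfs() now builds the whole hierarchy from uvcg_config_group_type tables — a name, a NULL-terminated children array, an optional create_children() hook — instead of open-coding each group; the uvcg_config_create_children() helper it calls is defined earlier in this patch. A self-contained userspace analogue of that table-driven, recursive construction (group names mirror the patch, the rest is a sketch):

#include <stdio.h>

struct group_type {
	const char *name;
	const struct group_type * const *children; /* NULL-terminated */
};

static const struct group_type color_default = { .name = "default" };
static const struct group_type color_matching = {
	.name = "color_matching",
	.children = (const struct group_type *const []) {
		&color_default, NULL,
	},
};
static const struct group_type streaming = {
	.name = "streaming",
	.children = (const struct group_type *const []) {
		&color_matching, NULL,
	},
};

static void create_children(const struct group_type *t, int depth)
{
	const struct group_type *const *child;

	printf("%*s%s/\n", depth * 2, "", t->name);
	if (!t->children)
		return;
	for (child = t->children; *child; child++)
		create_children(*child, depth + 1);
}

int main(void)
{
	create_children(&streaming, 0);
	return 0;
}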
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 7f1ca3b57823..a1183eccee22 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -115,8 +115,8 @@ uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt)
}
if (i == ARRAY_SIZE(uvc_formats)) {
- printk(KERN_INFO "Unsupported format 0x%08x.\n",
- fmt->fmt.pix.pixelformat);
+ uvcg_info(&uvc->func, "Unsupported format 0x%08x.\n",
+ fmt->fmt.pix.pixelformat);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index d3567b90343a..5c042f380708 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -125,6 +125,23 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
* Request handling
*/
+static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
+{
+ int ret;
+
+ ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
+ if (ret < 0) {
+ uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
+ ret);
+
+ /* Isochronous endpoints can't be halted. */
+ if (usb_endpoint_xfer_bulk(video->ep->desc))
+ usb_ep_set_halt(video->ep);
+ }
+
+ return ret;
+}
+
/*
* I somehow feel that synchronisation won't be easy to achieve here. We have
* three events that control USB requests submission:
@@ -169,13 +186,14 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
break;
case -ESHUTDOWN: /* disconnect from host. */
- printk(KERN_DEBUG "VS request cancelled.\n");
+ uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
uvcg_queue_cancel(queue, 1);
goto requeue;
default:
- printk(KERN_INFO "VS request completed with status %d.\n",
- req->status);
+ uvcg_info(&video->uvc->func,
+ "VS request completed with status %d.\n",
+ req->status);
uvcg_queue_cancel(queue, 0);
goto requeue;
}
@@ -189,14 +207,13 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
video->encode(req, video, buf);
- if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) {
- printk(KERN_INFO "Failed to queue request (%d).\n", ret);
- usb_ep_set_halt(ep);
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ ret = uvcg_video_ep_queue(video, req);
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+
+ if (ret < 0) {
uvcg_queue_cancel(queue, 0);
goto requeue;
}
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
return;
@@ -316,15 +333,13 @@ int uvcg_video_pump(struct uvc_video *video)
video->encode(req, video, buf);
/* Queue the USB request */
- ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
+ ret = uvcg_video_ep_queue(video, req);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
if (ret < 0) {
- printk(KERN_INFO "Failed to queue request (%d)\n", ret);
- usb_ep_set_halt(video->ep);
- spin_unlock_irqrestore(&queue->irqlock, flags);
uvcg_queue_cancel(queue, 0);
break;
}
- spin_unlock_irqrestore(&queue->irqlock, flags);
}
spin_lock_irqsave(&video->req_lock, flags);
@@ -342,8 +357,8 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
int ret;
if (video->ep == NULL) {
- printk(KERN_INFO "Video enable failed, device is "
- "uninitialized.\n");
+ uvcg_info(&video->uvc->func,
+ "Video enable failed, device is uninitialized.\n");
return -ENODEV;
}
@@ -375,11 +390,12 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
/*
* Initialize the UVC video stream.
*/
-int uvcg_video_init(struct uvc_video *video)
+int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
INIT_LIST_HEAD(&video->req_free);
spin_lock_init(&video->req_lock);
+ video->uvc = uvc;
video->fcc = V4L2_PIX_FMT_YUYV;
video->bpp = 16;
video->width = 320;
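Both queueing sites above now release irqlock before reacting to a failed submission, because uvcg_queue_cancel() acquires the queue's lock itself. The general shape — record the failure under the lock, act on it after unlocking — in a runnable userspace sketch (pthread mutexes standing in for the kernel spinlock; all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int queue_request(int req)      /* stand-in for usb_ep_queue() */
{
	return req < 0 ? -1 : 0;
}

static void cancel_queue(void)         /* takes the same lock internally */
{
	pthread_mutex_lock(&lock);
	puts("queue cancelled");
	pthread_mutex_unlock(&lock);
}

static void submit(int req)
{
	int ret;

	pthread_mutex_lock(&lock);
	ret = queue_request(req);
	pthread_mutex_unlock(&lock);

	/* Calling cancel_queue() before unlocking would self-deadlock. */
	if (ret < 0)
		cancel_queue();
}

int main(void)
{
	submit(1);
	submit(-1);
	return 0;
}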
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index 7d77122b0ff9..278dc52c7604 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -18,6 +18,6 @@ int uvcg_video_pump(struct uvc_video *video);
int uvcg_video_enable(struct uvc_video *video, int enable);
-int uvcg_video_init(struct uvc_video *video);
+int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc);
#endif /* __UVC_VIDEO_H__ */
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 5939eb1e97f2..4a28e3fbeb0b 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -353,7 +353,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
/* Endpoint enabled ? */
if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
!ep->dev->enabled || ep->dev->suspended) {
- EPDBG(ep,"Enqueing request on wrong or disabled EP\n");
+ EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
return -ESHUTDOWN;
}
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 17147b8c771e..11247322d587 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2004,7 +2004,6 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
struct usba_udc *udc)
{
u32 val;
- const char *name;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
struct device_node *pp;
@@ -2018,6 +2017,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
udc->errata = match->data;
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
if (IS_ERR(udc->pmc))
+ udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
+ if (IS_ERR(udc->pmc))
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
if (udc->errata && IS_ERR(udc->pmc))
return ERR_CAST(udc->pmc);
@@ -2094,11 +2095,6 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
- ret = of_property_read_string(pp, "name", &name);
- if (ret) {
- dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
- goto err;
- }
sprintf(ep->name, "ep%d", ep->index);
ep->ep.name = ep->name;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index af88b48c1cea..87d6b12779f2 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -690,6 +690,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_connect);
* as a disconnect (when a VBUS session is active). Not all systems
* support software pullup controls.
*
+ * Following a successful disconnect, invoke the ->disconnect() callback
+ * for the current gadget driver so that UDC drivers don't need to.
+ *
* Returns zero on success, else negative errno.
*/
int usb_gadget_disconnect(struct usb_gadget *gadget)
@@ -711,8 +714,10 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
}
ret = gadget->ops->pullup(gadget, 0);
- if (!ret)
+ if (!ret) {
gadget->connected = 0;
+ gadget->udc->driver->disconnect(gadget);
+ }
out:
trace_usb_gadget_disconnect(gadget, ret);
@@ -1281,7 +1286,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
usb_gadget_disconnect(udc->gadget);
- udc->driver->disconnect(udc->gadget);
udc->driver->unbind(udc->gadget);
usb_gadget_udc_stop(udc);
@@ -1471,7 +1475,6 @@ static ssize_t soft_connect_store(struct device *dev,
usb_gadget_connect(udc->gadget);
} else if (sysfs_streq(buf, "disconnect")) {
usb_gadget_disconnect(udc->gadget);
- udc->driver->disconnect(udc->gadget);
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 587c5037ff07..bc6abaea907d 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -741,7 +741,7 @@ static void fotg210_get_status(struct fotg210_udc *fotg210,
fotg210->ep0_req->length = 2;
spin_unlock(&fotg210->lock);
- fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_KERNEL);
+ fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC);
spin_lock(&fotg210->lock);
}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index be59309e848c..20141c3096f6 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2234,8 +2234,10 @@ static void fsl_udc_release(struct device *dev)
Internal structure setup functions
*******************************************************************/
/*------------------------------------------------------------------
- * init resource for globle controller
- * Return the udc handle on success or NULL on failure
+ * Initialize resources for the global controller; called by fsl_udc_probe().
+ * On success the udc handle is fully initialized; on failure any
+ * partially-allocated resources are freed and the handle is left reset.
+ * Return 0 on success and -1 on allocation failure
------------------------------------------------------------------*/
static int struct_udc_setup(struct fsl_udc *udc,
struct platform_device *pdev)
@@ -2247,8 +2249,10 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->phy_mode = pdata->phy_mode;
udc->eps = kcalloc(udc->max_ep, sizeof(struct fsl_ep), GFP_KERNEL);
- if (!udc->eps)
- return -1;
+ if (!udc->eps) {
+ ERR("kmalloc udc endpoint status failed\n");
+ goto eps_alloc_failed;
+ }
/* initialized QHs, take care of alignment */
size = udc->max_ep * sizeof(struct ep_queue_head);
@@ -2262,8 +2266,7 @@ static int struct_udc_setup(struct fsl_udc *udc,
&udc->ep_qh_dma, GFP_KERNEL);
if (!udc->ep_qh) {
ERR("malloc QHs for udc failed\n");
- kfree(udc->eps);
- return -1;
+ goto ep_queue_alloc_failed;
}
udc->ep_qh_size = size;
@@ -2272,8 +2275,17 @@ static int struct_udc_setup(struct fsl_udc *udc,
/* FIXME: fsl_alloc_request() ignores ep argument */
udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
struct fsl_req, req);
+ if (!udc->status_req) {
+ ERR("kzalloc for udc status request failed\n");
+ goto udc_status_alloc_failed;
+ }
+
/* allocate a small amount of memory to get valid address */
udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
+ if (!udc->status_req->req.buf) {
+ ERR("kzalloc for udc request buffer failed\n");
+ goto udc_req_buf_alloc_failed;
+ }
udc->resume_state = USB_STATE_NOTATTACHED;
udc->usb_state = USB_STATE_POWERED;
@@ -2281,6 +2293,18 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->remote_wakeup = 0; /* default to 0 on reset */
return 0;
+
+udc_req_buf_alloc_failed:
+ kfree(udc->status_req);
+udc_status_alloc_failed:
+ kfree(udc->ep_qh);
+ udc->ep_qh_size = 0;
+ep_queue_alloc_failed:
+ kfree(udc->eps);
+eps_alloc_failed:
+ udc->phy_mode = 0;
+ return -1;
+
}
/*----------------------------------------------------------------
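The error paths above form the usual goto unwind ladder: each label frees exactly what the steps before it allocated, in reverse order. The idiom in a self-contained userspace form (names are illustrative, not patch code):

#include <stdlib.h>

struct ctx {
	void *eps;
	void *qh;
	void *req;
};

static int ctx_setup(struct ctx *c)
{
	c->eps = malloc(64);
	if (!c->eps)
		goto eps_failed;
	c->qh = malloc(128);
	if (!c->qh)
		goto qh_failed;
	c->req = malloc(8);
	if (!c->req)
		goto req_failed;
	return 0;

req_failed:
	free(c->qh);
qh_failed:
	free(c->eps);
eps_failed:
	return -1;
}

int main(void)
{
	struct ctx c = { NULL, NULL, NULL };

	if (ctx_setup(&c) == 0) {
		free(c.req);
		free(c.qh);
		free(c.eps);
	}
	return 0;
}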
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 95f52232493b..cafde053788b 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -185,7 +185,7 @@ static int process_ep_req(struct mv_udc *udc, int index,
else
bit_pos = 1 << (16 + curr_req->ep->ep_num);
- while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
+ while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
while (readl(&udc->op_regs->epstatus) & bit_pos)
udelay(1);
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index b02ab2a8d927..e7dae5379e04 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1550,9 +1550,6 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
spin_unlock_irqrestore(&dev->lock, flags);
- if (!is_on && dev->driver)
- dev->driver->disconnect(&dev->gadget);
-
return 0;
}
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index e1656f361e08..cdffbd1e0316 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2437,6 +2437,9 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
else
usb3->forced_b_device = false;
+ if (usb3->workaround_for_vbus)
+ usb3_disconnect(usb3);
+
/* Let this driver call usb3_connect() anyway */
usb3_check_id(usb3);
@@ -2600,6 +2603,13 @@ static const struct renesas_usb3_priv renesas_usb3_priv_gen3 = {
.ramsize_per_pipe = SZ_4K,
};
+static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
+ .ramsize_per_ramif = SZ_16K,
+ .num_ramif = 4,
+ .ramsize_per_pipe = SZ_4K,
+ .workaround_for_vbus = true,
+};
+
static const struct of_device_id usb3_of_match[] = {
{
.compatible = "renesas,r8a7795-usb3-peri",
@@ -2618,6 +2628,10 @@ static const struct soc_device_attribute renesas_usb3_quirks_match[] = {
.soc_id = "r8a7795", .revision = "ES1.*",
.data = &renesas_usb3_priv_r8a7795_es1,
},
+ {
+ .soc_id = "r8a77990",
+ .data = &renesas_usb3_priv_r8a77990,
+ },
{ /* sentinel */ },
};
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 6407e433bc78..b1f4104d1283 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1078,7 +1078,7 @@ static int xudc_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
unsigned long flags;
if (!ep->desc) {
- dev_dbg(udc->dev, "%s:queing request to disabled %s\n",
+ dev_dbg(udc->dev, "%s: queuing request to disabled %s\n",
__func__, ep->name);
return -ESHUTDOWN;
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 1a4ea98cac2a..16758b12a5e9 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -276,7 +276,7 @@ config USB_EHCI_EXYNOS
Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
- bool "EHCI support for Marvell PXA/MMP USB controller"
+ tristate "EHCI support for Marvell PXA/MMP USB controller"
depends on (ARCH_PXA || ARCH_MMP)
select USB_EHCI_ROOT_HUB_TT
---help---
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index e6235269c151..84514f71ae44 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
obj-$(CONFIG_USB_FSL_USB2) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_EHCI_FSL) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o
+obj-$(CONFIG_USB_EHCI_MV) += ehci-mv.o
obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 8608ac513fb7..cdafa97f632d 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -730,9 +730,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0))
- COUNT (ehci->stats.normal);
+ INCR(ehci->stats.normal);
else
- COUNT (ehci->stats.error);
+ INCR(ehci->stats.error);
bh = 1;
}
@@ -756,7 +756,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
if (cmd & CMD_IAAD)
ehci_dbg(ehci, "IAA with IAAD still set?\n");
if (ehci->iaa_in_progress)
- COUNT(ehci->stats.iaa);
+ INCR(ehci->stats.iaa);
end_iaa_cycle(ehci);
}
@@ -1286,11 +1286,6 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_grlib_driver
#endif
-#ifdef CONFIG_USB_EHCI_MV
-#include "ehci-mv.c"
-#define PLATFORM_DRIVER ehci_mv_driver
-#endif
-
static int __init ehci_hcd_init(void)
{
int retval = 0;
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index de764459e05a..f26109eafdbf 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -12,24 +12,33 @@
#include <linux/err.h>
#include <linux/usb/otg.h>
#include <linux/platform_data/mv_usb.h>
+#include <linux/io.h>
+
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+/* registers */
+#define U2x_CAPREGS_OFFSET 0x100
#define CAPLENGTH_MASK (0xff)
-struct ehci_hcd_mv {
- struct usb_hcd *hcd;
+#define hcd_to_ehci_hcd_mv(h) ((struct ehci_hcd_mv *)hcd_to_ehci(h)->priv)
+struct ehci_hcd_mv {
/* Which mode does this ehci running OTG/Host ? */
int mode;
- void __iomem *phy_regs;
+ void __iomem *base;
void __iomem *cap_regs;
void __iomem *op_regs;
struct usb_phy *otg;
+ struct clk *clk;
- struct mv_usb_platform_data *pdata;
+ struct phy *phy;
- struct clk *clk;
+ int (*set_vbus)(unsigned int vbus);
};
static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
@@ -44,29 +53,20 @@ static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
{
- int retval;
-
ehci_clock_enable(ehci_mv);
- if (ehci_mv->pdata->phy_init) {
- retval = ehci_mv->pdata->phy_init(ehci_mv->phy_regs);
- if (retval)
- return retval;
- }
-
- return 0;
+ return phy_init(ehci_mv->phy);
}
static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
{
- if (ehci_mv->pdata->phy_deinit)
- ehci_mv->pdata->phy_deinit(ehci_mv->phy_regs);
+ phy_exit(ehci_mv->phy);
ehci_clock_disable(ehci_mv);
}
static int mv_ehci_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
- struct ehci_hcd_mv *ehci_mv = dev_get_drvdata(dev);
+ struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
int retval;
if (ehci_mv == NULL) {
@@ -83,46 +83,11 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
return retval;
}
-static const struct hc_driver mv_ehci_hc_driver = {
- .description = hcd_name,
- .product_desc = "Marvell EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
-
- /*
- * basic lifecycle operations
- */
- .reset = mv_ehci_reset,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
+static struct hc_driver __read_mostly ehci_platform_hc_driver;
+
+static const struct ehci_driver_overrides platform_overrides __initconst = {
+ .reset = mv_ehci_reset,
+ .extra_priv_size = sizeof(struct ehci_hcd_mv),
};
static int mv_ehci_probe(struct platform_device *pdev)
@@ -135,27 +100,29 @@ static int mv_ehci_probe(struct platform_device *pdev)
int retval = -ENODEV;
u32 offset;
- if (!pdata) {
- dev_err(&pdev->dev, "missing platform_data\n");
- return -ENODEV;
- }
-
if (usb_disabled())
return -ENODEV;
- hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
+ hcd = usb_create_hcd(&ehci_platform_hc_driver, &pdev->dev, "mv ehci");
if (!hcd)
return -ENOMEM;
- ehci_mv = devm_kzalloc(&pdev->dev, sizeof(*ehci_mv), GFP_KERNEL);
- if (ehci_mv == NULL) {
- retval = -ENOMEM;
- goto err_put_hcd;
+ platform_set_drvdata(pdev, hcd);
+ ehci_mv = hcd_to_ehci_hcd_mv(hcd);
+
+ ehci_mv->mode = MV_USB_MODE_HOST;
+ if (pdata) {
+ ehci_mv->mode = pdata->mode;
+ ehci_mv->set_vbus = pdata->set_vbus;
}
- platform_set_drvdata(pdev, ehci_mv);
- ehci_mv->pdata = pdata;
- ehci_mv->hcd = hcd;
+ ehci_mv->phy = devm_phy_get(&pdev->dev, "usb");
+ if (IS_ERR(ehci_mv->phy)) {
+ retval = PTR_ERR(ehci_mv->phy);
+ if (retval != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get phy.\n");
+ goto err_put_hcd;
+ }
ehci_mv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ehci_mv->clk)) {
@@ -164,17 +131,12 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
- ehci_mv->phy_regs = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(ehci_mv->phy_regs)) {
- retval = PTR_ERR(ehci_mv->phy_regs);
- goto err_put_hcd;
- }
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
- ehci_mv->cap_regs = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(ehci_mv->cap_regs)) {
- retval = PTR_ERR(ehci_mv->cap_regs);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ehci_mv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ehci_mv->base)) {
+ retval = PTR_ERR(ehci_mv->base);
goto err_put_hcd;
}
@@ -184,6 +146,8 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
+ ehci_mv->cap_regs =
+ (void __iomem *) ((unsigned long) ehci_mv->base + U2x_CAPREGS_OFFSET);
offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
ehci_mv->op_regs =
(void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
@@ -202,7 +166,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
ehci = hcd_to_ehci(hcd);
ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
- ehci_mv->mode = pdata->mode;
if (ehci_mv->mode == MV_USB_MODE_OTG) {
ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(ehci_mv->otg)) {
@@ -227,8 +190,8 @@ static int mv_ehci_probe(struct platform_device *pdev)
/* otg will enable clock before use as host */
mv_ehci_disable(ehci_mv);
} else {
- if (pdata->set_vbus)
- pdata->set_vbus(1);
+ if (ehci_mv->set_vbus)
+ ehci_mv->set_vbus(1);
retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
if (retval) {
@@ -239,9 +202,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
device_wakeup_enable(hcd->self.controller);
}
- if (pdata->private_init)
- pdata->private_init(ehci_mv->op_regs, ehci_mv->phy_regs);
-
dev_info(&pdev->dev,
"successful find EHCI device with regs 0x%p irq %d"
" working in %s mode\n", hcd->regs, hcd->irq,
@@ -250,8 +210,8 @@ static int mv_ehci_probe(struct platform_device *pdev)
return 0;
err_set_vbus:
- if (pdata->set_vbus)
- pdata->set_vbus(0);
+ if (ehci_mv->set_vbus)
+ ehci_mv->set_vbus(0);
err_disable_clk:
mv_ehci_disable(ehci_mv);
err_put_hcd:
@@ -262,8 +222,8 @@ err_put_hcd:
static int mv_ehci_remove(struct platform_device *pdev)
{
- struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_mv->hcd;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
if (hcd->rh_registered)
usb_remove_hcd(hcd);
@@ -272,8 +232,8 @@ static int mv_ehci_remove(struct platform_device *pdev)
otg_set_host(ehci_mv->otg->otg, NULL);
if (ehci_mv->mode == MV_USB_MODE_HOST) {
- if (ehci_mv->pdata->set_vbus)
- ehci_mv->pdata->set_vbus(0);
+ if (ehci_mv->set_vbus)
+ ehci_mv->set_vbus(0);
mv_ehci_disable(ehci_mv);
}
@@ -295,8 +255,7 @@ static const struct platform_device_id ehci_id_table[] = {
static void mv_ehci_shutdown(struct platform_device *pdev)
{
- struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_mv->hcd;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
if (!hcd->rh_registered)
return;
@@ -305,13 +264,41 @@ static void mv_ehci_shutdown(struct platform_device *pdev)
hcd->driver->shutdown(hcd);
}
+static const struct of_device_id ehci_mv_dt_ids[] = {
+ { .compatible = "marvell,pxau2o-ehci", },
+ {},
+};
+
static struct platform_driver ehci_mv_driver = {
.probe = mv_ehci_probe,
.remove = mv_ehci_remove,
.shutdown = mv_ehci_shutdown,
.driver = {
- .name = "mv-ehci",
- .bus = &platform_bus_type,
- },
+ .name = "mv-ehci",
+ .bus = &platform_bus_type,
+ .of_match_table = ehci_mv_dt_ids,
+ },
.id_table = ehci_id_table,
};
+
+static int __init ehci_platform_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
+ return platform_driver_register(&ehci_mv_driver);
+}
+module_init(ehci_platform_init);
+
+static void __exit ehci_platform_cleanup(void)
+{
+ platform_driver_unregister(&ehci_mv_driver);
+}
+module_exit(ehci_platform_cleanup);
+
+MODULE_DESCRIPTION("Marvell EHCI driver");
+MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
+MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>");
+MODULE_ALIAS("mv-ehci");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 327630405695..aa2f77f1506d 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -245,12 +245,12 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
}
if (unlikely(urb->unlinked)) {
- COUNT(ehci->stats.unlink);
+ INCR(ehci->stats.unlink);
} else {
/* report non-error and short read status as zero */
if (status == -EINPROGRESS || status == -EREMOTEIO)
status = 0;
- COUNT(ehci->stats.complete);
+ INCR(ehci->stats.complete);
}
#ifdef EHCI_URB_TRACE
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 4fcebda4b79d..a79c8ac0a55f 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -347,7 +347,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
*/
status = ehci_readl(ehci, &ehci->regs->status);
if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
- COUNT(ehci->stats.lost_iaa);
+ INCR(ehci->stats.lost_iaa);
ehci_writel(ehci, STS_IAA, &ehci->regs->status);
}
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index c8e9a48e1d51..ac5e967907d1 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -235,9 +235,9 @@ struct ehci_hcd { /* one per controller */
/* irq statistics */
#ifdef EHCI_STATS
struct ehci_stats stats;
-# define COUNT(x) ((x)++)
+# define INCR(x) ((x)++)
#else
-# define COUNT(x)
+# define INCR(x) do {} while (0)
#endif
/* debug files */
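Beyond the rename, the no-op variant becomes do {} while (0) instead of expanding to nothing: the do/while form is always exactly one well-formed statement, whereas an empty expansion leaves a bare `;' behind that gcc flags with -Wempty-body and that silently disappears if the trailing semicolon is ever dropped. A small userspace demonstration (macro names are illustrative):

#include <stdio.h>

#define INCR_EMPTY(x)
#define INCR_SAFE(x) do {} while (0)

int main(void)
{
	int hits = 0;

	if (hits)
		INCR_SAFE(hits);   /* one well-formed statement */
	else
		printf("no hits\n");

	if (hits)
		INCR_EMPTY(hits);  /* expands to `;' -- -Wempty-body */

	return 0;
}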
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index e64eb47770c8..0da68df259c8 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -31,6 +31,7 @@
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
@@ -1285,7 +1286,7 @@ static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
*/
status = fotg210_readl(fotg210, &fotg210->regs->status);
if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
- COUNT(fotg210->stats.lost_iaa);
+ INCR(fotg210->stats.lost_iaa);
fotg210_writel(fotg210, STS_IAA,
&fotg210->regs->status);
}
@@ -2204,12 +2205,12 @@ __acquires(fotg210->lock)
}
if (unlikely(urb->unlinked)) {
- COUNT(fotg210->stats.unlink);
+ INCR(fotg210->stats.unlink);
} else {
/* report non-error and short read status as zero */
if (status == -EINPROGRESS || status == -EREMOTEIO)
status = 0;
- COUNT(fotg210->stats.complete);
+ INCR(fotg210->stats.complete);
}
#ifdef FOTG210_URB_TRACE
@@ -5153,9 +5154,9 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely((status & (STS_INT|STS_ERR)) != 0)) {
if (likely((status & STS_ERR) == 0))
- COUNT(fotg210->stats.normal);
+ INCR(fotg210->stats.normal);
else
- COUNT(fotg210->stats.error);
+ INCR(fotg210->stats.error);
bh = 1;
}
@@ -5180,7 +5181,7 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
if (cmd & CMD_IAAD)
fotg210_dbg(fotg210, "IAA with IAAD still set?\n");
if (fotg210->async_iaa) {
- COUNT(fotg210->stats.iaa);
+ INCR(fotg210->stats.iaa);
end_unlink_async(fotg210);
} else
fotg210_dbg(fotg210, "IAA with nothing unlinked?\n");
@@ -5596,7 +5597,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
- goto failed;
+ goto failed_put_hcd;
}
hcd->rsrc_start = res->start;
@@ -5606,22 +5607,43 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
fotg210->caps = hcd->regs;
+ /* It's OK not to supply this clock */
+ fotg210->pclk = clk_get(dev, "PCLK");
+ if (!IS_ERR(fotg210->pclk)) {
+ retval = clk_prepare_enable(fotg210->pclk);
+ if (retval) {
+ dev_err(dev, "failed to enable PCLK\n");
+ goto failed_put_hcd;
+ }
+ } else if (PTR_ERR(fotg210->pclk) == -EPROBE_DEFER) {
+ /*
+ * Percolate deferrals, for anything else,
+ * just live without the clocking.
+ */
+ retval = PTR_ERR(fotg210->pclk);
+ goto failed_dis_clk;
+ }
+
retval = fotg210_setup(hcd);
if (retval)
- goto failed;
+ goto failed_dis_clk;
fotg210_init(fotg210);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval) {
dev_err(dev, "failed to add hcd with err %d\n", retval);
- goto failed;
+ goto failed_dis_clk;
}
device_wakeup_enable(hcd->self.controller);
+ platform_set_drvdata(pdev, hcd);
return retval;
-failed:
+failed_dis_clk:
+ if (!IS_ERR(fotg210->pclk))
+ clk_disable_unprepare(fotg210->pclk);
+failed_put_hcd:
usb_put_hcd(hcd);
fail_create_hcd:
dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
@@ -5635,11 +5657,11 @@ fail_create_hcd:
*/
static int fotg210_hcd_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- if (!hcd)
- return 0;
+ if (!IS_ERR(fotg210->pclk))
+ clk_disable_unprepare(fotg210->pclk);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
index 7fcd785c7bc8..1b4db95e5c43 100644
--- a/drivers/usb/host/fotg210.h
+++ b/drivers/usb/host/fotg210.h
@@ -177,11 +177,14 @@ struct fotg210_hcd { /* one per controller */
/* irq statistics */
#ifdef FOTG210_STATS
struct fotg210_stats stats;
-# define COUNT(x) ((x)++)
+# define INCR(x) ((x)++)
#else
-# define COUNT(x)
+# define INCR(x) do {} while (0)
#endif
+ /* silicon clock */
+ struct clk *pclk;
+
/* debug files */
struct dentry *debug_dir;
};
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index e98673954020..ec6739ef3129 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -551,6 +551,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
pdata->overcurrent_pin[i] =
devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
i, GPIOD_IN);
+ if (!pdata->overcurrent_pin[i])
+ continue;
if (IS_ERR(pdata->overcurrent_pin[i])) {
err = PTR_ERR(pdata->overcurrent_pin[i]);
dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 3625a5c1a41b..3ce71cbfbb58 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -783,15 +783,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
/* disable interrupts */
writel((u32) ~0, base + OHCI_INTRDISABLE);
- /* Reset the USB bus, if the controller isn't already in RESET */
- if (control & OHCI_HCFS) {
- /* Go into RESET, preserving RWC (and possibly IR) */
- writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
- readl(base + OHCI_CONTROL);
-
- /* drive bus reset for at least 50 ms (7.1.7.5) */
- msleep(50);
- }
+ /* Go into the USB_RESET state, preserving RWC (and possibly IR) */
+ writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+ readl(base + OHCI_CONTROL);
/* software reset of the controller, preserving HcFmInterval */
if (!no_fminterval)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 7e2a531ba321..12eea73d9f20 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -900,6 +900,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
set_bit(wIndex, &bus_state->resuming_ports);
bus_state->resume_done[wIndex] = timeout;
mod_timer(&hcd->rh_timer, timeout);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
}
/* Has resume been signalled for USB_RESUME_TIME yet? */
} else if (time_after_eq(jiffies,
@@ -940,6 +941,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
clear_bit(wIndex, &bus_state->rexit_ports);
}
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
bus_state->port_c_suspend |= 1 << wIndex;
bus_state->suspended_ports &= ~(1 << wIndex);
} else {
@@ -962,6 +964,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
(raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
bus_state->resume_done[wIndex] = 0;
clear_bit(wIndex, &bus_state->resuming_ports);
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
}
@@ -1337,6 +1340,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
set_bit(wIndex, &bus_state->resuming_ports);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
xhci_set_link_state(xhci, ports[wIndex],
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1345,6 +1349,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, ports[wIndex],
XDEV_U0);
clear_bit(wIndex, &bus_state->resuming_ports);
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
}
bus_state->port_c_suspend |= 1 << wIndex;
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index fa33d6e5b1cb..fea555570ad4 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -13,14 +13,20 @@
#include "xhci.h"
#include "xhci-mtk.h"
+#define SSP_BW_BOUNDARY 130000
#define SS_BW_BOUNDARY 51000
/* table 5-5. High-speed Isoc Transaction Limits in usb_20 spec */
#define HS_BW_BOUNDARY 6144
/* usb2 spec section11.18.1: at most 188 FS bytes per microframe */
#define FS_PAYLOAD_MAX 188
+/*
+ * max number of microframes for split transfer,
+ * for fs isoc in : 1 ss + 1 idle + 7 cs
+ */
+#define TT_MICROFRAMES_MAX 9
/* mtk scheduler bitmasks */
-#define EP_BPKTS(p) ((p) & 0x3f)
+#define EP_BPKTS(p) ((p) & 0x7f)
#define EP_BCSCOUNT(p) (((p) & 0x7) << 8)
#define EP_BBM(p) ((p) << 11)
#define EP_BOFFSET(p) ((p) & 0x3fff)
@@ -51,7 +57,7 @@ static int get_bw_index(struct xhci_hcd *xhci, struct usb_device *udev,
virt_dev = xhci->devs[udev->slot_id];
- if (udev->speed == USB_SPEED_SUPER) {
+ if (udev->speed >= USB_SPEED_SUPER) {
if (usb_endpoint_dir_out(&ep->desc))
bw_index = (virt_dev->real_port - 1) * 2;
else
@@ -64,25 +70,167 @@ static int get_bw_index(struct xhci_hcd *xhci, struct usb_device *udev,
return bw_index;
}
+static u32 get_esit(struct xhci_ep_ctx *ep_ctx)
+{
+ u32 esit;
+
+ esit = 1 << CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
+ if (esit > XHCI_MTK_MAX_ESIT)
+ esit = XHCI_MTK_MAX_ESIT;
+
+ return esit;
+}
+
+static struct mu3h_sch_tt *find_tt(struct usb_device *udev)
+{
+ struct usb_tt *utt = udev->tt;
+ struct mu3h_sch_tt *tt, **tt_index, **ptt;
+ unsigned int port;
+ bool allocated_index = false;
+
+ if (!utt)
+ return NULL; /* Not below a TT */
+
+ /*
+ * Find/create our data structure.
+ * For hubs with a single TT, we get it directly.
+ * For hubs with multiple TTs, there's an extra level of pointers.
+ */
+ tt_index = NULL;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ if (!tt_index) { /* Create the index array */
+ tt_index = kcalloc(utt->hub->maxchild,
+ sizeof(*tt_index), GFP_KERNEL);
+ if (!tt_index)
+ return ERR_PTR(-ENOMEM);
+ utt->hcpriv = tt_index;
+ allocated_index = true;
+ }
+ port = udev->ttport - 1;
+ ptt = &tt_index[port];
+ } else {
+ port = 0;
+ ptt = (struct mu3h_sch_tt **) &utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt) { /* Create the mu3h_sch_tt */
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt) {
+ if (allocated_index) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
+ }
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&tt->ep_list);
+ tt->usb_tt = utt;
+ tt->tt_port = port;
+ *ptt = tt;
+ }
+
+ return tt;
+}
+
+/* Release the TT above udev, if it's not in use */
+static void drop_tt(struct usb_device *udev)
+{
+ struct usb_tt *utt = udev->tt;
+ struct mu3h_sch_tt *tt, **tt_index, **ptt;
+ int i, cnt;
+
+ if (!utt || !utt->hcpriv)
+ return; /* Not below a TT, or never allocated */
+
+ cnt = 0;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ ptt = &tt_index[udev->ttport - 1];
+ /* How many entries are left in tt_index? */
+ for (i = 0; i < utt->hub->maxchild; ++i)
+ cnt += !!tt_index[i];
+ } else {
+ tt_index = NULL;
+ ptt = (struct mu3h_sch_tt **)&utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt || !list_empty(&tt->ep_list))
+ return; /* never allocated, or still in use */
+
+ *ptt = NULL;
+ kfree(tt);
+
+ if (cnt == 1) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
+ }
+}
+
+static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
+ struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
+{
+ struct mu3h_sch_ep_info *sch_ep;
+ struct mu3h_sch_tt *tt = NULL;
+ u32 len_bw_budget_table;
+ size_t mem_size;
+
+ if (is_fs_or_ls(udev->speed))
+ len_bw_budget_table = TT_MICROFRAMES_MAX;
+ else if ((udev->speed >= USB_SPEED_SUPER)
+ && usb_endpoint_xfer_isoc(&ep->desc))
+ len_bw_budget_table = get_esit(ep_ctx);
+ else
+ len_bw_budget_table = 1;
+
+ mem_size = sizeof(struct mu3h_sch_ep_info) +
+ len_bw_budget_table * sizeof(u32);
+ sch_ep = kzalloc(mem_size, GFP_KERNEL);
+ if (!sch_ep)
+ return ERR_PTR(-ENOMEM);
+
+ if (is_fs_or_ls(udev->speed)) {
+ tt = find_tt(udev);
+ if (IS_ERR(tt)) {
+ kfree(sch_ep);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ sch_ep->sch_tt = tt;
+ sch_ep->ep = ep;
+
+ return sch_ep;
+}
+
static void setup_sch_info(struct usb_device *udev,
struct xhci_ep_ctx *ep_ctx, struct mu3h_sch_ep_info *sch_ep)
{
u32 ep_type;
- u32 ep_interval;
- u32 max_packet_size;
+ u32 maxpkt;
u32 max_burst;
u32 mult;
u32 esit_pkts;
+ u32 max_esit_payload;
+ u32 *bwb_table = sch_ep->bw_budget_table;
+ int i;
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
- ep_interval = CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
- max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
+ maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2));
mult = CTX_TO_EP_MULT(le32_to_cpu(ep_ctx->ep_info));
-
- sch_ep->esit = 1 << ep_interval;
+ max_esit_payload =
+ (CTX_TO_MAX_ESIT_PAYLOAD_HI(
+ le32_to_cpu(ep_ctx->ep_info)) << 16) |
+ CTX_TO_MAX_ESIT_PAYLOAD(le32_to_cpu(ep_ctx->tx_info));
+
+ sch_ep->esit = get_esit(ep_ctx);
+ sch_ep->ep_type = ep_type;
+ sch_ep->maxpkt = maxpkt;
sch_ep->offset = 0;
sch_ep->burst_mode = 0;
+ sch_ep->repeat = 0;
if (udev->speed == USB_SPEED_HIGH) {
sch_ep->cs_count = 0;
@@ -93,7 +241,6 @@ static void setup_sch_info(struct usb_device *udev,
* in an interval
*/
sch_ep->num_budget_microframes = 1;
- sch_ep->repeat = 0;
/*
* xHCI spec section 6.2.3.4
@@ -101,19 +248,33 @@ static void setup_sch_info(struct usb_device *udev,
* opportunities per microframe
*/
sch_ep->pkts = max_burst + 1;
- sch_ep->bw_cost_per_microframe = max_packet_size * sch_ep->pkts;
- } else if (udev->speed == USB_SPEED_SUPER) {
+ sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
+ bwb_table[0] = sch_ep->bw_cost_per_microframe;
+ } else if (udev->speed >= USB_SPEED_SUPER) {
/* usb3_r1 spec sections 4.4.7 & 4.4.8 */
sch_ep->cs_count = 0;
- esit_pkts = (mult + 1) * (max_burst + 1);
+ sch_ep->burst_mode = 1;
+ /*
+ * some devices set (d)wBytesPerInterval to 0, which makes
+ * max_esit_payload 0, so derive esit_pkts from mult and
+ * burst instead
+ */
+ esit_pkts = DIV_ROUND_UP(max_esit_payload, maxpkt);
+ if (esit_pkts == 0)
+ esit_pkts = (mult + 1) * (max_burst + 1);
+
if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
sch_ep->pkts = esit_pkts;
sch_ep->num_budget_microframes = 1;
- sch_ep->repeat = 0;
+ bwb_table[0] = maxpkt * sch_ep->pkts;
}
if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
- if (esit_pkts <= sch_ep->esit)
+ u32 remainder;
+
+ if (sch_ep->esit == 1)
+ sch_ep->pkts = esit_pkts;
+ else if (esit_pkts <= sch_ep->esit)
sch_ep->pkts = 1;
else
sch_ep->pkts = roundup_pow_of_two(esit_pkts)
@@ -122,43 +283,48 @@ static void setup_sch_info(struct usb_device *udev,
sch_ep->num_budget_microframes =
DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
- if (sch_ep->num_budget_microframes > 1)
- sch_ep->repeat = 1;
- else
- sch_ep->repeat = 0;
+ sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
+ sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
+
+ remainder = sch_ep->bw_cost_per_microframe;
+ remainder *= sch_ep->num_budget_microframes;
+ remainder -= (maxpkt * esit_pkts);
+ for (i = 0; i < sch_ep->num_budget_microframes - 1; i++)
+ bwb_table[i] = sch_ep->bw_cost_per_microframe;
+
+ /* last one <= bw_cost_per_microframe */
+ bwb_table[i] = remainder;
}
- sch_ep->bw_cost_per_microframe = max_packet_size * sch_ep->pkts;
} else if (is_fs_or_ls(udev->speed)) {
+ sch_ep->pkts = 1; /* at most one packet for each microframe */
/*
- * usb_20 spec section11.18.4
- * assume worst cases
+ * num_budget_microframes and cs_count will be updated when
+ * checking the TT for INT_OUT_EP and ISOC/INT_IN_EP types
*/
- sch_ep->repeat = 0;
- sch_ep->pkts = 1; /* at most one packet for each microframe */
- if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
- sch_ep->cs_count = 3; /* at most need 3 CS*/
- /* one for SS and one for budgeted transaction */
- sch_ep->num_budget_microframes = sch_ep->cs_count + 2;
- sch_ep->bw_cost_per_microframe = max_packet_size;
- }
- if (ep_type == ISOC_OUT_EP) {
+ sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
+ sch_ep->num_budget_microframes = sch_ep->cs_count;
+ sch_ep->bw_cost_per_microframe =
+ (maxpkt < FS_PAYLOAD_MAX) ? maxpkt : FS_PAYLOAD_MAX;
+ /* init budget table */
+ if (ep_type == ISOC_OUT_EP) {
+ for (i = 0; i < sch_ep->num_budget_microframes; i++)
+ bwb_table[i] = sch_ep->bw_cost_per_microframe;
+ } else if (ep_type == INT_OUT_EP) {
+ /* only the first one consumes bandwidth; the others are zero */
+ bwb_table[0] = sch_ep->bw_cost_per_microframe;
+ } else { /* INT_IN_EP or ISOC_IN_EP */
+ bwb_table[0] = 0; /* start split */
+ bwb_table[1] = 0; /* idle */
/*
- * the best case FS budget assumes that 188 FS bytes
- * occur in each microframe
+ * because cs_count will be updated according to the CS
+ * position, fill all remaining budget array elements
+ * with @bw_cost_per_microframe; only the first
+ * @num_budget_microframes elements will be used later
*/
- sch_ep->num_budget_microframes = DIV_ROUND_UP(
- max_packet_size, FS_PAYLOAD_MAX);
- sch_ep->bw_cost_per_microframe = FS_PAYLOAD_MAX;
- sch_ep->cs_count = sch_ep->num_budget_microframes;
- }
- if (ep_type == ISOC_IN_EP) {
- /* at most need additional two CS. */
- sch_ep->cs_count = DIV_ROUND_UP(
- max_packet_size, FS_PAYLOAD_MAX) + 2;
- sch_ep->num_budget_microframes = sch_ep->cs_count + 2;
- sch_ep->bw_cost_per_microframe = FS_PAYLOAD_MAX;
+ for (i = 2; i < TT_MICROFRAMES_MAX; i++)
+ bwb_table[i] = sch_ep->bw_cost_per_microframe;
}
}
}
@@ -169,6 +335,7 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
{
u32 num_esit;
u32 max_bw = 0;
+ u32 bw;
int i;
int j;
@@ -177,15 +344,17 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
u32 base = offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
- if (sch_bw->bus_bw[base + j] > max_bw)
- max_bw = sch_bw->bus_bw[base + j];
+ bw = sch_bw->bus_bw[base + j] +
+ sch_ep->bw_budget_table[j];
+ if (bw > max_bw)
+ max_bw = bw;
}
}
return max_bw;
}
static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
- struct mu3h_sch_ep_info *sch_ep, int bw_cost)
+ struct mu3h_sch_ep_info *sch_ep, bool used)
{
u32 num_esit;
u32 base;
@@ -195,9 +364,105 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
for (i = 0; i < num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
+ for (j = 0; j < sch_ep->num_budget_microframes; j++) {
+ if (used)
+ sch_bw->bus_bw[base + j] +=
+ sch_ep->bw_budget_table[j];
+ else
+ sch_bw->bus_bw[base + j] -=
+ sch_ep->bw_budget_table[j];
+ }
+ }
+}
+
+static int check_sch_tt(struct usb_device *udev,
+ struct mu3h_sch_ep_info *sch_ep, u32 offset)
+{
+ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ u32 extra_cs_count;
+ u32 fs_budget_start;
+ u32 start_ss, last_ss;
+ u32 start_cs, last_cs;
+ int i;
+
+ start_ss = offset % 8;
+ fs_budget_start = (start_ss + 1) % 8;
+
+ if (sch_ep->ep_type == ISOC_OUT_EP) {
+ last_ss = start_ss + sch_ep->cs_count - 1;
+
+ /*
+ * usb_20 spec section 11.18:
+ * must never schedule Start-Split in Y6
+ */
+ if (!(start_ss == 7 || last_ss < 6))
+ return -ERANGE;
+
+ for (i = 0; i < sch_ep->cs_count; i++)
+ if (test_bit(offset + i, tt->split_bit_map))
+ return -ERANGE;
+
+ } else {
+ u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
+
+ /*
+ * usb_20 spec section 11.18:
+ * must never schedule Start-Split in Y6
+ */
+ if (start_ss == 6)
+ return -ERANGE;
+
+ /* one uframe for ss + one uframe for idle */
+ start_cs = (start_ss + 2) % 8;
+ last_cs = start_cs + cs_count - 1;
+
+ if (last_cs > 7)
+ return -ERANGE;
+
+ if (sch_ep->ep_type == ISOC_IN_EP)
+ extra_cs_count = (last_cs == 7) ? 1 : 2;
+ else /* ep_type : INTR IN / INTR OUT */
+ extra_cs_count = (fs_budget_start == 6) ? 1 : 2;
+
+ cs_count += extra_cs_count;
+ if (cs_count > 7)
+ cs_count = 7; /* HW limit */
+
+ for (i = 0; i < cs_count + 2; i++) {
+ if (test_bit(offset + i, tt->split_bit_map))
+ return -ERANGE;
+ }
+
+ sch_ep->cs_count = cs_count;
+ /* one for ss, the other for idle */
+ sch_ep->num_budget_microframes = cs_count + 2;
+
+ /*
+ * if interval == 1 and maxp > 752, num_budget_microframes
+ * is larger than sch_ep->esit and would overstep the boundary
+ */
+ if (sch_ep->num_budget_microframes > sch_ep->esit)
+ sch_ep->num_budget_microframes = sch_ep->esit;
+ }
+
+ return 0;
+}
+
+static void update_sch_tt(struct usb_device *udev,
+ struct mu3h_sch_ep_info *sch_ep)
+{
+ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ u32 base, num_esit;
+ int i, j;
+
+ num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
+ for (i = 0; i < num_esit; i++) {
+ base = sch_ep->offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++)
- sch_bw->bus_bw[base + j] += bw_cost;
+ set_bit(base + j, tt->split_bit_map);
}
+
+ list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
}
static int check_sch_bw(struct usb_device *udev,
@@ -205,17 +470,16 @@ static int check_sch_bw(struct usb_device *udev,
{
u32 offset;
u32 esit;
- u32 num_budget_microframes;
u32 min_bw;
u32 min_index;
u32 worst_bw;
u32 bw_boundary;
-
- if (sch_ep->esit > XHCI_MTK_MAX_ESIT)
- sch_ep->esit = XHCI_MTK_MAX_ESIT;
+ u32 min_num_budget;
+ u32 min_cs_count;
+ bool tt_offset_ok = false;
+ int ret;
esit = sch_ep->esit;
- num_budget_microframes = sch_ep->num_budget_microframes;
/*
* Search through all possible schedule microframes.
@@ -223,36 +487,56 @@ static int check_sch_bw(struct usb_device *udev,
*/
min_bw = ~0;
min_index = 0;
+ min_cs_count = sch_ep->cs_count;
+ min_num_budget = sch_ep->num_budget_microframes;
for (offset = 0; offset < esit; offset++) {
- if ((offset + num_budget_microframes) > sch_ep->esit)
- break;
+ if (is_fs_or_ls(udev->speed)) {
+ ret = check_sch_tt(udev, sch_ep, offset);
+ if (ret)
+ continue;
+ else
+ tt_offset_ok = true;
+ }
- /*
- * usb_20 spec section11.18:
- * must never schedule Start-Split in Y6
- */
- if (is_fs_or_ls(udev->speed) && (offset % 8 == 6))
- continue;
+ if ((offset + sch_ep->num_budget_microframes) > sch_ep->esit)
+ break;
worst_bw = get_max_bw(sch_bw, sch_ep, offset);
if (min_bw > worst_bw) {
min_bw = worst_bw;
min_index = offset;
+ min_cs_count = sch_ep->cs_count;
+ min_num_budget = sch_ep->num_budget_microframes;
}
if (min_bw == 0)
break;
}
- sch_ep->offset = min_index;
- bw_boundary = (udev->speed == USB_SPEED_SUPER)
- ? SS_BW_BOUNDARY : HS_BW_BOUNDARY;
+ if (udev->speed == USB_SPEED_SUPER_PLUS)
+ bw_boundary = SSP_BW_BOUNDARY;
+ else if (udev->speed == USB_SPEED_SUPER)
+ bw_boundary = SS_BW_BOUNDARY;
+ else
+ bw_boundary = HS_BW_BOUNDARY;
/* check bandwidth */
- if (min_bw + sch_ep->bw_cost_per_microframe > bw_boundary)
+ if (min_bw > bw_boundary)
return -ERANGE;
+ sch_ep->offset = min_index;
+ sch_ep->cs_count = min_cs_count;
+ sch_ep->num_budget_microframes = min_num_budget;
+
+ if (is_fs_or_ls(udev->speed)) {
+ /* no offset was usable for the TT */
+ if (!tt_offset_ok)
+ return -ERANGE;
+
+ update_sch_tt(udev, sch_ep);
+ }
+
/* update bus bandwidth info */
- update_bus_bw(sch_bw, sch_ep, sch_ep->bw_cost_per_microframe);
+ update_bus_bw(sch_bw, sch_ep, 1);
return 0;
}
@@ -347,8 +631,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
bw_index = get_bw_index(xhci, udev, ep);
sch_bw = &sch_array[bw_index];
- sch_ep = kzalloc(sizeof(struct mu3h_sch_ep_info), GFP_NOIO);
- if (!sch_ep)
+ sch_ep = create_sch_ep(udev, ep, ep_ctx);
+ if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
setup_sch_info(udev, ep_ctx, sch_ep);
@@ -356,12 +640,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
ret = check_sch_bw(udev, sch_bw, sch_ep);
if (ret) {
xhci_err(xhci, "Not enough bandwidth!\n");
+ if (is_fs_or_ls(udev->speed))
+ drop_tt(udev);
+
kfree(sch_ep);
return -ENOSPC;
}
list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
- sch_ep->ep = ep;
ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
| EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
@@ -406,9 +692,12 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
if (sch_ep->ep == ep) {
- update_bus_bw(sch_bw, sch_ep,
- -sch_ep->bw_cost_per_microframe);
+ update_bus_bw(sch_bw, sch_ep, 0);
list_del(&sch_ep->endpoint);
+ if (is_fs_or_ls(udev->speed)) {
+ list_del(&sch_ep->tt_endpoint);
+ drop_tt(udev);
+ }
kfree(sch_ep);
break;
}
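Taken together, the check_sch_bw() rework turns scheduling into a search: for every candidate start uframe, compute the worst-case bus load using the per-uframe budget table, and keep the offset with the smallest worst case. A standalone sketch of that search (plain C; sizes and the boundary are illustrative, and it assumes esit divides MAX_ESIT, as the power-of-two values here do):

	#include <stdint.h>

	#define MAX_ESIT 64

	static uint32_t worst_bw(const uint32_t bus_bw[MAX_ESIT],
				 const uint32_t budget[], int nbudget,
				 int esit, int offset)
	{
		uint32_t max = 0;

		for (int base = offset; base < MAX_ESIT; base += esit)
			for (int j = 0; j < nbudget; j++)
				if (bus_bw[base + j] + budget[j] > max)
					max = bus_bw[base + j] + budget[j];
		return max;
	}

	/* returns the best start offset, or -1 if over the bandwidth boundary */
	static int best_offset(const uint32_t bus_bw[MAX_ESIT],
			       const uint32_t budget[], int nbudget,
			       int esit, uint32_t boundary)
	{
		uint32_t min = ~0u;
		int best = -1;

		for (int offset = 0; offset + nbudget <= esit; offset++) {
			uint32_t bw = worst_bw(bus_bw, budget, nbudget,
					       esit, offset);
			if (bw < min) {
				min = bw;
				best = offset;
			}
		}
		return min <= boundary ? best : -1;
	}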
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index cc59d80b663b..8be8c5f7ff62 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -20,6 +20,19 @@
#define XHCI_MTK_MAX_ESIT 64
/**
+ * @split_bit_map: used to avoid overlap of split microframes
+ * @ep_list: Endpoints using this TT
+ * @usb_tt: the usb_tt this schedule TT is associated with
+ * @tt_port: TT port number
+ */
+struct mu3h_sch_tt {
+ DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT);
+ struct list_head ep_list;
+ struct usb_tt *usb_tt;
+ int tt_port;
+};
+
+/**
* struct mu3h_sch_bw_info: schedule information for bandwidth domain
*
* @bus_bw: array to keep track of bandwidth already used at each uframes
@@ -41,6 +54,10 @@ struct mu3h_sch_bw_info {
* (@repeat==1) scheduled within the interval
* @bw_cost_per_microframe: bandwidth cost per microframe
* @endpoint: linked into bandwidth domain which it belongs to
+ * @tt_endpoint: linked into mu3h_sch_tt's list which it belongs to
+ * @sch_tt: the mu3h_sch_tt this endpoint is linked into
+ * @ep_type: endpoint type
+ * @maxpkt: max packet size of endpoint
* @ep: address of usb_host_endpoint struct
* @offset: which uframe of the interval that transfer should be
* scheduled first time within the interval
@@ -57,12 +74,17 @@ struct mu3h_sch_bw_info {
* times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets
* according to @pkts and @repeat. normal mode is used by
* default
+ * @bw_budget_table: table to record bandwidth budget per microframe
*/
struct mu3h_sch_ep_info {
u32 esit;
u32 num_budget_microframes;
u32 bw_cost_per_microframe;
struct list_head endpoint;
+ struct list_head tt_endpoint;
+ struct mu3h_sch_tt *sch_tt;
+ u32 ep_type;
+ u32 maxpkt;
void *ep;
/*
* mtk xHCI scheduling information put into reserved DWs
@@ -73,6 +95,7 @@ struct mu3h_sch_ep_info {
u32 pkts;
u32 cs_count;
u32 burst_mode;
+ u32 bw_budget_table[0];
};
#define MU3C_U3_PORT_MAX 4
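bw_budget_table[0] is a zero-length trailing array, so each mu3h_sch_ep_info must be allocated with room for the table, as create_sch_ep() does with open-coded arithmetic. A sketch of the same allocation using the overflow-checked struct_size() helper from <linux/overflow.h> (available since v4.18; equivalent under the structure layout above):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	static struct mu3h_sch_ep_info *alloc_sch_ep(u32 len_bw_budget_table)
	{
		struct mu3h_sch_ep_info *sch_ep;

		/* sizeof(*sch_ep) + len * sizeof(u32), with overflow checks */
		sch_ep = kzalloc(struct_size(sch_ep, bw_budget_table,
					     len_bw_budget_table), GFP_KERNEL);
		return sch_ep;
	}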
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 51dd8e00c4f8..01c57055c0c5 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -41,6 +41,13 @@
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI 0x15b6
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI 0x15db
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI 0x15d4
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -193,6 +200,16 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI))
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -336,6 +353,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
+ if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ pm_runtime_allow(&dev->dev);
+
return 0;
put_usb3_hcd:
@@ -353,6 +373,10 @@ static void xhci_pci_remove(struct pci_dev *dev)
xhci = hcd_to_xhci(pci_get_drvdata(dev));
xhci->xhc_state |= XHCI_STATE_REMOVING;
+
+ if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ pm_runtime_forbid(&dev->dev);
+
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
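The new quirk flips the runtime-PM default for these Thunderbolt-integrated hosts: pm_runtime_allow() in probe lets them auto-suspend without userspace writing to power/control, and pm_runtime_forbid() in remove restores the usage-count bias so probe/remove stay balanced. A minimal sketch of the pairing (the boolean is an assumption standing in for the quirk bit):

	#include <linux/pm_runtime.h>

	static void host_probe_pm(struct device *dev, bool default_allow)
	{
		pm_runtime_put_noidle(dev);	/* permit runtime suspend */
		if (default_allow)
			pm_runtime_allow(dev);	/* opt in without userspace */
	}

	static void host_remove_pm(struct device *dev, bool default_allow)
	{
		if (default_allow)
			pm_runtime_forbid(dev);	/* restore the default bias */
	}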
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 94e939249b2b..32b5574ad5c5 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -18,6 +18,7 @@
#include <linux/usb/phy.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/usb/of.h>
#include "xhci.h"
#include "xhci-plat.h"
@@ -305,6 +306,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd->skip_phy_initialization = 1;
}
+ hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
+ xhci->shared_hcd->tpl_support = hcd->tpl_support;
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto disable_usb_phy;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f0a99aa0ac58..a8d92c90fb58 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1155,6 +1155,10 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
/* Clear our internal halted state */
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
}
+
+ /* if this was a soft reset, then restart */
+ if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
@@ -1602,6 +1606,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
mod_timer(&hcd->rh_timer,
bus_state->resume_done[hcd_portnum]);
+ usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
bogus_port_status = true;
}
}
@@ -2132,10 +2137,16 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
+ struct xhci_slot_ctx *slot_ctx;
struct xhci_ring *ep_ring;
u32 trb_comp_code;
u32 remaining, requested, ep_trb_len;
+ unsigned int slot_id;
+ int ep_index;
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[slot_id]->out_ctx);
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
@@ -2144,6 +2155,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
switch (trb_comp_code) {
case COMP_SUCCESS:
+ ep_ring->err_count = 0;
/* handle success with untransferred data as short packet */
if (ep_trb != td->last_trb || remaining) {
xhci_warn(xhci, "WARN Successful completion on short TX\n");
@@ -2167,6 +2179,14 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_trb_len = 0;
remaining = 0;
break;
+ case COMP_USB_TRANSACTION_ERROR:
+ if ((ep_ring->err_count++ > MAX_SOFT_RETRY) ||
+ le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
+ break;
+ *status = 0;
+ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
+ ep_ring->stream_id, td, EP_SOFT_RESET);
+ return 0;
default:
/* do nothing */
break;
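The transaction-error path now retries with an endpoint soft reset (EP_SOFT_RESET sets TRB_TSP, and the reset-endpoint completion hunk above rings the doorbell to restart the ring) instead of failing the URB outright. The decision condenses to a small predicate; this helper is illustrative, reusing MAX_SOFT_RETRY and TT_SLOT from the hunks:

	/* retry only while budget remains and the device is not behind a TT */
	static bool should_soft_retry(struct xhci_ring *ring, u32 tt_info)
	{
		if (ring->err_count++ > MAX_SOFT_RETRY)
			return false;	/* budget exhausted: hard error */
		if (tt_info & TT_SLOT)
			return false;	/* behind a TT: no soft retry */
		return true;
	}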
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 4b463e5202a4..6b5db344de30 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -18,6 +18,7 @@
#include <linux/phy/tegra/xusb.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -107,35 +108,35 @@
#define IMEM_BLOCK_SIZE 256
struct tegra_xusb_fw_header {
- u32 boot_loadaddr_in_imem;
- u32 boot_codedfi_offset;
- u32 boot_codetag;
- u32 boot_codesize;
- u32 phys_memaddr;
- u16 reqphys_memsize;
- u16 alloc_phys_memsize;
- u32 rodata_img_offset;
- u32 rodata_section_start;
- u32 rodata_section_end;
- u32 main_fnaddr;
- u32 fwimg_cksum;
- u32 fwimg_created_time;
- u32 imem_resident_start;
- u32 imem_resident_end;
- u32 idirect_start;
- u32 idirect_end;
- u32 l2_imem_start;
- u32 l2_imem_end;
- u32 version_id;
+ __le32 boot_loadaddr_in_imem;
+ __le32 boot_codedfi_offset;
+ __le32 boot_codetag;
+ __le32 boot_codesize;
+ __le32 phys_memaddr;
+ __le16 reqphys_memsize;
+ __le16 alloc_phys_memsize;
+ __le32 rodata_img_offset;
+ __le32 rodata_section_start;
+ __le32 rodata_section_end;
+ __le32 main_fnaddr;
+ __le32 fwimg_cksum;
+ __le32 fwimg_created_time;
+ __le32 imem_resident_start;
+ __le32 imem_resident_end;
+ __le32 idirect_start;
+ __le32 idirect_end;
+ __le32 l2_imem_start;
+ __le32 l2_imem_end;
+ __le32 version_id;
u8 init_ddirect;
u8 reserved[3];
- u32 phys_addr_log_buffer;
- u32 total_log_entries;
- u32 dequeue_ptr;
- u32 dummy_var[2];
- u32 fwimg_len;
+ __le32 phys_addr_log_buffer;
+ __le32 total_log_entries;
+ __le32 dequeue_ptr;
+ __le32 dummy_var[2];
+ __le32 fwimg_len;
u8 magic[8];
- u32 ss_low_power_entry_timeout;
+ __le32 ss_low_power_entry_timeout;
u8 num_hsic_port;
u8 padding[139]; /* Pad to 256 bytes */
};
@@ -194,6 +195,11 @@ struct tegra_xusb {
struct reset_control *host_rst;
struct reset_control *ss_rst;
+ struct device *genpd_dev_host;
+ struct device *genpd_dev_ss;
+ struct device_link *genpd_dl_host;
+ struct device_link *genpd_dl_ss;
+
struct phy **phys;
unsigned int num_phys;
@@ -928,6 +934,57 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
return 0;
}
+static void tegra_xusb_powerdomain_remove(struct device *dev,
+ struct tegra_xusb *tegra)
+{
+ if (tegra->genpd_dl_ss)
+ device_link_del(tegra->genpd_dl_ss);
+ if (tegra->genpd_dl_host)
+ device_link_del(tegra->genpd_dl_host);
+ if (tegra->genpd_dev_ss)
+ dev_pm_domain_detach(tegra->genpd_dev_ss, true);
+ if (tegra->genpd_dev_host)
+ dev_pm_domain_detach(tegra->genpd_dev_host, true);
+}
+
+static int tegra_xusb_powerdomain_init(struct device *dev,
+ struct tegra_xusb *tegra)
+{
+ int err;
+
+ tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host");
+ if (IS_ERR(tegra->genpd_dev_host)) {
+ err = PTR_ERR(tegra->genpd_dev_host);
+ dev_err(dev, "failed to get host pm-domain: %d\n", err);
+ return err;
+ }
+
+ tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss");
+ if (IS_ERR(tegra->genpd_dev_ss)) {
+ err = PTR_ERR(tegra->genpd_dev_ss);
+ dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
+ return err;
+ }
+
+ tegra->genpd_dl_host = device_link_add(dev, tegra->genpd_dev_host,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!tegra->genpd_dl_host) {
+ dev_err(dev, "adding host device link failed!\n");
+ return -ENODEV;
+ }
+
+ tegra->genpd_dl_ss = device_link_add(dev, tegra->genpd_dev_ss,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!tegra->genpd_dl_ss) {
+ dev_err(dev, "adding superspeed device link failed!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int tegra_xusb_probe(struct platform_device *pdev)
{
struct tegra_xusb_mbox_msg msg;
@@ -1038,7 +1095,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_padctl;
}
- if (!pdev->dev.pm_domain) {
+ if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra->host_rst = devm_reset_control_get(&pdev->dev,
"xusb_host");
if (IS_ERR(tegra->host_rst)) {
@@ -1069,17 +1126,22 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->host_clk,
tegra->host_rst);
if (err) {
+ tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
dev_err(&pdev->dev,
"failed to enable XUSBC domain: %d\n", err);
- goto disable_xusba;
+ goto put_padctl;
}
+ } else {
+ err = tegra_xusb_powerdomain_init(&pdev->dev, tegra);
+ if (err)
+ goto put_powerdomains;
}
tegra->supplies = devm_kcalloc(&pdev->dev, tegra->soc->num_supplies,
sizeof(*tegra->supplies), GFP_KERNEL);
if (!tegra->supplies) {
err = -ENOMEM;
- goto disable_xusbc;
+ goto put_powerdomains;
}
for (i = 0; i < tegra->soc->num_supplies; i++)
@@ -1089,7 +1151,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->supplies);
if (err) {
dev_err(&pdev->dev, "failed to get regulators: %d\n", err);
- goto disable_xusbc;
+ goto put_powerdomains;
}
for (i = 0; i < tegra->soc->num_types; i++)
@@ -1099,7 +1161,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
sizeof(*tegra->phys), GFP_KERNEL);
if (!tegra->phys) {
err = -ENOMEM;
- goto disable_xusbc;
+ goto put_powerdomains;
}
for (i = 0, k = 0; i < tegra->soc->num_types; i++) {
@@ -1115,7 +1177,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
"failed to get PHY %s: %ld\n", prop,
PTR_ERR(phy));
err = PTR_ERR(phy);
- goto disable_xusbc;
+ goto put_powerdomains;
}
tegra->phys[k++] = phy;
@@ -1126,7 +1188,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
dev_name(&pdev->dev));
if (!tegra->hcd) {
err = -ENOMEM;
- goto disable_xusbc;
+ goto put_powerdomains;
}
/*
@@ -1222,12 +1284,13 @@ put_rpm:
disable_rpm:
pm_runtime_disable(&pdev->dev);
usb_put_hcd(tegra->hcd);
-disable_xusbc:
- if (!pdev->dev.pm_domain)
+put_powerdomains:
+ if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
-disable_xusba:
- if (!pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
+ } else {
+ tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
+ }
put_padctl:
tegra_xusb_padctl_put(tegra->padctl);
return err;
@@ -1249,6 +1312,13 @@ static int tegra_xusb_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
+ tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
+ tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
+ } else {
+ tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
+ }
+
tegra_xusb_padctl_put(tegra->padctl);
return 0;
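The Tegra changes switch from the legacy powergate API to generic power domains whenever the DT carries a "power-domains" property: each named domain is attached, then tied to the consumer with a stateless runtime-PM device link. A generic sketch of one attach step (dev_pm_domain_attach_by_name() is new in this same release, so treat its availability as an assumption; error unwinding is elided):

	#include <linux/device.h>
	#include <linux/pm_domain.h>

	static int attach_domain(struct device *dev, const char *name,
				 struct device **virt_dev,
				 struct device_link **link)
	{
		*virt_dev = dev_pm_domain_attach_by_name(dev, name);
		if (IS_ERR(*virt_dev))
			return PTR_ERR(*virt_dev);

		/* domain follows the consumer's runtime-PM state */
		*link = device_link_add(dev, *virt_dev,
					DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
		if (!*link)
			return -ENODEV;

		return 0;
	}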
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6230a578324c..bf0b3692dc9a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1496,6 +1496,7 @@ static inline const char *xhci_trb_type_string(u8 type)
/* How much data is left before the 64KB boundary? */
#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \
(addr & (TRB_MAX_BUFF_SIZE - 1)))
+#define MAX_SOFT_RETRY 3
struct xhci_segment {
union xhci_trb *trbs;
@@ -1583,6 +1584,7 @@ struct xhci_ring {
* if we own the TRB (if we are the consumer). See section 4.9.1.
*/
u32 cycle_state;
+ unsigned int err_count;
unsigned int stream_id;
unsigned int num_segs;
unsigned int num_trbs_free;
@@ -1846,6 +1848,7 @@ struct xhci_hcd {
#define XHCI_SUSPEND_DELAY BIT_ULL(30)
#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
#define XHCI_ZERO_64B_REGS BIT_ULL(32)
+#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index d746c26a8055..bd539f3058bc 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -146,8 +146,11 @@ static int appledisplay_bl_update_status(struct backlight_device *bd)
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
mutex_unlock(&pdata->sysfslock);
-
- return retval;
+
+ if (retval < 0)
+ return retval;
+ else
+ return 0;
}
static int appledisplay_bl_get_brightness(struct backlight_device *bd)
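The fix matters because backlight update_status() callbacks must return 0 on success or a negative errno; usb_control_msg() returns the number of bytes transferred, and leaking that positive count upward confuses the backlight core. The convention, hedged as a one-line sketch:

	/* map a USB transfer result to the 0/-errno convention */
	static int xfer_result_to_errno(int retval)
	{
		return retval < 0 ? retval : 0;
	}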
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c2991b8a65ce..ba05dd80a020 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -808,8 +808,8 @@ static int iowarrior_probe(struct usb_interface *interface,
dev->int_in_endpoint->bInterval);
/* create an internal buffer for interrupt data from the device */
dev->read_queue =
- kmalloc(((dev->report_size + 1) * MAX_INTERRUPT_BUFFER),
- GFP_KERNEL);
+ kmalloc_array(dev->report_size + 1, MAX_INTERRUPT_BUFFER,
+ GFP_KERNEL);
if (!dev->read_queue)
goto error;
/* Get the serial-number of the chip */
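kmalloc_array() is the overflow-safe spelling of the same allocation: if the element count times the element size would wrap, it returns NULL instead of quietly under-allocating. A minimal sketch mirroring the hunk (names are illustrative):

	#include <linux/slab.h>

	static void *alloc_read_queue(size_t report_size, size_t nbuffers)
	{
		/* fails cleanly if (report_size + 1) * nbuffers overflows */
		return kmalloc_array(report_size + 1, nbuffers, GFP_KERNEL);
	}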
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index b3e1f553954a..ac357ce2d1a6 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -46,7 +46,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
struct trancevibrator *tv = usb_get_intfdata(intf);
int temp, retval, old;
- temp = simple_strtoul(buf, NULL, 10);
+ retval = kstrtoint(buf, 10, &temp);
+ if (retval)
+ return retval;
if (temp > 255)
temp = 255;
else if (temp < 0)
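Unlike simple_strtoul(), kstrtoint() rejects malformed or out-of-range input and reports it, so the sysfs store can propagate the error instead of acting on garbage. A sketch of the full parse-and-clamp step (the helper name is an assumption):

	#include <linux/kernel.h>

	static int parse_speed(const char *buf, int *speed)
	{
		int val, ret;

		ret = kstrtoint(buf, 10, &val);
		if (ret)
			return ret;	/* -EINVAL or -ERANGE on bad input */

		*speed = clamp(val, 0, 255);
		return 0;
	}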
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index d045d8458f81..ae70b9bfd797 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -185,8 +185,8 @@ static void mtu3_intr_enable(struct mtu3 *mtu)
if (mtu->is_u3_ip) {
/* Enable U3 LTSSM interrupts */
- value = HOT_RST_INTR | WARM_RST_INTR | VBUS_RISE_INTR |
- VBUS_FALL_INTR | ENTER_U3_INTR | EXIT_U3_INTR;
+ value = HOT_RST_INTR | WARM_RST_INTR |
+ ENTER_U3_INTR | EXIT_U3_INTR;
mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value);
}
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 5c60a8c5a0b5..bbcd3332471d 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -585,6 +585,17 @@ static const struct usb_gadget_ops mtu3_gadget_ops = {
.udc_stop = mtu3_gadget_stop,
};
+static void mtu3_state_reset(struct mtu3 *mtu)
+{
+ mtu->address = 0;
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+ mtu->may_wakeup = 0;
+ mtu->u1_enable = 0;
+ mtu->u2_enable = 0;
+ mtu->delayed_status = false;
+ mtu->test_mode = false;
+}
+
static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
u32 epnum, u32 is_in)
{
@@ -702,6 +713,7 @@ void mtu3_gadget_disconnect(struct mtu3 *mtu)
spin_lock(&mtu->lock);
}
+ mtu3_state_reset(mtu);
usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
}
@@ -712,12 +724,6 @@ void mtu3_gadget_reset(struct mtu3 *mtu)
/* report disconnect, if we didn't flush EP state */
if (mtu->g.speed != USB_SPEED_UNKNOWN)
mtu3_gadget_disconnect(mtu);
-
- mtu->address = 0;
- mtu->ep0_state = MU3D_EP0_STATE_SETUP;
- mtu->may_wakeup = 0;
- mtu->u1_enable = 0;
- mtu->u2_enable = 0;
- mtu->delayed_status = false;
- mtu->test_mode = false;
+ else
+ mtu3_state_reset(mtu);
}
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 66143ab8c043..aaf363f19714 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -505,15 +505,19 @@ static int abx500_usb_link_status_update(struct ab8500_usb *ab)
if (is_ab8500(ab->ab8500)) {
enum ab8500_usb_link_status lsts;
- abx500_get_register_interruptible(ab->dev,
+ ret = abx500_get_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_LINE_STAT_REG, &reg);
+ if (ret < 0)
+ return ret;
lsts = (reg >> 3) & 0x0F;
ret = ab8500_usb_link_status_update(ab, lsts);
} else if (is_ab8505(ab->ab8500)) {
enum ab8505_usb_link_status lsts;
- abx500_get_register_interruptible(ab->dev,
+ ret = abx500_get_register_interruptible(ab->dev,
AB8500_USB, AB8505_USB_LINE_STAT_REG, &reg);
+ if (ret < 0)
+ return ret;
lsts = (reg >> 3) & 0x1F;
ret = ab8505_usb_link_status_update(ab, lsts);
}
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index e5aa24c1e4fd..1b1bb0ad40c3 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -563,7 +563,7 @@ static enum usb_charger_type mxs_charger_primary_detection(struct mxs_phy *x)
regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
if (!(val & ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED)) {
chgr_type = SDP_TYPE;
- dev_dbg(x->phy.dev, "It is a stardard downstream port\n");
+ dev_dbg(x->phy.dev, "It is a standard downstream port\n");
}
/* Disable charger detector */
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 4310df46639d..a3e1290d682d 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -5,6 +5,7 @@
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
@@ -12,6 +13,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "common.h"
@@ -290,6 +292,79 @@ static void usbhsc_set_buswait(struct usbhs_priv *priv)
usbhs_bset(priv, BUSWAIT, 0x000F, wait);
}
+static bool usbhsc_is_multi_clks(struct usbhs_priv *priv)
+{
+ if (priv->dparam.type == USBHS_TYPE_RCAR_GEN3 ||
+ priv->dparam.type == USBHS_TYPE_RCAR_GEN3_WITH_PLL)
+ return true;
+
+ return false;
+}
+
+static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv)
+{
+ if (!usbhsc_is_multi_clks(priv))
+ return 0;
+
+ /* The first clock should exist */
+ priv->clks[0] = of_clk_get(dev->of_node, 0);
+ if (IS_ERR(priv->clks[0]))
+ return PTR_ERR(priv->clks[0]);
+
+ /*
+ * For backward compatibility with old DTs, the second clock is
+ * optional: -ENOENT is treated as "not present" rather than an error.
+ */
+ priv->clks[1] = of_clk_get(dev->of_node, 1);
+ if (PTR_ERR(priv->clks[1]) == -ENOENT)
+ priv->clks[1] = NULL;
+ else if (IS_ERR(priv->clks[1]))
+ return PTR_ERR(priv->clks[1]);
+
+ return 0;
+}
+
+static void usbhsc_clk_put(struct usbhs_priv *priv)
+{
+ int i;
+
+ if (!usbhsc_is_multi_clks(priv))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(priv->clks); i++)
+ clk_put(priv->clks[i]);
+}
+
+static int usbhsc_clk_prepare_enable(struct usbhs_priv *priv)
+{
+ int i, ret;
+
+ if (!usbhsc_is_multi_clks(priv))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(priv->clks); i++) {
+ ret = clk_prepare_enable(priv->clks[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(priv->clks[i]);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void usbhsc_clk_disable_unprepare(struct usbhs_priv *priv)
+{
+ int i;
+
+ if (!usbhsc_is_multi_clks(priv))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(priv->clks); i++)
+ clk_disable_unprepare(priv->clks[i]);
+}
+
/*
* platform default param
*/
@@ -340,6 +415,10 @@ static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
/* enable PM */
pm_runtime_get_sync(dev);
+ /* enable clks */
+ if (usbhsc_clk_prepare_enable(priv))
+ return;
+
/* enable platform power */
usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
@@ -352,6 +431,9 @@ static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
/* disable platform power */
usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
+ /* disable clks */
+ usbhsc_clk_disable_unprepare(priv);
+
/* disable PM */
pm_runtime_put_sync(dev);
}
@@ -478,6 +560,10 @@ static const struct of_device_id usbhs_of_match[] = {
.data = (void *)USBHS_TYPE_RCAR_GEN3,
},
{
+ .compatible = "renesas,usbhs-r8a77990",
+ .data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
+ },
+ {
.compatible = "renesas,usbhs-r8a77995",
.data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
},
@@ -574,6 +660,10 @@ static int usbhs_probe(struct platform_device *pdev)
return PTR_ERR(priv->edev);
}
+ priv->rsts = devm_reset_control_array_get_optional_shared(&pdev->dev);
+ if (IS_ERR(priv->rsts))
+ return PTR_ERR(priv->rsts);
+
/*
* take care of platform info
*/
@@ -591,15 +681,6 @@ static int usbhs_probe(struct platform_device *pdev)
break;
case USBHS_TYPE_RCAR_GEN3_WITH_PLL:
priv->pfunc = usbhs_rcar3_with_pll_ops;
- if (!IS_ERR_OR_NULL(priv->edev)) {
- priv->nb.notifier_call = priv->pfunc.notifier;
- ret = devm_extcon_register_notifier(&pdev->dev,
- priv->edev,
- EXTCON_USB_HOST,
- &priv->nb);
- if (ret < 0)
- dev_err(&pdev->dev, "no notifier registered\n");
- }
break;
case USBHS_TYPE_RZA1:
priv->pfunc = usbhs_rza1_ops;
@@ -658,6 +739,14 @@ static int usbhs_probe(struct platform_device *pdev)
/* dev_set_drvdata should be called after usbhs_mod_init */
platform_set_drvdata(pdev, priv);
+ ret = reset_control_deassert(priv->rsts);
+ if (ret)
+ goto probe_fail_rst;
+
+ ret = usbhsc_clk_get(&pdev->dev, priv);
+ if (ret)
+ goto probe_fail_clks;
+
/*
* device reset here because the
* USB device might have been used by the boot loader.
@@ -711,6 +800,10 @@ static int usbhs_probe(struct platform_device *pdev)
return ret;
probe_end_mod_exit:
+ usbhsc_clk_put(priv);
+probe_fail_clks:
+ reset_control_assert(priv->rsts);
+probe_fail_rst:
usbhs_mod_remove(priv);
probe_end_fifo_exit:
usbhs_fifo_remove(priv);
@@ -739,6 +832,8 @@ static int usbhs_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
usbhs_platform_call(priv, hardware_exit, pdev);
+ usbhsc_clk_put(priv);
+ reset_control_assert(priv->rsts);
usbhs_mod_remove(priv);
usbhs_fifo_remove(priv);
usbhs_pipe_remove(priv);
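The clock handling above treats the second clock as optional for older device trees: of_clk_get() yields -ENOENT when the DT simply has no second entry, and only other errors abort the probe. A sketch of that lookup in isolation:

	#include <linux/clk.h>
	#include <linux/of.h>

	static struct clk *get_optional_indexed_clk(struct device_node *np,
						    int index)
	{
		struct clk *clk = of_clk_get(np, index);

		if (PTR_ERR(clk) == -ENOENT)
			return NULL;	/* absent: legal for old DTs */

		return clk;		/* a valid clock, or a real error */
	}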
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 6137f7942c05..3777af848a35 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -8,8 +8,10 @@
#ifndef RENESAS_USB_DRIVER_H
#define RENESAS_USB_DRIVER_H
+#include <linux/clk.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/usb/renesas_usbhs.h>
struct usbhs_priv;
@@ -255,7 +257,6 @@ struct usbhs_priv {
struct platform_device *pdev;
struct extcon_dev *edev;
- struct notifier_block nb;
spinlock_t lock;
@@ -277,6 +278,8 @@ struct usbhs_priv {
struct usbhs_fifo_info fifo_info;
struct phy *phy;
+ struct reset_control *rsts;
+ struct clk *clks[2];
};
/*
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index d0ea4ff89622..aa3820448286 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -27,7 +27,6 @@
* Remarks: bit[31:11] and bit[9:6] should be 0
*/
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
-#define UGCTRL2_USB0SEL_EHCI 0x00000010
#define UGCTRL2_USB0SEL_HSUSB 0x00000020
#define UGCTRL2_USB0SEL_OTG 0x00000030
#define UGCTRL2_VBUSSEL 0x00000400
@@ -50,14 +49,6 @@ static void usbhs_rcar3_set_ugctrl2(struct usbhs_priv *priv, u32 val)
usbhs_write32(priv, UGCTRL2, val | UGCTRL2_RESERVED_3);
}
-static void usbhs_rcar3_set_usbsel(struct usbhs_priv *priv, bool ehci)
-{
- if (ehci)
- usbhs_rcar3_set_ugctrl2(priv, UGCTRL2_USB0SEL_EHCI);
- else
- usbhs_rcar3_set_ugctrl2(priv, UGCTRL2_USB0SEL_HSUSB);
-}
-
static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
@@ -83,14 +74,11 @@ static int usbhs_rcar3_power_and_pll_ctrl(struct platform_device *pdev,
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
u32 val;
int timeout = 1000;
- bool is_host = false;
if (enable) {
usbhs_write32(priv, UGCTRL, 0); /* release PLLRESET */
- if (priv->edev)
- is_host = extcon_get_state(priv->edev, EXTCON_USB_HOST);
-
- usbhs_rcar3_set_usbsel(priv, is_host);
+ usbhs_rcar3_set_ugctrl2(priv,
+ UGCTRL2_USB0SEL_OTG | UGCTRL2_VBUSSEL);
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
do {
@@ -112,16 +100,6 @@ static int usbhs_rcar3_get_id(struct platform_device *pdev)
return USBHS_GADGET;
}
-static int usbhs_rcar3_notifier(struct notifier_block *nb, unsigned long event,
- void *data)
-{
- struct usbhs_priv *priv = container_of(nb, struct usbhs_priv, nb);
-
- usbhs_rcar3_set_usbsel(priv, !!event);
-
- return NOTIFY_DONE;
-}
-
const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
.power_ctrl = usbhs_rcar3_power_ctrl,
.get_id = usbhs_rcar3_get_id,
@@ -130,5 +108,4 @@ const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
const struct renesas_usbhs_platform_callback usbhs_rcar3_with_pll_ops = {
.power_ctrl = usbhs_rcar3_power_and_pll_ctrl,
.get_id = usbhs_rcar3_get_id,
- .notifier = usbhs_rcar3_notifier,
};
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e0035c023120..ed51bc48eea6 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -378,7 +378,7 @@ static int cypress_serial_control(struct tty_struct *tty,
retval = -ENOTTY;
goto out;
}
- dev_dbg(dev, "%s - retreiving serial line settings\n", __func__);
+ dev_dbg(dev, "%s - retrieving serial line settings\n", __func__);
do {
retval = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
@@ -769,7 +769,7 @@ send:
usb_fill_int_urb(port->interrupt_out_urb, port->serial->dev,
usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress),
- port->interrupt_out_buffer, port->interrupt_out_size,
+ port->interrupt_out_buffer, actual_size,
cypress_write_int_callback, port, priv->write_urb_interval);
result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
if (result) {
@@ -863,7 +863,7 @@ static void cypress_set_termios(struct tty_struct *tty,
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
int data_bits, stop_bits, parity_type, parity_enable;
- unsigned cflag, iflag;
+ unsigned int cflag;
unsigned long flags;
__u8 oldlines;
int linechange = 0;
@@ -899,7 +899,6 @@ static void cypress_set_termios(struct tty_struct *tty,
tty->termios.c_cflag &= ~(CMSPAR|CRTSCTS);
cflag = tty->termios.c_cflag;
- iflag = tty->termios.c_iflag;
/* check if there are new settings */
if (old_termios) {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 758ba789e997..609198d9594c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -39,6 +39,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/serial.h>
+#include <linux/gpio/driver.h>
#include <linux/usb/serial.h>
#include "ftdi_sio.h"
#include "ftdi_sio_ids.h"
@@ -72,6 +73,15 @@ struct ftdi_private {
unsigned int latency; /* latency setting in use */
unsigned short max_packet_size;
struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gc;
+ struct mutex gpio_lock; /* protects GPIO state */
+ bool gpio_registered; /* is the gpiochip in kernel registered */
+ bool gpio_used; /* true if the user requested a gpio */
+ u8 gpio_altfunc; /* which pins are in gpio mode */
+ u8 gpio_output; /* pin directions cache */
+ u8 gpio_value; /* pin value for outputs */
+#endif
};
/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -1764,6 +1774,375 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
}
+#ifdef CONFIG_GPIOLIB
+
+static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ int result;
+ u16 val;
+
+ val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
+ result = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ FTDI_SIO_SET_BITMODE_REQUEST,
+ FTDI_SIO_SET_BITMODE_REQUEST_TYPE, val,
+ priv->interface, NULL, 0, WDR_TIMEOUT);
+ if (result < 0) {
+ dev_err(&serial->interface->dev,
+ "bitmode request failed for value 0x%04x: %d\n",
+ val, result);
+ }
+
+ return result;
+}
+
+static int ftdi_set_cbus_pins(struct usb_serial_port *port)
+{
+ return ftdi_set_bitmode(port, FTDI_SIO_BITMODE_CBUS);
+}
+
+static int ftdi_exit_cbus_mode(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ priv->gpio_output = 0;
+ priv->gpio_value = 0;
+ return ftdi_set_bitmode(port, FTDI_SIO_BITMODE_RESET);
+}
+
+static int ftdi_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int result;
+
+ if (priv->gpio_altfunc & BIT(offset))
+ return -ENODEV;
+
+ mutex_lock(&priv->gpio_lock);
+ if (!priv->gpio_used) {
+ /* Set default pin states, as we cannot get them from device */
+ priv->gpio_output = 0x00;
+ priv->gpio_value = 0x00;
+ result = ftdi_set_cbus_pins(port);
+ if (result) {
+ mutex_unlock(&priv->gpio_lock);
+ return result;
+ }
+
+ priv->gpio_used = true;
+ }
+ mutex_unlock(&priv->gpio_lock);
+
+ return 0;
+}
+
+static int ftdi_read_cbus_pins(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ unsigned char *buf;
+ int result;
+
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ result = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ FTDI_SIO_READ_PINS_REQUEST,
+ FTDI_SIO_READ_PINS_REQUEST_TYPE, 0,
+ priv->interface, buf, 1, WDR_TIMEOUT);
+ if (result < 1) {
+ if (result >= 0)
+ result = -EIO;
+ } else {
+ result = buf[0];
+ }
+
+ kfree(buf);
+
+ return result;
+}
+
+static int ftdi_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ int result;
+
+ result = ftdi_read_cbus_pins(port);
+ if (result < 0)
+ return result;
+
+ return !!(result & BIT(gpio));
+}
+
+static void ftdi_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ mutex_lock(&priv->gpio_lock);
+
+ if (value)
+ priv->gpio_value |= BIT(gpio);
+ else
+ priv->gpio_value &= ~BIT(gpio);
+
+ ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+}
+
+static int ftdi_gpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ int result;
+
+ result = ftdi_read_cbus_pins(port);
+ if (result < 0)
+ return result;
+
+ *bits = result & *mask;
+
+ return 0;
+}
+
+static void ftdi_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ mutex_lock(&priv->gpio_lock);
+
+ priv->gpio_value &= ~(*mask);
+ priv->gpio_value |= *bits & *mask;
+ ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+}
+
+static int ftdi_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ return !(priv->gpio_output & BIT(gpio));
+}
+
+static int ftdi_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int result;
+
+ mutex_lock(&priv->gpio_lock);
+
+ priv->gpio_output &= ~BIT(gpio);
+ result = ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+
+ return result;
+}
+
+static int ftdi_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
+ int value)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int result;
+
+ mutex_lock(&priv->gpio_lock);
+
+ priv->gpio_output |= BIT(gpio);
+ if (value)
+ priv->gpio_value |= BIT(gpio);
+ else
+ priv->gpio_value &= ~BIT(gpio);
+
+ result = ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+
+ return result;
+}
+
+static int ftdi_read_eeprom(struct usb_serial *serial, void *dst, u16 addr,
+ u16 nbytes)
+{
+ int read = 0;
+
+ if (addr % 2 != 0)
+ return -EINVAL;
+ if (nbytes % 2 != 0)
+ return -EINVAL;
+
+ /* Read EEPROM two bytes at a time */
+ while (read < nbytes) {
+ int rv;
+
+ rv = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ FTDI_SIO_READ_EEPROM_REQUEST,
+ FTDI_SIO_READ_EEPROM_REQUEST_TYPE,
+ 0, (addr + read) / 2, dst + read, 2,
+ WDR_TIMEOUT);
+ if (rv < 2) {
+ if (rv >= 0)
+ return -EIO;
+ else
+ return rv;
+ }
+
+ read += rv;
+ }
+
+ return 0;
+}
+
+static int ftdi_gpio_init_ft232r(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ u16 cbus_config;
+ u8 *buf;
+ int ret;
+ int i;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ftdi_read_eeprom(port->serial, buf, 0x14, 2);
+ if (ret < 0)
+ goto out_free;
+
+ cbus_config = le16_to_cpup((__le16 *)buf);
+ dev_dbg(&port->dev, "cbus_config = 0x%04x\n", cbus_config);
+
+ priv->gc.ngpio = 4;
+
+ priv->gpio_altfunc = 0xff;
+ for (i = 0; i < priv->gc.ngpio; ++i) {
+ if ((cbus_config & 0xf) == FTDI_FT232R_CBUS_MUX_GPIO)
+ priv->gpio_altfunc &= ~BIT(i);
+ cbus_config >>= 4;
+ }
+out_free:
+ kfree(buf);
+
+ return ret;
+}
+
+static int ftdi_gpio_init_ftx(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ const u16 cbus_cfg_addr = 0x1a;
+ const u16 cbus_cfg_size = 4;
+ u8 *cbus_cfg_buf;
+ int result;
+ u8 i;
+
+ cbus_cfg_buf = kmalloc(cbus_cfg_size, GFP_KERNEL);
+ if (!cbus_cfg_buf)
+ return -ENOMEM;
+
+ result = ftdi_read_eeprom(serial, cbus_cfg_buf,
+ cbus_cfg_addr, cbus_cfg_size);
+ if (result < 0)
+ goto out_free;
+
+ /* FIXME: FT234XD alone has 1 GPIO, but how to recognize this IC? */
+ priv->gc.ngpio = 4;
+
+ /* Determine which pins are configured for CBUS bitbanging */
+ priv->gpio_altfunc = 0xff;
+ for (i = 0; i < priv->gc.ngpio; ++i) {
+ if (cbus_cfg_buf[i] == FTDI_FTX_CBUS_MUX_GPIO)
+ priv->gpio_altfunc &= ~BIT(i);
+ }
+
+out_free:
+ kfree(cbus_cfg_buf);
+
+ return result;
+}
+
+static int ftdi_gpio_init(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ int result;
+
+ switch (priv->chip_type) {
+ case FT232RL:
+ result = ftdi_gpio_init_ft232r(port);
+ break;
+ case FTX:
+ result = ftdi_gpio_init_ftx(port);
+ break;
+ default:
+ return 0;
+ }
+
+ if (result < 0)
+ return result;
+
+ mutex_init(&priv->gpio_lock);
+
+ priv->gc.label = "ftdi-cbus";
+ priv->gc.request = ftdi_gpio_request;
+ priv->gc.get_direction = ftdi_gpio_direction_get;
+ priv->gc.direction_input = ftdi_gpio_direction_input;
+ priv->gc.direction_output = ftdi_gpio_direction_output;
+ priv->gc.get = ftdi_gpio_get;
+ priv->gc.set = ftdi_gpio_set;
+ priv->gc.get_multiple = ftdi_gpio_get_multiple;
+ priv->gc.set_multiple = ftdi_gpio_set_multiple;
+ priv->gc.owner = THIS_MODULE;
+ priv->gc.parent = &serial->interface->dev;
+ priv->gc.base = -1;
+ priv->gc.can_sleep = true;
+
+ result = gpiochip_add_data(&priv->gc, port);
+ if (!result)
+ priv->gpio_registered = true;
+
+ return result;
+}
+
+static void ftdi_gpio_remove(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ if (priv->gpio_registered) {
+ gpiochip_remove(&priv->gc);
+ priv->gpio_registered = false;
+ }
+
+ if (priv->gpio_used) {
+ /* Exiting CBUS-mode does not reset pin states. */
+ ftdi_exit_cbus_mode(port);
+ priv->gpio_used = false;
+ }
+}
+
+#else
+
+static int ftdi_gpio_init(struct usb_serial_port *port)
+{
+ return 0;
+}
+
+static void ftdi_gpio_remove(struct usb_serial_port *port) { }
+
+#endif /* CONFIG_GPIOLIB */
+
/*
* ***************************************************************************
* FTDI driver specific functions
@@ -1792,7 +2171,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
{
struct ftdi_private *priv;
const struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial);
-
+ int result;
priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL);
if (!priv)
@@ -1811,6 +2190,14 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
priv->latency = 16;
write_latency_timer(port);
create_sysfs_attrs(port);
+
+ result = ftdi_gpio_init(port);
+ if (result < 0) {
+ dev_err(&port->serial->interface->dev,
+ "GPIO initialisation failed: %d\n",
+ result);
+ }
+
return 0;
}
@@ -1928,6 +2315,8 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
+ ftdi_gpio_remove(port);
+
remove_sysfs_attrs(port);
kfree(priv);
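ftdi_gpio_init() follows the standard pattern for a gpiochip whose accessors sleep: every get/set issues a blocking USB control transfer, so can_sleep must be true, and gpiochip_add_data() stashes the port pointer that callbacks later recover with gpiochip_get_data(). A skeleton of the registration step (assumes the caller has filled in the get/set/direction ops, as the hunk does):

	#include <linux/gpio/driver.h>
	#include <linux/module.h>

	static int register_usb_gpiochip(struct gpio_chip *gc,
					 struct device *parent, void *drvdata)
	{
		gc->label = "usb-gpio";		/* illustrative label */
		gc->owner = THIS_MODULE;
		gc->parent = parent;
		gc->base = -1;			/* dynamic GPIO numbering */
		gc->can_sleep = true;		/* accessors do USB I/O */

		/* drvdata is returned by gpiochip_get_data() in callbacks */
		return gpiochip_add_data(gc, drvdata);
	}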
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index dcd0b6e05baf..a79a1325b4d9 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -35,7 +35,10 @@
#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
-#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
+#define FTDI_SIO_GET_LATENCY_TIMER 0x0a /* Get the latency timer */
+#define FTDI_SIO_SET_BITMODE 0x0b /* Set bitbang mode */
+#define FTDI_SIO_READ_PINS 0x0c /* Read immediate value of pins */
+#define FTDI_SIO_READ_EEPROM 0x90 /* Read EEPROM */
/* Interface indices for FT2232, FT2232H and FT4232H devices */
#define INTERFACE_A 1
@@ -433,6 +436,29 @@ enum ftdi_sio_baudrate {
* 1 = active
*/
+/* FTDI_SIO_SET_BITMODE */
+#define FTDI_SIO_SET_BITMODE_REQUEST_TYPE 0x40
+#define FTDI_SIO_SET_BITMODE_REQUEST FTDI_SIO_SET_BITMODE
+
+/* Possible bitmodes for FTDI_SIO_SET_BITMODE_REQUEST */
+#define FTDI_SIO_BITMODE_RESET 0x00
+#define FTDI_SIO_BITMODE_CBUS 0x20
+
+/* FTDI_SIO_READ_PINS */
+#define FTDI_SIO_READ_PINS_REQUEST_TYPE 0xc0
+#define FTDI_SIO_READ_PINS_REQUEST FTDI_SIO_READ_PINS
+
+/*
+ * FTDI_SIO_READ_EEPROM
+ *
+ * EEPROM format found in FTDI AN_201, "FT-X MTP memory Configuration",
+ * http://www.ftdichip.com/Support/Documents/AppNotes/AN_201_FT-X%20MTP%20Memory%20Configuration.pdf
+ */
+#define FTDI_SIO_READ_EEPROM_REQUEST_TYPE 0xc0
+#define FTDI_SIO_READ_EEPROM_REQUEST FTDI_SIO_READ_EEPROM
+
+#define FTDI_FTX_CBUS_MUX_GPIO 0x8
+#define FTDI_FT232R_CBUS_MUX_GPIO 0xa
/* Descriptors returned by the device
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index ec84758f0e23..6fd427284b12 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -23,16 +23,16 @@ config USB_STORAGE
To compile this driver as a module, choose M here: the
module will be called usb-storage.
+if USB_STORAGE
+
config USB_STORAGE_DEBUG
bool "USB Mass Storage verbose debug"
- depends on USB_STORAGE
help
Say Y here in order to have the USB Mass Storage code generate
verbose debugging messages.
config USB_STORAGE_REALTEK
tristate "Realtek Card Reader support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the power-saving function
for Realtek RTS51xx USB card readers.
@@ -46,7 +46,6 @@ config REALTEK_AUTOPM
config USB_STORAGE_DATAFAB
tristate "Datafab Compact Flash Reader support"
- depends on USB_STORAGE
help
Support for certain Datafab CompactFlash readers.
Datafab has a web page at <http://www.datafab.com/>.
@@ -55,7 +54,6 @@ config USB_STORAGE_DATAFAB
config USB_STORAGE_FREECOM
tristate "Freecom USB/ATAPI Bridge support"
- depends on USB_STORAGE
help
Support for the Freecom USB to IDE/ATAPI adaptor.
Freecom has a web page at <http://www.freecom.de/>.
@@ -64,7 +62,6 @@ config USB_STORAGE_FREECOM
config USB_STORAGE_ISD200
tristate "ISD-200 USB/ATA Bridge support"
- depends on USB_STORAGE
---help---
Say Y here if you want to use USB Mass Store devices based
on the In-Systems Design ISD-200 USB/ATA bridge.
@@ -82,7 +79,6 @@ config USB_STORAGE_ISD200
config USB_STORAGE_USBAT
tristate "USBAT/USBAT02-based storage support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support storage devices
based on the SCM/Shuttle USBAT/USBAT02 processors.
@@ -105,7 +101,6 @@ config USB_STORAGE_USBAT
config USB_STORAGE_SDDR09
tristate "SanDisk SDDR-09 (and other SmartMedia, including DPCM) support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Sandisk SDDR-09
SmartMedia reader in the USB Mass Storage driver.
@@ -115,7 +110,6 @@ config USB_STORAGE_SDDR09
config USB_STORAGE_SDDR55
tristate "SanDisk SDDR-55 SmartMedia support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Sandisk SDDR-55
SmartMedia reader in the USB Mass Storage driver.
@@ -124,7 +118,6 @@ config USB_STORAGE_SDDR55
config USB_STORAGE_JUMPSHOT
tristate "Lexar Jumpshot Compact Flash Reader"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Lexar Jumpshot
USB CompactFlash reader.
@@ -133,7 +126,6 @@ config USB_STORAGE_JUMPSHOT
config USB_STORAGE_ALAUDA
tristate "Olympus MAUSB-10/Fuji DPC-R1 support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Olympus MAUSB-10
and Fujifilm DPC-R1 USB Card reader/writer devices.
@@ -145,7 +137,6 @@ config USB_STORAGE_ALAUDA
config USB_STORAGE_ONETOUCH
tristate "Support OneTouch Button on Maxtor Hard Drives"
- depends on USB_STORAGE
depends on INPUT=y || INPUT=USB_STORAGE
help
Say Y here to include additional code to support the Maxtor OneTouch
@@ -160,7 +151,6 @@ config USB_STORAGE_ONETOUCH
config USB_STORAGE_KARMA
tristate "Support for Rio Karma music player"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Rio Karma
USB interface.
@@ -174,7 +164,6 @@ config USB_STORAGE_KARMA
config USB_STORAGE_CYPRESS_ATACB
tristate "SAT emulation on Cypress USB/ATA Bridge with ATACB"
- depends on USB_STORAGE
---help---
Say Y here if you want to use SAT (ata pass through) on devices based
on the Cypress USB/ATA bridge supporting ATACB. This will allow you
@@ -187,19 +176,15 @@ config USB_STORAGE_CYPRESS_ATACB
config USB_STORAGE_ENE_UB6250
tristate "USB ENE card reader support"
- depends on SCSI
- depends on USB_STORAGE
---help---
Say Y here if you wish to control an ENE SD/MS Card reader.
Note that this driver does not support SM cards.
- This option depends on 'SCSI' support being enabled, but you
- probably also need 'SCSI device support: SCSI disk support'
- (BLK_DEV_SD) for most USB storage devices.
-
To compile this driver as a module, choose M here: the
module will be called ums-eneub6250.
+endif # USB_STORAGE
+
config USB_UAS
tristate "USB Attached SCSI"
depends on SCSI && USB_STORAGE
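
The storage Kconfig change above is mechanical: entries wrapped in an if USB_STORAGE ... endif block inherit USB_STORAGE as a dependency, so the per-entry depends on USB_STORAGE lines (and the redundant depends on SCSI, which USB_STORAGE already implies) can be dropped without changing the resulting configuration space.
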
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index f5e4500d9970..2b474d60b4db 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1153,7 +1153,7 @@ static int isd200_get_inquiry_data( struct us_data *us )
/* Fill in vendor identification fields */
src = (__be16 *)&id[ATA_ID_PROD];
dest = (__u16*)info->InquiryData.VendorId;
- for (i=0;i<4;i++)
+ for (i = 0; i < 4; i++)
dest[i] = be16_to_cpu(src[i]);
src = (__be16 *)&id[ATA_ID_PROD + 8/2];
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 00878c386dd0..30a847c2089d 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -45,50 +45,7 @@ menuconfig TYPEC
if TYPEC
-config TYPEC_TCPM
- tristate "USB Type-C Port Controller Manager"
- depends on USB
- select USB_ROLE_SWITCH
- select POWER_SUPPLY
- help
- The Type-C Port Controller Manager provides a USB PD and USB Type-C
- state machine for use with Type-C Port Controllers.
-
-if TYPEC_TCPM
-
-config TYPEC_TCPCI
- tristate "Type-C Port Controller Interface driver"
- depends on I2C
- select REGMAP_I2C
- help
- Type-C Port Controller driver for TCPCI-compliant controller.
-
-config TYPEC_RT1711H
- tristate "Richtek RT1711H Type-C chip driver"
- depends on I2C
- select TYPEC_TCPCI
- help
- Richtek RT1711H Type-C chip driver that works with
- Type-C Port Controller Manager to provide USB PD and USB
- Type-C functionalities.
-
-source "drivers/usb/typec/fusb302/Kconfig"
-
-config TYPEC_WCOVE
- tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
- depends on ACPI
- depends on INTEL_SOC_PMIC
- depends on INTEL_PMC_IPC
- depends on BXT_WC_PMIC_OPREGION
- help
- This driver adds support for USB Type-C detection on Intel Broxton
- platforms that have Intel Whiskey Cove PMIC. The driver can detect the
- role and cable orientation.
-
- To compile this driver as module, choose M here: the module will be
- called typec_wcove
-
-endif # TYPEC_TCPM
+source "drivers/usb/typec/tcpm/Kconfig"
source "drivers/usb/typec/ucsi/Kconfig"
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 45b0aef428a8..6696b7263d61 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -2,11 +2,7 @@
obj-$(CONFIG_TYPEC) += typec.o
typec-y := class.o mux.o bus.o
obj-$(CONFIG_TYPEC) += altmodes/
-obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
-obj-y += fusb302/
-obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
+obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
obj-$(CONFIG_TYPEC) += mux/
-obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
-obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index e61dffb27a0c..5db0593ca0bd 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1322,7 +1322,7 @@ void typec_set_pwr_role(struct typec_port *port, enum typec_role role)
EXPORT_SYMBOL_GPL(typec_set_pwr_role);
/**
- * typec_set_pwr_role - Report VCONN source change
+ * typec_set_vconn_role - Report VCONN source change
* @port: The USB Type-C Port which VCONN role changed
* @role: Source when @port is sourcing VCONN, or Sink when it's not
*
@@ -1500,7 +1500,7 @@ typec_port_register_altmode(struct typec_port *port,
sprintf(id, "id%04xm%02x", desc->svid, desc->mode);
- mux = typec_mux_get(port->dev.parent, id);
+ mux = typec_mux_get(&port->dev, id);
if (IS_ERR(mux))
return ERR_CAST(mux);
@@ -1540,18 +1540,6 @@ struct typec_port *typec_register_port(struct device *parent,
return ERR_PTR(id);
}
- port->sw = typec_switch_get(cap->fwnode ? &port->dev : parent);
- if (IS_ERR(port->sw)) {
- ret = PTR_ERR(port->sw);
- goto err_switch;
- }
-
- port->mux = typec_mux_get(parent, "typec-mux");
- if (IS_ERR(port->mux)) {
- ret = PTR_ERR(port->mux);
- goto err_mux;
- }
-
switch (cap->type) {
case TYPEC_PORT_SRC:
port->pwr_role = TYPEC_SOURCE;
@@ -1592,13 +1580,26 @@ struct typec_port *typec_register_port(struct device *parent,
port->port_type = cap->type;
port->prefer_role = cap->prefer_role;
+ device_initialize(&port->dev);
port->dev.class = typec_class;
port->dev.parent = parent;
port->dev.fwnode = cap->fwnode;
port->dev.type = &typec_port_dev_type;
dev_set_name(&port->dev, "port%d", id);
- ret = device_register(&port->dev);
+ port->sw = typec_switch_get(&port->dev);
+ if (IS_ERR(port->sw)) {
+ put_device(&port->dev);
+ return ERR_CAST(port->sw);
+ }
+
+ port->mux = typec_mux_get(&port->dev, "typec-mux");
+ if (IS_ERR(port->mux)) {
+ put_device(&port->dev);
+ return ERR_CAST(port->mux);
+ }
+
+ ret = device_add(&port->dev);
if (ret) {
dev_err(parent, "failed to register port (%d)\n", ret);
put_device(&port->dev);
@@ -1606,15 +1607,6 @@ struct typec_port *typec_register_port(struct device *parent,
}
return port;
-
-err_mux:
- typec_switch_put(port->sw);
-
-err_switch:
- ida_simple_remove(&typec_index_ida, port->id);
- kfree(port);
-
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(typec_register_port);
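
The error-path rewrite above follows the driver core's split-registration idiom; a minimal sketch of the pattern (core device-model API, names illustrative):

	device_initialize(&port->dev);	/* refcount live: from here on,
					 * put_device() is the only correct
					 * way to unwind */
	/* ... acquire resources that may fail (switch, mux) ... */
	ret = device_add(&port->dev);	/* make the device visible */
	if (ret)
		put_device(&port->dev);	/* ->release() frees the port */
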
diff --git a/drivers/usb/typec/fusb302/Kconfig b/drivers/usb/typec/fusb302/Kconfig
deleted file mode 100644
index fce099ff39fe..000000000000
--- a/drivers/usb/typec/fusb302/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-config TYPEC_FUSB302
- tristate "Fairchild FUSB302 Type-C chip driver"
- depends on I2C
- help
- The Fairchild FUSB302 Type-C chip driver that works with
- Type-C Port Controller Manager to provide USB PD and USB
- Type-C functionalities.
diff --git a/drivers/usb/typec/fusb302/Makefile b/drivers/usb/typec/fusb302/Makefile
deleted file mode 100644
index 3b51b33631a0..000000000000
--- a/drivers/usb/typec/fusb302/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
new file mode 100644
index 000000000000..f03ea8a61768
--- /dev/null
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -0,0 +1,52 @@
+config TYPEC_TCPM
+ tristate "USB Type-C Port Controller Manager"
+ depends on USB
+ select USB_ROLE_SWITCH
+ select POWER_SUPPLY
+ help
+ The Type-C Port Controller Manager provides a USB PD and USB Type-C
+ state machine for use with Type-C Port Controllers.
+
+if TYPEC_TCPM
+
+config TYPEC_TCPCI
+ tristate "Type-C Port Controller Interface driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Type-C Port Controller driver for TCPCI-compliant controllers.
+
+if TYPEC_TCPCI
+
+config TYPEC_RT1711H
+ tristate "Richtek RT1711H Type-C chip driver"
+ help
+ Richtek RT1711H Type-C chip driver that works with
+ Type-C Port Controller Manager to provide USB PD and USB
+ Type-C functionalities.
+
+endif # TYPEC_TCPCI
+
+config TYPEC_FUSB302
+ tristate "Fairchild FUSB302 Type-C chip driver"
+ depends on I2C
+ help
+ The Fairchild FUSB302 Type-C chip driver that works with
+ Type-C Port Controller Manager to provide USB PD and USB
+ Type-C functionalities.
+
+config TYPEC_WCOVE
+ tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
+ depends on ACPI
+ depends on INTEL_SOC_PMIC
+ depends on INTEL_PMC_IPC
+ depends on BXT_WC_PMIC_OPREGION
+ help
+ This driver adds support for USB Type-C on Intel Broxton platforms
+ that have Intel Whiskey Cove PMIC. The driver works with USB Type-C
+ Port Controller Manager to provide USB PD and Type-C functionalities.
+
+ To compile this driver as a module, choose M here: the module will be
+ called typec_wcove.ko
+
+endif # TYPEC_TCPM
diff --git a/drivers/usb/typec/tcpm/Makefile b/drivers/usb/typec/tcpm/Makefile
new file mode 100644
index 000000000000..a5ff6c8eb892
--- /dev/null
+++ b/drivers/usb/typec/tcpm/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
+obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
+obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
+typec_wcove-y := wcove.o
+obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
+obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 82bed9810be6..43b64d9309d0 100644
--- a/drivers/usb/typec/fusb302/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -42,19 +42,12 @@
#define T_BC_LVL_DEBOUNCE_DELAY_MS 30
enum toggling_mode {
- TOGGLINE_MODE_OFF,
+ TOGGLING_MODE_OFF,
TOGGLING_MODE_DRP,
TOGGLING_MODE_SNK,
TOGGLING_MODE_SRC,
};
-static const char * const toggling_mode_name[] = {
- [TOGGLINE_MODE_OFF] = "toggling_OFF",
- [TOGGLING_MODE_DRP] = "toggling_DRP",
- [TOGGLING_MODE_SNK] = "toggling_SNK",
- [TOGGLING_MODE_SRC] = "toggling_SRC",
-};
-
enum src_current_status {
SRC_CURRENT_DEFAULT,
SRC_CURRENT_MEDIUM,
@@ -601,7 +594,7 @@ static int fusb302_set_toggling(struct fusb302_chip *chip,
chip->intr_comp_chng = false;
/* configure toggling mode: none/snk/src/drp */
switch (mode) {
- case TOGGLINE_MODE_OFF:
+ case TOGGLING_MODE_OFF:
ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
FUSB_REG_CONTROL2_MODE_MASK,
FUSB_REG_CONTROL2_MODE_NONE);
@@ -633,7 +626,7 @@ static int fusb302_set_toggling(struct fusb302_chip *chip,
break;
}
- if (mode == TOGGLINE_MODE_OFF) {
+ if (mode == TOGGLING_MODE_OFF) {
/* mask TOGDONE interrupt */
ret = fusb302_i2c_set_bits(chip, FUSB_REG_MASKA,
FUSB_REG_MASKA_TOGDONE);
@@ -686,6 +679,7 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
int ret = 0;
bool pull_up, pull_down;
u8 rd_mda;
+ enum toggling_mode mode;
mutex_lock(&chip->lock);
switch (cc) {
@@ -709,7 +703,7 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
ret = -EINVAL;
goto done;
}
- ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
fusb302_log(chip, "cannot stop toggling, ret=%d", ret);
goto done;
@@ -771,6 +765,29 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
chip->intr_comp_chng = false;
}
fusb302_log(chip, "cc := %s", typec_cc_status_name[cc]);
+
+ /* Enable detection for fixed SNK or SRC only roles */
+ switch (cc) {
+ case TYPEC_CC_RD:
+ mode = TOGGLING_MODE_SNK;
+ break;
+ case TYPEC_CC_RP_DEF:
+ case TYPEC_CC_RP_1_5:
+ case TYPEC_CC_RP_3_0:
+ mode = TOGGLING_MODE_SRC;
+ break;
+ default:
+ mode = TOGGLING_MODE_OFF;
+ break;
+ }
+
+ if (mode != TOGGLING_MODE_OFF) {
+ ret = fusb302_set_toggling(chip, mode);
+ if (ret < 0)
+ fusb302_log(chip,
+ "cannot set fixed role toggling mode, ret=%d",
+ ret);
+ }
done:
mutex_unlock(&chip->lock);
@@ -1178,10 +1195,6 @@ static const u32 src_pdo[] = {
PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
};
-static const u32 snk_pdo[] = {
- PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
-};
-
static const struct tcpc_config fusb302_tcpc_config = {
.src_pdo = src_pdo,
.nr_src_pdo = ARRAY_SIZE(src_pdo),
@@ -1303,7 +1316,7 @@ static int fusb302_handle_togdone_snk(struct fusb302_chip *chip,
tcpm_cc_change(chip->tcpm_port);
}
/* turn off toggling */
- ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
fusb302_log(chip,
"cannot set toggling mode off, ret=%d", ret);
@@ -1399,7 +1412,7 @@ static int fusb302_handle_togdone_src(struct fusb302_chip *chip,
tcpm_cc_change(chip->tcpm_port);
}
/* turn off toggling */
- ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
fusb302_log(chip,
"cannot set toggling mode off, ret=%d", ret);
@@ -1730,12 +1743,14 @@ static int fusb302_probe(struct i2c_client *client,
return -ENOMEM;
chip->i2c_client = client;
- i2c_set_clientdata(client, chip);
chip->dev = &client->dev;
chip->tcpc_config = fusb302_tcpc_config;
chip->tcpc_dev.config = &chip->tcpc_config;
mutex_init(&chip->lock);
+ chip->tcpc_dev.fwnode =
+ device_get_named_child_node(dev, "connector");
+
if (!device_property_read_u32(dev, "fcs,operating-sink-microwatt", &v))
chip->tcpc_config.operating_snk_mw = v / 1000;
@@ -1756,22 +1771,17 @@ static int fusb302_probe(struct i2c_client *client,
return -EPROBE_DEFER;
}
- fusb302_debugfs_init(chip);
+ chip->vbus = devm_regulator_get(chip->dev, "vbus");
+ if (IS_ERR(chip->vbus))
+ return PTR_ERR(chip->vbus);
chip->wq = create_singlethread_workqueue(dev_name(chip->dev));
- if (!chip->wq) {
- ret = -ENOMEM;
- goto clear_client_data;
- }
+ if (!chip->wq)
+ return -ENOMEM;
+
INIT_DELAYED_WORK(&chip->bc_lvl_handler, fusb302_bc_lvl_handler_work);
init_tcpc_dev(&chip->tcpc_dev);
- chip->vbus = devm_regulator_get(chip->dev, "vbus");
- if (IS_ERR(chip->vbus)) {
- ret = PTR_ERR(chip->vbus);
- goto destroy_workqueue;
- }
-
if (client->irq) {
chip->gpio_int_n_irq = client->irq;
} else {
@@ -1797,15 +1807,15 @@ static int fusb302_probe(struct i2c_client *client,
goto tcpm_unregister_port;
}
enable_irq_wake(chip->gpio_int_n_irq);
+ fusb302_debugfs_init(chip);
+ i2c_set_clientdata(client, chip);
+
return ret;
tcpm_unregister_port:
tcpm_unregister_port(chip->tcpm_port);
destroy_workqueue:
destroy_workqueue(chip->wq);
-clear_client_data:
- i2c_set_clientdata(client, NULL);
- fusb302_debugfs_exit(chip);
return ret;
}
@@ -1816,7 +1826,6 @@ static int fusb302_remove(struct i2c_client *client)
tcpm_unregister_port(chip->tcpm_port);
destroy_workqueue(chip->wq);
- i2c_set_clientdata(client, NULL);
fusb302_debugfs_exit(chip);
return 0;
diff --git a/drivers/usb/typec/fusb302/fusb302_reg.h b/drivers/usb/typec/tcpm/fusb302_reg.h
index 00b39d365478..00b39d365478 100644
--- a/drivers/usb/typec/fusb302/fusb302_reg.h
+++ b/drivers/usb/typec/tcpm/fusb302_reg.h
diff --git a/drivers/usb/typec/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index ac6b418b15f1..ac6b418b15f1 100644
--- a/drivers/usb/typec/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
diff --git a/drivers/usb/typec/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index 303ebde26546..303ebde26546 100644
--- a/drivers/usb/typec/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
diff --git a/drivers/usb/typec/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 017389021b96..017389021b96 100644
--- a/drivers/usb/typec/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 4f1f4215f3d6..dbbd71f754d0 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1430,8 +1430,8 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
break;
- if (pdo_pps_apdo_max_current(pdo[i]) <
- pdo_pps_apdo_max_current(pdo[i - 1]))
+ if (pdo_pps_apdo_max_voltage(pdo[i]) <
+ pdo_pps_apdo_max_voltage(pdo[i - 1]))
return PDO_ERR_PPS_APDO_NOT_SORTED;
else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
@@ -2209,7 +2209,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
{
unsigned int i, j, max_mw = 0, max_mv = 0;
unsigned int min_src_mv, max_src_mv, src_ma, src_mw;
- unsigned int min_snk_mv, max_snk_mv, snk_ma;
+ unsigned int min_snk_mv, max_snk_mv;
u32 pdo;
unsigned int src_pdo = 0, snk_pdo = 0;
@@ -2253,8 +2253,6 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
pdo_pps_apdo_min_voltage(pdo);
max_snk_mv =
pdo_pps_apdo_max_voltage(pdo);
- snk_ma =
- pdo_pps_apdo_max_current(pdo);
break;
default:
tcpm_log(port,
@@ -2402,7 +2400,7 @@ static int tcpm_pd_send_request(struct tcpm_port *port)
static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
{
- unsigned int out_mv, op_ma, op_mw, min_mv, max_mv, max_ma, flags;
+ unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
enum pd_pdo_type type;
unsigned int src_pdo_index;
u32 pdo;
@@ -2420,7 +2418,6 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
tcpm_log(port, "Invalid APDO selected!");
return -EINVAL;
}
- min_mv = port->pps_data.min_volt;
max_mv = port->pps_data.max_volt;
max_ma = port->pps_data.max_curr;
out_mv = port->pps_data.out_volt;
@@ -4116,6 +4113,9 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
goto port_unlock;
}
+ /* Round down operating current to align with PPS valid steps */
+ op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
+
reinit_completion(&port->pps_complete);
port->pps_data.op_curr = op_curr;
port->pps_status = 0;
@@ -4169,6 +4169,9 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
goto port_unlock;
}
+ /* Round down output voltage to align with PPS valid steps */
+ out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
+
reinit_completion(&port->pps_complete);
port->pps_data.out_volt = out_volt;
port->pps_status = 0;
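
Both new clamps are plain modular round-downs. A worked example, assuming the usual RDO step sizes from the PD header (RDO_PROG_VOLT_MV_STEP = 20, RDO_PROG_CURR_MA_STEP = 50):

	out_volt = 5013  ->  5013 - (5013 % 20) = 5000 mV
	op_curr  = 2125  ->  2125 - (2125 % 50) = 2100 mA
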
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 423208e19383..423208e19383 100644
--- a/drivers/usb/typec/typec_wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c
index 3fc22037a82f..390733e6937e 100644
--- a/drivers/usb/usbip/vudc_main.c
+++ b/drivers/usb/usbip/vudc_main.c
@@ -73,6 +73,10 @@ static int __init init(void)
cleanup:
list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
list_del(&udc_dev->dev_entry);
+ /*
+ * Just do platform_device_del() here; put_vudc_device()
+ * calls platform_device_put().
+ */
platform_device_del(udc_dev->pdev);
put_vudc_device(udc_dev);
}
@@ -89,7 +93,11 @@ static void __exit cleanup(void)
list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
list_del(&udc_dev->dev_entry);
- platform_device_unregister(udc_dev->pdev);
+ /*
+ * Just do platform_device_del() here; put_vudc_device()
+ * calls platform_device_put().
+ */
+ platform_device_del(udc_dev->pdev);
put_vudc_device(udc_dev);
}
platform_driver_unregister(&vudc_driver);
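
For context, a driver-core fact rather than part of this patch: platform_device_unregister() is platform_device_del() followed by platform_device_put(). Calling only _del() here leaves the final reference for put_vudc_device() to drop, avoiding a double put.
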
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index aff50eb09ca9..68ddee86a886 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -189,7 +189,7 @@ struct wusb_mac_scratch {
* NOTE: blen is not aligned to a block size, we'll pad zeros, that's
* what sg[4] is for. Maybe there is a smarter way to do this.
*/
-static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
+static int wusb_ccm_mac(struct crypto_sync_skcipher *tfm_cbc,
struct crypto_cipher *tfm_aes,
struct wusb_mac_scratch *scratch,
void *mic,
@@ -198,7 +198,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
size_t blen)
{
int result = 0;
- SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
@@ -224,7 +224,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
if (!dst_buf)
goto error_dst_buf;
- iv = kzalloc(crypto_skcipher_ivsize(tfm_cbc), GFP_KERNEL);
+ iv = kzalloc(crypto_sync_skcipher_ivsize(tfm_cbc), GFP_KERNEL);
if (!iv)
goto error_iv;
@@ -251,7 +251,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
sg_init_one(&sg_dst, dst_buf, dst_size);
- skcipher_request_set_tfm(req, tfm_cbc);
+ skcipher_request_set_sync_tfm(req, tfm_cbc);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, &sg_dst, dst_size, iv);
result = crypto_skcipher_encrypt(req);
@@ -298,19 +298,19 @@ ssize_t wusb_prf(void *out, size_t out_size,
{
ssize_t result, bytes = 0, bitr;
struct aes_ccm_nonce n = *_n;
- struct crypto_skcipher *tfm_cbc;
+ struct crypto_sync_skcipher *tfm_cbc;
struct crypto_cipher *tfm_aes;
struct wusb_mac_scratch *scratch;
u64 sfn = 0;
__le64 sfn_le;
- tfm_cbc = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm_cbc = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
if (IS_ERR(tfm_cbc)) {
result = PTR_ERR(tfm_cbc);
printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
goto error_alloc_cbc;
}
- result = crypto_skcipher_setkey(tfm_cbc, key, 16);
+ result = crypto_sync_skcipher_setkey(tfm_cbc, key, 16);
if (result < 0) {
printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
goto error_setkey_cbc;
@@ -351,7 +351,7 @@ error_setkey_aes:
crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
- crypto_free_skcipher(tfm_cbc);
+ crypto_free_sync_skcipher(tfm_cbc);
error_alloc_cbc:
return result;
}
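
The conversion above is the standard sync-skcipher pattern that makes on-stack requests safe; condensed into one place it looks roughly like this (sketch only, error handling elided):

	struct crypto_sync_skcipher *tfm;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	crypto_sync_skcipher_setkey(tfm, key, 16);
	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
		crypto_skcipher_encrypt(req);
	}
	crypto_free_sync_skcipher(tfm);
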
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index 38884aac862b..a5734cbcd5ad 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -470,9 +470,7 @@ error:
int wa_rpipes_create(struct wahc *wa)
{
wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes);
- wa->rpipe_bm = kcalloc(BITS_TO_LONGS(wa->rpipes),
- sizeof(unsigned long),
- GFP_KERNEL);
+ wa->rpipe_bm = bitmap_zalloc(wa->rpipes, GFP_KERNEL);
if (wa->rpipe_bm == NULL)
return -ENOMEM;
return 0;
@@ -487,7 +485,7 @@ void wa_rpipes_destroy(struct wahc *wa)
dev_err(dev, "BUG: pipes not released on exit: %*pb\n",
wa->rpipes, wa->rpipe_bm);
}
- kfree(wa->rpipe_bm);
+ bitmap_free(wa->rpipe_bm);
}
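
bitmap_zalloc(nbits, flags) is the dedicated helper for exactly the kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) idiom it replaces, paired with bitmap_free() instead of kfree(); behaviour is unchanged.
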
/*
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 96721b154454..b30926e11d87 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -444,7 +444,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
struct mm_iommu_table_group_mem_t *mem = NULL;
int ret;
unsigned long hpa = 0;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
if (!pua)
return;
@@ -467,8 +467,27 @@ static int tce_iommu_clear(struct tce_container *container,
unsigned long oldhpa;
long ret;
enum dma_data_direction direction;
+ unsigned long lastentry = entry + pages;
+
+ for ( ; entry < lastentry; ++entry) {
+ if (tbl->it_indirect_levels && tbl->it_userspace) {
+ /*
+ * For multilevel tables, we can take a shortcut here
+ * and skip some TCEs, as we know that the userspace
+ * addresses cache is a mirror of the real TCE table;
+ * if it is missing some indirect levels, then the
+ * hardware table does not have them allocated either
+ * and therefore does not require updating.
+ */
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
+ entry);
+ if (!pua) {
+ /* align to level_size which is power of two */
+ entry |= tbl->it_level_size - 1;
+ continue;
+ }
+ }
- for ( ; pages; --pages, ++entry) {
cond_resched();
direction = DMA_NONE;
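
The skip relies on it_level_size being a power of two. A worked example with illustrative numbers: for it_level_size = 512, entry = 1000 gives 1000 | 511 = 1023, the last index of its 512-entry block, so the loop's ++entry resumes at 1024 and the whole unallocated block is stepped over.
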
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 2919e2334052..71ee978c848f 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -111,22 +111,6 @@ config LCD_HP700
If you have an HP Jornada 700 series handheld (710/720/728)
say Y to enable LCD control driver.
-config LCD_S6E63M0
- tristate "S6E63M0 AMOLED LCD Driver"
- depends on SPI && BACKLIGHT_CLASS_DEVICE
- default n
- help
- If you have an S6E63M0 LCD Panel, say Y to enable its
- LCD control driver.
-
-config LCD_LD9040
- tristate "LD9040 AMOLED LCD Driver"
- depends on SPI && BACKLIGHT_CLASS_DEVICE
- default n
- help
- If you have an LD9040 Panel, say Y to enable its
- control driver.
-
config LCD_AMS369FG06
tristate "AMS369FG06 AMOLED LCD Driver"
depends on SPI && BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 0dcc2c745c03..63c507c07437 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -9,13 +9,11 @@ obj-$(CONFIG_LCD_HX8357) += hx8357.o
obj-$(CONFIG_LCD_ILI922X) += ili922x.o
obj-$(CONFIG_LCD_ILI9320) += ili9320.o
obj-$(CONFIG_LCD_L4F00242T03) += l4f00242t03.o
-obj-$(CONFIG_LCD_LD9040) += ld9040.o
obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
obj-$(CONFIG_LCD_LMS501KF03) += lms501kf03.o
obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
obj-$(CONFIG_LCD_OTM3225A) += otm3225a.o
obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
-obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 35373e2065b2..5e38353b4423 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -391,7 +391,7 @@ static struct platform_driver adp5520_bl_driver = {
module_platform_driver(adp5520_bl_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:adp5520-backlight");
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index f1dc41cf19e3..85318236da2f 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -822,5 +822,5 @@ static struct i2c_driver adp8860_driver = {
module_i2c_driver(adp8860_driver);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP8860 Backlight driver");
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 4fec9aa92d9b..8d50e0299578 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -992,5 +992,5 @@ static struct i2c_driver adp8870_driver = {
module_i2c_driver(adp8870_driver);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP8870 Backlight driver");
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
deleted file mode 100644
index 677f8abba27c..000000000000
--- a/drivers/video/backlight/ld9040.c
+++ /dev/null
@@ -1,811 +0,0 @@
-/*
- * ld9040 AMOLED LCD panel driver.
- *
- * Copyright (c) 2011 Samsung Electronics
- * Author: Donghwa Lee <dh09.lee@samsung.com>
- * Derived from drivers/video/backlight/s6e63m0.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/lcd.h>
-#include <linux/module.h>
-#include <linux/regulator/consumer.h>
-#include <linux/spi/spi.h>
-#include <linux/wait.h>
-
-#include "ld9040_gamma.h"
-
-#define SLEEPMSEC 0x1000
-#define ENDDEF 0x2000
-#define DEFMASK 0xFF00
-#define COMMAND_ONLY 0xFE
-#define DATA_ONLY 0xFF
-
-#define MIN_BRIGHTNESS 0
-#define MAX_BRIGHTNESS 24
-
-struct ld9040 {
- struct device *dev;
- struct spi_device *spi;
- unsigned int power;
- unsigned int current_brightness;
-
- struct lcd_device *ld;
- struct backlight_device *bd;
- struct lcd_platform_data *lcd_pd;
-
- struct mutex lock;
- bool enabled;
-};
-
-static struct regulator_bulk_data supplies[] = {
- { .supply = "vdd3", },
- { .supply = "vci", },
-};
-
-static void ld9040_regulator_enable(struct ld9040 *lcd)
-{
- int ret = 0;
- struct lcd_platform_data *pd = NULL;
-
- pd = lcd->lcd_pd;
- mutex_lock(&lcd->lock);
- if (!lcd->enabled) {
- ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
- if (ret)
- goto out;
-
- lcd->enabled = true;
- }
- msleep(pd->power_on_delay);
-out:
- mutex_unlock(&lcd->lock);
-}
-
-static void ld9040_regulator_disable(struct ld9040 *lcd)
-{
- int ret = 0;
-
- mutex_lock(&lcd->lock);
- if (lcd->enabled) {
- ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
- if (ret)
- goto out;
-
- lcd->enabled = false;
- }
-out:
- mutex_unlock(&lcd->lock);
-}
-
-static const unsigned short seq_swreset[] = {
- 0x01, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_user_setting[] = {
- 0xF0, 0x5A,
-
- DATA_ONLY, 0x5A,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_elvss_on[] = {
- 0xB1, 0x0D,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x16,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gtcon[] = {
- 0xF7, 0x09,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_panel_condition[] = {
- 0xF8, 0x05,
-
- DATA_ONLY, 0x65,
- DATA_ONLY, 0x96,
- DATA_ONLY, 0x71,
- DATA_ONLY, 0x7D,
- DATA_ONLY, 0x19,
- DATA_ONLY, 0x3B,
- DATA_ONLY, 0x0D,
- DATA_ONLY, 0x19,
- DATA_ONLY, 0x7E,
- DATA_ONLY, 0x0D,
- DATA_ONLY, 0xE2,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x7E,
- DATA_ONLY, 0x7D,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x02,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gamma_set1[] = {
- 0xF9, 0x00,
-
- DATA_ONLY, 0xA7,
- DATA_ONLY, 0xB4,
- DATA_ONLY, 0xAE,
- DATA_ONLY, 0xBF,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x91,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xB2,
- DATA_ONLY, 0xB4,
- DATA_ONLY, 0xAA,
- DATA_ONLY, 0xBB,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xAC,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xB3,
- DATA_ONLY, 0xB1,
- DATA_ONLY, 0xAA,
- DATA_ONLY, 0xBC,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xB3,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gamma_ctrl[] = {
- 0xFB, 0x02,
-
- DATA_ONLY, 0x5A,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gamma_start[] = {
- 0xF9, COMMAND_ONLY,
-
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_apon[] = {
- 0xF3, 0x00,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x0A,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_display_ctrl[] = {
- 0xF2, 0x02,
-
- DATA_ONLY, 0x08,
- DATA_ONLY, 0x08,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x10,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_manual_pwr[] = {
- 0xB0, 0x04,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_pwr_ctrl[] = {
- 0xF4, 0x0A,
-
- DATA_ONLY, 0x87,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x6A,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x02,
- DATA_ONLY, 0x88,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_sleep_out[] = {
- 0x11, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_sleep_in[] = {
- 0x10, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_display_on[] = {
- 0x29, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_display_off[] = {
- 0x28, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vci1_1st_en[] = {
- 0xF3, 0x10,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vl1_en[] = {
- 0xF3, 0x11,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vl2_en[] = {
- 0xF3, 0x13,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vci1_2nd_en[] = {
- 0xF3, 0x33,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vl3_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vreg1_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x01,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vgh_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vgl_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x31,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vmos_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xB1,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vint_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xF1,
- /* DATA_ONLY, 0x71, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vbh_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xF9,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vbl_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFD,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gam_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_sd_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x80,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gls_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x81,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_els_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x83,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_el_on[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x87,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static int ld9040_spi_write_byte(struct ld9040 *lcd, int addr, int data)
-{
- u16 buf[1];
- struct spi_message msg;
-
- struct spi_transfer xfer = {
- .len = 2,
- .tx_buf = buf,
- };
-
- buf[0] = (addr << 8) | data;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- return spi_sync(lcd->spi, &msg);
-}
-
-static int ld9040_spi_write(struct ld9040 *lcd, unsigned char address,
- unsigned char command)
-{
- int ret = 0;
-
- if (address != DATA_ONLY)
- ret = ld9040_spi_write_byte(lcd, 0x0, address);
- if (command != COMMAND_ONLY)
- ret = ld9040_spi_write_byte(lcd, 0x1, command);
-
- return ret;
-}
-
-static int ld9040_panel_send_sequence(struct ld9040 *lcd,
- const unsigned short *wbuf)
-{
- int ret = 0, i = 0;
-
- while ((wbuf[i] & DEFMASK) != ENDDEF) {
- if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
- ret = ld9040_spi_write(lcd, wbuf[i], wbuf[i+1]);
- if (ret)
- break;
- } else {
- msleep(wbuf[i+1]);
- }
- i += 2;
- }
-
- return ret;
-}
-
-static int _ld9040_gamma_ctl(struct ld9040 *lcd, const unsigned int *gamma)
-{
- unsigned int i = 0;
- int ret = 0;
-
- /* start gamma table updating. */
- ret = ld9040_panel_send_sequence(lcd, seq_gamma_start);
- if (ret) {
- dev_err(lcd->dev, "failed to disable gamma table updating.\n");
- goto gamma_err;
- }
-
- for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
- ret = ld9040_spi_write(lcd, DATA_ONLY, gamma[i]);
- if (ret) {
- dev_err(lcd->dev, "failed to set gamma table.\n");
- goto gamma_err;
- }
- }
-
- /* update gamma table. */
- ret = ld9040_panel_send_sequence(lcd, seq_gamma_ctrl);
- if (ret)
- dev_err(lcd->dev, "failed to update gamma table.\n");
-
-gamma_err:
- return ret;
-}
-
-static int ld9040_gamma_ctl(struct ld9040 *lcd, int gamma)
-{
- return _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
-}
-
-static int ld9040_ldi_init(struct ld9040 *lcd)
-{
- int ret, i;
- static const unsigned short *init_seq[] = {
- seq_user_setting,
- seq_panel_condition,
- seq_display_ctrl,
- seq_manual_pwr,
- seq_elvss_on,
- seq_gtcon,
- seq_gamma_set1,
- seq_gamma_ctrl,
- seq_sleep_out,
- };
-
- for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
- ret = ld9040_panel_send_sequence(lcd, init_seq[i]);
- /* workaround: minimum delay time for transferring CMD */
- usleep_range(300, 310);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int ld9040_ldi_enable(struct ld9040 *lcd)
-{
- return ld9040_panel_send_sequence(lcd, seq_display_on);
-}
-
-static int ld9040_ldi_disable(struct ld9040 *lcd)
-{
- int ret;
-
- ret = ld9040_panel_send_sequence(lcd, seq_display_off);
- ret = ld9040_panel_send_sequence(lcd, seq_sleep_in);
-
- return ret;
-}
-
-static int ld9040_power_is_on(int power)
-{
- return power <= FB_BLANK_NORMAL;
-}
-
-static int ld9040_power_on(struct ld9040 *lcd)
-{
- int ret = 0;
- struct lcd_platform_data *pd;
-
- pd = lcd->lcd_pd;
-
- /* lcd power on */
- ld9040_regulator_enable(lcd);
-
- if (!pd->reset) {
- dev_err(lcd->dev, "reset is NULL.\n");
- return -EINVAL;
- }
-
- pd->reset(lcd->ld);
- msleep(pd->reset_delay);
-
- ret = ld9040_ldi_init(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to initialize ldi.\n");
- return ret;
- }
-
- ret = ld9040_ldi_enable(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to enable ldi.\n");
- return ret;
- }
-
- return 0;
-}
-
-static int ld9040_power_off(struct ld9040 *lcd)
-{
- int ret;
- struct lcd_platform_data *pd;
-
- pd = lcd->lcd_pd;
-
- ret = ld9040_ldi_disable(lcd);
- if (ret) {
- dev_err(lcd->dev, "lcd setting failed.\n");
- return -EIO;
- }
-
- msleep(pd->power_off_delay);
-
- /* lcd power off */
- ld9040_regulator_disable(lcd);
-
- return 0;
-}
-
-static int ld9040_power(struct ld9040 *lcd, int power)
-{
- int ret = 0;
-
- if (ld9040_power_is_on(power) && !ld9040_power_is_on(lcd->power))
- ret = ld9040_power_on(lcd);
- else if (!ld9040_power_is_on(power) && ld9040_power_is_on(lcd->power))
- ret = ld9040_power_off(lcd);
-
- if (!ret)
- lcd->power = power;
-
- return ret;
-}
-
-static int ld9040_set_power(struct lcd_device *ld, int power)
-{
- struct ld9040 *lcd = lcd_get_data(ld);
-
- if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
- power != FB_BLANK_NORMAL) {
- dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
- return -EINVAL;
- }
-
- return ld9040_power(lcd, power);
-}
-
-static int ld9040_get_power(struct lcd_device *ld)
-{
- struct ld9040 *lcd = lcd_get_data(ld);
-
- return lcd->power;
-}
-
-static int ld9040_set_brightness(struct backlight_device *bd)
-{
- int ret = 0, brightness = bd->props.brightness;
- struct ld9040 *lcd = bl_get_data(bd);
-
- if (brightness < MIN_BRIGHTNESS ||
- brightness > bd->props.max_brightness) {
- dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
- MIN_BRIGHTNESS, MAX_BRIGHTNESS);
- return -EINVAL;
- }
-
- ret = ld9040_gamma_ctl(lcd, bd->props.brightness);
- if (ret) {
- dev_err(&bd->dev, "lcd brightness setting failed.\n");
- return -EIO;
- }
-
- return ret;
-}
-
-static struct lcd_ops ld9040_lcd_ops = {
- .set_power = ld9040_set_power,
- .get_power = ld9040_get_power,
-};
-
-static const struct backlight_ops ld9040_backlight_ops = {
- .update_status = ld9040_set_brightness,
-};
-
-static int ld9040_probe(struct spi_device *spi)
-{
- int ret = 0;
- struct ld9040 *lcd = NULL;
- struct lcd_device *ld = NULL;
- struct backlight_device *bd = NULL;
- struct backlight_properties props;
-
- lcd = devm_kzalloc(&spi->dev, sizeof(struct ld9040), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
-
- /* ld9040 lcd panel uses 3-wire 9bits SPI Mode. */
- spi->bits_per_word = 9;
-
- ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "spi setup failed.\n");
- return ret;
- }
-
- lcd->spi = spi;
- lcd->dev = &spi->dev;
-
- lcd->lcd_pd = dev_get_platdata(&spi->dev);
- if (!lcd->lcd_pd) {
- dev_err(&spi->dev, "platform data is NULL.\n");
- return -EINVAL;
- }
-
- mutex_init(&lcd->lock);
-
- ret = devm_regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
- if (ret) {
- dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
- return ret;
- }
-
- ld = devm_lcd_device_register(&spi->dev, "ld9040", &spi->dev, lcd,
- &ld9040_lcd_ops);
- if (IS_ERR(ld))
- return PTR_ERR(ld);
-
- lcd->ld = ld;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = MAX_BRIGHTNESS;
-
- bd = devm_backlight_device_register(&spi->dev, "ld9040-bl", &spi->dev,
- lcd, &ld9040_backlight_ops, &props);
- if (IS_ERR(bd))
- return PTR_ERR(bd);
-
- bd->props.brightness = MAX_BRIGHTNESS;
- lcd->bd = bd;
-
- /*
- * if lcd panel was on from bootloader like u-boot then
- * do not lcd on.
- */
- if (!lcd->lcd_pd->lcd_enabled) {
- /*
- * if lcd panel was off from bootloader then
- * current lcd status is powerdown and then
- * it enables lcd panel.
- */
- lcd->power = FB_BLANK_POWERDOWN;
-
- ld9040_power(lcd, FB_BLANK_UNBLANK);
- } else {
- lcd->power = FB_BLANK_UNBLANK;
- }
-
- spi_set_drvdata(spi, lcd);
-
- dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
- return 0;
-}
-
-static int ld9040_remove(struct spi_device *spi)
-{
- struct ld9040 *lcd = spi_get_drvdata(spi);
-
- ld9040_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ld9040_suspend(struct device *dev)
-{
- struct ld9040 *lcd = dev_get_drvdata(dev);
-
- dev_dbg(dev, "lcd->power = %d\n", lcd->power);
-
- /*
- * when lcd panel is suspend, lcd panel becomes off
- * regardless of status.
- */
- return ld9040_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static int ld9040_resume(struct device *dev)
-{
- struct ld9040 *lcd = dev_get_drvdata(dev);
-
- lcd->power = FB_BLANK_POWERDOWN;
-
- return ld9040_power(lcd, FB_BLANK_UNBLANK);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ld9040_pm_ops, ld9040_suspend, ld9040_resume);
-
-/* Power down all displays on reboot, poweroff or halt. */
-static void ld9040_shutdown(struct spi_device *spi)
-{
- struct ld9040 *lcd = spi_get_drvdata(spi);
-
- ld9040_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static struct spi_driver ld9040_driver = {
- .driver = {
- .name = "ld9040",
- .pm = &ld9040_pm_ops,
- },
- .probe = ld9040_probe,
- .remove = ld9040_remove,
- .shutdown = ld9040_shutdown,
-};
-
-module_spi_driver(ld9040_driver);
-
-MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
-MODULE_DESCRIPTION("ld9040 LCD Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/ld9040_gamma.h b/drivers/video/backlight/ld9040_gamma.h
deleted file mode 100644
index c5e586d97385..000000000000
--- a/drivers/video/backlight/ld9040_gamma.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Gamma level definitions.
- *
- * Copyright (c) 2011 Samsung Electronics
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _LD9040_BRIGHTNESS_H
-#define _LD9040_BRIGHTNESS_H
-
-#define MAX_GAMMA_LEVEL 25
-#define GAMMA_TABLE_COUNT 21
-
-/* gamma value: 2.2 */
-static const unsigned int ld9040_22_300[] = {
- 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91,
- 0x00, 0xb2, 0xb4, 0xaa, 0xbb, 0x00, 0xac,
- 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3
-};
-
-static const unsigned int ld9040_22_290[] = {
- 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89,
- 0x00, 0xb7, 0xb6, 0xa8, 0xba, 0x00, 0xa4,
- 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa
-};
-
-static const unsigned int ld9040_22_280[] = {
- 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86,
- 0x00, 0xb8, 0xb5, 0xa8, 0xbc, 0x00, 0xa0,
- 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7
-};
-
-static const unsigned int ld9040_22_270[] = {
- 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84,
- 0x00, 0xb9, 0xb7, 0xa8, 0xbc, 0x00, 0x9d,
- 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4
-
-};
-static const unsigned int ld9040_22_260[] = {
- 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80,
- 0x00, 0xb8, 0xb6, 0xaa, 0xbc, 0x00, 0x9a,
- 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0
-};
-
-static const unsigned int ld9040_22_250[] = {
- 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d,
- 0x00, 0xb9, 0xb6, 0xaa, 0xbb, 0x00, 0x97,
- 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d
-};
-
-static const unsigned int ld9040_22_240[] = {
- 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a,
- 0x00, 0xb9, 0xb7, 0xaa, 0xbd, 0x00, 0x94,
- 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a
-};
-
-static const unsigned int ld9040_22_230[] = {
- 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77,
- 0x00, 0xb9, 0xb7, 0xab, 0xbe, 0x00, 0x90,
- 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97
-};
-
-static const unsigned int ld9040_22_220[] = {
- 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75,
- 0x00, 0xb9, 0xb8, 0xab, 0xbe, 0x00, 0x8e,
- 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94
-};
-
-static const unsigned int ld9040_22_210[] = {
- 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72,
- 0x00, 0xb8, 0xb8, 0xac, 0xbf, 0x00, 0x8a,
- 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91
-};
-
-static const unsigned int ld9040_22_200[] = {
- 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f,
- 0x00, 0xb8, 0xb8, 0xad, 0xc0, 0x00, 0x86,
- 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d
-};
-
-static const unsigned int ld9040_22_190[] = {
- 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c,
- 0x00, 0xb8, 0xb8, 0xae, 0xc1, 0x00, 0x82,
- 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89
-};
-
-static const unsigned int ld9040_22_180[] = {
- 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69,
- 0x00, 0xb8, 0xb9, 0xae, 0xc1, 0x00, 0x7f,
- 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85
-};
-
-static const unsigned int ld9040_22_170[] = {
- 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65,
- 0x00, 0xb7, 0xb8, 0xaf, 0xc3, 0x00, 0x7a,
- 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81
-};
-
-static const unsigned int ld9040_22_160[] = {
- 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62,
- 0x00, 0xb6, 0xba, 0xaf, 0xc3, 0x00, 0x76,
- 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e
-};
-
-static const unsigned int ld9040_22_150[] = {
- 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f,
- 0x00, 0xb5, 0xba, 0xb0, 0xc3, 0x00, 0x72,
- 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a
-};
-
-static const unsigned int ld9040_22_140[] = {
- 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b,
- 0x00, 0xb5, 0xba, 0xb1, 0xc4, 0x00, 0x6e,
- 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75
-};
-
-static const unsigned int ld9040_22_130[] = {
- 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57,
- 0x00, 0xb5, 0xbb, 0xb0, 0xc5, 0x00, 0x6a,
- 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70
-};
-
-static const unsigned int ld9040_22_120[] = {
- 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53,
- 0x00, 0xb5, 0xbb, 0xb3, 0xc6, 0x00, 0x65,
- 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c
-};
-
-static const unsigned int ld9040_22_110[] = {
- 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f,
- 0x00, 0xb4, 0xbb, 0xb3, 0xc7, 0x00, 0x60,
- 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67
-};
-
-static const unsigned int ld9040_22_100[] = {
- 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b,
- 0x00, 0xb3, 0xbc, 0xb4, 0xc7, 0x00, 0x5c,
- 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62
-};
-
-static const unsigned int ld9040_22_90[] = {
- 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46,
- 0x00, 0xb1, 0xbc, 0xb5, 0xc8, 0x00, 0x56,
- 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d
-};
-
-static const unsigned int ld9040_22_80[] = {
- 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41,
- 0x00, 0xb0, 0xbe, 0xb5, 0xc9, 0x00, 0x51,
- 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57
-};
-
-static const unsigned int ld9040_22_70[] = {
- 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c,
- 0x00, 0xaf, 0xbf, 0xb6, 0xcb, 0x00, 0x4b,
- 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52
-};
-
-static const unsigned int ld9040_22_50[] = {
- 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30,
- 0x00, 0xaf, 0xc0, 0xb8, 0xcd, 0x00, 0x3d,
- 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44
-};
-
-struct ld9040_gamma {
- unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
-};
-
-static struct ld9040_gamma gamma_table = {
- .gamma_22_table[0] = (unsigned int *)&ld9040_22_50,
- .gamma_22_table[1] = (unsigned int *)&ld9040_22_70,
- .gamma_22_table[2] = (unsigned int *)&ld9040_22_80,
- .gamma_22_table[3] = (unsigned int *)&ld9040_22_90,
- .gamma_22_table[4] = (unsigned int *)&ld9040_22_100,
- .gamma_22_table[5] = (unsigned int *)&ld9040_22_110,
- .gamma_22_table[6] = (unsigned int *)&ld9040_22_120,
- .gamma_22_table[7] = (unsigned int *)&ld9040_22_130,
- .gamma_22_table[8] = (unsigned int *)&ld9040_22_140,
- .gamma_22_table[9] = (unsigned int *)&ld9040_22_150,
- .gamma_22_table[10] = (unsigned int *)&ld9040_22_160,
- .gamma_22_table[11] = (unsigned int *)&ld9040_22_170,
- .gamma_22_table[12] = (unsigned int *)&ld9040_22_180,
- .gamma_22_table[13] = (unsigned int *)&ld9040_22_190,
- .gamma_22_table[14] = (unsigned int *)&ld9040_22_200,
- .gamma_22_table[15] = (unsigned int *)&ld9040_22_210,
- .gamma_22_table[16] = (unsigned int *)&ld9040_22_220,
- .gamma_22_table[17] = (unsigned int *)&ld9040_22_230,
- .gamma_22_table[18] = (unsigned int *)&ld9040_22_240,
- .gamma_22_table[19] = (unsigned int *)&ld9040_22_250,
- .gamma_22_table[20] = (unsigned int *)&ld9040_22_260,
- .gamma_22_table[21] = (unsigned int *)&ld9040_22_270,
- .gamma_22_table[22] = (unsigned int *)&ld9040_22_280,
- .gamma_22_table[23] = (unsigned int *)&ld9040_22_290,
- .gamma_22_table[24] = (unsigned int *)&ld9040_22_300,
-};
-
-#endif
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index cd50df5807ea..086611c7bc03 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -400,10 +400,8 @@ static int lm3639_remove(struct i2c_client *client)
regmap_write(pchip->regmap, REG_ENABLE, 0x00);
- if (&pchip->cdev_torch)
- led_classdev_unregister(&pchip->cdev_torch);
- if (&pchip->cdev_flash)
- led_classdev_unregister(&pchip->cdev_flash);
+ led_classdev_unregister(&pchip->cdev_torch);
+ led_classdev_unregister(&pchip->cdev_flash);
if (pchip->bled)
device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
return 0;
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index bdfcc0a71db1..678b27063198 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,10 +28,8 @@
struct pwm_bl_data {
struct pwm_device *pwm;
struct device *dev;
- unsigned int period;
unsigned int lth_brightness;
unsigned int *levels;
- bool enabled;
struct regulator *power_supply;
struct gpio_desc *enable_gpio;
unsigned int scale;
@@ -46,31 +44,35 @@ struct pwm_bl_data {
void (*exit)(struct device *);
};
-static void pwm_backlight_power_on(struct pwm_bl_data *pb, int brightness)
+static void pwm_backlight_power_on(struct pwm_bl_data *pb)
{
+ struct pwm_state state;
int err;
- if (pb->enabled)
+ pwm_get_state(pb->pwm, &state);
+ if (state.enabled)
return;
err = regulator_enable(pb->power_supply);
if (err < 0)
dev_err(pb->dev, "failed to enable power supply\n");
- pwm_enable(pb->pwm);
+ state.enabled = true;
+ pwm_apply_state(pb->pwm, &state);
if (pb->post_pwm_on_delay)
msleep(pb->post_pwm_on_delay);
if (pb->enable_gpio)
gpiod_set_value_cansleep(pb->enable_gpio, 1);
-
- pb->enabled = true;
}
static void pwm_backlight_power_off(struct pwm_bl_data *pb)
{
- if (!pb->enabled)
+ struct pwm_state state;
+
+ pwm_get_state(pb->pwm, &state);
+ if (!state.enabled)
return;
if (pb->enable_gpio)
@@ -79,24 +81,27 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
if (pb->pwm_off_delay)
msleep(pb->pwm_off_delay);
- pwm_config(pb->pwm, 0, pb->period);
- pwm_disable(pb->pwm);
+ state.enabled = false;
+ state.duty_cycle = 0;
+ pwm_apply_state(pb->pwm, &state);
regulator_disable(pb->power_supply);
- pb->enabled = false;
}
static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
{
unsigned int lth = pb->lth_brightness;
+ struct pwm_state state;
u64 duty_cycle;
+ pwm_get_state(pb->pwm, &state);
+
if (pb->levels)
duty_cycle = pb->levels[brightness];
else
duty_cycle = brightness;
- duty_cycle *= pb->period - lth;
+ duty_cycle *= state.period - lth;
do_div(duty_cycle, pb->scale);
return duty_cycle + lth;
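
A worked example of the duty computation above (illustrative numbers): with state.period = 1,000,000 ns, lth = 0, scale = 255 and a level value of 128, duty_cycle = 128 * 1,000,000 / 255 = 501,960 ns after the integer division.
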
@@ -106,7 +111,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
{
struct pwm_bl_data *pb = bl_get_data(bl);
int brightness = bl->props.brightness;
- int duty_cycle;
+ struct pwm_state state;
if (bl->props.power != FB_BLANK_UNBLANK ||
bl->props.fb_blank != FB_BLANK_UNBLANK ||
@@ -117,9 +122,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
brightness = pb->notify(pb->dev, brightness);
if (brightness > 0) {
- duty_cycle = compute_duty_cycle(pb, brightness);
- pwm_config(pb->pwm, duty_cycle, pb->period);
- pwm_backlight_power_on(pb, brightness);
+ pwm_get_state(pb->pwm, &state);
+ state.duty_cycle = compute_duty_cycle(pb, brightness);
+ pwm_apply_state(pb->pwm, &state);
+ pwm_backlight_power_on(pb);
} else
pwm_backlight_power_off(pb);
@@ -447,7 +453,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct pwm_bl_data *pb;
struct pwm_state state;
- struct pwm_args pargs;
unsigned int i;
int ret;
@@ -478,7 +483,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->check_fb = data->check_fb;
pb->exit = data->exit;
pb->dev = &pdev->dev;
- pb->enabled = false;
pb->post_pwm_on_delay = data->post_pwm_on_delay;
pb->pwm_off_delay = data->pwm_off_delay;
@@ -539,10 +543,26 @@ static int pwm_backlight_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "got pwm for backlight\n");
- if (!data->levels) {
- /* Get the PWM period (in nanoseconds) */
- pwm_get_state(pb->pwm, &state);
+ /* Sync up PWM state. */
+ pwm_init_state(pb->pwm, &state);
+ /*
+ * The DT case will set the pwm_period_ns field to 0 and store the
+ * period, parsed from the DT, in the PWM device. For the non-DT case,
+ * set the period from platform data if it has not already been set
+ * via the PWM lookup table.
+ */
+ if (!state.period && (data->pwm_period_ns > 0))
+ state.period = data->pwm_period_ns;
+
+ ret = pwm_apply_state(pb->pwm, &state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
+ ret);
+ goto err_alloc;
+ }
+
+ if (!data->levels) {
ret = pwm_backlight_brightness_default(&pdev->dev, data,
state.period);
if (ret < 0) {
@@ -559,24 +579,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->levels = data->levels;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pb->pwm);
-
- /*
- * The DT case will set the pwm_period_ns field to 0 and store the
- * period, parsed from the DT, in the PWM device. For the non-DT case,
- * set the period from platform data if it has not already been set
- * via the PWM lookup table.
- */
- pwm_get_args(pb->pwm, &pargs);
- pb->period = pargs.period;
- if (!pb->period && (data->pwm_period_ns > 0))
- pb->period = data->pwm_period_ns;
-
- pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale);
+ pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
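
The pwm_bl conversion above replaces the driver's cached period/enabled fields with the atomic PWM API: the current hardware state is read back with pwm_get_state(), modified, and committed with pwm_apply_state(), while probe seeds the initial state once via pwm_init_state(). A minimal sketch of the same pattern for a hypothetical consumer (struct my_dev and the 1 ms fallback period are illustrative; the pwm_* calls are the kernel API used in the diff):

#include <linux/pwm.h>

struct my_dev {
	struct pwm_device *pwm;
};

static int my_set_duty(struct my_dev *d, unsigned int duty_ns)
{
	struct pwm_state state;

	pwm_get_state(d->pwm, &state);	/* start from the current state */
	state.duty_cycle = duty_ns;
	state.enabled = duty_ns > 0;
	return pwm_apply_state(d->pwm, &state);	/* commit atomically */
}

static int my_init_pwm(struct my_dev *d)
{
	struct pwm_state state;

	pwm_init_state(d->pwm, &state);	/* seed from pwm_args / DT */
	if (!state.period)
		state.period = 1000000;	/* assumed fallback: 1 ms */
	return pwm_apply_state(d->pwm, &state);
}

Keeping all bookkeeping in struct pwm_state is what lets the diff delete the period and enabled fields: the PWM core, not the consumer, is the single source of truth.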
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
deleted file mode 100644
index 3c4a22a3063a..000000000000
--- a/drivers/video/backlight/s6e63m0.c
+++ /dev/null
@@ -1,857 +0,0 @@
-/*
- * S6E63M0 AMOLED LCD panel driver.
- *
- * Author: InKi Dae <inki.dae@samsung.com>
- *
- * Derived from drivers/video/omap/lcd-apollon.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/lcd.h>
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-#include <linux/wait.h>
-
-#include "s6e63m0_gamma.h"
-
-#define SLEEPMSEC 0x1000
-#define ENDDEF 0x2000
-#define DEFMASK 0xFF00
-#define COMMAND_ONLY 0xFE
-#define DATA_ONLY 0xFF
-
-#define MIN_BRIGHTNESS 0
-#define MAX_BRIGHTNESS 10
-
-struct s6e63m0 {
- struct device *dev;
- struct spi_device *spi;
- unsigned int power;
- unsigned int current_brightness;
- unsigned int gamma_mode;
- unsigned int gamma_table_count;
- struct lcd_device *ld;
- struct backlight_device *bd;
- struct lcd_platform_data *lcd_pd;
-};
-
-static const unsigned short seq_panel_condition_set[] = {
- 0xF8, 0x01,
- DATA_ONLY, 0x27,
- DATA_ONLY, 0x27,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x54,
- DATA_ONLY, 0x9f,
- DATA_ONLY, 0x63,
- DATA_ONLY, 0x86,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x0d,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_display_condition_set[] = {
- 0xf2, 0x02,
- DATA_ONLY, 0x03,
- DATA_ONLY, 0x1c,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x10,
-
- 0xf7, 0x03,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_gamma_setting[] = {
- 0xfa, 0x00,
- DATA_ONLY, 0x18,
- DATA_ONLY, 0x08,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x64,
- DATA_ONLY, 0x56,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0xb6,
- DATA_ONLY, 0xba,
- DATA_ONLY, 0xa8,
- DATA_ONLY, 0xac,
- DATA_ONLY, 0xb1,
- DATA_ONLY, 0x9d,
- DATA_ONLY, 0xc1,
- DATA_ONLY, 0xc1,
- DATA_ONLY, 0xb7,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x9c,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x9f,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xd6,
-
- 0xfa, 0x01,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_etc_condition_set[] = {
- 0xf6, 0x00,
- DATA_ONLY, 0x8c,
- DATA_ONLY, 0x07,
-
- 0xb3, 0xc,
-
- 0xb5, 0x2c,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x0c,
- DATA_ONLY, 0x0a,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0e,
- DATA_ONLY, 0x17,
- DATA_ONLY, 0x13,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x2a,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1b,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x17,
-
- DATA_ONLY, 0x2b,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x3a,
- DATA_ONLY, 0x34,
- DATA_ONLY, 0x30,
- DATA_ONLY, 0x2c,
- DATA_ONLY, 0x29,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x23,
- DATA_ONLY, 0x21,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x1e,
- DATA_ONLY, 0x1e,
-
- 0xb6, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
-
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
-
- 0xb7, 0x2c,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x0c,
- DATA_ONLY, 0x0a,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0e,
- DATA_ONLY, 0x17,
- DATA_ONLY, 0x13,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x2a,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1b,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x17,
-
- DATA_ONLY, 0x2b,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x3a,
- DATA_ONLY, 0x34,
- DATA_ONLY, 0x30,
- DATA_ONLY, 0x2c,
- DATA_ONLY, 0x29,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x23,
- DATA_ONLY, 0x21,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x1e,
- DATA_ONLY, 0x1e,
-
- 0xb8, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
-
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
-
- 0xb9, 0x2c,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x0c,
- DATA_ONLY, 0x0a,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0e,
- DATA_ONLY, 0x17,
- DATA_ONLY, 0x13,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x2a,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1b,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x17,
-
- DATA_ONLY, 0x2b,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x3a,
- DATA_ONLY, 0x34,
- DATA_ONLY, 0x30,
- DATA_ONLY, 0x2c,
- DATA_ONLY, 0x29,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x23,
- DATA_ONLY, 0x21,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x1e,
- DATA_ONLY, 0x1e,
-
- 0xba, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
-
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
-
- 0xc1, 0x4d,
- DATA_ONLY, 0x96,
- DATA_ONLY, 0x1d,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x01,
- DATA_ONLY, 0xdf,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- DATA_ONLY, 0x06,
- DATA_ONLY, 0x09,
- DATA_ONLY, 0x0d,
- DATA_ONLY, 0x0f,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x15,
- DATA_ONLY, 0x18,
-
- 0xb2, 0x10,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0b,
- DATA_ONLY, 0x05,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_acl_on[] = {
- /* ACL on */
- 0xc0, 0x01,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_acl_off[] = {
- /* ACL off */
- 0xc0, 0x00,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_elvss_on[] = {
- /* ELVSS on */
- 0xb1, 0x0b,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_elvss_off[] = {
- /* ELVSS off */
- 0xb1, 0x0a,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_stand_by_off[] = {
- 0x11, COMMAND_ONLY,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_stand_by_on[] = {
- 0x10, COMMAND_ONLY,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_display_on[] = {
- 0x29, COMMAND_ONLY,
-
- ENDDEF, 0x0000
-};
-
-
-static int s6e63m0_spi_write_byte(struct s6e63m0 *lcd, int addr, int data)
-{
- u16 buf[1];
- struct spi_message msg;
-
- struct spi_transfer xfer = {
- .len = 2,
- .tx_buf = buf,
- };
-
- buf[0] = (addr << 8) | data;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- return spi_sync(lcd->spi, &msg);
-}
-
-static int s6e63m0_spi_write(struct s6e63m0 *lcd, unsigned char address,
- unsigned char command)
-{
- int ret = 0;
-
- if (address != DATA_ONLY)
- ret = s6e63m0_spi_write_byte(lcd, 0x0, address);
- if (command != COMMAND_ONLY)
- ret = s6e63m0_spi_write_byte(lcd, 0x1, command);
-
- return ret;
-}
-
-static int s6e63m0_panel_send_sequence(struct s6e63m0 *lcd,
- const unsigned short *wbuf)
-{
- int ret = 0, i = 0;
-
- while ((wbuf[i] & DEFMASK) != ENDDEF) {
- if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
- ret = s6e63m0_spi_write(lcd, wbuf[i], wbuf[i+1]);
- if (ret)
- break;
- } else {
- msleep(wbuf[i+1]);
- }
- i += 2;
- }
-
- return ret;
-}
-
-static int _s6e63m0_gamma_ctl(struct s6e63m0 *lcd, const unsigned int *gamma)
-{
- unsigned int i = 0;
- int ret = 0;
-
- /* disable gamma table updating. */
- ret = s6e63m0_spi_write(lcd, 0xfa, 0x00);
- if (ret) {
- dev_err(lcd->dev, "failed to disable gamma table updating.\n");
- goto gamma_err;
- }
-
- for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
- ret = s6e63m0_spi_write(lcd, DATA_ONLY, gamma[i]);
- if (ret) {
- dev_err(lcd->dev, "failed to set gamma table.\n");
- goto gamma_err;
- }
- }
-
- /* update gamma table. */
- ret = s6e63m0_spi_write(lcd, 0xfa, 0x01);
- if (ret)
- dev_err(lcd->dev, "failed to update gamma table.\n");
-
-gamma_err:
- return ret;
-}
-
-static int s6e63m0_gamma_ctl(struct s6e63m0 *lcd, int gamma)
-{
- int ret = 0;
-
- ret = _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
-
- return ret;
-}
-
-
-static int s6e63m0_ldi_init(struct s6e63m0 *lcd)
-{
- int ret, i;
- const unsigned short *init_seq[] = {
- seq_panel_condition_set,
- seq_display_condition_set,
- seq_gamma_setting,
- seq_etc_condition_set,
- seq_acl_on,
- seq_elvss_on,
- };
-
- for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
- ret = s6e63m0_panel_send_sequence(lcd, init_seq[i]);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int s6e63m0_ldi_enable(struct s6e63m0 *lcd)
-{
- int ret = 0, i;
- const unsigned short *enable_seq[] = {
- seq_stand_by_off,
- seq_display_on,
- };
-
- for (i = 0; i < ARRAY_SIZE(enable_seq); i++) {
- ret = s6e63m0_panel_send_sequence(lcd, enable_seq[i]);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int s6e63m0_ldi_disable(struct s6e63m0 *lcd)
-{
- int ret;
-
- ret = s6e63m0_panel_send_sequence(lcd, seq_stand_by_on);
-
- return ret;
-}
-
-static int s6e63m0_power_is_on(int power)
-{
- return power <= FB_BLANK_NORMAL;
-}
-
-static int s6e63m0_power_on(struct s6e63m0 *lcd)
-{
- int ret = 0;
- struct lcd_platform_data *pd;
- struct backlight_device *bd;
-
- pd = lcd->lcd_pd;
- bd = lcd->bd;
-
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EINVAL;
- }
-
- pd->power_on(lcd->ld, 1);
- msleep(pd->power_on_delay);
-
- if (!pd->reset) {
- dev_err(lcd->dev, "reset is NULL.\n");
- return -EINVAL;
- }
-
- pd->reset(lcd->ld);
- msleep(pd->reset_delay);
-
- ret = s6e63m0_ldi_init(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to initialize ldi.\n");
- return ret;
- }
-
- ret = s6e63m0_ldi_enable(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to enable ldi.\n");
- return ret;
- }
-
- /* set brightness to current value after power on or resume. */
- ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness);
- if (ret) {
- dev_err(lcd->dev, "lcd gamma setting failed.\n");
- return ret;
- }
-
- return 0;
-}
-
-static int s6e63m0_power_off(struct s6e63m0 *lcd)
-{
- int ret;
- struct lcd_platform_data *pd;
-
- pd = lcd->lcd_pd;
-
- ret = s6e63m0_ldi_disable(lcd);
- if (ret) {
- dev_err(lcd->dev, "lcd setting failed.\n");
- return -EIO;
- }
-
- msleep(pd->power_off_delay);
-
- pd->power_on(lcd->ld, 0);
-
- return 0;
-}
-
-static int s6e63m0_power(struct s6e63m0 *lcd, int power)
-{
- int ret = 0;
-
- if (s6e63m0_power_is_on(power) && !s6e63m0_power_is_on(lcd->power))
- ret = s6e63m0_power_on(lcd);
- else if (!s6e63m0_power_is_on(power) && s6e63m0_power_is_on(lcd->power))
- ret = s6e63m0_power_off(lcd);
-
- if (!ret)
- lcd->power = power;
-
- return ret;
-}
-
-static int s6e63m0_set_power(struct lcd_device *ld, int power)
-{
- struct s6e63m0 *lcd = lcd_get_data(ld);
-
- if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
- power != FB_BLANK_NORMAL) {
- dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
- return -EINVAL;
- }
-
- return s6e63m0_power(lcd, power);
-}
-
-static int s6e63m0_get_power(struct lcd_device *ld)
-{
- struct s6e63m0 *lcd = lcd_get_data(ld);
-
- return lcd->power;
-}
-
-static int s6e63m0_set_brightness(struct backlight_device *bd)
-{
- int ret = 0, brightness = bd->props.brightness;
- struct s6e63m0 *lcd = bl_get_data(bd);
-
- if (brightness < MIN_BRIGHTNESS ||
- brightness > bd->props.max_brightness) {
- dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
- MIN_BRIGHTNESS, MAX_BRIGHTNESS);
- return -EINVAL;
- }
-
- ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness);
- if (ret) {
- dev_err(&bd->dev, "lcd brightness setting failed.\n");
- return -EIO;
- }
-
- return ret;
-}
-
-static struct lcd_ops s6e63m0_lcd_ops = {
- .set_power = s6e63m0_set_power,
- .get_power = s6e63m0_get_power,
-};
-
-static const struct backlight_ops s6e63m0_backlight_ops = {
- .update_status = s6e63m0_set_brightness,
-};
-
-static ssize_t s6e63m0_sysfs_show_gamma_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
- char temp[10];
-
- switch (lcd->gamma_mode) {
- case 0:
- sprintf(temp, "2.2 mode\n");
- strcat(buf, temp);
- break;
- case 1:
- sprintf(temp, "1.9 mode\n");
- strcat(buf, temp);
- break;
- case 2:
- sprintf(temp, "1.7 mode\n");
- strcat(buf, temp);
- break;
- default:
- dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7)n");
- break;
- }
-
- return strlen(buf);
-}
-
-static ssize_t s6e63m0_sysfs_store_gamma_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
- struct backlight_device *bd = NULL;
- int brightness, rc;
-
- rc = kstrtouint(buf, 0, &lcd->gamma_mode);
- if (rc < 0)
- return rc;
-
- bd = lcd->bd;
-
- brightness = bd->props.brightness;
-
- switch (lcd->gamma_mode) {
- case 0:
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]);
- break;
- case 1:
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_19_table[brightness]);
- break;
- case 2:
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_17_table[brightness]);
- break;
- default:
- dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n");
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]);
- break;
- }
- return len;
-}
-
-static DEVICE_ATTR(gamma_mode, 0644,
- s6e63m0_sysfs_show_gamma_mode, s6e63m0_sysfs_store_gamma_mode);
-
-static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
- char temp[3];
-
- sprintf(temp, "%u\n", lcd->gamma_table_count);
- strcpy(buf, temp);
-
- return strlen(buf);
-}
-static DEVICE_ATTR(gamma_table, 0444,
- s6e63m0_sysfs_show_gamma_table, NULL);
-
-static int s6e63m0_probe(struct spi_device *spi)
-{
- int ret = 0;
- struct s6e63m0 *lcd = NULL;
- struct lcd_device *ld = NULL;
- struct backlight_device *bd = NULL;
- struct backlight_properties props;
-
- lcd = devm_kzalloc(&spi->dev, sizeof(struct s6e63m0), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
-
- /* s6e63m0 lcd panel uses 3-wire 9bits SPI Mode. */
- spi->bits_per_word = 9;
-
- ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "spi setup failed.\n");
- return ret;
- }
-
- lcd->spi = spi;
- lcd->dev = &spi->dev;
-
- lcd->lcd_pd = dev_get_platdata(&spi->dev);
- if (!lcd->lcd_pd) {
- dev_err(&spi->dev, "platform data is NULL.\n");
- return -EINVAL;
- }
-
- ld = devm_lcd_device_register(&spi->dev, "s6e63m0", &spi->dev, lcd,
- &s6e63m0_lcd_ops);
- if (IS_ERR(ld))
- return PTR_ERR(ld);
-
- lcd->ld = ld;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = MAX_BRIGHTNESS;
-
- bd = devm_backlight_device_register(&spi->dev, "s6e63m0bl-bl",
- &spi->dev, lcd, &s6e63m0_backlight_ops,
- &props);
- if (IS_ERR(bd))
- return PTR_ERR(bd);
-
- bd->props.brightness = MAX_BRIGHTNESS;
- lcd->bd = bd;
-
-	/*
-	 * Export the number of available gamma tables so that
-	 * user space can query it.
-	 */
- lcd->gamma_table_count =
- sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int *));
-
- ret = device_create_file(&(spi->dev), &dev_attr_gamma_mode);
- if (ret < 0)
- dev_err(&(spi->dev), "failed to add sysfs entries\n");
-
- ret = device_create_file(&(spi->dev), &dev_attr_gamma_table);
- if (ret < 0)
- dev_err(&(spi->dev), "failed to add sysfs entries\n");
-
-	/*
-	 * If the LCD panel was already enabled by the bootloader
-	 * (e.g. u-boot), do not power it on again.
-	 */
- if (!lcd->lcd_pd->lcd_enabled) {
-		/*
-		 * The bootloader left the panel off: mark the current
-		 * state as powerdown, then enable the panel.
-		 */
- lcd->power = FB_BLANK_POWERDOWN;
-
- s6e63m0_power(lcd, FB_BLANK_UNBLANK);
- } else {
- lcd->power = FB_BLANK_UNBLANK;
- }
-
- spi_set_drvdata(spi, lcd);
-
- dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n");
-
- return 0;
-}
-
-static int s6e63m0_remove(struct spi_device *spi)
-{
- struct s6e63m0 *lcd = spi_get_drvdata(spi);
-
- s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
- device_remove_file(&spi->dev, &dev_attr_gamma_table);
- device_remove_file(&spi->dev, &dev_attr_gamma_mode);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int s6e63m0_suspend(struct device *dev)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
-
- dev_dbg(dev, "lcd->power = %d\n", lcd->power);
-
-	/*
-	 * On suspend the panel is always powered off,
-	 * regardless of its current state.
-	 */
- return s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static int s6e63m0_resume(struct device *dev)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
-
- lcd->power = FB_BLANK_POWERDOWN;
-
- return s6e63m0_power(lcd, FB_BLANK_UNBLANK);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(s6e63m0_pm_ops, s6e63m0_suspend, s6e63m0_resume);
-
-/* Power down all displays on reboot, poweroff or halt. */
-static void s6e63m0_shutdown(struct spi_device *spi)
-{
- struct s6e63m0 *lcd = spi_get_drvdata(spi);
-
- s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static struct spi_driver s6e63m0_driver = {
- .driver = {
- .name = "s6e63m0",
- .pm = &s6e63m0_pm_ops,
- },
- .probe = s6e63m0_probe,
- .remove = s6e63m0_remove,
- .shutdown = s6e63m0_shutdown,
-};
-
-module_spi_driver(s6e63m0_driver);
-
-MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("S6E63M0 LCD Driver");
-MODULE_LICENSE("GPL");
-
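
The driver removed above drove the panel over 3-wire SPI with 9-bit words, packing a command/data select flag into bit 8 (see s6e63m0_spi_write_byte() in the deleted code). A minimal sketch of that framing, assuming spi->bits_per_word has been set to 9 via spi_setup(); the function name is illustrative, and the on-stack buffer mirrors what the removed code did:

#include <linux/spi/spi.h>

static int panel_write_9bit(struct spi_device *spi, bool is_data, u8 value)
{
	u16 word = ((is_data ? 1 : 0) << 8) | value;	/* bit 8: D/C flag */
	struct spi_transfer xfer = {
		.len = 2,		/* one 9-bit word carried in a u16 */
		.tx_buf = &word,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	return spi_sync(spi, &msg);
}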
diff --git a/drivers/video/backlight/s6e63m0_gamma.h b/drivers/video/backlight/s6e63m0_gamma.h
deleted file mode 100644
index 2c44bdb0696b..000000000000
--- a/drivers/video/backlight/s6e63m0_gamma.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/* linux/drivers/video/samsung/s6e63m0_brightness.h
- *
- * Gamma level definitions.
- *
- * Copyright (c) 2009 Samsung Electronics
- * InKi Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _S6E63M0_BRIGHTNESS_H
-#define _S6E63M0_BRIGHTNESS_H
-
-#define MAX_GAMMA_LEVEL 11
-#define GAMMA_TABLE_COUNT 21
-
-/* gamma value: 2.2 */
-static const unsigned int s6e63m0_22_300[] = {
- 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6,
- 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0,
- 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb
-};
-
-static const unsigned int s6e63m0_22_280[] = {
- 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6,
- 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1,
- 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6
-};
-
-static const unsigned int s6e63m0_22_260[] = {
- 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6,
- 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2,
- 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1
-
-};
-
-static const unsigned int s6e63m0_22_240[] = {
- 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9,
- 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3,
- 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA
-
-};
-static const unsigned int s6e63m0_22_220[] = {
- 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8,
- 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4,
- 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2
-};
-
-static const unsigned int s6e63m0_22_200[] = {
- 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA,
- 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6,
- 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA
-};
-
-static const unsigned int s6e63m0_22_170[] = {
- 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB,
- 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8,
- 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB
-};
-
-static const unsigned int s6e63m0_22_140[] = {
- 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC,
- 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9,
- 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E
-};
-
-static const unsigned int s6e63m0_22_110[] = {
- 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF,
- 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC,
- 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D
-};
-
-static const unsigned int s6e63m0_22_90[] = {
- 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0,
- 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF,
- 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82
-};
-
-static const unsigned int s6e63m0_22_30[] = {
- 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8,
- 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7,
- 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51
-};
-
-/* gamma value: 1.9 */
-static const unsigned int s6e63m0_19_300[] = {
- 0x18, 0x08, 0x24, 0x61, 0x5F, 0x39, 0xBA,
- 0xBD, 0xAD, 0xB1, 0xB6, 0xA5, 0xC4, 0xC5,
- 0xBC, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB
-};
-
-static const unsigned int s6e63m0_19_280[] = {
- 0x18, 0x08, 0x24, 0x61, 0x60, 0x39, 0xBB,
- 0xBE, 0xAD, 0xB2, 0xB6, 0xA6, 0xC5, 0xC7,
- 0xBD, 0x00, 0x9B, 0x00, 0x9E, 0x00, 0xD5
-};
-
-static const unsigned int s6e63m0_19_260[] = {
- 0x18, 0x08, 0x24, 0x63, 0x61, 0x3B, 0xBA,
- 0xBE, 0xAC, 0xB3, 0xB8, 0xA7, 0xC6, 0xC8,
- 0xBD, 0x00, 0x96, 0x00, 0x98, 0x00, 0xCF
-};
-
-static const unsigned int s6e63m0_19_240[] = {
- 0x18, 0x08, 0x24, 0x67, 0x64, 0x3F, 0xBB,
- 0xBE, 0xAD, 0xB3, 0xB9, 0xA7, 0xC8, 0xC9,
- 0xBE, 0x00, 0x90, 0x00, 0x92, 0x00, 0xC8
-};
-
-static const unsigned int s6e63m0_19_220[] = {
- 0x18, 0x08, 0x24, 0x68, 0x64, 0x40, 0xBC,
- 0xBF, 0xAF, 0xB4, 0xBA, 0xA9, 0xC8, 0xCA,
- 0xBE, 0x00, 0x8B, 0x00, 0x8C, 0x00, 0xC0
-};
-
-static const unsigned int s6e63m0_19_200[] = {
- 0x18, 0x08, 0x24, 0x68, 0x64, 0x3F, 0xBE,
- 0xC0, 0xB0, 0xB6, 0xBB, 0xAB, 0xC8, 0xCB,
- 0xBF, 0x00, 0x85, 0x00, 0x86, 0x00, 0xB8
-};
-
-static const unsigned int s6e63m0_19_170[] = {
- 0x18, 0x08, 0x24, 0x69, 0x64, 0x40, 0xBF,
- 0xC1, 0xB0, 0xB9, 0xBE, 0xAD, 0xCB, 0xCD,
- 0xC2, 0x00, 0x7A, 0x00, 0x7B, 0x00, 0xAA
-};
-
-static const unsigned int s6e63m0_19_140[] = {
- 0x18, 0x08, 0x24, 0x6E, 0x65, 0x45, 0xC0,
- 0xC3, 0xB2, 0xBA, 0xBE, 0xAE, 0xCD, 0xD0,
- 0xC4, 0x00, 0x70, 0x00, 0x70, 0x00, 0x9C
-};
-
-static const unsigned int s6e63m0_19_110[] = {
- 0x18, 0x08, 0x24, 0x6F, 0x65, 0x46, 0xC2,
- 0xC4, 0xB3, 0xBF, 0xC2, 0xB2, 0xCF, 0xD1,
- 0xC6, 0x00, 0x64, 0x00, 0x64, 0x00, 0x8D
-};
-
-static const unsigned int s6e63m0_19_90[] = {
- 0x18, 0x08, 0x24, 0x74, 0x60, 0x4A, 0xC3,
- 0xC6, 0xB5, 0xBF, 0xC3, 0xB2, 0xD2, 0xD3,
- 0xC8, 0x00, 0x5B, 0x00, 0x5B, 0x00, 0x81
-};
-
-static const unsigned int s6e63m0_19_30[] = {
- 0x18, 0x08, 0x24, 0x84, 0x45, 0x4F, 0xCA,
- 0xCB, 0xBC, 0xC9, 0xCB, 0xBC, 0xDA, 0xDA,
- 0xD0, 0x00, 0x35, 0x00, 0x34, 0x00, 0x4E
-};
-
-/* gamma value: 1.7 */
-static const unsigned int s6e63m0_17_300[] = {
- 0x18, 0x08, 0x24, 0x70, 0x70, 0x4F, 0xBF,
- 0xC2, 0xB2, 0xB8, 0xBC, 0xAC, 0xCB, 0xCD,
- 0xC3, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB
-};
-
-static const unsigned int s6e63m0_17_280[] = {
- 0x18, 0x08, 0x24, 0x71, 0x71, 0x50, 0xBF,
- 0xC2, 0xB2, 0xBA, 0xBE, 0xAE, 0xCB, 0xCD,
- 0xC3, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6
-};
-
-static const unsigned int s6e63m0_17_260[] = {
- 0x18, 0x08, 0x24, 0x72, 0x72, 0x50, 0xC0,
- 0xC3, 0xB4, 0xB9, 0xBE, 0xAE, 0xCC, 0xCF,
- 0xC4, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1
-};
-
-static const unsigned int s6e63m0_17_240[] = {
- 0x18, 0x08, 0x24, 0x71, 0x72, 0x4F, 0xC2,
- 0xC4, 0xB5, 0xBB, 0xBF, 0xB0, 0xCC, 0xCF,
- 0xC3, 0x00, 0x91, 0x00, 0x95, 0x00, 0xCA
-};
-
-static const unsigned int s6e63m0_17_220[] = {
- 0x18, 0x08, 0x24, 0x71, 0x73, 0x4F, 0xC2,
- 0xC5, 0xB5, 0xBD, 0xC0, 0xB2, 0xCD, 0xD1,
- 0xC5, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2
-};
-
-static const unsigned int s6e63m0_17_200[] = {
- 0x18, 0x08, 0x24, 0x72, 0x75, 0x51, 0xC2,
- 0xC6, 0xB5, 0xBF, 0xC1, 0xB3, 0xCE, 0xD1,
- 0xC6, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA
-};
-
-static const unsigned int s6e63m0_17_170[] = {
- 0x18, 0x08, 0x24, 0x75, 0x77, 0x54, 0xC3,
- 0xC7, 0xB7, 0xC0, 0xC3, 0xB4, 0xD1, 0xD3,
- 0xC9, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB
-};
-
-static const unsigned int s6e63m0_17_140[] = {
- 0x18, 0x08, 0x24, 0x7B, 0x77, 0x58, 0xC3,
- 0xC8, 0xB8, 0xC2, 0xC6, 0xB6, 0xD3, 0xD4,
- 0xCA, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E
-};
-
-static const unsigned int s6e63m0_17_110[] = {
- 0x18, 0x08, 0x24, 0x81, 0x7B, 0x5D, 0xC6,
- 0xCA, 0xBB, 0xC3, 0xC7, 0xB8, 0xD6, 0xD8,
- 0xCD, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D
-};
-
-static const unsigned int s6e63m0_17_90[] = {
- 0x18, 0x08, 0x24, 0x82, 0x7A, 0x5B, 0xC8,
- 0xCB, 0xBD, 0xC5, 0xCA, 0xBA, 0xD6, 0xD8,
- 0xCE, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82
-};
-
-static const unsigned int s6e63m0_17_30[] = {
- 0x18, 0x08, 0x24, 0x8F, 0x73, 0x63, 0xD1,
- 0xD0, 0xC5, 0xCC, 0xD1, 0xC2, 0xDE, 0xE0,
- 0xD6, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51
-};
-
-struct s6e63m0_gamma {
- unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
- unsigned int *gamma_19_table[MAX_GAMMA_LEVEL];
- unsigned int *gamma_17_table[MAX_GAMMA_LEVEL];
-};
-
-static struct s6e63m0_gamma gamma_table = {
- .gamma_22_table[0] = (unsigned int *)&s6e63m0_22_30,
- .gamma_22_table[1] = (unsigned int *)&s6e63m0_22_90,
- .gamma_22_table[2] = (unsigned int *)&s6e63m0_22_110,
- .gamma_22_table[3] = (unsigned int *)&s6e63m0_22_140,
- .gamma_22_table[4] = (unsigned int *)&s6e63m0_22_170,
- .gamma_22_table[5] = (unsigned int *)&s6e63m0_22_200,
- .gamma_22_table[6] = (unsigned int *)&s6e63m0_22_220,
- .gamma_22_table[7] = (unsigned int *)&s6e63m0_22_240,
- .gamma_22_table[8] = (unsigned int *)&s6e63m0_22_260,
- .gamma_22_table[9] = (unsigned int *)&s6e63m0_22_280,
- .gamma_22_table[10] = (unsigned int *)&s6e63m0_22_300,
-
- .gamma_19_table[0] = (unsigned int *)&s6e63m0_19_30,
- .gamma_19_table[1] = (unsigned int *)&s6e63m0_19_90,
- .gamma_19_table[2] = (unsigned int *)&s6e63m0_19_110,
- .gamma_19_table[3] = (unsigned int *)&s6e63m0_19_140,
- .gamma_19_table[4] = (unsigned int *)&s6e63m0_19_170,
- .gamma_19_table[5] = (unsigned int *)&s6e63m0_19_200,
- .gamma_19_table[6] = (unsigned int *)&s6e63m0_19_220,
- .gamma_19_table[7] = (unsigned int *)&s6e63m0_19_240,
- .gamma_19_table[8] = (unsigned int *)&s6e63m0_19_260,
- .gamma_19_table[9] = (unsigned int *)&s6e63m0_19_280,
- .gamma_19_table[10] = (unsigned int *)&s6e63m0_19_300,
-
- .gamma_17_table[0] = (unsigned int *)&s6e63m0_17_30,
- .gamma_17_table[1] = (unsigned int *)&s6e63m0_17_90,
- .gamma_17_table[2] = (unsigned int *)&s6e63m0_17_110,
- .gamma_17_table[3] = (unsigned int *)&s6e63m0_17_140,
- .gamma_17_table[4] = (unsigned int *)&s6e63m0_17_170,
- .gamma_17_table[5] = (unsigned int *)&s6e63m0_17_200,
- .gamma_17_table[6] = (unsigned int *)&s6e63m0_17_220,
- .gamma_17_table[7] = (unsigned int *)&s6e63m0_17_240,
- .gamma_17_table[8] = (unsigned int *)&s6e63m0_17_260,
- .gamma_17_table[9] = (unsigned int *)&s6e63m0_17_280,
- .gamma_17_table[10] = (unsigned int *)&s6e63m0_17_300,
-};
-
-#endif
-
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index f103665cad43..40182ed85648 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -27,7 +27,6 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/console.h>
-#include <asm/io.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
@@ -401,7 +400,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
#endif /* CONFIG_PMAC_BACKLIGHT */
#ifdef CONFIG_PPC
- p->screen_base = __ioremap(addr, 0x200000, _PAGE_NO_CACHE);
+ p->screen_base = ioremap_wc(addr, 0x200000);
#else
p->screen_base = ioremap(addr, 0x200000);
#endif
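
The chipsfb change above, and the controlfb/platinumfb/valkyriefb diffs below, replace the powerpc-only __ioremap(..., _PAGE_NO_CACHE / _PAGE_WRITETHRU) calls with the generic ioremap_wc()/ioremap_wt() helpers, which name the caching mode portably instead of passing architecture page flags. A minimal sketch, with illustrative wrapper names:

#include <linux/io.h>

static void __iomem *map_fb_wc(phys_addr_t addr, size_t len)
{
	return ioremap_wc(addr, len);	/* write-combined, as in chipsfb */
}

static void __iomem *map_fb_wt(phys_addr_t addr, size_t len)
{
	return ioremap_wt(addr, len);	/* write-through, as in controlfb */
}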
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 8d14b29aafea..9cb0ef7ac29e 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -48,9 +48,7 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
-#include <asm/io.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/btext.h>
#include "macmodes.h"
@@ -715,8 +713,7 @@ static int __init control_of_init(struct device_node *dp)
goto error_out;
}
/* map at most 8MB for the frame buffer */
- p->frame_buffer = __ioremap(p->frame_buffer_phys, 0x800000,
- _PAGE_WRITETHRU);
+ p->frame_buffer = ioremap_wt(p->frame_buffer_phys, 0x800000);
if (!p->control_regs_phys ||
!request_mem_region(p->control_regs_phys, p->control_regs_size,
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index bc9eb8afc313..332a56b6811f 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1925,7 +1925,7 @@ static int __init fsl_diu_init(void)
pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n");
#ifdef CONFIG_NOT_COHERENT_CACHE
- np = of_find_node_by_type(NULL, "cpu");
+ np = of_get_cpu_node(0, NULL);
if (!np) {
pr_err("fsl-diu-fb: can't find 'cpu' device node\n");
return -ENODEV;
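
of_get_cpu_node(0, NULL) looks up CPU 0's node directly through the /cpus hierarchy instead of scanning for device_type = "cpu", a property that is deprecated and may be absent from newer device trees. A minimal sketch of the lookup, including the of_node_put() that balances the reference the helper takes (the property read in the comment is illustrative):

#include <linux/of.h>

static int find_cpu0(void)
{
	struct device_node *np = of_get_cpu_node(0, NULL);

	if (!np)
		return -ENODEV;
	/* ... read properties here, e.g. cache geometry ... */
	of_node_put(np);	/* drop the reference taken by the lookup */
	return 0;
}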
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index 377d3399a3ad..bf6b7fb83cf4 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -32,9 +32,7 @@
#include <linux/nvram.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <asm/io.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include "macmodes.h"
#include "platinumfb.h"
@@ -577,8 +575,7 @@ static int platinumfb_probe(struct platform_device* odev)
/* frame buffer - map only 4MB */
pinfo->frame_buffer_phys = pinfo->rsrc_fb.start;
- pinfo->frame_buffer = __ioremap(pinfo->rsrc_fb.start, 0x400000,
- _PAGE_WRITETHRU);
+ pinfo->frame_buffer = ioremap_wt(pinfo->rsrc_fb.start, 0x400000);
pinfo->base_frame_buffer = pinfo->frame_buffer;
/* registers */
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 275fb98236d3..d51c3a8009cb 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -54,13 +54,11 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
-#include <asm/io.h>
#ifdef CONFIG_MAC
#include <asm/macintosh.h>
#else
#include <asm/prom.h>
#endif
-#include <asm/pgtable.h>
#include "macmodes.h"
#include "valkyriefb.h"
@@ -318,7 +316,7 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
int __init valkyriefb_init(void)
{
struct fb_info_valkyrie *p;
- unsigned long frame_buffer_phys, cmap_regs_phys, flags;
+ unsigned long frame_buffer_phys, cmap_regs_phys;
int err;
char *option = NULL;
@@ -337,7 +335,6 @@ int __init valkyriefb_init(void)
/* Hardcoded addresses... welcome to 68k Macintosh country :-) */
frame_buffer_phys = 0xf9000000;
cmap_regs_phys = 0x50f24000;
- flags = IOMAP_NOCACHE_SER; /* IOMAP_WRITETHROUGH?? */
#else /* ppc (!CONFIG_MAC) */
{
struct device_node *dp;
@@ -354,7 +351,6 @@ int __init valkyriefb_init(void)
frame_buffer_phys = r.start;
cmap_regs_phys = r.start + 0x304000;
- flags = _PAGE_WRITETHRU;
}
#endif /* ppc (!CONFIG_MAC) */
@@ -369,7 +365,11 @@ int __init valkyriefb_init(void)
}
p->total_vram = 0x100000;
p->frame_buffer_phys = frame_buffer_phys;
- p->frame_buffer = __ioremap(frame_buffer_phys, p->total_vram, flags);
+#ifdef CONFIG_MAC
+ p->frame_buffer = ioremap_nocache(frame_buffer_phys, p->total_vram);
+#else
+ p->frame_buffer = ioremap_wt(frame_buffer_phys, p->total_vram);
+#endif
p->cmap_regs_phys = cmap_regs_phys;
p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
p->valkyrie_regs_phys = cmap_regs_phys+0x6000;
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 92500f6bdad1..520a5f9c27de 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1890,7 +1890,6 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
err_reg:
put_device(&vdev->dev);
- kfree(vdev);
err_devalloc:
list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
list_del(&vdev->drv_list);
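
Dropping the kfree() above fixes a double free on the error path: once device registration has been attempted, put_device() drops the final reference and the device's release callback frees the enclosing structure. A sketch of that ownership rule, assuming a release hook like the one the VME core provides (names here are illustrative stand-ins):

#include <linux/device.h>
#include <linux/slab.h>

struct vme_dev_sketch {		/* illustrative stand-in for vme_dev */
	struct device dev;
};

static void sketch_release(struct device *dev)
{
	kfree(container_of(dev, struct vme_dev_sketch, dev));
}

static int sketch_register(struct vme_dev_sketch *vdev)
{
	int err;

	vdev->dev.release = sketch_release;
	err = device_register(&vdev->dev);
	if (err) {
		put_device(&vdev->dev);	/* final ref: release() frees vdev */
		return err;		/* an extra kfree() here would double free */
	}
	return 0;
}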
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 83fc9aab34e8..3099052e1243 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -763,6 +763,8 @@ static int omap_hdq_remove(struct platform_device *pdev)
/* remove module dependency */
pm_runtime_disable(&pdev->dev);
+ w1_remove_master_device(&omap_w1_master);
+
return 0;
}
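
The added w1_remove_master_device() balances the w1_add_master_device() done in probe; without it the w1 core would keep a stale master registered after the platform device goes away. A sketch of the paired teardown, assuming the master API declarations live in <linux/w1.h> as in contemporary kernels (omap_w1_master is the driver's static master, as in the diff):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/w1.h>

static struct w1_bus_master omap_w1_master;	/* as set up in probe */

static int omap_hdq_remove_sketch(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	w1_remove_master_device(&omap_w1_master);	/* undo probe's add */
	return 0;
}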
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index bf641a191d07..7c4e33dbee4d 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -186,8 +186,8 @@ static int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
return -1;
}
-static uint16_t w1_ds2438_get_voltage(struct w1_slave *sl,
- int adc_input, uint16_t *voltage)
+static int w1_ds2438_get_voltage(struct w1_slave *sl,
+ int adc_input, uint16_t *voltage)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
@@ -235,6 +235,25 @@ post_unlock:
return ret;
}
+static int w1_ds2438_get_current(struct w1_slave *sl, int16_t *voltage)
+{
+ u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
+ int ret;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
+ /* The voltage measured across current sense resistor RSENS. */
+ *voltage = (((int16_t) w1_buf[DS2438_CURRENT_MSB]) << 8) | ((int16_t) w1_buf[DS2438_CURRENT_LSB]);
+ ret = 0;
+ } else
+ ret = -1;
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
static ssize_t iad_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
@@ -257,6 +276,27 @@ static ssize_t iad_write(struct file *filp, struct kobject *kobj,
return ret;
}
+static ssize_t iad_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int ret;
+ int16_t voltage;
+
+ if (off != 0)
+ return 0;
+ if (!buf)
+ return -EINVAL;
+
+ if (w1_ds2438_get_current(sl, &voltage) == 0) {
+ ret = snprintf(buf, count, "%i\n", voltage);
+ } else
+ ret = -EIO;
+
+ return ret;
+}
+
static ssize_t page0_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
@@ -272,9 +312,13 @@ static ssize_t page0_read(struct file *filp, struct kobject *kobj,
mutex_lock(&sl->master->bus_mutex);
+ /* Read no more than page0 size */
+ if (count > DS2438_PAGE_SIZE)
+ count = DS2438_PAGE_SIZE;
+
if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
- memcpy(buf, &w1_buf, DS2438_PAGE_SIZE);
- ret = DS2438_PAGE_SIZE;
+ memcpy(buf, &w1_buf, count);
+ ret = count;
} else
ret = -EIO;
@@ -289,7 +333,6 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
- ssize_t c = PAGE_SIZE;
int16_t temp;
if (off != 0)
@@ -298,8 +341,7 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (w1_ds2438_get_temperature(sl, &temp) == 0) {
- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", temp);
- ret = PAGE_SIZE - c;
+ ret = snprintf(buf, count, "%i\n", temp);
} else
ret = -EIO;
@@ -312,7 +354,6 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
- ssize_t c = PAGE_SIZE;
uint16_t voltage;
if (off != 0)
@@ -321,8 +362,7 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0) {
- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage);
- ret = PAGE_SIZE - c;
+ ret = snprintf(buf, count, "%u\n", voltage);
} else
ret = -EIO;
@@ -335,7 +375,6 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
- ssize_t c = PAGE_SIZE;
uint16_t voltage;
if (off != 0)
@@ -344,15 +383,14 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0) {
- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage);
- ret = PAGE_SIZE - c;
+ ret = snprintf(buf, count, "%u\n", voltage);
} else
ret = -EIO;
return ret;
}
-static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, NULL, iad_write, 1);
+static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, iad_read, iad_write, 0);
static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
static BIN_ATTR_RO(temperature, 0/* real length varies */);
static BIN_ATTR_RO(vad, 0/* real length varies */);
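
The reworked handlers above drop the PAGE_SIZE bookkeeping and format directly into the caller's buffer bounded by count, and page0_read now clamps count to the page size. A minimal sketch of a bin-attribute read following the same rules; example_get_value() is a hypothetical helper standing in for w1_ds2438_get_current() and friends:

#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/types.h>

static int example_get_value(struct kobject *kobj, int16_t *val)
{
	*val = 0;	/* stub; a real driver would read the hardware */
	return 0;
}

static ssize_t example_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t off, size_t count)
{
	int16_t val;

	if (off != 0)
		return 0;	/* the whole value is emitted at offset 0 */
	if (!buf)
		return -EINVAL;
	if (example_get_value(kobj, &val))
		return -EIO;
	return snprintf(buf, count, "%i\n", val);	/* short value, fits count */
}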